| repo_id (string) | file_path (string) | content (string) | __index_level_0__ (int64) |
|---|---|---|---|
hf_public_repos | hf_public_repos/tokenizers/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
... | 0 |
hf_public_repos | hf_public_repos/tokenizers/CITATION.cff | # This CITATION.cff file was generated with cffinit.
# Visit https://bit.ly/cffinit to generate yours today!
cff-version: 1.2.0
title: HuggingFace's Tokenizers
message: >-
Fast State-of-the-Art Tokenizers optimized for Research
and Production.
type: software
authors:
- given-names: Anthony
family-names: Moi
... | 0 |
hf_public_repos | hf_public_repos/tokenizers/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
<a href="https://github.com/huggingface/tokenizers/blob/main/LI... | 0 |
hf_public_repos | hf_public_repos/tokenizers/RELEASE.md | ## How to release
# Before the release
Simple checklist on how to make releases for `tokenizers`.
- Freeze `master` branch.
- Run all tests (Check CI has properly run)
- If any significant work, check benchmarks:
- `cd tokenizers && cargo bench` (needs to be run on latest release tag to measure difference if it's ... | 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/tokenizers/Cargo.toml | [package]
authors = ["Anthony MOI <m.anthony.moi@gmail.com>", "Nicolas Patry <patry.nicolas@protonmail.com>"]
edition = "2018"
name = "tokenizers"
version = "0.15.1-dev.0"
homepage = "https://github.com/huggingface/tokenizers"
repository = "https://github.com/huggingface/tokenizers"
documentation = "https://docs.rs/tok... | 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/tokenizers/rust-toolchain | stable
| 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/tokenizers/LICENSE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
... | 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/tokenizers/CHANGELOG.md | # Changelog
All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## [0.13.2]
- Python only changes
## [0.13.1]
- [#1072] Fixing ... | 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/tokenizers/Makefile | DATA_DIR = data
BENCHMARK_DIR = benches
TESTS_DIR = tests
dir_guard=@mkdir -p $(@D)
SHARED_RESOURCES = $(DATA_DIR)/gpt2-vocab.json $(DATA_DIR)/gpt2-merges.txt $(DATA_DIR)/bert-base-uncased-vocab.txt $(DATA_DIR)/big.txt $(DATA_DIR)/small.txt
BENCHMARK_RESOURCES = $(SHARED_RESOURCES)
TESTS_RESOURCES = $(SHARED_RESOURCE... | 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/tokenizers/README.md | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
<a href="https://github.com/huggingface/tokenizers/blob/master/... | 0 |
hf_public_repos/tokenizers | hf_public_repos/tokenizers/tokenizers/README.tpl | <p align="center">
<br>
<img src="https://huggingface.co/landing/assets/tokenizers/tokenizers-logo.png" width="600"/>
<br>
<p>
<p align="center">
<img alt="Build" src="https://github.com/huggingface/tokenizers/workflows/Rust/badge.svg">
<a href="https://github.com/huggingface/tokenizers/blob/master/... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/tests/added_tokens.rs | mod common;
use common::*;
use tokenizers::tokenizer::AddedToken;
#[test]
fn add_tokens() {
let mut tokenizer = get_empty();
assert_eq!(
tokenizer.add_special_tokens(&[
AddedToken::from("<cls>", true),
AddedToken::from("<sep>", true)
]),
2
);
assert_eq!... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/tests/unigram.rs | #[cfg(not(debug_assertions))]
use assert_approx_eq::assert_approx_eq;
use std::collections::HashMap;
use std::fs::read_to_string;
use std::path::Path;
#[cfg(not(debug_assertions))]
use tokenizers::models::unigram::Lattice;
use tokenizers::models::unigram::Unigram;
use tokenizers::models::unigram::UnigramTrainer;
use to... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/tests/documentation.rs | use tokenizers::models::bpe::{BpeTrainerBuilder, BPE};
use tokenizers::normalizers::{Sequence, Strip, NFC};
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::{AddedToken, TokenizerBuilder};
use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper};
use tokenizer... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/tests/serialization.rs | mod common;
use common::*;
use tokenizers::decoders::byte_level::ByteLevel;
use tokenizers::decoders::DecoderWrapper;
use tokenizers::models::bpe::BPE;
use tokenizers::models::wordlevel::WordLevel;
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::models::ModelWrapper;
use tokenizers::normalizers::bert::Be... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/tests/training.rs | use tokenizers::models::bpe::BPE;
use tokenizers::pre_tokenizers::whitespace::Whitespace;
use tokenizers::{DecoderWrapper, NormalizerWrapper, PostProcessorWrapper, PreTokenizerWrapper};
use tokenizers::{Model, Tokenizer, TokenizerBuilder};
#[test]
fn bpe_values_after_training() {
let mut tokenizer = TokenizerBuild... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/tests/from_pretrained.rs | #![cfg(feature = "http")]
use tokenizers::{FromPretrainedParameters, Result, Tokenizer};
#[test]
fn test_from_pretrained() -> Result<()> {
let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.ge... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/tests/offsets.rs | mod common;
use common::*;
use tokenizers::tokenizer::AddedToken;
macro_rules! check_offsets {
($input: expr, $output:expr, $offset:expr, $result:expr) => {
let offsets = $output.get_offsets()[$offset];
assert_eq!(&$input[offsets.0..offsets.1], $result);
};
}
#[test]
fn byte_level_basic() {
... | 0 |
hf_public_repos/tokenizers/tokenizers/tests | hf_public_repos/tokenizers/tokenizers/tests/common/mod.rs | use tokenizers::decoders::wordpiece::WordPiece as WordPieceDecoder;
use tokenizers::models::bpe::BPE;
use tokenizers::models::wordpiece::WordPiece;
use tokenizers::normalizers::bert::BertNormalizer;
use tokenizers::pre_tokenizers::bert::BertPreTokenizer;
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokeni... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/examples/serialization.rs | use tokenizers::models::wordpiece::WordPiece;
use tokenizers::{AddedToken, Tokenizer};
fn main() {
let start = std::time::Instant::now();
let mut tokenizer = Tokenizer::new(WordPiece::default());
// Mix special and not special
// You can make sure ids are in order, and special status is correct.
l... | 0 |
hf_public_repos/tokenizers/tokenizers/examples | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/Cargo.toml | [package]
name = "unstable_wasm"
version = "0.1.0"
authors = ["Nicolas Patry"]
edition = "2018"
[lib]
crate-type = ["cdylib", "rlib"]
[features]
default = ["console_error_panic_hook"]
[dependencies]
wasm-bindgen = "0.2.63"
# The `console_error_panic_hook` crate provides better debugging of panics by
# logging them ... | 0 |
hf_public_repos/tokenizers/tokenizers/examples | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/README.md | <div align="center">
<h1><code>wasm-pack-template</code></h1>
<strong>A template for kick starting a Rust and WebAssembly project using <a href="https://github.com/rustwasm/wasm-pack">wasm-pack</a>.</strong>
<p>
<a href="https://travis-ci.org/rustwasm/wasm-pack-template"><img src="https://img.shields.io/tr... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/tests/web.rs | //! Test suite for the Web and headless browsers.
#![cfg(target_arch = "wasm32")]
extern crate wasm_bindgen_test;
use wasm_bindgen_test::*;
wasm_bindgen_test_configure!(run_in_browser);
#[wasm_bindgen_test]
fn pass() {
assert_eq!(1 + 1, 2);
}
| 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/LICENSE-APACHE | Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution ... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/index.html | <!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello wasm-pack!</title>
</head>
<body>
<noscript>This page contains webassembly and javascript content, please enable javascript in your browser.</noscript>
<script src="./bootstrap.js"></script>
</body>
</html>
| 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/bootstrap.js | // A dependency graph that contains any wasm must all be imported
// asynchronously. This `bootstrap.js` file does the single async import, so
// that no one else needs to worry about it again.
import("./index.js")
.catch(e => console.error("Error importing `index.js`:", e));
| 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/index.js | import * as wasm from "unstable_wasm";
console.log(wasm.tokenize("ab"));
console.log(wasm.tokenize("abc"));
| 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/webpack.config.js | const CopyWebpackPlugin = require("copy-webpack-plugin");
const path = require('path');
module.exports = {
entry: "./bootstrap.js",
output: {
path: path.resolve(__dirname, "dist"),
filename: "bootstrap.js",
},
mode: "development",
plugins: [
new CopyWebpackPlugin(['index.html'])
],
};
| 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/README.md | <div align="center">
<h1><code>create-wasm-app</code></h1>
<strong>An <code>npm init</code> template for kick starting a project that uses NPM packages containing Rust-generated WebAssembly and bundles them with Webpack.</strong>
<p>
<a href="https://travis-ci.org/rustwasm/create-wasm-app"><img src="https:... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/package-lock.json | {
"name": "create-wasm-app",
"version": "0.1.0",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "create-wasm-app",
"version": "0.1.0",
"license": "(MIT OR Apache-2.0)",
"dependencies": {
"unstable_wasm": "fi... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/package.json | {
"name": "create-wasm-app",
"version": "0.1.0",
"description": "create an app to consume rust-generated wasm packages",
"main": "index.js",
"bin": {
"create-wasm-app": ".bin/create-wasm-app.js"
},
"scripts": {
"build": "webpack --config webpack.config.js",
"start": "... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/.travis.yml | language: node_js
node_js: "10"
script:
- ./node_modules/.bin/webpack
| 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/LICENSE-MIT | Copyright (c) [year] [name]
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicens... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/www/.bin/create-wasm-app.js | #!/usr/bin/env node
const { spawn } = require("child_process");
const fs = require("fs");
let folderName = '.';
if (process.argv.length >= 3) {
folderName = process.argv[2];
if (!fs.existsSync(folderName)) {
fs.mkdirSync(folderName);
}
}
const clone = spawn("git", ["clone", "https://github.com/rustwasm/cr... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/src/lib.rs | mod utils;
use tokenizers::models::bpe::{Vocab, BPE};
use tokenizers::Tokenizer;
use wasm_bindgen::prelude::*;
// When the `wee_alloc` feature is enabled, use `wee_alloc` as the global
// allocator.
#[cfg(feature = "wee_alloc")]
#[global_allocator]
static ALLOC: wee_alloc::WeeAlloc = wee_alloc::WeeAlloc::INIT;
#[was... | 0 |
hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm | hf_public_repos/tokenizers/tokenizers/examples/unstable_wasm/src/utils.rs | pub fn set_panic_hook() {
// When the `console_error_panic_hook` feature is enabled, we can call the
// `set_panic_hook` function at least once during initialization, and then
// we will get better error messages if our code ever panics.
//
// For more details see
// https://github.com/rustwasm/... | 0 |
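The `utils.rs` snippet above explains that enabling the `console_error_panic_hook` feature turns opaque wasm panics into readable console errors. A minimal, hedged sketch of how such a hook is typically installed and called once at start-up; the `init` entry point here is illustrative, not the example's actual code:

```rust
// Sketch under the assumption that `console_error_panic_hook` is a dependency
// and that the crate exposes a matching cargo feature.
pub fn set_panic_hook() {
    // When the feature is on, panics are reported through `console.error`
    // instead of the generic "unreachable executed" wasm trap.
    #[cfg(feature = "console_error_panic_hook")]
    console_error_panic_hook::set_once();
}

// Illustrative call site: install the hook once when the module starts.
pub fn init() {
    set_panic_hook();
}
```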
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/benches/layout_benchmark.rs | #[macro_use]
extern crate criterion;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::{Duration, Instant};
use criterion::black_box;
use criterion::Criterion;
use tokenizers::processors::template::TemplateProcessing;
use tokenizers::{EncodeInput, Encoding, PostProcessor, Token... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/benches/unigram_benchmark.rs | #[macro_use]
extern crate criterion;
use criterion::Criterion;
use std::collections::HashMap;
use std::fs::read_to_string;
use std::time::{Duration, Instant};
use tokenizers::models::unigram::Unigram;
use tokenizers::models::unigram::UnigramTrainer;
pub fn bench_train(c: &mut Criterion) {
let trainer = UnigramTra... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/benches/bpe_benchmark.rs | #[macro_use]
extern crate criterion;
mod common;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use criterion::Criterion;
use tokenizers::models::bpe::{BpeTrainerBuilder, BPE};
use tokenizers::models::TrainerWrapper;
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::p... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/benches/bert_benchmark.rs | #[macro_use]
extern crate criterion;
mod common;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use criterion::Criterion;
use tokenizers::models::wordpiece::{WordPiece, WordPieceTrainerBuilder};
use tokenizers::normalizers::{BertNormalizer, NormalizerWrapper};
use tokenizers::pre_tokenize... | 0 |
hf_public_repos/tokenizers/tokenizers/benches | hf_public_repos/tokenizers/tokenizers/benches/common/mod.rs | use std::time::{Duration, Instant};
use criterion::black_box;
use tokenizers::{
Decoder, EncodeInput, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerImpl, Trainer,
};
pub fn iter_bench_encode<M, N, PT, PP, D>(
iters: u64,
tokenizer: &TokenizerImpl<M, N, PT, PP, D>,
lines: &[EncodeInput],
) ... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/src/lib.rs | #![warn(clippy::all)]
#![allow(clippy::upper_case_acronyms)]
#![doc(html_favicon_url = "https://huggingface.co/favicon.ico")]
#![doc(html_logo_url = "https://huggingface.co/landing/assets/huggingface_logo.svg")]
//! The core of `tokenizers`, written in Rust.
//! Provides an implementation of today's most used tokenize... | 0 |
hf_public_repos/tokenizers/tokenizers | hf_public_repos/tokenizers/tokenizers/src/cli.rs | //!
//! This is the CLI binary for the Tokenizers project
//!
use clap::{Parser, Subcommand};
use std::io::{self, BufRead, Write};
use tokenizers::models::bpe::BPE;
use tokenizers::pre_tokenizers::byte_level::ByteLevel;
use tokenizers::tokenizer::{AddedToken, Result};
use tokenizers::Tokenizer;
/// Generate custom To... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/unicode.rs | use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::macro_rules_attribute;
#[derive(Default, Copy, Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct NFD;
impl Normalizer for NFD {
fn normalize(&self, normalized: &mut NormalizedString) -> Result<()> {
normalize... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/mod.rs | pub mod bert;
pub mod precompiled;
pub mod prepend;
pub mod replace;
pub mod strip;
pub mod unicode;
pub mod utils;
pub use crate::normalizers::bert::BertNormalizer;
pub use crate::normalizers::precompiled::Precompiled;
pub use crate::normalizers::prepend::Prepend;
pub use crate::normalizers::replace::Replace;
pub use... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/replace.rs | use crate::tokenizer::pattern::Pattern;
use crate::tokenizer::Decoder;
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::SysRegex;
use serde::{Deserialize, Serialize};
/// Represents the different patterns that `Replace` can use
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize, Eq... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/strip.rs | use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
use unicode_normalization_alignments::char::is_combining_mark;
#[derive(Copy, Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
#[non_exhaustive]
pub struct Strip {
... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/bert.rs | use crate::tokenizer::{NormalizedString, Normalizer, Result};
use serde::{Deserialize, Serialize};
use unicode_categories::UnicodeCategories;
/// Checks whether a character is whitespace
fn is_whitespace(c: char) -> bool {
// These are technically control characters but we count them as whitespace
match c {
... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/precompiled.rs | use crate::tokenizer::{NormalizedString, Normalizer, Result};
pub use spm_precompiled::Precompiled;
use std::cmp::Ordering;
use unicode_segmentation::UnicodeSegmentation;
fn replace(transformations: &mut Vec<(char, isize)>, old_part: &str, new_part: &str) {
let old_count = old_part.chars().count() as isize;
le... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/utils.rs | use serde::{Deserialize, Serialize};
use crate::normalizers::NormalizerWrapper;
use crate::tokenizer::{NormalizedString, Normalizer, Result};
use crate::utils::macro_rules_attribute;
#[derive(Clone, Deserialize, Debug, Serialize)]
#[serde(tag = "type")]
/// Allows concatenating multiple other Normalizer as a Sequence... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/normalizers/prepend.rs | use crate::tokenizer::{NormalizedString, Normalizer, Result};
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(tag = "type")]
pub struct Prepend {
pub prepend: String,
}
impl Prepend {
pub fn new(prepend: String) -> Self {
Self { prepend }
}
}
impl Norm... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/tokenizer/pattern.rs | use crate::utils::SysRegex;
use crate::{Offsets, Result};
use regex::Regex;
/// Pattern used to split a NormalizedString
pub trait Pattern {
/// Slice the given string in a list of pattern match positions, with
/// a boolean indicating whether this is a match or not.
///
/// This method *must* cover th... | 0 |
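The `Pattern` trait described above slices a string into spans, each flagged as a match or not, and together the spans must cover the whole input. A hedged, self-contained sketch of that contract for a single-character pattern; the `Offsets` alias and the function are illustrative stand-ins, not the crate's actual trait:

```rust
type Offsets = (usize, usize);

// Illustrative: split `inside` into contiguous spans and flag which spans
// matched the pattern. The spans cover the whole input, in order.
fn char_matches(pattern: char, inside: &str) -> Vec<(Offsets, bool)> {
    let mut splits = Vec::new();
    let mut prev = 0;
    for (idx, c) in inside.char_indices() {
        if c == pattern {
            if prev != idx {
                splits.push(((prev, idx), false));
            }
            splits.push(((idx, idx + c.len_utf8()), true));
            prev = idx + c.len_utf8();
        }
    }
    if prev != inside.len() {
        splits.push(((prev, inside.len()), false));
    }
    splits
}

fn main() {
    // "a b" -> "a" (no match), " " (match), "b" (no match)
    assert_eq!(
        char_matches(' ', "a b"),
        vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]
    );
}
```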
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/tokenizer/encoding.rs | use crate::parallelism::*;
use crate::tokenizer::{Offsets, Token};
use crate::utils::padding::PaddingDirection;
use crate::utils::truncation::TruncationDirection;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::ops::Range;
/// Represents the output of a `Tokenizer`.
#[derive(Default, Parti... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/tokenizer/serialization.rs | use std::marker::PhantomData;
use serde::{
self,
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use super::{added_vocabulary::AddedTokenWithId, TokenizerImpl};
use crate::{Decoder, Model, Normalizer, PostProcessor, PreTokenizer, TokenizerBui... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/tokenizer/mod.rs | //! Represents a tokenization pipeline.
//!
//! A [`Tokenizer`](struct.Tokenizer.html) is composed of some of the following parts.
//! - [`Normalizer`](trait.Normalizer.html): Takes care of the text normalization (like unicode normalization).
//! - [`PreTokenizer`](trait.PreTokenizer.html): Takes care of the pre to... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/tokenizer/added_vocabulary.rs | use super::{
normalizer::Range, Model, NormalizedString, Normalizer, Offsets, PreTokenizedString, Token,
};
use aho_corasick::{AhoCorasick, AhoCorasickBuilder, MatchKind};
use regex::Regex;
use serde::{ser::SerializeSeq, Deserialize, Serialize, Serializer};
use std::collections::{HashMap, HashSet};
/// Represent a... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/tokenizer/normalizer.rs | use crate::pattern::Pattern;
use crate::{Offsets, Result};
use std::ops::{Bound, RangeBounds};
use unicode_normalization_alignments::UnicodeNormalization;
use serde::{Deserialize, Serialize};
/// Add or Subtract a signed isize on a usize. Makes sure of avoiding
/// any subtraction overflow, flooring at 0.
macro_rul... | 0 |
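The comment above describes applying a signed `isize` offset to a `usize` index while flooring at 0 instead of underflowing. A hedged sketch of that behaviour as a plain function; the crate implements it as a macro, and the name `apply_signed` is illustrative:

```rust
// Illustrative only: apply a signed delta to an unsigned index, flooring at 0.
fn apply_signed(base: usize, delta: isize) -> usize {
    if delta >= 0 {
        base.saturating_add(delta as usize)
    } else {
        base.saturating_sub(delta.unsigned_abs())
    }
}

fn main() {
    assert_eq!(apply_signed(3, -5), 0); // floors at 0 instead of underflowing
    assert_eq!(apply_signed(3, 2), 5);
}
```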
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/tokenizer/pre_tokenizer.rs | use crate::{
normalizer::Range, Encoding, NormalizedString, OffsetReferential, Offsets, Result, Token,
};
use std::collections::HashMap;
/// Various possible types of offsets
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum OffsetType {
Byte,
Char,
}
/// Wrapper for a subpart of a `NormalizedString`.... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/cache.rs | use std::borrow::Borrow;
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::RwLock;
/// The default capacity for a `BPE`'s internal cache.
pub static DEFAULT_CACHE_CAPACITY: usize = 10_000;
/// Provides a simple multithread cache to speed up BPE tokenization that will try to read values
/// concurrent... | 0 |
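`cache.rs` above provides a multithread cache so BPE tokenization can reuse previously computed results across threads. A minimal sketch of such a cache built on `RwLock<HashMap>`; this is illustrative, not the crate's actual `Cache`, though like it the sketch bounds its size (compare `DEFAULT_CACHE_CAPACITY`):

```rust
use std::collections::HashMap;
use std::hash::Hash;
use std::sync::RwLock;

// Illustrative sketch: readers share the lock, writers take it exclusively.
// `try_read`/`try_write` avoid blocking the tokenization hot path on contention.
struct SimpleCache<K, V> {
    map: RwLock<HashMap<K, V>>,
    capacity: usize,
}

impl<K: Eq + Hash, V: Clone> SimpleCache<K, V> {
    fn new(capacity: usize) -> Self {
        Self { map: RwLock::new(HashMap::new()), capacity }
    }

    fn get(&self, key: &K) -> Option<V> {
        self.map.try_read().ok()?.get(key).cloned()
    }

    fn set(&self, key: K, value: V) {
        if let Ok(mut map) = self.map.try_write() {
            if map.len() < self.capacity {
                map.insert(key, value);
            }
        }
    }
}

fn main() {
    let cache = SimpleCache::new(8);
    cache.set("hello".to_string(), vec![1u32, 2, 3]);
    assert_eq!(cache.get(&"hello".to_string()), Some(vec![1, 2, 3]));
}
```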
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/fancy.rs | use fancy_regex::Regex;
use std::error::Error;
#[derive(Debug)]
pub struct SysRegex {
regex: Regex,
}
impl SysRegex {
pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> Matches<'r, 't> {
Matches(self.regex.find_iter(inside))
}
pub fn new(regex_str: &str) -> Result<Self, Box<dyn Error + Se... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/iter.rs | //! This comes from the Rust libcore and is duplicated here because it is not exported
//! (cf <https://github.com/rust-lang/rust/blob/25091ed9b7739e12466fb2490baa1e8a2815121c/src/libcore/iter/adapters/mod.rs#L2664>)
//! We are now using the version from <https://stackoverflow.com/questions/44544323/how-to-unzip-a-sequ... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/from_pretrained.rs | use crate::Result;
use hf_hub::{api::sync::ApiBuilder, Repo, RepoType};
use std::collections::HashMap;
use std::path::PathBuf;
/// Defines the additional parameters available for the `from_pretrained` function
#[derive(Debug, Clone)]
pub struct FromPretrainedParameters {
pub revision: String,
pub user_agent: Ha... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/padding.rs | use crate::parallelism::*;
use crate::tokenizer::{Encoding, Result};
use serde::{Deserialize, Serialize};
/// The various possible padding directions.
#[derive(Debug, Clone, Copy, Serialize, Deserialize)]
pub enum PaddingDirection {
Left,
Right,
}
impl std::convert::AsRef<str> for PaddingDirection {
fn as... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/parallelism.rs | //!
//! This module defines helpers to allow optional Rayon usage.
//!
use rayon::iter::IterBridge;
use rayon::prelude::*;
use rayon_cond::CondIterator;
// Re-export rayon current_num_threads
pub use rayon::current_num_threads;
pub const ENV_VARIABLE: &str = "TOKENIZERS_PARALLELISM";
// Reading/Writing this variabl... | 0 |
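The module above gates optional Rayon parallelism behind the `TOKENIZERS_PARALLELISM` environment variable. A hedged sketch of how such a check could drive the choice between sequential and parallel iteration; only the variable name comes from the source, the accepted values and the rest are illustrative:

```rust
// Sketch only: the crate wraps this choice behind `CondIterator`; here we just
// show how the environment variable could gate parallelism.
fn parallelism_enabled() -> bool {
    match std::env::var("TOKENIZERS_PARALLELISM") {
        Ok(v) => !matches!(v.to_lowercase().as_str(), "" | "0" | "false" | "off"),
        Err(_) => true, // default: parallelism allowed
    }
}

fn main() {
    let lines = vec!["hello world", "another line"];
    if parallelism_enabled() {
        // With the real crate this branch would use rayon's par_iter().
        println!("would encode {} lines in parallel", lines.len());
    } else {
        println!("would encode {} lines sequentially", lines.len());
    }
}
```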
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/mod.rs | pub(crate) mod cache;
#[cfg(feature = "http")]
pub(crate) mod from_pretrained;
#[cfg(feature = "unstable_wasm")]
mod fancy;
#[cfg(feature = "unstable_wasm")]
pub use fancy::SysRegex;
#[cfg(not(feature = "unstable_wasm"))]
mod onig;
#[cfg(not(feature = "unstable_wasm"))]
pub use crate::utils::onig::SysRegex;
pub mod i... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/progress.rs | #[cfg(feature = "progressbar")]
pub(crate) use indicatif::{ProgressBar, ProgressStyle};
#[cfg(not(feature = "progressbar"))]
mod progressbar {
use std::borrow::Cow;
pub struct ProgressBar;
impl ProgressBar {
pub fn new(_length: u64) -> Self {
Self {}
}
pub fn set_length... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/onig.rs | use crate::tokenizer::pattern::Pattern;
use crate::{Offsets, Result};
use onig::Regex;
use std::error::Error;
#[derive(Debug)]
pub struct SysRegex {
regex: Regex,
}
impl SysRegex {
pub fn find_iter<'r, 't>(&'r self, inside: &'t str) -> onig::FindMatches<'r, 't> {
self.regex.find_iter(inside)
}
... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/utils/truncation.rs | use crate::tokenizer::{Encoding, Result};
use serde::{Deserialize, Serialize};
use std::cmp;
use std::mem;
#[derive(Debug, Clone, Copy, PartialEq, Serialize, Deserialize, Eq, Default)]
pub enum TruncationDirection {
Left,
#[default]
Right,
}
impl std::convert::AsRef<str> for TruncationDirection {
fn a... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/models/mod.rs | //! Popular tokenizer models.
pub mod bpe;
pub mod unigram;
pub mod wordlevel;
pub mod wordpiece;
use std::collections::HashMap;
use std::path::{Path, PathBuf};
use serde::{Deserialize, Serialize, Serializer};
use crate::models::bpe::{BpeTrainer, BPE};
use crate::models::unigram::{Unigram, UnigramTrainer};
use crat... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/unigram/serialization.rs | use super::model::Unigram;
use serde::{
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
impl Serialize for Unigram {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model ... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/unigram/mod.rs | //! [Unigram](https://arxiv.org/abs/1804.10959) model.
mod lattice;
mod model;
mod serialization;
mod trainer;
mod trie;
pub use lattice::*;
pub use model::*;
pub use trainer::*;
| 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/unigram/trie.rs | use std::collections::HashMap;
use std::hash::Hash;
#[derive(Default)]
pub struct TrieBuilder<Label> {
trie: Trie<Label>,
}
impl<Label: Eq + Hash + Copy> TrieBuilder<Label> {
pub fn push(&mut self, element: &[Label]) {
self.trie.push(element);
}
pub fn build(self) -> Trie<Label> {
sel... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/unigram/trainer.rs | use crate::models::unigram::{lattice::Lattice, model::Unigram};
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::parallelism::*;
use crate::utils::progress::{ProgressBar, ProgressStyle};
use log::debug;
use serde::{Deserialize, Serialize};
use std::cmp::Reverse;
use std::collections::{HashMap, Has... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/unigram/lattice.rs | use rand::distributions::WeightedIndex;
use rand::prelude::*;
use std::cell::RefCell;
use std::cmp::{min, Ordering};
use std::collections::BinaryHeap;
use std::rc::Rc;
type NodeRef = Rc<RefCell<Node>>;
type HypothesisRef = Rc<RefCell<Hypothesis>>;
type Agenda = BinaryHeap<Hypothesis>;
struct Hypothesis {
node_ref... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/unigram/model.rs | use super::{
lattice::Lattice,
trainer::UnigramTrainer,
trie::{Trie, TrieBuilder},
};
use crate::tokenizer::{Model, Result, Token};
use crate::utils::cache::Cache;
use std::collections::HashMap;
use std::convert::TryInto;
use std::fs::read_to_string;
use std::path::{Path, PathBuf};
type TokenMap = HashMap... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/serialization.rs | use super::{super::OrderedVocabIter, convert_merges_to_hashmap, BpeBuilder, Pair, BPE};
use serde::{
de::{Error, MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashMap;
impl Serialize for BPE {
fn serialize<S>(&self, serializer: S) ... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/word.rs | use super::Pair;
use rand::{thread_rng, Rng};
use std::cmp::Ordering;
use std::collections::{BinaryHeap, HashMap};
#[derive(Debug, Eq)]
struct Merge {
pos: usize,
rank: u32,
new_id: u32,
}
impl PartialEq for Merge {
fn eq(&self, other: &Self) -> bool {
self.rank == other.rank && self.pos == ot... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/mod.rs | //! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
use std::{iter, mem};
mod model;
mod serialization;
pub mod trainer;
mod word;
type Pair = (u32, u32);
/// Errors that can be encountered while using or constructing a `BPE` model.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// ... | 0 |
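The BPE module above defines `type Pair = (u32, u32)`, the unit a merge operates on. A hedged sketch of a single merge step over a word of token ids; the helper is illustrative and not the crate's actual merge routine:

```rust
type Pair = (u32, u32);

// Illustrative BPE merge step: replace every occurrence of `pair` in `ids`
// with `new_id`. The real model ranks merges and repeats until no ranked
// pair remains in the word.
fn apply_merge(ids: &[u32], pair: Pair, new_id: u32) -> Vec<u32> {
    let mut out = Vec::with_capacity(ids.len());
    let mut i = 0;
    while i < ids.len() {
        if i + 1 < ids.len() && (ids[i], ids[i + 1]) == pair {
            out.push(new_id);
            i += 2;
        } else {
            out.push(ids[i]);
            i += 1;
        }
    }
    out
}

fn main() {
    // Merge the pair (1, 2) into token id 9.
    assert_eq!(apply_merge(&[1, 2, 3, 1, 2], (1, 2), 9), vec![9, 3, 9]);
}
```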
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/trainer.rs | #![allow(clippy::map_entry)]
use super::{Pair, WithFirstLastIterator, Word, BPE};
use crate::parallelism::*;
use crate::tokenizer::{AddedToken, Result, Trainer};
use crate::utils::progress::{ProgressBar, ProgressStyle};
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::{BinaryHeap, Has... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/bpe/model.rs | use super::{super::OrderedVocabIter, trainer::BpeTrainer, Error, Pair, Word};
use crate::tokenizer::{Model, Result, Token};
use crate::utils::cache::{Cache, DEFAULT_CACHE_CAPACITY};
use crate::utils::iter::ResultShunt;
use serde_json::Value;
use std::borrow::Cow;
use std::{
collections::HashMap,
fs::File,
i... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/serialization.rs | use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;
impl Serialize for WordPiece {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Er... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/mod.rs | //! [WordPiece](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf)
//! model.
use crate::models::bpe::BPE;
use crate::tokenizer::{Model, Result, Token};
use std::{
borrow::Cow,
collections::HashMap,
fs::File,
io::prelude::*,
io::{BufRead, BufReader},
path... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordpiece/trainer.rs | use super::WordPiece;
use crate::models::bpe::{BpeTrainer, BpeTrainerBuilder, BPE};
use crate::tokenizer::{AddedToken, Result, Trainer};
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
/// A `WordPieceTrainerBuilder` can be used to create a `WordPieceTrainer` with a custom
/// configuration.
pub st... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordlevel/serialization.rs | use super::{super::OrderedVocabIter, WordLevel, WordLevelBuilder};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;
impl Serialize for WordLevel {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Er... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordlevel/mod.rs | use super::OrderedVocabIter;
use crate::tokenizer::{Model, Result, Token};
use serde_json::Value;
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read, Write};
use std::path::{Path, PathBuf};
mod serialization;
mod trainer;
// Re-export
pub use trainer::*;
type Vocab = HashMap<String, u32>... | 0 |
hf_public_repos/tokenizers/tokenizers/src/models | hf_public_repos/tokenizers/tokenizers/src/models/wordlevel/trainer.rs | use super::WordLevel;
use crate::utils::parallelism::*;
use crate::{AddedToken, Result, Trainer};
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::HashMap;
#[non_exhaustive]
#[derive(Debug, Clone, Builder, Serialize, Deserialize)]
pub struct WordLevelTrainer {
/// The minimum freq... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/processors/sequence.rs | use crate::processors::PostProcessorWrapper;
use crate::tokenizer::{Encoding, PostProcessor, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
processors: Vec<PostProcessorWr... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/processors/roberta.rs | use crate::processors::byte_level::process_offsets;
use crate::tokenizer::{Encoding, PostProcessor, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::iter::FromIterator;
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)]
#[serde(tag = "type")]
pub struct RobertaProcessin... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/processors/mod.rs | pub mod bert;
pub mod roberta;
pub mod sequence;
pub mod template;
// Re-export these as processors
pub use super::pre_tokenizers::byte_level;
use serde::{Deserialize, Serialize};
use crate::pre_tokenizers::byte_level::ByteLevel;
use crate::processors::bert::BertProcessing;
use crate::processors::roberta::RobertaPro... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/processors/bert.rs | use crate::tokenizer::{Encoding, PostProcessor, Result};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::iter::FromIterator;
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq)]
#[serde(tag = "type")]
pub struct BertProcessing {
sep: (String, u32),
cls: (String, u32),
}
... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/processors/template.rs | //! # Template Processing
//!
//! Provides a way to specify templates in order to add the special tokens to each
//! input sequence as relevant.
//!
//! ## Example
//!
//! Let's take `BERT` tokenizer as an example. It uses two special tokens, used to
//! delimitate each sequence. `[CLS]` is always used at the beginning... | 0 |
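The template processor documented above attaches special tokens such as `[CLS]` and `[SEP]` around each sequence. A hedged usage sketch with the `TemplateProcessing` builder; it assumes the builder methods shown here match the crate's public API, and the token ids 1 and 2 are made up:

```rust
use tokenizers::processors::template::TemplateProcessing;

fn main() {
    // Assumed builder API: "$A" and "$B" stand for the first and second sequence,
    // ":1" assigns the pair's type id. The [CLS]/[SEP] ids are illustrative.
    let processor = TemplateProcessing::builder()
        .try_single("[CLS] $A [SEP]")
        .unwrap()
        .try_pair("[CLS] $A [SEP] $B:1 [SEP]:1")
        .unwrap()
        .special_tokens(vec![("[CLS]", 1), ("[SEP]", 2)])
        .build()
        .unwrap();
    let _ = processor;
}
```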
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/sequence.rs | use crate::decoders::DecoderWrapper;
use crate::tokenizer::{Decoder, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
decoders: Vec<DecoderWrapper>,
}
impl Sequence {
pub fn new(decod... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/wordpiece.rs | use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize)]
/// The WordPiece decoder takes care of decoding a list of wordpiece tokens
/// back into a readable string.
#[serde(tag = "type")]
#[non_exhaustive]
pub struct WordPiece {
/// The prefix ... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/ctc.rs | use crate::decoders::wordpiece;
use crate::tokenizer::{Decoder, Result};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize, Deserialize)]
/// The CTC (Connectionist Temporal Classification) decoder takes care
/// of sanitizing a list of input tokens.
/// Due to some align... | 0 |
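The CTC decoder above sanitizes model output where alignment produces duplicated tokens. A hedged sketch of the core collapse step (deduplicate consecutive tokens and drop the pad token); the function name and the `<pad>` literal are illustrative:

```rust
// Illustrative CTC-style cleanup: collapse consecutive repeats, drop the pad token.
// Note how the pad token still separates genuinely repeated letters ("l", "l").
fn ctc_collapse(tokens: &[&str], pad_token: &str) -> Vec<String> {
    let mut out: Vec<String> = Vec::new();
    let mut previous: Option<&str> = None;
    for &tok in tokens {
        if Some(tok) != previous {
            if tok != pad_token {
                out.push(tok.to_string());
            }
            previous = Some(tok);
        }
    }
    out
}

fn main() {
    let tokens = ["h", "h", "<pad>", "e", "e", "l", "<pad>", "l", "o"];
    assert_eq!(ctc_collapse(&tokens, "<pad>"), vec!["h", "e", "l", "l", "o"]);
}
```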
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/fuse.rs | use crate::tokenizer::{Decoder, Result};
use monostate::MustBe;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Serialize, Deserialize, Default)]
/// Fuse simply fuses all tokens into one big string.
/// It's usually the last decoding step anyway, but this
/// decoder exists in case some decoders need to ha... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/mod.rs | pub mod bpe;
pub mod byte_fallback;
pub mod ctc;
pub mod fuse;
pub mod sequence;
pub mod strip;
pub mod wordpiece;
// Re-export these as decoders
pub use super::pre_tokenizers::byte_level;
pub use super::pre_tokenizers::metaspace;
use serde::{Deserialize, Serialize};
use crate::decoders::bpe::BPEDecoder;
use crate::... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/bpe.rs | use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize)]
/// Allows decoding Original BPE by joining all the tokens and then replacing
/// the suffix used to identify end-of-words by whitespaces
#[serde(tag = "type")]
#[non_exhaustive]
pub struct BP... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/strip.rs | use crate::tokenizer::{Decoder, Result};
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize, Default)]
/// Strip is a simple trick which converts tokens looking like `<0x61>`
/// to pure bytes, and attempts to make them into a string. If the tokens
/// cannot be decoded you will get � ... | 0 |
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/decoders/byte_fallback.rs | use crate::tokenizer::{Decoder, Result};
use monostate::MustBe;
use serde::{Deserialize, Serialize};
#[derive(Deserialize, Clone, Debug, Serialize, Default)]
/// ByteFallback is a simple trick which converts tokens looking like `<0x61>`
/// to pure bytes, and attempts to make them into a string. If the tokens
/// can... | 0 |
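ByteFallback, as described above, turns tokens of the form `<0x61>` back into raw bytes and then into text, producing � where the bytes are not valid UTF-8. A hedged sketch of that parsing step; this is illustrative, not the crate's decoder:

```rust
// Illustrative sketch: parse `<0xNN>` tokens into bytes, fall back to the
// token text otherwise, and lossily convert accumulated bytes to a string.
fn byte_fallback_decode(tokens: &[&str]) -> String {
    let mut output = String::new();
    let mut pending: Vec<u8> = Vec::new();
    let flush = |pending: &mut Vec<u8>, output: &mut String| {
        if !pending.is_empty() {
            // Invalid UTF-8 sequences become the replacement character �.
            output.push_str(&String::from_utf8_lossy(pending.as_slice()));
            pending.clear();
        }
    };
    for &tok in tokens {
        if tok.len() == 6 && tok.starts_with("<0x") && tok.ends_with('>') {
            if let Ok(byte) = u8::from_str_radix(&tok[3..5], 16) {
                pending.push(byte);
                continue;
            }
        }
        flush(&mut pending, &mut output);
        output.push_str(tok);
    }
    flush(&mut pending, &mut output);
    output
}

fn main() {
    assert_eq!(byte_fallback_decode(&["<0x61>", "<0x62>", "c"]), "abc");
}
```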
hf_public_repos/tokenizers/tokenizers/src | hf_public_repos/tokenizers/tokenizers/src/pre_tokenizers/sequence.rs | use crate::pre_tokenizers::PreTokenizerWrapper;
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result};
use crate::utils::macro_rules_attribute;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, PartialEq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Sequence {
pretokenizers: Vec<PreT... | 0 |