A SentencePiece-based tokenizer for production use with AI21's models
- If you wish to use the tokenizers for Jamba Mini or Jamba Large, you will need to request access to the relevant model's HuggingFace repo:
pip install ai21-tokenizer
poetry add ai21-tokenizer
from ai21_tokenizer import Tokenizer

# Build the default tokenizer (Jamba Mini unless another model is requested).
tokenizer = Tokenizer.get_tokenizer()

# Turn a string into its sequence of token IDs.
sample = "Hello, world!"
encoded = tokenizer.encode(sample)
print(f"Encoded: {encoded}")

# Map the token IDs back to the original string.
decoded = tokenizer.decode(encoded)
print(f"Decoded: {decoded}")
from ai21_tokenizer import PreTrainedTokenizers, Tokenizer

# Explicitly request the Jamba Mini tokenizer.
tokenizer = Tokenizer.get_tokenizer(PreTrainedTokenizers.JAMBA_MINI_TOKENIZER)

# Explicitly request the Jamba Large tokenizer.
tokenizer = Tokenizer.get_tokenizer(PreTrainedTokenizers.JAMBA_LARGE_TOKENIZER)
import asyncio

from ai21_tokenizer import Tokenizer

async def main():
    # The async tokenizer mirrors the sync API with awaitable calls.
    tokenizer = await Tokenizer.get_async_tokenizer()

    text = "Hello, world!"
    encoded = await tokenizer.encode(text)
    decoded = await tokenizer.decode(encoded)

    print(f"Original: {text}")
    print(f"Encoded: {encoded}")
    print(f"Decoded: {decoded}")

asyncio.run(main())
# Convert between tokens and IDs
# (continues the earlier example: `tokenizer` and `encoded` come from the
# quickstart snippet above)
tokens = tokenizer.convert_ids_to_tokens(encoded)
print(f"Tokens: {tokens}")
# Round-trip: the token strings map back to the same IDs.
ids = tokenizer.convert_tokens_to_ids(tokens)
print(f"IDs: {ids}")
from ai21_tokenizer import SyncJambaTokenizer

# Point the tokenizer at a SentencePiece model file on local disk.
tokenizer_file = "/path/to/your/tokenizer.model"
tokenizer = SyncJambaTokenizer(model_path=tokenizer_file)

sample = "Hello, world!"
token_ids = tokenizer.encode(sample)
round_trip = tokenizer.decode(token_ids)
# Fix: `import asyncio` was missing from this snippet — the earlier import
# lives in a different example, so this one failed with NameError at
# `asyncio.run` when copied on its own.
import asyncio

from ai21_tokenizer import AsyncJambaTokenizer

async def main():
    """Encode and decode a string with a tokenizer loaded from a local model file."""
    # Async construction goes through the `create` factory coroutine
    # rather than the constructor.
    model_path = "/path/to/your/tokenizer.model"
    tokenizer = await AsyncJambaTokenizer.create(model_path=model_path)

    text = "Hello, world!"
    encoded = await tokenizer.encode(text)
    decoded = await tokenizer.decode(encoded)

asyncio.run(main())
For more examples, please see our examples folder.