# Model Card for Intel/tiny-random-gpt2

This is a tiny random GPT-2 model derived from "openai-community/gpt2". Because its weights are random, it is not meant for quality generation; it is useful for functional testing of optimum-intel.

Load the model and tokenizer directly:

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Intel/tiny-random-gpt2")
model = AutoModelForCausalLM.from_pretrained("Intel/tiny-random-gpt2")
```
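As a quick smoke test, the loaded objects can be run through the standard `generate` API. The prompt and token count below are arbitrary, and the output will be gibberish because the weights are random:

```python
# Illustrative smoke test: the output is meaningless (random weights),
# but it verifies that tokenization, the forward pass, and decoding work.
inputs = tokenizer("Hello, world", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```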
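For the optimum-intel use case mentioned above, a minimal sketch with the OpenVINO backend might look like the following. `OVModelForCausalLM` and the `export=True` flag are optimum-intel's OpenVINO loading API as commonly documented; this particular usage is an assumption of this card's intent, not something the card prescribes:

```python
# Minimal optimum-intel sketch (assumption: OpenVINO backend installed,
# e.g. pip install "optimum[openvino]").
# OVModelForCausalLM mirrors the transformers API; export=True converts
# the PyTorch checkpoint to OpenVINO IR on load.
from optimum.intel import OVModelForCausalLM
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("Intel/tiny-random-gpt2")
ov_model = OVModelForCausalLM.from_pretrained("Intel/tiny-random-gpt2", export=True)

inputs = tokenizer("Hello, world", return_tensors="pt")
outputs = ov_model.generate(**inputs, max_new_tokens=10)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```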
Alternatively, use a pipeline as a high-level helper:

```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="Intel/tiny-random-gpt2")
```
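Calling the pipeline returns a list of dicts with a `generated_text` field; the prompt and token count below are illustrative:

```python
# Example call; output text is gibberish because the weights are random.
result = pipe("Hello, world", max_new_tokens=10)
print(result[0]["generated_text"])
```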