
Commit 918c2e4

added example
1 parent 4710c61 commit 918c2e4

File tree

1 file changed: +231 −0 lines

example_no_internet.ipynb

Lines changed: 231 additions & 0 deletions
@@ -0,0 +1,231 @@
{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "eba9e610",
   "metadata": {},
   "source": [
    "A simple way to transcribe without an internet connection is to first load the model version you want to use while you are still online; the downloaded weights are then cached locally. See [here](https://github.com/openai/whisper/blob/main/README.md#available-models-and-languages) for more info on the available models."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "85cd2d12",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Whisper(\n",
       "  (encoder): AudioEncoder(\n",
       "    (conv1): Conv1d(80, 1024, kernel_size=(3,), stride=(1,), padding=(1,))\n",
       "    (conv2): Conv1d(1024, 1024, kernel_size=(3,), stride=(2,), padding=(1,))\n",
       "    (blocks): ModuleList(\n",
       "      (0-23): 24 x ResidualAttentionBlock(\n",
       "        (attn): MultiHeadAttention(\n",
       "          (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
       "          (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (mlp): Sequential(\n",
       "          (0): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "          (1): GELU(approximate='none')\n",
       "          (2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "        )\n",
       "        (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "    )\n",
       "    (ln_post): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "  )\n",
       "  (decoder): TextDecoder(\n",
       "    (token_embedding): Embedding(51865, 1024)\n",
       "    (blocks): ModuleList(\n",
       "      (0-23): 24 x ResidualAttentionBlock(\n",
       "        (attn): MultiHeadAttention(\n",
       "          (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
       "          (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (cross_attn): MultiHeadAttention(\n",
       "          (query): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (key): Linear(in_features=1024, out_features=1024, bias=False)\n",
       "          (value): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "          (out): Linear(in_features=1024, out_features=1024, bias=True)\n",
       "        )\n",
       "        (cross_attn_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "        (mlp): Sequential(\n",
       "          (0): Linear(in_features=1024, out_features=4096, bias=True)\n",
       "          (1): GELU(approximate='none')\n",
       "          (2): Linear(in_features=4096, out_features=1024, bias=True)\n",
       "        )\n",
       "        (mlp_ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "      )\n",
       "    )\n",
       "    (ln): LayerNorm((1024,), eps=1e-05, elementwise_affine=True)\n",
       "  )\n",
       ")"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import whisper\n",
    "# change the model size; bigger is more accurate but slower\n",
    "whisper.load_model(\"medium\")  # options: base, small, medium, large"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0d2acd54",
   "metadata": {},
   "outputs": [],
   "source": [
    "# After the model has loaded, you can disconnect from the internet and run the rest."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a2cd4050",
   "metadata": {},
   "outputs": [],
   "source": [
    "from transcribe import transcribe"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "24e1d24e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Help on function transcribe in module transcribe:\n",
      "\n",
      "transcribe(path, file_type, model=None, language=None, verbose=True)\n",
      "    Implementation of OpenAI's whisper model. Downloads model, transcribes audio files in a folder and returns the text files with transcriptions\n",
      "\n"
     ]
    }
   ],
   "source": [
    "help(transcribe)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "e52477fb",
   "metadata": {},
   "outputs": [],
   "source": [
    "path = 'sample_audio/'  # folder containing the audio files\n",
    "file_type = 'ogg'  # check your files' type; only files of this type are transcribed, e.g. 'ogg', 'WAV'\n",
    "model = 'medium'  # 'small', 'medium', 'large' (trade-off between speed and accuracy)\n",
    "language = None  # None tries to auto-detect; other options include 'English', 'Spanish', etc.\n",
    "verbose = True  # print the text while transcribing; set to False to deactivate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "d66866af",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Using medium model, you can change this by specifying model=\"medium\" for example\n",
      "Only looking for file type ogg, you can change this by specifying file_type=\"mp3\"\n",
      "Expecting None language, you can change this by specifying language=\"English\". None will try to auto-detect\n",
      "Verbosity is True. If TRUE it will print out the text as it is transcribed, you can turn this off by setting verbose=False\n",
      "\n",
      "There are 2 ogg files in path: sample_audio/\n",
      "\n",
      "\n",
      "Loading model...\n",
      "Transcribing file number number 1: Armstrong_Small_Step\n",
      "Model and file loaded...\n",
      "Starting transcription...\n",
      "\n",
      "Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
      "Detected language: English\n",
      "[00:00.000 --> 00:24.000] That's one small step for man, one giant leap for mankind.\n",
      "\n",
      "Finished file number 1.\n",
      "\n",
      "\n",
      "\n",
      "Transcribing file number number 2: Axel_Pettersson_röstinspelning\n",
      "Model and file loaded...\n",
      "Starting transcription...\n",
      "\n",
      "Detecting language using up to the first 30 seconds. Use `--language` to specify the language\n",
      "Detected language: Swedish\n",
      "[00:00.000 --> 00:16.000] Hej, jag heter Axel Pettersson, jag föddes i Örebro 1976. Jag har varit Wikipedia sen 2008 och jag har översatt röstintroduktionsprojektet till svenska.\n",
      "\n",
      "Finished file number 2.\n",
      "\n",
      "\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'Finished transcription, files can be found in sample_audio/transcriptions'"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "transcribe(path, file_type, model, language, verbose)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0bc67265",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
