freddyaboulton (HF Staff) committed
Commit 8f3f1d5 · verified · 1 parent: 4008baa

Commit 3: Add 19 file(s)

demos/reverse_audio/screenshot.png ADDED
demos/stream_audio/requirements.txt ADDED
@@ -0,0 +1 @@
+ numpy
demos/stream_audio/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "def add_to_stream(audio, instream):\n", " if audio is None:\n", " return gr.Audio(), instream\n", " if instream is None:\n", " ret = audio\n", " else:\n", " ret = (audio[0], np.concatenate((instream[1], audio[1])))\n", " return ret, ret\n", "\n", "with gr.Blocks() as demo:\n", " inp = gr.Audio(sources=[\"microphone\"])\n", " out = gr.Audio()\n", " stream = gr.State()\n", " clear = gr.Button(\"Clear\")\n", "\n", " inp.stream(add_to_stream, [inp, stream], [out, stream])\n", " clear.click(lambda: [None, None, None], None, [inp, out, stream])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/stream_audio/run.py ADDED
@@ -0,0 +1,23 @@
+ import gradio as gr
+ import numpy as np
+
+ def add_to_stream(audio, instream):
+     if audio is None:
+         return gr.Audio(), instream
+     if instream is None:
+         ret = audio
+     else:
+         ret = (audio[0], np.concatenate((instream[1], audio[1])))
+     return ret, ret
+
+ with gr.Blocks() as demo:
+     inp = gr.Audio(sources=["microphone"])
+     out = gr.Audio()
+     stream = gr.State()
+     clear = gr.Button("Clear")
+
+     inp.stream(add_to_stream, [inp, stream], [out, stream])
+     clear.click(lambda: [None, None, None], None, [inp, out, stream])
+
+ if __name__ == "__main__":
+     demo.launch()
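
Note on the stream_audio demo above: Gradio's numpy audio format is a (sample_rate, samples) tuple, which is why add_to_stream keeps audio[0] and concatenates only the sample arrays. A minimal standalone sketch of that accumulation, using hypothetical silent chunks:

import numpy as np

sample_rate = 16_000  # hypothetical rate
chunk_a = (sample_rate, np.zeros(sample_rate, dtype=np.int16))  # 1 s chunk
chunk_b = (sample_rate, np.zeros(sample_rate, dtype=np.int16))  # next 1 s chunk

# Same accumulation as add_to_stream: keep the rate, append the samples.
stream = (chunk_a[0], np.concatenate((chunk_a[1], chunk_b[1])))
assert stream[1].shape[0] == 2 * sample_rate
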
demos/stream_audio_out/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_audio_out"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from pydub import AudioSegment\n", "from time import sleep\n", "import os\n", "import tempfile\n", "from pathlib import Path\n", "\n", "with gr.Blocks() as demo:\n", " input_audio = gr.Audio(label=\"Input Audio\", type=\"filepath\", format=\"mp3\")\n", " with gr.Row():\n", " with gr.Column():\n", " stream_as_file_btn = gr.Button(\"Stream as File\")\n", " format = gr.Radio([\"wav\", \"mp3\"], value=\"wav\", label=\"Format\")\n", " stream_as_file_output = gr.Audio(streaming=True, elem_id=\"stream_as_file_output\", autoplay=True, visible=False)\n", "\n", " def stream_file(audio_file, format):\n", " audio = AudioSegment.from_file(audio_file)\n", " i = 0\n", " chunk_size = 1000\n", " while chunk_size * i < len(audio):\n", " chunk = audio[chunk_size * i : chunk_size * (i + 1)]\n", " i += 1\n", " if chunk:\n", " file = Path(tempfile.gettempdir()) / \"stream_audio_demo\" / f\"{i}.{format}\"\n", " file.parent.mkdir(parents=True, exist_ok=True)\n", " chunk.export(str(file), format=format)\n", " yield file\n", " sleep(0.5)\n", "\n", " stream_as_file_btn.click(\n", " stream_file, [input_audio, format], stream_as_file_output\n", " )\n", "\n", " gr.Examples(\n", " [[gr.get_audio(\"cantina.wav\"), \"wav\"],\n", " [gr.get_audio(\"cantina.wav\"), \"mp3\"]],\n", " [input_audio, format],\n", " fn=stream_file,\n", " outputs=stream_as_file_output,\n", " cache_examples=False,\n", " )\n", "\n", " with gr.Column():\n", " stream_as_bytes_btn = gr.Button(\"Stream as Bytes\")\n", " stream_as_bytes_output = gr.Audio(streaming=True, elem_id=\"stream_as_bytes_output\", autoplay=True)\n", "\n", " def stream_bytes(audio_file):\n", " chunk_size = 20_000\n", " with open(audio_file, \"rb\") as f:\n", " while True:\n", " chunk = f.read(chunk_size)\n", " if chunk:\n", " yield chunk\n", " sleep(1)\n", " else:\n", " break\n", " stream_as_bytes_btn.click(stream_bytes, input_audio, stream_as_bytes_output)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/stream_audio_out/run.py ADDED
@@ -0,0 +1,60 @@
+ import gradio as gr
+ from pydub import AudioSegment
+ from time import sleep
+ import os
+ import tempfile
+ from pathlib import Path
+
+ with gr.Blocks() as demo:
+     input_audio = gr.Audio(label="Input Audio", type="filepath", format="mp3")
+     with gr.Row():
+         with gr.Column():
+             stream_as_file_btn = gr.Button("Stream as File")
+             format = gr.Radio(["wav", "mp3"], value="wav", label="Format")
+             stream_as_file_output = gr.Audio(streaming=True, elem_id="stream_as_file_output", autoplay=True, visible=False)
+
+             def stream_file(audio_file, format):
+                 audio = AudioSegment.from_file(audio_file)
+                 i = 0
+                 chunk_size = 1000
+                 while chunk_size * i < len(audio):
+                     chunk = audio[chunk_size * i : chunk_size * (i + 1)]
+                     i += 1
+                     if chunk:
+                         file = Path(tempfile.gettempdir()) / "stream_audio_demo" / f"{i}.{format}"
+                         file.parent.mkdir(parents=True, exist_ok=True)
+                         chunk.export(str(file), format=format)
+                         yield file
+                         sleep(0.5)
+
+             stream_as_file_btn.click(
+                 stream_file, [input_audio, format], stream_as_file_output
+             )
+
+             gr.Examples(
+                 [[gr.get_audio("cantina.wav"), "wav"],
+                  [gr.get_audio("cantina.wav"), "mp3"]],
+                 [input_audio, format],
+                 fn=stream_file,
+                 outputs=stream_as_file_output,
+                 cache_examples=False,
+             )
+
+         with gr.Column():
+             stream_as_bytes_btn = gr.Button("Stream as Bytes")
+             stream_as_bytes_output = gr.Audio(streaming=True, elem_id="stream_as_bytes_output", autoplay=True)
+
+             def stream_bytes(audio_file):
+                 chunk_size = 20_000
+                 with open(audio_file, "rb") as f:
+                     while True:
+                         chunk = f.read(chunk_size)
+                         if chunk:
+                             yield chunk
+                             sleep(1)
+                         else:
+                             break
+             stream_as_bytes_btn.click(stream_bytes, input_audio, stream_as_bytes_output)
+
+ if __name__ == "__main__":
+     demo.launch()
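
Note on the stream_as_file path above: pydub's AudioSegment is indexed in milliseconds, so chunk_size = 1000 yields roughly one-second chunks. A minimal sketch of that slicing loop in isolation, assuming a local file sample.wav exists:

from pydub import AudioSegment

audio = AudioSegment.from_file("sample.wav")  # hypothetical local file
chunk_ms = 1000  # AudioSegment indices are milliseconds
chunks = [audio[start : start + chunk_ms] for start in range(0, len(audio), chunk_ms)]
print(f"{len(chunks)} chunks of ~1 s each")
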
demos/stream_frames/requirements.txt ADDED
@@ -0,0 +1 @@
+ numpy
demos/stream_frames/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stream_frames"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio numpy "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "def flip(im):\n", " return np.flipud(im)\n", "\n", "demo = gr.Interface(\n", " flip,\n", " gr.Image(sources=[\"webcam\"], streaming=True),\n", " \"image\",\n", " live=True,\n", " api_name=\"predict\",\n", ")\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/stream_frames/run.py ADDED
@@ -0,0 +1,15 @@
+ import gradio as gr
+ import numpy as np
+
+ def flip(im):
+     return np.flipud(im)
+
+ demo = gr.Interface(
+     flip,
+     gr.Image(sources=["webcam"], streaming=True),
+     "image",
+     live=True,
+     api_name="predict",
+ )
+ if __name__ == "__main__":
+     demo.launch()
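
Note on stream_frames above: np.flipud reverses the row order of each webcam frame, i.e. a vertical flip. A quick check with a tiny hypothetical frame:

import numpy as np

frame = np.arange(12, dtype=np.uint8).reshape(2, 2, 3)  # 2x2 RGB frame (hypothetical)
flipped = np.flipud(frame)
assert (flipped[0] == frame[1]).all()  # first row is now the old last row
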
demos/stt_or_tts/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: stt_or_tts"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "tts_examples = [\n", " \"I love learning machine learning\",\n", " \"How do you do?\",\n", "]\n", "\n", "tts_demo = gr.load(\n", " \"huggingface/facebook/fastspeech2-en-ljspeech\",\n", " title=None,\n", " examples=tts_examples,\n", " description=\"Give me something to say!\",\n", " cache_examples=False\n", ")\n", "\n", "stt_demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=None,\n", " inputs=gr.Microphone(type=\"filepath\"),\n", " description=\"Let me try to guess what you're saying!\",\n", " cache_examples=False\n", ")\n", "\n", "demo = gr.TabbedInterface([tts_demo, stt_demo], [\"Text-to-speech\", \"Speech-to-text\"])\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/stt_or_tts/run.py ADDED
@@ -0,0 +1,27 @@
+ import gradio as gr
+
+ tts_examples = [
+     "I love learning machine learning",
+     "How do you do?",
+ ]
+
+ tts_demo = gr.load(
+     "huggingface/facebook/fastspeech2-en-ljspeech",
+     title=None,
+     examples=tts_examples,
+     description="Give me something to say!",
+     cache_examples=False
+ )
+
+ stt_demo = gr.load(
+     "huggingface/facebook/wav2vec2-base-960h",
+     title=None,
+     inputs=gr.Microphone(type="filepath"),
+     description="Let me try to guess what you're saying!",
+     cache_examples=False
+ )
+
+ demo = gr.TabbedInterface([tts_demo, stt_demo], ["Text-to-speech", "Speech-to-text"])
+
+ if __name__ == "__main__":
+     demo.launch()
demos/video_component/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: video_component"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "# get_video() returns the file path to sample videos included with Gradio\n", "from gradio.media import get_video\n", "\n", "demo = gr.Interface(\n", " fn=lambda x: x,\n", " inputs=gr.Video(),\n", " outputs=gr.Video(),\n", " examples=[\n", " [get_video(\"world.mp4\")],\n", " [get_video(\"a.mp4\")],\n", " [get_video(\"b.mp4\")],\n", " ],\n", " cache_examples=True,\n", " api_name=\"predict\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/video_component/run.py ADDED
@@ -0,0 +1,19 @@
+ import gradio as gr
+ # get_video() returns the file path to sample videos included with Gradio
+ from gradio.media import get_video
+
+ demo = gr.Interface(
+     fn=lambda x: x,
+     inputs=gr.Video(),
+     outputs=gr.Video(),
+     examples=[
+         [get_video("world.mp4")],
+         [get_video("a.mp4")],
+         [get_video("b.mp4")],
+     ],
+     cache_examples=True,
+     api_name="predict"
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
demos/zip_files/run.ipynb ADDED
@@ -0,0 +1 @@
+ {"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: zip_files"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["from zipfile import ZipFile\n", "\n", "import gradio as gr\n", "\n", "def zip_files(files):\n", " with ZipFile(\"tmp.zip\", \"w\") as zip_obj:\n", " for file in files:\n", " zip_obj.write(file.name, file.name.split(\"/\")[-1])\n", " return \"tmp.zip\"\n", "\n", "demo = gr.Interface(\n", " zip_files,\n", " gr.File(file_count=\"multiple\", file_types=[\"text\", \".json\", \".csv\"]),\n", " \"file\",\n", " examples=[[[gr.get_file(\"titanic.csv\"),\n", " gr.get_file(\"titanic.csv\"),\n", " gr.get_file(\"titanic.csv\")]]],\n", " cache_examples=True,\n", " api_name=\"predict\"\n", ")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demos/zip_files/run.py ADDED
@@ -0,0 +1,23 @@
+ from zipfile import ZipFile
+
+ import gradio as gr
+
+ def zip_files(files):
+     with ZipFile("tmp.zip", "w") as zip_obj:
+         for file in files:
+             zip_obj.write(file.name, file.name.split("/")[-1])
+     return "tmp.zip"
+
+ demo = gr.Interface(
+     zip_files,
+     gr.File(file_count="multiple", file_types=["text", ".json", ".csv"]),
+     "file",
+     examples=[[[gr.get_file("titanic.csv"),
+                 gr.get_file("titanic.csv"),
+                 gr.get_file("titanic.csv")]]],
+     cache_examples=True,
+     api_name="predict"
+ )
+
+ if __name__ == "__main__":
+     demo.launch()
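
Note on zip_files above: ZipFile.write(path, arcname) stores the file under arcname inside the archive, which is why the demo strips the directory with split("/")[-1]. A minimal sketch using a hypothetical temp file:

import tempfile
from pathlib import Path
from zipfile import ZipFile

src = Path(tempfile.gettempdir()) / "example.txt"  # hypothetical input file
src.write_text("hello")
with ZipFile(Path(tempfile.gettempdir()) / "tmp.zip", "w") as zip_obj:
    zip_obj.write(src, src.name)  # arcname drops the directory part
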
demos/zip_files/screenshot.png ADDED
image.png ADDED
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gradio-client @ git+https://github.com/gradio-app/gradio@a789c0efec684f82c533762dd1b3339462016d16#subdirectory=client/python
+ https://gradio-pypi-previews.s3.amazonaws.com/a789c0efec684f82c533762dd1b3339462016d16/gradio-6.0.2-py3-none-any.whl
+ pypistats==1.1.0
+ plotly
+ matplotlib
+ altair
+ vega_datasets
run.py ADDED
@@ -0,0 +1,46 @@
+ import importlib
+ import gradio as gr
+ import os
+ import sys
+ import copy
+ import pathlib
+ from gradio.media import MEDIA_ROOT
+
+ os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
+
+ demo_dir = pathlib.Path(__file__).parent / "demos"
+
+ names = sorted(os.listdir("./demos"))
+
+ all_demos = []
+ demo_module = None
+ for p in sorted(os.listdir("./demos")):
+     old_path = copy.deepcopy(sys.path)
+     sys.path = [os.path.join(demo_dir, p)] + sys.path
+     try:  # Some demos may not be runnable because of 429 timeouts, etc.
+         if demo_module is None:
+             demo_module = importlib.import_module("run")
+         else:
+             demo_module = importlib.reload(demo_module)
+         all_demos.append((p, demo_module.demo, False))  # type: ignore
+     except Exception as e:
+         with gr.Blocks() as demo:
+             gr.Markdown(f"Error loading demo: {e}")
+         all_demos.append((p, demo, True))
+
+ app = gr.Blocks()
+
+ with app:
+     gr.Markdown("""
+     # Deployed Demos
+     ## Click through demos to test them out!
+     """)
+
+     for demo_name, demo, _ in all_demos:
+         with app.route(demo_name):
+             demo.render()
+
+ # app = gr.mount_gradio_app(app, demo, f"/demo/{demo_name}")
+
+ if __name__ == "__main__":
+     app.launch(allowed_paths=[str(MEDIA_ROOT)])
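
Note on the top-level run.py above: each demo folder's run.py is imported under the single module name "run" by prepending the folder to sys.path and then calling importlib.reload, which re-resolves the module spec against the updated path. A minimal sketch of that pattern, assuming two hypothetical folders demos/a and demos/b that each contain a run.py:

import copy
import importlib
import sys

demo_module = None
for folder in ["demos/a", "demos/b"]:  # hypothetical demo folders
    old_path = copy.deepcopy(sys.path)
    sys.path = [folder] + sys.path  # make this folder's run.py win the module lookup
    if demo_module is None:
        demo_module = importlib.import_module("run")  # first import
    else:
        demo_module = importlib.reload(demo_module)   # re-finds "run" via the new path entry
    print(folder, demo_module.__file__)
    sys.path = old_path  # restore the search path
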