commit 24ea5bedb877379bf8a00b6a557f9b01d4a814a6 Author: Malte Grosse Date: Fri Oct 13 18:28:50 2023 +0900 init diff --git a/README.md b/README.md new file mode 100644 index 0000000..168805a --- /dev/null +++ b/README.md @@ -0,0 +1,3 @@ +# Examples + +Various example scripts for the [Sandbox](https://sandbox.iuk.hdm-stuttgart.de/). \ No newline at end of file diff --git a/text_to_image.ipynb b/text_to_image.ipynb new file mode 100644 index 0000000..a784aab --- /dev/null +++ b/text_to_image.ipynb @@ -0,0 +1,329 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "1f417fcf-52d5-4e49-9e26-3834eba323cf", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Requirement already satisfied: transformers in /opt/conda/lib/python3.10/site-packages (4.33.2)\n", + "Requirement already satisfied: diffusers in /opt/conda/lib/python3.10/site-packages (0.21.2)\n", + "Requirement already satisfied: accelerate in /opt/conda/lib/python3.10/site-packages (0.23.0)\n", + "Requirement already satisfied: ipywidgets in /opt/conda/lib/python3.10/site-packages (8.0.7)\n", + "Requirement already satisfied: filelock in /opt/conda/lib/python3.10/site-packages (from transformers) (3.12.2)\n", + "Requirement already satisfied: huggingface-hub<1.0,>=0.15.1 in /opt/conda/lib/python3.10/site-packages (from transformers) (0.17.3)\n", + "Requirement already satisfied: numpy>=1.17 in /opt/conda/lib/python3.10/site-packages (from transformers) (1.24.4)\n", + "Requirement already satisfied: packaging>=20.0 in /opt/conda/lib/python3.10/site-packages (from transformers) (23.1)\n", + "Requirement already satisfied: pyyaml>=5.1 in /opt/conda/lib/python3.10/site-packages (from transformers) (6.0)\n", + "Requirement already satisfied: regex!=2019.12.17 in /opt/conda/lib/python3.10/site-packages (from transformers) (2023.8.8)\n", + "Requirement already satisfied: requests in /opt/conda/lib/python3.10/site-packages (from transformers) (2.31.0)\n", + "Requirement already satisfied: tokenizers!=0.11.3,<0.14,>=0.11.1 in /opt/conda/lib/python3.10/site-packages (from transformers) (0.13.3)\n", + "Requirement already satisfied: safetensors>=0.3.1 in /opt/conda/lib/python3.10/site-packages (from transformers) (0.3.3)\n", + "Requirement already satisfied: tqdm>=4.27 in /opt/conda/lib/python3.10/site-packages (from transformers) (4.65.0)\n", + "Requirement already satisfied: importlib-metadata in /opt/conda/lib/python3.10/site-packages (from diffusers) (6.7.0)\n", + "Requirement already satisfied: Pillow in /opt/conda/lib/python3.10/site-packages (from diffusers) (10.0.0)\n", + "Requirement already satisfied: psutil in /opt/conda/lib/python3.10/site-packages (from accelerate) (5.9.5)\n", + "Requirement already satisfied: torch>=1.10.0 in /opt/conda/lib/python3.10/site-packages (from accelerate) (2.0.0.post200)\n", + "Requirement already satisfied: ipykernel>=4.5.1 in /opt/conda/lib/python3.10/site-packages (from ipywidgets) (6.24.0)\n", + "Requirement already satisfied: ipython>=6.1.0 in /opt/conda/lib/python3.10/site-packages (from ipywidgets) (8.14.0)\n", + "Requirement already satisfied: traitlets>=4.3.1 in /opt/conda/lib/python3.10/site-packages (from ipywidgets) (5.9.0)\n", + "Requirement already satisfied: widgetsnbextension~=4.0.7 in /opt/conda/lib/python3.10/site-packages (from ipywidgets) (4.0.8)\n", + "Requirement already satisfied: jupyterlab-widgets~=3.0.7 in /opt/conda/lib/python3.10/site-packages (from ipywidgets) (3.0.8)\n", + "Requirement 
already satisfied: fsspec in /opt/conda/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.15.1->transformers) (2023.6.0)\n", + "Requirement already satisfied: typing-extensions>=3.7.4.3 in /opt/conda/lib/python3.10/site-packages (from huggingface-hub<1.0,>=0.15.1->transformers) (4.7.1)\n", + "Requirement already satisfied: comm>=0.1.1 in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (0.1.3)\n", + "Requirement already satisfied: debugpy>=1.6.5 in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (1.6.7)\n", + "Requirement already satisfied: jupyter-client>=6.1.12 in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (8.3.0)\n", + "Requirement already satisfied: jupyter-core!=5.0.*,>=4.12 in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (5.3.1)\n", + "Requirement already satisfied: matplotlib-inline>=0.1 in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (0.1.6)\n", + "Requirement already satisfied: nest-asyncio in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (1.5.6)\n", + "Requirement already satisfied: pyzmq>=20 in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (25.1.0)\n", + "Requirement already satisfied: tornado>=6.1 in /opt/conda/lib/python3.10/site-packages (from ipykernel>=4.5.1->ipywidgets) (6.3.2)\n", + "Requirement already satisfied: backcall in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (0.2.0)\n", + "Requirement already satisfied: decorator in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (5.1.1)\n", + "Requirement already satisfied: jedi>=0.16 in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (0.18.2)\n", + "Requirement already satisfied: pickleshare in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (0.7.5)\n", + "Requirement already satisfied: prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30 in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (3.0.38)\n", + "Requirement already satisfied: pygments>=2.4.0 in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (2.15.1)\n", + "Requirement already satisfied: stack-data in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (0.6.2)\n", + "Requirement already satisfied: pexpect>4.3 in /opt/conda/lib/python3.10/site-packages (from ipython>=6.1.0->ipywidgets) (4.8.0)\n", + "Requirement already satisfied: sympy in /opt/conda/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (1.12)\n", + "Requirement already satisfied: networkx in /opt/conda/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (3.1)\n", + "Requirement already satisfied: jinja2 in /opt/conda/lib/python3.10/site-packages (from torch>=1.10.0->accelerate) (3.1.2)\n", + "Requirement already satisfied: zipp>=0.5 in /opt/conda/lib/python3.10/site-packages (from importlib-metadata->diffusers) (3.15.0)\n", + "Requirement already satisfied: charset-normalizer<4,>=2 in /opt/conda/lib/python3.10/site-packages (from requests->transformers) (3.1.0)\n", + "Requirement already satisfied: idna<4,>=2.5 in /opt/conda/lib/python3.10/site-packages (from requests->transformers) (3.4)\n", + "Requirement already satisfied: urllib3<3,>=1.21.1 in /opt/conda/lib/python3.10/site-packages (from requests->transformers) (2.0.3)\n", + "Requirement already satisfied: certifi>=2017.4.17 in 
/opt/conda/lib/python3.10/site-packages (from requests->transformers) (2023.5.7)\n", + "Requirement already satisfied: parso<0.9.0,>=0.8.0 in /opt/conda/lib/python3.10/site-packages (from jedi>=0.16->ipython>=6.1.0->ipywidgets) (0.8.3)\n", + "Requirement already satisfied: python-dateutil>=2.8.2 in /opt/conda/lib/python3.10/site-packages (from jupyter-client>=6.1.12->ipykernel>=4.5.1->ipywidgets) (2.8.2)\n", + "Requirement already satisfied: platformdirs>=2.5 in /opt/conda/lib/python3.10/site-packages (from jupyter-core!=5.0.*,>=4.12->ipykernel>=4.5.1->ipywidgets) (3.8.0)\n", + "Requirement already satisfied: ptyprocess>=0.5 in /opt/conda/lib/python3.10/site-packages (from pexpect>4.3->ipython>=6.1.0->ipywidgets) (0.7.0)\n", + "Requirement already satisfied: wcwidth in /opt/conda/lib/python3.10/site-packages (from prompt-toolkit!=3.0.37,<3.1.0,>=3.0.30->ipython>=6.1.0->ipywidgets) (0.2.6)\n", + "Requirement already satisfied: MarkupSafe>=2.0 in /opt/conda/lib/python3.10/site-packages (from jinja2->torch>=1.10.0->accelerate) (2.1.3)\n", + "Requirement already satisfied: executing>=1.2.0 in /opt/conda/lib/python3.10/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (1.2.0)\n", + "Requirement already satisfied: asttokens>=2.1.0 in /opt/conda/lib/python3.10/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (2.2.1)\n", + "Requirement already satisfied: pure-eval in /opt/conda/lib/python3.10/site-packages (from stack-data->ipython>=6.1.0->ipywidgets) (0.2.2)\n", + "Requirement already satisfied: mpmath>=0.19 in /opt/conda/lib/python3.10/site-packages (from sympy->torch>=1.10.0->accelerate) (1.3.0)\n", + "Requirement already satisfied: six in /opt/conda/lib/python3.10/site-packages (from asttokens>=2.1.0->stack-data->ipython>=6.1.0->ipywidgets) (1.16.0)\n" + ] + } + ], + "source": [ + "!pip install transformers diffusers accelerate ipywidgets" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "6814d202-9131-4170-a967-6527a504eff4", + "metadata": { + "tags": [] + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-09-27 13:15:44.716139: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: SSE4.1 SSE4.2 AVX AVX2 FMA\n", + "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" + ] + } + ], + "source": [ + "import torch\n", + "from diffusers import StableDiffusionPipeline\n", + "from matplotlib import pyplot as plt\n", + "import datetime" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9a48b8ed-6f07-4f27-95eb-7ec45c4a7d2f", + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# diffusers docs: https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/text2img#texttoimage-generation\n", + "# model_id = \"dreamlike-art/dreamlike-diffusion-1.0\"\n", + "# model_id = \"prompthero/openjourney\"\n", + "model_id = \"XpucT/Deliberate\"" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "895b7852-6e82-4d74-bada-4e5b92ef90f4", + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "text_encoder/model.safetensors not found\n" + ] + }, + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "369649662a124d3da6cbd06fbae927d9", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Loading pipeline 
components...: 0%| | 0/6 [00:00 by passing `safety_checker=None`. Ensure that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered results in services or applications open to the public. Both the diffusers team and Hugging Face strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling it only for use-cases that involve analyzing network behavior or auditing its results. For more information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\n" + ] + } + ], + "source": [ + "pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, safety_checker=None)\n", + "device = \"cuda\" if torch.cuda.is_available() else \"mps\" if torch.backends.mps.is_available() else \"cpu\"\n", + "pipe = pipe.to(device)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "2b343ab6-7c44-4a19-a4ee-e5cf24dc60ca", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "095dd8981c254c0ba98197b39b78e38b", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "VBox(children=(Textarea(value='', layout=Layout(height='50px', width='auto'), placeholder='cats, sharks, ships…" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "text/plain": [ + "
" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "import numpy as np\n", + "import ipywidgets as widgets\n", + "from IPython.display import clear_output, display\n", + "\n", + "import matplotlib.pyplot as plt\n", + "from IPython.display import set_matplotlib_formats\n", + "%matplotlib inline\n", + "#set_matplotlib_formats('svg')\n", + "\n", + "np.random.seed(42)\n", + "\n", + "#prompt = \"Renaissance-style portrait of an astronaut in space, detailed starry background, reflective helmet, matte painting, hyperdetailed, CGSociety\"\n", + "#negative_prompt = \"gold\"\n", + "#textarea\n", + "std_layout = widgets.Layout(width=\"auto\")\n", + "\n", + "prompt_box = widgets.Textarea(value='', placeholder='cats, sharks, ships, underwater, cinematic composition', description='', layout=widgets.Layout(height=\"50px\", width=\"auto\"))\n", + "neg_prompt_box = widgets.Textarea(value='', placeholder='negative prompt', description='', layout=widgets.Layout(height=\"30px\", width=\"auto\"))\n", + "#slider \n", + "img2gen_lab = widgets.Label(value=\"Images to Generate:\", layout=std_layout)\n", + "img2gen_var = widgets.IntSlider(value=1, min=1, max=10, layout=std_layout)\n", + "#input\n", + "size_x_lab = widgets.Label(value=\"Image Width:\", layout=std_layout)\n", + "size_x_var = widgets.BoundedIntText(value=32, min=32, max=512, step=8, layout=std_layout)\n", + "size_y_lab = widgets.Label(value=\"Image heigth:\", layout=std_layout)\n", + "size_y_var = widgets.BoundedIntText(value=32, min=32, max=512, step=8, layout=std_layout)\n", + "steps_lab = widgets.Label(value=\"Steps:\", layout=std_layout)\n", + "steps_var = widgets.BoundedIntText(value=50, min=11, max=100, step=1, layout=std_layout)\n", + "#Button\n", + "btn = widgets.Button(description=\"Generate\", tooltip=\"Click me\",\n", + " layout=widgets.Layout(width=\"auto\", height=\"30px\", margin=\"30px\"))\n", + "#Canvas\n", + "fig = plt.figure(figsize=(128/80, 128/80))\n", + "box = widgets.VBox([prompt_box, neg_prompt_box, img2gen_lab, img2gen_var, steps_lab, steps_var, size_x_lab, size_x_var,size_y_lab, size_y_var, btn])\n", + "box2 = widgets.VBox([prompt_box, neg_prompt_box, img2gen_lab, img2gen_var, steps_lab, steps_var, size_x_lab, size_x_var,size_y_lab, size_y_var, btn])\n", + "\n", + "box2" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "71055e73-a072-41d7-84c6-602fa69ff46f", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "cb2114daea2b446ca0b07565f6ce88ba", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Output()" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "out = widgets.Output()\n", + "out" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "id": "d2645fdf-4bed-43f1-b3d1-f06c174d45ad", + "metadata": {}, + "outputs": [], + "source": [ + "def on_value_change(change):\n", + " prompt = prompt_box.value\n", + " negative_prompt = neg_prompt_box.value\n", + " images_to_generate = int(img2gen_var.value)\n", + " steps = steps_var.value\n", + " width = size_x_var.value\n", + " height = size_y_var.value\n", + " seed = None\n", + " guidance = 7.5 # Higher guidance scale encourages to generate images that are closely linked to the text prompt, usually at the expense of lower image quality.\n", + " with out:\n", + " clear_output(wait=True)\n", + " for _ in range(images_to_generate):\n", + " steps = int(steps)\n", + " width = int(width)\n", + 
" height = int(height)\n", + " current_seed = seed or torch.randint(0, int(1e5), size=(1, 1))[0].item()\n", + " generator = torch.Generator().manual_seed(int(current_seed))\n", + " img = pipe(prompt=prompt, negative_prompt=negative_prompt, width=width, height=height, num_inference_steps=steps, guidance_scale=guidance, generator=generator).images[0]\n", + " time_now = datetime.datetime.now().strftime(\"%y.%m.%d_%H:%M:%S\")\n", + " plt.imshow(img)\n", + " plt.axis(\"off\")\n", + " print(\"Current Seed:\", current_seed)\n", + " plt.title(\"\")\n", + " plt.show()\n", + " # img.save(f\"./{time_now}_{current_seed}.jpg\")\n", + "\n", + "btn.on_click(on_value_change)\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "801c01c7-d364-411a-ba10-25161e47f65f", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/webcam_eye_detection.ipynb b/webcam_eye_detection.ipynb new file mode 100644 index 0000000..30eca0b --- /dev/null +++ b/webcam_eye_detection.ipynb @@ -0,0 +1,208 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Webcam (audio/video) processing\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Usage:\n", + "\n", + "Using `ipywebrtc` you can create a `MediaStream` out of:\n", + "- Any ipywidget using `WidgetStream`\n", + "- A video file using `VideoStream`\n", + "- An image file using `ImageStream`\n", + "- An audio file using `AudioStream`\n", + "- Your webcam/camera using `CameraStream`\n", + "\n", + "From this `MediaStream` you can:\n", + "- Record a movie using `VideoRecorder`\n", + "- Record an image snapshot using `ImageRecorder`\n", + "- Record an audio fragment using `AudioRecorder`\n", + "- Stream it to peers using the simple `chat` function." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from ipywebrtc import CameraStream, ImageRecorder, VideoRecorder, AudioRecorder" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "camera = CameraStream(constraints=\n", + " {'facing_mode': 'user',\n", + " 'audio': False,\n", + " 'video': { 'width': 320, 'height': 240 }\n", + " })\n", + "#camera" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "from ipywidgets import Image, HBox\n", + "import PIL.Image\n", + "import io\n", + "import numpy as np\n", + "from skimage.filters import sobel\n", + "from skimage.color.adapt_rgb import adapt_rgb, each_channel\n", + "from skimage import filters" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "image_recorder = ImageRecorder(stream=camera)\n", + "image_recorder.recording = True\n", + "#image_recorder" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "import cv2\n", + "face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')\n", + "eye_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_eye.xml')" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def get_detected_eye(face): \n", + " img = face.copy() \n", + " fr = face_cascade.detectMultiScale(img) \n", + " for (x,y,w,h) in fr:\n", + " cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n", + " gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n", + " roi_gray = gray[y:y+h, x:x+w]\n", + " roi_color = img[y:y+h, x:x+w]\n", + " eyes = eye_cascade.detectMultiScale(roi_color, scaleFactor=1.5, minNeighbors=5)\n", + " print(\"Found {0} eyes!\".format(len(eyes)))\n", + " for (ex,ey,ew,eh) in eyes:\n", + " cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n", + " return img" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Press the Camera Button to start" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "editable": true, + "slideshow": { + "slide_type": "" + }, + "tags": [] + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "HBox(children=(Image(value=b''), ImageRecorder(image=Image(value=b\"\\x89PNG\\r\\n\\x1a\\n\\x00\\x00\\x00\\rIHDR\\x00\\x00…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Press the Camera Button to start\n", + "out = Image()\n", + "stop_process = False\n", + "\n", + "def process_image(_):\n", + " if stop_process:\n", + " return\n", + " im_in = PIL.Image.open(io.BytesIO(image_recorder.image.value))\n", + " # result = get_detected_face(np.array(im_in)[...,:3])\n", + " result = get_detected_eye(np.array(im_in)[...,:3])\n", + " \n", + " im_out = PIL.Image.fromarray(result)\n", + " f = io.BytesIO()\n", + " im_out.save(f, format='jpeg')\n", + " out.value = f.getvalue()\n", + " image_recorder.recording = True\n", + "\n", + "image_recorder.image.observe(process_image, names=['value'])\n", + "image_recorder.recording = True\n", + "HBox([out, image_recorder])" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "stop_process = True" + ] + }, + { + 
"cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "from ipywidgets import Widget\n", + "Widget.close_all()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/whisper_audio_to_text.ipynb b/whisper_audio_to_text.ipynb new file mode 100644 index 0000000..42224ae --- /dev/null +++ b/whisper_audio_to_text.ipynb @@ -0,0 +1,1030 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "...original notebook from https://github.com/fastforwardlabs/whisper-openai/blob/master/WhisperDemo.ipynb" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "v5hvo8QWN-a9" + }, + "source": [ + "# OpenAI's Whisper\n", + "Speech to text...\n", + "\n", + "more information at\n", + "- https://openai.com/blog/whisper\n", + "- https://github.com/openai/whisper\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture\n", + "# install dependencies\n", + "\n", + "! pip install git+https://github.com/openai/whisper.git" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "id": "3CqtR2Fi5-vP" + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2023-10-13 09:14:42.948361: I tensorflow/core/platform/cpu_feature_guard.cc:193] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: SSE4.1 SSE4.2 AVX AVX2 FMA\n", + "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" + ] + } + ], + "source": [ + "%%capture\n", + "# use imports and select cuda\n", + "import os\n", + "import numpy as np\n", + "\n", + "try:\n", + " import tensorflow \n", + "except ImportError:\n", + " pass\n", + "\n", + "import torch\n", + "import pandas as pd\n", + "import whisper\n", + "import torchaudio\n", + "\n", + "from ipywebrtc import AudioRecorder, CameraStream\n", + "from IPython.display import Audio, display\n", + "import ipywidgets as widgets\n", + "\n", + "DEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 107, + "referenced_widgets": [ + "09446a03c33742dfa70a9f242f96b3be", + "8964df95ded44ee28b7ed225c564ed9b", + "823fe8b97ef94aedaed6889ac580c8eb", + "5604b41fde3b45dd80b954c6128bccf7", + "09e1f5b2de9945aea20f6129b7c82ec9", + "35775e7c3c5846a589410566cbec95fa" + ] + }, + "id": "-fFdSBBAGjFk", + "outputId": "5894a254-7fe0-4593-fbee-74491cd72b9f" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "AudioRecorder(audio=Audio(value=b'', format='webm'), stream=CameraStream(constraints={'audio': True, 'video': …" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# manually record using webcam - if u want to use a custom audio file, skip this section\n", + "camera = 
CameraStream(constraints={'audio': True,'video':False})\n", + "recorder = AudioRecorder(stream=camera)\n", + "recorder" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "id": "EDDgAohMGrCR" + }, + "outputs": [], + "source": [ + "# save recording as file and convert to wav\n", + "with open('recording.webm', 'wb') as f:\n", + " f.write(recorder.audio.value)\n", + "!ffmpeg -i recording.webm -ac 1 -f wav my_recording.wav -y -hide_banner -loglevel panic" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "id": "U7p2AoJItnIM" + }, + "outputs": [], + "source": [ + "# Whisper is capable of performing transcriptions for many languages (though it performs better for some languages and worse for others.) Whisper is also capable of detecting the input language. \n", + "# However, to be on the safe side, we can explicitly tell Whisper which language to expect. \n", + "language_options = whisper.tokenizer.TO_LANGUAGE_CODE \n", + "language_list = list(language_options.keys())" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "8c4d89ec973647d1a46aa471311e037c", + "6c5f2af50210411f801f812dc17c389c", + "90728ace9c454a3ab05ffb3e0bb664a3", + "b50eded046cf4d378b1d71a186995d21" + ] + }, + "id": "dpLnKvlb-vLa", + "outputId": "bd012b8a-d413-41a9-834a-6674c0e2928a" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(options=('english', 'chinese', 'german', 'spanish', 'russian', 'korean', 'french', 'japanese', 'portu…" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "# Whisper is also capable of several tasks, including English-only transcription, \n", + "# Any-to-English translation, and non-English transcription. \n", + "lang_dropdown = widgets.Dropdown(options=language_list, value='english')\n", + "output = widgets.Output()\n", + "display(lang_dropdown)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 49, + "referenced_widgets": [ + "43cdc270c46644b6a0308d3c499601fb", + "5f9cd9708efb486c930c1397be9a566c", + "20cdb2181c804215b4a1e6006be77734", + "fb4b05bd474c409288394cd82d6a9179" + ] + }, + "id": "ilyDW-ALMnke", + "outputId": "4a34e1f6-519c-46e5-a00b-05fbe3540e18" + }, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "", + "version_major": 2, + "version_minor": 0 + }, + "text/plain": [ + "Dropdown(options=('transcribe', 'translate'), value='transcribe')" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "task_dropdown = widgets.Dropdown(options=['transcribe', 'translate'], value='transcribe')\n", + "output = widgets.Output()\n", + "display(task_dropdown)" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "_PokfNJtOYNu", + "outputId": "227e41ec-b1a5-409d-c3c4-20d4564fb09c" + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Model is English-only and has 71,825,408 parameters.\n" + ] + } + ], + "source": [ + "# load the model (takes some seconds)\n", + "# hint: Whisper comes in five model sizes, \n", + "# four of which also have an optimized English-only version. 
\n", + "# This notebook loads \"base\"-sized models (bigger than \"tiny\" but smaller than the others), which require about 1GB of RAM.\n", + "\n", + "#If you selected English above, the cell below will load the optimized English-only version. Otherwise, it will load the multilingual model.\n", + "\n", + "if lang_dropdown.value == \"english\":\n", + " model = whisper.load_model(\"base.en\")\n", + "else:\n", + " model = whisper.load_model(\"base\")\n", + "print(\n", + " f\"Model is {'multilingual' if model.is_multilingual else 'English-only'} \"\n", + " f\"and has {sum(np.prod(p.shape) for p in model.parameters()):,} parameters.\"\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/" + }, + "id": "DwOUHau-dkUt", + "outputId": "d16eef87-3af5-43e6-b54e-407a2e32f5d8" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "DecodingOptions(task='transcribe', language='english', temperature=0.0, sample_len=None, best_of=None, beam_size=None, patience=None, length_penalty=None, prompt=None, prefix=None, suppress_tokens='-1', suppress_blank=True, without_timestamps=True, max_initial_timestamp=1.0, fp16=True)" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# set the options\n", + "options = whisper.DecodingOptions(language=lang_dropdown.value, task=task_dropdown.value, without_timestamps=True)\n", + "options" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "id": "q6S0VvoK0vfq" + }, + "outputs": [], + "source": [ + "# choose your audio file\n", + "#audio = whisper.load_audio(\"my_recording.wav\")\n", + "audio = whisper.load_audio(\"QA-01.mp3\")\n", + "audio = whisper.pad_or_trim(audio)\n", + "mel = whisper.log_mel_spectrogram(audio).to(model.device)\n", + "result = model.decode(mel, options)" + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 53 + }, + "id": "Mg64_MWW1uMb", + "outputId": "444cfab3-f2bd-4519-9779-3f4aba72d1cc" + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'How many people are there in your family? There are five people in my family. My father, mother, brother, sister, and me. Does your family live in a house or an apartment? We live in a house in the countryside. What does your father do? My father is a doctor. He works at the local hospital. How old is your mother? 
She is 40 years old, one year younger than my father.'" + ] + }, + "execution_count": 14, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# print the text\n", + "result.text" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "# or write it into a text file\n", + "\n", + "text_file = open(\"output.txt\", \"w\")\n", + "text_file.write(result.text)\n", + " \n", + "#close file\n", + "text_file.close()" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "# close all widgets\n", + "from ipywidgets import Widget\n", + "Widget.close_all()" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "# it takes around 2GB memory on GPU, so please clear it\n", + "from numba import cuda\n", + "device = cuda.get_current_device()\n", + "device.reset()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "include_colab_link": true, + "provenance": [] + }, + "gpuClass": "standard", + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.12" + }, + "widgets": { + "application/vnd.jupyter.widget-state+json": { + "09446a03c33742dfa70a9f242f96b3be": { + "model_module": "jupyter-webrtc", + "model_module_version": "~0.6.0", + "model_name": "AudioRecorderModel", + "state": { + "_data_src": "blob:https://nfrp4p17vqk-496ff2e9c6d22116-0-colab.googleusercontent.com/d984436f-3337-406a-97dd-76a1145ef36f", + "_dom_classes": [], + "_model_module": "jupyter-webrtc", + "_model_module_version": "~0.6.0", + "_model_name": "AudioRecorderModel", + "_view_count": null, + "_view_module": "jupyter-webrtc", + "_view_module_version": "~0.6.0", + "_view_name": "AudioRecorderView", + "audio": "IPY_MODEL_8964df95ded44ee28b7ed225c564ed9b", + "autosave": false, + "codecs": "", + "filename": "record", + "format": "webm", + "layout": "IPY_MODEL_823fe8b97ef94aedaed6889ac580c8eb", + "recording": false, + "stream": "IPY_MODEL_5604b41fde3b45dd80b954c6128bccf7" + } + }, + "09e1f5b2de9945aea20f6129b7c82ec9": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + 
"object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "20cdb2181c804215b4a1e6006be77734": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "35775e7c3c5846a589410566cbec95fa": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "43cdc270c46644b6a0308d3c499601fb": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DropdownModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DropdownModel", + "_options_labels": [ + "transcribe", + "translate" + ], + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "DropdownView", + "description": "", + "description_tooltip": null, + "disabled": false, + "index": 0, + "layout": "IPY_MODEL_5f9cd9708efb486c930c1397be9a566c", + "style": "IPY_MODEL_20cdb2181c804215b4a1e6006be77734" + } + }, + "5604b41fde3b45dd80b954c6128bccf7": { + "model_module": "jupyter-webrtc", + "model_module_version": "~0.6.0", + "model_name": "CameraStreamModel", + "state": { + "_dom_classes": [], + "_model_module": "jupyter-webrtc", + "_model_module_version": "~0.6.0", + "_model_name": "CameraStreamModel", + "_view_count": null, + "_view_module": "jupyter-webrtc", + "_view_module_version": "~0.6.0", + "_view_name": "MediaStreamView", + "constraints": { + "audio": true, + "video": false + }, + "layout": "IPY_MODEL_35775e7c3c5846a589410566cbec95fa" + } + }, + "5f9cd9708efb486c930c1397be9a566c": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + 
"_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "6c5f2af50210411f801f812dc17c389c": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "823fe8b97ef94aedaed6889ac580c8eb": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "8964df95ded44ee28b7ed225c564ed9b": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "AudioModel", + "state": { + "_dom_classes": [], + 
"_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "AudioModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "AudioView", + "autoplay": true, + "controls": true, + "format": "webm", + "layout": "IPY_MODEL_09e1f5b2de9945aea20f6129b7c82ec9", + "loop": true + } + }, + "8c4d89ec973647d1a46aa471311e037c": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DropdownModel", + "state": { + "_dom_classes": [], + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DropdownModel", + "_options_labels": [ + "english", + "chinese", + "german", + "spanish", + "russian", + "korean", + "french", + "japanese", + "portuguese", + "turkish", + "polish", + "catalan", + "dutch", + "arabic", + "swedish", + "italian", + "indonesian", + "hindi", + "finnish", + "vietnamese", + "hebrew", + "ukrainian", + "greek", + "malay", + "czech", + "romanian", + "danish", + "hungarian", + "tamil", + "norwegian", + "thai", + "urdu", + "croatian", + "bulgarian", + "lithuanian", + "latin", + "maori", + "malayalam", + "welsh", + "slovak", + "telugu", + "persian", + "latvian", + "bengali", + "serbian", + "azerbaijani", + "slovenian", + "kannada", + "estonian", + "macedonian", + "breton", + "basque", + "icelandic", + "armenian", + "nepali", + "mongolian", + "bosnian", + "kazakh", + "albanian", + "swahili", + "galician", + "marathi", + "punjabi", + "sinhala", + "khmer", + "shona", + "yoruba", + "somali", + "afrikaans", + "occitan", + "georgian", + "belarusian", + "tajik", + "sindhi", + "gujarati", + "amharic", + "yiddish", + "lao", + "uzbek", + "faroese", + "haitian creole", + "pashto", + "turkmen", + "nynorsk", + "maltese", + "sanskrit", + "luxembourgish", + "myanmar", + "tibetan", + "tagalog", + "malagasy", + "assamese", + "tatar", + "hawaiian", + "lingala", + "hausa", + "bashkir", + "javanese", + "sundanese", + "burmese", + "valencian", + "flemish", + "haitian", + "letzeburgesch", + "pushto", + "panjabi", + "moldavian", + "moldovan", + "sinhalese", + "castilian" + ], + "_view_count": null, + "_view_module": "@jupyter-widgets/controls", + "_view_module_version": "1.5.0", + "_view_name": "DropdownView", + "description": "", + "description_tooltip": null, + "disabled": false, + "index": 0, + "layout": "IPY_MODEL_6c5f2af50210411f801f812dc17c389c", + "style": "IPY_MODEL_90728ace9c454a3ab05ffb3e0bb664a3" + } + }, + "90728ace9c454a3ab05ffb3e0bb664a3": { + "model_module": "@jupyter-widgets/controls", + "model_module_version": "1.5.0", + "model_name": "DescriptionStyleModel", + "state": { + "_model_module": "@jupyter-widgets/controls", + "_model_module_version": "1.5.0", + "_model_name": "DescriptionStyleModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "StyleView", + "description_width": "" + } + }, + "b50eded046cf4d378b1d71a186995d21": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": 
null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + }, + "fb4b05bd474c409288394cd82d6a9179": { + "model_module": "@jupyter-widgets/base", + "model_module_version": "1.2.0", + "model_name": "LayoutModel", + "state": { + "_model_module": "@jupyter-widgets/base", + "_model_module_version": "1.2.0", + "_model_name": "LayoutModel", + "_view_count": null, + "_view_module": "@jupyter-widgets/base", + "_view_module_version": "1.2.0", + "_view_name": "LayoutView", + "align_content": null, + "align_items": null, + "align_self": null, + "border": null, + "bottom": null, + "display": null, + "flex": null, + "flex_flow": null, + "grid_area": null, + "grid_auto_columns": null, + "grid_auto_flow": null, + "grid_auto_rows": null, + "grid_column": null, + "grid_gap": null, + "grid_row": null, + "grid_template_areas": null, + "grid_template_columns": null, + "grid_template_rows": null, + "height": null, + "justify_content": null, + "justify_items": null, + "left": null, + "margin": null, + "max_height": null, + "max_width": null, + "min_height": null, + "min_width": null, + "object_fit": null, + "object_position": null, + "order": null, + "overflow": null, + "overflow_x": null, + "overflow_y": null, + "padding": null, + "right": null, + "top": null, + "visibility": null, + "width": null + } + } + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +}