mirror of
https://github.com/shirayu/whispering.git
synced 2025-02-16 10:35:16 +00:00
Renamed whisper_streaming to whispering
This commit is contained in:
parent
683571e059
commit
807dd633b0
9 changed files with 20 additions and 19 deletions
2
Makefile
2
Makefile
|
@@ -1,7 +1,7 @@
|
|||
|
||||
all: lint_node lint_python
|
||||
|
||||
TARGET_DIRS:=./whisper_streaming
|
||||
TARGET_DIRS:=./whispering
|
||||
|
||||
flake8:
|
||||
find $(TARGET_DIRS) | grep '\.py$$' | xargs flake8
|
||||
|
|
21
README.md
21
README.md
|
@@ -1,20 +1,21 @@
|
|||
|
||||
# whisper_streaming
|
||||
# Whispering
|
||||
|
||||
[![MIT License](https://img.shields.io/apm/l/atomic-design-ui.svg?)](LICENSE)
|
||||
[![Python Versions](https://img.shields.io/badge/Python-3.8%20--%203.10-blue)](https://pypi.org/project/bunkai/)
|
||||
|
||||
[![CI](https://github.com/shirayu/whisper_streaming/actions/workflows/ci.yml/badge.svg)](https://github.com/shirayu/whisper_streaming/actions/workflows/ci.yml)
|
||||
[![CodeQL](https://github.com/shirayu/whisper_streaming/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/shirayu/whisper_streaming/actions/workflows/codeql-analysis.yml)
|
||||
[![Typos](https://github.com/shirayu/whisper_streaming/actions/workflows/typos.yml/badge.svg)](https://github.com/shirayu/whisper_streaming/actions/workflows/typos.yml)
|
||||
[![CI](https://github.com/shirayu/whispering/actions/workflows/ci.yml/badge.svg)](https://github.com/shirayu/whispering/actions/workflows/ci.yml)
|
||||
[![CodeQL](https://github.com/shirayu/whispering/actions/workflows/codeql-analysis.yml/badge.svg)](https://github.com/shirayu/whispering/actions/workflows/codeql-analysis.yml)
|
||||
[![Typos](https://github.com/shirayu/whispering/actions/workflows/typos.yml/badge.svg)](https://github.com/shirayu/whispering/actions/workflows/typos.yml)
|
||||
|
||||
Streaming transcriber with [whisper](https://github.com/openai/whisper).
|
||||
Former name was "whisper_streaming".
|
||||
Enough machine power is needed to transcribe in real time.
|
||||
|
||||
## Setup
|
||||
|
||||
```bash
|
||||
pip install -U git+https://github.com/shirayu/whisper_streaming.git
|
||||
pip install -U git+https://github.com/shirayu/whispering.git
|
||||
|
||||
# If you use GPU, install proper torch and torchaudio
|
||||
# Example : torch for CUDA 11.6
|
||||
|
@@ -25,11 +26,11 @@ pip install -U torch torchaudio --extra-index-url https://download.pytorch.org/w
|
|||
|
||||
```bash
|
||||
# Run in English
|
||||
whisper_streaming --language en --model tiny
|
||||
whispering --language en --model tiny
|
||||
```
|
||||
|
||||
- ``--help`` shows full options
|
||||
- ``--language`` sets the language to transcribe. The list of languages are shown with ``whisper_streaming -h``
|
||||
- ``--language`` sets the language to transcribe. The list of languages are shown with ``whispering -h``
|
||||
- ``-t`` sets temperatures to decode. You can set several like (``-t 0.0 -t 0.1 -t 0.5``), but too many temperatures exhaust decoding time
|
||||
- ``--debug`` outputs logs for debug
|
||||
|
||||
|
@@ -39,7 +40,7 @@ If you want quick response, set small ``-n`` and add ``--allow-padding``.
|
|||
However, this may sacrifice the accuracy.
|
||||
|
||||
```bash
|
||||
whisper_streaming --language en --model tiny -n 20 --allow-padding
|
||||
whispering --language en --model tiny -n 20 --allow-padding
|
||||
```
|
||||
|
||||
## Example of web socket
|
||||
|
@@ -51,7 +52,7 @@ Run with ``--host`` and ``--port``.
|
|||
### Host
|
||||
|
||||
```bash
|
||||
whisper_streaming --language en --model tiny --host 0.0.0.0 --port 8000
|
||||
whispering --language en --model tiny --host 0.0.0.0 --port 8000
|
||||
```
|
||||
|
||||
You can set ``--allow-padding`` and other options.
|
||||
|
@@ -59,7 +60,7 @@ You can set ``--allow-padding`` and other options.
|
|||
### Client
|
||||
|
||||
```bash
|
||||
whisper_streaming --model tiny --host ADDRESS_OF_HOST --port 8000 --mode client
|
||||
whispering --model tiny --host ADDRESS_OF_HOST --port 8000 --mode client
|
||||
```
|
||||
|
||||
You can set ``-n`` and other options.
|
||||
|
|
|
@@ -25,5 +25,5 @@ requires = ["poetry-core"]
|
|||
build-backend = "poetry.core.masonry.api"
|
||||
|
||||
[tool.poetry.scripts]
|
||||
whisper_streaming = "whisper_streaming.cli:main"
|
||||
whispering = "whisper_streaming.cli:main"
|
||||
|
||||
|
|
|
@@ -12,10 +12,10 @@ from whisper import available_models
|
|||
from whisper.audio import N_FRAMES, SAMPLE_RATE
|
||||
from whisper.tokenizer import LANGUAGES, TO_LANGUAGE_CODE
|
||||
|
||||
from whisper_streaming.schema import WhisperConfig
|
||||
from whisper_streaming.serve import serve_with_websocket
|
||||
from whisper_streaming.transcriber import WhisperStreamingTranscriber
|
||||
from whisper_streaming.websocket_client import run_websocket_client
|
||||
from whispering.schema import WhisperConfig
|
||||
from whispering.serve import serve_with_websocket
|
||||
from whispering.transcriber import WhisperStreamingTranscriber
|
||||
from whispering.websocket_client import run_websocket_client
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
|
@@ -6,7 +6,7 @@ from logging import getLogger
|
|||
import numpy as np
|
||||
import websockets
|
||||
|
||||
from whisper_streaming.transcriber import WhisperStreamingTranscriber
|
||||
from whispering.transcriber import WhisperStreamingTranscriber
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
|
@@ -17,7 +17,7 @@ from whisper.decoding import DecodingOptions, DecodingResult
|
|||
from whisper.tokenizer import get_tokenizer
|
||||
from whisper.utils import exact_div
|
||||
|
||||
from whisper_streaming.schema import ParsedChunk, WhisperConfig
|
||||
from whispering.schema import ParsedChunk, WhisperConfig
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
|
@@ -7,7 +7,7 @@ import sounddevice as sd
|
|||
import websockets
|
||||
from whisper.audio import N_FRAMES, SAMPLE_RATE
|
||||
|
||||
from whisper_streaming.schema import ParsedChunk
|
||||
from whispering.schema import ParsedChunk
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
Loading…
Reference in a new issue