Compare commits
No commits in common. "76e5e090427b06e78101f0ab8513fba6b2b21c33" and "main" have entirely different histories.
76e5e09042...main
@@ -1,2 +0,0 @@
-OPENAI_API_KEY = ''
-OAI_CONFIG_LIST = '[]'
7  .gitignore  vendored
@@ -1,5 +1,2 @@
-.cache
-.coding
-.env
-__pycache__
+.aider*
+.idea
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""Sample code slightly modified from https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb """
-
-from os import getenv
-
-import autogen
-from dotenv import load_dotenv
-
-# Load local configuration from .env
-load_dotenv()
-
-config_list = autogen.config_list_from_json(
-    "OAI_CONFIG_LIST",
-    filter_dict={
-        "model": ["gpt-3.5-turbo"]
-    },
-)
-
-# create an AssistantAgent named "assistant"
-assistant = autogen.AssistantAgent(
-    name="assistant",
-    llm_config={
-        "cache_seed": 42,  # seed for caching and reproducibility
-        "config_list": config_list,  # a list of OpenAI API configurations
-        "temperature": 0,  # temperature for sampling
-    },  # configuration for autogen's enhanced inference API, which is compatible with the OpenAI API
-)
-
-# create a UserProxyAgent instance named "user_proxy"
-user_proxy = autogen.UserProxyAgent(
-    name="user_proxy",
-    human_input_mode="NEVER",
-    max_consecutive_auto_reply=10,
-    is_termination_msg=lambda x: x.get("content", "").rstrip().endswith("TERMINATE"),
-    code_execution_config={
-        "work_dir": ".coding",
-        "use_docker": False,  # set to True or an image name like "python:3" to use Docker
-    },
-)
-
-if __name__ == "__main__":
-    # the assistant receives a message from the user_proxy, which contains the task description
-    user_proxy.initiate_chat(
-        assistant,
-        message="""What date is today? Compare the year-to-date gain for META and TESLA.""",
-    )
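For context on the removed script's configuration: `autogen.config_list_from_json("OAI_CONFIG_LIST")` loads a JSON list of model endpoints from an environment variable (or file) of that name, which the removed config file above left empty (`'[]'`). A filled-in value might look like the sketch below; the model name matches the script's `filter_dict`, but the key is a placeholder, not anything taken from this repository.

```python
# Illustrative OAI_CONFIG_LIST value only; "sk-your-key-here" is a placeholder.
import json
import os

os.environ["OAI_CONFIG_LIST"] = json.dumps(
    [{"model": "gpt-3.5-turbo", "api_key": "sk-your-key-here"}]
)
```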
34  app.py  Normal file
@@ -0,0 +1,34 @@
+from fastapi import FastAPI, File, UploadFile
+import tempfile
+import os
+from fastapi import FastAPI
+from fastapi.openapi.docs import get_swagger_ui_html
+
+# Assuming Celery is already set up and imported correctly in the project.
+from tasks import delete_temp_file  # Import your Celery task here.
+
+app = FastAPI(
+    title="AITist API",
+    description="This is a simple API for AI Tist.",
+    version="1.0.0",
+    docs_url="/docs",  # Enable Swagger UI
+)
+
+@app.get("/docs")
+async def custom_swagger_ui_html():
+    return get_swagger_ui_html(openapi_url=app.openapi_url, title=app.title + " - Swagger UI")
+
+@app.post("/uploadfile/")
+async def upload_file(file: UploadFile = File(...)):
+    contents = await file.read()
+
+    # Save the file to a temporary directory
+    temp_dir = tempfile.gettempdir()
+    temp_file_path = os.path.join(temp_dir, file.filename)
+    with open(temp_file_path, 'wb') as f:
+        f.write(contents)
+
+    # Call the Celery task that deletes the file after processing.
+    delete_temp_file.delay(temp_file_path)  # Assuming this is your Celery task name.
+
+    return {"filename": file.filename}
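A minimal way to exercise the new `/uploadfile/` route, assuming the app is served locally with uvicorn on port 8000 and that `httpx` is available (both are assumptions, neither is pinned in this diff). Note that `app.py` registers a custom `/docs` handler while also passing `docs_url="/docs"` to FastAPI, so the built-in Swagger route will generally take precedence over the custom one.

```python
# Client sketch for the /uploadfile/ endpoint; localhost:8000 and sample.wav are assumptions.
import httpx

with open("sample.wav", "rb") as f:
    response = httpx.post(
        "http://localhost:8000/uploadfile/",
        files={"file": ("sample.wav", f, "audio/wav")},
    )
print(response.json())  # expected: {"filename": "sample.wav"}
```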
27  flow.md  Normal file
@@ -0,0 +1,27 @@
+# The AItist Listening Flow
+
+(implementation outline: https://github.com/lablab-ai/Whisper-transcription_and_diarization-speaker-identification-/blob/main/transcribtion_diarization.ipynb
+
+```mermaid
+flowchart TD
+    subgraph "Context Input"
+    record[\VAD/] --> transcribe
+    record --> diarize
+    diarize --> identify_speakers
+    identify_speakers --> regard{{Do speakers matter}}
+    transcribe --> regard
+    regard --yes--> parse_context
+    regard --"no"--> stop[/Stop\]
+    regard --some--> log_speakers
+    log_speakers --> parse_context
+    parse_context --> known{{Context known}}
+    known --"no"--> log_context[Log for async analysis]
+    end
+
+    subgraph Process
+    known --yes--> apply_context_prompt
+    apply_context_prompt --> llm_find_action["Find action from text"]
+    end
+
+
+```
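The "Context Input" stage of this flowchart maps fairly directly onto the Celery tasks added in tasks.py later in this diff (still placeholders). One way it could be wired, shown only as a sketch: run `diarize` and `transcribe` in parallel and hand both results to a decision step. `decide_on_context` and `handle_recording` are hypothetical names, not part of the repository.

```python
# Sketch of the "Context Input" stage using Celery primitives; decide_on_context is hypothetical.
from celery import chord, group, shared_task

from tasks import diarize, transcribe


@shared_task
def decide_on_context(results):
    """Receive [diarization, transcription] and apply the 'Do speakers matter' /
    'Context known' branching from the flowchart (branching logic not implemented here)."""
    diarization, transcription = results
    return {"diarization": diarization, "transcription": transcription}


def handle_recording(audio_path: str):
    # chord = run the group in parallel, then call the callback with the list of results.
    return chord(group(diarize.s(audio_path), transcribe.s(audio_path)))(decide_on_context.s())
```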
2  main.py  Normal file
@@ -0,0 +1,2 @@
+# This is a simple Python script that prints 'Hey there!'
+print("Hey there!")
4271  poetry.lock  generated
File diff suppressed because it is too large
pyproject.toml
@@ -1,16 +1,18 @@
 [tool.poetry]
 name = "aitist"
 version = "0.1.0"
-description = ""
-authors = ["Your Name <you@example.com>"]
-readme = "README.md"
+description = "A new Python project managed by Poetry."
+authors = ["Timothy Farrell <tim@thecookiejar.me>"]
+package-mode = false

 [tool.poetry.dependencies]
-python = ">=3.9,<3.12"
-pyautogen = "^0.2.2"
-python-dotenv = "^1.0.0"
+python = "^3.11"
+fastapi = "^0.111.1"
+uvicorn = {extras = ["standard"], version = "^0.17.6"}
+celery = "^5.4.0"
+pydantic = "^2.8.2"
+pyannote-audio = "^3.3.1"

 [build-system]
-requires = ["poetry-core"]
+requires = ["poetry-core>=1.0.0"]
 build-backend = "poetry.core.masonry.api"
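A quick smoke test for the revised dependency set, assuming `poetry install` has already been run; note that the `pyannote-audio` package imports under the `pyannote.audio` namespace rather than its distribution name.

```python
# Dependency smoke test; run inside the Poetry environment (e.g. `poetry run python`).
import celery
import fastapi
import pydantic
import uvicorn
from pyannote.audio import Pipeline  # pyannote-audio installs as the pyannote.audio module

print("fastapi", fastapi.__version__)
print("uvicorn", uvicorn.__version__)
print("celery", celery.__version__)
print("pydantic", pydantic.__version__)
```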
31  tasks.py  Normal file
@@ -0,0 +1,31 @@
+from celery import shared_task
+
+@shared_task
+def delete_temp_file(file_path):
+    """Celery task to delete a temporary file."""
+    # Implementation goes here.
+    pass
+
+@shared_task
+def diarize(audio_path):
+    """
+    Celery task that performs diarization on an audio file.
+    Placeholder for actual implementation.
+    """
+    pass
+
+@shared_task
+def transcribe(audio_path):
+    """
+    Celery task that transcribes speech from an audio file to text.
+    Placeholder for actual implementation.
+    """
+    pass
+
+@shared_task
+def identify(image_path):
+    """
+    Celery task that identifies objects or features in an image.
+    Placeholder for actual implementation.
+    """
+    pass
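`delete_temp_file` is the only task `app.py` actually calls; a minimal body might look like the following sketch, which assumes the Celery worker shares a filesystem with the API process (nothing in this diff guarantees that).

```python
# Possible implementation of delete_temp_file; a sketch, not the project's actual code.
import os

from celery import shared_task


@shared_task
def delete_temp_file(file_path):
    """Remove the temporary upload once downstream processing has finished."""
    # Tolerate the file already having been removed by another worker.
    if os.path.exists(file_path):
        os.remove(file_path)
```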