Commit

Merge pull request #20 from software-students-fall2023/Andrew
microphone input
KeiOshima authored Nov 30, 2023
2 parents 5a0af37 + ce4d55f commit 305b96d
Showing 5 changed files with 137 additions and 61 deletions.
5 changes: 4 additions & 1 deletion .github/workflows/lint.yml
@@ -10,7 +10,10 @@ jobs:
uses: actions/setup-python@v4
with:
python-version: "3.10"
- name: Install dependencies
- name: Install system dependencies
run: |
sudo apt-get update
sudo apt-get install -y portaudio19-dev
if: ${{ hashFiles('**/*.py') != '' }}
run: |
python -m pip install --upgrade pip
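
The hunk cuts off just after the first line of the pre-existing pip step. A minimal sketch of how the two install steps could sit together in lint.yml, assuming the rest of that step is unchanged; the step name "Install Python dependencies" and the pip install -r requirements.txt line are assumptions, not shown in the commit:

    # New step from this commit: PortAudio headers are needed so PyAudio can be
    # built and used on the Ubuntu runner.
    - name: Install system dependencies
      run: |
        sudo apt-get update
        sudo apt-get install -y portaudio19-dev

    # Assumed continuation of the existing step: install Python packages only
    # when the repository actually contains Python files.
    - name: Install Python dependencies
      if: ${{ hashFiles('**/*.py') != '' }}
      run: |
        python -m pip install --upgrade pip
        pip install -r requirements.txt  # hypothetical requirements file
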
70 changes: 70 additions & 0 deletions machine-learning-client/machine_learning_client.py
@@ -5,6 +5,66 @@
from enum import Enum
import pyaudio
import speech_recognition as sr
<<<<<<< HEAD

class ML:
"""Machine Learning class functions """
pa = pyaudio.PyAudio()

def list_all_mic(self):
"""
List all available microphone devices
"""
if len(sr.Microphone.list_microphone_names()) == 0:
print("no device available")
return

for index, name in enumerate(sr.Microphone.list_microphone_names()):
print(f'Microphone with name "{name}"')
print(f" found for Microphone(device_index={index})")

def record_microphone(self):
"""Function for recording microphone input"""
ML.list_all_mic(self) # Use for debug
mc = None
try:
mc = sr.Microphone()
print("Mic init successful")
except OSError:
mc = sr.Microphone(0)
print("no default mic")
with mc as source:
print("Please give your answer:")
r = sr.Recognizer()
audio = r.listen(source)
return audio

def audio_to_text(self, audio_file_string):
"""Function for converting audio file to text transcription"""
r = sr.Recognizer()
with sr.AudioFile(audio_file_string) as source:
ad = r.listen(source)
try:
transcription = r.recognize_google(ad)
print(transcription)
print(type(transcription))
except sr.UnknownValueError:
print("Sorry, we could not recognize your response.")
except sr.RequestError:
print("Sorry, there appears to be an error with Google Speech to Text")
return transcription

class BuzzWord(Enum):
"""List of Buzzwords"""
HELLO = "hello"
UGH = "ugh"

def grade_response(self, transcription):
"""
Give out a score based on the transcribed audio
=======
class ML:
@@ -64,6 +64,7 @@ def grade_response(self, transcription):
Give out a score based on the transcribed audio


>>>>>>> 5a0af371e4dbfe2fdc0328cf9e5f1e13a84def20
Args:
transcription (str): transcribed audio
"""
@@ -79,10 +79,19 @@ def main():
"""Main Method"""
print("Tell me a little bit about yourself")
ml = ML()
<<<<<<< HEAD
audio = r'..\\web-app\uploads\user_audio.wav'
transcription = ml.audio_to_text(audio)
result = ml.grade_response(transcription)
print(result)
print("test main")

=======
audio = ml.record_microphone()
transcription = ml.audio_to_text(audio)
result = ml.grade_response(transcription)
print(result)
>>>>>>> 5a0af371e4dbfe2fdc0328cf9e5f1e13a84def20


if __name__ == "__main__":
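
The merged file above still contains its conflict markers, and the incoming branch's main() passes the sr.AudioData returned by record_microphone() straight to audio_to_text(), which expects a WAV file path. A minimal sketch of a resolved main(), assuming live microphone input is preferred and transcribing the recording directly; the fallback to the uploaded WAV path and the commented import are assumptions, not part of the commit:

    import speech_recognition as sr

    # from machine_learning_client import ML  # assumed import path for this sketch


    def main():
        """Record an answer, transcribe it, and grade it (resolved sketch, not the committed code)."""
        print("Tell me a little bit about yourself")
        ml = ML()
        recognizer = sr.Recognizer()

        # Prefer live microphone input (the incoming branch's behaviour).
        # record_microphone() returns sr.AudioData, so it is transcribed here
        # directly instead of being passed to audio_to_text(), which opens a file path.
        audio = ml.record_microphone()
        try:
            transcription = recognizer.recognize_google(audio)
        except (sr.UnknownValueError, sr.RequestError):
            # Fall back to the file saved by the web app, mirroring the HEAD side
            # of the conflict (forward slashes assumed for portability).
            transcription = ml.audio_to_text("../web-app/uploads/user_audio.wav")

        result = ml.grade_response(transcription)
        print(result)


    if __name__ == "__main__":
        main()
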
53 changes: 28 additions & 25 deletions web-app/app.py
@@ -1,35 +1,38 @@
"""Module designed to supplement front end webpage"""
from flask import Flask, render_template, request, jsonify
import os
import datetime
import sys
from flask import (
Flask,
render_template,
request,
redirect,
url_for,
make_response,
session,
)
from pymongo import MongoClient
import pymongo
from bson.objectid import ObjectId
import subprocess

app = Flask("project4")
app = Flask('project4')

app.config['UPLOAD_FOLDER'] = 'uploads'
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)

@app.route("/")
def root_page():
"""Root page route"""
return render_template("root.html")
@app.route('/')
def RootPage():
return render_template('root.html')

@app.route('/analyzeData', methods=['POST'])
def analyzeData():
try:
if 'audio' not in request.files:
return jsonify({"status": "error", "message": "No audio file provided"})

@app.route("/analyze_data", methods=["POST"])
def anaylze_data():
"""Analyze data by sending it to the ml client"""
return
audio_file = request.files['audio']
audio_path = os.path.join(app.config['UPLOAD_FOLDER'], 'user_audio.wav')
audio_file.save(audio_path)


print("Audio file saved at:", audio_path)
result = subprocess.run(["C:\\Users\\Andrew - User\\AppData\\Local\\Microsoft\\WindowsApps\\python.exe", "E:\\4-containerized-app-exercise-rizzballs\\machine-learning-client\\machine_learning_client.py", audio_path], capture_output=True, text=True)


return jsonify({"status": "success"})

except Exception as e:
return jsonify({"status": "error", "message": str(e)})


if __name__ == "__main__":
PORT = os.getenv("PORT", 5000)
PORT = os.getenv('PORT', 5000)
app.run(debug=True, port=PORT)
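
The analyzeData route above launches the ML client with a hard-coded Windows interpreter and an absolute E:\ path, which only works on one machine. A minimal, portable sketch of the same call, assuming the client script sits in machine-learning-client/ next to web-app/ as shown in this commit's file tree; the helper name run_ml_client is hypothetical:

    import os
    import subprocess
    import sys


    def run_ml_client(audio_path):
        """Run the machine-learning client on the saved recording (portable sketch)."""
        # Resolve the client script relative to this file instead of hard-coding an
        # absolute Windows path; the layout is assumed from this commit's file tree.
        client = os.path.join(
            os.path.dirname(__file__),
            "..",
            "machine-learning-client",
            "machine_learning_client.py",
        )
        # sys.executable is the interpreter running the Flask app, so the subprocess
        # reuses the same environment and installed packages.
        return subprocess.run(
            [sys.executable, client, audio_path],
            capture_output=True,
            text=True,
            check=False,
        )

Using sys.executable instead of a literal interpreter path keeps the call working inside whatever virtual environment or container the web app runs in.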

70 changes: 35 additions & 35 deletions web-app/templates/root.html
@@ -7,64 +7,64 @@
<link rel="stylesheet" href="/static/style.css">
</head>
<body>
{% extends 'base.html' %} {% block Base %}
{% extends 'base.html' %}
{% block Base %}
<div class="buttonContainer">
<h2 style="text-align:center">What is our machine Learning app?</h2>
<button type="submit" onclick="beginRecording()"> Start Recording Audio </button>
<button type="submit" onclick="stopRecording()"> Stop Recording Audio </button>
</div>
<div class="buttonContainer">
<p class="displayReuslts"> reuslts of analysis will be place here</p>
<p class="displayReuslts">Results of analysis will be placed here</p>
</div>

<script>
let mediaRecorder;
let audioChunks = [];
let wordCount = 0;

async function beginRecording() {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
const audioContext = new AudioContext();
const audioInput = audioContext.createMediaStreamSource(stream);
mediaRecorder = new MediaREcorder(audioContext);
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });

mediaRecorder.ondataavailable = (event) =>{
audioChunks.push(event.data);
};
if (stream instanceof MediaStream) {
mediaRecorder = new MediaRecorder(stream);

mediaRecorder.onstop = async () =>{
const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
mediaRecorder.ondataavailable = (event) => {
audioChunks.push(event.data);
};

const formData = new FormData();
formData.append('audio', audioBlob);
mediaRecorder.onstop = async () => {
const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });

const response = await fetch('/anaylzeData', {
method: 'POST',
body: formData
});
const formData = new FormData();
formData.append('audio', audioBlob);

/**
* const result = await response.json();
* const transcription = result.transcription;
*
*
*
*/
try {
const response = await fetch('/analyzeData', {
method: 'POST',
body: formData
});

}
const result = await response.json();
console.log(result);
} catch (error) {
console.error('Error sending data to server:', error);
}
}

//audioInput.connect(mediaRecorder);
//mediaRecorder.connect(audioContext.destination);
//mediaRecorder.start();
mediaRecorder.start();
} else {
console.error('Failed to obtain a valid MediaStream object.');
}
} catch (error) {
console.error('Error getting user media:', error);
}
}

function stopRecording(){
mediaRecorder.stop()
function stopRecording() {
mediaRecorder.stop();
}
</script>

{% endblock %}


</body>
</html>
</html>
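
stopRecording() in the template above only calls mediaRecorder.stop(). A small sketch of a variant that also guards against stopping before recording has started and releases the microphone afterwards; the guard and the track shutdown are assumptions, not part of the commit:

    function stopRecording() {
        // Guard: do nothing if recording never started or has already stopped.
        if (!mediaRecorder || mediaRecorder.state === 'inactive') {
            return;
        }
        mediaRecorder.stop();  // fires onstop asynchronously, which builds and uploads the Blob
        // Release the microphone so the browser's recording indicator turns off.
        mediaRecorder.stream.getTracks().forEach((track) => track.stop());
    }
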
Binary file added web-app/uploads/user_audio.wav
Binary file not shown.
