feat(tools, client): add speaking tasks logic (#61906)

Co-authored-by: Huyen Nguyen <25715018+huyenltnguyen@users.noreply.github.com>
Co-authored-by: Oliver Eyton-Williams <ojeytonwilliams@gmail.com>
This commit is contained in:
DanielRosa74
2025-11-07 16:29:21 -03:00
committed by GitHub
parent e32bad05c3
commit 2432f5e9e4
24 changed files with 1388 additions and 82 deletions

View File

@@ -386,6 +386,7 @@ exports.createSchemaCustomization = ({ actions }) => {
type Answer {
answer: String
feedback: String
audioId: String
}
type RequiredResource {
link: String

View File

@@ -1354,6 +1354,26 @@
"two-questions": "Congratulations on getting this far. Before you can start the exam, please answer these two short survey questions."
}
},
"speaking-modal": {
"heading": "Speaking Practice",
"repeat-sentence": "Repeat aloud this sentence:",
"play": "Play",
"playing": "Playing...",
"record": "Record",
"stop": "Stop",
"incorrect-words": "Incorrect words: {{words}}.",
"misplaced-words": "Misplaced words: {{words}}.",
"correct-congratulations": "That's correct! Congratulations!",
"very-good": "Very good!",
"try-again": "Try again.",
"no-audio-available": "No audio file available.",
"no-speech-detected": "Recording stopped. No speech detected.",
"speech-recognition-not-supported": "Speech recognition not supported in this browser.",
"recording-speak-now": "Recording. Speak now.",
"recording-stopped-processing": "Recording stopped. Processing...",
"microphone-access-error": "Error: Could not access microphone.",
"speaking-button": "Practice speaking"
},
"curriculum": {
"catalog": {
"title": "Explore our Catalog",

View File

@@ -62,6 +62,7 @@
"@reduxjs/toolkit": "2.8.2",
"@stripe/react-stripe-js": "1.16.5",
"@stripe/stripe-js": "1.54.2",
"@types/react-speech-recognition": "3.9.6",
"algoliasearch": "4.22.1",
"assert": "2.0.0",
"babel-plugin-preval": "5.1.0",
@@ -113,6 +114,7 @@
"react-reflex": "4.1.0",
"react-responsive": "9.0.2",
"react-scroll": "1.9.0",
"react-speech-recognition": "4.0.1",
"react-spinkit": "3.0.0",
"react-tooltip": "4.5.1",
"react-transition-group": "4.4.5",

View File

@@ -37,6 +37,7 @@ export type MarkdownRemark = {
type MultipleChoiceAnswer = {
answer: string;
feedback: string | null;
audioId: string | null;
};
export type Question = {

View File

@@ -1,8 +1,14 @@
import React from 'react';
import React, { useState } from 'react';
import { connect } from 'react-redux';
import { useTranslation } from 'react-i18next';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import { faMicrophone } from '@fortawesome/free-solid-svg-icons';
import { Spacer } from '@freecodecamp/ui';
import { Button, Spacer } from '@freecodecamp/ui';
import { Question } from '../../../redux/prop-types';
import { openModal } from '../redux/actions';
import { SuperBlocks } from '../../../../../shared/config/curriculum';
import SpeakingModal from './speaking-modal';
import ChallengeHeading from './challenge-heading';
import PrismFormatted from './prism-formatted';
@@ -12,6 +18,8 @@ type MultipleChoiceQuestionsProps = {
handleOptionChange: (questionIndex: number, answerIndex: number) => void;
submittedMcqAnswers: (number | null)[];
showFeedback: boolean;
openSpeakingModal: () => void;
superBlock: SuperBlocks;
};
function removeParagraphTags(text: string): string {
@@ -23,10 +31,45 @@ function MultipleChoiceQuestions({
selectedOptions,
handleOptionChange,
submittedMcqAnswers,
showFeedback
showFeedback,
openSpeakingModal,
superBlock
}: MultipleChoiceQuestionsProps): JSX.Element {
const { t } = useTranslation();
const [modalText, setModalText] = useState('');
const [modalAnswerIndex, setModalAnswerIndex] = useState<number>(0);
const [modalQuestionIndex, setModalQuestionIndex] = useState<number>(0);
function stripCodeTags(text: string): string {
return text.replace(/<code>(.*?)<\/code>/g, '$1');
}
const handleSpeakingButtonClick = (
answer: string,
answerIndex: number,
questionIndex: number
) => {
setModalText(stripCodeTags(removeParagraphTags(answer)));
setModalAnswerIndex(answerIndex);
setModalQuestionIndex(questionIndex);
openSpeakingModal();
};
const constructAudioUrl = (audioId?: string): string | undefined =>
audioId
? `https://cdn.freecodecamp.org/curriculum/english/animation-assets/sounds/${audioId}`
: undefined;
const getAudioUrl = (
questionIndex: number,
answerIndex: number
): string | undefined => {
const answer = questions[questionIndex]?.answers[answerIndex];
const audioId = answer?.audioId ?? undefined;
return constructAudioUrl(audioId);
};
return (
<>
<ChallengeHeading
@@ -50,74 +93,116 @@ function MultipleChoiceQuestions({
// -1 because the solution is 1-indexed
questions[questionIndex].solution - 1;
const labelId = `mc-question-${questionIndex}-answer-${answerIndex}-label`;
const hasAudio =
questions[questionIndex]?.answers[answerIndex]?.audioId;
return (
<React.Fragment key={answerIndex}>
<label
className={`video-quiz-option-label
${showFeedback && isSubmittedAnswer ? 'mcq-hide-border' : ''}
${showFeedback && isSubmittedAnswer ? (isCorrect ? 'mcq-correct-border' : 'mcq-incorrect-border') : ''}`}
htmlFor={`mc-question-${questionIndex}-answer-${answerIndex}`}
>
<input
name={`mc-question-${questionIndex}`}
checked={selectedOptions[questionIndex] === answerIndex}
className='sr-only'
onChange={() =>
handleOptionChange(questionIndex, answerIndex)
}
type='radio'
value={answerIndex}
id={`mc-question-${questionIndex}-answer-${answerIndex}`}
/>{' '}
<span className='video-quiz-input-visible'>
{selectedOptions[questionIndex] === answerIndex ? (
<span className='video-quiz-selected-input' />
) : null}
</span>
<PrismFormatted
className={'video-quiz-option'}
text={removeParagraphTags(answer)}
useSpan
noAria
/>
</label>
{showFeedback && isSubmittedAnswer && (
<div
className={`video-quiz-option-label mcq-feedback ${isCorrect ? 'mcq-correct' : 'mcq-incorrect'}`}
>
<p>
{isCorrect
? t('learn.quiz.correct-answer')
: t('learn.quiz.incorrect-answer')}
</p>
{feedback && (
<div key={answerIndex} className='mcq-option-row'>
<div className='mcq-option-with-feedback'>
<div className='mcq-option-content'>
<label
id={labelId}
className={`video-quiz-option-label mcq-option-label
${showFeedback && isSubmittedAnswer ? 'mcq-hide-border' : ''}
${showFeedback && isSubmittedAnswer ? (isCorrect ? 'mcq-correct-border' : 'mcq-incorrect-border') : ''}`}
htmlFor={`mc-question-${questionIndex}-answer-${answerIndex}`}
>
<input
name={`mc-question-${questionIndex}`}
checked={
selectedOptions[questionIndex] === answerIndex
}
className='sr-only'
onChange={() =>
handleOptionChange(questionIndex, answerIndex)
}
type='radio'
value={answerIndex}
id={`mc-question-${questionIndex}-answer-${answerIndex}`}
/>{' '}
<span className='video-quiz-input-visible'>
{selectedOptions[questionIndex] === answerIndex ? (
<span className='video-quiz-selected-input' />
) : null}
</span>
<PrismFormatted
className={'video-quiz-option'}
text={removeParagraphTags(answer)}
useSpan
noAria
/>
</label>
</div>
{showFeedback && isSubmittedAnswer && (
<div
className={`video-quiz-option-label mcq-feedback ${isCorrect ? 'mcq-correct' : 'mcq-incorrect'}`}
>
<p>
<PrismFormatted
className={
isCorrect
? 'mcq-prism-correct'
: 'mcq-prism-incorrect'
}
text={removeParagraphTags(feedback)}
useSpan
noAria
/>
{isCorrect
? t('learn.quiz.correct-answer')
: t('learn.quiz.incorrect-answer')}
</p>
)}
{feedback && (
<p>
<PrismFormatted
className={
isCorrect
? 'mcq-prism-correct'
: 'mcq-prism-incorrect'
}
text={removeParagraphTags(feedback)}
useSpan
noAria
/>
</p>
)}
</div>
)}
</div>
{hasAudio && (
<div className='mcq-speaking-button-wrapper'>
<Button
size='medium'
onClick={() =>
handleSpeakingButtonClick(
answer,
answerIndex,
questionIndex
)
}
className='mcq-speaking-button'
aria-describedby={labelId}
aria-label={t('speaking-modal.speaking-button')}
>
<FontAwesomeIcon icon={faMicrophone} />
</Button>
</div>
)}
</React.Fragment>
</div>
);
})}
</div>
<Spacer size='m' />
</fieldset>
))}
<Spacer size='m' />
<SpeakingModal
sentence={modalText}
audioUrl={getAudioUrl(modalQuestionIndex, modalAnswerIndex)}
answerIndex={modalAnswerIndex}
superBlock={superBlock}
/>
</>
);
}
const mapDispatchToProps = {
openSpeakingModal: () => openModal('speaking')
};
MultipleChoiceQuestions.displayName = 'MultipleChoiceQuestions';
export default MultipleChoiceQuestions;
export default connect(null, mapDispatchToProps)(MultipleChoiceQuestions);

View File

@@ -0,0 +1,234 @@
import { describe, it, expect } from 'vitest';
import { normalizeText, compareTexts } from './speaking-modal-helpers';
// Unit tests for the speech-practice text helpers. `normalizeText` is the
// tokenizer; `compareTexts` scores an utterance against the target sentence
// and (on a mismatch) returns a word-by-word alignment. Note that
// `toEqual` treats properties whose value is `undefined` as absent, which is
// why missing/extra words appear as one-key objects below.
describe('speaking-modal-helpers', () => {
  describe('normalizeText', () => {
    it('should convert text to lowercase and remove punctuation', () => {
      expect(normalizeText('Hello, World!')).toEqual(['hello', 'world']);
    });
    it('should handle multiple spaces and trim whitespace', () => {
      expect(normalizeText(' Hello World ')).toEqual(['hello', 'world']);
    });
    it('should remove various punctuation marks', () => {
      expect(normalizeText("Hello! How are you? I'm fine.")).toEqual([
        'hello',
        'how',
        'are',
        'you',
        'im',
        'fine'
      ]);
    });
    it('should handle empty string', () => {
      expect(normalizeText('')).toEqual([]);
    });
    it('should handle string with only punctuation', () => {
      expect(normalizeText('!!!')).toEqual([]);
    });
    it('should handle string with only spaces', () => {
      expect(normalizeText(' ')).toEqual([]);
    });
    it('should preserve numbers and letters', () => {
      expect(normalizeText('Hello123 World456')).toEqual([
        'hello123',
        'world456'
      ]);
    });
  });
  describe('compareTexts', () => {
    // Exact matches skip alignment entirely: the result has only a status.
    describe('exact matches', () => {
      it('should return exact match for identical text', () => {
        const result = compareTexts('Hello world', 'Hello world');
        expect(result).toEqual({
          status: 'correct'
        });
      });
      it('should return exact match ignoring punctuation and case', () => {
        const result = compareTexts('Hello, World!', 'hello world');
        expect(result).toEqual({
          status: 'correct'
        });
      });
      it('should return exact match ignoring extra spaces', () => {
        const result = compareTexts('Hello world', ' hello world ');
        expect(result).toEqual({
          status: 'correct'
        });
      });
    });
    // Partial matches exercise the alignment: { expected } only = word the
    // speaker skipped, { actual } only = word the speaker inserted.
    describe('partial matches', () => {
      it('should mark individual wrong words as incorrect', () => {
        const result = compareTexts(
          'Hello beautiful world',
          'Hello wonderful world'
        );
        expect(result.comparison).toEqual([
          { expected: 'hello', actual: 'hello' },
          { expected: 'beautiful', actual: 'wonderful' },
          { expected: 'world', actual: 'world' }
        ]);
      });
      it('should handle shorter utterance with a missing word', () => {
        const result = compareTexts('Hello beautiful world', 'Hello world');
        expect(result.comparison).toEqual([
          { expected: 'hello', actual: 'hello' },
          { expected: 'beautiful' },
          { expected: 'world', actual: 'world' }
        ]);
      });
      it('should handle consecutive missing words', () => {
        const result = compareTexts('a b c d', 'a d');
        expect(result.comparison).toEqual([
          { expected: 'a', actual: 'a' },
          { expected: 'b' },
          { expected: 'c' },
          { expected: 'd', actual: 'd' }
        ]);
      });
      it('should handle consecutive extra words', () => {
        const result = compareTexts('a d', 'a b c d');
        expect(result.comparison).toEqual([
          { expected: 'a', actual: 'a' },
          { actual: 'b' },
          { actual: 'c' },
          { expected: 'd', actual: 'd' }
        ]);
      });
      it('should handle missing words in longer sentences', () => {
        const result = compareTexts('a b c d e f g', 'a b c e f g');
        expect(result.comparison).toEqual([
          { expected: 'a', actual: 'a' },
          { expected: 'b', actual: 'b' },
          { expected: 'c', actual: 'c' },
          { expected: 'd' },
          { expected: 'e', actual: 'e' },
          { expected: 'f', actual: 'f' },
          { expected: 'g', actual: 'g' }
        ]);
      });
      it('should pad shorter utterance with undefined for alignment', () => {
        const result = compareTexts('a b c', 'b c');
        expect(result.comparison).toEqual([
          { expected: 'a' },
          { expected: 'b', actual: 'b' },
          { expected: 'c', actual: 'c' }
        ]);
      });
      it('should handle completely different texts that are longer than the original', () => {
        const result = compareTexts('a b', 'c d e');
        expect(result.comparison).toEqual([
          { expected: 'a', actual: 'c' },
          { expected: 'b', actual: 'd' },
          { actual: 'e' }
        ]);
      });
      it('should handle completely different texts that are shorter than the original', () => {
        const result = compareTexts('a b c', 'd e');
        expect(result.comparison).toEqual([
          { expected: 'a', actual: 'd' },
          { expected: 'b', actual: 'e' },
          { expected: 'c' }
        ]);
      });
      it('should handle repeated words correctly', () => {
        const result = compareTexts(
          'hello hello hello hello hello',
          'hello hello hello hello'
        );
        expect(result.comparison).toEqual([
          { expected: 'hello', actual: 'hello' },
          { expected: 'hello', actual: 'hello' },
          { expected: 'hello', actual: 'hello' },
          { expected: 'hello', actual: 'hello' },
          { expected: 'hello' }
        ]);
      });
      it('should not ignore incorrect words in the middle of the sentence', () => {
        const result = compareTexts(
          'The cat sat on the mat',
          'The black cat sat on the mat'
        );
        expect(result.comparison).toEqual([
          { expected: 'the', actual: 'the' },
          { actual: 'black' },
          { expected: 'cat', actual: 'cat' },
          { expected: 'sat', actual: 'sat' },
          { expected: 'on', actual: 'on' },
          { expected: 'the', actual: 'the' },
          { expected: 'mat', actual: 'mat' }
        ]);
      });
    });
    describe('edge cases', () => {
      it('should handle empty utterance', () => {
        const result = compareTexts('Hello world', '');
        expect(result.comparison).toEqual([
          { expected: 'hello' },
          { expected: 'world' }
        ]);
      });
      it('should handle single word comparison', () => {
        const result = compareTexts('Hello', 'Hello');
        expect(result.status).toBe('correct');
      });
      it('should treat punctuation-only original text as if it was empty', () => {
        const result = compareTexts('!!!', 'hello');
        expect(result.comparison).toEqual([
          {
            actual: 'hello'
          }
        ]);
      });
    });
    // Status thresholds: 100% → correct, >=80% → partially-correct,
    // otherwise incorrect; an empty original scores 0%.
    describe('accuracy calculations', () => {
      it('should calculate accuracy based on correct words', () => {
        const result = compareTexts('Hello beautiful world', 'Hello world');
        expect(result.status).toBe('incorrect');
      });
      it('should handle zero division when original is empty after normalization', () => {
        const result = compareTexts(' ', 'hello');
        expect(result.status).toBe('incorrect');
      });
      it('should return partially-correct for 80% accuracy', () => {
        const result = compareTexts(
          'Hello beautiful wonderful world amazing',
          'Hello beautiful wonderful world fantastic'
        );
        expect(result.status).toBe('partially-correct');
      });
      it('should handle both empty strings', () => {
        const result = compareTexts('', '');
        expect(result).toEqual({
          status: 'correct'
        });
      });
    });
  });
});

View File

@@ -0,0 +1,183 @@
import { isEmpty } from 'lodash-es';
/**
 * Tokenizes free-form text into comparable words: lowercases everything,
 * strips punctuation (anything that is not a word character or whitespace),
 * and splits on runs of whitespace. Empty tokens are dropped, so leading or
 * trailing spaces and punctuation-only input yield an empty array.
 */
export const normalizeText = (text: string) => {
  const cleaned = text.toLowerCase().replace(/[^\w\s]/g, '');
  return cleaned.split(/\s+/).filter(token => token.length > 0);
};
// A word present in the original sentence that the speaker never said.
interface Missing {
  expected: string;
  actual?: never;
}
// A word the speaker said that has no counterpart in the original sentence.
interface Extra {
  actual: string;
  expected?: never;
}
// An aligned pair: the sentence word and the uttered word at the same
// position (equal strings mean the word was spoken correctly).
interface Comparison {
  expected: string;
  actual: string;
}
// One slot in the word-by-word alignment of sentence vs. utterance.
export type ComparisonWord = Missing | Extra | Comparison;
// Outcome of comparing an utterance to the target sentence. `comparison` is
// omitted when the texts match exactly after normalization.
export interface ComparisonResult {
  comparison?: ComparisonWord[];
  status?: 'correct' | 'partially-correct' | 'incorrect';
}
/**
 * Compares a spoken utterance against the original sentence.
 *
 * Both texts are normalized (lowercased, punctuation stripped) before
 * comparison. An exact word-for-word match returns `{ status: 'correct' }`
 * with no alignment; otherwise the words are aligned and scored: 100% of the
 * original words matched → 'correct', at least 80% → 'partially-correct',
 * anything less (including an empty original) → 'incorrect'.
 */
export const compareTexts = (
  original: string,
  utterance: string
): ComparisonResult => {
  const expectedWords = normalizeText(original);
  const spokenWords = normalizeText(utterance);

  // Normalized words never contain whitespace, so joining with a space is a
  // faithful element-wise equality check.
  if (expectedWords.join(' ') === spokenWords.join(' ')) {
    return {
      status: 'correct'
    };
  }

  const comparison = alignWords(expectedWords, spokenWords);
  const hits = comparison.filter(word => word.expected === word.actual).length;

  // Guard against division by zero when the original normalizes to nothing.
  let percent = 0;
  if (expectedWords.length > 0) {
    percent = Math.round((hits / expectedWords.length) * 100);
  }

  let status: ComparisonResult['status'];
  if (percent === 100) {
    status = 'correct';
  } else if (percent >= 80) {
    status = 'partially-correct';
  } else {
    status = 'incorrect';
  }

  return {
    comparison,
    status
  };
};
// Wrap a skipped original word (toMissing) or an inserted uttered word
// (toExtra) as an alignment entry. NOTE(review): the non-null assertions mean
// an `undefined` padding word flows through as an undefined `expected`/
// `actual` despite the string type — downstream rendering checks for
// undefined explicitly, but confirm this is intentional.
const toMissing = (word?: string): ComparisonWord => ({ expected: word! });
const toExtra = (word?: string): ComparisonWord => ({ actual: word! });
/**
 * Looks ahead for `needle` in `haystack`. On a hit, every haystack entry
 * before the match is converted with `compare` (tagging it missing or extra)
 * followed by the matched pair, and the returned haystack is advanced so the
 * match sits at index 0. On a miss, no comparisons are produced and the
 * haystack is returned unchanged.
 */
function search<T extends string | undefined>(
  needle: T,
  haystack: T[],
  compare: (x?: string) => ComparisonWord
): {
  comparisons: ComparisonWord[];
  updatedHaystack: T[];
} {
  const id = haystack.indexOf(needle);
  // NOTE(review): if needle is undefined padding, indexOf can match an
  // undefined haystack entry, producing an all-undefined "match" — confirm
  // callers never hit this.
  const match = { expected: needle!, actual: needle! };
  return {
    comparisons: id > -1 ? [...haystack.slice(0, id).map(compare), match] : [],
    // slice(id) keeps the matched element; the caller shifts it off after
    // recording the match.
    updatedHaystack: id > -1 ? haystack.slice(id) : haystack
  };
}
/**
 * Walks the two word lists in lockstep, recording correct pairs directly.
 * On a mismatch it resynchronizes by searching ahead: first for the uttered
 * word later in the original (the speaker omitted words), then for the
 * original word later in the utterance (the speaker inserted words). When
 * neither search helps, the mismatched pair is recorded as-is.
 */
function matchTexts(
  originalWords: string[],
  utteranceWords: (string | undefined)[]
) {
  const results: ComparisonWord[] = [];
  // Copies, because the loop consumes both lists destructively.
  let originals = [...originalWords];
  let utterances = [...utteranceWords];

  while (utterances.length > 0 || originals.length > 0) {
    const expected = originals[0];
    const actual = utterances[0];
    if (expected === actual) {
      results.push({ expected, actual });
    } else {
      // If it's not a direct match, see if there is a match in the original
      // text, i.e. find out if the speaker omitted some words
      if (originals.includes(actual!)) {
        const output = search(actual!, originals, toMissing);
        if (isEmpty(output.comparisons)) {
          results.push({ expected, actual });
        } else {
          results.push(...output.comparisons);
        }
        originals = output.updatedHaystack;
      } else {
        // The utterance isn't in the original, but the original may be later on
        // in the utterances. i.e. we find out if the speaker inserted some
        // incorrect words
        const output = search(originals[0], utterances, toExtra);
        if (isEmpty(output.comparisons)) {
          results.push({ expected, actual });
        } else {
          results.push(...output.comparisons);
        }
        utterances = output.updatedHaystack;
      }
    }
    // Consume the word just accounted for on each side.
    originals.shift();
    utterances.shift();
  }
  return results;
}
/**
 * Aligns the uttered words against the original sentence's words so they can
 * be compared position by position. Uses the first uttered word that also
 * appears in the original as an anchor; with no shared words at all, the
 * lists are paired purely by position.
 */
function alignWords(
  originalWords: string[],
  utteranceWords: string[]
): ComparisonWord[] {
  // first we find the first utterance that's in the original array
  const firstUtteranceIndex = utteranceWords.findIndex(word =>
    originalWords.includes(word)
  );
  // Assuming there's a match now we need to know where that appears in the original, so we can align them
  if (firstUtteranceIndex !== -1) {
    const firstUtterance = utteranceWords[firstUtteranceIndex];
    const firstOriginalIndex = originalWords.findIndex(
      // we know there's a match, so no need to handle the -1 case
      word => word === firstUtterance
    );
    const delta = firstOriginalIndex - firstUtteranceIndex;
    // if delta is positive, the utterance is too short, so we pad the utterance to align
    // if delta is negative, the utterance is too long, and we can ignore the first |delta| utterances
    const alignedUtterance: (string | undefined)[] =
      delta >= 0
        ? Array<string | undefined>(delta)
            .fill(undefined)
            .concat(utteranceWords)
        : utteranceWords.slice(-delta);
    return matchTexts(originalWords, alignedUtterance);
  } else {
    // No anchor word: pad the shorter utterance with undefined so every
    // original word still gets an alignment slot, then pair by index.
    const missingUtteranceCount =
      utteranceWords.length < originalWords.length
        ? originalWords.length - utteranceWords.length
        : 0;
    const paddedUtterance = utteranceWords.concat(
      Array(missingUtteranceCount).fill(undefined)
    );
    return paddedUtterance.map((uttered, index) => ({
      expected: originalWords[index],
      actual: uttered
    }));
  }
}

View File

@@ -0,0 +1,55 @@
/* Styles for the speaking-practice modal: layout of the sentence/play row,
   the record button, and the colored word-by-word feedback. */

.speaking-modal-body {
  display: flex;
  flex-direction: column;
  align-items: center;
  padding: 1.5rem 0;
}

/* Sentence text on the left, play button on the right. */
.speaking-modal-sentence-container {
  display: flex;
  align-items: center;
  justify-content: space-between;
  width: 80%;
  gap: 2rem;
  background-color: var(--background-primary);
  border: 2px solid var(--background-tertiary);
  padding: 0.5rem 1rem;
}

.speaking-modal-sentence {
  margin: 0;
}

.speaking-modal-record-container {
  display: flex;
  justify-content: center;
  width: 100%;
  margin-top: 1.5rem;
}

/* Gap between the microphone/stop icon and the button label. */
.speaking-modal-record-icon {
  margin-inline-end: 0.5rem;
}

.speaking-modal-feedback {
  margin-top: 2rem;
}

.speaking-modal-correct-text {
  color: var(--background-success);
}

.speaking-modal-feedback-message {
  margin-top: 8px;
  margin-bottom: 0;
}

/* Per-word coloring in the partial-match comparison view. */
.speaking-modal-comparison-word-correct {
  color: var(--background-success);
  margin-inline-start: 4px;
}

.speaking-modal-comparison-word-incorrect {
  color: var(--background-danger);
  margin-inline-start: 4px;
}

View File

@@ -0,0 +1,398 @@
import React, { useState, useRef, useEffect, useCallback } from 'react';
import { connect } from 'react-redux';
import { Button, Modal } from '@freecodecamp/ui';
import { useTranslation } from 'react-i18next';
import SpeechRecognition, {
useSpeechRecognition
} from 'react-speech-recognition';
import { FontAwesomeIcon } from '@fortawesome/react-fontawesome';
import {
faPlay,
faStop,
faMicrophone
} from '@fortawesome/free-solid-svg-icons';
import { closeModal } from '../redux/actions';
import { isSpeakingModalOpenSelector } from '../redux/selectors';
import {
SuperBlocks,
superBlockToSpeechLang
} from '../../../../../shared-dist/config/curriculum';
import {
compareTexts,
type ComparisonResult,
type ComparisonWord
} from './speaking-modal-helpers';
import './speaking-modal.css';
interface ExactMatchFeedbackProps {
  sentence: string;
  feedback: string;
}
interface PartialMatchFeedbackProps {
  comparisonResult: ComparisonResult;
  sentence: string;
  feedback: string;
}
// Rendered when the transcript matched the sentence exactly: the whole
// sentence is shown in the "correct" color with the feedback message below.
const ExactMatchFeedback = ({
  sentence,
  feedback
}: ExactMatchFeedbackProps) => (
  <>
    <div className='speaking-modal-correct-text'>{sentence}</div>
    <p className='speaking-modal-feedback-message'>{feedback}</p>
  </>
);
/**
 * Flattens one alignment entry into a render-friendly status + the word to
 * display: 'misplaced' (the word was skipped), 'extra' (the word was
 * inserted), 'correct' (spoken as expected), or 'wrong' (a different word
 * was spoken in that position).
 */
function processComparison(comparison: ComparisonWord): {
  status: 'correct' | 'misplaced' | 'extra' | 'wrong';
  word: string;
} {
  // A word the speaker never said.
  if (comparison.actual === undefined) {
    return { status: 'misplaced', word: comparison.expected };
  }
  // A word the speaker added that isn't in the sentence.
  if (comparison.expected === undefined) {
    return { status: 'extra', word: comparison.actual };
  }
  return comparison.actual === comparison.expected
    ? { status: 'correct', word: comparison.expected }
    : { status: 'wrong', word: comparison.actual };
}
// Rendered when the transcript only partly matched: lists misplaced and
// incorrect words, then shows the spoken words color-coded (green correct,
// red incorrect) with the original sentence's final punctuation re-attached.
const PartialMatchFeedback = ({
  comparisonResult,
  sentence,
  feedback
}: PartialMatchFeedbackProps) => {
  const { t } = useTranslation();
  if (!comparisonResult?.comparison) return null;
  // Re-use the sentence's trailing punctuation when rendering the utterance.
  const punctuationMark = sentence[sentence.length - 1];
  // NOTE(review): skipped words have an undefined `actual`, which join()
  // renders as an empty string — confirm the doubled space is acceptable.
  const fullUtterance =
    comparisonResult.comparison.map(w => w.actual).join(' ') + punctuationMark;
  const comparison = comparisonResult.comparison.map(processComparison);
  const misplacedWords = comparison
    .filter(item => item.status === 'misplaced')
    .map(item => item.word)
    .join(', ');
  const incorrectWords = comparison
    .filter(item => item.status === 'extra' || item.status === 'wrong')
    .map(item => item.word)
    .join(', ');
  return (
    <>
      <div>
        {/* Render the utterance as a full sentence rather than separated words
        so screen readers don't add a stop after each word */}
        <p className='sr-only'>{fullUtterance}</p>
        {misplacedWords && (
          <p>
            {t('speaking-modal.misplaced-words', { words: misplacedWords })}
          </p>
        )}
        {incorrectWords && (
          <p>
            {t('speaking-modal.incorrect-words', { words: incorrectWords })}
          </p>
        )}
        {/* Visual word-by-word view; hidden from screen readers because the
            sr-only sentence above already conveys the utterance. */}
        {comparison
          .filter(({ status }) => status !== 'misplaced')
          .map((item, index: number) => (
            <span
              key={index}
              aria-hidden='true'
              className={`${item.status === 'correct' ? 'speaking-modal-comparison-word-correct' : 'speaking-modal-comparison-word-incorrect'}`}
            >
              {index === 0
                ? item.word.charAt(0).toUpperCase() + item.word.slice(1)
                : item.word}
            </span>
          ))}
        <span aria-hidden='true'>{punctuationMark}</span>
      </div>
      <p className='speaking-modal-feedback-message'>{feedback}</p>
    </>
  );
};
interface SpeakingModalProps {
  closeSpeakingModal: () => void;
  isSpeakingModalOpen: boolean;
  sentence: string;
  audioUrl?: string;
  // Not read by this component (destructuring below omits it); kept so
  // existing callers that pass it keep type-checking.
  answerIndex: number;
  superBlock: SuperBlocks;
}

/**
 * Modal for speaking practice: plays the reference audio for a sentence,
 * records the camper via the browser's speech recognition, and shows
 * feedback comparing the transcript to the target sentence.
 *
 * Fix: removed a leftover debug `console.log` that printed every speech
 * recognition transcript to the console on each render.
 */
const SpeakingModal = ({
  closeSpeakingModal,
  isSpeakingModalOpen,
  sentence,
  audioUrl,
  superBlock
}: SpeakingModalProps) => {
  const { t } = useTranslation();
  const [isPlaying, setIsPlaying] = useState(false);
  const [feedback, setFeedback] = useState('');
  const [comparisonResult, setComparisonResult] =
    useState<ComparisonResult | null>(null);
  // True between pressing Record and the recognition session ending; used to
  // tell a finished recording apart from recognition stopping on its own.
  const [hasStartedRecording, setHasStartedRecording] = useState(false);
  // Previous value of `listening`, so the effect below can detect the
  // listening → not-listening transition.
  const [previouslyListening, setPreviouslyListening] = useState(false);
  const audioRef = useRef<HTMLAudioElement | null>(null);
  // Safety timeout so a recording session can't run indefinitely if the
  // camper never presses Stop.
  const stopListeningTimeoutRef = useRef<
    ReturnType<typeof setTimeout> | undefined
  >();
  const {
    transcript,
    listening,
    resetTranscript,
    browserSupportsSpeechRecognition
  } = useSpeechRecognition();

  const handleAudioEnded = useCallback(() => {
    setIsPlaying(false);
  }, []);

  const handleAudioError = useCallback((e: Event) => {
    setIsPlaying(false);
    console.error('Audio playback error:', e);
  }, []);

  // Stop any in-flight playback and detach its listeners so the old Audio
  // element can be garbage collected.
  const cleanupAudioResources = useCallback(() => {
    try {
      if (audioRef.current) {
        audioRef.current.pause();
        audioRef.current.removeEventListener('ended', handleAudioEnded);
        audioRef.current.removeEventListener('error', handleAudioError);
        audioRef.current = null;
      }
    } catch (error) {
      console.warn('Error stopping audio playback:', error);
    }
  }, [handleAudioEnded, handleAudioError]);

  // Reset feedback when modal is closed and cleanup on unmount
  useEffect(() => {
    if (!isSpeakingModalOpen) {
      setFeedback('');
      setComparisonResult(null);
      setHasStartedRecording(false);
      setPreviouslyListening(false);
      setIsPlaying(false);
      resetTranscript();
      void SpeechRecognition.stopListening();
      clearTimeout(stopListeningTimeoutRef.current);
      cleanupAudioResources();
    }
    return () => {
      clearTimeout(stopListeningTimeoutRef.current);
      cleanupAudioResources();
    };
  }, [isSpeakingModalOpen, resetTranscript, cleanupAudioResources]);

  // Track listening state changes
  useEffect(() => {
    if (previouslyListening && !listening && hasStartedRecording) {
      // Speech recognition just stopped and we had started a recording session
      if (transcript && transcript.trim()) {
        const result = compareTexts(sentence, transcript);
        setComparisonResult(result);
        if (result.status === 'correct') {
          setFeedback(t('speaking-modal.correct-congratulations'));
        } else if (result.status === 'partially-correct') {
          setFeedback(`${t('speaking-modal.very-good')}`);
        } else {
          setFeedback(`${t('speaking-modal.try-again')}`);
        }
      } else {
        // No transcript and we were recording, this means no speech detected
        setFeedback(t('speaking-modal.no-speech-detected'));
        setComparisonResult(null);
      }
      setHasStartedRecording(false);
    }
    setPreviouslyListening(listening);
  }, [
    listening,
    previouslyListening,
    hasStartedRecording,
    transcript,
    sentence,
    t
  ]);

  // Play the reference audio for the sentence, replacing any prior playback.
  const handlePlay = async () => {
    if (!audioUrl) {
      setFeedback(t('speaking-modal.no-audio-available'));
      return;
    }
    // Audio ids may be stored without the file extension.
    const modifiedAudioUrl = audioUrl.endsWith('.mp3')
      ? audioUrl
      : `${audioUrl}.mp3`;
    try {
      setIsPlaying(true);
      cleanupAudioResources();
      const audio = new Audio(modifiedAudioUrl);
      audioRef.current = audio;
      audio.addEventListener('ended', handleAudioEnded);
      audio.addEventListener('error', handleAudioError);
      await audio.play();
    } catch (error) {
      setIsPlaying(false);
      console.error('Audio playback error:', error);
    }
  };

  // Begin a speech-recognition session with a 30s auto-stop safeguard.
  const handleStartRecording = () => {
    if (!browserSupportsSpeechRecognition) {
      setFeedback(t('speaking-modal.speech-recognition-not-supported'));
      return;
    }
    try {
      setFeedback(t('speaking-modal.recording-speak-now'));
      setHasStartedRecording(true);
      resetTranscript();
      setComparisonResult(null);
      void SpeechRecognition.startListening({
        continuous: false,
        language: superBlockToSpeechLang[superBlock]
      });
      stopListeningTimeoutRef.current = setTimeout(() => {
        void SpeechRecognition.stopListening();
      }, 30000);
    } catch (error) {
      console.error('Error starting recording:', error);
      setFeedback(t('speaking-modal.microphone-access-error'));
    }
  };

  const handleStopRecording = () => {
    void SpeechRecognition.stopListening();
    clearTimeout(stopListeningTimeoutRef.current);
    setFeedback(t('speaking-modal.recording-stopped-processing'));
  };

  // Single Record/Stop button toggles based on the listening state.
  const handleRecord = () => {
    if (listening) {
      handleStopRecording();
    } else {
      handleStartRecording();
    }
  };

  return (
    <Modal onClose={closeSpeakingModal} open={isSpeakingModalOpen} size='large'>
      <Modal.Header closeButtonClassNames='close'>
        {t('speaking-modal.heading')}
      </Modal.Header>
      <Modal.Body alignment='center' className='speaking-modal-body'>
        <p>{t('speaking-modal.repeat-sentence')}</p>
        <div className='speaking-modal-sentence-container'>
          <p id='speaking-sentence' className='speaking-modal-sentence'>
            {sentence}
          </p>
          <Button
            size='medium'
            onClick={() => void handlePlay()}
            aria-describedby='speaking-sentence'
            disabled={isPlaying || listening}
            aria-label={
              isPlaying ? t('speaking-modal.playing') : t('speaking-modal.play')
            }
          >
            <FontAwesomeIcon
              icon={isPlaying ? faStop : faPlay}
              aria-hidden='true'
            />
          </Button>
        </div>
        <div className='speaking-modal-record-container'>
          {browserSupportsSpeechRecognition ? (
            <Button
              size='medium'
              onClick={() => void handleRecord()}
              disabled={isPlaying}
            >
              <FontAwesomeIcon
                icon={listening ? faStop : faMicrophone}
                aria-hidden='true'
                className='speaking-modal-record-icon'
              />
              {listening
                ? t('speaking-modal.stop')
                : t('speaking-modal.record')}
            </Button>
          ) : (
            <p className='speaking-modal-not-supported'>
              {t('speaking-modal.speech-recognition-not-supported')}
            </p>
          )}
        </div>
        {/* aria-live so feedback updates are announced as they arrive. */}
        <div
          className='speaking-modal-feedback'
          aria-live='polite'
          aria-atomic='true'
        >
          {comparisonResult?.status === 'correct' ? (
            <ExactMatchFeedback sentence={sentence} feedback={feedback} />
          ) : comparisonResult?.comparison ? (
            <PartialMatchFeedback
              comparisonResult={comparisonResult}
              sentence={sentence}
              feedback={feedback}
            />
          ) : (
            feedback
          )}
        </div>
      </Modal.Body>
    </Modal>
  );
};

const mapStateToProps = (state: unknown) => ({
  isSpeakingModalOpen: isSpeakingModalOpenSelector(state) as boolean
});
const mapDispatchToProps = {
  closeSpeakingModal: () => closeModal('speaking')
};
SpeakingModal.displayName = 'SpeakingModal';
export default connect(mapStateToProps, mapDispatchToProps)(SpeakingModal);

View File

@@ -330,6 +330,7 @@ const ShowGeneric = ({
handleOptionChange={handleMcqOptionChange}
submittedMcqAnswers={submittedMcqAnswers}
showFeedback={showFeedback}
superBlock={superBlock}
/>
</ObserveKeys>
)}
@@ -405,6 +406,7 @@ export const query = graphql`
answers {
answer
feedback
audioId
}
solution
}

View File

@@ -47,7 +47,8 @@ const initialState = {
examResults: false,
survey: false,
projectPreview: false,
shortcuts: false
shortcuts: false,
speaking: false
},
portalWindow: null,
showPreviewPortal: false,

View File

@@ -52,6 +52,7 @@ export const isFinishQuizModalOpenSelector = state =>
export const isProjectPreviewModalOpenSelector = state =>
state[ns].modal.projectPreview;
export const isShortcutsModalOpenSelector = state => state[ns].modal.shortcuts;
export const isSpeakingModalOpenSelector = state => state[ns].modal.speaking;
export const isSubmittingSelector = state => state[ns].isSubmitting;
export const isResettingSelector = state => state[ns].isResetting;

View File

@@ -32,6 +32,8 @@
.video-quiz-options {
background-color: var(--primary-background);
display: flex;
flex-direction: column;
}
/* remove bootstrap margin and center the radio buttons */
@@ -59,18 +61,6 @@
cursor: pointer;
display: flex;
font-weight: normal;
border-left: 4px solid var(--tertiary-background);
border-right: 4px solid var(--tertiary-background);
border-top: 2px solid var(--tertiary-background);
border-bottom: 2px solid var(--tertiary-background);
}
.video-quiz-option-label:first-child {
border-top: 4px solid var(--tertiary-background);
}
.video-quiz-option-label:last-child {
border-bottom: 4px solid var(--tertiary-background);
}
.video-quiz-input-visible {
@@ -116,14 +106,51 @@ input:focus-visible + .video-quiz-input-visible {
background: none;
}
.mcq-correct-border {
.mcq-option-row {
/* Use grid so we can align the speaking button to the option content row only */
display: grid;
grid-template-columns: 1fr auto;
grid-template-rows: auto auto; /* row1 = option content, row2 = feedback */
gap: 0 1rem;
border-left: 4px solid var(--tertiary-background);
border-right: 4px solid var(--tertiary-background);
border-top: 2px solid var(--tertiary-background);
border-bottom: 2px solid var(--tertiary-background);
}
.mcq-option-row:first-child {
border-top: 4px solid var(--tertiary-background);
}
.mcq-option-row:last-child {
border-bottom: 4px solid var(--tertiary-background);
}
.mcq-option-row:has(.mcq-correct) {
border-left-color: var(--success-background);
}
.mcq-incorrect-border {
.mcq-option-row:has(.mcq-incorrect) {
border-left-color: var(--danger-background);
}
.mcq-option-with-feedback {
display: contents;
}
.mcq-option-content {
display: block;
grid-column: 1;
grid-row: 1;
}
.mcq-option-label {
display: flex;
align-items: center;
margin: 0;
flex: 1;
}
.mcq-correct {
color: var(--success-color);
border-left-color: var(--success-background);
@@ -138,7 +165,20 @@ input:focus-visible + .video-quiz-input-visible {
border-bottom: none;
}
.mcq-speaking-button-wrapper {
grid-column: 2;
/* place the button in the same grid row as the option content */
grid-row: 1;
display: flex;
align-items: center; /* vertically center the button within the option content row */
justify-content: center;
padding-inline-end: 20px;
}
/* ensure feedback occupies the second grid row */
.mcq-feedback {
grid-column: 1;
grid-row: 2;
border-top: none;
display: block;
}

View File

@@ -3,6 +3,7 @@ id: 67fe9b40ec6bdb9c8f891e4d
title: Task 19
challengeType: 19
dashedName: task-19
showSpeakingButton: true
lang: en-US
---
@@ -24,6 +25,10 @@ Which of the following best shows that Sarah agrees with Bob's suggestion?
`I don't think that would help much.`
### --audio-id--
67fe9b-SP1
### --feedback--
This shows disagreement, not support.
@@ -32,6 +37,10 @@ This shows disagreement, not support.
`That sounds like a good approach to me.`
### --audio-id--
67fe9b-SP2
## --video-solution--
2

View File

@@ -93,7 +93,8 @@ const questionJoi = Joi.object().keys({
.items(
Joi.object().keys({
answer: Joi.string().required(),
feedback: Joi.string().allow(null)
feedback: Joi.string().allow(null),
audioId: Joi.string().allow(null)
})
)
.required()
@@ -334,6 +335,7 @@ const schema = Joi.object().keys({
'array.unique': 'Dialogues must not have overlapping times.'
})
}),
showSpeakingButton: Joi.bool(),
solutions: Joi.array().items(Joi.array().items(fileJoi).min(1)),
superBlock: Joi.string().regex(slugWithSlashRE),
superOrder: Joi.number(),

View File

@@ -0,0 +1,114 @@
import { test, expect } from '@playwright/test';
import translations from '../client/i18n/locales/english/translations.json';
const pageWithSpeaking =
'/learn/b1-english-for-developers/learn-about-adverbial-phrases/task-19';
const pageWithoutSpeaking =
'/learn/full-stack-developer/lecture-what-is-css/what-is-the-basic-anatomy-of-a-css-rule';
test.describe('Multiple Choice Question Challenge - With Speaking Modal', () => {
  test.beforeEach(async ({ page }) => {
    await page.goto(pageWithSpeaking);
  });

  test('should show the speaking button and open the speaking modal', async ({
    page,
    browserName
  }) => {
    // Firefox lacks SpeechRecognition support, so the happy path cannot
    // run there; the unsupported path is covered by the next test.
    test.skip(
      browserName === 'firefox',
      'Skip on Firefox - speech recognition unsupported'
    );

    const speakingButtons = page.getByRole('button', {
      name: translations['speaking-modal']['speaking-button']
    });

    // Every radio option should expose exactly one speaking button.
    const speakingButtonCount = await speakingButtons.count();
    expect(speakingButtonCount).toBeGreaterThan(0);
    const radioCount = await page.getByRole('radio').count();
    expect(speakingButtonCount).toBe(radioCount);

    for (let i = 0; i < speakingButtonCount; i++) {
      const btn = speakingButtons.nth(i);
      await expect(btn).toBeVisible();

      const describedBy = await btn.getAttribute('aria-describedby');
      expect(describedBy).toBeTruthy();
      // Ensure aria-describedby points to an existing element. Use an
      // attribute selector instead of `#${id}`: a bare CSS id selector
      // fails to parse if the id contains CSS-special characters
      // (e.g. '.' or ':'), whereas [id="..."] matches it literally.
      await expect(page.locator(`[id="${describedBy}"]`)).toBeVisible();

      await expect(btn).toHaveAttribute(
        'aria-label',
        translations['speaking-modal']['speaking-button']
      );
    }

    // Opening the modal should expose the Play and Record controls.
    await speakingButtons.first().click();
    await expect(page.getByRole('dialog')).toBeVisible();
    await expect(
      page.getByRole('button', {
        name: translations['speaking-modal']['play']
      })
    ).toBeVisible();
    await expect(
      page.getByRole('button', {
        name: translations['speaking-modal']['record']
      })
    ).toBeVisible();
  });

  test('should show not-supported message on Firefox', async ({
    page,
    browserName
  }) => {
    test.skip(
      browserName !== 'firefox',
      'Run only on Firefox to validate unsupported path'
    );

    const radioCount = await page.getByRole('radio').count();
    expect(radioCount).toBeGreaterThan(1);

    const speakingButton = page
      .getByRole('button', {
        name: translations['speaking-modal']['speaking-button']
      })
      .first();
    await expect(speakingButton).toBeVisible();
    await speakingButton.click();

    // The modal still opens, but reports that speech recognition is
    // unavailable in this browser.
    await expect(page.getByRole('dialog')).toBeVisible();
    await expect(
      page.getByText(
        translations['speaking-modal']['speech-recognition-not-supported']
      )
    ).toBeVisible();
  });
});
test.describe('Multiple Choice Question Challenge - Without Speaking Modal', () => {
  test.beforeEach(async ({ page }) => {
    await page.goto(pageWithoutSpeaking);
  });

  test('should not show speaking controls on a challenge without speaking', async ({
    page
  }) => {
    // The MCQ options themselves must still render...
    expect(await page.getByRole('radio').count()).toBeGreaterThan(1);

    // ...but no per-option speaking button should be present.
    const speakingButtons = page.getByRole('button', {
      name: translations['speaking-modal']['speaking-button']
    });
    expect(await speakingButtons.count()).toBe(0);
  });
});

23
pnpm-lock.yaml generated
View File

@@ -360,6 +360,9 @@ importers:
'@stripe/stripe-js':
specifier: 1.54.2
version: 1.54.2
'@types/react-speech-recognition':
specifier: 3.9.6
version: 3.9.6
algoliasearch:
specifier: 4.22.1
version: 4.22.1
@@ -513,6 +516,9 @@ importers:
react-scroll:
specifier: 1.9.0
version: 1.9.0(react-dom@17.0.2(react@17.0.2))(react@17.0.2)
react-speech-recognition:
specifier: 4.0.1
version: 4.0.1(react@17.0.2)
react-spinkit:
specifier: 3.0.0
version: 3.0.0
@@ -4785,6 +4791,9 @@ packages:
'@types/react-scroll@1.8.10':
resolution: {integrity: sha512-RD4Z7grbdNGOKwKnUBKar6zNxqaW3n8m9QSrfvljW+gmkj1GArb8AFBomVr6xMOgHPD3v1uV3BrIf01py57daQ==}
'@types/react-speech-recognition@3.9.6':
resolution: {integrity: sha512-cdzwXIZXWyp8zfM2XI7APDW1rZf4Nz73T4SIS2y+cC7zHnZluCdumYKH6HacxgxJH+zemAq2oXbHWXcyW0eT3A==}
'@types/react-spinkit@3.0.10':
resolution: {integrity: sha512-grNfPdesm/xVJPyohfW752bM8N9kuJUx2yFo0I41mZwF3BuXt4+IV4TwaCPcBtA1V3C5r2NUPyqfEUpNTtWbvA==}
@@ -11833,6 +11842,11 @@ packages:
peerDependencies:
react: ^16.3.0 || ^17.0.0 || ^18.0.0
react-speech-recognition@4.0.1:
resolution: {integrity: sha512-0fIqzLtfY8vuYA6AmJVK7qiabZx0oFKOO+rbiBgFI3COWVGREy0A+gdU16hWXmFebeyrI8JsOLYsWk6WaHUXRw==}
peerDependencies:
react: '>=16.8.0'
react-spinkit@3.0.0:
resolution: {integrity: sha512-RrfGRPjqxHQiy7quPqhjPynTu0zobgQaZu1QYBMpJJ6pCSRRRK16EZMaxdE6fLVYFRJWpX/eGATWLMoVFFT5uQ==}
@@ -19587,6 +19601,10 @@ snapshots:
dependencies:
'@types/react': 17.0.83
'@types/react-speech-recognition@3.9.6':
dependencies:
'@types/dom-speech-recognition': 0.0.1
'@types/react-spinkit@3.0.10':
dependencies:
'@types/react': 17.0.83
@@ -28764,6 +28782,11 @@ snapshots:
dependencies:
react: 17.0.2
react-speech-recognition@4.0.1(react@17.0.2):
dependencies:
lodash.debounce: 4.0.8
react: 17.0.2
react-spinkit@3.0.0:
dependencies:
classnames: 2.3.2

View File

@@ -53,6 +53,12 @@ export const languageSuperBlocks = [
SuperBlocks.A2Chinese
];
/**
 * Mapping from superblock to a speech recognition language tag (BCP-47).
 * The record is Partial: superblocks without an entry have no speech
 * language configured — presumably used to gate speaking features at the
 * call sites; confirm against consumers.
 */
export const superBlockToSpeechLang: Partial<Record<SuperBlocks, string>> = {
  [SuperBlocks.A2English]: 'en-US',
  [SuperBlocks.B1English]: 'en-US'
};
/*
* SuperBlockStages.Upcoming = SHOW_UPCOMING_CHANGES === 'true'
* 'Upcoming' is for development -> not shown on stag or prod anywhere

View File

@@ -0,0 +1,55 @@
# --description--
Paragraph 1
```html
code example
```
# --instructions--
Paragraph 0
```html
code example 0
```
# --questions--
## --text--
Question line 1
```js
var x = 'y';
```
## --answers--
Some inline `code`
### --feedback--
That is not correct.
### --audio-id--
answer1-audio
---
Some *italics*
A second answer paragraph.
### --audio-id--
answer2-audio
---
<code> code in </code> code tags
## --video-solution--
3

View File

@@ -688,15 +688,18 @@ exports[`challenge parser > should parse video questions 1`] = `
"answers": [
{
"answer": "<p>Some inline <code>code</code></p>",
"audioId": null,
"feedback": "<p>That is not correct.</p>",
},
{
"answer": "<p>Some <em>italics</em></p>
<p>A second answer paragraph.</p>",
"audioId": null,
"feedback": null,
},
{
"answer": "<p><code> code in </code> code tags</p>",
"audioId": null,
"feedback": null,
},
],

View File

@@ -7,15 +7,18 @@ exports[`add-video-question plugin > should match the video snapshot 1`] = `
"answers": [
{
"answer": "<p>Some inline <code>code</code></p>",
"audioId": null,
"feedback": "<p>That is not correct.</p>",
},
{
"answer": "<p>Some <em>italics</em></p>
<p>A second answer paragraph.</p>",
"audioId": null,
"feedback": null,
},
{
"answer": "<p><code> code in </code> code tags</p>",
"audioId": null,
"feedback": null,
},
],

View File

@@ -58,23 +58,56 @@ function getAnswers(answersNodes) {
return answerGroups.map(answerGroup => {
const answerTree = root(answerGroup);
const feedback = find(answerTree, { value: '--feedback--' });
if (feedback) {
const answerNodes = getAllBefore(answerTree, '--feedback--');
const feedbackNodes = getSection(answerTree, '--feedback--');
const feedbackNodes = getSection(answerTree, '--feedback--');
const audioIdNodes = getSection(answerTree, '--audio-id--');
const hasFeedback = feedbackNodes.length > 0;
const hasAudioId = audioIdNodes.length > 0;
if (hasFeedback || hasAudioId) {
let answerNodes;
if (hasFeedback && hasAudioId) {
const feedbackHeading = find(answerTree, {
type: 'heading',
children: [{ type: 'text', value: '--feedback--' }]
});
const audioIdHeading = find(answerTree, {
type: 'heading',
children: [{ type: 'text', value: '--audio-id--' }]
});
const feedbackIndex = answerTree.children.indexOf(feedbackHeading);
const audioIdIndex = answerTree.children.indexOf(audioIdHeading);
const firstMarker =
feedbackIndex < audioIdIndex ? '--feedback--' : '--audio-id--';
answerNodes = getAllBefore(answerTree, firstMarker);
} else if (hasFeedback) {
answerNodes = getAllBefore(answerTree, '--feedback--');
} else {
answerNodes = getAllBefore(answerTree, '--audio-id--');
}
if (answerNodes.length < 1) {
throw Error('Answer missing');
}
let extractedAudioId = null;
if (hasAudioId) {
const audioIdContent = getParagraphContent(audioIdNodes[0]);
if (audioIdContent && audioIdContent.trim()) {
extractedAudioId = audioIdContent.trim();
}
}
return {
answer: mdastToHtml(answerNodes),
feedback: mdastToHtml(feedbackNodes)
feedback: hasFeedback ? mdastToHtml(feedbackNodes) : null,
audioId: extractedAudioId
};
}
return { answer: mdastToHtml(answerGroup), feedback: null };
return { answer: mdastToHtml(answerGroup), feedback: null, audioId: null };
});
}

View File

@@ -3,7 +3,11 @@ import parseFixture from '../__fixtures__/parse-fixture';
import addVideoQuestion from './add-video-question';
describe('add-video-question plugin', () => {
let simpleAST, videoAST, multipleQuestionAST, videoOutOfOrderAST;
let simpleAST,
videoAST,
multipleQuestionAST,
videoOutOfOrderAST,
videoWithAudioAST;
const plugin = addVideoQuestion();
let file = { data: {} };
@@ -16,6 +20,7 @@ describe('add-video-question plugin', () => {
videoOutOfOrderAST = await parseFixture(
'with-video-question-out-of-order.md'
);
videoWithAudioAST = await parseFixture('with-video-question-audio.md');
});
beforeEach(() => {
@@ -43,6 +48,7 @@ describe('add-video-question plugin', () => {
expect(question.answers[0]).toHaveProperty('answer');
expect(question.answers[0].answer).toBeTruthy();
expect(question.answers[0]).toHaveProperty('feedback');
expect(question.answers[0]).toHaveProperty('audioId');
};
it('should generate a questions array from a video challenge AST', () => {
@@ -76,16 +82,19 @@ describe('add-video-question plugin', () => {
expect(testObject.solution).toBe(3);
expect(testObject.answers[0]).toStrictEqual({
answer: '<p>Some inline <code>code</code></p>',
feedback: '<p>That is not correct.</p>'
feedback: '<p>That is not correct.</p>',
audioId: null
});
expect(testObject.answers[1]).toStrictEqual({
answer: `<p>Some <em>italics</em></p>
<p>A second answer paragraph.</p>`,
feedback: null
feedback: null,
audioId: null
});
expect(testObject.answers[2]).toStrictEqual({
answer: '<p><code> code in </code> code tags</p>',
feedback: null
feedback: null,
audioId: null
});
});
@@ -101,6 +110,31 @@ describe('add-video-question plugin', () => {
expect(() => plugin(simpleAST)).not.toThrow();
});
// Verifies that `### --audio-id--` sections in the fixture markdown are
// parsed into per-answer `audioId` fields, and that answers without such
// a section get `audioId: null`.
it('should extract audioId from answers when present', () => {
plugin(videoWithAudioAST, file);
const testObject = file.data.questions[0];
expect(testObject.answers[0]).toStrictEqual({
answer: '<p>Some inline <code>code</code></p>',
feedback: '<p>That is not correct.</p>',
audioId: 'answer1-audio'
});
expect(testObject.answers[1]).toStrictEqual({
answer: `<p>Some <em>italics</em></p>
<p>A second answer paragraph.</p>`,
feedback: null,
audioId: 'answer2-audio'
});
expect(testObject.answers[2]).toStrictEqual({
answer: '<p><code> code in </code> code tags</p>',
feedback: null,
audioId: null
});
});
it('should match the video snapshot', () => {
plugin(videoAST, file);
expect(file.data).toMatchSnapshot();

View File

@@ -35,6 +35,7 @@ const VALID_MARKERS = [
'## --before-user-code--',
// Level 3
'### --audio-id--',
'### --feedback--',
'### --question--',