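// Client-side logic for the Space: BigGAN image generation, T5 text translation,
// and paging through an emotion dataset table.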
if (document.location.search.includes('dark-theme=true')) {
  document.body.classList.add('dark-theme');
}
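
// Table pagination state: `cursor` is the current row offset, RANGE is the
// number of rows per page, and LIMIT bounds how far the cursor can advance.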
let cursor = 0;
const RANGE = 5;
const LIMIT = 16_000;
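
// Request an image from the infer_biggan endpoint and return an object URL
// for the returned blob.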
const textToImage = async (text) => {
  const inferenceResponse = await fetch(`infer_biggan?input=${text}`);
  const inferenceBlob = await inferenceResponse.blob();
  const content = URL.createObjectURL(inferenceBlob);
  return content;
};
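
// Send text to the infer_t5 endpoint and return the model's output string.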
const translateText = async (text) => {
  const inferResponse = await fetch(`infer_t5?input=${text}`);
  const inferJson = await inferResponse.json();
  return inferJson.output;
};
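
// Fetch the dataset rows between `start` and `end` from the query_emotion endpoint.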
const queryDataset = async (start, end) => {
  const queryResponse = await fetch(`query_emotion?start=${start}&end=${end}`);
  const queryJson = await queryResponse.json();
  return queryJson.output;
};
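
// Rebuild the dataset table with `range` rows starting at `cursor`,
// then re-insert the header row.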
const updateTable = async (cursor, range = RANGE) => {
  const table = document.querySelector('.dataset-output');
  const fragment = new DocumentFragment();
  const observations = await queryDataset(cursor, cursor + range);

  for (const observation of observations) {
    let row = document.createElement('tr');
    let text = document.createElement('td');
    let emotion = document.createElement('td');

    text.textContent = observation.text;
    emotion.textContent = observation.emotion;

    row.appendChild(text);
    row.appendChild(emotion);
    fragment.appendChild(row);
  }

  table.innerHTML = '';
  table.appendChild(fragment);
  table.insertAdjacentHTML(
    'afterbegin',
    `<thead>
      <tr>
        <td>text</td>
        <td>emotion</td>
      </tr>
    </thead>`
  );
};
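
// References to the interactive elements in the page.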
const imageGenSelect = document.getElementById('image-gen-input');
const imageGenImage = document.querySelector('.image-gen-output');
const textGenForm = document.querySelector('.text-gen-form');
const tableButtonPrev = document.querySelector('.table-previous');
const tableButtonNext = document.querySelector('.table-next');
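
// Generate and display a new image whenever the selection changes.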
imageGenSelect.addEventListener('change', async (event) => {
  const value = event.target.value;
  try {
    imageGenImage.src = await textToImage(value);
    imageGenImage.alt = value + ' generated from BigGAN AI model';
  } catch (err) {
    console.error(err);
  }
});
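
// On submit, run the entered text through translateText and display the result.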
textGenForm.addEventListener('submit', async (event) => {
  event.preventDefault();
  const textGenInput = document.getElementById('text-gen-input');
  const textGenParagraph = document.querySelector('.text-gen-output');
  try {
    textGenParagraph.textContent = await translateText(textGenInput.value);
  } catch (err) {
    console.error(err);
  }
});
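
// Move the cursor back one page; hide the Previous button at the start
// and re-show the Next button if we moved away from the end.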
tableButtonPrev.addEventListener('click', () => {
  cursor = cursor > RANGE ? cursor - RANGE : 0;
  if (cursor < RANGE) {
    tableButtonPrev.classList.add('hidden');
  }
  if (cursor < LIMIT - RANGE) {
    tableButtonNext.classList.remove('hidden');
  }
  updateTable(cursor);
});
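
// Move the cursor forward one page; show the Previous button
// and hide the Next button on the last page.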
tableButtonNext.addEventListener('click', () => {
  cursor = cursor < LIMIT - RANGE ? cursor + RANGE : cursor;
  if (cursor >= RANGE) {
    tableButtonPrev.classList.remove('hidden');
  }
  if (cursor >= LIMIT - RANGE) {
    tableButtonNext.classList.add('hidden');
  }
  updateTable(cursor);
});
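
// Initial render: generate an image for the default selection and load
// the first page of the table.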
textToImage(imageGenSelect.value)
  .then((image) => (imageGenImage.src = image))
  .catch(console.error);

updateTable(cursor)
  .catch(console.error);