mingyang91 committed
Commit c09a9ed · verified · Parent: bb6818c

add lip-sync

Files changed (4)
  1. src/main.rs +3 -51
  2. static/client.js +0 -303
  3. static/index.html +15 -55
  4. static/main.css +0 -232
src/main.rs CHANGED
@@ -9,13 +9,9 @@ use std::default::Default;
 use std::error::Error;
 use std::fmt::{Debug, Display, Formatter};
 use std::future::Future;
-use tokio::sync::mpsc::channel;
 use async_stream::stream;
 use aws_config::meta::region::RegionProviderChain;
-use aws_sdk_transcribestreaming::primitives::Blob;
-use aws_sdk_transcribestreaming::types::{AudioEvent, AudioStream, LanguageCode, MediaEncoding, TranscriptResultStream};
 use aws_sdk_transcribestreaming::{config::Region, meta::PKG_VERSION};
-use aws_sdk_transcribestreaming::operation::start_stream_transcription::StartStreamTranscriptionOutput;
 use clap::Parser;
 
 use poem::{Endpoint, EndpointExt, get, handler, IntoResponse, listener::TcpListener, Route, Server};
@@ -26,7 +22,6 @@ use futures_util::stream::StreamExt;
 use poem::web::{Data, Query};
 
 use tokio::select;
-use tokio::sync::mpsc::{Receiver, Sender};
 use tokio_stream::Stream;
 use serde::{Deserialize, Serialize};
 use lesson::{LessonsManager};
@@ -57,28 +52,6 @@ enum ReplyEvent {
     Synthesized(Vec<u8>),
 }
 
-
-async fn translate(client: &aws_sdk_translate::Client, transcript: Option<String>, source_lang_code: Option<String>) -> Option<String> {
-    let res = client.translate_text()
-        .set_text(transcript)
-        .set_source_language_code(Some("zh".to_string()))
-        .set_target_language_code(Some("en".to_string()))
-        .send().await;
-    res.expect("failed to translate").translated_text
-}
-
-async fn synthesize(client: &aws_sdk_polly::Client, transcript: String) -> Option<Vec<u8>> {
-    let res = client.synthesize_speech()
-        .set_text(Some(transcript))
-        .voice_id("Amy".into())
-        .output_format("pcm".into())
-        .language_code("en-US".into())
-        // .language_code("cmn-CN".into())
-        .send().await;
-    let bs = res.expect("failed to translate").audio_stream.collect().await.ok()?;
-    Some(bs.to_vec())
-}
-
 #[derive(Clone)]
 struct Context {
     lessons_manager: LessonsManager,
@@ -171,7 +144,9 @@ async fn stream_speaker(ctx: Data<&Context>, query: Query<LessonSpeakerQuery>, w
             output = transcribe_rx.recv() => {
                 if let Ok(transcript) = output {
                     println!("Transcribed: {}", transcript);
-                    socket.send(Message::Text(transcript)).await.expect("failed to send");
+                    let evt = LiveLessonTextEvent::Transcription { text: transcript.clone() };
+                    let json = serde_json::to_string(&evt).expect("failed to serialize");
+                    let _ = socket.send(Message::Text(json)).await.expect("failed to send");
                 }
             },
         }
@@ -281,26 +256,3 @@ impl Error for StreamTranscriptionError {
     }
 }
 
-fn process(translate_client: aws_sdk_translate::Client,
-           polly_client: aws_sdk_polly::Client,
-           res: Result<String, StreamTranscriptionError>) -> impl Stream<Item=Result<ReplyEvent, StreamTranscriptionError>> {
-    stream! {
-        match res {
-            Ok(transcription) => {
-                yield Ok(ReplyEvent::Transcribed(transcription.clone()));
-                let translated = translate(&translate_client, Some(transcription), Some("en".to_string())).await;
-                if let Some(has) = translated {
-                    yield Ok(ReplyEvent::Translated(has.clone()));
-                    println!("Translated: {}", has);
-                    if let Some(synthesized) = synthesize(&polly_client, has).await {
-                        yield Ok(ReplyEvent::Synthesized(synthesized));
-                    }
-                }
-            },
-            Err(e) => {
-                yield Err(e);
-            }
-        }
-
-    }
-}
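The new send path wraps the transcript in a LiveLessonTextEvent and ships it as JSON. That type is defined elsewhere in the crate and does not appear in this diff; only the Transcription { text } shape is visible above. A minimal sketch of a definition that would produce JSON the client can route on its "type" field (the Translation variant and a viseme-carrying variant are assumptions, inferred from the commit message and from the evt.type / evt.visemes branches in the deleted client.js below):

use serde::Serialize;

// Sketch only. `Transcription { text }` is taken from the hunk above; the
// other variants and their field names are assumptions based on the deleted
// client-side dispatch.
#[derive(Serialize)]
#[serde(tag = "type")]
enum LiveLessonTextEvent {
    Transcription { text: String },
    Translation { text: String },
    LipSync { visemes: Vec<String> },
}

fn main() {
    let evt = LiveLessonTextEvent::Transcription { text: "hello".to_string() };
    // With #[serde(tag = "type")] the variant name lands under a "type" key:
    // {"type":"Transcription","text":"hello"}
    println!("{}", serde_json::to_string(&evt).expect("failed to serialize"));
}

The internally tagged representation keeps the payload flat, which is what lets the deleted client below branch on evt.type and then read evt.text directly.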
static/client.js DELETED
@@ -1,303 +0,0 @@
-'use strict';
-
-// Google Cloud Speech Playground with node.js and socket.io
-// Created by Vinzenz Aubry for sansho 24.01.17
-// Feel free to improve!
-// Contact: [email protected]
-
-//connection to socket
-let websocket_uri
-if (location.protocol === "https:") {
-  websocket_uri = "wss:"
-} else {
-  websocket_uri = "ws:"
-}
-websocket_uri += "//" + location.host
-websocket_uri += "/ws" + location.pathname
-websocket_uri += location.search
-const socket = new WebSocket(websocket_uri);
-
-//================= CONFIG =================
-// Stream Audio
-let bufferSize = 2048,
-  AudioContext,
-  context,
-  processor,
-  input,
-  globalStream;
-
-//vars
-let audioElement = document.querySelector('audio'),
-  finalWord = false,
-  translationText = document.getElementById('Translation'),
-  transcriptionText = document.getElementById('Transcription'),
-  removeLastSentence = true,
-  streamStreaming = false;
-
-//audioStream constraints
-const constraints = {
-  audio: true,
-  video: false,
-};
-
-//================= RECORDING =================
-
-async function initRecording() {
-  // socket.emit('startGoogleCloudStream', ''); //init socket Google Speech Connection
-  streamStreaming = true;
-  AudioContext = window.AudioContext || window.webkitAudioContext;
-  context = new AudioContext({
-    // if Non-interactive, use 'playback' or 'balanced' // https://developer.mozilla.org/en-US/docs/Web/API/AudioContextLatencyCategory
-    latencyHint: 'interactive',
-  });
-
-  await context.audioWorklet.addModule('recorderWorkletProcessor.js')
-  context.resume();
-
-  globalStream = await navigator.mediaDevices.getUserMedia(constraints)
-  input = context.createMediaStreamSource(globalStream)
-  processor = new window.AudioWorkletNode(
-    context,
-    'recorder.worklet'
-  );
-  processor.connect(context.destination);
-  context.resume()
-  input.connect(processor)
-  processor.port.onmessage = (e) => {
-    const audioData = e.data;
-    microphoneProcess(audioData)
-  }
-}
-
-function microphoneProcess(buffer) {
-  socket.send(buffer);
-}
-
-//================= INTERFACE =================
-var startButton = document.getElementById('startRecButton');
-startButton.addEventListener('click', startRecording);
-
-var endButton = document.getElementById('stopRecButton');
-endButton.addEventListener('click', stopRecording);
-endButton.disabled = true;
-
-var recordingStatus = document.getElementById('recordingStatus');
-
-function startRecording() {
-  startButton.disabled = true;
-  endButton.disabled = false;
-  recordingStatus.style.visibility = 'visible';
-  initRecording();
-}
-
-function stopRecording() {
-  // waited for FinalWord
-  startButton.disabled = false;
-  endButton.disabled = true;
-  recordingStatus.style.visibility = 'hidden';
-  streamStreaming = false;
-  // socket.emit('endGoogleCloudStream', '');
-
-  let track = globalStream.getTracks()[0];
-  track.stop();
-
-  input.disconnect(processor);
-  processor.disconnect(context.destination);
-  context.close().then(function () {
-    input = null;
-    processor = null;
-    context = null;
-    AudioContext = null;
-    startButton.disabled = false;
-  });
-
-  // context.close();
-
-  // audiovideostream.stop();
-
-  // microphone_stream.disconnect(script_processor_node);
-  // script_processor_node.disconnect(audioContext.destination);
-  // microphone_stream = null;
-  // script_processor_node = null;
-
-  // audiovideostream.stop();
-  // videoElement.srcObject = null;
-}
-
-
-const audioQueue = new rxjs.Subject();
-audioQueue
-  .pipe(rxjs.concatMap(playAudio))
-  .subscribe(_ => console.log('played audio'));
-//================= SOCKET IO =================
-socket.onmessage = function (msg) {
-  if (msg.data instanceof Blob) {
-    audioQueue.next(msg.data)
-  } else {
-    // text
-    const evt = JSON.parse(msg.data)
-    if (evt.type === 'Translation') {
-      onSpeechData(transcriptionText, evt.text)
-    } else if (evt.type === 'Transcription') {
-      onSpeechData(translationText, evt.text)
-    } else {
-      console.log(evt.visemes)
-    }
-  }
-}
-socket.onclose = function () {
-  processor.stop()
-}
-
-function onSpeechData(resultText, data) {
-  var dataFinal = false;
-
-  if (dataFinal === false) {
-    // console.log(resultText.lastElementChild);
-    if (removeLastSentence) {
-      resultText.lastElementChild.remove();
-    }
-    removeLastSentence = true;
-
-    //add empty span
-    let empty = document.createElement('span');
-    resultText.appendChild(empty);
-
-    //add children to empty span
-    let edit = addTimeSettingsInterim(data);
-
-    for (var i = 0; i < edit.length; i++) {
-      resultText.lastElementChild.appendChild(edit[i]);
-      resultText.lastElementChild.appendChild(
-        document.createTextNode('\u00A0')
-      );
-    }
-  } else if (dataFinal === true) {
-    resultText.lastElementChild.remove();
-
-    //add empty span
-    let empty = document.createElement('span');
-    resultText.appendChild(empty);
-
-    //add children to empty span
-    let edit = addTimeSettingsFinal(data);
-    for (var i = 0; i < edit.length; i++) {
-      if (i === 0) {
-        edit[i].innerText = capitalize(edit[i].innerText);
-      }
-      resultText.lastElementChild.appendChild(edit[i]);
-
-      if (i !== edit.length - 1) {
-        resultText.lastElementChild.appendChild(
-          document.createTextNode('\u00A0')
-        );
-      }
-    }
-    resultText.lastElementChild.appendChild(
-      document.createTextNode('\u002E\u00A0')
-    );
-
-    console.log("Google Speech sent 'final' Sentence.");
-    finalWord = true;
-    endButton.disabled = false;
-
-    removeLastSentence = false;
-  }
-}
-
-//================= Juggling Spans for nlp Coloring =================
-function addTimeSettingsInterim(wholeString) {
-  console.log(wholeString);
-
-  let nlpObject = nlp(wholeString).out('terms');
-
-  let words_without_time = [];
-
-  for (let i = 0; i < nlpObject.length; i++) {
-    //data
-    let word = nlpObject[i].text;
-    let tags = [];
-
-    //generate span
-    let newSpan = document.createElement('span');
-    newSpan.innerHTML = word;
-
-    //push all tags
-    for (let j = 0; j < nlpObject[i].tags.length; j++) {
-      tags.push(nlpObject[i].tags[j]);
-    }
-
-    //add all classes
-    for (let j = 0; j < nlpObject[i].tags.length; j++) {
-      let cleanClassName = tags[j];
-      // console.log(tags);
-      let className = `nl-${cleanClassName}`;
-      newSpan.classList.add(className);
-    }
-
-    words_without_time.push(newSpan);
-  }
-
-  finalWord = false;
-  endButton.disabled = true;
-
-  return words_without_time;
-}
-
-window.onbeforeunload = function () {
-  if (streamStreaming) {
-    // socket.emit('endGoogleCloudStream', '');
-  }
-};
-
-//================= SANTAS HELPERS =================
-
-// sampleRateHertz 16000 //saved sound is awefull
-function convertFloat32ToInt16(buffer) {
-  let l = buffer.length;
-  let buf = new Int16Array(l / 3);
-
-  while (l--) {
-    if (l % 3 == 0) {
-      buf[l / 3] = buffer[l] * 0xffff;
-    }
-  }
-  return buf.buffer;
-}
-
-function capitalize(s) {
-  if (s.length < 1) {
-    return s;
-  }
-  return s.charAt(0).toUpperCase() + s.slice(1);
-}
-
-const audioContext = new (window.AudioContext || window.webkitAudioContext)();
-
-let nextStartTime = audioContext.currentTime;
-
-async function playAudio(chunk) {
-  const totalLength = chunk.size;
-
-  // Create an AudioBuffer of enough size
-  const audioBuffer = audioContext.createBuffer(1, totalLength / Int16Array.BYTES_PER_ELEMENT, 16000); // Assuming mono audio at 44.1kHz
-  const output = audioBuffer.getChannelData(0);
-
-  // Copy the PCM samples into the AudioBuffer
-  const arrayBuf = await chunk.arrayBuffer();
-  const int16Array = new Int16Array(arrayBuf, 0, Math.floor(arrayBuf.byteLength / 2))
-  for(let i = 0; i < int16Array.length; i++) {
-    output[i] = int16Array[i] / 32768.0; // Convert to [-1, 1] float32 range
-  }
-
-  // 3. Play the audio using Web Audio API
-
-  const source = audioContext.createBufferSource();
-  source.buffer = audioBuffer;
-  source.connect(audioContext.destination);
-  source.start(nextStartTime);
-  nextStartTime = Math.max(nextStartTime, audioContext.currentTime) + audioBuffer.duration;
-  source.onended = () => {
-    console.log('audio slice ended');
-  }
-}
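The deleted socket.onmessage handler above fixes the wire protocol: binary frames arrive as a Blob and are queued for gapless playback, while text frames are parsed as JSON events. On the server side that corresponds to mapping the three ReplyEvent variants from src/main.rs onto websocket frames. The diff only shows the Transcription path being sent; the rest is a sketch, assuming poem's WebSocketStream sink and the LiveLessonTextEvent sketch above:

use futures_util::SinkExt;
use poem::web::websocket::{Message, WebSocketStream};

// ReplyEvent as it appears in src/main.rs (variants visible in the diff).
enum ReplyEvent {
    Transcribed(String),
    Translated(String),
    Synthesized(Vec<u8>),
}

// Sketch: text events go out as JSON text frames, which the client parses;
// synthesized PCM goes out as a binary frame, which surfaces as a Blob and
// is pushed through the client's rxjs audio queue.
async fn send_reply(socket: &mut WebSocketStream, reply: ReplyEvent) {
    match reply {
        ReplyEvent::Transcribed(text) => {
            let json = serde_json::to_string(&LiveLessonTextEvent::Transcription { text })
                .expect("failed to serialize");
            socket.send(Message::Text(json)).await.expect("failed to send");
        }
        ReplyEvent::Translated(text) => {
            let json = serde_json::to_string(&LiveLessonTextEvent::Translation { text })
                .expect("failed to serialize");
            socket.send(Message::Text(json)).await.expect("failed to send");
        }
        ReplyEvent::Synthesized(pcm) => {
            socket.send(Message::Binary(pcm)).await.expect("failed to send");
        }
    }
}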
static/index.html CHANGED
@@ -1,55 +1,15 @@
-<!doctype html>
-<html class="no-js" lang="en">
-
-<head>
-  <meta charset="utf-8">
-  <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
-  <title>Polyhedron: Realtime Transcribe + Translate + Speech</title>
-  <meta name="description" content="Google Cloud Speech Recognition with Node and Socket.io">
-  <meta name="viewport" content="width=device-width, initial-scale=1">
-
-  <link rel="stylesheet" href="main.css">
-</head>
-
-<body>
-  <div class="wrapper">
-    <h1>Polyhedron: Realtime Transcribe + Translate + Speech</h1>
-
-    <audio></audio>
-
-    <br>
-    <button id="startButton" type="button"> Start listening</button>
-    <button id="startRecButton" type="button"> Start recording</button>
-    <button id="stopRecButton" type="button"> Stop recording</button>
-    <div id="recordingStatus">&nbsp;</div>
-    <br>
-
-    <div>
-      <h1>Translation</h1>
-      <p id="Translation">
-        <span class="greyText">No Speech to Text yet
-        <span>
-      </p>
-      <h1>Transcription</h1>
-      <p id="Transcription">
-        <span class="greyText">No Speech to Text yet
-        <span>
-      </p>
-    </div>
-
-    <br>
-    <br>
-  </div>
-
-  <!-- Nlp -->
-  <script src="https://unpkg.com/[email protected]/builds/compromise.min.js"></script>
-
-  <!-- Socket -->
-  <!--<script src="assets/js/socket.io.js"></script>-->
-
-  <script src="https://unpkg.com/rxjs@%5E7/dist/bundles/rxjs.umd.min.js"></script>
-  <!-- Client -->
-  <script src="client.js"></script>
-</body>
-
-</html>
+<!DOCTYPE html>
+<html lang="en">
+  <head>
+    <meta charset="UTF-8" />
+    <link rel="icon" type="image/svg+xml" href="/vite.svg" />
+    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
+    <title>Vite + React</title>
+    <script type="module" crossorigin src="/assets/index-835823f8.js"></script>
+    <link rel="stylesheet" href="/assets/index-983f9492.css">
+  </head>
+  <body>
+    <div id="root"></div>
+
+  </body>
+</html>
static/main.css DELETED
@@ -1,232 +0,0 @@
-body {
-  font-family: sans-serif;
-  background-color: rgb(189, 189, 189);
-}
-
-p {
-  color: black;
-}
-
-a {
-  font-weight: bold;
-  text-decoration: none;
-  color: #2a2a2a;
-}
-
-a:visited {
-  font-weight: normal;
-}
-
-a:hover {
-  text-decoration: underline;
-}
-
-audio {
-  width: 300px;
-  height: auto;
-  backgorund-color: red;
-}
-
-.wrapper {
-  width: 90vw;
-  margin: 0 auto;
-}
-
-.greyText {
-  opacity: 0.4;
-}
-
-@-webkit-keyframes redGlow {
-  from {
-    background-color: #8c190a;
-    -webkit-box-shadow: 0 0 9px #9c291a;
-  }
-  50% {
-    background-color: #9c291a;
-    -webkit-box-shadow: 0 0 18px #bdb5b4;
-  }
-  to {
-    background-color: #8c190a;
-    -webkit-box-shadow: 0 0 9px #9c291a;
-  }
-}
-#recordingStatus {
-  display: inline-block;
-  width: 18px;
-  height: 18px;
-  border-radius: 20px;
-  visibility: hidden;
-  -webkit-animation-name: redGlow;
-  -webkit-animation-duration: 2s;
-  -webkit-animation-iteration-count: infinite;
-}
-
-#ResultText {
-  width: 80vw;
-}
-
-#ResultText span {
-  display: inline-block;
-  margin-top: 10px;
-}
-
-#sessionSpeechData {
-  width: 80vw;
-}
-
-#sessionSpeechData span {
-  display: inline-block;
-  margin-top: 10px;
-}
-
-.nl-Adjective {
-  background-color: #1ada47;
-  padding: 3px;
-  border-radius: 5px;
-}
-
-.nl-Noun {
-  background-color: #151ffa;
-  padding: 3px;
-  border-radius: 5px;
-  color: white;
-}
-
-.nl-Verb {
-  background-color: #ff1616;
-  padding: 3px;
-  border-radius: 5px;
-}
-
-
-.hiddenForms {
-  opacity: 0.2;
-}
-
-h1 {
-  color: black;
-}
-
-
-/* ==========================================================================
-   Media Queries
-   ========================================================================== */
-
-/*========== Non-Mobile First Method ==========*/
-
-/*Above */
-
-@media only screen and (min-width: 1201px) {}
-
-/* Large Devices, Wide Screens */
-
-@media only screen and (max-width: 1200px) {}
-
-/* Medium Devices, Desktops */
-
-@media only screen and (max-width: 992px) {}
-
-/* Small Devices, Tablets */
-
-@media only screen and (max-width: 768px) {}
-
-/* Extra Small Devices, Phones */
-
-@media only screen and (max-width: 480px) {}
-
-/* Custom, iPhone Retina */
-
-@media only screen and (max-width: 320px) {}
-
-@media print, (-o-min-device-pixel-ratio: 5/4), (-webkit-min-device-pixel-ratio: 1.25), (min-resolution: 120dpi) {}
-
-/* ==========================================================================
-   Helper classes
-   ========================================================================== */
-
-.hidden {
-  display: none !important;
-  visibility: hidden;
-}
-
-.visuallyhidden {
-  border: 0;
-  clip: rect(0 0 0 0);
-  height: 1px;
-  margin: -1px;
-  overflow: hidden;
-  padding: 0;
-  position: absolute;
-  width: 1px;
-}
-
-.visuallyhidden.focusable:active, .visuallyhidden.focusable:focus {
-  clip: auto;
-  height: auto;
-  margin: 0;
-  overflow: visible;
-  position: static;
-  width: auto;
-}
-
-.invisible {
-  visibility: hidden;
-}
-
-.clearfix:before, .clearfix:after {
-  content: " ";
-  display: table;
-}
-
-.clearfix:after {
-  clear: both;
-}
-
-.clearfix {
-  *zoom: 1;
-}
-
-/* ==========================================================================
-   Print styles
-   ========================================================================== */
-
-@media print {
-  *, *:before, *:after {
-    background: transparent !important;
-    color: #000 !important;
-    box-shadow: none !important;
-    text-shadow: none !important;
-  }
-  a, a:visited {
-    text-decoration: underline;
-  }
-  a[href]:after {
-    content: " (" attr(href) ")";
-  }
-  abbr[title]:after {
-    content: " (" attr(title) ")";
-  }
-  a[href^="#"]:after, a[href^="javascript:"]:after {
-    content: "";
-  }
-  pre, blockquote {
-    border: 1px solid #999;
-    page-break-inside: avoid;
-  }
-  thead {
-    display: table-header-group;
-  }
-  tr, img {
-    page-break-inside: avoid;
-  }
-  img {
-    max-width: 100% !important;
-  }
-  p, h2, h3 {
-    orphans: 3;
-    widows: 3;
-  }
-  h2, h3 {
-    page-break-after: avoid;
-  }
-}