Update index.html
index.html  (+28 -290)
@@ -3,10 +3,10 @@
  3    <head>
  4      <meta charset="utf-8">
  5      <meta name="description"
  6 -          content="
  7 -    <meta name="keywords" content="
  8      <meta name="viewport" content="width=device-width, initial-scale=1">
  9 -    <title>
 10
 11      <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
 12            rel="stylesheet">
@@ -33,39 +33,26 @@
 33      <div class="container is-max-desktop">
 34        <div class="columns is-centered">
 35          <div class="column has-text-centered">
 36 -          <h1 class="title is-1 publication-title">
 37            <div class="is-size-5 publication-authors">
 38              <span class="author-block">
 39 -              <a href="https://
 40              <span class="author-block">
 41 -              <a href="https://
 42              <span class="author-block">
 43 -              <a href="
 44 -            </span>
 45 -            <span class="author-block">
 46 -              <a href="http://sofienbouaziz.com" target="_blank">Sofien Bouaziz</a><sup>2</sup>,
 47 -            </span>
 48 -            <span class="author-block">
 49 -              <a href="https://www.danbgoldman.com" target="_blank">Dan B Goldman</a><sup>2</sup>,
 50 -            </span>
 51 -            <span class="author-block">
 52 -              <a href="https://homes.cs.washington.edu/~seitz/" target="_blank">Steven M. Seitz</a><sup>1,2</sup>,
 53 -            </span>
 54 -            <span class="author-block">
 55 -              <a href="http://www.ricardomartinbrualla.com" target="_blank">Ricardo Martin-Brualla</a><sup>2</sup>
 56 -            </span>
 57            </div>
 58
 59            <div class="is-size-5 publication-authors">
 60 -            <span class="author-block"><sup>1</sup>University
 61 -            <span class="author-block"><sup>2</sup>Google Research</span>
 62            </div>
 63
 64            <div class="column has-text-centered">
 65              <div class="publication-links">
 66                <!-- PDF Link. -->
 67                <span class="link-block">
 68 -                <a href="https://
 69                    class="external-link button is-normal is-rounded is-dark">
 70                  <span class="icon">
 71                    <i class="fas fa-file-pdf"></i>
@@ -74,7 +61,7 @@
 74                  </a>
 75                </span>
 76                <span class="link-block">
 77 -                <a href="https://
 78                    class="external-link button is-normal is-rounded is-dark">
 79                  <span class="icon">
 80                    <i class="ai ai-arxiv"></i>
@@ -82,19 +69,10 @@
 82                  <span>arXiv</span>
 83                </a>
 84              </span>
 85 -
 86 -            <span class="link-block">
 87 -              <a href="https://www.youtube.com/watch?v=MrKrnHhk8IA" target="_blank"
 88 -                  class="external-link button is-normal is-rounded is-dark">
 89 -                <span class="icon">
 90 -                  <i class="fab fa-youtube"></i>
 91 -                </span>
 92 -                <span>Video</span>
 93 -              </a>
 94 -            </span>
 95              <!-- Code Link. -->
 96              <span class="link-block">
 97 -              <a href="
 98                  class="external-link button is-normal is-rounded is-dark">
 99                <span class="icon">
100                  <i class="fab fa-github"></i>
@@ -104,7 +82,7 @@
104              </span>
105              <!-- Dataset Link. -->
106              <span class="link-block">
107 -              <a href="https://
108                  class="external-link button is-normal is-rounded is-dark">
109                <span class="icon">
110                  <i class="far fa-images"></i>
@@ -120,80 +98,8 @@
120      </div>
121    </section>
122
123 -  <section class="hero teaser">
124 -    <div class="container is-max-desktop">
125 -      <div class="hero-body">
126 -        <video id="teaser" autoplay muted loop playsinline height="100%">
127 -          <source src="./static/videos/teaser.mp4"
128 -                  type="video/mp4">
129 -        </video>
130 -        <h2 class="subtitle has-text-centered">
131 -          <span class="dnerf">Nerfies</span> turns selfie videos from your phone into
132 -          free-viewpoint
133 -          portraits.
134 -        </h2>
135 -      </div>
136 -    </div>
137 -  </section>
138
139
140 -  <section class="hero is-light is-small">
141 -    <div class="hero-body">
142 -      <div class="container">
143 -        <div id="results-carousel" class="carousel results-carousel">
144 -          <div class="item item-steve">
145 -            <video poster="" id="steve" autoplay controls muted loop playsinline height="100%">
146 -              <source src="./static/videos/steve.mp4"
147 -                      type="video/mp4">
148 -            </video>
149 -          </div>
150 -          <div class="item item-chair-tp">
151 -            <video poster="" id="chair-tp" autoplay controls muted loop playsinline height="100%">
152 -              <source src="./static/videos/chair-tp.mp4"
153 -                      type="video/mp4">
154 -            </video>
155 -          </div>
156 -          <div class="item item-shiba">
157 -            <video poster="" id="shiba" autoplay controls muted loop playsinline height="100%">
158 -              <source src="./static/videos/shiba.mp4"
159 -                      type="video/mp4">
160 -            </video>
161 -          </div>
162 -          <div class="item item-fullbody">
163 -            <video poster="" id="fullbody" autoplay controls muted loop playsinline height="100%">
164 -              <source src="./static/videos/fullbody.mp4"
165 -                      type="video/mp4">
166 -            </video>
167 -          </div>
168 -          <div class="item item-blueshirt">
169 -            <video poster="" id="blueshirt" autoplay controls muted loop playsinline height="100%">
170 -              <source src="./static/videos/blueshirt.mp4"
171 -                      type="video/mp4">
172 -            </video>
173 -          </div>
174 -          <div class="item item-mask">
175 -            <video poster="" id="mask" autoplay controls muted loop playsinline height="100%">
176 -              <source src="./static/videos/mask.mp4"
177 -                      type="video/mp4">
178 -            </video>
179 -          </div>
180 -          <div class="item item-coffee">
181 -            <video poster="" id="coffee" autoplay controls muted loop playsinline height="100%">
182 -              <source src="./static/videos/coffee.mp4"
183 -                      type="video/mp4">
184 -            </video>
185 -          </div>
186 -          <div class="item item-toby">
187 -            <video poster="" id="toby" autoplay controls muted loop playsinline height="100%">
188 -              <source src="./static/videos/toby2.mp4"
189 -                      type="video/mp4">
190 -            </video>
191 -          </div>
192 -        </div>
193 -      </div>
194 -    </div>
195 -  </section>
196 -
197
198    <section class="section">
199      <div class="container is-max-desktop">
@@ -203,198 +109,30 @@
203          <h2 class="title is-3">Abstract</h2>
204          <div class="content has-text-justified">
205            <p>
206 -
207 -            deforming scene using photos/videos captured casually from mobile phones.
208 -          </p>
209 -          <p>
210 -            Our approach augments neural radiance fields
211 -            (NeRF) by optimizing an
212 -            additional continuous volumetric deformation field that warps each observed point into a
213 -            canonical 5D NeRF.
214 -            We observe that these NeRF-like deformation fields are prone to local minima, and
215 -            propose a coarse-to-fine optimization method for coordinate-based models that allows for
216 -            more robust optimization.
217 -            By adapting principles from geometry processing and physical simulation to NeRF-like
218 -            models, we propose an elastic regularization of the deformation field that further
219 -            improves robustness.
220 -          </p>
221 -          <p>
222 -            We show that <span class="dnerf">Nerfies</span> can turn casually captured selfie
223 -            photos/videos into deformable NeRF
224 -            models that allow for photorealistic renderings of the subject from arbitrary
225 -            viewpoints, which we dub <i>"nerfies"</i>. We evaluate our method by collecting data
226 -            using a
227 -            rig with two mobile phones that take time-synchronized photos, yielding train/validation
228 -            images of the same pose at different viewpoints. We show that our method faithfully
229 -            reconstructs non-rigidly deforming scenes and reproduces unseen views with high
230 -            fidelity.
231            </p>
232          </div>
233        </div>
234      </div>
235      <!--/ Abstract. -->
236
237 -    <!-- Paper video. -->
238 -    <div class="columns is-centered has-text-centered">
239 -      <div class="column is-four-fifths">
240 -        <h2 class="title is-3">Video</h2>
241 -        <div class="publication-video">
242 -          <iframe src="https://www.youtube.com/embed/MrKrnHhk8IA?rel=0&showinfo=0"
243 -                  frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>
244 -        </div>
245 -      </div>
246 -    </div>
247 -    <!--/ Paper video. -->
248 -  </div>
249 -</section>
250 -
251 -
252 -<section class="section">
253 -  <div class="container is-max-desktop">
254 -
255 -    <div class="columns is-centered">
256 -
257 -      <!-- Visual Effects. -->
258 -      <div class="column">
259 -        <div class="content">
260 -          <h2 class="title is-3">Visual Effects</h2>
261 -          <p>
262 -            Using <i>nerfies</i> you can create fun visual effects. This Dolly zoom effect
263 -            would be impossible without nerfies since it would require going through a wall.
264 -          </p>
265 -          <video id="dollyzoom" autoplay controls muted loop playsinline height="100%">
266 -            <source src="./static/videos/dollyzoom-stacked.mp4"
267 -                    type="video/mp4">
268 -          </video>
269 -        </div>
270 -      </div>
271 -      <!--/ Visual Effects. -->
272 -
273 -      <!-- Matting. -->
274 -      <div class="column">
275 -        <h2 class="title is-3">Matting</h2>
276 -        <div class="columns is-centered">
277 -          <div class="column content">
278 -            <p>
279 -              As a byproduct of our method, we can also solve the matting problem by ignoring
280 -              samples that fall outside of a bounding box during rendering.
281 -            </p>
282 -            <video id="matting-video" controls playsinline height="100%">
283 -              <source src="./static/videos/matting.mp4"
284 -                      type="video/mp4">
285 -            </video>
286 -          </div>
287 -
288 -        </div>
289 -      </div>
290 -    </div>
291 -    <!--/ Matting. -->
292 -
293 -    <!-- Animation. -->
-
<div class="columns is-centered">
|
295 |
-
<div class="column is-full-width">
|
296 |
-
<h2 class="title is-3">Animation</h2>
|
297 |
-
|
298 |
-
<!-- Interpolating. -->
|
299 |
-
<h3 class="title is-4">Interpolating states</h3>
|
300 |
-
<div class="content has-text-justified">
|
301 |
-
<p>
|
302 |
-
We can also animate the scene by interpolating the deformation latent codes of two input
|
303 |
-
frames. Use the slider here to linearly interpolate between the left frame and the right
|
304 |
-
frame.
|
305 |
-
</p>
|
306 |
-
</div>
|
307 |
-
<div class="columns is-vcentered interpolation-panel">
|
308 |
-
<div class="column is-3 has-text-centered">
|
309 |
-
<img src="./static/images/interpolate_start.jpg"
|
310 |
-
class="interpolation-image"
|
311 |
-
alt="Interpolate start reference image."/>
|
312 |
-
<p>Start Frame</p>
|
313 |
-
</div>
|
314 |
-
<div class="column interpolation-video-column">
|
315 |
-
<div id="interpolation-image-wrapper">
|
316 |
-
Loading...
|
317 |
-
</div>
|
318 |
-
<input class="slider is-fullwidth is-large is-info"
|
319 |
-
id="interpolation-slider"
|
320 |
-
step="1" min="0" max="100" value="0" type="range">
|
321 |
-
</div>
|
322 |
-
<div class="column is-3 has-text-centered">
|
323 |
-
<img src="./static/images/interpolate_end.jpg"
|
324 |
-
class="interpolation-image"
|
325 |
-
alt="Interpolation end reference image."/>
|
326 |
-
<p class="is-bold">End Frame</p>
|
327 |
-
</div>
|
328 |
-
</div>
|
329 |
-
<br/>
|
330 |
-
<!--/ Interpolating. -->
|
331 |
-
|
332 |
-
<!-- Re-rendering. -->
|
333 |
-
<h3 class="title is-4">Re-rendering the input video</h3>
|
334 |
-
<div class="content has-text-justified">
|
335 |
-
<p>
|
336 |
-
Using <span class="dnerf">Nerfies</span>, you can re-render a video from a novel
|
337 |
-
viewpoint such as a stabilized camera by playing back the training deformations.
|
338 |
-
</p>
|
339 |
-
</div>
|
340 |
-
<div class="content has-text-centered">
|
341 |
-
<video id="replay-video"
|
342 |
-
controls
|
343 |
-
muted
|
344 |
-
preload
|
345 |
-
playsinline
|
346 |
-
width="75%">
|
347 |
-
<source src="./static/videos/replay.mp4"
|
348 |
-
type="video/mp4">
|
349 |
-
</video>
|
350 |
-
</div>
|
351 |
-
<!--/ Re-rendering. -->
|
352 |
-
|
353 |
-
</div>
|
354 |
-
</div>
|
355 |
-
<!--/ Animation. -->
|
356 |
-
|
357 |
-
|
358 |
-
<!-- Concurrent Work. -->
|
359 |
-
<div class="columns is-centered">
|
360 |
-
<div class="column is-full-width">
|
361 |
-
<h2 class="title is-3">Related Links</h2>
|
362 |
-
|
363 |
-
<div class="content has-text-justified">
|
364 |
-
<p>
|
365 |
-
There's a lot of excellent work that was introduced around the same time as ours.
|
366 |
-
</p>
|
367 |
-
<p>
|
368 |
-
<a href="https://arxiv.org/abs/2104.09125" target="_blank">Progressive Encoding for Neural Optimization</a> introduces an idea similar to our windowed position encoding for coarse-to-fine optimization.
|
369 |
-
</p>
|
370 |
-
<p>
|
371 |
-
<a href="https://www.albertpumarola.com/research/D-NeRF/index.html" target="_blank">D-NeRF</a> and <a href="https://gvv.mpi-inf.mpg.de/projects/nonrigid_nerf/" target="_blank">NR-NeRF</a>
|
372 |
-
both use deformation fields to model non-rigid scenes.
|
373 |
-
</p>
|
374 |
-
<p>
|
375 |
-
Some works model videos with a NeRF by directly modulating the density, such as <a href="https://video-nerf.github.io/" target="_blank">Video-NeRF</a>, <a href="https://www.cs.cornell.edu/~zl548/NSFF/" target="_blank">NSFF</a>, and <a href="https://neural-3d-video.github.io/" target="_blank">DyNeRF</a>
|
376 |
-
</p>
|
377 |
-
<p>
|
378 |
-
There are probably many more by the time you are reading this. Check out <a href="https://dellaert.github.io/NeRF/" target="_blank">Frank Dellart's survey on recent NeRF papers</a>, and <a href="https://github.com/yenchenlin/awesome-NeRF" target="_blank">Yen-Chen Lin's curated list of NeRF papers</a>.
|
379 |
-
</p>
|
380 |
-
</div>
|
381 |
-
</div>
|
382 |
-
</div>
|
383 |
-
<!--/ Concurrent Work. -->
|
384 |
-
|
385 |
</div>
|
386 |
</section>
|
387 |
|
388 |
-
|
389 |
<section class="section" id="BibTeX">
|
390 |
<div class="container is-max-desktop content">
|
391 |
<h2 class="title">BibTeX</h2>
|
392 |
-
<pre><code>@
|
393 |
-
author
|
394 |
-
|
395 |
-
|
396 |
-
year
|
397 |
-
}
|
|
|
|
|
|
|
|
|
|
|
398 |
</div>
|
399 |
</section>
|
400 |
|
@@ -403,10 +141,10 @@
403    <div class="container">
404      <div class="content has-text-centered">
405        <a class="icon-link" target="_blank"
406 -        href="
407          <i class="fas fa-file-pdf"></i>
408        </a>
409 -      <a class="icon-link" href="https://github.com/
410        <i class="fab fa-github"></i>
411      </a>
412    </div>
  3    <head>
  4      <meta charset="utf-8">
  5      <meta name="description"
  6 +          content="SentiWordNet for New Language: Automatic Translation Approach">
  7 +    <meta name="keywords" content="Sentiment lexicon,Translation approach,Machine learning,Sentiment analysis">
  8      <meta name="viewport" content="width=device-width, initial-scale=1">
  9 +    <title>SentiWordNet for New Language: Automatic Translation Approach</title>
 10
 11      <link href="https://fonts.googleapis.com/css?family=Google+Sans|Noto+Sans|Castoro"
 12            rel="stylesheet">
 33      <div class="container is-max-desktop">
 34        <div class="columns is-centered">
 35          <div class="column has-text-centered">
 36 +          <h1 class="title is-1 publication-title">SentiWordNet for New Language: Automatic Translation Approach</h1>
 37            <div class="is-size-5 publication-authors">
 38              <span class="author-block">
 39 +              <a href="https://aucan.github.io/" target="_blank">Alaettin Uçan</a><sup>1</sup>,</span>
 40              <span class="author-block">
 41 +              <a href="https://profiles.stanford.edu/behzad-naderalvojoud" target="_blank">Behzad Naderalvojoud</a><sup>1</sup>,</span>
 42              <span class="author-block">
 43 +              <a href="#">Ebru Akcapinar Sezer</a><sup>1</sup>,
 44 +            </span>
 45            </div>
 46
 47            <div class="is-size-5 publication-authors">
 48 +            <span class="author-block"><sup>1</sup>Hacettepe University</span>
 49            </div>
 50
 51            <div class="column has-text-centered">
 52              <div class="publication-links">
 53                <!-- PDF Link. -->
 54                <span class="link-block">
 55 +                <a href="https://ieeexplore.ieee.org/document/7907484" target="_blank"
 56                    class="external-link button is-normal is-rounded is-dark">
 57                  <span class="icon">
 58                    <i class="fas fa-file-pdf"></i>
 61                  </a>
 62                </span>
 63                <span class="link-block">
 64 +                <a href="https://ieeexplore.ieee.org/document/7907484" target="_blank"
 65                    class="external-link button is-normal is-rounded is-dark">
 66                  <span class="icon">
 67                    <i class="ai ai-arxiv"></i>
 69                  <span>arXiv</span>
 70                </a>
 71              </span>
 72 +
 73              <!-- Code Link. -->
 74              <span class="link-block">
 75 +              <a href="#" target="_blank"
 76                  class="external-link button is-normal is-rounded is-dark">
 77                <span class="icon">
 78                  <i class="fab fa-github"></i>
 82              </span>
 83              <!-- Dataset Link. -->
 84              <span class="link-block">
 85 +              <a href="https://huggingface.co/datasets/Alaettin/Humir-Sentiment-Datasets" target="_blank"
 86                  class="external-link button is-normal is-rounded is-dark">
 87                <span class="icon">
 88                  <i class="far fa-images"></i>
 98      </div>
 99    </section>
100
101
102
103
104    <section class="section">
105      <div class="container is-max-desktop">
109          <h2 class="title is-3">Abstract</h2>
110          <div class="content has-text-justified">
111            <p>
112 +            This paper proposes an automatic translation approach to create a sentiment lexicon for a new language from available English resources. In this approach, an automatic mapping is generated from a sense-level resource to a word-level one by applying a triple unification process. This process produces a single polarity score for each term by incorporating all sense polarities. The major idea is to deal with sense ambiguity during the lexicon transfer and to provide a general sentiment lexicon for languages like Turkish that do not have a freely available machine-readable dictionary. On the other hand, translation quality is critical in the lexicon transfer due to the ambiguity problem. Thus, this paper also proposes a multiple bilingual translation approach to find the most appropriate equivalents for the source-language terms. In this approach, three algorithms (parallel, series, and hybrid) are used to integrate the translation results. Finally, three lexicons of different sizes are obtained for the target language. The performance of the three lexicons is evaluated on the lexicon-based sentiment classification task and compared with the results achieved by the supervised approach. According to the experimental results, the proposed approach can produce reliable sentiment lexicons for the target language.
113            </p>
114          </div>
115        </div>
116      </div>
117      <!--/ Abstract. -->
118
119        </div>
120      </section>
121
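The abstract added above hinges on a triple unification step: SentiWordNet scores each sense of a word separately, and unification collapses those sense-level positive/negative scores into one word-level polarity before the lexicon is transferred to the new language. As a rough illustration only (not the paper's exact procedure), the sketch below averages (PosScore - NegScore) over all senses of each term in a SentiWordNet-3.0-style file; the file name, the averaging rule, and the whitespace tokenizer are assumptions made for the example.

from collections import defaultdict

def build_word_level_lexicon(swn_path="SentiWordNet_3.0.0.txt"):
    """Collapse sense-level SentiWordNet entries into one polarity per word.

    Each data line is tab-separated: POS, ID, PosScore, NegScore, SynsetTerms, Gloss.
    Here a word's polarity is the mean of (PosScore - NegScore) over all senses
    it appears in; the paper's triple unification may weight senses differently.
    """
    sense_polarities = defaultdict(list)
    with open(swn_path, encoding="utf-8") as f:
        for line in f:
            if line.startswith("#") or not line.strip():
                continue  # skip header comments and blank lines
            _pos, _id, pos_score, neg_score, terms, _gloss = line.rstrip("\n").split("\t")
            polarity = float(pos_score) - float(neg_score)
            for term in terms.split():
                word = term.rsplit("#", 1)[0]  # drop the sense rank: "good#1" -> "good"
                sense_polarities[word].append(polarity)
    return {w: sum(p) / len(p) for w, p in sense_polarities.items()}

def classify(text, lexicon):
    """Lexicon-based classification: sign of the summed word polarities."""
    score = sum(lexicon.get(token, 0.0) for token in text.lower().split())
    return "positive" if score >= 0 else "negative"

A translated lexicon would then assign each target-language equivalent (for example, one selected by the parallel, series, or hybrid integration of translation results) the unified score of its English source word.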
122    <section class="section" id="BibTeX">
123      <div class="container is-max-desktop content">
124        <h2 class="title">BibTeX</h2>
125 +      <pre><code>@INPROCEEDINGS{7907484,
126 +        author={Ucan, Alaettin and Naderalvojoud, Behzad and Sezer, Ebru Akcapinar and Sever, Hayri},
127 +        booktitle={2016 12th International Conference on Signal-Image Technology & Internet-Based Systems (SITIS)},
128 +        title={SentiWordNet for New Language: Automatic Translation Approach},
129 +        year={2016},
130 +        volume={},
131 +        number={},
132 +        pages={308-315},
133 +        keywords={Dictionaries;Sentiment analysis;Pragmatics;Semantics;Learning systems;Benchmark testing;Feature extraction;Sentiment lexicon;Translation approach;Machine learning;Sentiment analysis},
134 +        doi={10.1109/SITIS.2016.57}}
135 +      </code></pre>
136      </div>
137    </section>
138
141    <div class="container">
142      <div class="content has-text-centered">
143        <a class="icon-link" target="_blank"
144 +        href="#">
145          <i class="fas fa-file-pdf"></i>
146        </a>
147 +      <a class="icon-link" href="https://github.com/aucan" target="_blank" class="external-link" disabled>
148        <i class="fab fa-github"></i>
149      </a>
150    </div>