''
def write_gifs(self, clip, gif_dir):
    for (start, end, _, _) in self:
        name = '%s/%08d_%08d.gif' % (gif_dir, 100 * start, 100 * end)
        clip.subclip(start, end).write_gif(name, verbose=False)
'Returns a list of the clips in the composite clip that are actually playing at the given time `t`.'
def playing_clips(self, t=0):
return [c for c in self.clips if c.is_playing(t)]
'Writes one frame in the file.'
def write_frame(self, img_array):
    try:
        if PY3:
            self.proc.stdin.write(img_array.tobytes())
        else:
            self.proc.stdin.write(img_array.tostring())
    except IOError as err:
        _, ffmpeg_error = self.proc.communicate()
        error = str(err) + ('\n\nMoviePy error: FFMPEG encountered the following error while writing file %s:\n\n %s' % (self.filename, str(ffmpeg_error)))
        if 'Unknown encoder' in ffmpeg_error:
            error = error + ("\n\nThe video export failed because FFMPEG didn't find the specified codec for video encoding (%s). Please install this codec or change the codec when calling write_videofile. For instance:\n >>> clip.write_videofile('myvid.webm', codec='libvpx')" % self.codec)
        elif 'incorrect codec parameters ?' in ffmpeg_error:
            error = error + ("\n\nThe video export failed, possibly because the codec specified for the video (%s) is not compatible with the given extension (%s). Please specify a valid 'codec' argument in write_videofile. This would be 'libx264' or 'mpeg4' for mp4, 'libtheora' for ogv, 'libvpx' for webm. Another possible reason is that the audio codec was not compatible with the video codec. For instance the video extensions 'ogv' and 'webm' only allow 'libvorbis' (default) as a video codec." % (self.codec, self.ext))
        elif 'encoder setup failed' in ffmpeg_error:
            error = error + '\n\nThe video export failed, possibly because the bitrate you specified was too high or too low for the video codec.'
        elif 'Invalid encoder type' in ffmpeg_error:
            error = error + '\n\nThe video export failed because the codec or file extension you provided is not a video'
        raise IOError(error)
'Close/delete the internal reader.'
def __del__(self):
    try:
        del self.reader
    except AttributeError:
        pass
    try:
        del self.audio
    except AttributeError:
        pass
'Opens the file, creates the pipe.'
def initialize(self, starttime=0):
    self.close()
    if starttime != 0:
        offset = min(1, starttime)
        i_arg = ['-ss', '%.06f' % (starttime - offset),
                 '-i', self.filename,
                 '-ss', '%.06f' % offset]
    else:
        i_arg = ['-i', self.filename]
    cmd = ([get_setting('FFMPEG_BINARY')] + i_arg +
           ['-loglevel', 'error',
            '-f', 'image2pipe',
            '-vf', 'scale=%d:%d' % tuple(self.size),
            '-sws_flags', self.resize_algo,
            '-pix_fmt', self.pix_fmt,
            '-vcodec', 'rawvideo', '-'])
    popen_params = {'bufsize': self.bufsize,
                    'stdout': sp.PIPE,
                    'stderr': sp.PIPE,
                    'stdin': DEVNULL}
    if os.name == 'nt':
        popen_params['creationflags'] = 134217728
    self.proc = sp.Popen(cmd, **popen_params)
'Reads and throws away n frames'
def skip_frames(self, n=1):
    w, h = self.size
    for i in range(n):
        self.proc.stdout.read(self.depth * w * h)
    self.pos += n
'Read a file video frame at time t. Note for coders: getting an arbitrary frame in the video with ffmpeg can be painfully slow if some decoding has to be done. This function tries to avoid fetching arbitrary frames whenever possible, by moving between adjacent frames.'
def get_frame(self, t):
    pos = int(self.fps * t + 1e-05) + 1
    if pos == self.pos:
        return self.lastread
    else:
        if (pos < self.pos) or (pos > self.pos + 100):
            self.initialize(t)
            self.pos = pos
        else:
            self.skip_frames(pos - self.pos - 1)
        result = self.read_frame()
        self.pos = pos
        return result
'Shallow copy of the clip. Returns a shallow copy of the clip whose mask and audio will be shallow copies of the clip\'s mask and audio if they exist. This method is intensively used to produce new clips every time there is an outplace transformation of the clip (clip.resize, clip.subclip, etc.)'
def copy(self):
    newclip = copy(self)
    if hasattr(self, 'audio'):
        newclip.audio = copy(self.audio)
    if hasattr(self, 'mask'):
        newclip.mask = copy(self.mask)
    return newclip
'Gets a numpy array representing the RGB picture of the clip at time t, or the (mono or stereo) value for a sound clip.'
@convert_to_seconds(['t'])
def get_frame(self, t):
    if self.memoize:
        if t == self.memoized_t:
            return self.memoized_frame
        else:
            frame = self.make_frame(t)
            self.memoized_t = t
            self.memoized_frame = frame
            return frame
    else:
        return self.make_frame(t)
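# A minimal usage sketch (not part of the library source): with memoization
# enabled, repeated queries at the same time reuse the cached frame instead
# of recomputing it. `clip` stands in for any MoviePy clip instance.
clip = clip.set_memoize(True)
frame_a = clip.get_frame(1.0)   # computed via make_frame and cached
frame_b = clip.get_frame(1.0)   # served from the cache (same array object)
frame_c = clip.get_frame(2.0)   # cache miss: recomputed and re-cached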
'General processing of a clip. Returns a new Clip whose frames are a transformation (through function ``fun``) of the frames of the current clip. Parameters fun A function with signature (gf,t -> frame) where ``gf`` will represent the current clip\'s ``get_frame`` method, i.e. ``gf`` is a function (t->image). Parameter `t` is a time in seconds, `frame` is a picture (=Numpy array) which will be returned by the transformed clip (see examples below). apply_to Can be either ``\'mask\'``, or ``\'audio\'``, or ``[\'mask\',\'audio\']``. Specifies if the filter ``fl`` should also be applied to the audio or the mask of the clip, if any. keep_duration Set to True if the transformation does not change the ``duration`` of the clip. Examples In the following, ``newclip`` is a 100-pixel-high clip whose video content scrolls from the top to the bottom of the frames of ``clip``. >>> fl = lambda gf,t : gf(t)[int(t):int(t)+50, :] >>> newclip = clip.fl(fl, apply_to=\'mask\')'
def fl(self, fun, apply_to=None, keep_duration=True):
    if apply_to is None:
        apply_to = []
    newclip = self.set_make_frame(lambda t: fun(self.get_frame, t))
    if not keep_duration:
        newclip.duration = None
        newclip.end = None
    if isinstance(apply_to, str):
        apply_to = [apply_to]
    for attr in apply_to:
        if hasattr(newclip, attr):
            a = getattr(newclip, attr)
            if a is not None:
                new_a = a.fl(fun, keep_duration=keep_duration)
                setattr(newclip, attr, new_a)
    return newclip
'Returns a Clip instance playing the content of the current clip but with a modified timeline, time ``t`` being replaced by another time `t_func(t)`. Parameters t_func: A function ``t-> new_t`` apply_to: Can be either \'mask\', or \'audio\', or [\'mask\',\'audio\']. Specifies if the filter ``fl`` should also be applied to the audio or the mask of the clip, if any. keep_duration: ``False`` (default) if the transformation modifies the ``duration`` of the clip. Examples >>> # plays the clip (and its mask and sound) twice faster >>> newclip = clip.fl_time(lambda t: 2*t, apply_to=[\'mask\',\'audio\']) >>> # plays the clip starting at t=3, and backwards: >>> newclip = clip.fl_time(lambda t: 3-t)'
def fl_time(self, t_func, apply_to=None, keep_duration=False):
    if apply_to is None:
        apply_to = []
    return self.fl(lambda gf, t: gf(t_func(t)), apply_to, keep_duration=keep_duration)
'Returns the result of ``func(self, *args, **kwargs)``. For instance >>> newclip = clip.fx(resize, 0.2, method=\'bilinear\') is equivalent to >>> newclip = resize(clip, 0.2, method=\'bilinear\') The motivation of fx is to keep the name of the effect near its parameters, when the effects are chained: >>> from moviepy.video.fx import volumex, resize, mirrorx >>> clip.fx( volumex, 0.5).fx( resize, 0.3).fx( mirrorx ) >>> # Is equivalent, but clearer than >>> resize( volumex( mirrorx( clip ), 0.5), 0.3)'
def fx(self, func, *args, **kwargs):
return func(self, *args, **kwargs)
'Returns a copy of the clip, with the ``start`` attribute set to ``t``, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'. If ``change_end=True`` and the clip has a ``duration`` attribute, the ``end`` attribute of the clip will be updated to ``start+duration``. If ``change_end=False`` and the clip has an ``end`` attribute, the ``duration`` attribute of the clip will be updated to ``end-start``. These changes are also applied to the ``audio`` and ``mask`` clips of the current clip, if they exist.'
@apply_to_mask
@apply_to_audio
@convert_to_seconds(['t'])
@outplace
def set_start(self, t, change_end=True):
    self.start = t
    if (self.duration is not None) and change_end:
        self.end = t + self.duration
    elif self.end is not None:
        self.duration = self.end - self.start
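# Hedged illustration (the clip and its values are hypothetical): change_end
# decides which of `end` and `duration` is recomputed when the start moves.
clip = VideoFileClip('myvideo.mp4').set_duration(10)        # start=0, end=10
shifted = clip.set_start(5)                                 # change_end=True: end becomes 15
trimmed = clip.set_end(10).set_start(5, change_end=False)   # duration becomes 5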
'Returns a copy of the clip, with the ``end`` attribute set to ``t``, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'. Also sets the duration of the mask and audio, if any, of the returned clip.'
@apply_to_mask
@apply_to_audio
@convert_to_seconds(['t'])
@outplace
def set_end(self, t):
    self.end = t
    if self.end is None:
        return
    if self.start is None:
        if self.duration is not None:
            self.start = max(0, t - self.duration)
    else:
        self.duration = self.end - self.start
'Returns a copy of the clip, with the ``duration`` attribute set to ``t``, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'. Also sets the duration of the mask and audio, if any, of the returned clip. If change_end is False, the start attribute of the clip will be modified according to the duration and the preset end of the clip.'
@apply_to_mask
@apply_to_audio
@convert_to_seconds(['t'])
@outplace
def set_duration(self, t, change_end=True):
    self.duration = t
    if change_end:
        self.end = None if (t is None) else (self.start + t)
    else:
        if self.duration is None:
            raise Exception('Cannot change clip start when new duration is None')
        self.start = self.end - t
'Sets a ``make_frame`` attribute for the clip. Useful for setting arbitrary/complicated videoclips.'
@outplace
def set_make_frame(self, make_frame):
self.make_frame = make_frame
'Returns a copy of the clip with a new default fps for functions like write_videofile, iter_frames, etc.'
@outplace
def set_fps(self, fps):
self.fps = fps
'Says whether the clip is a mask or not (ismask is a boolean)'
@outplace
def set_ismask(self, ismask):
self.ismask = ismask
'Sets whether the clip should keep the last frame read in memory'
@outplace
def set_memoize(self, memoize):
self.memoize = memoize
'If t is a time, returns True if t is between the start and the end of the clip. t can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'. If t is a numpy array, returns False if none of the t is in the clip, else returns a vector [b_1, b_2, b_3...] where b_i is True iff t_i is in the clip.'
@convert_to_seconds(['t'])
def is_playing(self, t):
    if isinstance(t, np.ndarray):
        tmin, tmax = t.min(), t.max()
        if (self.end is not None) and (tmin >= self.end):
            return False
        if tmax < self.start:
            return False
        result = 1 * (t >= self.start)
        if self.end is not None:
            result *= (t <= self.end)
        return result
    else:
        return (t >= self.start) and ((self.end is None) or (t < self.end))
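# Small hedged example: scalar times give a boolean, arrays give a 0/1 vector.
# `clip` is assumed to have start=2 and end=8.
clip.is_playing(5)                        # True
clip.is_playing(np.array([1., 3., 9.]))   # array([0, 1, 0])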
'Returns a clip playing the content of the current clip between times ``t_start`` and ``t_end``, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'. If ``t_end`` is not provided, it is assumed to be the duration of the clip (potentially infinite). If ``t_end`` is a negative value, it is reset to ``clip.duration + t_end``. For instance: :: >>> # cut the last two seconds of the clip: >>> newclip = clip.subclip(0,-2) If ``t_end`` is provided or if the clip has a duration attribute, the duration of the returned clip is set automatically. The ``mask`` and ``audio`` of the resulting subclip will be subclips of the ``mask`` and ``audio`` of the original clip, if they exist.'
@convert_to_seconds(['t_start', 't_end'])
@apply_to_mask
@apply_to_audio
def subclip(self, t_start=0, t_end=None):
    if t_start < 0:
        t_start = self.duration + t_start
    if (self.duration is not None) and (t_start > self.duration):
        raise ValueError("t_start (%.02f) should be smaller than the clip's "
                         "duration (%.02f)." % (t_start, self.duration))
    newclip = self.fl_time(lambda t: t + t_start, apply_to=[])
    if (t_end is None) and (self.duration is not None):
        t_end = self.duration
    elif (t_end is not None) and (t_end < 0):
        if self.duration is None:
            print("Error: subclip with negative times (here %s) can only be "
                  "extracted from clips with a ``duration``" % str((t_start, t_end)))
        else:
            t_end = self.duration + t_end
    if t_end is not None:
        newclip.duration = t_end - t_start
        newclip.end = newclip.start + newclip.duration
    return newclip
'Returns a clip playing the content of the current clip but skips the extract between ``ta`` and ``tb``, which can be expressed in seconds (15.35), in (min, sec), in (hour, min, sec), or as a string: \'01:03:05.35\'. If the original clip has a ``duration`` attribute set, the duration of the returned clip is automatically computed as ``duration - (tb - ta)``. The resulting clip\'s ``audio`` and ``mask`` will also be cut out if they exist.'
@apply_to_mask
@apply_to_audio
@convert_to_seconds(['ta', 'tb'])
def cutout(self, ta, tb):
    fl = lambda t: t + (t >= ta) * (tb - ta)
    newclip = self.fl_time(fl)
    if self.duration is not None:
        return newclip.set_duration(self.duration - (tb - ta))
    else:
        return newclip
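# Hedged usage sketch: removing seconds 3-5 from a 10-second clip leaves an
# 8-second clip in which t=3 of the result maps to t=5 of the original.
shorter = clip.cutout(3, 5)     # assumes clip.duration == 10
print(shorter.duration)         # 8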
'Iterates over all the frames of the clip. Returns each frame of the clip as a HxWxN np.array, where N=1 for mask clips and N=3 for RGB clips. This function is not really meant for video editing. It provides an easy way to do frame-by-frame treatment of a video, for fields like science, computer vision... The ``fps`` (frames per second) parameter is optional if the clip already has a ``fps`` attribute. Use dtype="uint8" when using the pictures to write video, images... Examples >>> # prints the maximum of red that is contained >>> # on the first line of each frame of the clip. >>> from moviepy.editor import VideoFileClip >>> myclip = VideoFileClip(\'myvideo.mp4\') >>> print ( [frame[0,:,0].max() for frame in myclip.iter_frames()])'
@requires_duration
@use_clip_fps_by_default
def iter_frames(self, fps=None, with_times=False, progress_bar=False, dtype=None):
    def generator():
        for t in np.arange(0, self.duration, 1.0 / fps):
            frame = self.get_frame(t)
            if (dtype is not None) and (frame.dtype != dtype):
                frame = frame.astype(dtype)
            if with_times:
                yield t, frame
            else:
                yield frame

    if progress_bar:
        nframes = int(self.duration * fps) + 1
        return tqdm(generator(), total=nframes)
    return generator()
'Returns a copy of the AudioFileClip, i.e. a new entrance point to the audio file. Use copy when you have different clips watching the audio file at different times.'
def coreader(self):
return AudioFileClip(self.filename, self.buffersize)
'Close/delete the internal reader.'
def __del__(self):
    try:
        del self.reader
    except AttributeError:
        pass
'Opens the file, creates the pipe.'
def initialize(self, starttime=0):
    self.close_proc()
    if starttime != 0:
        offset = min(1, starttime)
        i_arg = ['-ss', '%.05f' % (starttime - offset),
                 '-i', self.filename, '-vn',
                 '-ss', '%.05f' % offset]
    else:
        i_arg = ['-i', self.filename, '-vn']
    cmd = ([get_setting('FFMPEG_BINARY')] + i_arg +
           ['-loglevel', 'error',
            '-f', self.f,
            '-acodec', self.acodec,
            '-ar', '%d' % self.fps,
            '-ac', '%d' % self.nchannels,
            '-'])
    popen_params = {'bufsize': self.buffersize,
                    'stdout': sp.PIPE,
                    'stderr': sp.PIPE,
                    'stdin': DEVNULL}
    if os.name == 'nt':
        popen_params['creationflags'] = 134217728
    self.proc = sp.Popen(cmd, **popen_params)
    self.pos = np.round(self.fps * starttime)
'Reads a frame at time t. Note for coders: getting an arbitrary frame in the video with ffmpeg can be painfully slow if some decoding has to be done. This function tries to avoid fetching arbitrary frames whenever possible, by moving between adjacent frames.'
def seek(self, pos):
    if (pos < self.pos) or (pos > self.pos + 1000000):
        t = 1.0 * pos / self.fps
        self.initialize(t)
    elif pos > self.pos:
        self.skip_chunk(pos - self.pos)
    self.pos = pos
'Fills the buffer with frames, centered on ``framenumber`` if possible'
def buffer_around(self, framenumber):
    new_bufferstart = max(0, framenumber - self.buffersize // 2)
    if self.buffer is not None:
        current_f_end = self.buffer_startframe + self.buffersize
        if new_bufferstart < current_f_end < new_bufferstart + self.buffersize:
            conserved = current_f_end - new_bufferstart + 1
            chunksize = self.buffersize - conserved
            array = self.read_chunk(chunksize)
            self.buffer = np.vstack([self.buffer[-conserved:], array])
        else:
            self.seek(new_bufferstart)
            self.buffer = self.read_chunk(self.buffersize)
    else:
        self.seek(new_bufferstart)
        self.buffer = self.read_chunk(self.buffersize)
    self.buffer_startframe = new_bufferstart
'Iterator that returns the whole sound array of the clip by chunks'
@requires_duration
def iter_chunks(self, chunksize=None, chunk_duration=None, fps=None,
                quantize=False, nbytes=2, progress_bar=False):
    if fps is None:
        fps = self.fps
    if chunk_duration is not None:
        chunksize = int(chunk_duration * fps)
    totalsize = int(fps * self.duration)
    if (totalsize % chunksize) == 0:
        nchunks = totalsize // chunksize
    else:
        nchunks = totalsize // chunksize + 1
    pospos = list(range(0, totalsize, chunksize)) + [totalsize]

    def generator():
        for i in range(nchunks):
            tt = (1.0 / fps) * np.arange(pospos[i], pospos[i + 1])
            yield self.to_soundarray(tt, nbytes=nbytes, quantize=quantize,
                                     fps=fps, buffersize=chunksize)

    if progress_bar:
        return tqdm(generator(), total=nchunks)
    else:
        return generator()
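# Hedged example: stream an audio clip's samples chunk by chunk instead of
# materialising the whole array. `audioclip` stands in for any AudioClip with
# a duration; the printed shapes depend on fps and the number of channels.
for chunk in audioclip.iter_chunks(chunk_duration=0.5, fps=44100):
    print(chunk.shape)   # roughly (22050, nchannels) per half-second chunk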
'Transforms the sound into an array that can be played by pygame or written in a wav file. See ``AudioClip.preview``. Parameters fps Frame rate of the sound for the conversion. 44100 for top quality. nbytes Number of bytes to encode the sound: 1 for 8bit sound, 2 for 16bit, 4 for 32bit sound.'
@requires_duration
def to_soundarray(self, tt=None, fps=None, quantize=False, nbytes=2, buffersize=50000):
    if fps is None:
        fps = self.fps
    stacker = np.vstack if self.nchannels == 2 else np.hstack
    max_duration = 1.0 * buffersize / fps
    if tt is None:
        if self.duration > max_duration:
            return stacker(self.iter_chunks(fps=fps, quantize=quantize,
                                            nbytes=2, chunksize=buffersize))
        else:
            tt = np.arange(0, self.duration, 1.0 / fps)
    # elif len(tt) > 1.5 * buffersize:
    #     nchunks = int(len(tt) / buffersize + 1)
    #     tt_chunks = np.array_split(tt, nchunks)
    #     return stacker([self.to_soundarray(tt=ttc, buffersize=buffersize, fps=fps,
    #                                        quantize=quantize, nbytes=nbytes)
    #                     for ttc in tt_chunks])
    snd_array = self.get_frame(tt)
    if quantize:
        snd_array = np.maximum(-0.99, np.minimum(0.99, snd_array))
        inttype = {1: 'int8', 2: 'int16', 4: 'int32'}[nbytes]
        snd_array = (2 ** (8 * nbytes - 1) * snd_array).astype(inttype)
    return snd_array
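# Hedged usage sketch: turn a clip's audio into a 16-bit integer array, e.g.
# for writing a wav by hand. `audioclip` stands in for any AudioClip with a
# set duration; the exact shape depends on fps and the number of channels.
arr = audioclip.to_soundarray(fps=44100, nbytes=2, quantize=True)
print(arr.dtype, arr.shape)   # int16, roughly (duration * 44100, nchannels)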
'Writes an audio file from the AudioClip. Parameters filename Name of the output file fps Frames per second nbytes Sample width (set to 2 for 16-bit sound, 4 for 32-bit sound) codec Which audio codec should be used. If None provided, the codec is determined based on the extension of the filename. Choose \'pcm_s16le\' for 16-bit wav and \'pcm_s32le\' for 32-bit wav. bitrate Audio bitrate, given as a string like \'50k\', \'500k\', \'3000k\'. Will determine the size and quality of the output file. Note that it is mainly an indicative goal; the bitrate won\'t necessarily be this value in the output file. ffmpeg_params Any additional parameters you would like to pass, as a list of terms, like [\'-option1\', \'value1\', \'-option2\', \'value2\'] write_logfile If true, produces a detailed logfile named filename + \'.log\' when writing the file verbose Boolean indicating whether to print information progress_bar Boolean indicating whether to show the progress bar.'
@requires_duration
def write_audiofile(self, filename, fps=44100, nbytes=2, buffersize=2000,
                    codec=None, bitrate=None, ffmpeg_params=None,
                    write_logfile=False, verbose=True, progress_bar=True):
    if codec is None:
        name, ext = os.path.splitext(os.path.basename(filename))
        try:
            codec = extensions_dict[ext[1:]]['codec'][0]
        except KeyError:
            raise ValueError("MoviePy couldn't find the codec associated with the "
                             "filename. Provide the 'codec' parameter in write_audiofile.")
    return ffmpeg_audiowrite(self, filename, fps, nbytes, buffersize,
                             codec=codec, bitrate=bitrate,
                             write_logfile=write_logfile, verbose=verbose,
                             ffmpeg_params=ffmpeg_params, progress_bar=progress_bar)
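# Hedged example call (the filename is made up): export a clip's audio track
# as a 16-bit wav; when `codec` is omitted it is inferred from the extension,
# as the body above shows.
audioclip.write_audiofile('soundtrack.wav', fps=44100, nbytes=2, codec='pcm_s16le')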
'Initialize the PyTest options.'
def initialize_options(self):
    TestCommand.initialize_options(self)
    self.pytest_args = []
'Finalize the PyTest options.'
def finalize_options(self):
    TestCommand.finalize_options(self)
    self.test_args = []
    self.test_suite = True
'Run the PyTest testing suite.'
def run_tests(self):
    try:
        import pytest
    except ImportError:
        raise ImportError('Running tests requires additional dependencies.\nPlease run (pip install moviepy[test])')
    errno = pytest.main(self.pytest_args)
    sys.exit(errno)
'Initialize SoupData object. Load data from HTML.'
def __init__(self):
    self.SOUP_HTML = ''
    self.load_data()
'Open the HTML file and load it into the object.'
def load_data(self):
    with open('download/index.html', 'r') as data_file:
        self.SOUP_HTML = data_file.read()
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.SOUP_HTML
'Given raw data, get the relevant section Args: raw_data: HTML data'
def __init__(self, raw_data):
    self.parsed_data = None
    self.sections = list()
    soup_data = BeautifulSoup(raw_data, 'html.parser')
    for content in soup_data.find('div', {'class': 'body'}):
        if (not content) or isinstance(content, NavigableString):
            continue
        self.sections.append(content)
'Find the name and anchor for a given section Args: section: A section of parsed HTML Returns: name: Name of the section anchor: Anchor tag to use when linking back to the docs (ie #tags)'
def parse_name_and_anchor_from_data(self, section):
    name, anchor = None, None
    a_tag = section.find('a', {'class': 'headerlink'})
    if a_tag:
        anchor = a_tag['href']
        name = a_tag.parent.text[:-1].replace('()', '').replace('.', '')
    return name, anchor
'Get the first paragraph for display Args: section: A section of parsed HTML Returns: First paragraph in the HTML'
def parse_first_paragraph_from_data(self, section):
    data = section.find('p')
    if data:
        return data.text.replace('\n', ' ').replace('  ', ' ')
    return None
'Look for example code block to output Args: section: A section of parsed HTML Returns: Formatted code string'
def parse_code_from_data(self, section):
    code = section.find('div', {'class': 'highlight-python'})
    if code:
        return '<pre><code>{}</code></pre>'.format(
            code.text.replace('\xc2\xb6', '').replace('\n', '\\n'))
    return None
'Main gateway into parsing the data. Will retrieve all necessary data elements.'
def parse_for_data(self):
    data = list()
    for section in self.sections:
        for sub_section in section.find_all('div', {'class': 'section'}):
            name, anchor = self.parse_name_and_anchor_from_data(sub_section)
            first_paragraph = self.parse_first_paragraph_from_data(sub_section)
            code = self.parse_code_from_data(sub_section)
            if name and first_paragraph and code:
                data_elements = {'name': name,
                                 'anchor': anchor,
                                 'first_paragraph': first_paragraph,
                                 'code': code}
                data.append(data_elements)
    self.parsed_data = data
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Initialize SoupDataOutput object Args: data: Dict containing the data elements'
def __init__(self, data):
self.data = data
'Iterate through the data and create the necessary output.txt file'
def create_file(self):
    with open('output.txt', 'w+') as output_file:
        for data_element in self.data:
            name = data_element.get('name')
            code = data_element.get('code')
            first_paragraph = data_element.get('first_paragraph')
            abstract = '{}{}{}'.format(code, '<br>' if code else '', first_paragraph)
            url = '{}{}'.format(SOUP_HOME, data_element.get('anchor'))
            list_of_data = [name, 'A', '', '', '', '', '', '',
                            SOUP_HOME, '', '', abstract, url]
            output_file.write('{}\n'.format('\t'.join(list_of_data)))
':param name: Name of the Fathead :param description: Description of what\'s being displayed :param filename: The filename from which the information came. Used to construct the URL for the entry Instantiate the information about the class'
def __init__(self, name, description, filename):
    self.name = name
    self.description = description.replace('\n', '').replace('\t', ' ')
    self.description = '<p>{}</p>'.format(self.description)
    self.filename = filename
'Get all itext class files that need to be parsed'
def __init__(self):
    self.itext_classes = {}
    self.files_to_parse = glob.glob('download/*.html')
'Args: specfile: A filesystem path to a csv file containing language definitions. It should have the format: BaseObject,property,{class_property,class_function,instance_method,instance_property}'
def __init__(self, specfile):
    self.inverted_index = {}
    self.objects = set()
    with codecs.open(specfile, 'r', 'utf-8') as f:
        for line in f:
            line = line.strip()
            index = line.split('(')[0]
            if index.count('.') > 1:
                index = index.split('prototype')[-1]
            index = index.split('.')[-1].lower().strip()
            if index not in self.inverted_index:
                self.inverted_index[index] = []
            self.inverted_index[index].append(line)
            obj = line.split('.')[0]
            self.objects.add(obj)
'Standardize and clean the fields within an MDN object.'
def standardize(self, mdn):
    if 'Global' in mdn.obj:
        mdn.obj = 'Global'
    if (mdn.obj not in self.objects) and ('Global' in mdn.url):
        return None
    if mdn.prop.lower() not in self.inverted_index:
        return mdn
    for signature in self.inverted_index[mdn.prop.lower()]:
        if signature.startswith(mdn.obj):
            mdn.codesnippet = signature
            mdn.title = signature.split('(')[0].strip()
            break
    return mdn
'Write the dict row.'
def writerow(self, outdict):
    row = []
    for field in FatWriter.FIELDS:
        col = outdict.get(field, '')
        col = col.replace('\t', ' ')
        col = col.replace('\n', '\\n')
        row.append(col)
    self.outfile.write('\t'.join(row) + '\n')
'Parse an html file and return an mdn object. Args: htmlfile: A file-like object that should parse with lxml html parser.'
def parse(self, htmlfile):
    page = htmlfile.read()
    tree = html.fromstring(page)
    if self._is_obsolete(tree):
        return None
    title = tree.xpath("//meta[@property='og:title']/@content")[0]
    article = tree.xpath("//article[contains(@id,'wikiArticle')]")
    summary = ''
    if article:
        summary_nodes = tree.xpath("//h2[contains(@id,'Summary')]/following-sibling::p[1]")
        for summary_el in summary_nodes:
            for tag in summary_el.xpath('//*[@class]'):
                tag.attrib.pop('class')
            summary += re.sub('<[^<]+?>', '', etree.tostring(summary_el).strip())
    if not summary:
        summary_el = tree.xpath("//meta[@property='og:description']/@content")
        if summary_el:
            summary = summary_el[0]
    if not summary:
        see_also_el = tree.xpath("//h3[contains(@id,'See_also')]")
        if see_also_el:
            elements = tree.xpath("//h3[contains(@id,'See_also')]/following-sibling::ul[1]")
            for element in elements:
                for tag in element.xpath('//*[@class]'):
                    tag.attrib.pop('class')
                summary = re.findall('title="([^"]*)"', etree.tostring(element).strip())
                summary = summary[0].strip()
    codesnippet = ''
    syntax_header = tree.xpath("//h2[contains(@id,'Syntax')]")
    if syntax_header:
        elements = tree.xpath("//h2[contains(@id,'Syntax')]/following-sibling::pre[1]")
        for element in elements:
            for tag in element.xpath('//*[@class]'):
                tag.attrib.pop('class')
            codesnippet += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
    articletype = ''
    exampledesc = ''
    example = ''
    url = tree.xpath("//meta[@property='og:url']/@content")[0]
    if 'Web/API' in url:
        example_header = tree.xpath("//h2[contains(@id,'Example')][position()=1]")
        if example_header:
            html_example_header = tree.xpath("//h2[contains(@id,'Example')][position()=1]/following-sibling::h3[contains(@id,'HTML_Content')]")
            js_example_header = tree.xpath("//h2[contains(@id,'Example')][position()=1]/following-sibling::h3[contains(@id,'JavaScript_Content')]")
            if html_example_header and js_example_header:
                example = {}
                example['HTML Content'] = ''
                elements = tree.xpath("//h2[contains(@id,'Example')][position()=1]/following-sibling::h3[contains(@id,'HTML_Content')]/following-sibling::pre[1]")
                for element in elements:
                    for tag in element.xpath('//*[@class]'):
                        tag.attrib.pop('class')
                    example['HTML Content'] += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
                example['JavaScript Content'] = ''
                elements = tree.xpath("//h2[contains(@id,'Example')][position()=1]/following-sibling::h3[contains(@id,'JavaScript_Content')]/following-sibling::pre[1]")
                for element in elements:
                    for tag in element.xpath('//*[@class]'):
                        tag.attrib.pop('class')
                    example['JavaScript Content'] += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
            else:
                example = ''
                example_header = tree.xpath("//h2[contains(@id,'Example')]")
                if example_header:
                    elements = tree.xpath("//h2[contains(@id,'Example')]/following-sibling::pre[1]")
                    for element in elements:
                        for tag in element.xpath('//*[@class]'):
                            tag.attrib.pop('class')
                        example += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
                    elements = tree.xpath("//h2[contains(@id,'Example')]/following-sibling::p[1]")
                    for element in elements:
                        for tag in element.xpath('//*[@class]'):
                            tag.attrib.pop('class')
                        exampledesc += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
    if 'Error' in htmlfile.name:
        articletype = 'Error'
        whatWentWrong_summary = ''
        whatWentWrong = tree.xpath("//h2[contains(@id,'What_went_wrong')]/following-sibling::p[1]")
        for element in whatWentWrong:
            for tag in element.xpath('//*[@class]'):
                tag.attrib.pop('class')
            whatWentWrong_summary += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
        if whatWentWrong_summary:
            summary = whatWentWrong_summary
        exampleGood = ''.join(tree.xpath("//h3[contains(@id,'Valid_cases')]/following-sibling::pre/text()"))
        exampleBad = ''.join(tree.xpath("//h3[contains(@id,'Invalid_cases')]/following-sibling::pre/text()"))
        exampleGood = re.sub('<[^<]+?>', '', exampleGood)
        exampleBad = re.sub('<[^<]+?>', '', exampleBad)
        if exampleGood:
            exampleGood = 'Valid Cases:\n' + exampleGood
        if exampleBad:
            exampleBad = 'Invalid Cases:\n' + exampleBad
        if exampleBad or exampleGood:
            codesnippet = exampleBad + '\n' + exampleGood
    if any(wiki in htmlfile.name for wiki in ['Functions.', 'Classes.', 'Statements.', 'Operators.']):
        articletype = htmlfile.name.split('.')[0].split('/')[1]
        desc_header = tree.xpath("//h2[contains(@id,'Description')]")
        if desc_header:
            elements = tree.xpath("//h2[contains(@id,'Description')]/following-sibling::p[1]")
            for element in elements:
                for tag in element.xpath('//*[@class]'):
                    tag.attrib.pop('class')
                exampledesc += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
            elements = tree.xpath("//h2[contains(@id,'Description')]/following-sibling::pre[1]")
            for element in elements:
                for tag in element.xpath('//*[@class]'):
                    tag.attrib.pop('class')
                example += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
        else:
            elements = tree.xpath("//h2[contains(@id,'Examples')]/following-sibling::p[1]")
            for element in elements:
                for tag in element.xpath('//*[@class]'):
                    tag.attrib.pop('class')
                exampledesc += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
            elements = tree.xpath("//h2[contains(@id,'Examples')]/following-sibling::pre[1]")
            for element in elements:
                for tag in element.xpath('//*[@class]'):
                    tag.attrib.pop('class')
                example += re.sub('<[^<]+?>', '', etree.tostring(element).strip())
    print (title + ' ' * 30 + '\r'),
    mdn = MDN()
    mdn.title = title
    mdn.summary = summary
    mdn.codesnippet = codesnippet
    mdn.exampledesc = exampledesc
    mdn.example = example
    mdn.articletype = articletype
    return mdn
'Initialize PythonData object. Load data from HTML.'
def __init__(self, file):
    self.HTML = ''
    self.FILE = file
    self.load_data()
'Open the HTML file and load it into the object.'
def load_data(self):
    with open(self.FILE, 'r') as data_file:
        self.HTML = data_file.read()
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.HTML
'Returns: The file path of the file being used.'
def get_file(self):
return self.FILE
'Given raw data, get the relevant sections Args: raw_data: HTML data path: path of downloaded HTML data'
def __init__(self, data_object, info):
    self.parsed_data = None
    self.function_sections = []
    self.method_sections = []
    self.intro_text = ''
    self.title = ''
    self.info = info
    self.file_being_used = data_object.get_file()
    soup_data = BeautifulSoup(data_object.get_raw_data(), 'html.parser')
    sections = soup_data.find_all('div', {'class': 'section'})
    for section in sections:
        functions = section.find_all('dl', {'class': 'function'})
        if functions:
            self.function_sections.extend(functions)
        methods = section.find_all('dl', {'class': 'method'})
        if methods:
            self.method_sections.extend(methods)
    intro = []
    hr = soup_data.find_all('hr', {'class': 'docutils'})
    if hr:
        first_paragraph_after_hr = soup_data.hr.find_next('p')
        intro.append(first_paragraph_after_hr)
        second_paragraph_after_hr = first_paragraph_after_hr.find_next('p')
        intro.append(second_paragraph_after_hr)
    for p in intro:
        self.intro_text += p.text.replace('  ', ' ').replace('\n', ' ').replace('\\n', '\\\\n')
    module_title = soup_data.find('a', {'class': 'reference internal'})
    if module_title:
        self.title = module_title.text
'Returns the module name Args: section: A section of parsed HTML that represents a function definition Returns: Name of the module'
def parse_for_module_name(self, section):
    module_name = section.find('code', {'class': 'descclassname'})
    if module_name:
        return module_name.text.rstrip('.')
    return ''
'Returns the function name Args: section: A section of parsed HTML that represents a function definition Returns: Name of function'
def parse_for_function_name(self, section):
    function_name = section.find('code', {'class': 'descname'})
    if function_name:
        return function_name.text
    return ''
'Returns the first paragraph of text for a given function Fixes up some weird double spacing and newlines. Args: section: A section of parsed HTML that represents a function definition Returns: First paragraph found with text'
def parse_for_first_paragraph(self, section):
    paragraphs = section.find_all('p')
    for paragraph in paragraphs:
        if paragraph.text:
            return paragraph.text.replace('  ', ' ').replace('\n', ' ').replace('\\n', '\\\\n')
    return ''
'Returns the anchor link to specific function doc Args: section: A section of parsed HTML that represents a function definition Returns: The href value of the link to doc'
def parse_for_anchor(self, section):
    a_tag = section.find('a', {'class': 'headerlink'})
    if a_tag:
        return a_tag['href']
    return ''
'Returns the method signature Args: section: A section of parsed HTML that represents a function definition Returns: The method signature'
def parse_for_method_signature(self, section):
    dt = section.find('dt')
    if dt:
        return '<pre><code>{}</code></pre>'.format(
            dt.text.replace('\xc2\xb6', '').replace('\n', '').replace('\\n', '\\\\n'))
    return ''
'Returns the class.module.method signature Args: section: A section of parsed HTML that represents a method definition Returns: The method signature'
def parse_for_class_method(self, section):
    id_tag = section.find('dt').get('id')
    if id_tag:
        tag_parts = id_tag.split('.')
        if len(tag_parts) == 3:
            return tag_parts
        elif len(tag_parts) > 3:
            return (tag_parts[0], tag_parts[1], '.'.join(tag_parts[2:]))
    return ['', '', '']
'Helper method to create URL back to document Args: anchor: #anchor Returns: Full URL to function on the python doc'
def create_url(self, anchor):
    file_path = self.file_being_used.replace(self.info['download_path'], '')
    return self.info['doc_base_url'].format('{}{}'.format(file_path, anchor))
'Main gateway into parsing the data. Will retrieve all necessary data elements.'
def parse_for_data(self):
    data = []
    if self.intro_text and self.title:
        data_elements = {'module': self.title,
                         'function': '',
                         'method_signature': '',
                         'first_paragraph': self.intro_text,
                         'url': self.create_url('')}
        data.append(data_elements)
    for function_section in self.function_sections:
        module = self.parse_for_module_name(function_section)
        function = self.parse_for_function_name(function_section)
        if module or function:
            method_signature = self.parse_for_method_signature(function_section)
            first_paragraph = self.parse_for_first_paragraph(function_section)
            anchor = self.parse_for_anchor(function_section)
            url = self.create_url(anchor)
            data_elements = {'module': module,
                             'function': function,
                             'method_signature': method_signature,
                             'first_paragraph': first_paragraph,
                             'url': url}
            data.append(data_elements)
    for method_section in self.method_sections:
        module, class_name, method = self.parse_for_class_method(method_section)
        if method:
            method_signature = self.parse_for_method_signature(method_section)
            first_paragraph = self.parse_for_first_paragraph(method_section)
            url = self.create_url('#' + '.'.join([module, class_name, method]))
            data_elements = {'module': module,
                             'function': class_name + '.' + method,
                             'method_signature': method_signature,
                             'first_paragraph': first_paragraph,
                             'url': url}
            data.append(data_elements)
    self.parsed_data = data
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Figure out the name of the function. Will contain the module name if one exists. Args: data_element: Incoming data dict Returns: Name, with whitespace stripped out'
def create_names_from_data(self, data_element):
    module = data_element.get('module')
    function = data_element.get('function')
    dotted_name = '{}{}{}'.format(module, '.' if (module and function) else '', function)
    spaced_name = '{} {}'.format(module, function)
    return dotted_name.strip(), spaced_name.strip()
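# Hedged illustration with a made-up data element: the two name forms derived
# above are the dotted article name and the spaced redirect key.
element = {'module': 'os.path', 'function': 'join'}
# create_names_from_data(element) -> ('os.path.join', 'os.path join')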
'Iterate through the data and create the needed output.txt file, appending to file as necessary.'
def create_file(self):
    with open(self.output, 'a') as output_file:
        for data_element in self.data:
            if data_element.get('module') or data_element.get('function'):
                method_signature = data_element.get('method_signature')
                first_paragraph = '<p>' + data_element.get('first_paragraph') + '</p>'
                name, redirect = self.create_names_from_data(data_element)
                if first_paragraph.startswith('Source code:'):
                    temp = first_paragraph.split('.py', 1)
                    if len(temp) > 1:
                        first_paragraph = temp[0] + '.py<br>' + temp[1]
                abstract = ('<section class="prog__container">'
                            + '{}{}{}'.format(first_paragraph, '', method_signature)
                            + '</section>')
                url = data_element.get('url')
                list_of_data = [name, 'A', '', '', '', '', '', '',
                                'https://docs.python.org', '', '', abstract, url]
                output_file.write('{}\n'.format('\t'.join(list_of_data)))
                if redirect != name:
                    list_of_data = [redirect, 'R', name, '', '', '', '', '', '', '', '', '', '']
                    output_file.write('{}\n'.format('\t'.join(list_of_data)))
'Try to parse given input into a valid entry. Args: input_obj: TSV string or list of data. Returns: List of data Raises: Throws BadEntryException if data is invalid.'
def parse(self, input_obj):
    if isinstance(input_obj, str):
        processed = input_obj.split('\t')
        self.data = processed
    elif isinstance(input_obj, list):
        self.data = input_obj
    try:
        self.key = self.data[0].strip()
        self.entry_type = self.data[1].strip()
        self.reference = self.data[2].strip()
        if len(self.data) > 3:
            self.category = self.data[4].strip()
            self.related = self.data[6].strip()
            self.abstract = self.data[11].strip()
            self.anchor = self.data[12].strip()
        elif (self.entry_type == 13) and (len(self.data) != 13):
            raise BadEntryException
    except Exception as e:
        raise BadEntryException('Article had invalid number of elements.')
    if self.entry_type == 'A':
        self.parse_alternative_keys()
    return self.data
'Find alternative keys to use in generated redirects Returns: Set of possible redirect entries'
def parse_alternative_keys(self):
    self.alternative_keys = set()
    if ('.' in self.key) and (self.entry_type == 'A'):
        key_arr = self.key.split('.')
        method_name = key_arr[-1]
        key_arr_len = len(key_arr)
        self.alternative_keys.add(method_name)
        if key_arr_len >= 3:
            for l in range(key_arr_len - 1):
                permutations = itertools.permutations(key_arr[:key_arr_len - 1], l + 1)
                for k in permutations:
                    new_key = '{} {}'.format(' '.join(k), method_name)
                    self.alternative_keys.add(new_key)
        else:
            package_name = key_arr[0]
            new_key = '{} {}'.format(package_name, method_name)
            self.alternative_keys.add(new_key)
    return self.alternative_keys
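# Hedged worked example: for an article keyed 'os.path.join' (type 'A') the
# loop above yields these redirect keys -- the bare method name plus every
# ordering of the leading parts prefixed to it:
#   'join', 'os join', 'path join', 'os path join', 'path os join'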
'Instantiate the information about the command'
def __init__(self, name, description, filename):
    self.name = name
    self.description = description.replace('\n', '\\n').replace('\t', ' ')
    self.description = '<p>{}</p>'.format(self.description)
    self.filename = filename
    self.usage = ''
'Output the git command information in the proper format required for DuckDuckGo Fatheads'
def basic_usage(self):
    usage_cleaned = self.usage.replace('\n', '\\n').replace('\t', ' ')
    if usage_cleaned:
        abstract = '{}\\n<pre><code>{}</code></pre>'.format(self.description, usage_cleaned)
    else:
        abstract = self.description
    abstract = '<section class="prog__container">{}</section>'.format(abstract)
    return '\t'.join([self.name, 'A', '', '', '', '', '', '', '', '', '',
                      abstract, '{}{}'.format(git_docs_base_url, self.filename)])
'Get all git command files that need to be parsed'
def __init__(self):
self.files_to_parse = glob.glob('download/*.html')
'Parse each git command and make a Command object for each'
def parse_commands(self):
    self.commands = []
    for file in self.files_to_parse:
        soup = BeautifulSoup(open(file), 'html.parser')
        name_h2 = soup.find('h2', {'id': '_name'})
        if not name_h2:
            continue
        description_p = name_h2.findNext('p')
        if not description_p:
            continue
        description = description_p.getText()
        command_name, description = description.split(' - ')
        command = Command(command_name, description, file.replace('download/', ''))
        synopsis_h2 = soup.find('h2', {'id': '_synopsis'})
        if synopsis_h2:
            usage_pre = synopsis_h2.findNext('pre')
            if usage_pre:
                usage = usage_pre.getText()
                command.usage = usage
        options_h2s = soup.find_all('h2')
        for options_h2 in options_h2s:
            if options_h2.getText() != 'OPTIONS':
                continue
            options = []
            for option_h2_child in options_h2.parent.find_all():
                if option_h2_child.name == 'dt':
                    options.append(option_h2_child.getText())
                elif option_h2_child.name == 'p':
                    description = option_h2_child.getText()
                    for option in options:
                        command_name_with_option = '{} {}'.format(command_name, option)
                        command_with_option = Command(command_name_with_option, description,
                                                      file.replace('download/', ''))
                        self.commands.append(command_with_option)
                    options = []
        self.commands.append(command)
'Initialize PythonData object. Load data from HTML.'
def __init__(self, file):
    self.HTML = ''
    self.FILE = file
    self.load_data()
'Open the HTML file and load it into the object.'
def load_data(self):
    with open(self.FILE, 'r') as data_file:
        self.HTML = data_file.read()
'Returns: The raw HTML that was loaded.'
def get_raw_data(self):
return self.HTML
'Returns: The file path of the file being used.'
def get_file(self):
return self.FILE
'Returns the prop name Args: section: A section of parsed HTML that represents a prop definition Returns: Name of the prop'
def parse_for_prop_name(self, section):
    prop_name_h4 = section.find('h4', {'class': 'propTitle'})
    link_to_general_props = 'View props... #'
    if prop_name_h4 and (prop_name_h4.text != link_to_general_props):
        prop_name = prop_name_h4.next.next
        if prop_name_h4.find('span', {'class': 'platform'}):
            prop_name = prop_name_h4.find('span', {'class': 'platform'}).next.next
        if not isinstance(prop_name, str):
            return None
        return prop_name
'Returns the first paragraph of text for a given function Fixes up some weird double spacing and newlines. Args: section: A section of parsed HTML that represents a function definition Returns: First paragraph found with text'
def parse_for_first_paragraph(self, section):
    paragraphs = section.find_all('p')
    for paragraph in paragraphs:
        if paragraph.text:
            return self._format_output(paragraph.text)
    return ''
'Returns the anchor link to specific function doc Args: section: A section of parsed HTML that represents a function definition Returns: The href value of the link to doc'
def parse_for_anchor(self, section):
    a_tag = section.find('a', {'class': 'anchor'})
    if a_tag:
        return a_tag['name']
    return ''
'Returns the signature Args: section: A section of parsed HTML that represents a definition of a property or method Returns: The signature'
def parse_for_signature(self, section, titleName):
    h4 = section.find('h4', {'class': titleName})
    contents = []
    for e in h4.strings:
        contents.append(e)
    del contents[-1]
    del contents[-1]
    if h4.find('span', {'class': 'platform'}):
        del contents[0]
    if len(h4.find_all('span', {'class': 'methodType'})) > 1:
        del contents[0]
    if contents:
        signature = ''
        for el in contents:
            signature += el
        return '<pre><code>{}</code></pre>'.format(self._format_output(signature))
    return ''
'Returns the name of a method Args: section: A section of parsed HTML that represents a method definition Returns: The method name'
def parse_for_method_name(self, section):
    method_name_h4 = section.find('h4', {'class': 'methodTitle'})
    if method_name_h4:
        method_name = method_name_h4.next.next
        nbr_of_methodType_tags_in_h4 = len(method_name_h4.find_all('span', {'class': 'methodType'}))
        if nbr_of_methodType_tags_in_h4 > 1:
            method_name = method_name_h4.find('span', {'class': 'methodType'}).next.next
        return method_name
'Helper method to create URL back to document Args: anchor: #anchor Returns: Full URL to function on the python doc'
def create_url(self, anchor):
    file_path = self.file_being_used.replace(self.info['download_path'], '')
    return self.info['doc_base_url'].format('{}#{}'.format(file_path, anchor))
'Main gateway into parsing the data. Will retrieve all necessary data elements.'
def parse_for_data(self):
    data = []
    if self.intro_text and self.title:
        data_elements = {'module': self.title,
                         'function': '',
                         'method_signature': '',
                         'first_paragraph': self.intro_text,
                         'url': self.create_url('')}
        data.append(data_elements)
    titleName = 'propTitle'
    for prop_section in self.prop_sections:
        prop_name = self.parse_for_prop_name(prop_section)
        if prop_name:
            prop_signature = self.parse_for_signature(prop_section, titleName)
            first_paragraph = self.parse_for_first_paragraph(prop_section)
            anchor = self.parse_for_anchor(prop_section)
            url = self.create_url(anchor)
            data_elements = {'module': self.title,
                             'function': prop_name,
                             'method_signature': prop_signature,
                             'first_paragraph': first_paragraph,
                             'url': url}
            data.append(data_elements)
    titleName = 'methodTitle'
    for method_section in self.method_sections:
        method_name = self.parse_for_method_name(method_section)
        if method_name:
            method_signature = self.parse_for_signature(method_section, titleName)
            first_paragraph = self.parse_for_first_paragraph(method_section)
            anchor = self.parse_for_anchor(method_section)
            url = self.create_url(anchor)
            data_elements = {'module': self.title,
                             'function': method_name,
                             'method_signature': method_signature,
                             'first_paragraph': first_paragraph,
                             'url': url}
            data.append(data_elements)
    self.parsed_data = data
'Get the parsed data. Returns: self.parsed_data: Dict containing necessary data elements'
def get_data(self):
return self.parsed_data
'Helper method to format the output appropriately.'
def _format_output(self, text):
    return text.replace('  ', ' ').replace('\n', ' ').replace('\\n', '\\\\n')
'Figure out the name of the function. Will contain the module name if one exists. Args: data_element: Incoming data dict Returns: Name, with whitespace stripped out'
def create_names_from_data(self, data_element):
    module = data_element.get('module')
    function = data_element.get('function')
    dotted_name = '{}{}{}'.format(module, '.' if (module and function) else '', function)
    spaced_name = '{} {}'.format(module, function)
    return dotted_name.strip(), spaced_name.strip()
'Iterate through the data and create the needed output.txt file, appending to file as necessary.'
def create_file(self):
    with open(self.output, 'a') as output_file:
        for data_element in self.data:
            if data_element.get('module') or data_element.get('function'):
                method_signature = data_element.get('method_signature')
                first_paragraph_text = data_element.get('first_paragraph')
                first_paragraph = ''
                if first_paragraph_text:
                    first_paragraph = '<p>'
                    first_paragraph += first_paragraph_text
                    first_paragraph += '</p>'
                name, redirect = self.create_names_from_data(data_element)
                abstract = '<section class="prog__container">'
                abstract += '{}{}{}'.format(first_paragraph, '', method_signature)
                abstract += '</section>'
                url = data_element.get('url')
                list_of_data = [name, 'A', '', '', '', '', '', '',
                                HOME_LINK, '', '', abstract, url]
                output_file.write('{}\n'.format('\t'.join(list_of_data)))
                if redirect != name:
                    list_of_data = [redirect, 'R', name, '', '', '', '', '', '', '', '', '', '']
                    output_file.write('{}\n'.format('\t'.join(list_of_data)))
'Try to parse given input into a valid entry. Args: input_obj: TSV string or list of data. Returns: List of data Raises: Throws BadEntryException if data is invalid.'
def parse(self, input_obj):
    if isinstance(input_obj, str):
        processed = input_obj.split('\t')
        self.data = processed
    elif isinstance(input_obj, list):
        self.data = input_obj
    try:
        self.key = self.data[0].strip()
        self.entry_type = self.data[1].strip()
        self.reference = self.data[2].strip()
        if len(self.data) > 3:
            self.category = self.data[4].strip()
            self.related = self.data[6].strip()
            self.abstract = self.data[11].strip()
            self.anchor = self.data[12].strip()
    except Exception as e:
        raise BadEntryException('Article had invalid number of elements.')
    if self.entry_type == 'A':
        self.parse_alternative_keys()
    return self.data
'Find alternative keys to use in generated redirects Returns: Set of possible redirect entries'
def parse_alternative_keys(self):
    self.alternative_keys = set()
    if ('.' in self.key) and (self.entry_type == 'A'):
        key_arr = self.key.split('.')
        method_name = key_arr[-1]
        key_arr_len = len(key_arr)
        self.alternative_keys.add(method_name)
        if key_arr_len >= 3:
            for l in range(key_arr_len - 1):
                permutations = itertools.permutations(key_arr[:key_arr_len - 1], l + 1)
                for k in permutations:
                    new_key = '{} {}'.format(' '.join(k), method_name)
                    self.alternative_keys.add(new_key)
        else:
            package_name = key_arr[0]
            new_key = '{} {}'.format(package_name, method_name)
            self.alternative_keys.add(new_key)
    return self.alternative_keys
'The setup member function checks to see if the \'output.txt\' file exists. If it does, it is deleted; otherwise nothing needs to be done. This is called in the constructor of the Environment object and shouldn\'t be called again.'
def setup(self):
    if os.path.exists('output.txt'):
        os.remove('output.txt')
'The get_contents() member function loads the file into memory and reads the contents. The file buffer is then closed and then a BeautifulSoup object is instantiated, with the contents. More on this can be read at the BeautifulSoup documentation website (https://www.crummy.com/software/BeautifulSoup/bs4/doc/).'
def get_contents(self):
    with open('download/packages.html') as f:
        contents = f.read()
    contents = BeautifulSoup(contents, 'html.parser', from_encoding='utf-8')
    return contents
'This member function parses the html document and extracts the cells from the table.'
def parse_contents(self, soup):
    data = []
    table = soup.find('table', attrs={'id': 'packages'})
    table_body = table.find('tbody')
    rows = table_body.find_all('tr')
    # This section aims to extract the individual data points from the
    # table and turn them into a list. For each of the columns found in
    # <td> tags, we return them in a list, while finding one link.
    # We then preprocess the contents by stripping whitespace and
    # stripping the front of the relative link. Once this preprocessing
    # has been done, we connect the URL, and append it to the list.
    for row in rows:
        cols = row.find_all('td')
        ref = row.find('a', href=True)
        cols = [ele.text.strip() for ele in cols]
        relurl = ref['href'].strip('../')
        aburl = urljoin(__TARGET__, relurl)
        cols.append(aburl)
        print cols
        data.append(cols)
    return data
'The concat (concatenation) member function is responsible for preparing the data to be written to the file. The file is laid out like this as requested in the DuckDuckHack docs found here: http://docs.duckduckhack.com/resources/fathead-overview.html#data-file-format'
def concat(self, name, desc, url, version):
    title = name
    type_ = 'A'
    redirect = ''
    four = ''
    categories = ''
    six = ''
    related_topics = ''
    eight = ''
    external_links = ''
    ten = ''
    image = ''
    nAbstract = u'''
        <section class="prog__container">
            <p>{0}</p>
            <pre>
                <code>
                    opam install {1}
                </code>
            </pre>
            <span class="prog__sub">Version: {2}</span>
        </section>
        '''.format(desc, name, version)
    abstract = re.sub('\\s+', ' ', nAbstract)
    url = url
    data = [title, type_, redirect, four, categories, six, related_topics,
            eight, external_links, ten, image, abstract, url]
    line = '\t'.join(data) + '\n'
    return line
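# Hedged usage sketch with made-up values: concat() returns one tab-separated
# Fathead line (13 columns plus a trailing newline) whose abstract has been
# collapsed onto a single line by the re.sub above. `env` stands in for an
# instance of the class defining concat().
line = env.concat('lwt', 'Promise based I/O library', 'https://opam.ocaml.org/packages/lwt/', '5.6.1')
# line.split('\t') -> ['lwt', 'A', '', '', '', '', '', '', '', '', '',
#                      '<section class="prog__container"> ... </section>', 'https://opam.ocaml.org/packages/lwt/\n']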
'The output() member function outputs the rows of data at a time to the output.txt file which is used as the k/v store for the FatHead IA.'
def output(self, data_list):
    with open('output.txt', 'a') as f:
        for data in data_list:
            line = self.concat(data[0], data[2], data[3], data[1])
            f.write(line.encode('utf'))
'Gets all tags defined in \'dl\' tags'
def get_tags(self):
    self.tags = []
    for tag in self.soup.find_all('dl'):
        name = tag.dt.contents[0]
        info = ''
        for p in tag.dd.find_all('p'):
            info += p.getText() + ' '
        a_tags = tag.dd.find_all('a')
        example_id = a_tags[1]['href'].replace('#', '')
        example = self.soup.find('div', {'id': example_id}).getText()
        reference = ''
        try:
            reference = tag.dt.span.a['href']
        except:
            reference = a_tags[0]['href']
        reference = 'http://html5doctor.com/element-index/#' + name
        new_tag = Tag(name, info, reference, example)
        self.tags.append(new_tag)
        logger.info('Tag parsed: %s' % new_tag.name)
'Function to get the source code of the tutorials page'
def get_pages(self):
    file_loc = 'download/events.html'
    file = open(file_loc, 'r+')
    code_str = ''
    for k in file.readlines():
        code_str += k
    self.events_page = bs(code_str, 'html.parser')

    file_loc = 'download/internals.html'
    file = open(file_loc, 'r+')
    code_str = ''
    for k in file.readlines():
        code_str += k
    self.internals_page = bs(code_str, 'html.parser')

    file_loc = 'download/exceptions.html'
    file = open(file_loc, 'r+')
    code_str = ''
    for k in file.readlines():
        code_str += k
    self.exceptions_page = bs(code_str, 'html.parser')

    file_loc = 'download/type_basics.html'
    file = open(file_loc, 'r+')
    code_str = ''
    for k in file.readlines():
        code_str += k
    self.types_page = bs(code_str, 'html.parser')
'Function to replace all unicodes with their HTML equivalent'
def replace_unicodes(self, txt):
    txt = txt.replace('\n', '\\n')
    txt = txt.replace(u'\u2019', '&#8217;')
    txt = txt.replace(u'\u201c', '&#8220;')
    txt = txt.replace(u'\u201d', '&#8221;')
    txt = txt.replace(u'\xb6', '')
    txt = txt.replace(u'\u2013', '&#8211;')
    txt = txt.replace(u'\u2018', '&#8216;')
    return txt
'Function to append a given statement to the given file'
def write_to_file(self, filename, elements):
    currFile = open(filename, 'a')
    for e in elements:
        currFile.write(elements[e])
    currFile.close()
'Function to remove \'\\n\' from the start of a sentence'
def remove_newline(self, text):
    t_list = text.split('\\n')
    txt = ''
    for k in t_list:
        if (k != '') and (k != ' '):
            txt += k + ' '
    return txt