Dataset schema (one record per Python function):
body: full function source (stringlengths 26 to 98.2k)
body_hash: int64 (-9,222,864,604,528,158,000 to 9,221,803,474B)
docstring: extracted docstring (stringlengths 1 to 16.8k)
path: source file path (stringlengths 5 to 230)
name: function name (stringlengths 1 to 96)
repository_name: GitHub repository (stringlengths 7 to 89)
lang: language (stringclasses, 1 value)
body_without_docstring: source with the docstring removed (stringlengths 20 to 98.2k)
# Source: predictive-quality/ml-pipeline-blocks-hpo-sherpa | argo_scheduler.py | make_request | python
def make_request(self, request, data, max_wait=600, step=5, wait=0):
    """Sends a GET, POST or DELETE request every `step` seconds until the
    request succeeds or `wait` exceeds `max_wait`.

    Args:
        request (str): Which kind of request to execute ("GET", "POST" or "DELETE").
        data (str): Submit information, or sherpa's job_id for a status request,
            or job_id for deleting a trial.
        max_wait (int, optional): Time in seconds after which the request
            repetition is stopped. Defaults to 600.
        step (int, optional): Time in seconds after which a failed request is
            repeated. Defaults to 5.
        wait (int, optional): Accumulator to which the step time is added and
            compared to max_wait. Defaults to 0.

    Returns:
        Response: the response of the last request.
    """
    proxies = {'http': None, 'https': None}
    if request == 'GET':
        response = requests.get(self.status_url + data, headers=self.headers,
                                proxies=proxies, verify=False)
    elif request == 'POST':
        response = requests.post(self.submit_url, headers=self.headers,
                                 data=data, proxies=proxies, verify=False)
    elif request == 'DELETE':
        response = requests.delete(self.status_url + data, headers=self.headers,
                                   proxies=proxies, verify=False)
    else:
        # Without this guard `response` would be unbound below.
        logging.error('Request argument is none of ["GET", "POST", "DELETE"].')
        raise ValueError('Unsupported request type: {}'.format(request))
    if response.status_code == 200 or wait > max_wait:
        if wait > max_wait:
            logging.warning('Request has failed for {} seconds with status code: {}:{}'.format(
                max_wait, response.status_code, response.reason))
        return response
    # Retry: wait `step` seconds, log the attempt count, then recurse.
    sleep(step)
    logging.error('Request has failed {} times with reason {}:{}'.format(
        1 + int(wait / step), response.status_code, response.reason))
    return self.make_request(request=request, data=data, max_wait=max_wait,
                             step=step, wait=wait + step)
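The retry counter in the original logging call, `1 + int((max_wait/step) - ((max_wait/step) - (wait/step)))`, reduces algebraically to `1 + int(wait/step)`, which is what the cleaned-up version above logs. A quick self-contained check of the equivalence:

# Verify the simplified retry counter against the original expression.
for max_wait, step in [(600, 5), (60, 10)]:
    for wait in range(0, max_wait, step):
        original = 1 + int((max_wait / step) - ((max_wait / step) - (wait / step)))
        assert original == 1 + int(wait / step)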
# Source: predictive-quality/ml-pipeline-blocks-hpo-sherpa | argo_scheduler.py | file_strategy | python
def file_strategy(self, job_id, metrics):
    """Delete all trial files that were generated by an HPO trial.
    Deletes all files in the output_path related to the job_id.

    Args:
        job_id (str): Sherpa job_id / Argo trial workflow name.
        metrics (dict): Metrics to compare.
    """
    if job_id in self.trials:
        trial = self.trials[job_id]
        if 'output_path' in trial:
            if self.storage_strategy == 'delete':
                delete_s3_objects(trial['output_path'])
            elif self.storage_strategy == 'best':
                if self.best_metric['metric'] is None:
                    # First finished trial becomes the current best.
                    self.best_metric['metric'] = metrics[self.objective]
                    self.best_metric['job_id'] = job_id
                elif ((self.lower_is_better and metrics[self.objective] < self.best_metric['metric'])
                      or (not self.lower_is_better and metrics[self.objective] > self.best_metric['metric'])):
                    # New best trial: drop the previous best trial's files.
                    delete_s3_objects(self.trials[self.best_metric['job_id']]['output_path'])
                    self.best_metric['metric'] = metrics[self.objective]
                    self.best_metric['job_id'] = job_id
                    logging.info('New best trial {} with metric {}'.format(
                        self.best_metric['job_id'], self.best_metric['metric']))
                else:
                    # Not better than the current best: delete this trial's files.
                    delete_s3_objects(trial['output_path'])
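For intuition, this is the bookkeeping the 'best' strategy performs for a minimizing objective, sketched with a bare dict instead of the scheduler instance (the names here are illustrative, not part of the original class):

best = {'metric': None, 'job_id': None}
for job_id, value in [('trial-a', 0.9), ('trial-b', 0.4), ('trial-c', 0.7)]:
    if best['metric'] is None or value < best['metric']:
        best = {'metric': value, 'job_id': job_id}  # the previous best's files would be deleted here
print(best)  # {'metric': 0.4, 'job_id': 'trial-b'}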
# Source: predictive-quality/ml-pipeline-blocks-hpo-sherpa | argo_scheduler.py | submit_job | python
def submit_job(self, command, env={}, job_name=''):
    """Submits a new HPO trial to Argo in order to start a WorkflowTemplate.

    Args:
        command (list[str]): List that contains ['Argo WorkflowTemplate', 'Entrypoint of that Argo WorkflowTemplate'].
        env (dict, optional): Dictionary that contains env variables, mainly the sherpa_trial_id. Defaults to {}.
        job_name (str, optional): Not needed for the Argo scheduler. Defaults to ''.

    Returns:
        str: Sherpa job_id / name of the workflow that was started by Argo.
    """
    os.environ['SHERPA_TRIAL_ID'] = env['SHERPA_TRIAL_ID']
    trial = self.client.get_trial()
    tp = trial.parameters
    WorkflowTemplate = command[0]
    entrypoint = command[1]
    default_parameter = self.default_parameter
    if 'save_to' in tp:
        default_parameter['output_path'] = os.path.join(self.output_path, str(tp['save_to']), '')
    else:
        default_parameter['output_path'] = os.path.join(self.output_path, str(env['SHERPA_TRIAL_ID']), '')
    if 'load_from' in tp and tp['load_from'] != '':
        # Continue training from a previous trial's output.
        default_parameter['model_input_path'] = os.path.join(self.output_path, str(tp['load_from']), '')
        WorkflowTemplate = eval(self.trial_run_parameter)['WorkflowTemplateContinue']
        entrypoint = eval(self.trial_run_parameter)['EntrypointContinue']
    else:
        default_parameter['model_input_path'] = ''
    # trial_run_parameter holds a dict literal; default parameters take precedence.
    merged_parameter = eval(self.trial_run_parameter)
    for k, v in default_parameter.items():
        merged_parameter[k] = v
    epochs = merged_parameter.get('epochs', 0)
    parameters_list = []
    for key, val in merged_parameter.items():
        parameters_list.append('{}={}'.format(key, val))
    data = json.dumps({
        'resourceKind': 'WorkflowTemplate',
        'resourceName': WorkflowTemplate,
        'submitOptions': {
            'entrypoint': entrypoint,
            'labels': 'sherpa_run=' + self.hostname + ',run_name=' + self.run_name,
            'parameters': parameters_list,
        },
    })
    response_submit = self.make_request(request='POST', data=data)
    if response_submit.status_code == 200:
        job_id = json.loads(response_submit.content)['metadata']['name']
        logging.info('Submitted trial {} with job_id {}'.format(env['SHERPA_TRIAL_ID'], job_id))
    else:
        job_id = 'failed_trial_id_' + str(env['SHERPA_TRIAL_ID'])
        logging.warning('Failed to submit job with Trial_ID {} to Argo.'.format(env['SHERPA_TRIAL_ID']))
    self.trials[job_id] = {
        'trial': trial,
        'epochs': epochs,
        'output_path': default_parameter['output_path'],
        'model_input_path': default_parameter['model_input_path'],
        'status': 0,
        'finished': False,
    }
    return job_id
# Source: predictive-quality/ml-pipeline-blocks-hpo-sherpa | argo_scheduler.py | get_status | python
def get_status(self, job_id):
    """Obtains the current status of the job.
    Sends objective values/metrics to the DB when a trial succeeded.
    Compares objective values and decides whether to delete or keep files.

    Args:
        job_id (str): Sherpa job_id / name of the workflow that was started by Argo.

    Returns:
        sherpa.schedulers._JobStatus: the job status.
    """
    response_status = self.make_request(request='GET', data=job_id)
    if response_status.status_code == 200:
        status = json.loads(response_status.content)['status']['phase']
        if status == 'Succeeded':
            if self.trials[job_id]['finished']:
                logging.info('Set status to finished for trial : {}'.format(self.trials[job_id]['trial'].id))
            else:
                filename = self.metrics_filename
                input_path = self.trials[job_id]['output_path']
                metrics = read_json(input_path, filename)
                logging.info('Send metrics for trial: {}'.format(self.trials[job_id]['trial'].id))
                self.client.send_metrics(trial=self.trials[job_id]['trial'],
                                         iteration=self.trials[job_id]['epochs'],
                                         objective=metrics[self.objective],
                                         context=metrics)
                # Report 'Running' once so sherpa picks up the metrics before
                # the job is marked finished.
                status = 'Running'
                self.trials[job_id]['finished'] = True
                self.file_strategy(job_id, metrics)
        elif status == 'Failed':
            delete_s3_objects(self.trials[job_id]['output_path'])
    elif job_id in self.killed_jobs:
        # A deleted workflow no longer answers the GET request.
        status = 'Stopped'
    else:
        status = 'Other'
    s = self.decode_status.get(status, _JobStatus.other)
    if s != self.trials[job_id]['status']:
        logging.info('Jobstatus: {} for Job {}'.format(status, job_id))
        self.trials[job_id]['status'] = s
    return s
# Source: predictive-quality/ml-pipeline-blocks-hpo-sherpa | argo_scheduler.py | kill_job | python
def kill_job(self, job_id):
    """Kill a job by deleting the Argo workflow completely.

    Args:
        job_id (str): Sherpa job_id / name of the workflow that was started by Argo.
    """
    response_kill = self.make_request(request='DELETE', data=job_id)
    if response_kill.status_code == 200:
        self.killed_jobs.append(str(job_id))
# Source: Queuecumber/pytorch-lightning | pytorch_lightning/utilities/imports.py | _module_available | python
def _module_available(module_path: str) -> bool:
    """
    Check if a path is available in your environment

    >>> _module_available('os')
    True
    >>> _module_available('bla.bla')
    False
    """
    # Requires: from importlib.util import find_spec
    try:
        return find_spec(module_path) is not None
    except AttributeError:
        # Dotted path whose parent is not a package.
        return False
    except ModuleNotFoundError:
        # Parent module of the dotted path is not found.
        return False
# Source: Queuecumber/pytorch-lightning | pytorch_lightning/utilities/imports.py | _compare_version | python
def _compare_version(package: str, op, version) -> bool:
    """
    Compare package version with some requirements

    >>> _compare_version("torch", operator.ge, "0.1")
    True
    """
    # Requires: import importlib; from packaging.version import Version;
    # from pkg_resources import DistributionNotFound
    try:
        pkg = importlib.import_module(package)
    except (ModuleNotFoundError, DistributionNotFound):
        return False
    try:
        pkg_version = Version(pkg.__version__)
    except TypeError:
        # __version__ may be mocked and not parseable; assume the requirement is met.
        return True
    return op(pkg_version, Version(version))
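Assuming the module-level imports noted in the comments above and an environment where torch is installed, the helper can be exercised the same way as its doctest:

import operator
print(_compare_version("torch", operator.ge, "0.1"))         # True when torch >= 0.1 is importable
print(_compare_version("not_a_real_pkg", operator.ge, "1"))  # False: the import fails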
# Source: ATikhonov2/leo-editor | leo/plugins/pyplot_backend.py | init | python
def init():
    """Return True if the plugin has loaded successfully."""
    g.trace('pyplot_backend.py is not a plugin.')
    return False
# Source: ATikhonov2/leo-editor | leo/plugins/pyplot_backend.py | new_figure_manager | python
def new_figure_manager(num, *args, **kwargs):
    """
    Create a new figure manager instance
    """
    FigureClass = kwargs.pop('FigureClass', Figure)
    thisFig = FigureClass(*args, **kwargs)
    return new_figure_manager_given_figure(num, thisFig)
# Source: ATikhonov2/leo-editor | leo/plugins/pyplot_backend.py | new_figure_manager_given_figure | python
def new_figure_manager_given_figure(num, figure):
    """
    Create a new figure manager instance for the given figure.
    """
    canvas = FigureCanvasQTAgg(figure)
    return LeoFigureManagerQT(canvas, num)
# Source: ATikhonov2/leo-editor | leo/plugins/pyplot_backend.py | __init__ | python
def __init__(self, canvas, num):
    """Ctor for the LeoFigureManagerQt class."""
    self.c = c = g.app.log.c
    super().__init__(canvas, num)
    self.canvas = canvas
    # Embed the canvas in the viewrendered pane when a controller exists.
    self.vr_controller = vc = vr.controllers.get(c.hash())
    self.splitter = c.free_layout.get_top_splitter()
    self.frame = w = QtWidgets.QFrame()
    w.setLayout(QtWidgets.QVBoxLayout())
    w.layout().addWidget(self.canvas)
    if vc:
        vc.embed_widget(w)

    class DummyWindow:
        def __init__(self, c):
            self.c = c
            self._destroying = None

        def windowTitle(self):
            return self.c.p.h

    self.window = DummyWindow(c)
    # Qt6 moved the enum; fall back to the Qt namespace for older versions.
    FocusPolicy = QtCore.Qt.FocusPolicy if isQt6 else QtCore.Qt
    self.canvas.setFocusPolicy(FocusPolicy.StrongFocus)
    self.canvas.setFocus()
    self.canvas._destroying = False
    self.toolbar = self._get_toolbar(self.canvas, self.frame)
    if self.toolbar is not None:
        layout = self.frame.layout()
        layout.addWidget(self.toolbar)
        self.statusbar_label = QtWidgets.QLabel()
        layout.addWidget(self.statusbar_label)
        if isQt5 or isQt6:
            pass  # The toolbar's message signal no longer exists.
        else:
            self.toolbar.message.connect(self._show_message)
    self.canvas.draw_idle()

    def notify_axes_change(fig):
        if self.toolbar is not None:
            self.toolbar.update()

    self.canvas.figure.add_axobserver(notify_axes_change)
# Source: casadina/py_arcade | multiple_levels.py | main | python
def main():
    """Main program code."""
    window = MyGame()
    window.setup()
    arcade.run()
# Source: casadina/py_arcade | multiple_levels.py | tick | python
def tick(self):
    """Determine tick amount."""
    t_1 = time.perf_counter()
    dt = t_1 - self.time
    self.time = t_1
    self.frame_times.append(dt)
# Source: casadina/py_arcade | multiple_levels.py | get_fps | python
def get_fps(self) -> float:
    """Return FPS as a float."""
    total_time = sum(self.frame_times)
    if total_time == 0:
        return 0
    return len(self.frame_times) / total_time
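The FPS value is the reciprocal of the mean frame time over the stored window. A minimal stand-in for the counter's state (the original class presumably keeps frame times in a bounded deque; that container choice is an assumption here):

from collections import deque
frame_times = deque([1 / 60] * 10, maxlen=100)  # ten frames at 60 Hz
print(len(frame_times) / sum(frame_times))      # ~60.0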
# Source: casadina/py_arcade | multiple_levels.py | update | python
def update(self):
    """Move the player."""
    # Keep the player from moving past the left edge of the screen.
    self.left = max(self.left, 0)
# Source: casadina/py_arcade | multiple_levels.py | __init__ | python
def __init__(self):
    """Call the parent class and set up the window."""
    super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
    self.scene, self.player_sprite = None, None
    self.physics_engine = None
    self.left_pressed = False
    self.right_pressed = False
    self.camera = None
    self.gui_camera = None
    self.score = 0
    self.lives_left = 0
    self.timer = 0
    self.fps = FPSCounter()
    # Accept both arrow keys and WASD.
    self.up = (key.UP, key.W)
    self.down = (key.DOWN, key.S)
    self.left = (key.LEFT, key.A)
    self.right = (key.RIGHT, key.D)
    self.tile_map = None
    self.end_of_map = 0
    self.level = 1
    self.collect_coin_sound = arcade.load_sound(':resources:sounds/coin1.wav')
    self.jump_sound = arcade.load_sound(':resources:sounds/jump1.wav')
    self.game_over_sound = arcade.load_sound(':resources:sounds/gameover1.wav')
    arcade.set_background_color(arcade.csscolor.CORNFLOWER_BLUE)
# Source: casadina/py_arcade | multiple_levels.py | setup | python
def setup(self):
    """Set up the game here. Call this function to restart the game."""
    self.camera = arcade.Camera(self.width, self.height)
    self.gui_camera = arcade.Camera(self.width, self.height)
    map_name = f':resources:tiled_maps/map2_level_{self.level}.json'
    layer_options = {
        LAYER_NAME_PLATFORMS: {'use_spatial_hash': True},
        LAYER_NAME_COINS: {'use_spatial_hash': True},
        LAYER_NAME_DONT_TOUCH: {'use_spatial_hash': True},
    }
    self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)
    self.scene = arcade.Scene.from_tilemap(self.tile_map)
    self.scene.add_sprite_list_after('Player', LAYER_NAME_FOREGROUND)
    image_source = ':resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png'
    self.player_sprite = Player(image_source, CHARACTER_SCALING)
    self.player_sprite.center_x = PLAYER_START_X
    self.player_sprite.center_y = PLAYER_START_Y
    self.scene.add_sprite('Player', self.player_sprite)
    self.physics_engine = arcade.PhysicsEnginePlatformer(
        self.player_sprite, gravity_constant=GRAVITY, walls=self.scene['Platforms'])
    self.score = 0
    self.lives_left = 5
    if self.tile_map.background_color:
        arcade.set_background_color(self.tile_map.background_color)
    self.end_of_map = self.tile_map.width * GRID_PIXEL_SIZE
# Source: casadina/py_arcade | multiple_levels.py | current_fps | python
@property
def current_fps(self) -> float:
    """Determine current fps."""
    return self.fps.get_fps()
# Source: casadina/py_arcade | multiple_levels.py | coins_left | python
@property
def coins_left(self) -> int:
    """Determine coins remaining."""
    return len(self.scene['Coins'])
# Source: casadina/py_arcade | multiple_levels.py | gui_label | python
@staticmethod
def gui_label(text: str, var: any, x: int, y: int):
    """
    Simplify arcade.draw_text.

    Keyword arguments:
    text -- This is the label.
    var -- This is the variable value.
    x -- The percentage of the screen's width at which the text starts.
    y -- The percentage of the screen's height at which the text starts.
    """
    x, y = x / 100, y / 100
    arcade.draw_text(text=f'{text}: {var}', start_x=SCREEN_WIDTH * x,
                     start_y=SCREEN_HEIGHT * y,
                     color=arcade.csscolor.WHITE, font_size=18)
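An illustrative call, assuming an open arcade window and the module-level screen constants (the class name is assumed from the surrounding file): this draws "Score: 10" starting at 5% of the screen width and 95% of the height.

MyGame.gui_label('Score', 10, 5, 95)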
# Source: casadina/py_arcade | multiple_levels.py | display_gui_info | python
def display_gui_info(self):
    """Display GUI information."""
    # Backing panel for the text in the top-left corner.
    arcade.draw_rectangle_filled(center_x=SCREEN_WIDTH / 14,
                                 center_y=SCREEN_HEIGHT - SCREEN_HEIGHT / 10,
                                 width=SCREEN_WIDTH / 7,
                                 height=SCREEN_HEIGHT / 4,
                                 color=arcade.color.IRRESISTIBLE)
    self.gui_label('Score', self.score, 0, 95)
    self.gui_label('Coins Left', self.coins_left, 0, 90)
    self.gui_label('Time', round(self.timer), 0, 85)
    self.gui_label('Lives', self.lives_left, 0, 80)
    self.gui_label('FPS', round(self.current_fps), 90, 95)
# Source: casadina/py_arcade | multiple_levels.py | on_draw | python
def on_draw(self):
    """Render the screen."""
    arcade.start_render()
    self.camera.use()
    self.scene.draw()
    self.gui_camera.use()
    self.display_gui_info()
    self.fps.tick()
# Source: casadina/py_arcade | multiple_levels.py | on_key_press | python
def on_key_press(self, button: int, modifiers: int):
    """Called whenever a key is pressed."""
    if button in self.up and self.physics_engine.can_jump():
        self.player_sprite.change_y = PLAYER_JUMP_SPEED
        arcade.play_sound(self.jump_sound)
    elif button in self.left:
        self.left_pressed = True
    elif button in self.right:
        self.right_pressed = True
# Source: casadina/py_arcade | multiple_levels.py | on_key_release | python
def on_key_release(self, button: int, modifiers: int):
    """Called when the user releases a key."""
    if button in self.left:
        self.left_pressed = False
    elif button in self.right:
        self.right_pressed = False
# Source: casadina/py_arcade | multiple_levels.py | update_player_velocity | python
def update_player_velocity(self):
    """Update velocity based on key state."""
    if self.left_pressed and not self.right_pressed:
        self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
    elif self.right_pressed and not self.left_pressed:
        self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED
    else:
        self.player_sprite.change_x = 0
# Source: casadina/py_arcade | multiple_levels.py | center_camera_to_player | python
def center_camera_to_player(self):
    """Ensure the camera is centered on the player."""
    screen_center_x = self.player_sprite.center_x - (self.camera.viewport_width / 2)
    screen_center_y = self.player_sprite.center_y - (self.camera.viewport_height / 2)
    # Don't let the camera travel past the level's lower-left corner.
    if screen_center_x < 0:
        screen_center_x = 0
    if screen_center_y < 0:
        screen_center_y = 0
    player_centered = (screen_center_x, screen_center_y)
    self.camera.move_to(player_centered)
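With concrete numbers: a player at x = 900 in an 800 px wide viewport puts the camera's left edge at 900 - 800/2 = 500, and the clamp only engages near the level's left border.

player_x, viewport_w = 900, 800
print(max(player_x - viewport_w / 2, 0))  # 500.0
print(max(150 - viewport_w / 2, 0))       # 0 (clamped at the left edge)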
# Source: casadina/py_arcade | multiple_levels.py | player_coin_collision | python
def player_coin_collision(self):
    """
    Detects player collision with coins, then removes the coin sprite.
    This will play a sound and add 1 to the score.
    """
    coin_hit_list = arcade.check_for_collision_with_list(self.player_sprite,
                                                         self.scene['Coins'])
    for coin in coin_hit_list:
        coin.remove_from_sprite_lists()
        arcade.play_sound(self.collect_coin_sound)
        self.score += 1
# Source: casadina/py_arcade | multiple_levels.py | reset_player | python
def reset_player(self):
    """Resets the player to the start position."""
    self.player_sprite.center_x = PLAYER_START_X
    self.player_sprite.center_y = PLAYER_START_Y
# Source: casadina/py_arcade | multiple_levels.py | stop_player | python
def stop_player(self):
    """Stop player movement."""
    self.player_sprite.change_x = 0
    self.player_sprite.change_y = 0
# Source: casadina/py_arcade | multiple_levels.py | game_over | python
def game_over(self):
    """Sets game over and resets position."""
    self.stop_player()
    self.reset_player()
    self.lives_left -= 1
    arcade.play_sound(self.game_over_sound)
# Source: casadina/py_arcade | multiple_levels.py | fell_off_map | python
def fell_off_map(self):
    """Detect if the player fell off the map and then reset position if so."""
    if self.player_sprite.center_y < -100:
        self.game_over()
# Source: casadina/py_arcade | multiple_levels.py | touched_dont_touch | python
def touched_dont_touch(self):
    """Detect collision on the Don't Touch layer. Reset the player on collision."""
    if arcade.check_for_collision_with_list(self.player_sprite,
                                            self.scene[LAYER_NAME_DONT_TOUCH]):
        self.game_over()
# Source: casadina/py_arcade | multiple_levels.py | at_end_of_level | python
def at_end_of_level(self):
    """Checks if the player is at the end of the level and, if so, loads the next level."""
    if self.player_sprite.center_x >= self.end_of_map:
        self.level += 1
        self.setup()
# Source: casadina/py_arcade | multiple_levels.py | on_update | python
def on_update(self, delta_time: float):
    """Movement and game logic."""
    self.timer += delta_time
    self.update_player_velocity()
    self.player_sprite.update()
    self.physics_engine.update()
    self.player_coin_collision()
    self.fell_off_map()
    self.touched_dont_touch()
    self.at_end_of_level()
    self.center_camera_to_player()
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | default_stream_factory | python
def default_stream_factory(total_content_length, filename, content_type,
                           content_length=None):
    """The stream factory that is used per default."""
    # Spill to a temporary file once the payload exceeds 500 KB.
    if total_content_length > 1024 * 500:
        return TemporaryFile('wb+')
    return StringIO()
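The 1024 * 500 threshold means anything over 500 KB is buffered on disk instead of in memory. A sketch of the dispatch, assuming `TemporaryFile` and `StringIO` are imported at module level as in this era of Werkzeug; the calls only illustrate which branch is taken:

small = default_stream_factory(1024, None, None)         # 1 KB -> in-memory StringIO
large = default_stream_factory(1024 * 1024, None, None)  # 1 MB -> disk-backed TemporaryFile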
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | parse_form_data | python
def parse_form_data(environ, stream_factory=None, charset='utf-8',
                    errors='replace', max_form_memory_size=None,
                    max_content_length=None, cls=None, silent=True):
    """Parse the form data in the environ and return it as tuple in the form
    ``(stream, form, files)``.  You should only call this method if the
    transport method is `POST`, `PUT`, or `PATCH`.

    If the mimetype of the data transmitted is `multipart/form-data` the
    files multidict will be filled with `FileStorage` objects.  If the
    mimetype is unknown the input stream is wrapped and returned as first
    argument, else the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`.

    Have a look at :ref:`dealing-with-request-data` for more details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and
       `cls` parameters were added.

    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable that returns a new read and
                           writeable file descriptor.  This callable works
                           the same as :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: the maximum number of bytes to be accepted for
                           in-memory stored form data.  If the data
                           exceeds the value specified an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param max_content_length: If this is provided and the transmitted data
                           is longer than this value an
                           :exc:`~exceptions.RequestEntityTooLarge`
                           exception is raised.
    :param cls: an optional dict class to use.  If this is not specified
                           or `None` the default :class:`MultiDict` is used.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    return FormDataParser(stream_factory, charset, errors,
                          max_form_memory_size, max_content_length,
                          cls, silent).parse_from_environ(environ)
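A hedged usage sketch: inside a WSGI application handling a POST, the shortcut yields the residual stream plus the parsed form and file multidicts. The app below is illustrative, not from the library:

def application(environ, start_response):
    stream, form, files = parse_form_data(environ)
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return ['form fields: {}'.format(', '.join(form.keys()))]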
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | exhaust_stream | python
def exhaust_stream(f):
    """Helper decorator for methods that exhaust the stream on return."""
    def wrapper(self, stream, *args, **kwargs):
        try:
            return f(self, stream, *args, **kwargs)
        finally:
            stream.exhaust()
    return update_wrapper(wrapper, f)
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | is_valid_multipart_boundary | python
def is_valid_multipart_boundary(boundary):
    """Checks if the string given is a valid multipart boundary."""
    return _multipart_boundary_re.match(boundary) is not None
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | _line_parse | python
def _line_parse(line):
    """Removes line ending characters and returns a tuple (`stripped_line`,
    `is_terminated`).
    """
    if line[-2:] == '\r\n':
        return line[:-2], True
    elif line[-1:] in '\r\n':
        return line[:-1], True
    return line, False
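Behaviour on the three possible line endings:

print(_line_parse('value\r\n'))  # ('value', True)
print(_line_parse('value\n'))    # ('value', True)
print(_line_parse('value'))      # ('value', False)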
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | parse_multipart_headers | python
def parse_multipart_headers(iterable):
    """Parses multipart headers from an iterable that yields lines (including
    the trailing newline symbol).  The iterable has to be newline terminated.

    The iterable will stop at the line where the headers ended so it can be
    further consumed.

    :param iterable: iterable of strings that are newline terminated
    """
    result = []
    for line in iterable:
        line, line_terminated = _line_parse(line)
        if not line_terminated:
            raise ValueError('unexpected end of line in multipart header')
        if not line:
            break
        elif line[0] in ' \t' and result:
            # Continuation line: fold it into the previous header's value.
            key, value = result[-1]
            result[-1] = (key, value + '\n ' + line[1:])
        else:
            parts = line.split(':', 1)
            if len(parts) == 2:
                result.append((parts[0].strip(), parts[1].strip()))
    return Headers.linked(result)
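The continuation-line branch is what folds indented header lines into the previous value. For example (the result is shown as a comment since `Headers.linked` needs werkzeug's datastructures):

lines = ['Content-Type: text/plain\r\n',
         'X-Note: first\r\n',
         ' second\r\n',
         '\r\n']
# parse_multipart_headers(iter(lines)) links to:
# [('Content-Type', 'text/plain'), ('X-Note', 'first\n second')]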
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | parse_from_environ | python
def parse_from_environ(self, environ):
    """Parses the information from the environment as form data.

    :param environ: the WSGI environment to be used for parsing.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    content_type = environ.get('CONTENT_TYPE', '')
    mimetype, options = parse_options_header(content_type)
    try:
        content_length = int(environ['CONTENT_LENGTH'])
    except (KeyError, ValueError):
        content_length = 0
    stream = environ['wsgi.input']
    return self.parse(stream, mimetype, content_length, options)
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | parse | python
def parse(self, stream, mimetype, content_length, options=None):
    """Parses the information from the given stream, mimetype,
    content length and mimetype parameters.

    :param stream: an input stream
    :param mimetype: the mimetype of the data
    :param content_length: the content length of the incoming data
    :param options: optional mimetype parameters (used for
                    the multipart boundary for instance)
    :return: A tuple in the form ``(stream, form, files)``.
    """
    if (self.max_content_length is not None and
            content_length > self.max_content_length):
        raise RequestEntityTooLarge()
    if options is None:
        options = {}
    input_stream = LimitedStream(stream, content_length)
    parse_func = self.get_parse_func(mimetype, options)
    if parse_func is not None:
        try:
            return parse_func(self, input_stream, mimetype,
                              content_length, options)
        except ValueError:
            if not self.silent:
                raise
    return input_stream, self.cls(), self.cls()
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | _fix_ie_filename | python
def _fix_ie_filename(self, filename):
    """Internet Explorer 6 transmits the full file name if a file is
    uploaded.  This function strips the full path if it thinks the
    filename is Windows-like absolute.
    """
    if filename[1:3] == ':\\' or filename[:2] == '\\\\':
        return filename.split('\\')[-1]
    return filename
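The check fires for Windows-style absolute paths, either drive letters or UNC shares. Expected behaviour, given a multipart parser instance `parser` (the variable name is illustrative):

# parser._fix_ie_filename('C:\\files\\report.txt')    -> 'report.txt'
# parser._fix_ie_filename('\\\\server\\share\\a.txt') -> 'a.txt'
# parser._fix_ie_filename('plain.txt')                -> 'plain.txt' (unchanged)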
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | _find_terminator | python
def _find_terminator(self, iterator):
    """The terminator might have some additional newlines before it.
    There is at least one application that sends additional newlines
    before headers (the python setuptools package).
    """
    for line in iterator:
        if not line:
            break
        line = line.strip()
        if line:
            return line
    return ''
# Source: Chitrank-Dixit/werkzeug | werkzeug/formparser.py | parse_lines | python
def parse_lines(self, file, boundary, content_length):
    """Generate parts of
    ``('begin_form', (headers, name))``
    ``('begin_file', (headers, name, filename))``
    ``('cont', bytestring)``
    ``('end', None)``

    Always obeys the grammar
    parts = ( begin_form cont* end |
              begin_file cont* end )*
    """
    next_part = '--' + boundary
    last_part = next_part + '--'
    iterator = chain(make_line_iter(file, limit=content_length,
                                    buffer_size=self.buffer_size),
                     _empty_string_iter)
    terminator = self._find_terminator(iterator)
    if terminator != next_part:
        self.fail('Expected boundary at start of multipart data')

    while terminator != last_part:
        headers = parse_multipart_headers(iterator)
        disposition = headers.get('content-disposition')
        if disposition is None:
            self.fail('Missing Content-Disposition header')
        disposition, extra = parse_options_header(disposition)
        transfer_encoding = self.get_part_encoding(headers)
        name = extra.get('name')
        filename = extra.get('filename')
        if filename is None:
            yield _begin_form, (headers, name)
        else:
            yield _begin_file, (headers, name, filename)

        buf = ''
        for line in iterator:
            if not line:
                self.fail('unexpected end of stream')
            if line[:2] == '--':
                terminator = line.rstrip()
                if terminator in (next_part, last_part):
                    break
            if transfer_encoding is not None:
                try:
                    line = line.decode(transfer_encoding)
                except Exception:
                    self.fail('could not decode transfer encoded chunk')
            # Hold back the trailing newline: it may belong to the boundary
            # on the next line rather than to the payload.
            if buf:
                yield _cont, buf
                buf = ''
            if line[-2:] == '\r\n':
                buf = '\r\n'
                cutoff = -2
            else:
                buf = line[-1]
                cutoff = -1
            yield _cont, line[:cutoff]
        else:
            raise ValueError('unexpected end of part')
        if buf not in ('', '\r', '\n', '\r\n'):
            yield _cont, buf
        yield _end, None
def parse_parts(self, file, boundary, content_length): "Generate `('file', (name, val))` and `('form', (name\n ,val))` parts.\n " in_memory = 0 for (ellt, ell) in self.parse_lines(file, boundary, content_length): if (ellt == _begin_file): (headers, name, filename) = ell is_file = True guard_memory = False (filename, container) = self.start_file_streaming(filename, headers, content_length) _write = container.write elif (ellt == _begin_form): (headers, name) = ell is_file = False container = [] _write = container.append guard_memory = (self.max_form_memory_size is not None) elif (ellt == _cont): _write(ell) if guard_memory: in_memory += len(ell) if (in_memory > self.max_form_memory_size): self.in_memory_threshold_reached(in_memory) elif (ellt == _end): if is_file: container.seek(0) (yield ('file', (name, FileStorage(container, filename, name, headers=headers)))) else: part_charset = self.get_part_charset(headers) (yield ('form', (name, _decode_unicode(''.join(container), part_charset, self.errors))))
-1,497,096,617,322,765,800
Generate `('file', (name, val))` and `('form', (name, val))` parts.
werkzeug/formparser.py
parse_parts
Chitrank-Dixit/werkzeug
python
def parse_parts(self, file, boundary, content_length): "Generate `('file', (name, val))` and `('form', (name\n ,val))` parts.\n " in_memory = 0 for (ellt, ell) in self.parse_lines(file, boundary, content_length): if (ellt == _begin_file): (headers, name, filename) = ell is_file = True guard_memory = False (filename, container) = self.start_file_streaming(filename, headers, content_length) _write = container.write elif (ellt == _begin_form): (headers, name) = ell is_file = False container = [] _write = container.append guard_memory = (self.max_form_memory_size is not None) elif (ellt == _cont): _write(ell) if guard_memory: in_memory += len(ell) if (in_memory > self.max_form_memory_size): self.in_memory_threshold_reached(in_memory) elif (ellt == _end): if is_file: container.seek(0) (yield ('file', (name, FileStorage(container, filename, name, headers=headers)))) else: part_charset = self.get_part_charset(headers) (yield ('form', (name, _decode_unicode(''.join(container), part_charset, self.errors))))
def createMatrices(file, word2Idx, maxSentenceLen=100): 'Creates matrices for the events and sentence for the given file' labels = [] positionMatrix1 = [] positionMatrix2 = [] tokenMatrix = [] for line in open(file): splits = line.strip().split('\t') label = splits[0] pos1 = splits[1] pos2 = splits[2] sentence = splits[3] tokens = sentence.split(' ') tokenIds = np.zeros(maxSentenceLen) positionValues1 = np.zeros(maxSentenceLen) positionValues2 = np.zeros(maxSentenceLen) for idx in range(0, min(maxSentenceLen, len(tokens))): tokenIds[idx] = getWordIdx(tokens[idx], word2Idx) distance1 = (idx - int(pos1)) distance2 = (idx - int(pos2)) if (distance1 in distanceMapping): positionValues1[idx] = distanceMapping[distance1] elif (distance1 <= minDistance): positionValues1[idx] = distanceMapping['LowerMin'] else: positionValues1[idx] = distanceMapping['GreaterMax'] if (distance2 in distanceMapping): positionValues2[idx] = distanceMapping[distance2] elif (distance2 <= minDistance): positionValues2[idx] = distanceMapping['LowerMin'] else: positionValues2[idx] = distanceMapping['GreaterMax'] tokenMatrix.append(tokenIds) positionMatrix1.append(positionValues1) positionMatrix2.append(positionValues2) labels.append(labelsMapping[label]) return (np.array(labels, dtype='int32'), np.array(tokenMatrix, dtype='int32'), np.array(positionMatrix1, dtype='int32'), np.array(positionMatrix2, dtype='int32'))
-3,162,865,911,030,710,300
Creates matrices for the events and sentence for the given file
2017-07_Seminar/Session 3 - Relation CNN/code/preprocess.py
createMatrices
BhuvaneshwaranK/deeplearning4nlp-tutorial
python
def createMatrices(file, word2Idx, maxSentenceLen=100): labels = [] positionMatrix1 = [] positionMatrix2 = [] tokenMatrix = [] for line in open(file): splits = line.strip().split('\t') label = splits[0] pos1 = splits[1] pos2 = splits[2] sentence = splits[3] tokens = sentence.split(' ') tokenIds = np.zeros(maxSentenceLen) positionValues1 = np.zeros(maxSentenceLen) positionValues2 = np.zeros(maxSentenceLen) for idx in range(0, min(maxSentenceLen, len(tokens))): tokenIds[idx] = getWordIdx(tokens[idx], word2Idx) distance1 = (idx - int(pos1)) distance2 = (idx - int(pos2)) if (distance1 in distanceMapping): positionValues1[idx] = distanceMapping[distance1] elif (distance1 <= minDistance): positionValues1[idx] = distanceMapping['LowerMin'] else: positionValues1[idx] = distanceMapping['GreaterMax'] if (distance2 in distanceMapping): positionValues2[idx] = distanceMapping[distance2] elif (distance2 <= minDistance): positionValues2[idx] = distanceMapping['LowerMin'] else: positionValues2[idx] = distanceMapping['GreaterMax'] tokenMatrix.append(tokenIds) positionMatrix1.append(positionValues1) positionMatrix2.append(positionValues2) labels.append(labelsMapping[label]) return (np.array(labels, dtype='int32'), np.array(tokenMatrix, dtype='int32'), np.array(positionMatrix1, dtype='int32'), np.array(positionMatrix2, dtype='int32'))
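createMatrices() depends on module-level globals that this record does not show (minDistance, maxDistance, distanceMapping, labelsMapping). A plausible construction consistent with the clipping logic above, where every relative position inside [minDistance, maxDistance] gets its own index and everything outside falls into two sentinel buckets, might look like this (an assumption about the surrounding module, not code taken from the record):

# Hypothetical reconstruction of the position-feature globals used above.
minDistance = -30
maxDistance = 30
distanceMapping = {'PADDING': 0, 'LowerMin': 1, 'GreaterMax': 2}
for dis in range(minDistance, maxDistance + 1):
    distanceMapping[dis] = len(distanceMapping)  # one index per clipped offset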
def getWordIdx(token, word2Idx): 'Returns from the word2Idx table the word index for a given token' if (token in word2Idx): return word2Idx[token] elif (token.lower() in word2Idx): return word2Idx[token.lower()] return word2Idx['UNKNOWN_TOKEN']
-500,736,905,236,166,900
Returns from the word2Idx table the word index for a given token
2017-07_Seminar/Session 3 - Relation CNN/code/preprocess.py
getWordIdx
BhuvaneshwaranK/deeplearning4nlp-tutorial
python
def getWordIdx(token, word2Idx): if (token in word2Idx): return word2Idx[token] elif (token.lower() in word2Idx): return word2Idx[token.lower()] return word2Idx['UNKNOWN_TOKEN']
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering): "Handles the mark_groups=... keyword argument in plotting methods of\n clusterings.\n\n This is an internal method, you shouldn't need to mess around with it.\n Its purpose is to handle the extended semantics of the mark_groups=...\n keyword argument in the C{__plot__} method of L{VertexClustering} and\n L{VertexCover} instances, namely the feature that numeric IDs are resolved\n to clusters automatically.\n " if (mark_groups is True): group_iter = ((group, color) for (color, group) in enumerate(clustering)) elif isinstance(mark_groups, dict): group_iter = mark_groups.iteritems() elif (hasattr(mark_groups, '__getitem__') and hasattr(mark_groups, '__len__')): try: first = mark_groups[0] except: first = None if (first is not None): if isinstance(first, (int, long)): group_iter = ((group, color) for (color, group) in enumerate(mark_groups)) else: group_iter = mark_groups else: group_iter = mark_groups elif hasattr(mark_groups, '__iter__'): group_iter = mark_groups else: group_iter = {}.iteritems() def cluster_index_resolver(): for (group, color) in group_iter: if isinstance(group, (int, long)): group = clustering[group] (yield (group, color)) return cluster_index_resolver()
-5,505,528,020,700,937,000
Handles the mark_groups=... keyword argument in plotting methods of clusterings. This is an internal method, you shouldn't need to mess around with it. Its purpose is to handle the extended semantics of the mark_groups=... keyword argument in the C{__plot__} method of L{VertexClustering} and L{VertexCover} instances, namely the feature that numeric IDs are resolved to clusters automatically.
igraph/clustering.py
_handle_mark_groups_arg_for_clustering
tuandnvn/ecat_learning
python
def _handle_mark_groups_arg_for_clustering(mark_groups, clustering): "Handles the mark_groups=... keyword argument in plotting methods of\n clusterings.\n\n This is an internal method, you shouldn't need to mess around with it.\n Its purpose is to handle the extended semantics of the mark_groups=...\n keyword argument in the C{__plot__} method of L{VertexClustering} and\n L{VertexCover} instances, namely the feature that numeric IDs are resolved\n to clusters automatically.\n " if (mark_groups is True): group_iter = ((group, color) for (color, group) in enumerate(clustering)) elif isinstance(mark_groups, dict): group_iter = mark_groups.iteritems() elif (hasattr(mark_groups, '__getitem__') and hasattr(mark_groups, '__len__')): try: first = mark_groups[0] except: first = None if (first is not None): if isinstance(first, (int, long)): group_iter = ((group, color) for (color, group) in enumerate(mark_groups)) else: group_iter = mark_groups else: group_iter = mark_groups elif hasattr(mark_groups, '__iter__'): group_iter = mark_groups else: group_iter = {}.iteritems() def cluster_index_resolver(): for (group, color) in group_iter: if isinstance(group, (int, long)): group = clustering[group] (yield (group, color)) return cluster_index_resolver()
def _prepare_community_comparison(comm1, comm2, remove_none=False): 'Auxiliary method that takes two community structures either as\n membership lists or instances of L{Clustering}, and returns a\n tuple whose two elements are membership lists.\n\n This is used by L{compare_communities} and L{split_join_distance}.\n\n @param comm1: the first community structure as a membership list or\n as a L{Clustering} object.\n @param comm2: the second community structure as a membership list or\n as a L{Clustering} object.\n @param remove_none: whether to remove C{None} entries from the membership\n lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}\n or C{comm2} will result in an exception. If C{remove_none} is C{True},\n C{None} values are filtered away and only the remaining lists are\n compared.\n ' def _ensure_list(obj): if isinstance(obj, Clustering): return obj.membership return list(obj) (vec1, vec2) = (_ensure_list(comm1), _ensure_list(comm2)) if (len(vec1) != len(vec2)): raise ValueError('the two membership vectors must be equal in length') if (remove_none and ((None in vec1) or (None in vec2))): idxs_to_remove = [i for i in xrange(len(vec1)) if ((vec1[i] is None) or (vec2[i] is None))] idxs_to_remove.reverse() n = len(vec1) for i in idxs_to_remove: n -= 1 (vec1[i], vec1[n]) = (vec1[n], vec1[i]) (vec2[i], vec2[n]) = (vec2[n], vec2[i]) del vec1[n:] del vec2[n:] return (vec1, vec2)
-1,930,164,210,523,227,600
Auxiliary method that takes two community structures either as membership lists or instances of L{Clustering}, and returns a tuple whose two elements are membership lists. This is used by L{compare_communities} and L{split_join_distance}. @param comm1: the first community structure as a membership list or as a L{Clustering} object. @param comm2: the second community structure as a membership list or as a L{Clustering} object. @param remove_none: whether to remove C{None} entries from the membership lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1} or C{comm2} will result in an exception. If C{remove_none} is C{True}, C{None} values are filtered away and only the remaining lists are compared.
igraph/clustering.py
_prepare_community_comparison
tuandnvn/ecat_learning
python
def _prepare_community_comparison(comm1, comm2, remove_none=False): 'Auxiliary method that takes two community structures either as\n membership lists or instances of L{Clustering}, and returns a\n tuple whose two elements are membership lists.\n\n This is used by L{compare_communities} and L{split_join_distance}.\n\n @param comm1: the first community structure as a membership list or\n as a L{Clustering} object.\n @param comm2: the second community structure as a membership list or\n as a L{Clustering} object.\n @param remove_none: whether to remove C{None} entries from the membership\n lists. If C{remove_none} is C{False}, a C{None} entry in either C{comm1}\n or C{comm2} will result in an exception. If C{remove_none} is C{True},\n C{None} values are filtered away and only the remaining lists are\n compared.\n ' def _ensure_list(obj): if isinstance(obj, Clustering): return obj.membership return list(obj) (vec1, vec2) = (_ensure_list(comm1), _ensure_list(comm2)) if (len(vec1) != len(vec2)): raise ValueError('the two membership vectors must be equal in length') if (remove_none and ((None in vec1) or (None in vec2))): idxs_to_remove = [i for i in xrange(len(vec1)) if ((vec1[i] is None) or (vec2[i] is None))] idxs_to_remove.reverse() n = len(vec1) for i in idxs_to_remove: n -= 1 (vec1[i], vec1[n]) = (vec1[n], vec1[i]) (vec2[i], vec2[n]) = (vec2[n], vec2[i]) del vec1[n:] del vec2[n:] return (vec1, vec2)
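The remove_none branch above avoids repeated list deletions by swapping every flagged index with the current tail and truncating once; pair alignment between the two vectors is preserved, although the surviving entries may end up reordered. A standalone demonstration on plain lists:

vec1 = [0, 1, None, 1, 2]
vec2 = [0, None, 1, 1, 2]
idxs_to_remove = [i for i in range(len(vec1))
                  if vec1[i] is None or vec2[i] is None]
idxs_to_remove.reverse()
n = len(vec1)
for i in idxs_to_remove:
    n -= 1
    vec1[i], vec1[n] = vec1[n], vec1[i]  # move flagged entry to the tail
    vec2[i], vec2[n] = vec2[n], vec2[i]  # keep the two vectors aligned
del vec1[n:]
del vec2[n:]
print(vec1, vec2)  # [0, 1, 2] [0, 1, 2]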
def compare_communities(comm1, comm2, method='vi', remove_none=False): 'Compares two community structures using various distance measures.\n\n @param comm1: the first community structure as a membership list or\n as a L{Clustering} object.\n @param comm2: the second community structure as a membership list or\n as a L{Clustering} object.\n @param method: the measure to use. C{"vi"} or C{"meila"} means the\n variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}\n means the normalized mutual information as defined by Danon et al (2005),\n C{"split-join"} means the split-join distance of van Dongen (2000),\n C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}\n means the adjusted Rand index of Hubert and Arabie (1985).\n @param remove_none: whether to remove C{None} entries from the membership\n lists. This is handy if your L{Clustering} object was constructed using\n L{VertexClustering.FromAttribute} using an attribute which was not defined\n for all the vertices. If C{remove_none} is C{False}, a C{None} entry in\n either C{comm1} or C{comm2} will result in an exception. If C{remove_none}\n is C{True}, C{None} values are filtered away and only the remaining lists\n are compared.\n\n @return: the calculated measure.\n @newfield ref: Reference\n @ref: Meila M: Comparing clusterings by the variation of information.\n In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel\n Machines: 16th Annual Conference on Computational Learning Theory\n and 7th Kernel Workship, COLT/Kernel 2003, Washington, DC, USA.\n Lecture Notes in Computer Science, vol. 2777, Springer, 2003.\n ISBN: 978-3-540-40720-1.\n @ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community\n structure identification. J Stat Mech P09008, 2005.\n @ref: van Dongen D: Performance criteria for graph clustering and Markov\n cluster experiments. Technical Report INS-R0012, National Research\n Institute for Mathematics and Computer Science in the Netherlands,\n Amsterdam, May 2000.\n @ref: Rand WM: Objective criteria for the evaluation of clustering\n methods. J Am Stat Assoc 66(336):846-850, 1971.\n @ref: Hubert L and Arabie P: Comparing partitions. Journal of\n Classification 2:193-218, 1985.\n ' import igraph._igraph (vec1, vec2) = _prepare_community_comparison(comm1, comm2, remove_none) return igraph._igraph._compare_communities(vec1, vec2, method)
6,305,604,480,575,149,000
Compares two community structures using various distance measures. @param comm1: the first community structure as a membership list or as a L{Clustering} object. @param comm2: the second community structure as a membership list or as a L{Clustering} object. @param method: the measure to use. C{"vi"} or C{"meila"} means the variation of information metric of Meila (2003), C{"nmi"} or C{"danon"} means the normalized mutual information as defined by Danon et al (2005), C{"split-join"} means the split-join distance of van Dongen (2000), C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"} means the adjusted Rand index of Hubert and Arabie (1985). @param remove_none: whether to remove C{None} entries from the membership lists. This is handy if your L{Clustering} object was constructed using L{VertexClustering.FromAttribute} using an attribute which was not defined for all the vertices. If C{remove_none} is C{False}, a C{None} entry in either C{comm1} or C{comm2} will result in an exception. If C{remove_none} is C{True}, C{None} values are filtered away and only the remaining lists are compared. @return: the calculated measure. @newfield ref: Reference @ref: Meila M: Comparing clusterings by the variation of information. In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel Machines: 16th Annual Conference on Computational Learning Theory and 7th Kernel Workshop, COLT/Kernel 2003, Washington, DC, USA. Lecture Notes in Computer Science, vol. 2777, Springer, 2003. ISBN: 978-3-540-40720-1. @ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community structure identification. J Stat Mech P09008, 2005. @ref: van Dongen D: Performance criteria for graph clustering and Markov cluster experiments. Technical Report INS-R0012, National Research Institute for Mathematics and Computer Science in the Netherlands, Amsterdam, May 2000. @ref: Rand WM: Objective criteria for the evaluation of clustering methods. J Am Stat Assoc 66(336):846-850, 1971. @ref: Hubert L and Arabie P: Comparing partitions. Journal of Classification 2:193-218, 1985.
igraph/clustering.py
compare_communities
tuandnvn/ecat_learning
python
def compare_communities(comm1, comm2, method='vi', remove_none=False): 'Compares two community structures using various distance measures.\n\n @param comm1: the first community structure as a membership list or\n as a L{Clustering} object.\n @param comm2: the second community structure as a membership list or\n as a L{Clustering} object.\n @param method: the measure to use. C{"vi"} or C{"meila"} means the\n variation of information metric of Meila (2003), C{"nmi"} or C{"danon"}\n means the normalized mutual information as defined by Danon et al (2005),\n C{"split-join"} means the split-join distance of van Dongen (2000),\n C{"rand"} means the Rand index of Rand (1971), C{"adjusted_rand"}\n means the adjusted Rand index of Hubert and Arabie (1985).\n @param remove_none: whether to remove C{None} entries from the membership\n lists. This is handy if your L{Clustering} object was constructed using\n L{VertexClustering.FromAttribute} using an attribute which was not defined\n for all the vertices. If C{remove_none} is C{False}, a C{None} entry in\n either C{comm1} or C{comm2} will result in an exception. If C{remove_none}\n is C{True}, C{None} values are filtered away and only the remaining lists\n are compared.\n\n @return: the calculated measure.\n @newfield ref: Reference\n @ref: Meila M: Comparing clusterings by the variation of information.\n In: Scholkopf B, Warmuth MK (eds). Learning Theory and Kernel\n Machines: 16th Annual Conference on Computational Learning Theory\n and 7th Kernel Workship, COLT/Kernel 2003, Washington, DC, USA.\n Lecture Notes in Computer Science, vol. 2777, Springer, 2003.\n ISBN: 978-3-540-40720-1.\n @ref: Danon L, Diaz-Guilera A, Duch J, Arenas A: Comparing community\n structure identification. J Stat Mech P09008, 2005.\n @ref: van Dongen D: Performance criteria for graph clustering and Markov\n cluster experiments. Technical Report INS-R0012, National Research\n Institute for Mathematics and Computer Science in the Netherlands,\n Amsterdam, May 2000.\n @ref: Rand WM: Objective criteria for the evaluation of clustering\n methods. J Am Stat Assoc 66(336):846-850, 1971.\n @ref: Hubert L and Arabie P: Comparing partitions. Journal of\n Classification 2:193-218, 1985.\n ' import igraph._igraph (vec1, vec2) = _prepare_community_comparison(comm1, comm2, remove_none) return igraph._igraph._compare_communities(vec1, vec2, method)
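A minimal usage sketch, assuming a python-igraph installation that exports compare_communities at package level (as this module suggests); plain membership lists are accepted directly, without building Clustering objects:

from igraph import compare_communities

a = [0, 0, 0, 1, 1, 1]
b = [0, 0, 1, 1, 1, 1]
print(compare_communities(a, b, method='vi'))    # variation of information
print(compare_communities(a, b, method='rand'))  # Rand index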
def split_join_distance(comm1, comm2, remove_none=False): 'Calculates the split-join distance between two community structures.\n\n The split-join distance is a distance measure defined on the space of\n partitions of a given set. It is the sum of the projection distance of\n one partition from the other and vice versa, where the projection\n number of A from B is if calculated as follows:\n\n 1. For each set in A, find the set in B with which it has the\n maximal overlap, and take note of the size of the overlap.\n\n 2. Take the sum of the maximal overlap sizes for each set in A.\n\n 3. Subtract the sum from M{n}, the number of elements in the\n partition.\n\n Note that the projection distance is asymmetric, that\'s why it has to be\n calculated in both directions and then added together. This function\n returns the projection distance of C{comm1} from C{comm2} and the\n projection distance of C{comm2} from C{comm1}, and returns them in a pair.\n The actual split-join distance is the sum of the two distances. The reason\n why it is presented this way is that one of the elements being zero then\n implies that one of the partitions is a subpartition of the other (and if\n it is close to zero, then one of the partitions is close to being a\n subpartition of the other).\n\n @param comm1: the first community structure as a membership list or\n as a L{Clustering} object.\n @param comm2: the second community structure as a membership list or\n as a L{Clustering} object.\n @param remove_none: whether to remove C{None} entries from the membership\n lists. This is handy if your L{Clustering} object was constructed using\n L{VertexClustering.FromAttribute} using an attribute which was not defined\n for all the vertices. If C{remove_none} is C{False}, a C{None} entry in\n either C{comm1} or C{comm2} will result in an exception. If C{remove_none}\n is C{True}, C{None} values are filtered away and only the remaining lists\n are compared.\n\n @return: the projection distance of C{comm1} from C{comm2} and vice versa\n in a tuple. The split-join distance is the sum of the two.\n @newfield ref: Reference\n @ref: van Dongen D: Performance criteria for graph clustering and Markov\n cluster experiments. Technical Report INS-R0012, National Research\n Institute for Mathematics and Computer Science in the Netherlands,\n Amsterdam, May 2000.\n\n @see: L{compare_communities()} with C{method = "split-join"} if you are\n not interested in the individual projection distances but only the\n sum of them.\n ' import igraph._igraph (vec1, vec2) = _prepare_community_comparison(comm1, comm2, remove_none) return igraph._igraph._split_join_distance(vec1, vec2)
-4,521,227,686,832,673,300
Calculates the split-join distance between two community structures. The split-join distance is a distance measure defined on the space of partitions of a given set. It is the sum of the projection distance of one partition from the other and vice versa, where the projection number of A from B is calculated as follows: 1. For each set in A, find the set in B with which it has the maximal overlap, and take note of the size of the overlap. 2. Take the sum of the maximal overlap sizes for each set in A. 3. Subtract the sum from M{n}, the number of elements in the partition. Note that the projection distance is asymmetric, that's why it has to be calculated in both directions and then added together. This function returns the projection distance of C{comm1} from C{comm2} and the projection distance of C{comm2} from C{comm1}, and returns them in a pair. The actual split-join distance is the sum of the two distances. The reason why it is presented this way is that one of the elements being zero implies that one of the partitions is a subpartition of the other (and if it is close to zero, then one of the partitions is close to being a subpartition of the other). @param comm1: the first community structure as a membership list or as a L{Clustering} object. @param comm2: the second community structure as a membership list or as a L{Clustering} object. @param remove_none: whether to remove C{None} entries from the membership lists. This is handy if your L{Clustering} object was constructed using L{VertexClustering.FromAttribute} using an attribute which was not defined for all the vertices. If C{remove_none} is C{False}, a C{None} entry in either C{comm1} or C{comm2} will result in an exception. If C{remove_none} is C{True}, C{None} values are filtered away and only the remaining lists are compared. @return: the projection distance of C{comm1} from C{comm2} and vice versa in a tuple. The split-join distance is the sum of the two. @newfield ref: Reference @ref: van Dongen D: Performance criteria for graph clustering and Markov cluster experiments. Technical Report INS-R0012, National Research Institute for Mathematics and Computer Science in the Netherlands, Amsterdam, May 2000. @see: L{compare_communities()} with C{method = "split-join"} if you are not interested in the individual projection distances but only the sum of them.
igraph/clustering.py
split_join_distance
tuandnvn/ecat_learning
python
def split_join_distance(comm1, comm2, remove_none=False): 'Calculates the split-join distance between two community structures.\n\n The split-join distance is a distance measure defined on the space of\n partitions of a given set. It is the sum of the projection distance of\n one partition from the other and vice versa, where the projection\n number of A from B is if calculated as follows:\n\n 1. For each set in A, find the set in B with which it has the\n maximal overlap, and take note of the size of the overlap.\n\n 2. Take the sum of the maximal overlap sizes for each set in A.\n\n 3. Subtract the sum from M{n}, the number of elements in the\n partition.\n\n Note that the projection distance is asymmetric, that\'s why it has to be\n calculated in both directions and then added together. This function\n returns the projection distance of C{comm1} from C{comm2} and the\n projection distance of C{comm2} from C{comm1}, and returns them in a pair.\n The actual split-join distance is the sum of the two distances. The reason\n why it is presented this way is that one of the elements being zero then\n implies that one of the partitions is a subpartition of the other (and if\n it is close to zero, then one of the partitions is close to being a\n subpartition of the other).\n\n @param comm1: the first community structure as a membership list or\n as a L{Clustering} object.\n @param comm2: the second community structure as a membership list or\n as a L{Clustering} object.\n @param remove_none: whether to remove C{None} entries from the membership\n lists. This is handy if your L{Clustering} object was constructed using\n L{VertexClustering.FromAttribute} using an attribute which was not defined\n for all the vertices. If C{remove_none} is C{False}, a C{None} entry in\n either C{comm1} or C{comm2} will result in an exception. If C{remove_none}\n is C{True}, C{None} values are filtered away and only the remaining lists\n are compared.\n\n @return: the projection distance of C{comm1} from C{comm2} and vice versa\n in a tuple. The split-join distance is the sum of the two.\n @newfield ref: Reference\n @ref: van Dongen D: Performance criteria for graph clustering and Markov\n cluster experiments. Technical Report INS-R0012, National Research\n Institute for Mathematics and Computer Science in the Netherlands,\n Amsterdam, May 2000.\n\n @see: L{compare_communities()} with C{method = "split-join"} if you are\n not interested in the individual projection distances but only the\n sum of them.\n ' import igraph._igraph (vec1, vec2) = _prepare_community_comparison(comm1, comm2, remove_none) return igraph._igraph._split_join_distance(vec1, vec2)
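A short sketch of the subpartition property described above, again assuming python-igraph is installed: when one clustering refines the other, one of the two projection distances is zero.

from igraph import split_join_distance

coarse = [0, 0, 0, 0, 1, 1]
fine = [0, 0, 1, 1, 2, 2]  # every 'fine' cluster sits inside a 'coarse' one
d12, d21 = split_join_distance(coarse, fine)
print(d12, d21)  # (2, 0): d21 is 0 because 'fine' subpartitions 'coarse'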
def __init__(self, membership, params=None): "Constructor.\n\n @param membership: the membership list -- that is, the cluster\n index in which each element of the set belongs to.\n @param params: additional parameters to be stored in this\n object's dictionary." self._membership = list(membership) if (len(self._membership) > 0): self._len = (max((m for m in self._membership if (m is not None))) + 1) else: self._len = 0 if params: self.__dict__.update(params)
-3,012,367,874,068,840,000
Constructor. @param membership: the membership list -- that is, the cluster index to which each element of the set belongs. @param params: additional parameters to be stored in this object's dictionary.
igraph/clustering.py
__init__
tuandnvn/ecat_learning
python
def __init__(self, membership, params=None): "Constructor.\n\n @param membership: the membership list -- that is, the cluster\n index in which each element of the set belongs to.\n @param params: additional parameters to be stored in this\n object's dictionary." self._membership = list(membership) if (len(self._membership) > 0): self._len = (max((m for m in self._membership if (m is not None))) + 1) else: self._len = 0 if params: self.__dict__.update(params)
def __getitem__(self, idx): 'Returns the members of the specified cluster.\n\n @param idx: the index of the cluster\n @return: the members of the specified cluster as a list\n @raise IndexError: if the index is out of bounds' if ((idx < 0) or (idx >= self._len)): raise IndexError('cluster index out of range') return [i for (i, e) in enumerate(self._membership) if (e == idx)]
3,332,638,273,974,701,600
Returns the members of the specified cluster. @param idx: the index of the cluster @return: the members of the specified cluster as a list @raise IndexError: if the index is out of bounds
igraph/clustering.py
__getitem__
tuandnvn/ecat_learning
python
def __getitem__(self, idx): 'Returns the members of the specified cluster.\n\n @param idx: the index of the cluster\n @return: the members of the specified cluster as a list\n @raise IndexError: if the index is out of bounds' if ((idx < 0) or (idx >= self._len)): raise IndexError('cluster index out of range') return [i for (i, e) in enumerate(self._membership) if (e == idx)]
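A small sketch of the basic Clustering interface defined by the constructor and __getitem__ above, assuming python-igraph exposes Clustering at package level:

from igraph import Clustering

cl = Clustering([0, 0, 1, 1, 2])
print(len(cl))     # 3 -- number of clusters
print(cl[0])       # [0, 1] -- members of cluster 0
print(cl.sizes())  # [2, 2, 1]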
def __iter__(self): 'Iterates over the clusters in this clustering.\n\n This method will return a generator that generates the clusters\n one by one.' clusters = [[] for _ in xrange(self._len)] for (idx, cluster) in enumerate(self._membership): clusters[cluster].append(idx) return iter(clusters)
7,846,816,968,255,388,000
Iterates over the clusters in this clustering. This method will return a generator that generates the clusters one by one.
igraph/clustering.py
__iter__
tuandnvn/ecat_learning
python
def __iter__(self): 'Iterates over the clusters in this clustering.\n\n This method will return a generator that generates the clusters\n one by one.' clusters = [[] for _ in xrange(self._len)] for (idx, cluster) in enumerate(self._membership): clusters[cluster].append(idx) return iter(clusters)
def __len__(self): 'Returns the number of clusters.\n\n @return: the number of clusters\n ' return self._len
-5,451,640,488,408,298
Returns the number of clusters. @return: the number of clusters
igraph/clustering.py
__len__
tuandnvn/ecat_learning
python
def __len__(self): 'Returns the number of clusters.\n\n @return: the number of clusters\n ' return self._len
def as_cover(self): 'Returns a L{Cover} that contains the same clusters as this clustering.' return Cover(self._graph, self)
8,436,789,138,042,786,000
Returns a L{Cover} that contains the same clusters as this clustering.
igraph/clustering.py
as_cover
tuandnvn/ecat_learning
python
def as_cover(self): return Cover(self._graph, self)
def compare_to(self, other, *args, **kwds): 'Compares this clustering to another one using some similarity or\n distance metric.\n\n This is a convenience method that simply calls L{compare_communities}\n with the two clusterings as arguments. Any extra positional or keyword\n argument is also forwarded to L{compare_communities}.' return compare_communities(self, other, *args, **kwds)
-242,300,988,132,617,400
Compares this clustering to another one using some similarity or distance metric. This is a convenience method that simply calls L{compare_communities} with the two clusterings as arguments. Any extra positional or keyword argument is also forwarded to L{compare_communities}.
igraph/clustering.py
compare_to
tuandnvn/ecat_learning
python
def compare_to(self, other, *args, **kwds): 'Compares this clustering to another one using some similarity or\n distance metric.\n\n This is a convenience method that simply calls L{compare_communities}\n with the two clusterings as arguments. Any extra positional or keyword\n argument is also forwarded to L{compare_communities}.' return compare_communities(self, other, *args, **kwds)
@property def membership(self): 'Returns the membership vector.' return self._membership[:]
6,664,867,578,842,224,000
Returns the membership vector.
igraph/clustering.py
membership
tuandnvn/ecat_learning
python
@property def membership(self): return self._membership[:]
@property def n(self): 'Returns the number of elements covered by this clustering.' return len(self._membership)
6,145,121,453,949,917,000
Returns the number of elements covered by this clustering.
igraph/clustering.py
n
tuandnvn/ecat_learning
python
@property def n(self): return len(self._membership)
def size(self, idx): 'Returns the size of a given cluster.\n\n @param idx: the cluster in which we are interested.\n ' return len(self[idx])
-2,611,264,052,909,075,500
Returns the size of a given cluster. @param idx: the cluster in which we are interested.
igraph/clustering.py
size
tuandnvn/ecat_learning
python
def size(self, idx): 'Returns the size of a given cluster.\n\n @param idx: the cluster in which we are interested.\n ' return len(self[idx])
def sizes(self, *args): 'Returns the size of given clusters.\n\n The indices are given as positional arguments. If there are no\n positional arguments, the function will return the sizes of all clusters.\n ' counts = ([0] * len(self)) for x in self._membership: counts[x] += 1 if args: return [counts[idx] for idx in args] return counts
2,385,789,323,031,367,700
Returns the size of given clusters. The indices are given as positional arguments. If there are no positional arguments, the function will return the sizes of all clusters.
igraph/clustering.py
sizes
tuandnvn/ecat_learning
python
def sizes(self, *args): 'Returns the size of given clusters.\n\n The indices are given as positional arguments. If there are no\n positional arguments, the function will return the sizes of all clusters.\n ' counts = ([0] * len(self)) for x in self._membership: counts[x] += 1 if args: return [counts[idx] for idx in args] return counts
def size_histogram(self, bin_width=1): 'Returns the histogram of cluster sizes.\n\n @param bin_width: the bin width of the histogram\n @return: a L{Histogram} object\n ' return Histogram(bin_width, self.sizes())
-2,461,763,455,575,568,000
Returns the histogram of cluster sizes. @param bin_width: the bin width of the histogram @return: a L{Histogram} object
igraph/clustering.py
size_histogram
tuandnvn/ecat_learning
python
def size_histogram(self, bin_width=1): 'Returns the histogram of cluster sizes.\n\n @param bin_width: the bin width of the histogram\n @return: a L{Histogram} object\n ' return Histogram(bin_width, self.sizes())
def summary(self, verbosity=0, width=None): 'Returns the summary of the clustering.\n\n The summary includes the number of items and clusters, and also the\n list of members for each of the clusters if the verbosity is nonzero.\n\n @param verbosity: determines whether the cluster members should be\n printed. Zero verbosity prints the number of items and clusters only.\n @return: the summary of the clustering as a string.\n ' out = StringIO() ((print >> out), ('Clustering with %d elements and %d clusters' % (len(self._membership), len(self)))) if (verbosity < 1): return out.getvalue().strip() ndigits = len(str(len(self))) wrapper = _get_wrapper_for_width(width, subsequent_indent=(' ' * (ndigits + 3))) for (idx, cluster) in enumerate(self._formatted_cluster_iterator()): wrapper.initial_indent = ('[%*d] ' % (ndigits, idx)) ((print >> out), '\n'.join(wrapper.wrap(cluster))) return out.getvalue().strip()
-4,108,578,556,226,028,500
Returns the summary of the clustering. The summary includes the number of items and clusters, and also the list of members for each of the clusters if the verbosity is nonzero. @param verbosity: determines whether the cluster members should be printed. Zero verbosity prints the number of items and clusters only. @return: the summary of the clustering as a string.
igraph/clustering.py
summary
tuandnvn/ecat_learning
python
def summary(self, verbosity=0, width=None): 'Returns the summary of the clustering.\n\n The summary includes the number of items and clusters, and also the\n list of members for each of the clusters if the verbosity is nonzero.\n\n @param verbosity: determines whether the cluster members should be\n printed. Zero verbosity prints the number of items and clusters only.\n @return: the summary of the clustering as a string.\n ' out = StringIO() print >> out, ('Clustering with %d elements and %d clusters' % (len(self._membership), len(self))) if (verbosity < 1): return out.getvalue().strip() ndigits = len(str(len(self))) wrapper = _get_wrapper_for_width(width, subsequent_indent=(' ' * (ndigits + 3))) for (idx, cluster) in enumerate(self._formatted_cluster_iterator()): wrapper.initial_indent = ('[%*d] ' % (ndigits, idx)) print >> out, '\n'.join(wrapper.wrap(cluster)) return out.getvalue().strip()
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' for cluster in self: (yield ', '.join((str(member) for member in cluster)))
810,895,616,325,758,500
Iterates over the clusters and formats them into a string to be presented in the summary.
igraph/clustering.py
_formatted_cluster_iterator
tuandnvn/ecat_learning
python
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' for cluster in self: (yield ', '.join((str(member) for member in cluster)))
def __init__(self, graph, membership=None, modularity=None, params=None, modularity_params=None): 'Creates a clustering object for a given graph.\n\n @param graph: the graph that will be associated to the clustering\n @param membership: the membership list. The length of the list must\n be equal to the number of vertices in the graph. If C{None}, every\n vertex is assumed to belong to the same cluster.\n @param modularity: the modularity score of the clustering. If C{None},\n it will be calculated when needed.\n @param params: additional parameters to be stored in this object.\n @param modularity_params: arguments that should be passed to\n L{Graph.modularity} when the modularity is (re)calculated. If the\n original graph was weighted, you should pass a dictionary\n containing a C{weight} key with the appropriate value here.\n ' if (membership is None): Clustering.__init__(self, ([0] * graph.vcount()), params) else: if (len(membership) != graph.vcount()): raise ValueError('membership list has invalid length') Clustering.__init__(self, membership, params) self._graph = graph self._modularity = modularity self._modularity_dirty = (modularity is None) if (modularity_params is None): self._modularity_params = {} else: self._modularity_params = dict(modularity_params)
1,624,140,130,461,915,000
Creates a clustering object for a given graph. @param graph: the graph that will be associated to the clustering @param membership: the membership list. The length of the list must be equal to the number of vertices in the graph. If C{None}, every vertex is assumed to belong to the same cluster. @param modularity: the modularity score of the clustering. If C{None}, it will be calculated when needed. @param params: additional parameters to be stored in this object. @param modularity_params: arguments that should be passed to L{Graph.modularity} when the modularity is (re)calculated. If the original graph was weighted, you should pass a dictionary containing a C{weight} key with the appropriate value here.
igraph/clustering.py
__init__
tuandnvn/ecat_learning
python
def __init__(self, graph, membership=None, modularity=None, params=None, modularity_params=None): 'Creates a clustering object for a given graph.\n\n @param graph: the graph that will be associated to the clustering\n @param membership: the membership list. The length of the list must\n be equal to the number of vertices in the graph. If C{None}, every\n vertex is assumed to belong to the same cluster.\n @param modularity: the modularity score of the clustering. If C{None},\n it will be calculated when needed.\n @param params: additional parameters to be stored in this object.\n @param modularity_params: arguments that should be passed to\n L{Graph.modularity} when the modularity is (re)calculated. If the\n original graph was weighted, you should pass a dictionary\n containing a C{weight} key with the appropriate value here.\n ' if (membership is None): Clustering.__init__(self, ([0] * graph.vcount()), params) else: if (len(membership) != graph.vcount()): raise ValueError('membership list has invalid length') Clustering.__init__(self, membership, params) self._graph = graph self._modularity = modularity self._modularity_dirty = (modularity is None) if (modularity_params is None): self._modularity_params = {} else: self._modularity_params = dict(modularity_params)
@classmethod def FromAttribute(cls, graph, attribute, intervals=None, params=None): 'Creates a vertex clustering based on the value of a vertex attribute.\n\n Vertices having the same attribute will correspond to the same cluster.\n\n @param graph: the graph on which we are working\n @param attribute: name of the attribute on which the clustering\n is based.\n @param intervals: for numeric attributes, you can either pass a single\n number or a list of numbers here. A single number means that the\n vertices will be put in bins of that width and vertices ending up\n in the same bin will be in the same cluster. A list of numbers\n specify the bin positions explicitly; e.g., C{[10, 20, 30]} means\n that there will be four categories: vertices with the attribute\n value less than 10, between 10 and 20, between 20 and 30 and over 30.\n Intervals are closed from the left and open from the right.\n @param params: additional parameters to be stored in this object.\n\n @return: a new VertexClustering object\n ' from bisect import bisect def safeintdiv(x, y): 'Safe integer division that handles None gracefully' if (x is None): return None return int((x / y)) def safebisect(intervals, x): 'Safe list bisection that handles None gracefully' if (x is None): return None return bisect(intervals, x) try: _ = iter(intervals) iterable = True except TypeError: iterable = False if (intervals is None): vec = graph.vs[attribute] elif iterable: intervals = list(intervals) vec = [safebisect(intervals, x) for x in graph.vs[attribute]] else: intervals = float(intervals) vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]] idgen = UniqueIdGenerator() idgen[None] = None vec = [idgen[i] for i in vec] return cls(graph, vec, None, params)
-3,112,355,477,865,406,000
Creates a vertex clustering based on the value of a vertex attribute. Vertices having the same attribute will correspond to the same cluster. @param graph: the graph on which we are working @param attribute: name of the attribute on which the clustering is based. @param intervals: for numeric attributes, you can either pass a single number or a list of numbers here. A single number means that the vertices will be put in bins of that width and vertices ending up in the same bin will be in the same cluster. A list of numbers specifies the bin positions explicitly; e.g., C{[10, 20, 30]} means that there will be four categories: vertices with the attribute value less than 10, between 10 and 20, between 20 and 30 and over 30. Intervals are closed from the left and open from the right. @param params: additional parameters to be stored in this object. @return: a new VertexClustering object
igraph/clustering.py
FromAttribute
tuandnvn/ecat_learning
python
@classmethod def FromAttribute(cls, graph, attribute, intervals=None, params=None): 'Creates a vertex clustering based on the value of a vertex attribute.\n\n Vertices having the same attribute will correspond to the same cluster.\n\n @param graph: the graph on which we are working\n @param attribute: name of the attribute on which the clustering\n is based.\n @param intervals: for numeric attributes, you can either pass a single\n number or a list of numbers here. A single number means that the\n vertices will be put in bins of that width and vertices ending up\n in the same bin will be in the same cluster. A list of numbers\n specify the bin positions explicitly; e.g., C{[10, 20, 30]} means\n that there will be four categories: vertices with the attribute\n value less than 10, between 10 and 20, between 20 and 30 and over 30.\n Intervals are closed from the left and open from the right.\n @param params: additional parameters to be stored in this object.\n\n @return: a new VertexClustering object\n ' from bisect import bisect def safeintdiv(x, y): 'Safe integer division that handles None gracefully' if (x is None): return None return int((x / y)) def safebisect(intervals, x): 'Safe list bisection that handles None gracefully' if (x is None): return None return bisect(intervals, x) try: _ = iter(intervals) iterable = True except TypeError: iterable = False if (intervals is None): vec = graph.vs[attribute] elif iterable: intervals = list(intervals) vec = [safebisect(intervals, x) for x in graph.vs[attribute]] else: intervals = float(intervals) vec = [safeintdiv(x, intervals) for x in graph.vs[attribute]] idgen = UniqueIdGenerator() idgen[None] = None vec = [idgen[i] for i in vec] return cls(graph, vec, None, params)
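A usage sketch for FromAttribute with explicit interval boundaries, assuming python-igraph; with intervals=[10, 20, 30], attribute values are binned by bisection into the four categories the docstring describes:

from igraph import Graph, VertexClustering

g = Graph.Full(5)
g.vs['age'] = [8, 15, 22, 37, 41]
cl = VertexClustering.FromAttribute(g, 'age', intervals=[10, 20, 30])
print(cl.membership)  # [0, 1, 2, 3, 3] -- same bin, same cluster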
def as_cover(self): 'Returns a L{VertexCover} that contains the same clusters as this\n clustering.' return VertexCover(self._graph, self)
6,069,732,515,534,388,000
Returns a L{VertexCover} that contains the same clusters as this clustering.
igraph/clustering.py
as_cover
tuandnvn/ecat_learning
python
def as_cover(self): 'Returns a L{VertexCover} that contains the same clusters as this\n clustering.' return VertexCover(self._graph, self)
def cluster_graph(self, combine_vertices=None, combine_edges=None): 'Returns a graph where each cluster is contracted into a single\n vertex.\n\n In the resulting graph, vertex M{i} represents cluster M{i} in this\n clustering. Vertex M{i} and M{j} will be connected if there was\n at least one connected vertex pair M{(a, b)} in the original graph such\n that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster\n M{j}.\n\n @param combine_vertices: specifies how to derive the attributes of\n the vertices in the new graph from the attributes of the old ones.\n See L{Graph.contract_vertices()} for more details.\n @param combine_edges: specifies how to derive the attributes of the\n edges in the new graph from the attributes of the old ones. See\n L{Graph.simplify()} for more details. If you specify C{False}\n here, edges will not be combined, and the number of edges between\n the vertices representing the original clusters will be equal to\n the number of edges between the members of those clusters in the\n original graph.\n\n @return: the new graph.\n ' result = self.graph.copy() result.contract_vertices(self.membership, combine_vertices) if (combine_edges != False): result.simplify(combine_edges=combine_edges) return result
-5,948,843,330,100,500,000
Returns a graph where each cluster is contracted into a single vertex. In the resulting graph, vertex M{i} represents cluster M{i} in this clustering. Vertices M{i} and M{j} will be connected if there was at least one connected vertex pair M{(a, b)} in the original graph such that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster M{j}. @param combine_vertices: specifies how to derive the attributes of the vertices in the new graph from the attributes of the old ones. See L{Graph.contract_vertices()} for more details. @param combine_edges: specifies how to derive the attributes of the edges in the new graph from the attributes of the old ones. See L{Graph.simplify()} for more details. If you specify C{False} here, edges will not be combined, and the number of edges between the vertices representing the original clusters will be equal to the number of edges between the members of those clusters in the original graph. @return: the new graph.
igraph/clustering.py
cluster_graph
tuandnvn/ecat_learning
python
def cluster_graph(self, combine_vertices=None, combine_edges=None): 'Returns a graph where each cluster is contracted into a single\n vertex.\n\n In the resulting graph, vertex M{i} represents cluster M{i} in this\n clustering. Vertex M{i} and M{j} will be connected if there was\n at least one connected vertex pair M{(a, b)} in the original graph such\n that vertex M{a} was in cluster M{i} and vertex M{b} was in cluster\n M{j}.\n\n @param combine_vertices: specifies how to derive the attributes of\n the vertices in the new graph from the attributes of the old ones.\n See L{Graph.contract_vertices()} for more details.\n @param combine_edges: specifies how to derive the attributes of the\n edges in the new graph from the attributes of the old ones. See\n L{Graph.simplify()} for more details. If you specify C{False}\n here, edges will not be combined, and the number of edges between\n the vertices representing the original clusters will be equal to\n the number of edges between the members of those clusters in the\n original graph.\n\n @return: the new graph.\n ' result = self.graph.copy() result.contract_vertices(self.membership, combine_vertices) if (combine_edges != False): result.simplify(combine_edges=combine_edges) return result
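A sketch of contracting communities into a cluster graph, assuming python-igraph and its community_multilevel() detection method:

from igraph import Graph

g = Graph.Famous('Zachary')
cl = g.community_multilevel()
cg = cl.cluster_graph()  # one vertex per community, simplified edges
print(cg.vcount(), cg.ecount())
raw = cl.cluster_graph(combine_edges=False)
print(raw.ecount())  # 78: every original edge kept; intra-community edges become self-loops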
def crossing(self): 'Returns a boolean vector where element M{i} is C{True} iff edge\n M{i} lies between clusters, C{False} otherwise.' membership = self.membership return [(membership[v1] != membership[v2]) for (v1, v2) in self.graph.get_edgelist()]
1,045,636,364,997,920,800
Returns a boolean vector where element M{i} is C{True} iff edge M{i} lies between clusters, C{False} otherwise.
igraph/clustering.py
crossing
tuandnvn/ecat_learning
python
def crossing(self): 'Returns a boolean vector where element M{i} is C{True} iff edge\n M{i} lies between clusters, C{False} otherwise.' membership = self.membership return [(membership[v1] != membership[v2]) for (v1, v2) in self.graph.get_edgelist()]
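A small sketch of crossing() on a six-vertex ring split into two halves (assuming python-igraph): exactly the two edges joining the halves are flagged.

from igraph import Graph, VertexClustering

g = Graph.Ring(6)  # edges: (0,1) (1,2) (2,3) (3,4) (4,5) (5,0)
cl = VertexClustering(g, [0, 0, 0, 1, 1, 1])
print(cl.crossing())  # [False, False, True, False, False, True]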
@property def modularity(self): 'Returns the modularity score' if self._modularity_dirty: return self._recalculate_modularity_safe() return self._modularity
-3,664,254,341,804,715,000
Returns the modularity score
igraph/clustering.py
modularity
tuandnvn/ecat_learning
python
@property def modularity(self): if self._modularity_dirty: return self._recalculate_modularity_safe() return self._modularity
@property def graph(self): 'Returns the graph belonging to this object' return self._graph
-6,013,293,917,706,169,000
Returns the graph belonging to this object
igraph/clustering.py
graph
tuandnvn/ecat_learning
python
@property def graph(self): return self._graph
def recalculate_modularity(self): 'Recalculates the stored modularity value.\n\n This method must be called before querying the modularity score of the\n clustering through the class member C{modularity} or C{q} if the\n graph has been modified (edges have been added or removed) since the\n creation of the L{VertexClustering} object.\n\n @return: the new modularity score\n ' self._modularity = self._graph.modularity(self._membership, **self._modularity_params) self._modularity_dirty = False return self._modularity
1,722,162,046,988,135,400
Recalculates the stored modularity value. This method must be called before querying the modularity score of the clustering through the class member C{modularity} or C{q} if the graph has been modified (edges have been added or removed) since the creation of the L{VertexClustering} object. @return: the new modularity score
igraph/clustering.py
recalculate_modularity
tuandnvn/ecat_learning
python
def recalculate_modularity(self): 'Recalculates the stored modularity value.\n\n This method must be called before querying the modularity score of the\n clustering through the class member C{modularity} or C{q} if the\n graph has been modified (edges have been added or removed) since the\n creation of the L{VertexClustering} object.\n\n @return: the new modularity score\n ' self._modularity = self._graph.modularity(self._membership, **self._modularity_params) self._modularity_dirty = False return self._modularity
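A sketch of the cache-invalidation workflow this method exists for, assuming python-igraph: mutating the graph does not mark the cached modularity dirty, so the stored score goes stale and must be recomputed explicitly.

from igraph import Graph

g = Graph.Famous('Zachary')
cl = g.community_multilevel()  # returns a VertexClustering
before = cl.modularity
g.add_edges([(0, 33)])         # the clustering does not see this change
after = cl.recalculate_modularity()
print(before, after)           # cached score vs. recomputed score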
def _recalculate_modularity_safe(self): 'Recalculates the stored modularity value and swallows all exceptions\n raised by the modularity function (if any).\n\n @return: the new modularity score or C{None} if the modularity function\n could not be calculated.\n ' try: return self.recalculate_modularity() except: return None finally: self._modularity_dirty = False
-3,958,502,414,622,825,500
Recalculates the stored modularity value and swallows all exceptions raised by the modularity function (if any). @return: the new modularity score or C{None} if the modularity function could not be calculated.
igraph/clustering.py
_recalculate_modularity_safe
tuandnvn/ecat_learning
python
def _recalculate_modularity_safe(self): 'Recalculates the stored modularity value and swallows all exceptions\n raised by the modularity function (if any).\n\n @return: the new modularity score or C{None} if the modularity function\n could not be calculated.\n ' try: return self.recalculate_modularity() except: return None finally: self._modularity_dirty = False
def subgraph(self, idx): "Get the subgraph belonging to a given cluster.\n\n @param idx: the cluster index\n @return: a copy of the subgraph\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the clustering was constructed.\n " return self._graph.subgraph(self[idx])
4,888,167,428,059,338,000
Get the subgraph belonging to a given cluster. @param idx: the cluster index @return: a copy of the subgraph @precondition: the vertex set of the graph hasn't been modified since the moment the clustering was constructed.
igraph/clustering.py
subgraph
tuandnvn/ecat_learning
python
def subgraph(self, idx): "Get the subgraph belonging to a given cluster.\n\n @param idx: the cluster index\n @return: a copy of the subgraph\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the clustering was constructed.\n " return self._graph.subgraph(self[idx])
def subgraphs(self): "Gets all the subgraphs belonging to each of the clusters.\n\n @return: a list containing copies of the subgraphs\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the clustering was constructed.\n " return [self._graph.subgraph(cl) for cl in self]
397,228,615,663,400,600
Gets all the subgraphs belonging to each of the clusters. @return: a list containing copies of the subgraphs @precondition: the vertex set of the graph hasn't been modified since the moment the clustering was constructed.
igraph/clustering.py
subgraphs
tuandnvn/ecat_learning
python
def subgraphs(self): "Gets all the subgraphs belonging to each of the clusters.\n\n @return: a list containing copies of the subgraphs\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the clustering was constructed.\n " return [self._graph.subgraph(cl) for cl in self]
def giant(self): "Returns the giant community of the clustered graph.\n\n The giant component a community for which no larger community exists.\n @note: there can be multiple giant communities, this method will return\n the copy of an arbitrary one if there are multiple giant communities.\n\n @return: a copy of the giant community.\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the clustering was constructed.\n " ss = self.sizes() max_size = max(ss) return self.subgraph(ss.index(max_size))
5,153,737,018,873,520,000
Returns the giant community of the clustered graph. The giant component is a community for which no larger community exists. @note: there can be multiple giant communities; this method will return the copy of an arbitrary one if there are multiple giant communities. @return: a copy of the giant community. @precondition: the vertex set of the graph hasn't been modified since the moment the clustering was constructed.
igraph/clustering.py
giant
tuandnvn/ecat_learning
python
def giant(self): "Returns the giant community of the clustered graph.\n\n The giant component is a community for which no larger community exists.\n @note: there can be multiple giant communities, this method will return\n the copy of an arbitrary one if there are multiple giant communities.\n\n @return: a copy of the giant community.\n @precondition: the vertex set of the graph hasn't been modified since\n the moment the clustering was constructed.\n " ss = self.sizes() max_size = max(ss) return self.subgraph(ss.index(max_size))
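A usage sketch for giant(), assuming python-igraph; the returned subgraph has as many vertices as the largest community:

from igraph import Graph

g = Graph.Famous('Zachary')
cl = g.community_multilevel()
big = cl.giant()
print(big.vcount() == max(cl.sizes()))  # True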
def __plot__(self, context, bbox, palette, *args, **kwds): 'Plots the clustering to the given Cairo context in the given\n bounding box.\n\n This is done by calling L{Graph.__plot__()} with the same arguments, but\n coloring the graph vertices according to the current clustering (unless\n overridden by the C{vertex_color} argument explicitly).\n\n This method understands all the positional and keyword arguments that\n are understood by L{Graph.__plot__()}, only the differences will be\n highlighted here:\n\n - C{mark_groups}: whether to highlight some of the vertex groups by\n colored polygons. Besides the values accepted by L{Graph.__plot__}\n (i.e., a dict mapping colors to vertex indices, a list containing\n lists of vertex indices, or C{False}), the following are also\n accepted:\n\n - C{True}: all the groups will be highlighted, the colors matching\n the corresponding color indices from the current palette\n (see the C{palette} keyword argument of L{Graph.__plot__}.\n\n - A dict mapping cluster indices or tuples of vertex indices to\n color names. The given clusters or vertex groups will be\n highlighted by the given colors.\n\n - A list of cluster indices. This is equivalent to passing a\n dict mapping numeric color indices from the current palette\n to cluster indices; therefore, the cluster referred to by element\n I{i} of the list will be highlighted by color I{i} from the\n palette.\n\n The value of the C{plotting.mark_groups} configuration key is also\n taken into account here; if that configuration key is C{True} and\n C{mark_groups} is not given explicitly, it will automatically be set\n to C{True}.\n\n In place of lists of vertex indices, you may also use L{VertexSeq}\n instances.\n\n In place of color names, you may also use color indices into the\n current palette. C{None} as a color name will mean that the\n corresponding group is ignored.\n\n - C{palette}: the palette used to resolve numeric color indices to RGBA\n values. By default, this is an instance of L{ClusterColoringPalette}.\n\n @see: L{Graph.__plot__()} for more supported keyword arguments.\n ' if (('edge_color' not in kwds) and ('color' not in self.graph.edge_attributes())): colors = ['grey20', 'grey80'] kwds['edge_color'] = [colors[is_crossing] for is_crossing in self.crossing()] if (palette is None): palette = ClusterColoringPalette(len(self)) if ('mark_groups' not in kwds): if Configuration.instance()['plotting.mark_groups']: kwds['mark_groups'] = ((group, color) for (color, group) in enumerate(self)) else: kwds['mark_groups'] = _handle_mark_groups_arg_for_clustering(kwds['mark_groups'], self) if ('vertex_color' not in kwds): kwds['vertex_color'] = self.membership return self._graph.__plot__(context, bbox, palette, *args, **kwds)
626,841,932,283,767,200
Plots the clustering to the given Cairo context in the given bounding box. This is done by calling L{Graph.__plot__()} with the same arguments, but coloring the graph vertices according to the current clustering (unless overridden by the C{vertex_color} argument explicitly). This method understands all the positional and keyword arguments that are understood by L{Graph.__plot__()}; only the differences will be highlighted here: - C{mark_groups}: whether to highlight some of the vertex groups by colored polygons. Besides the values accepted by L{Graph.__plot__} (i.e., a dict mapping colors to vertex indices, a list containing lists of vertex indices, or C{False}), the following are also accepted: - C{True}: all the groups will be highlighted, the colors matching the corresponding color indices from the current palette (see the C{palette} keyword argument of L{Graph.__plot__}). - A dict mapping cluster indices or tuples of vertex indices to color names. The given clusters or vertex groups will be highlighted by the given colors. - A list of cluster indices. This is equivalent to passing a dict mapping numeric color indices from the current palette to cluster indices; therefore, the cluster referred to by element I{i} of the list will be highlighted by color I{i} from the palette. The value of the C{plotting.mark_groups} configuration key is also taken into account here; if that configuration key is C{True} and C{mark_groups} is not given explicitly, it will automatically be set to C{True}. In place of lists of vertex indices, you may also use L{VertexSeq} instances. In place of color names, you may also use color indices into the current palette. C{None} as a color name will mean that the corresponding group is ignored. - C{palette}: the palette used to resolve numeric color indices to RGBA values. By default, this is an instance of L{ClusterColoringPalette}. @see: L{Graph.__plot__()} for more supported keyword arguments.
igraph/clustering.py
__plot__
tuandnvn/ecat_learning
python
def __plot__(self, context, bbox, palette, *args, **kwds): 'Plots the clustering to the given Cairo context in the given\n bounding box.\n\n This is done by calling L{Graph.__plot__()} with the same arguments, but\n coloring the graph vertices according to the current clustering (unless\n overridden by the C{vertex_color} argument explicitly).\n\n This method understands all the positional and keyword arguments that\n are understood by L{Graph.__plot__()}; only the differences will be\n highlighted here:\n\n - C{mark_groups}: whether to highlight some of the vertex groups by\n colored polygons. Besides the values accepted by L{Graph.__plot__}\n (i.e., a dict mapping colors to vertex indices, a list containing\n lists of vertex indices, or C{False}), the following are also\n accepted:\n\n - C{True}: all the groups will be highlighted, the colors matching\n the corresponding color indices from the current palette\n (see the C{palette} keyword argument of L{Graph.__plot__}).\n\n - A dict mapping cluster indices or tuples of vertex indices to\n color names. The given clusters or vertex groups will be\n highlighted by the given colors.\n\n - A list of cluster indices. This is equivalent to passing a\n dict mapping numeric color indices from the current palette\n to cluster indices; therefore, the cluster referred to by element\n I{i} of the list will be highlighted by color I{i} from the\n palette.\n\n The value of the C{plotting.mark_groups} configuration key is also\n taken into account here; if that configuration key is C{True} and\n C{mark_groups} is not given explicitly, it will automatically be set\n to C{True}.\n\n In place of lists of vertex indices, you may also use L{VertexSeq}\n instances.\n\n In place of color names, you may also use color indices into the\n current palette. C{None} as a color name will mean that the\n corresponding group is ignored.\n\n - C{palette}: the palette used to resolve numeric color indices to RGBA\n values. By default, this is an instance of L{ClusterColoringPalette}.\n\n @see: L{Graph.__plot__()} for more supported keyword arguments.\n ' if (('edge_color' not in kwds) and ('color' not in self.graph.edge_attributes())): colors = ['grey20', 'grey80'] kwds['edge_color'] = [colors[is_crossing] for is_crossing in self.crossing()] if (palette is None): palette = ClusterColoringPalette(len(self)) if ('mark_groups' not in kwds): if Configuration.instance()['plotting.mark_groups']: kwds['mark_groups'] = ((group, color) for (color, group) in enumerate(self)) else: kwds['mark_groups'] = _handle_mark_groups_arg_for_clustering(kwds['mark_groups'], self) if ('vertex_color' not in kwds): kwds['vertex_color'] = self.membership return self._graph.__plot__(context, bbox, palette, *args, **kwds)
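A hedged usage sketch for the mark_groups option described above, assuming python-igraph with a working plotting backend (classically Cairo); the file names are arbitrary:

import igraph

g = igraph.Graph.Famous('Zachary')
clustering = g.community_multilevel()

# mark_groups=True highlights every community with a colored polygon,
# coloring groups by their indices in the current palette.
igraph.plot(clustering, 'communities.png', mark_groups=True)

# A dict mapping cluster indices to color names highlights only the
# given clusters -- here, cluster 0 in red.
igraph.plot(clustering, 'one_group.png', mark_groups={0: 'red'})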
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' if self._graph.is_named(): names = self._graph.vs['name'] for cluster in self: (yield ', '.join((str(names[member]) for member in cluster))) else: for cluster in self: (yield ', '.join((str(member) for member in cluster)))
6,838,424,363,819,696,000
Iterates over the clusters and formats them into a string to be presented in the summary.
igraph/clustering.py
_formatted_cluster_iterator
tuandnvn/ecat_learning
python
def _formatted_cluster_iterator(self): 'Iterates over the clusters and formats them into a string to be\n presented in the summary.' if self._graph.is_named(): names = self._graph.vs['name'] for cluster in self: (yield ', '.join((str(names[member]) for member in cluster))) else: for cluster in self: (yield ', '.join((str(member) for member in cluster)))
def __init__(self, merges): 'Creates a hierarchical clustering.\n\n @param merges: the merge history either in matrix or tuple format' self._merges = [tuple(pair) for pair in merges] self._nmerges = len(self._merges) if self._nmerges: self._nitems = ((max(self._merges[(- 1)]) - self._nmerges) + 2) else: self._nitems = 0 self._names = None
3,493,641,226,356,949,500
Creates a hierarchical clustering. @param merges: the merge history either in matrix or tuple format
igraph/clustering.py
__init__
tuandnvn/ecat_learning
python
def __init__(self, merges): 'Creates a hierarchical clustering.\n\n @param merges: the merge history either in matrix or tuple format' self._merges = [tuple(pair) for pair in merges] self._nmerges = len(self._merges) if self._nmerges: self._nitems = ((max(self._merges[(- 1)]) - self._nmerges) + 2) else: self._nitems = 0 self._names = None
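A short sketch of the matrix merge format, assuming Dendrogram is importable from the igraph package top level as in this module:

from igraph import Dendrogram

# Matrix format: with n = 4 leaves (0..3), the merge in row k creates
# node n + k, so row 0 builds node 4 = (0, 1), row 1 builds node
# 5 = (2, 3), and row 2 joins them as node 6 = (4, 5).
d = Dendrogram([(0, 1), (2, 3), (4, 5)])
print(d.format())  # '((0,1)4,(2,3)5)6;'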
@staticmethod def _convert_matrix_to_tuple_repr(merges, n=None): 'Converts the matrix representation of a clustering to a tuple\n representation.\n\n @param merges: the matrix representation of the clustering\n @return: the tuple representation of the clustering\n ' if (n is None): n = (len(merges) + 1) tuple_repr = range(n) idxs = range(n) for (rowidx, row) in enumerate(merges): (i, j) = row try: (idxi, idxj) = (idxs[i], idxs[j]) tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj]) tuple_repr[idxj] = None except IndexError: raise ValueError(('malformed matrix, subgroup referenced ' + ('before being created in step %d' % rowidx))) idxs.append(j) return [x for x in tuple_repr if (x is not None)]
2,679,342,802,674,906,600
Converts the matrix representation of a clustering to a tuple representation. @param merges: the matrix representation of the clustering @return: the tuple representation of the clustering
igraph/clustering.py
_convert_matrix_to_tuple_repr
tuandnvn/ecat_learning
python
@staticmethod def _convert_matrix_to_tuple_repr(merges, n=None): 'Converts the matrix representation of a clustering to a tuple\n representation.\n\n @param merges: the matrix representation of the clustering\n @return: the tuple representation of the clustering\n ' if (n is None): n = (len(merges) + 1) tuple_repr = range(n) idxs = range(n) for (rowidx, row) in enumerate(merges): (i, j) = row try: (idxi, idxj) = (idxs[i], idxs[j]) tuple_repr[idxi] = (tuple_repr[idxi], tuple_repr[idxj]) tuple_repr[idxj] = None except IndexError: raise ValueError(('malformed matrix, subgroup referenced ' + ('before being created in step %d' % rowidx))) idxs.append(j) return [x for x in tuple_repr if (x is not None)]
def _traverse_inorder(self): 'Conducts an inorder traversal of the merge tree.\n\n The inorder traversal returns the nodes on the last level in the order\n they should be drawn so that no edges cross each other.\n\n @return: the result of the inorder traversal in a list.' result = [] seen_nodes = set() for node_index in reversed(xrange((self._nitems + self._nmerges))): if (node_index in seen_nodes): continue stack = [node_index] while stack: last = stack.pop() seen_nodes.add(last) if (last < self._nitems): result.append(last) else: stack.extend(self._merges[(last - self._nitems)]) return result
-8,123,607,309,487,676,000
Conducts an inorder traversal of the merge tree. The inorder traversal returns the nodes on the last level in the order they should be drawn so that no edges cross each other. @return: the result of the inorder traversal in a list.
igraph/clustering.py
_traverse_inorder
tuandnvn/ecat_learning
python
def _traverse_inorder(self): 'Conducts an inorder traversal of the merge tree.\n\n The inorder traversal returns the nodes on the last level in the order\n they should be drawn so that no edges cross each other.\n\n @return: the result of the inorder traversal in a list.' result = [] seen_nodes = set() for node_index in reversed(xrange((self._nitems + self._nmerges))): if (node_index in seen_nodes): continue stack = [node_index] while stack: last = stack.pop() seen_nodes.add(last) if (last < self._nitems): result.append(last) else: stack.extend(self._merges[(last - self._nitems)]) return result
def format(self, format='newick'): 'Formats the dendrogram in a foreign format.\n\n Currently only the Newick format is supported.\n\n Example:\n\n >>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])\n >>> d.format()\n \'((2,3)4,(0,1)5)6;\'\n >>> d.names = list("ABCDEFG")\n >>> d.format()\n \'((C,D)E,(A,B)F)G;\'\n ' if (format == 'newick'): n = (self._nitems + self._nmerges) if (self._names is None): nodes = range(n) else: nodes = list(self._names) if (len(nodes) < n): nodes.extend(('' for _ in xrange((n - len(nodes))))) for (k, (i, j)) in enumerate(self._merges, self._nitems): nodes[k] = ('(%s,%s)%s' % (nodes[i], nodes[j], nodes[k])) nodes[i] = nodes[j] = None return (nodes[(- 1)] + ';') raise ValueError(('unsupported format: %r' % format))
285,569,044,303,103,330
Formats the dendrogram in a foreign format. Currently only the Newick format is supported. Example: >>> d = Dendrogram([(2, 3), (0, 1), (4, 5)]) >>> d.format() '((2,3)4,(0,1)5)6;' >>> d.names = list("ABCDEFG") >>> d.format() '((C,D)E,(A,B)F)G;'
igraph/clustering.py
format
tuandnvn/ecat_learning
python
def format(self, format='newick'): 'Formats the dendrogram in a foreign format.\n\n Currently only the Newick format is supported.\n\n Example:\n\n >>> d = Dendrogram([(2, 3), (0, 1), (4, 5)])\n >>> d.format()\n \'((2,3)4,(0,1)5)6;\'\n >>> d.names = list("ABCDEFG")\n >>> d.format()\n \'((C,D)E,(A,B)F)G;\'\n ' if (format == 'newick'): n = (self._nitems + self._nmerges) if (self._names is None): nodes = range(n) else: nodes = list(self._names) if (len(nodes) < n): nodes.extend(('' for _ in xrange((n - len(nodes))))) for (k, (i, j)) in enumerate(self._merges, self._nitems): nodes[k] = ('(%s,%s)%s' % (nodes[i], nodes[j], nodes[k])) nodes[i] = nodes[j] = None return (nodes[(- 1)] + ';') raise ValueError(('unsupported format: %r' % format))
def summary(self, verbosity=0, max_leaf_count=40): 'Returns the summary of the dendrogram.\n\n The summary includes the number of leaves and branches, and also an\n ASCII art representation of the dendrogram unless it is too large.\n\n @param verbosity: determines whether the ASCII representation of the\n dendrogram should be printed. Zero verbosity prints only the number\n of leaves and branches.\n @param max_leaf_count: the maximal number of leaves to print in the\n ASCII representation. If the dendrogram has more leaves than this\n limit, the ASCII representation will not be printed even if the\n verbosity is larger than or equal to 1.\n @return: the summary of the dendrogram as a string.\n ' out = StringIO() print >> out, ('Dendrogram, %d elements, %d merges' % (self._nitems, self._nmerges)) if ((self._nitems == 0) or (verbosity < 1) or (self._nitems > max_leaf_count)): return out.getvalue().strip() print >> out positions = ([None] * self._nitems) inorder = self._traverse_inorder() distance = 2 level_distance = 2 nextp = 0 for (idx, element) in enumerate(inorder): positions[element] = nextp inorder[idx] = str(element) nextp += max(distance, (len(inorder[idx]) + 1)) width = (max(positions) + 1) print >> out, (' ' * (distance - 1)).join(inorder) midx = 0 max_community_idx = self._nitems while (midx < self._nmerges): char_array = ([' '] * width) for position in positions: if (position >= 0): char_array[position] = '|' char_str = ''.join(char_array) for _ in xrange((level_distance - 1)): print >> out, char_str cidx_incr = 0 while (midx < self._nmerges): (id1, id2) = self._merges[midx] if ((id1 >= max_community_idx) or (id2 >= max_community_idx)): break midx += 1 (pos1, pos2) = (positions[id1], positions[id2]) (positions[id1], positions[id2]) = ((- 1), (- 1)) if (pos1 > pos2): (pos1, pos2) = (pos2, pos1) positions.append(((pos1 + pos2) // 2)) dashes = ('-' * ((pos2 - pos1) - 1)) char_array[pos1:(pos2 + 1)] = ("`%s'" % dashes) cidx_incr += 1 max_community_idx += cidx_incr print >> out, ''.join(char_array) return out.getvalue().strip()
2,575,065,336,735,225,000
Returns the summary of the dendrogram. The summary includes the number of leaves and branches, and also an ASCII art representation of the dendrogram unless it is too large. @param verbosity: determines whether the ASCII representation of the dendrogram should be printed. Zero verbosity prints only the number of leaves and branches. @param max_leaf_count: the maximal number of leaves to print in the ASCII representation. If the dendrogram has more leaves than this limit, the ASCII representation will not be printed even if the verbosity is larger than or equal to 1. @return: the summary of the dendrogram as a string.
igraph/clustering.py
summary
tuandnvn/ecat_learning
python
def summary(self, verbosity=0, max_leaf_count=40): 'Returns the summary of the dendrogram.\n\n The summary includes the number of leaves and branches, and also an\n ASCII art representation of the dendrogram unless it is too large.\n\n @param verbosity: determines whether the ASCII representation of the\n dendrogram should be printed. Zero verbosity prints only the number\n of leaves and branches.\n @param max_leaf_count: the maximal number of leaves to print in the\n ASCII representation. If the dendrogram has more leaves than this\n limit, the ASCII representation will not be printed even if the\n verbosity is larger than or equal to 1.\n @return: the summary of the dendrogram as a string.\n ' out = StringIO() print >> out, ('Dendrogram, %d elements, %d merges' % (self._nitems, self._nmerges)) if ((self._nitems == 0) or (verbosity < 1) or (self._nitems > max_leaf_count)): return out.getvalue().strip() print >> out positions = ([None] * self._nitems) inorder = self._traverse_inorder() distance = 2 level_distance = 2 nextp = 0 for (idx, element) in enumerate(inorder): positions[element] = nextp inorder[idx] = str(element) nextp += max(distance, (len(inorder[idx]) + 1)) width = (max(positions) + 1) print >> out, (' ' * (distance - 1)).join(inorder) midx = 0 max_community_idx = self._nitems while (midx < self._nmerges): char_array = ([' '] * width) for position in positions: if (position >= 0): char_array[position] = '|' char_str = ''.join(char_array) for _ in xrange((level_distance - 1)): print >> out, char_str cidx_incr = 0 while (midx < self._nmerges): (id1, id2) = self._merges[midx] if ((id1 >= max_community_idx) or (id2 >= max_community_idx)): break midx += 1 (pos1, pos2) = (positions[id1], positions[id2]) (positions[id1], positions[id2]) = ((- 1), (- 1)) if (pos1 > pos2): (pos1, pos2) = (pos2, pos1) positions.append(((pos1 + pos2) // 2)) dashes = ('-' * ((pos2 - pos1) - 1)) char_array[pos1:(pos2 + 1)] = ("`%s'" % dashes) cidx_incr += 1 max_community_idx += cidx_incr print >> out, ''.join(char_array) return out.getvalue().strip()
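A small illustration of summary() under the same import assumption as above; with four leaves the dendrogram is well below the default max_leaf_count of 40, so verbosity=1 also prints the ASCII art:

from igraph import Dendrogram

d = Dendrogram([(0, 1), (2, 3), (4, 5)])
print(d.summary())             # header only: 'Dendrogram, 4 elements, 3 merges'
print(d.summary(verbosity=1))  # header plus the ASCII drawing of the tree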
def _item_box_size(self, context, horiz, idx): 'Calculates the amount of space needed for drawing an\n individual vertex at the bottom of the dendrogram.' if ((self._names is None) or (self._names[idx] is None)): (x_bearing, _, _, height, x_advance, _) = context.text_extents('') else: (x_bearing, _, _, height, x_advance, _) = context.text_extents(str(self._names[idx])) if horiz: return ((x_advance - x_bearing), height) return (height, (x_advance - x_bearing))
-6,424,601,739,130,504,000
Calculates the amount of space needed for drawing an individual vertex at the bottom of the dendrogram.
igraph/clustering.py
_item_box_size
tuandnvn/ecat_learning
python
def _item_box_size(self, context, horiz, idx): 'Calculates the amount of space needed for drawing an\n individual vertex at the bottom of the dendrogram.' if ((self._names is None) or (self._names[idx] is None)): (x_bearing, _, _, height, x_advance, _) = context.text_extents('') else: (x_bearing, _, _, height, x_advance, _) = context.text_extents(str(self._names[idx])) if horiz: return ((x_advance - x_bearing), height) return (height, (x_advance - x_bearing))
def _plot_item(self, context, horiz, idx, x, y): 'Plots a dendrogram item to the given Cairo context\n\n @param context: the Cairo context we are plotting on\n @param horiz: whether the dendrogram is horizontally oriented\n @param idx: the index of the item\n @param x: the X position of the item\n @param y: the Y position of the item\n ' if ((self._names is None) or (self._names[idx] is None)): return height = self._item_box_size(context, True, idx)[1] if horiz: context.move_to(x, (y + height)) context.show_text(str(self._names[idx])) else: context.save() context.translate(x, y) context.rotate(((- pi) / 2.0)) context.move_to(0, height) context.show_text(str(self._names[idx])) context.restore()
-9,049,214,614,630,723,000
Plots a dendrogram item to the given Cairo context @param context: the Cairo context we are plotting on @param horiz: whether the dendrogram is horizontally oriented @param idx: the index of the item @param x: the X position of the item @param y: the Y position of the item
igraph/clustering.py
_plot_item
tuandnvn/ecat_learning
python
def _plot_item(self, context, horiz, idx, x, y): 'Plots a dendrogram item to the given Cairo context\n\n @param context: the Cairo context we are plotting on\n @param horiz: whether the dendrogram is horizontally oriented\n @param idx: the index of the item\n @param x: the X position of the item\n @param y: the Y position of the item\n ' if ((self._names is None) or (self._names[idx] is None)): return height = self._item_box_size(context, True, idx)[1] if horiz: context.move_to(x, (y + height)) context.show_text(str(self._names[idx])) else: context.save() context.translate(x, y) context.rotate(((- pi) / 2.0)) context.move_to(0, height) context.show_text(str(self._names[idx])) context.restore()
def __plot__(self, context, bbox, palette, *args, **kwds): 'Draws the dendrogram on the given Cairo context\n\n Supported keyword arguments are:\n\n - C{orientation}: the orientation of the dendrogram. Must be one of\n the following values: C{left-right}, C{bottom-top}, C{right-left}\n or C{top-bottom}. Individual elements are always placed at the\n former edge and merges are performed towards the latter edge.\n Possible aliases: C{horizontal} = C{left-right},\n C{vertical} = C{bottom-top}, C{lr} = C{left-right},\n C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.\n The default is C{left-right}.\n\n ' from igraph.layout import Layout if (self._names is None): self._names = [str(x) for x in xrange(self._nitems)] orientation = str_to_orientation(kwds.get('orientation', 'lr'), reversed_vertical=True) horiz = (orientation in ('lr', 'rl')) font_height = context.font_extents()[2] item_boxes = [self._item_box_size(context, horiz, idx) for idx in xrange(self._nitems)] ygap = (2 if (orientation == 'bt') else 0) xgap = (2 if (orientation == 'lr') else 0) item_boxes = [((x + xgap), (y + ygap)) for (x, y) in item_boxes] layout = Layout(([(0, 0)] * self._nitems), dim=2) inorder = self._traverse_inorder() if (not horiz): (x, y) = (0, 0) for (idx, element) in enumerate(inorder): layout[element] = (x, 0) x += max(font_height, item_boxes[element][0]) for (id1, id2) in self._merges: y += 1 layout.append((((layout[id1][0] + layout[id2][0]) / 2.0), y)) if (orientation == 'bt'): layout.mirror(1) else: (x, y) = (0, 0) for (idx, element) in enumerate(inorder): layout[element] = (0, y) y += max(font_height, item_boxes[element][1]) for (id1, id2) in self._merges: x += 1 layout.append((x, ((layout[id1][1] + layout[id2][1]) / 2.0))) if (orientation == 'rl'): layout.mirror(0) maxw = max((e[0] for e in item_boxes)) maxh = max((e[1] for e in item_boxes)) (width, height) = (float(bbox.width), float(bbox.height)) (delta_x, delta_y) = (0, 0) if horiz: width -= maxw if (orientation == 'lr'): delta_x = maxw else: height -= maxh if (orientation == 'tb'): delta_y = maxh if horiz: delta_y += (font_height / 2.0) else: delta_x += (font_height / 2.0) layout.fit_into((delta_x, delta_y, (width - delta_x), (height - delta_y)), keep_aspect_ratio=False) context.save() context.translate(bbox.left, bbox.top) context.set_source_rgb(0.0, 0.0, 0.0) context.set_line_width(1) if horiz: sgn = (0 if (orientation == 'rl') else (- 1)) for idx in xrange(self._nitems): x = (layout[idx][0] + (sgn * item_boxes[idx][0])) y = (layout[idx][1] - (item_boxes[idx][1] / 2.0)) self._plot_item(context, horiz, idx, x, y) else: sgn = (1 if (orientation == 'bt') else 0) for idx in xrange(self._nitems): x = (layout[idx][0] - (item_boxes[idx][0] / 2.0)) y = (layout[idx][1] + (sgn * item_boxes[idx][1])) self._plot_item(context, horiz, idx, x, y) if (not horiz): for (idx, (id1, id2)) in enumerate(self._merges): (x0, y0) = layout[id1] (x1, y1) = layout[id2] (x2, y2) = layout[(idx + self._nitems)] context.move_to(x0, y0) context.line_to(x0, y2) context.line_to(x1, y2) context.line_to(x1, y1) context.stroke() else: for (idx, (id1, id2)) in enumerate(self._merges): (x0, y0) = layout[id1] (x1, y1) = layout[id2] (x2, y2) = layout[(idx + self._nitems)] context.move_to(x0, y0) context.line_to(x2, y0) context.line_to(x2, y1) context.line_to(x1, y1) context.stroke() context.restore()
-2,328,309,552,966,241,300
Draws the dendrogram on the given Cairo context Supported keyword arguments are: - C{orientation}: the orientation of the dendrogram. Must be one of the following values: C{left-right}, C{bottom-top}, C{right-left} or C{top-bottom}. Individual elements are always placed at the former edge and merges are performed towards the latter edge. Possible aliases: C{horizontal} = C{left-right}, C{vertical} = C{bottom-top}, C{lr} = C{left-right}, C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}. The default is C{left-right}.
igraph/clustering.py
__plot__
tuandnvn/ecat_learning
python
def __plot__(self, context, bbox, palette, *args, **kwds): 'Draws the dendrogram on the given Cairo context\n\n Supported keyword arguments are:\n\n - C{orientation}: the orientation of the dendrogram. Must be one of\n the following values: C{left-right}, C{bottom-top}, C{right-left}\n or C{top-bottom}. Individual elements are always placed at the\n former edge and merges are performed towards the latter edge.\n Possible aliases: C{horizontal} = C{left-right},\n C{vertical} = C{bottom-top}, C{lr} = C{left-right},\n C{rl} = C{right-left}, C{tb} = C{top-bottom}, C{bt} = C{bottom-top}.\n The default is C{left-right}.\n\n ' from igraph.layout import Layout if (self._names is None): self._names = [str(x) for x in xrange(self._nitems)] orientation = str_to_orientation(kwds.get('orientation', 'lr'), reversed_vertical=True) horiz = (orientation in ('lr', 'rl')) font_height = context.font_extents()[2] item_boxes = [self._item_box_size(context, horiz, idx) for idx in xrange(self._nitems)] ygap = (2 if (orientation == 'bt') else 0) xgap = (2 if (orientation == 'lr') else 0) item_boxes = [((x + xgap), (y + ygap)) for (x, y) in item_boxes] layout = Layout(([(0, 0)] * self._nitems), dim=2) inorder = self._traverse_inorder() if (not horiz): (x, y) = (0, 0) for (idx, element) in enumerate(inorder): layout[element] = (x, 0) x += max(font_height, item_boxes[element][0]) for (id1, id2) in self._merges: y += 1 layout.append((((layout[id1][0] + layout[id2][0]) / 2.0), y)) if (orientation == 'bt'): layout.mirror(1) else: (x, y) = (0, 0) for (idx, element) in enumerate(inorder): layout[element] = (0, y) y += max(font_height, item_boxes[element][1]) for (id1, id2) in self._merges: x += 1 layout.append((x, ((layout[id1][1] + layout[id2][1]) / 2.0))) if (orientation == 'rl'): layout.mirror(0) maxw = max((e[0] for e in item_boxes)) maxh = max((e[1] for e in item_boxes)) (width, height) = (float(bbox.width), float(bbox.height)) (delta_x, delta_y) = (0, 0) if horiz: width -= maxw if (orientation == 'lr'): delta_x = maxw else: height -= maxh if (orientation == 'tb'): delta_y = maxh if horiz: delta_y += (font_height / 2.0) else: delta_x += (font_height / 2.0) layout.fit_into((delta_x, delta_y, (width - delta_x), (height - delta_y)), keep_aspect_ratio=False) context.save() context.translate(bbox.left, bbox.top) context.set_source_rgb(0.0, 0.0, 0.0) context.set_line_width(1) if horiz: sgn = (0 if (orientation == 'rl') else (- 1)) for idx in xrange(self._nitems): x = (layout[idx][0] + (sgn * item_boxes[idx][0])) y = (layout[idx][1] - (item_boxes[idx][1] / 2.0)) self._plot_item(context, horiz, idx, x, y) else: sgn = (1 if (orientation == 'bt') else 0) for idx in xrange(self._nitems): x = (layout[idx][0] - (item_boxes[idx][0] / 2.0)) y = (layout[idx][1] + (sgn * item_boxes[idx][1])) self._plot_item(context, horiz, idx, x, y) if (not horiz): for (idx, (id1, id2)) in enumerate(self._merges): (x0, y0) = layout[id1] (x1, y1) = layout[id2] (x2, y2) = layout[(idx + self._nitems)] context.move_to(x0, y0) context.line_to(x0, y2) context.line_to(x1, y2) context.line_to(x1, y1) context.stroke() else: for (idx, (id1, id2)) in enumerate(self._merges): (x0, y0) = layout[id1] (x1, y1) = layout[id2] (x2, y2) = layout[(idx + self._nitems)] context.move_to(x0, y0) context.line_to(x2, y0) context.line_to(x2, y1) context.line_to(x1, y1) context.stroke() context.restore()
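A hedged plotting sketch for the orientation keyword, assuming a plotting-capable python-igraph; community_fastgreedy returns a VertexDendrogram, which inherits this method, and the file name is arbitrary:

import igraph

g = igraph.Graph.Famous('Zachary')
dendrogram = g.community_fastgreedy()

# 'tb' places the individual elements at the top and performs merges
# toward the bottom; the default is 'lr' (left-right).
igraph.plot(dendrogram, 'dendrogram.png', orientation='tb')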
@property def merges(self): 'Returns the performed merges in matrix format' return deepcopy(self._merges)
-5,628,481,384,864,011,000
Returns the performed merges in matrix format
igraph/clustering.py
merges
tuandnvn/ecat_learning
python
@property def merges(self): return deepcopy(self._merges)
@property def names(self): 'Returns the names of the nodes in the dendrogram' return self._names
-9,158,098,303,931,814,000
Returns the names of the nodes in the dendrogram
igraph/clustering.py
names
tuandnvn/ecat_learning
python
@property def names(self): return self._names
@names.setter def names(self, items): 'Sets the names of the nodes in the dendrogram' if (items is None): self._names = None return items = list(items) if (len(items) < self._nitems): raise ValueError(('must specify at least %d names' % self._nitems)) n = (self._nitems + self._nmerges) self._names = items[:n] if (len(self._names) < n): self._names.extend(('' for _ in xrange((n - len(self._names)))))
7,007,590,815,281,501,000
Sets the names of the nodes in the dendrogram
igraph/clustering.py
names
tuandnvn/ecat_learning
python
@names.setter def names(self, items): if (items is None): self._names = None return items = list(items) if (len(items) < self._nitems): raise ValueError(('must specify at least %d names' % self._nitems)) n = (self._nitems + self._nmerges) self._names = items[:n] if (len(self._names) < n): self._names.extend(('' for _ in xrange((n - len(self._names)))))
def __init__(self, graph, merges, optimal_count=None, params=None, modularity_params=None): 'Creates a dendrogram object for a given graph.\n\n @param graph: the graph that will be associated to the clustering\n @param merges: the merges performed given in matrix form.\n @param optimal_count: the optimal number of clusters where the\n dendrogram should be cut. This is a hint usually provided by the\n clustering algorithm that produces the dendrogram. C{None} means\n that such a hint is not available; the optimal count will then be\n selected based on the modularity in such a case.\n @param params: additional parameters to be stored in this object.\n @param modularity_params: arguments that should be passed to\n L{Graph.modularity} when the modularity is (re)calculated. If the\n original graph was weighted, you should pass a dictionary\n containing a C{weight} key with the appropriate value here.\n ' Dendrogram.__init__(self, merges) self._graph = graph self._optimal_count = optimal_count if (modularity_params is None): self._modularity_params = {} else: self._modularity_params = dict(modularity_params)
-3,161,577,109,791,939,600
Creates a dendrogram object for a given graph. @param graph: the graph that will be associated to the clustering @param merges: the merges performed given in matrix form. @param optimal_count: the optimal number of clusters where the dendrogram should be cut. This is a hint usually provided by the clustering algorithm that produces the dendrogram. C{None} means that such a hint is not available; the optimal count will then be selected based on the modularity in such a case. @param params: additional parameters to be stored in this object. @param modularity_params: arguments that should be passed to L{Graph.modularity} when the modularity is (re)calculated. If the original graph was weighted, you should pass a dictionary containing a C{weight} key with the appropriate value here.
igraph/clustering.py
__init__
tuandnvn/ecat_learning
python
def __init__(self, graph, merges, optimal_count=None, params=None, modularity_params=None): 'Creates a dendrogram object for a given graph.\n\n @param graph: the graph that will be associated to the clustering\n @param merges: the merges performed given in matrix form.\n @param optimal_count: the optimal number of clusters where the\n dendrogram should be cut. This is a hint usually provided by the\n clustering algorithm that produces the dendrogram. C{None} means\n that such a hint is not available; the optimal count will then be\n selected based on the modularity in such a case.\n @param params: additional parameters to be stored in this object.\n @param modularity_params: arguments that should be passed to\n L{Graph.modularity} when the modularity is (re)calculated. If the\n original graph was weighted, you should pass a dictionary\n containing a C{weight} key with the appropriate value here.\n ' Dendrogram.__init__(self, merges) self._graph = graph self._optimal_count = optimal_count if (modularity_params is None): self._modularity_params = {} else: self._modularity_params = dict(modularity_params)
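In practice this class is usually produced by the community detection methods rather than constructed by hand. A sketch of the weighted case, where the weights end up in modularity_params so that later modularity evaluations stay consistent; the synthetic weights here are purely illustrative, and community_fastgreedy(weights=...) is a standard python-igraph call:

import igraph
import random

g = igraph.Graph.Famous('Zachary')
g.es['weight'] = [random.random() for _ in range(g.ecount())]

# The weights flow into modularity_params, so optimal_count and
# as_clustering() on the result both evaluate *weighted* modularity.
dendrogram = g.community_fastgreedy(weights='weight')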
def as_clustering(self, n=None): 'Cuts the dendrogram at the given level and returns a corresponding\n L{VertexClustering} object.\n\n @param n: the desired number of clusters. Merges are replayed from the\n beginning until the membership vector has exactly M{n} distinct elements\n or until there are no more recorded merges, whichever happens first.\n If C{None}, the optimal count hint given by the clustering algorithm\n will be used. If the optimal count was not given either, it will be\n calculated by selecting the level where the modularity is maximal.\n @return: a new L{VertexClustering} object.\n ' if (n is None): n = self.optimal_count num_elts = self._graph.vcount() idgen = UniqueIdGenerator() membership = community_to_membership(self._merges, num_elts, (num_elts - n)) membership = [idgen[m] for m in membership] return VertexClustering(self._graph, membership, modularity_params=self._modularity_params)
-4,594,129,517,684,349,400
Cuts the dendrogram at the given level and returns a corresponding L{VertexClustering} object. @param n: the desired number of clusters. Merges are replayed from the beginning until the membership vector has exactly M{n} distinct elements or until there are no more recorded merges, whichever happens first. If C{None}, the optimal count hint given by the clustering algorithm will be used. If the optimal count was not given either, it will be calculated by selecting the level where the modularity is maximal. @return: a new L{VertexClustering} object.
igraph/clustering.py
as_clustering
tuandnvn/ecat_learning
python
def as_clustering(self, n=None): 'Cuts the dendrogram at the given level and returns a corresponding\n L{VertexClustering} object.\n\n @param n: the desired number of clusters. Merges are replayed from the\n beginning until the membership vector has exactly M{n} distinct elements\n or until there are no more recorded merges, whichever happens first.\n If C{None}, the optimal count hint given by the clustering algorithm\n will be used. If the optimal count was not given either, it will be\n calculated by selecting the level where the modularity is maximal.\n @return: a new L{VertexClustering} object.\n ' if (n is None): n = self.optimal_count num_elts = self._graph.vcount() idgen = UniqueIdGenerator() membership = community_to_membership(self._merges, num_elts, (num_elts - n)) membership = [idgen[m] for m in membership] return VertexClustering(self._graph, membership, modularity_params=self._modularity_params)
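A usage sketch under the same assumptions as above:

import igraph

g = igraph.Graph.Famous('Zachary')
dendrogram = g.community_fastgreedy()

# Cut the dendrogram at exactly three clusters...
three = dendrogram.as_clustering(3)
# ...or let the optimal-count hint / modularity scan pick the level.
best = dendrogram.as_clustering()
print(len(three), len(best))  # 3, and the optimal cluster count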
@property def optimal_count(self): 'Returns the optimal number of clusters for this dendrogram.\n\n If an optimal count hint was given at construction time, this\n property simply returns the hint. If such a count was not given,\n this method calculates the optimal number of clusters by maximizing\n the modularity along all the possible cuts in the dendrogram.\n ' if (self._optimal_count is not None): return self._optimal_count n = self._graph.vcount() (max_q, optimal_count) = (0, 1) for step in xrange(min((n - 1), len(self._merges))): membs = community_to_membership(self._merges, n, step) q = self._graph.modularity(membs, **self._modularity_params) if (q > max_q): optimal_count = (n - step) max_q = q self._optimal_count = optimal_count return optimal_count
8,939,029,117,350,291,000
Returns the optimal number of clusters for this dendrogram. If an optimal count hint was given at construction time, this property simply returns the hint. If such a count was not given, this method calculates the optimal number of clusters by maximizing the modularity along all the possible cuts in the dendrogram.
igraph/clustering.py
optimal_count
tuandnvn/ecat_learning
python
@property def optimal_count(self): 'Returns the optimal number of clusters for this dendrogram.\n\n If an optimal count hint was given at construction time, this\n property simply returns the hint. If such a count was not given,\n this method calculates the optimal number of clusters by maximizing\n the modularity along all the possible cuts in the dendrogram.\n ' if (self._optimal_count is not None): return self._optimal_count n = self._graph.vcount() (max_q, optimal_count) = (0, 1) for step in xrange(min((n - 1), len(self._merges))): membs = community_to_membership(self._merges, n, step) q = self._graph.modularity(membs, **self._modularity_params) if (q > max_q): optimal_count = (n - step) max_q = q self._optimal_count = optimal_count return optimal_count
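The property caches its result after the first modularity scan, so repeated access is cheap. A brief sketch, again assuming community_fastgreedy:

import igraph

g = igraph.Graph.Famous('Zachary')
dendrogram = g.community_fastgreedy()

k = dendrogram.optimal_count  # may trigger the modularity scan once
# as_clustering() with n=None reuses the same count, per the code above.
assert len(dendrogram.as_clustering()) == k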
def __plot__(self, context, bbox, palette, *args, **kwds): 'Draws the vertex dendrogram on the given Cairo context\n\n See L{Dendrogram.__plot__} for the list of supported keyword\n arguments.' from igraph.drawing.metamagic import AttributeCollectorBase class VisualVertexBuilder(AttributeCollectorBase): _kwds_prefix = 'vertex_' label = None builder = VisualVertexBuilder(self._graph.vs, kwds) self._names = [vertex.label for vertex in builder] self._names = [(name if (name is not None) else str(idx)) for (idx, name) in enumerate(self._names)] result = Dendrogram.__plot__(self, context, bbox, palette, *args, **kwds) del self._names return result
4,141,242,726,377,235,500
Draws the vertex dendrogram on the given Cairo context See L{Dendrogram.__plot__} for the list of supported keyword arguments.
igraph/clustering.py
__plot__
tuandnvn/ecat_learning
python
def __plot__(self, context, bbox, palette, *args, **kwds): 'Draws the vertex dendrogram on the given Cairo context\n\n See L{Dendrogram.__plot__} for the list of supported keyword\n arguments.' from igraph.drawing.metamagic import AttributeCollectorBase class VisualVertexBuilder(AttributeCollectorBase): _kwds_prefix = 'vertex_' label = None builder = VisualVertexBuilder(self._graph.vs, kwds) self._names = [vertex.label for vertex in builder] self._names = [(name if (name is not None) else str(idx)) for (idx, name) in enumerate(self._names)] result = Dendrogram.__plot__(self, context, bbox, palette, *args, **kwds) del self._names return result
def __init__(self, clusters, n=0): 'Constructs a cover with the given clusters.\n\n @param clusters: the clusters in this cover, as a list or iterable.\n Each cluster is specified by a list or tuple that contains the\n IDs of the items in this cluster. IDs start from zero.\n\n @param n: the total number of elements in the set that is covered\n by this cover. If it is less than the number of unique elements\n found in all the clusters, we will simply use the number of unique\n elements, so it is safe to leave this at zero. You only have to\n specify this parameter if there are some elements that are covered\n by none of the clusters.\n ' self._clusters = [list(cluster) for cluster in clusters] try: self._n = max(((max(cluster) + 1) for cluster in self._clusters if cluster)) except ValueError: self._n = 0 self._n = max(n, self._n)
-5,700,276,278,546,979,000
Constructs a cover with the given clusters. @param clusters: the clusters in this cover, as a list or iterable. Each cluster is specified by a list or tuple that contains the IDs of the items in this cluster. IDs start from zero. @param n: the total number of elements in the set that is covered by this cover. If it is less than the number of unique elements found in all the clusters, we will simply use the number of unique elements, so it is safe to leave this at zero. You only have to specify this parameter if there are some elements that are covered by none of the clusters.
igraph/clustering.py
__init__
tuandnvn/ecat_learning
python
def __init__(self, clusters, n=0): 'Constructs a cover with the given clusters.\n\n @param clusters: the clusters in this cover, as a list or iterable.\n Each cluster is specified by a list or tuple that contains the\n IDs of the items in this cluster. IDs start from zero.\n\n @param n: the total number of elements in the set that is covered\n by this cover. If it is less than the number of unique elements\n found in all the clusters, we will simply use the number of unique\n elements, so it is safe to leave this at zero. You only have to\n specify this parameter if there are some elements that are covered\n by none of the clusters.\n ' self._clusters = [list(cluster) for cluster in clusters] try: self._n = max(((max(cluster) + 1) for cluster in self._clusters if cluster)) except ValueError: self._n = 0 self._n = max(n, self._n)
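A minimal sketch of an overlapping cover; Cover is assumed importable from the igraph package top level as in this module:

from igraph import Cover

# Element 2 sits in both clusters, while element 5 is covered by no
# cluster at all -- which is exactly why n=6 must be given explicitly.
c = Cover([[0, 1, 2], [2, 3, 4]], n=6)
print(len(c))  # 2
print(c[0])    # [0, 1, 2]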
def __getitem__(self, index): 'Returns the cluster with the given index.' return self._clusters[index]
-9,141,471,715,622,353,000
Returns the cluster with the given index.
igraph/clustering.py
__getitem__
tuandnvn/ecat_learning
python
def __getitem__(self, index): return self._clusters[index]
def __iter__(self): 'Iterates over the clusters in this cover.' return iter(self._clusters)
-8,856,924,646,904,825,000
Iterates over the clusters in this cover.
igraph/clustering.py
__iter__
tuandnvn/ecat_learning
python
def __iter__(self): return iter(self._clusters)
def __len__(self): 'Returns the number of clusters in this cover.' return len(self._clusters)
46,158,193,321,388,264
Returns the number of clusters in this cover.
igraph/clustering.py
__len__
tuandnvn/ecat_learning
python
def __len__(self): return len(self._clusters)
def __str__(self): 'Returns a string representation of the cover.' return self.summary(verbosity=1, width=78)
5,406,004,662,587,039,000
Returns a string representation of the cover.
igraph/clustering.py
__str__
tuandnvn/ecat_learning
python
def __str__(self): return self.summary(verbosity=1, width=78)
@property def membership(self): 'Returns the membership vector of this cover.\n\n The membership vector of a cover covering I{n} elements is a list of\n length I{n}, where element I{i} contains the cluster indices of the\n I{i}th item.\n ' result = [[] for _ in xrange(self._n)] for (idx, cluster) in enumerate(self): for item in cluster: result[item].append(idx) return result
-7,302,082,719,879,950,000
Returns the membership vector of this cover. The membership vector of a cover covering I{n} elements is a list of length I{n}, where element I{i} contains the cluster indices of the I{i}th item.
igraph/clustering.py
membership
tuandnvn/ecat_learning
python
@property def membership(self): 'Returns the membership vector of this cover.\n\n The membership vector of a cover covering I{n} elements is a list of\n length I{n}, where element I{i} contains the cluster indices of the\n I{i}th item.\n ' result = [[] for _ in xrange(self._n)] for (idx, cluster) in enumerate(self): for item in cluster: result[item].append(idx) return result
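Continuing the Cover sketch from above:

from igraph import Cover

c = Cover([[0, 1, 2], [2, 3, 4]], n=6)
# One list per element, holding every cluster index that contains it;
# element 2 appears in both clusters and element 5 in none.
print(c.membership)  # [[0], [0], [0, 1], [1], [1], []]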