code (string, lengths 4–4.48k) | docstring (string, lengths 1–6.45k) | _id (string, length 24) |
---|---|---|
def put201( self, boolean_value=None, custom_headers=None, raw=False, **operation_config): <NEW_LINE> <INDENT> url = '/http/success/201' <NEW_LINE> query_parameters = {} <NEW_LINE> header_parameters = {} <NEW_LINE> header_parameters['Content-Type'] = 'application/json; charset=utf-8' <NEW_LINE> if custom_headers: <NEW_LINE> <INDENT> header_parameters.update(custom_headers) <NEW_LINE> <DEDENT> if boolean_value is not None: <NEW_LINE> <INDENT> body_content = self._serialize.body(boolean_value, 'bool') <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> body_content = None <NEW_LINE> <DEDENT> request = self._client.put(url, query_parameters) <NEW_LINE> response = self._client.send( request, header_parameters, body_content, **operation_config) <NEW_LINE> if response.status_code not in [201]: <NEW_LINE> <INDENT> raise models.ErrorException(self._deserialize, response) <NEW_LINE> <DEDENT> if raw: <NEW_LINE> <INDENT> client_raw_response = ClientRawResponse(None, response) <NEW_LINE> return client_raw_response | Put true Boolean value in request returns 201.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<fixtures.acceptancetestshttp.models.ErrorException>` | 625941b499cbb53fe67929c5 |
def test_forward_slice(): <NEW_LINE> <INDENT> _test_slice(np.arange(4, dtype=np.float32).reshape((4, )), begin=[0], size=[2]) <NEW_LINE> _test_slice(np.arange(18, dtype=np.int32).reshape((3, 2, 3)), begin=[1, 0, 0], size=[1, 1, 3]) <NEW_LINE> if package_version.parse(tf.VERSION) >= package_version.parse('1.14.0'): <NEW_LINE> <INDENT> _test_slice(np.arange(8, dtype=np.int32).reshape((2, 4)), begin=[0, 1], size=[-1, -1]) <NEW_LINE> _test_slice(np.arange(5, dtype=np.int32).reshape((5, )), begin=[4], size=[-1]) | SLICE | 625941b43cc13d1c6d3c7164 |
def update_hand(hand, word): <NEW_LINE> <INDENT> temp_dictionary = hand.copy() <NEW_LINE> for character in word: <NEW_LINE> <INDENT> temp_dictionary[character] -= 1 <NEW_LINE> <DEDENT> return temp_dictionary | Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
:param word: string
:param hand: dictionary (string -> int)
:returns: dictionary (string -> int) | 625941b44d74a7450ccd3fa1 |
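A minimal usage sketch for the update_hand record above (the hand and word values are illustrative, not from the dataset):

```python
# Each letter in the word consumes one count from the hand; the original
# hand is left untouched because update_hand works on a copy.
hand = {'a': 1, 'p': 2, 'l': 1, 'e': 1}
new_hand = update_hand(hand, 'app')
assert new_hand == {'a': 0, 'p': 0, 'l': 1, 'e': 1}
assert hand == {'a': 1, 'p': 2, 'l': 1, 'e': 1}  # no side effects
```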
def evaluate_and_log_bleu(model, params, bleu_source, bleu_ref, vocab_file, distribution_strategy=None): <NEW_LINE> <INDENT> subtokenizer = tokenizer.Subtokenizer(vocab_file) <NEW_LINE> uncased_score, cased_score = translate_and_compute_bleu( model, params, subtokenizer, bleu_source, bleu_ref, distribution_strategy) <NEW_LINE> logging.info("Bleu score (uncased): %s", uncased_score) <NEW_LINE> logging.info("Bleu score (cased): %s", cased_score) <NEW_LINE> return uncased_score, cased_score | Calculate and record the BLEU score.
Args:
model: A Keras model, used to generate the translations.
params: A dictionary, containing the translation related parameters.
bleu_source: A file containing source sentences for translation.
bleu_ref: A file containing the reference for the translated sentences.
vocab_file: A file containing the vocabulary for translation.
distribution_strategy: A platform distribution strategy, used for TPU based
translation.
Returns:
uncased_score: A float, the case insensitive BLEU score.
cased_score: A float, the case sensitive BLEU score. | 625941b4009cb60464c6319c |
def _create_user(self, username, email, password, fullname, mobile, **extra_fields): <NEW_LINE> <INDENT> if not email: <NEW_LINE> <INDENT> raise ValueError('The given email must be set') <NEW_LINE> <DEDENT> email = self.normalize_email(email) <NEW_LINE> user = self.model(username=username, email=email, name=fullname, mobile=mobile, **extra_fields) <NEW_LINE> user.set_password(password) <NEW_LINE> user.save(using=self._db) <NEW_LINE> return user | Creates and saves a User with the given email and password. | 625941b47cff6e4e81117764 |
def mergeTextRegionandCell(self,table): <NEW_LINE> <INDENT> pass | CVL LA tool create | 625941b4e64d504609d7461f |
def clean_neighbors(self): <NEW_LINE> <INDENT> with self._rlock: <NEW_LINE> <INDENT> alive_nodes = [node for node in self.neighbors if not node.has_timeout] <NEW_LINE> timeouted = set(self.neighbors) - set(alive_nodes) <NEW_LINE> self._neighbors = dict((node.id, node) for node in alive_nodes) <NEW_LINE> <DEDENT> for id in timeouted: <NEW_LINE> <INDENT> self.log.debug('Timeout contacting neighbor {0}' . format(id)) | Remove from the neighbors table all the ones that are too
old by their timestamp. | 625941b44527f215b584c23a |
def pretraga_po_visini(minimalna, maksimalna): <NEW_LINE> <INDENT> pronadjeni = [] <NEW_LINE> for o in vrednosti.okviri: <NEW_LINE> <INDENT> if minimalna is not None: <NEW_LINE> <INDENT> if o.visina < minimalna: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> if maksimalna is not None: <NEW_LINE> <INDENT> if o.visina > maksimalna: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> pronadjeni.append(o) <NEW_LINE> <DEDENT> return pronadjeni | Search by height within a range
:param minimalna: minimum height (None means no lower bound)
:param maksimalna: maximum height (None means no upper bound) | 625941b423849d37ff7b2e71
def __plotStraight(self, trace, ax, *args, **kwargs): <NEW_LINE> <INDENT> if len(trace) > 1: <NEW_LINE> <INDENT> stream = Stream(traces=trace) <NEW_LINE> if hasattr(trace[0].stats, 'preview') and trace[0].stats.preview: <NEW_LINE> <INDENT> stream = Stream(traces=stream) <NEW_LINE> stream = mergePreviews(stream) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> stream.merge(method=1) <NEW_LINE> <DEDENT> trace = stream[0] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> trace = trace[0] <NEW_LINE> <DEDENT> if hasattr(trace.stats, 'preview') and trace.stats.preview: <NEW_LINE> <INDENT> trace.data = np.ma.masked_array(trace.data) <NEW_LINE> trace.data[trace.data == -1] = np.ma.masked <NEW_LINE> dtype = trace.data.dtype <NEW_LINE> old_time_range = trace.stats.endtime - trace.stats.starttime <NEW_LINE> data = np.empty(2 * trace.stats.npts, dtype=dtype) <NEW_LINE> data[0::2] = trace.data / 2.0 <NEW_LINE> data[1::2] = -trace.data / 2.0 <NEW_LINE> trace.data = data <NEW_LINE> trace.stats.delta = old_time_range / float(trace.stats.npts - 1) <NEW_LINE> <DEDENT> calib = trace.stats.calib <NEW_LINE> max = trace.data.max() <NEW_LINE> min = trace.data.min() <NEW_LINE> if hasattr(trace.stats, 'preview') and trace.stats.preview: <NEW_LINE> <INDENT> tr_id = trace.id + ' [preview]' <NEW_LINE> <DEDENT> elif hasattr(trace, 'label'): <NEW_LINE> <INDENT> tr_id = trace.label <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> tr_id = trace.id <NEW_LINE> <DEDENT> self.stats.append([tr_id, calib * trace.data.mean(), calib * min, calib * max]) <NEW_LINE> concat = [trace] <NEW_LINE> if self.starttime != trace.stats.starttime: <NEW_LINE> <INDENT> samples = (trace.stats.starttime - self.starttime) * trace.stats.sampling_rate <NEW_LINE> temp = [np.ma.masked_all(int(samples))] <NEW_LINE> concat = temp.extend(concat) <NEW_LINE> concat = temp <NEW_LINE> <DEDENT> if self.endtime != trace.stats.endtime: <NEW_LINE> <INDENT> samples = (self.endtime - trace.stats.endtime) * trace.stats.sampling_rate <NEW_LINE> concat.append(np.ma.masked_all(int(samples))) <NEW_LINE> <DEDENT> if len(concat) > 1: <NEW_LINE> <INDENT> trace.data = np.ma.concatenate(concat) <NEW_LINE> trace.stats.starttime = self.starttime <NEW_LINE> <DEDENT> trace.data = np.require(trace.data, 'float64') * calib <NEW_LINE> ax.plot( trace.data, color=self.color, linewidth=self.linewidth, linestyle=self.linestyle) <NEW_LINE> ax.set_xlim(0, len(trace.data) - 1) | Just plots the data samples in the self.stream. Useful for smaller
datasets up to around 1000000 samples (depending on the machine it's
being run on).
Slow and high memory consumption for large datasets. | 625941b43617ad0b5ed67cdd |
def from_jd(jd): <NEW_LINE> <INDENT> jd = trunc(jd) + 0.5 <NEW_LINE> year = trunc(((30 * (jd - EPOCH)) + 10646) / 10631) <NEW_LINE> month = min(12, ceil((jd - (29 + to_jd(year, 1, 1))) / 29.5) + 1) <NEW_LINE> day = int(jd - to_jd(year, month, 1)) + 1 <NEW_LINE> return (year, month, day) | Calculate Islamic date from Julian day | 625941b43c8af77a43ae357e |
def set_level(self, newLevel): <NEW_LINE> <INDENT> self.level = newLevel | (SETTER) Sets the level
ARGS: Level (self) an instance of Level | 625941b48a43f66fc4b53e48 |
def _insert_rows_in_table(self, table_path, columns, rows): <NEW_LINE> <INDENT> cursor = arcpy.da.InsertCursor(table_path, columns) <NEW_LINE> for row in rows: <NEW_LINE> <INDENT> cursor.insertRow(row) <NEW_LINE> <DEDENT> del cursor | insert rows into a table
Parameters
----------
table_path : str
full path to the table
columns: list of str,
the columns
rows: list of lists,
the rows to insert as key/value-pairs | 625941b423e79379d52ee347 |
@login_required <NEW_LINE> @API2.route('/file/query/total/<int:file_id>') <NEW_LINE> def query_files(file_id): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> page = int(request.args.get('page',1)) <NEW_LINE> size = int(request.args.get('limit',10)) <NEW_LINE> pagination = File.query.filter_by(file_superior_dir=file_id).paginate(page,per_page=size) <NEW_LINE> file_list = pagination.items <NEW_LINE> file_data = [] <NEW_LINE> for file in file_list: <NEW_LINE> <INDENT> dic = { 'id':file.id, 'file_name':file.file_name, 'file_create_time':format_time(file.file_create_time), 'file_size':size_format(int(file.file_size)) if file.file_type!='dir' else '', 'file_superior_dir':File.query.get(file.file_superior_dir).file_name if file_id!=0 else 'root', 'file_type':file.file_type, 'file_upload_user':file.file_upload_user } <NEW_LINE> file_data.append(dic) <NEW_LINE> dic = {} <NEW_LINE> <DEDENT> father_dir = File.query.filter_by( id=file_id).first().file_superior_dir if file_id!=0 else 0 <NEW_LINE> return jsonify( code=CODE.ALLOW[0], data=file_data, now_dir=file_id, file_superior_dir=father_dir, msg='success', total=pagination.total) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> return jsonify(code=CODE.ERROR[0], data='', now_dir=file_id, msg=str(e), total=pagination.total) | File query
file_id: the file id
First, based on file_id, find the files whose parent directory it is (line 28) | 625941b4d486a94d0b98df2d
def increase_score(self): <NEW_LINE> <INDENT> self.score += 1 <NEW_LINE> self.clear() <NEW_LINE> return self.write(f"Score : {self.score}", align=ALIGN, font=FONT) | Increment the score | 625941b499fddb7c1c9de172 |
def invoke_login_callbacks(self): <NEW_LINE> <INDENT> pass | Invoke registered callback functions when the session performs a
successful relogin attempt after disconnecting from the switch. | 625941b4eab8aa0e5d26d93d |
def create_ADN(): <NEW_LINE> <INDENT> file = open("ADN.txt", "w") <NEW_LINE> for i in range(1000): <NEW_LINE> <INDENT> c = randint(0, 3) <NEW_LINE> if c == 0: <NEW_LINE> <INDENT> file.write("A=T\n") <NEW_LINE> <DEDENT> elif c == 1: <NEW_LINE> <INDENT> file.write("G=C\n") <NEW_LINE> <DEDENT> elif c == 2: <NEW_LINE> <INDENT> file.write("C=G\n") <NEW_LINE> <DEDENT> elif c == 3: <NEW_LINE> <INDENT> file.write("T=A\n") | Creates an ADN with 5% errors. | 625941b421a7993f00bc7ac7 |
def back(self): <NEW_LINE> <INDENT> return _almathswig.vectorPosition6D_back(self) | back(self) -> value_type | 625941b4a17c0f6771cbde33 |
def name(self) -> str: <NEW_LINE> <INDENT> return self._name | This event's name. | 625941b4ad47b63b2c509d6a |
def _makePackages(parent, attributes, result): <NEW_LINE> <INDENT> attrs = {} <NEW_LINE> for (name, value) in list(attributes.items()): <NEW_LINE> <INDENT> if parent is None: <NEW_LINE> <INDENT> if isinstance(value, dict): <NEW_LINE> <INDENT> module = ModuleType(name) <NEW_LINE> module.__dict__.update(_makePackages(module, value, result)) <NEW_LINE> result[name] = module <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> result[name] = value <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> if isinstance(value, dict): <NEW_LINE> <INDENT> module = ModuleType(parent.__name__ + "." + name) <NEW_LINE> module.__dict__.update(_makePackages(module, value, result)) <NEW_LINE> result[parent.__name__ + "." + name] = module <NEW_LINE> attrs[name] = module <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> attrs[name] = value <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return attrs | Construct module objects (for either modules or packages).
@param parent: L{None} or a module object which is the Python package
containing all of the modules being created by this function call. Its
name will be prepended to the name of all created modules.
@param attributes: A mapping giving the attributes of the particular module
object this call is creating.
@param result: A mapping which is populated with all created module names.
This is suitable for use in updating C{sys.modules}.
@return: A mapping of all of the attributes created by this call. This is
suitable for populating the dictionary of C{parent}.
@see: L{_install}. | 625941b473bcbd0ca4b2be5b |
def obj_create(self, bundle, **kwargs): <NEW_LINE> <INDENT> user = bundle.request.user.username <NEW_LINE> data = bundle.data <NEW_LINE> name = data.get('name', None) <NEW_LINE> version = data.get('version', '') <NEW_LINE> aliases = data.get('aliases', '') <NEW_LINE> description = data.get('description', None) <NEW_LINE> source = data.get('source', None) <NEW_LINE> reference = data.get('reference', None) <NEW_LINE> method = data.get('method', None) <NEW_LINE> campaign = data.get('campaign', None) <NEW_LINE> confidence = data.get('confidence', None) <NEW_LINE> bucket_list = data.get('bucket_list', None) <NEW_LINE> ticket = data.get('ticket', None) <NEW_LINE> result = add_new_backdoor(name, version, aliases, description=description, source=source, source_method=method, source_reference=reference, campaign=campaign, confidence=confidence, user=user, bucket_list=bucket_list, ticket=ticket) <NEW_LINE> content = {'return_code': 0, 'type': 'Backdoor', 'message': result.get('message', ''), 'id': result.get('id', '')} <NEW_LINE> if result.get('id'): <NEW_LINE> <INDENT> url = reverse('api_dispatch_detail', kwargs={'resource_name': 'backdoors', 'api_name': 'v1', 'pk': result.get('id')}) <NEW_LINE> content['url'] = url <NEW_LINE> <DEDENT> if not result['success']: <NEW_LINE> <INDENT> content['return_code'] = 1 <NEW_LINE> <DEDENT> self.crits_response(content) | Handles creating Backdoors through the API.
:param bundle: Bundle containing the information to create the Backdoor.
:type bundle: Tastypie Bundle object.
:returns: HttpResponse object. | 625941b4851cf427c661a2fa |
def display(): <NEW_LINE> <INDENT> glClear(GL_COLOR_BUFFER_BIT) <NEW_LINE> glBegin(GL_POINTS) <NEW_LINE> glColor3f(1, 0, 0) <NEW_LINE> line_dda(-20, -20, 30, 25) <NEW_LINE> glEnd() <NEW_LINE> glFlush() | Display callback function | 625941b466656f66f7cbbf89
def org_tags(dbo, username): <NEW_LINE> <INDENT> u = users.get_users(dbo, username) <NEW_LINE> realname = "" <NEW_LINE> email = "" <NEW_LINE> if len(u) > 0: <NEW_LINE> <INDENT> u = u[0] <NEW_LINE> realname = u["REALNAME"] <NEW_LINE> email = u["EMAILADDRESS"] <NEW_LINE> <DEDENT> tags = { "ORGANISATION" : configuration.organisation(dbo), "ORGANISATIONADDRESS" : configuration.organisation_address(dbo), "ORGANISATIONTELEPHONE" : configuration.organisation_telephone(dbo), "DATE" : python2display(dbo.locale, now(dbo.timezone)), "USERNAME" : username, "USERREALNAME" : realname, "USEREMAILADDRESS" : email } <NEW_LINE> return tags | Generates a list of tags from the organisation and user info | 625941b44f6381625f114825 |
def get_index(self, page_url): <NEW_LINE> <INDENT> pass | Returns: (next_page_url, [entries]) | 625941b4ac7a0e7691ed3eb9 |
def ros_setup(self): <NEW_LINE> <INDENT> RComponent.ros_setup(self) <NEW_LINE> self.pan_pos_sub = rospy.Subscriber( 'joint_pan_position_controller/command', Float64, self.pan_pos_sub_cb) <NEW_LINE> self.tilt_pos_sub = rospy.Subscriber( 'joint_tilt_position_controller/command', Float64, self.tilt_pos_sub_cb) <NEW_LINE> self.pan_speed_sub = rospy.Subscriber( 'joint_pan_speed_controller/command', Float64, self.pan_speed_sub_cb) <NEW_LINE> self.tilt_speed_sub = rospy.Subscriber( 'joint_tilt_speed_controller/command', Float64, self.tilt_speed_sub_cb) <NEW_LINE> self.ptz_sub = rospy.Subscriber( 'ptz/command', ptz, self.ptz_cb) <NEW_LINE> self.status_pub = rospy.Publisher( '~status', PantiltStatus, queue_size=10) <NEW_LINE> self.status_stamped_pub = rospy.Publisher( '~status_stamped', PantiltStatusStamped, queue_size=10) <NEW_LINE> self.joint_state_pub = rospy.Publisher( 'joint_states', JointState, queue_size=10) <NEW_LINE> self.set_max_pan_speed_server = rospy.Service('~set_max_pan_speed', set_float_value, self.set_max_pan_speed_cb) <NEW_LINE> self.set_max_tilt_speed_server = rospy.Service('~set_max_tilt_speed', set_float_value, self.set_max_tilt_speed_cb) <NEW_LINE> return 0 | Creates and inits ROS components | 625941b4c4546d3d9de72817 |
def make(): <NEW_LINE> <INDENT> return _pmt_cpp_swig.PDU_remove_all_tags_make() | make() -> PDU_remove_all_tags_sptr
Return a shared_ptr to a new instance of pmt_cpp::PDU_remove_all_tags.
To avoid accidental use of raw pointers, pmt_cpp::PDU_remove_all_tags's constructor is in a private implementation class. pmt_cpp::PDU_remove_all_tags::make is the public interface for creating new instances.
Params: (NONE) | 625941b48c0ade5d55d3e79e |
def action(self, element): <NEW_LINE> <INDENT> rsp = self.execute(self.connection.action, [element]) <NEW_LINE> return rsp | Wrapper for ncclient.manager.action
Args:
element: etree.Element sent to ncclient.manager.action
Returns:
The etree.Element returned from ncclient.manager.action | 625941b4462c4b4f79d1d4af |
def route_distance(self, individual): <NEW_LINE> <INDENT> distance = 0 <NEW_LINE> value = 0 <NEW_LINE> tour = individual + [self.origin_index] <NEW_LINE> index = 0 <NEW_LINE> p1 = p2 = None <NEW_LINE> while p2 != self.origin: <NEW_LINE> <INDENT> p1 = self.points[tour[index]] <NEW_LINE> p2 = self.points[tour[index + 1]] <NEW_LINE> distance += self.straight_line_distance(p1, p2) <NEW_LINE> index += 1 <NEW_LINE> <DEDENT> return distance | Determine the distance for the entire route | 625941b4046cf37aa974cb2a |
def from_dict(self, d): <NEW_LINE> <INDENT> for key, val in d.items(): <NEW_LINE> <INDENT> if len(val) == 1: <NEW_LINE> <INDENT> self.set_key_as(key, MongoFilters.EQUALS, val[0]) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> self.set_key_as(key, MongoFilters.GREATER_THAN_OR_EQUAL_TO, val[0]) <NEW_LINE> self.set_key_as(key, MongoFilters.LESS_THAN_OR_EQUAL_TO, val[1]) <NEW_LINE> <DEDENT> <DEDENT> return self | :param d: dict should have the following format: key -> val. Each `val` must be a list: if the list has 1 item,
the key must equal the item; otherwise the key is between the first and second elements of the list | 625941b491af0d3eaac9b7f2
def __init__(self, galaxy, system=0, position=0, type=Types.planet): <NEW_LINE> <INDENT> self.type = type <NEW_LINE> try: <NEW_LINE> <INDENT> self.parse(galaxy) <NEW_LINE> <DEDENT> except Exception: <NEW_LINE> <INDENT> self.galaxy = galaxy <NEW_LINE> self.system = system <NEW_LINE> self.position = position <NEW_LINE> self.convert_to_ints() | First parameter can be a string to be parsed e.g: [1:259:12] or the galaxy.
If it's the galaxy, system and planet must also be supplied. | 625941b471ff763f4b54946d |
def _deserialize(filename): <NEW_LINE> <INDENT> assert os.path.exists(filename), ("Must use a valid filename") <NEW_LINE> with open(filename, 'rb') as f: <NEW_LINE> <INDENT> data = pickle.load(f) <NEW_LINE> <DEDENT> return data | Deserialize a dataset
Args:
filename: `str` name of file to unpickle
Returns:
`datasets.DataSets` object
Raises:
AssertionError | 625941b43539df3088e2e12a |
def compute_precision(pred1, pred2): <NEW_LINE> <INDENT> return sklearn.metrics.precision_score(pred1, pred2) | Compute classifier precision | 625941b48a349b6b435e7f54 |
@get('/:hypervisor/:host/guests/:guest_id') <NEW_LINE> def guest_info(hypervisor, host, guest_id): <NEW_LINE> <INDENT> response.content_type = "application/json" <NEW_LINE> manager = create_manager(hypervisor, host) <NEW_LINE> guest_id = manager.guest_info(guest_id) <NEW_LINE> manager.logout() <NEW_LINE> return json.dumps(guest_id) | Get guest information
::
GET /:hypervisor/:host/guests/:guest_id | 625941b4460517430c393f6f |
def test_quotas(self): <NEW_LINE> <INDENT> def test_counter_day(counter_day): <NEW_LINE> <INDENT> for day in counter_day: <NEW_LINE> <INDENT> if counter_day[day] > 2: <NEW_LINE> <INDENT> return False <NEW_LINE> <DEDENT> <DEDENT> return True <NEW_LINE> <DEDENT> counter_total = 0 <NEW_LINE> counter_r = 0 <NEW_LINE> counter_stm_r = 0 <NEW_LINE> counter_stm = 0 <NEW_LINE> counter_B = 0 <NEW_LINE> counter_C = 0 <NEW_LINE> counter_day = {'lundi':0, 'mardi':0, 'mercredi':0, 'jeudi':0, 'vendredi':0} <NEW_LINE> for schedule_defined in self.defined_schedules: <NEW_LINE> <INDENT> if self in schedule_defined.available_tellers_orig['B']: <NEW_LINE> <INDENT> counter_B += 1 <NEW_LINE> <DEDENT> if self in schedule_defined.available_tellers_orig['C']: <NEW_LINE> <INDENT> counter_C += 1 <NEW_LINE> <DEDENT> if schedule_defined.category == 'main': <NEW_LINE> <INDENT> counter_day[schedule_defined.day] += schedule_defined.weight <NEW_LINE> counter_total += schedule_defined.weight <NEW_LINE> <DEDENT> if schedule_defined.category == 'main_r': <NEW_LINE> <INDENT> counter_r += schedule_defined.weight <NEW_LINE> <DEDENT> if schedule_defined.category == 'stm': <NEW_LINE> <INDENT> counter_day[schedule_defined.day] += schedule_defined.weight <NEW_LINE> counter_stm += schedule_defined.weight <NEW_LINE> counter_total += schedule_defined.weight <NEW_LINE> <DEDENT> if schedule_defined.category == 'stm_r': <NEW_LINE> <INDENT> counter_r += schedule_defined.weight <NEW_LINE> counter_stm_r += schedule_defined.weight <NEW_LINE> <DEDENT> <DEDENT> if counter_total > self.quotas['total'] or counter_r > self.quotas['total_r'] or counter_stm > self.quotas['stm_max'] or counter_stm_r > self.quotas['stm_max_r'] or counter_B > 1 or counter_C > 1 or test_counter_day(counter_day) is False: <NEW_LINE> <INDENT> if self.name == '--Raphaël R.': <NEW_LINE> <INDENT> print(counter_total) <NEW_LINE> print(counter_r) <NEW_LINE> print(counter_stm) <NEW_LINE> print(counter_stm_r) <NEW_LINE> print(counter_B) <NEW_LINE> print(counter_C) <NEW_LINE> print(counter_day) <NEW_LINE> <DEDENT> return False <NEW_LINE> <DEDENT> return True | OBJECTIVE: Check whether the quotas are respected.
- return: True/False | 625941b4e1aae11d1e749a92 |
def crypt_xor(plainbytes, keybytes): <NEW_LINE> <INDENT> return bytes([b1 ^ b2 for b1, b2 in zip(plainbytes, cycle(keybytes))]) | Take a plaintext bytes object and xor it with the given key bytes. Key
will be cycled if it is shorter than plaintext. Returns bytes. | 625941b4dc8b845886cb5313 |
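Because XOR with a repeating key is its own inverse, applying crypt_xor twice with the same key recovers the plaintext. A round-trip sketch (key and message are illustrative):

```python
from itertools import cycle  # crypt_xor depends on itertools.cycle

ciphertext = crypt_xor(b'attack at dawn', b'key')
# Decrypting is just encrypting again with the same cycled key.
assert crypt_xor(ciphertext, b'key') == b'attack at dawn'
```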
def encode(self): <NEW_LINE> <INDENT> packet = struct.pack('>BBBBBB', self.sub_function_code, self.read_code, self.conformity, self.more_follows, self.next_object_id, self.number_of_objects) <NEW_LINE> for (object_id, data) in self.information.iteritems(): <NEW_LINE> <INDENT> packet += struct.pack('>BB', object_id, len(data)) <NEW_LINE> packet += data <NEW_LINE> <DEDENT> return packet | Encodes the response
:returns: The byte encoded message | 625941b45166f23b2e1a4f38 |
def blit(self, other, x, y): <NEW_LINE> <INDENT> dh, dw, dc = self._data.shape <NEW_LINE> sh, sw, sc = other._data.shape <NEW_LINE> if sc != dc: <NEW_LINE> <INDENT> raise ValueError("source image has incorrect format") <NEW_LINE> <DEDENT> sw = min(sw, dw - x) <NEW_LINE> sh = min(sh, dh - y) <NEW_LINE> self._data[y: y + sh, x: x + sw, :] = other._data <NEW_LINE> self.modified = time.time() | Copy the contents of an Image to another.
The target image may have a different size. | 625941b45f7d997b8717487a |
def factor(self): <NEW_LINE> <INDENT> token = self.current_token <NEW_LINE> if token.type == PLUS: <NEW_LINE> <INDENT> self.eat(PLUS) <NEW_LINE> return UnaryOp(token, self.factor()) <NEW_LINE> <DEDENT> if token.type == MINUS: <NEW_LINE> <INDENT> self.eat(MINUS) <NEW_LINE> return UnaryOp(token, self.factor()) <NEW_LINE> <DEDENT> if token.type == INTEGER: <NEW_LINE> <INDENT> self.eat(INTEGER) <NEW_LINE> return Num(token) <NEW_LINE> <DEDENT> if token.type == LPAREN: <NEW_LINE> <INDENT> self.eat(LPAREN) <NEW_LINE> node = self.expr() <NEW_LINE> self.eat(RPAREN) <NEW_LINE> return node <NEW_LINE> <DEDENT> return self.variable() | factor : PLUS factor
| MINUS factor
| INTEGER
| LPAREN expr RPAREN
| variable | 625941b44e4d5625662d41bd |
def test_gpu_sort_cpu(sample_data, args): <NEW_LINE> <INDENT> df = sample_data.copy() <NEW_LINE> if bool(args.gpus) and not args.has_gpu: <NEW_LINE> <INDENT> with pytest.raises(ImportError): <NEW_LINE> <INDENT> import cudf <NEW_LINE> <DEDENT> return <NEW_LINE> <DEDENT> dc_ddf = dq.transform.scatter_and_gpu(df, args) <NEW_LINE> is_dc_dd(dc_ddf, args.gpus) <NEW_LINE> df = df.set_index("item_id").sort_index() <NEW_LINE> if bool(args.gpus): <NEW_LINE> <INDENT> ddf = dq.transform.gpu_sort_cpu(dc_ddf, "item_id") <NEW_LINE> print(ddf.compute()) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> ddf = dc_ddf.set_index("item_id") <NEW_LINE> print(ddf.compute()) <NEW_LINE> <DEDENT> assert ddf.compute().equals(df) | create a dc_dd using scatter_and_gpu.
If gpus=1, scatter_and_gpu will create a
dask_cudf.DataFrame, then it will be sorted
and sent back to the CPU. If gpus=0, we'll
do the same but all on CPU. | 625941b4cc0a2c11143dcc78 |
def tabularize(columns, rows): <NEW_LINE> <INDENT> col_lens = [l for l in map(len, columns)] <NEW_LINE> for row in rows: <NEW_LINE> <INDENT> if isinstance(row, list): <NEW_LINE> <INDENT> col_lens = [max(col_lens[i], len(row[i])) for i in range(len(row))] <NEW_LINE> <DEDENT> <DEDENT> divider = '+-{0}-+'.format('-+-'.join(['-'*l for l in col_lens])) <NEW_LINE> strongdivider = '+={0}=+'.format('=+='.join(['='*l for l in col_lens])) <NEW_LINE> semidivider = '| {0} +-{1}-+'.format( ' '*col_lens[0], '-+-'.join(['-'*l for l in col_lens[1:]])) <NEW_LINE> columns = [name.center(width) for name, width in zip(columns, col_lens)] <NEW_LINE> heading = '| {0} |'.format(' | '.join(columns)) <NEW_LINE> report = [divider] + [heading] + [divider] <NEW_LINE> for row in rows: <NEW_LINE> <INDENT> if row == 'divider': <NEW_LINE> <INDENT> report += [divider] <NEW_LINE> <DEDENT> elif row == 'strongdivider': <NEW_LINE> <INDENT> report += [strongdivider] <NEW_LINE> <DEDENT> elif row == 'semidivider': <NEW_LINE> <INDENT> report += [semidivider] <NEW_LINE> <DEDENT> elif type(row) is str: <NEW_LINE> <INDENT> report += [row] <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> row = [name.ljust(width) for name, width in zip(row, col_lens)] <NEW_LINE> report += ['| {0} |'.format(' | '.join(row))] <NEW_LINE> <DEDENT> <DEDENT> return '\r\n'.join(report) + '\r\n' | Formats data like a table | 625941b4e8904600ed9f1d09 |
def ICA(*args, **kwargs): <NEW_LINE> <INDENT> if 'random_state' not in kwargs: <NEW_LINE> <INDENT> kwargs['random_state'] = 0 <NEW_LINE> <DEDENT> return _ICA(*args, **kwargs) | Fix the random state in tests. | 625941b42eb69b55b151c689 |
def get_lambda_paths(self): <NEW_LINE> <INDENT> paths = [] <NEW_LINE> dir_path = self.get_lambdas_dir() <NEW_LINE> if dir_path is None: <NEW_LINE> <INDENT> return paths <NEW_LINE> <DEDENT> for base_path in os.listdir(dir_path): <NEW_LINE> <INDENT> if base_path.startswith(os.curdir): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> path = os.path.join(dir_path, base_path) <NEW_LINE> if not os.path.isfile(path): <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> paths.append(path) <NEW_LINE> <DEDENT> return paths | Return the paths to the lambdas in the lambdas directory.
Returns all paths that are files and not hidden. | 625941b48e05c05ec3eea150 |
def __set_trade_need_info(self, json_data): <NEW_LINE> <INDENT> for account_info in json_data['item']: <NEW_LINE> <INDENT> if account_info['stock_account'].startswith('A') or account_info['stock_account'].startswith('B'): <NEW_LINE> <INDENT> if account_info['exchange_type'].isdigit(): <NEW_LINE> <INDENT> self.__sh_exchange_type = account_info['exchange_type'] <NEW_LINE> <DEDENT> self.__sh_stock_account = account_info['stock_account'] <NEW_LINE> log.debug('sh_A stock account %s' % self.__sh_stock_account) <NEW_LINE> <DEDENT> elif account_info['stock_account'].startswith('0'): <NEW_LINE> <INDENT> self.__sz_exchange_type = account_info['exchange_type'] <NEW_LINE> self.__sz_stock_account = account_info['stock_account'] <NEW_LINE> log.debug('sz_B stock account %s' % self.__sz_stock_account) <NEW_LINE> <DEDENT> <DEDENT> self.__fund_account = json_data['fund_account'] <NEW_LINE> self.__client_risklevel = json_data['branch_no'] <NEW_LINE> self.__op_station = json_data['op_station'] <NEW_LINE> self.__trdpwd = json_data['trdpwd'] <NEW_LINE> self.__uid = json_data['uid'] <NEW_LINE> self.__branch_no = json_data['branch_no'] | Set the basic parameters needed for trading
:param json_data: the JSON data returned after a successful login | 625941b4d7e4931a7ee9dcfb
def view_batch(self): <NEW_LINE> <INDENT> for batch in self.trainloader: <NEW_LINE> <INDENT> batch[0] = batch[0].view(64,28,28).view(8,8,28,28).transpose(1,2).reshape(8*28,8*28) <NEW_LINE> batch[1] = batch[1].reshape(8,8).numpy() <NEW_LINE> return batch | TODO: Display first batch of images from trainloader in 8x8 grid
Do not make calls to plt.imshow() here
Return:
1) A float32 numpy array (of dim [28*8, 28*8]), containing a tiling of the batch images,
place the first 8 images on the first row, the second 8 on the second row, and so on
2) An int 8x8 numpy array of labels corresponding to this tiling | 625941b48c3a87329515819d |
def to_dict(self, count=False, **kwargs): <NEW_LINE> <INDENT> if self.filter: <NEW_LINE> <INDENT> d = { "query": { "filtered": { "query": self.query.to_dict(), "filter": self.filter.to_dict() } } } <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> d = {"query": self.query.to_dict()} <NEW_LINE> <DEDENT> if self.post_filter: <NEW_LINE> <INDENT> d['post_filter'] = self.post_filter.to_dict() <NEW_LINE> <DEDENT> if not count: <NEW_LINE> <INDENT> if self.aggs.aggs: <NEW_LINE> <INDENT> d.update(self.aggs.to_dict()) <NEW_LINE> <DEDENT> if self._sort: <NEW_LINE> <INDENT> d['sort'] = self._sort <NEW_LINE> <DEDENT> d.update(self._extra) <NEW_LINE> if self._fields is not None: <NEW_LINE> <INDENT> d['fields'] = self._fields <NEW_LINE> <DEDENT> if self._highlight: <NEW_LINE> <INDENT> d['highlight'] = {'fields': self._highlight} <NEW_LINE> d['highlight'].update(self._highlight_opts) <NEW_LINE> <DEDENT> if self._suggest: <NEW_LINE> <INDENT> d['suggest'] = self._suggest <NEW_LINE> <DEDENT> <DEDENT> d.update(kwargs) <NEW_LINE> return d | Serialize the search into the dictionary that will be sent over as the
request's body.
:arg count: a flag to specify we are interested in a body for count -
no aggregations, no pagination bounds etc.
All additional keyword arguments will be included into the dictionary. | 625941b4d99f1b3c44c6737f |
def move_border( self, width: Tuple[float, float], linking_side: str ) -> ConversionLanelet: <NEW_LINE> <INDENT> self.lanelet.move_border( mirror_border=linking_side, mirror_interval=self.change_interval, distance=width, adjacent_lanelet=self.adjacent_lanelet, ) <NEW_LINE> return self.lanelet | Move border of self.lanelet.
Args:
width: Start and end value of new width of lanelet.
linking_side: Side on which the split/join happens (either "left" or "right").
Returns:
Resulting lanelet after border movement. | 625941b485dfad0860c3ac38 |
def shows_memory_usage(self, screen, width, height, memory): <NEW_LINE> <INDENT> memory_surface = pygame.surface.Surface((width, height // 3)) <NEW_LINE> pygame.draw.rect(memory_surface, self.BLUE, (20, 50, (width - 2 * 20), 70)) <NEW_LINE> bar_width = (width - 2 * 20) * memory / 100 <NEW_LINE> pygame.draw.rect(memory_surface, self.RED, (20, 50, bar_width, 70)) <NEW_LINE> screen.blit(memory_surface, (0, 0)) <NEW_LINE> bar_text = 'Uso de Memória (Em uso: {}%):'.format(memory) <NEW_LINE> text = self.FONT.render(bar_text, 1, self.WHITE) <NEW_LINE> screen.blit(text, (20, 20)) | Creates a pygame surface and draws a blue and a red rectangle
to show memory usage | 625941b4a8ecb033257d2eb5 |
def test_choice_sort(): <NEW_LINE> <INDENT> assert(choice_sort([3, 2, 5, 7, 3, 4, 7, 0, 3, 1, 3, 6]) == [0, 1, 2, 3, 3, 3, 3, 4, 5, 6, 7, 7]) | Tests | 625941b4d4950a0f3b08c13a |
def piecewise(list_of_pairs, var=None): <NEW_LINE> <INDENT> return PiecewisePolynomial(list_of_pairs, var=var) | Returns a piecewise function from a list of (interval, function)
pairs.
``list_of_pairs`` is a list of pairs (I, fcn), where
fcn is a Sage function (such as a polynomial over RR, or functions
using the lambda notation), and I is an interval such as I = (1,3).
Two consecutive intervals must share a common endpoint.
If the optional ``var`` is specified, then any symbolic expressions
in the list will be converted to symbolic functions using
``fcn.function(var)``. (This says which variable is considered to
be "piecewise".)
We assume that these definitions are consistent (ie, no checking is
done).
EXAMPLES::
sage: f1(x) = -1
sage: f2(x) = 2
sage: f = Piecewise([[(0,pi/2),f1],[(pi/2,pi),f2]])
sage: f(1)
-1
sage: f(3)
2
sage: f = Piecewise([[(0,1),x], [(1,2),x^2]], x); f
Piecewise defined function with 2 parts, [[(0, 1), x |--> x], [(1, 2), x |--> x^2]]
sage: f(0.9)
0.900000000000000
sage: f(1.1)
1.21000000000000 | 625941b49f2886367277a674 |
def test_access_admin(self): <NEW_LINE> <INDENT> client = Client() <NEW_LINE> response = client.post( '/admin/login/', {'username': 'admin', 'password': 'secrettest'}) <NEW_LINE> self.assertEqual(response.status_code, 302) | If the user is authenticated, redirect to the profile page.
Expecting status_code = 302 | 625941b476d4e153a657e910
def calculate_min_angular_dist(Ra_catalog_degree, Dec_catalog_degree, Av_table_lines, tolerance): <NEW_LINE> <INDENT> Ra_catalog_rad = deg_to_rad(Ra_catalog_degree) <NEW_LINE> Dec_catalog_rad = deg_to_rad(Dec_catalog_degree) <NEW_LINE> minlist, min_info_list = [], [] <NEW_LINE> for line in Av_table_lines: <NEW_LINE> <INDENT> line = line.split() <NEW_LINE> if ((Ra_catalog_degree - float(line[Av_coor_ID[0]])) > -tolerance) and ((Ra_catalog_degree - float(line[Av_coor_ID[0]])) < tolerance) and ((Dec_catalog_degree - float(line[Av_coor_ID[1]])) > -tolerance) and ((Dec_catalog_degree - float(line[Av_coor_ID[1]])) < tolerance): <NEW_LINE> <INDENT> Ra_table_rad, Dec_table_rad = deg_to_rad(float(line[0])), deg_to_rad(float(line[1])) <NEW_LINE> diffX = cos(Ra_catalog_rad) * cos(Dec_catalog_rad) - cos(Ra_table_rad) * cos(Dec_table_rad) <NEW_LINE> diffY = cos(Ra_catalog_rad) * sin(Dec_catalog_rad) - cos(Ra_table_rad) * sin(Dec_table_rad) <NEW_LINE> diffZ = sin(Dec_catalog_rad) - sin(Dec_table_rad) <NEW_LINE> SQdistance = diffX ** 2 + diffY ** 2 + diffZ ** 2 <NEW_LINE> minlist.append(SQdistance) <NEW_LINE> min_info_list.append(line) <NEW_LINE> <DEDENT> <DEDENT> return minlist, min_info_list | This is to calculate angular distance from input to point on Av table
minlist: Store distance of possible point sources from Av table
min_info_list: Store information of all possible points | 625941b457b8e32f52483280 |
def load_distro(source_uri): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> if os.path.isfile(source_uri): <NEW_LINE> <INDENT> with open(source_uri) as f: <NEW_LINE> <INDENT> raw_data = yaml.safe_load(f.read()) <NEW_LINE> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> request = urlopen(source_uri) <NEW_LINE> <DEDENT> except Exception as e: <NEW_LINE> <INDENT> raise ResourceNotFound('%s (%s)' % (str(e), source_uri)) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> raw_data = yaml.safe_load(request) <NEW_LINE> <DEDENT> except ValueError: <NEW_LINE> <INDENT> raise ResourceNotFound(source_uri) <NEW_LINE> <DEDENT> <DEDENT> if not type(raw_data) == dict: <NEW_LINE> <INDENT> raise InvalidDistro("Distro must be a dictionary: %s" % (source_uri)) <NEW_LINE> <DEDENT> <DEDENT> except yaml.YAMLError as e: <NEW_LINE> <INDENT> raise InvalidDistro(str(e)) <NEW_LINE> <DEDENT> try: <NEW_LINE> <INDENT> version = _distro_version(raw_data.get('version', '0')) <NEW_LINE> release_name = raw_data['release'] <NEW_LINE> stacks = _load_distro_stacks(raw_data, release_name) <NEW_LINE> variants = _load_variants(raw_data.get('variants', {}), stacks) <NEW_LINE> return Distro(stacks, variants, release_name, version, raw_data) <NEW_LINE> <DEDENT> except KeyError as e: <NEW_LINE> <INDENT> raise InvalidDistro("distro is missing required '%s' key" % (str(e))) | :param source_uri: source URI of distro file, or path to distro
file. Filename has precedence in resolution.
:raises: :exc:`InvalidDistro` If distro file is invalid
:raises: :exc:`ResourceNotFound` If file at *source_uri* is not found | 625941b44e696a04525c9234 |
def fixDeltaFlag(self, node, networkLayer, validLines, reason, reasonType=3, geomType=None): <NEW_LINE> <INDENT> flipCandidates = self.getLineIdFromReason(reason=reason, reasonType=reasonType) <NEW_LINE> for line in self.nodeDict[node]['start'] + self.nodeDict[node]['end']: <NEW_LINE> <INDENT> lineId = str(line.id()) <NEW_LINE> if lineId in flipCandidates and line not in validLines: <NEW_LINE> <INDENT> self.flipSingleLine(line=line, layer=networkLayer, geomType=geomType) <NEW_LINE> return line <NEW_LINE> <DEDENT> <DEDENT> return None | Tries to fix nodes flagged because of their delta angles.
:param node: (QgsPoint) invalid node.
:param network: (QgsVectorLayer) contains network lines.
:param validLines: (list-of-QgsFeature) lines already validated.
:param reason: (str) reason of node invalidation.
:param reasonType: (int) code for invalidation reason.
:param geomType: (int) code for the layer that contains the network lines.
:return: (QgsFeature) line to be flipped. If no line is identified as flippable, None is returned. | 625941b49c8ee82313fbb55d |
def listen(client, main): <NEW_LINE> <INDENT> @client.event <NEW_LINE> async def on_message(message): <NEW_LINE> <INDENT> main.message_handler(message, False) | Sends a received message to the message handler | 625941b430c21e258bdfa27c
def get_options(parser): <NEW_LINE> <INDENT> parser.add_option("-u", "--user", dest="username", default=None, help="User name.") <NEW_LINE> parser.add_option("-f", "--file", dest="infile", default=None, help="File with tracks to read from.") <NEW_LINE> parser.add_option("-s", "--server", dest="server", default="libre.fm", help="Server to send tracks to, default is libre.fm") <NEW_LINE> parser.add_option("-t", "--type", dest="infotype", default=None, help="Type of tracks you are about to import, loved or banned.") <NEW_LINE> options, args = parser.parse_args() <NEW_LINE> if not options.username: <NEW_LINE> <INDENT> sys.exit('User name not specified, see --help') <NEW_LINE> <DEDENT> if not options.infotype in ['loved', 'unloved', 'banned', 'unbanned']: <NEW_LINE> <INDENT> sys.exit('No or invalid type of track specified, see --help') <NEW_LINE> <DEDENT> if not options.infile: <NEW_LINE> <INDENT> sys.exit('File with tracks not specified, see --help') <NEW_LINE> <DEDENT> if options.server == 'libre.fm': <NEW_LINE> <INDENT> options.server = 'http://alpha.libre.fm' <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if options.server[:7] != 'http://': <NEW_LINE> <INDENT> options.server = 'http://%s' % options.server <NEW_LINE> <DEDENT> <DEDENT> return options.server, options.username, options.infile, options.infotype | Define command line options. | 625941b4566aa707497f435b |
def rotateRight(self, head, k): <NEW_LINE> <INDENT> if not head: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> n = 0 <NEW_LINE> cursor = head <NEW_LINE> while cursor: <NEW_LINE> <INDENT> cursor = cursor.next <NEW_LINE> n += 1 <NEW_LINE> <DEDENT> k = k%n <NEW_LINE> if k==0: return head <NEW_LINE> fast = slow = head <NEW_LINE> for i in range(k): <NEW_LINE> <INDENT> if fast: <NEW_LINE> <INDENT> fast = fast.next <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> <DEDENT> preslow = None <NEW_LINE> prefast = None <NEW_LINE> while fast: <NEW_LINE> <INDENT> prefast = fast <NEW_LINE> fast = fast.next <NEW_LINE> preslow = slow <NEW_LINE> slow = slow.next <NEW_LINE> <DEDENT> if preslow: <NEW_LINE> <INDENT> prefast.next = head <NEW_LINE> preslow.next = None <NEW_LINE> return slow <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> return head | :type head: ListNode
:type k: int
:rtype: ListNode | 625941b4e5267d203edcda81 |
def accounts_account_id_portins_orderid_loas_fileid_delete_with_http_info(self, account_id, orderid, fileid, **kwargs): <NEW_LINE> <INDENT> all_params = ['account_id', 'orderid', 'fileid'] <NEW_LINE> all_params.append('callback') <NEW_LINE> all_params.append('_return_http_data_only') <NEW_LINE> params = locals() <NEW_LINE> for key, val in iteritems(params['kwargs']): <NEW_LINE> <INDENT> if key not in all_params: <NEW_LINE> <INDENT> raise TypeError( "Got an unexpected keyword argument '%s'" " to method accounts_account_id_portins_orderid_loas_fileid_delete" % key ) <NEW_LINE> <DEDENT> params[key] = val <NEW_LINE> <DEDENT> del params['kwargs'] <NEW_LINE> if ('account_id' not in params) or (params['account_id'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `account_id` when calling `accounts_account_id_portins_orderid_loas_fileid_delete`") <NEW_LINE> <DEDENT> if ('orderid' not in params) or (params['orderid'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `orderid` when calling `accounts_account_id_portins_orderid_loas_fileid_delete`") <NEW_LINE> <DEDENT> if ('fileid' not in params) or (params['fileid'] is None): <NEW_LINE> <INDENT> raise ValueError("Missing the required parameter `fileid` when calling `accounts_account_id_portins_orderid_loas_fileid_delete`") <NEW_LINE> <DEDENT> resource_path = '/accounts/{accountId}/portins/{orderid}/loas/{fileid}'.replace('{format}', 'json') <NEW_LINE> path_params = {} <NEW_LINE> if 'account_id' in params: <NEW_LINE> <INDENT> path_params['accountId'] = params['account_id'] <NEW_LINE> <DEDENT> if 'orderid' in params: <NEW_LINE> <INDENT> path_params['orderid'] = params['orderid'] <NEW_LINE> <DEDENT> if 'fileid' in params: <NEW_LINE> <INDENT> path_params['fileid'] = params['fileid'] <NEW_LINE> <DEDENT> query_params = {} <NEW_LINE> header_params = {} <NEW_LINE> form_params = [] <NEW_LINE> local_var_files = {} <NEW_LINE> body_params = None <NEW_LINE> header_params['Accept'] = self.api_client. select_header_accept([]) <NEW_LINE> if not header_params['Accept']: <NEW_LINE> <INDENT> del header_params['Accept'] <NEW_LINE> <DEDENT> header_params['Content-Type'] = self.api_client. select_header_content_type([]) <NEW_LINE> auth_settings = [] <NEW_LINE> return self.api_client.call_api(resource_path, 'DELETE', path_params, query_params, header_params, body=body_params, post_params=form_params, files=local_var_files, response_type=None, auth_settings=auth_settings, callback=params.get('callback'), _return_http_data_only=params.get('_return_http_data_only')) | Retrieves the list of the loa (and other) files associated with the portin order
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.accounts_account_id_portins_orderid_loas_fileid_delete_with_http_info(account_id, orderid, fileid, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str account_id: (required)
:param str orderid: (required)
:param str fileid: (required)
:return: None
If the method is called asynchronously,
returns the request thread. | 625941b47c178a314d6ef237 |
def push(self,item): <NEW_LINE> <INDENT> return self.items.append(item) | Add item to end of list and return nothing. Run time for this method is O(1) or constant time | 625941b48e7ae83300e4adac |
def do_exit(self, args): <NEW_LINE> <INDENT> return True | Exits the easier68k run sub-cli | 625941b4167d2b6e3121897e |
def detect_candidates(seq, sec, name, strain, start, end, parent_p, parent_m, strand, args_term, p_pos, m_pos): <NEW_LINE> <INDENT> term_len = 2 * args_term.max_stem + 2 * ( args_term.max_stem * args_term.miss_rate) + args_term.max_loop <NEW_LINE> cands = [] <NEW_LINE> nts = {"ut": 0, "no_ut": 0, "seq_num": 0, "detect": False} <NEW_LINE> num_sec = 0 <NEW_LINE> for st in reversed(sec): <NEW_LINE> <INDENT> if (st == "(") or (not nts["detect"]): <NEW_LINE> <INDENT> nts = {"ut": 0, "no_ut": 0, "seq_num": 0, "detect": False} <NEW_LINE> <DEDENT> if (st == ")") and (not nts["detect"]): <NEW_LINE> <INDENT> check_u(seq, num_sec, nts, args_term) <NEW_LINE> if nts["ut"] >= args_term.at_tail: <NEW_LINE> <INDENT> stop = len(seq) - num_sec + nts["seq_num"] <NEW_LINE> if stop > 10: <NEW_LINE> <INDENT> term_features = {"st_pos": 0, "rights": 0, "lefts": 0, "tmp_miss": 0, "real_miss": 0, "loop": 0, "r_stem": 0, "l_stem": 0} <NEW_LINE> detects = {"detect_r": False, "detect_l": False, "conflict": False} <NEW_LINE> term_features, detects = check_sec(sec, stop + 1) <NEW_LINE> if detects["conflict"] is False: <NEW_LINE> <INDENT> total_length = term_features["st_pos"] - nts["seq_num"] <NEW_LINE> term_features["l_stem"] = ( total_length - term_features["r_stem"] - term_features["loop"]) <NEW_LINE> if (total_length <= term_len) and ( term_features["loop"] <= args_term.max_loop) and ( term_features["loop"] >= args_term.min_loop) and ( ((term_features["r_stem"] + term_features["l_stem"] - term_features["real_miss"]) / 2) >= args_term.min_stem) and ( ((term_features["r_stem"] + term_features["l_stem"] - term_features["real_miss"]) / 2) <= args_term.max_stem): <NEW_LINE> <INDENT> nts["detect"] = True <NEW_LINE> if strand == "+": <NEW_LINE> <INDENT> import_candidate( cands, term_features, strain, start + (len(sec[0:stop + 1]) - term_features["st_pos"]) - 1, start + stop, nts["ut"], name, total_length, strand, parent_p, parent_m, p_pos, m_pos) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> import_candidate( cands, term_features, strain, end - (stop), end - (len(sec[0:stop + 1]) - term_features["st_pos"]) + 1, nts["ut"], name, total_length, strand, parent_p, parent_m, p_pos, m_pos) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> <DEDENT> num_sec += 1 <NEW_LINE> <DEDENT> return cands | check the criteria of sec str of terminator | 625941b4287bf620b61d3850 |
def _make_read_only(self, access_off=True, exclude_list=None): <NEW_LINE> <INDENT> super(BaseWindow, self)._make_read_only( access_off, self._mro_exclude_list + (exclude_list or [])) | This method controls the window's "read-only" mode
.. note::
Descendants may extend the self._mro_exclude_list -
the list of visual components that will not be
locked in "read-only" mode.
Thus the method usually does not need to be
overridden - the exclusion list is sufficient
:param access_off: True/False - enables/disables the mode
:type access_off: bool
:param exclude_list: list of components that will not be locked
:type exclude_list: list | 625941b4cdde0d52a9e52e0e |
def test_cap_home(self): <NEW_LINE> <INDENT> term = self._terminal <NEW_LINE> self._check_cap_home((0, 0)) <NEW_LINE> self._put_string(['t'] * term._right_most, (0, 0)) <NEW_LINE> self._check_cap_home((term._right_most, term._bottom_most)) <NEW_LINE> rand_x = random.randint(1, term._right_most - 1) <NEW_LINE> rand_y = random.randint(1, term._bottom_most - 1) <NEW_LINE> self._check_cap_home((rand_x, rand_y)) | The terminal should have the possibility to move the cursor to the
home position. | 625941b47047854f462a11ee |
def cached_property(func): <NEW_LINE> <INDENT> def get(self): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> return self._property_cache[func] <NEW_LINE> <DEDENT> except AttributeError: <NEW_LINE> <INDENT> self._property_cache = {} <NEW_LINE> x = self._property_cache[func] = func(self) <NEW_LINE> return x <NEW_LINE> <DEDENT> except KeyError: <NEW_LINE> <INDENT> x = self._property_cache[func] = func(self) <NEW_LINE> return x <NEW_LINE> <DEDENT> <DEDENT> return property(get) | http://code.activestate.com/recipes/576563-cached-property/ | 625941b499cbb53fe67929c7
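A usage sketch for the cached_property record above: the decorated method runs once per instance, after which reads come from the per-instance _property_cache dict (the Circle class is illustrative):

```python
class Circle(object):
    def __init__(self, radius):
        self.radius = radius

    @cached_property
    def area(self):
        print('computing area...')  # printed only on the first access
        return 3.14159 * self.radius ** 2

c = Circle(2)
c.area  # first access: computes and stores in c._property_cache
c.area  # second access: returned straight from the cache
```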
def get_json(self, encoding="utf8"): <NEW_LINE> <INDENT> body = self.request.get("BODY", "{}") <NEW_LINE> def encode_hook(pairs): <NEW_LINE> <INDENT> new_pairs = [] <NEW_LINE> for key, value in pairs.iteritems(): <NEW_LINE> <INDENT> if isinstance(key, six.string_types): <NEW_LINE> <INDENT> key = key.encode(encoding) <NEW_LINE> <DEDENT> if isinstance(value, six.string_types): <NEW_LINE> <INDENT> value = value.encode(encoding) <NEW_LINE> <DEDENT> new_pairs.append((key, value)) <NEW_LINE> <DEDENT> return dict(new_pairs) <NEW_LINE> <DEDENT> return json.loads(body, object_hook=encode_hook) | Extracts the JSON from the request
| 625941b423849d37ff7b2e72 |
def file_list(ftp, since=1993): <NEW_LINE> <INDENT> return daily_file_list(ftp) + quarterly_file_list(since) | Complete file list (idx+zip) from 1993 to current quarter | 625941b41f5feb6acb0c4935 |
def _count_listings(self): <NEW_LINE> <INDENT> cursor = self.db.cursor() <NEW_LINE> sql = "SELECT count(*) AS count FROM listings" + self._sql_filters() <NEW_LINE> cursor.execute(sql) <NEW_LINE> return cursor.fetchall()[0][0] | Count the number of listings given the provided set of filters.
Returns:
int: The number of listings that match the user's filter parameters. | 625941b4b7558d58953c4cfc |
def wait(self): <NEW_LINE> <INDENT> if self._started: <NEW_LINE> <INDENT> raise RequestPool.RequestPoolError("Can't re-start a RequestPool that was already started.") <NEW_LINE> <DEDENT> for req in self._requests: <NEW_LINE> <INDENT> req.submit() <NEW_LINE> <DEDENT> for req in self._requests: <NEW_LINE> <INDENT> req.block() <NEW_LINE> <DEDENT> self._requests = set() | If the pool hasn't been submitted yet, submit it.
Then wait for all requests in the pool to complete in the simplest way possible. | 625941b40383005118ecf3c5 |
def value_nodes(self, target_graph, focus): <NEW_LINE> <INDENT> if not isinstance(focus, (tuple, list, set)): <NEW_LINE> <INDENT> focus = [focus] <NEW_LINE> <DEDENT> if not self.is_property_shape: <NEW_LINE> <INDENT> return {f: set((f,)) for f in focus} <NEW_LINE> <DEDENT> path_val = self.path() <NEW_LINE> focus_dict = {} <NEW_LINE> for f in focus: <NEW_LINE> <INDENT> focus_dict[f] = self._value_nodes_from_path( f, path_val, target_graph) <NEW_LINE> <DEDENT> return focus_dict | For each focus node, you can get a set of value nodes.
For a Node Shape, each focus node has just one value node,
which is just the focus_node
:param target_graph:
:param focus:
:return: | 625941b407d97122c417866a |
def detectCycle(self, head): <NEW_LINE> <INDENT> if not head or not head.next: <NEW_LINE> <INDENT> return None <NEW_LINE> <DEDENT> slow = fast = head <NEW_LINE> while fast and fast.next: <NEW_LINE> <INDENT> slow = slow.next <NEW_LINE> fast = fast.next.next <NEW_LINE> if slow == fast: <NEW_LINE> <INDENT> slow = head <NEW_LINE> while slow != fast: <NEW_LINE> <INDENT> slow = slow.next <NEW_LINE> fast = fast.next <NEW_LINE> <DEDENT> return slow <NEW_LINE> <DEDENT> <DEDENT> return None | :type head: ListNode
:rtype: ListNode | 625941b445492302aab5e09f |
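A sketch exercising the Floyd cycle-detection record above, assuming the usual LeetCode-style ListNode and Solution wrappers (neither is part of the record):

```python
class ListNode(object):  # assumed node type
    def __init__(self, x):
        self.val = x
        self.next = None

# Build 1 -> 2 -> 3 -> 4, with 4 pointing back to 2 (cycle entry is node 2).
nodes = [ListNode(i) for i in range(1, 5)]
for a, b in zip(nodes, nodes[1:]):
    a.next = b
nodes[-1].next = nodes[1]

assert Solution().detectCycle(nodes[0]) is nodes[1]
```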
def SizeMessage(self): <NEW_LINE> <INDENT> availableSize = self.Parent.Size[0] - 5 <NEW_LINE> buttons = list(self.Panel.Sizer.Children)[2:] <NEW_LINE> for button in buttons: <NEW_LINE> <INDENT> availableSize -= button.Size[0] <NEW_LINE> <DEDENT> height = self.GetTextExtent(self.MessageText.Label[:5])[1] <NEW_LINE> self.MessageText.SetInitialSize((availableSize, height)) | Make sure the message text gets clipped appropriately. | 625941b4e64d504609d74621 |
def get_client(self, reset=False): <NEW_LINE> <INDENT> if self.client is None or reset is True: <NEW_LINE> <INDENT> self.client = Consul( host=self.settings.get('host'), port=self.settings.get('port'), token=self.settings.get('token'), scheme=self.settings.get('scheme'), consistency=self.settings.get('consistency'), verify=self.settings.get('verify'), ) <NEW_LINE> <DEDENT> return self.client | Return a consul client, create it if needed | 625941b423849d37ff7b2e73 |
def test_empty_input_invalid_message(self): <NEW_LINE> <INDENT> self.check_nothing_in_databse_helper() <NEW_LINE> self.fill_complete_form_helper_method() <NEW_LINE> self.driver.find_element_by_id(f'project-desc0').clear() <NEW_LINE> self.assertEqual("", self.driver.find_element_by_id(f'project-desc0').get_attribute("value")) <NEW_LINE> self.assertFalse(self.driver.find_element_by_id('project-desc0-error').is_displayed()) <NEW_LINE> self.driver.find_element_by_id('submit-btn').click() <NEW_LINE> sleep(1) <NEW_LINE> WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.CSS_SELECTOR, f'#project-desc0-error'))) <NEW_LINE> self.assertTrue(self.driver.find_element_by_id('project-desc0-error').is_displayed()) <NEW_LINE> self.check_nothing_in_databse_helper() | Test that nothing is entered in the database when a field is left blank
Also test that invalid message is not hidden in the view
:return: | 625941b445492302aab5e0a0 |
def test_no_exceptions(self): <NEW_LINE> <INDENT> with patch.dict('os.environ', { 'APP_NAME': 'my-app', 'VERSION': '1.3.0' }): <NEW_LINE> <INDENT> signal.signal(signal.SIGALRM, signal_handler) <NEW_LINE> signal.alarm(3) <NEW_LINE> try: <NEW_LINE> <INDENT> register_loop() <NEW_LINE> <DEDENT> except TimeoutError: <NEW_LINE> <INDENT> pass | This test uses signals to terminate an infinite loop.
'signal.alarm(3)' sends signal.SIGALRM after 3 seconds
which causes TimeoutError and ends the loop. | 625941b4a934411ee375147c
def update_stats(self): <NEW_LINE> <INDENT> m, s = self._get_stats() <NEW_LINE> self.mean = m <NEW_LINE> self.stdv = s | Updates the mean and stdv estimates - used for sensitivity analysis
where the parent distribution params may change after instantiation | 625941b43c8af77a43ae357f |
@tier2 <NEW_LINE> def test_positive_update_template(session): <NEW_LINE> <INDENT> template = entities.ProvisioningTemplate().create() <NEW_LINE> loc = entities.Location().create() <NEW_LINE> with session: <NEW_LINE> <INDENT> session.location.update( loc.name, {'provisioning_templates.all_templates': False, 'provisioning_templates.resources.unassigned': [template.name]} ) <NEW_LINE> loc_values = session.location.read(loc.name) <NEW_LINE> assert loc_values['provisioning_templates']['resources'][ 'unassigned'][0] == template.name <NEW_LINE> session.location.update(loc.name, { 'provisioning_templates.resources.assigned': [template.name]}) <NEW_LINE> new_loc_values = session.location.read(loc.name) <NEW_LINE> assert len(new_loc_values[ 'provisioning_templates']['resources']['assigned']) == len(loc_values[ 'provisioning_templates']['resources']['assigned']) + 1 <NEW_LINE> assert template.name in new_loc_values[ 'provisioning_templates']['resources']['assigned'] | Add/Remove template from/to location
:id: 8faf60d1-f4d6-4a58-a484-606a42957ce7
:expectedresults: config template is removed and then added to the location
:CaseLevel: Integration | 625941b423e79379d52ee349 |
def ProcessL1A(self): <NEW_LINE> <INDENT> for ifile in self.ifileGen: <NEW_LINE> <INDENT> self.pArgs.ifile = ifile <NEW_LINE> mcr = MCRunner(self.pArgs) <NEW_LINE> pickle.dump(mcr,open(os.path.join(mcr.l2MainPath,'mcr_%s.pkl' % mcr.basename), 'wb')) <NEW_LINE> cmdGen = mcr.GetCmdList() <NEW_LINE> status = mcr.Runner(cmdGen) <NEW_LINE> if status: <NEW_LINE> <INDENT> if self.verbose: <NEW_LINE> <INDENT> print('\r%s: Finished processing %s' % (dt.now(), ifile), end='',flush=True) <NEW_LINE> with open(self.logMeta,'a') as fmeta: <NEW_LINE> <INDENT> print('Finished processing %s' % ifile,file=fmeta) <NEW_LINE> <DEDENT> <DEDENT> del mcr <NEW_LINE> <DEDENT> <DEDENT> return None | Calls L1AGenerator to get next file to process | 625941b4627d3e7fe0d68c2e |
def __init__(self): <NEW_LINE> <INDENT> self.discriminator = None | CloudProvider - a model defined in OpenAPI | 625941b499fddb7c1c9de174 |
def labelled_image2regions(labelled_image,roi=None): <NEW_LINE> <INDENT> if roi is not None: <NEW_LINE> <INDENT> tmp_masked_array=np.ma.masked_array(labelled_image, mask=np.logical_not(roi)) <NEW_LINE> return labelled_image2regions(tmp_masked_array) <NEW_LINE> <DEDENT> regions=[] <NEW_LINE> if type(labelled_image) == np.ma.masked_array : <NEW_LINE> <INDENT> mask_roi=np.logical_not(labelled_image.mask) <NEW_LINE> min_image,max_image=labelled_image.compressed().min(),labelled_image.compressed().max() <NEW_LINE> hist,bins = np.histogram(labelled_image.compressed(), bins=max_image-min_image+1,range=(min_image,max_image+1)) <NEW_LINE> bins=bins[0:bins.size-1] <NEW_LINE> for i in range(0,len(hist)): <NEW_LINE> <INDENT> if hist[i] != 0: <NEW_LINE> <INDENT> new_region=np.where(labelled_image==bins[i],1,0) <NEW_LINE> new_region=np.logical_and(mask_roi,new_region) <NEW_LINE> regions+=[new_region] <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> else: <NEW_LINE> <INDENT> min_image,max_image=labelled_image.min(),labelled_image.max() <NEW_LINE> hist,bins = np.histogram(labelled_image, bins=max_image-min_image+1,range=(min_image,max_image+1)) <NEW_LINE> bins=bins[0:bins.size-1] <NEW_LINE> for i in range(0,len(hist)): <NEW_LINE> <INDENT> if hist[i] != 0: regions+=[np.where(labelled_image==bins[i],1,0)] <NEW_LINE> <DEDENT> <DEDENT> return regions | Generate regions from labelled image: each region correspond to a specific label | 625941b4e5267d203edcda82 |
def get_elm(line): <NEW_LINE> <INDENT> dlist = [] <NEW_LINE> line = line.replace(' ', '') <NEW_LINE> atemp = re.split(':', line) <NEW_LINE> if len(atemp) > 1: <NEW_LINE> <INDENT> for k in range(1, len(atemp)): <NEW_LINE> <INDENT> ent = atemp[k].strip() <NEW_LINE> dlist.append(ent) <NEW_LINE> <DEDENT> <DEDENT> return dlist | separate data into a list
input: line --- a data line
output: dlist --- a list of data | 625941b4293b9510aa2c307a |
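A quick usage sketch of get_elm above: spaces are stripped before splitting, and the text before the first colon is discarded (the sample line is invented):

    # Hypothetical colon-delimited input line of the expected shape.
    line = 'avg_rate : 3.2 : 4.1 : 5.0'
    print(get_elm(line))   # -> ['3.2', '4.1', '5.0']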
def test_edit_resolve(self): <NEW_LINE> <INDENT> self.assertEqual( resolve('/brands/edit/1/').view_name, 'brands:edit' ) | /brands/edit/<pk>/ should resolve to brands:edit. | 625941b4b57a9660fec3365f
def clean_username(self): <NEW_LINE> <INDENT> username = self.cleaned_data.get('username', '').lower() <NEW_LINE> if User.objects.filter(username=username).exists(): <NEW_LINE> <INDENT> raise forms.ValidationError('El usuario {0} existe ya.'.format(username)) <NEW_LINE> <DEDENT> return username | Normalize the username and check that it does not already exist in the database.
Return the validated value, confirmed not to exist previously | 625941b47b25080760e3923c
def test_delete_user(self): <NEW_LINE> <INDENT> self.new_user.save_user() <NEW_LINE> test_user = User("instagram","loop","334") <NEW_LINE> test_user.save_user() <NEW_LINE> self.new_user.delete_user() <NEW_LINE> self.assertEqual(len(User.user_list),1) | Test that delete_user removes a user from the user list. | 625941b467a9b606de4a7c9e
def test_pressure_network_no_gradient(self): <NEW_LINE> <INDENT> k = np.ones(4) * 0.5 <NEW_LINE> Qtot = 4 <NEW_LINE> initial_guess = array([2., 0., 2., 0.]) <NEW_LINE> final_flows, info, ier, mesg = optimize.fsolve( pressure_network, initial_guess, args=(Qtot, k), full_output=True) <NEW_LINE> assert_array_almost_equal(final_flows, np.ones(4)) <NEW_LINE> assert_(ier == 1, mesg) | fsolve without gradient, equal pipes -> equal flows | 625941b466656f66f7cbbf8b |
def missing_output(self, requested=None): <NEW_LINE> <INDENT> if requested is None: <NEW_LINE> <INDENT> requested = set(self.output) <NEW_LINE> <DEDENT> files = set() <NEW_LINE> for f, f_ in zip(self.output, self.rule.output): <NEW_LINE> <INDENT> if f in requested: <NEW_LINE> <INDENT> if f in self.dynamic_output: <NEW_LINE> <INDENT> if not self.expand_dynamic( f_, restriction=self.wildcards, omit_value=_IOFile.dynamic_fill): <NEW_LINE> <INDENT> files.add("{} (dynamic)".format(f_)) <NEW_LINE> <DEDENT> <DEDENT> elif not f.exists: <NEW_LINE> <INDENT> files.add(f) <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> return files | Return missing output files. | 625941b473bcbd0ca4b2be5d |
def __init__(self, features2process, test_size=0.2, random_state=0): <NEW_LINE> <INDENT> FormatConverter.__init__(self, features2process) <NEW_LINE> self.test_size = test_size <NEW_LINE> self.random_state = random_state | data → (trainset, testset), only features2process will be preserved.
:param features2process: feature columns to preserve in the converted data
:param test_size: fraction of the data held out for the test set
:param random_state: seed for a reproducible split | 625941b4f548e778e58cd35c
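The split itself is not shown in this row; a minimal sketch of what presumably happens downstream, assuming the usual scikit-learn split (df and converter are hypothetical names):

    from sklearn.model_selection import train_test_split

    # Keep only the requested feature columns, then split reproducibly.
    subset = df[list(converter.features2process)]
    trainset, testset = train_test_split(
        subset, test_size=converter.test_size, random_state=converter.random_state)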
def write_slack_markdown(message): <NEW_LINE> <INDENT> return { "type": "section", "text": { "type": "mrkdwn", "text": f"{message}" } } | Function to write a slack markdown section
Parameters
----------
message: str
Message
Returns
-------
dict:
Block message | 625941b44f6381625f114827 |
def test_grammar(): <NEW_LINE> <INDENT> validator_fail = False <NEW_LINE> class DummyType(object): <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> class TestGrammarClass(object): <NEW_LINE> <INDENT> def __init__(self): <NEW_LINE> <INDENT> self.grammar = GrammarDict() <NEW_LINE> <DEDENT> @grammar <NEW_LINE> def test_grammar(value): <NEW_LINE> <INDENT> if validator_fail: <NEW_LINE> <INDENT> raise ValueError('validator failed') <NEW_LINE> <DEDENT> <DEDENT> @grammar(grammar_type=DummyType) <NEW_LINE> def test_grammar_with_type(value): <NEW_LINE> <INDENT> if validator_fail: <NEW_LINE> <INDENT> raise ValueError('validator failed') <NEW_LINE> <DEDENT> <DEDENT> @grammar(grammar_name='a name') <NEW_LINE> def test_grammar_with_name(value): <NEW_LINE> <INDENT> if validator_fail: <NEW_LINE> <INDENT> raise ValueError('validator failed') <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> test = TestGrammarClass() <NEW_LINE> nt.assert_is_none(test.test_grammar) <NEW_LINE> nt.assert_dict_equal(test.grammar, {}) <NEW_LINE> test.test_grammar = 'testing' <NEW_LINE> nt.assert_equal(test.test_grammar, 'testing') <NEW_LINE> nt.assert_dict_equal(test.grammar, {'test_grammar': 'testing'}) <NEW_LINE> del test.test_grammar <NEW_LINE> nt.assert_is_none(test.test_grammar) <NEW_LINE> nt.assert_dict_equal(test.grammar, {}) <NEW_LINE> validator_fail = True <NEW_LINE> nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test, 'test_grammar', 'testing') <NEW_LINE> test = TestGrammarClass() <NEW_LINE> validator_fail = False <NEW_LINE> dummy = DummyType() <NEW_LINE> test.test_grammar_with_type = dummy <NEW_LINE> nt.assert_equal(test.test_grammar_with_type, dummy) <NEW_LINE> nt.assert_dict_equal(test.grammar, {'test_grammar_with_type': dummy}) <NEW_LINE> nt.assert_raises_regexp(ValueError, 'must be DummyType', setattr, test, 'test_grammar_with_type', 'testing') <NEW_LINE> validator_fail = True <NEW_LINE> nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test, 'test_grammar_with_type', dummy) <NEW_LINE> test = TestGrammarClass() <NEW_LINE> validator_fail = False <NEW_LINE> test.test_grammar_with_name = 'testing' <NEW_LINE> nt.assert_equal(test.test_grammar_with_name, 'testing') <NEW_LINE> nt.assert_dict_equal(test.grammar, {'a name': 'testing'}) <NEW_LINE> validator_fail = True <NEW_LINE> nt.assert_raises_regexp(ValueError, 'validator failed', setattr, test, 'test_grammar_with_name', 'testing') | Grammar decorator behaves correctly. | 625941b416aa5153ce362258 |
@api_view(["GET"]) <NEW_LINE> def get_namespaces_for_object(request, model, instance_pk): <NEW_LINE> <INDENT> instance = Tag.LINKED_TO_MODEL[model].objects.get(pk=instance_pk) <NEW_LINE> parent = get_parent_object(instance) <NEW_LINE> namespaces = Namespace.objects.filter( Q(scoped_to_model=parent.__class__.__name__.lower(), scoped_to_pk=parent.id) | Q(scoped_to_model="global") ).all() <NEW_LINE> return Response( {"namespaces": NamespaceSerializer(many=True).to_representation(namespaces)} ) | Returns the namespaces that the object of type `model` and id `instance_pk` can get tags from | 625941b4091ae35668666d47 |
def get_next_num(self): <NEW_LINE> <INDENT> allnums = self.get_nums() <NEW_LINE> return max(allnums) + 1 if allnums else 1 | Return the next available number: one more than the current maximum, or 1 if none exist | 625941b4e8904600ed9f1d0a
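Note that numbering continues past the current maximum, so gaps are never reused; a small illustration with invented values:

    # max([1, 2, 5]) + 1 == 6; an empty collection falls back to 1.
    allnums = [1, 2, 5]
    next_num = max(allnums) + 1 if allnums else 1   # -> 6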
def get_stocks_mc_gbm(hist_file='hist0316.pkl', gbm_file='gbm0316.pkl', days_for_statistic=60, days_for_predict=5, simulation=5000, bottom=0.1, top=0.2, p_value=0.05): <NEW_LINE> <INDENT> warnings.filterwarnings("ignore") <NEW_LINE> fn = hist_file <NEW_LINE> with open(fn, 'rb') as f: <NEW_LINE> <INDENT> content = pickle.load(f) <NEW_LINE> <DEDENT> def get_all_er_of_mc_gbm(_content, _days_for_statistic, _days_for_predict=5, _simulation=5000): <NEW_LINE> <INDENT> _count = 0 <NEW_LINE> _raw_results = [] <NEW_LINE> for _key, _value in tqdm(_content.items(), desc='[GMB Monte Carlo]'): <NEW_LINE> <INDENT> _count += 1 <NEW_LINE> _raw_result = {} <NEW_LINE> try: <NEW_LINE> <INDENT> _raw_result['code'] = _key <NEW_LINE> _raw_result['expected_return'], _raw_result['p_value'] = _calculate_mc_gbm( _value[-_days_for_statistic:], _days_for_predict, _simulation) <NEW_LINE> _raw_result['expected_return'] = np.nan_to_num(_raw_result['expected_return']) <NEW_LINE> _raw_result['p_value'] = np.nan_to_num(_raw_result['p_value']) <NEW_LINE> _raw_results.append(_raw_result) <NEW_LINE> <DEDENT> except: <NEW_LINE> <INDENT> continue <NEW_LINE> <DEDENT> <DEDENT> return _raw_results <NEW_LINE> <DEDENT> raw_results = get_all_er_of_mc_gbm(content, days_for_statistic, days_for_predict, simulation) <NEW_LINE> refined_results = [] <NEW_LINE> for raw_result in raw_results: <NEW_LINE> <INDENT> if bottom <= raw_result['expected_return'] < top: <NEW_LINE> <INDENT> if _is_booming_stock(content, raw_result['code'], days_for_statistic) == 1: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> if raw_result['p_value'] < p_value: <NEW_LINE> <INDENT> refined_results.append(raw_result) <NEW_LINE> <DEDENT> else: <NEW_LINE> <INDENT> pass <NEW_LINE> <DEDENT> <DEDENT> <DEDENT> <DEDENT> refined_results.sort(key=lambda k: (k.get('expected_return', 0))) <NEW_LINE> with open(gbm_file, 'wb') as f: <NEW_LINE> <INDENT> pickle.dump(refined_results, f) <NEW_LINE> <DEDENT> print('[Final Results]') <NEW_LINE> for x in refined_results: <NEW_LINE> <INDENT> print(x) <NEW_LINE> <DEDENT> return refined_results | public
(get_k_hist)
function: select stocks whose Monte Carlo (geometric Brownian motion) expected return falls in the given interval
return: a sorted list of selected dicts, each of the form
[{'code':'300403',
'return':0.01,
'p-value':0.05},] | 625941b48a43f66fc4b53e4b |
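The private helper _calculate_mc_gbm is not shown in this row; below is a minimal, self-contained sketch of what such a simulation might look like. The function name, the one-sample t-test, and the daily-close input format are all assumptions of this sketch:

    import numpy as np
    from scipy import stats

    def gbm_expected_return(closes, days=5, sims=5000):
        # Drift and volatility estimated from historical daily log returns.
        log_ret = np.diff(np.log(np.asarray(closes, dtype=float)))
        mu, sigma = log_ret.mean(), log_ret.std()
        # Simulate `sims` GBM paths of `days` steps from the last close.
        shocks = (mu - 0.5 * sigma ** 2) + sigma * np.random.standard_normal((sims, days))
        terminal = closes[-1] * np.exp(shocks.sum(axis=1))
        returns = terminal / closes[-1] - 1
        # Test whether the mean simulated return differs from zero.
        _, p_value = stats.ttest_1samp(returns, 0.0)
        return returns.mean(), p_value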
def p_three_address_instruction_times(p): <NEW_LINE> <INDENT> print('\n\t;',p[1],'=',p[3].val,'*',p[5].val) <NEW_LINE> if p[3].kind == 'num': <NEW_LINE> <INDENT> print('\t','LDR r0, =',p[3].val,sep='') <NEW_LINE> <DEDENT> elif p[3].kind == 'id': <NEW_LINE> <INDENT> print('\t','LDR r2, =',p[3].val,sep='') <NEW_LINE> print('\t','LDR r0, [r2]',sep='') <NEW_LINE> <DEDENT> if p[5].kind == 'num': <NEW_LINE> <INDENT> print('\t','LDR r1, =',p[5].val,sep='') <NEW_LINE> <DEDENT> elif p[5].kind == 'id': <NEW_LINE> <INDENT> print('\t','LDR r3, =',p[5].val,sep='') <NEW_LINE> print('\t','LDR r1, [r3]',sep='') <NEW_LINE> <DEDENT> print('\t','LDR r4, =',p[1],sep='') <NEW_LINE> print('\t','BL signed_multiply',sep='') <NEW_LINE> print('\t','STR r0, [r4]',sep='') | three_address_instruction : lhs ASSIGN operand TIMES operand | 625941b40c0af96317bb7fca |
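This row is a ply.yacc parser action, where the docstring doubles as the grammar production; a minimal sketch of the pattern (tokens and lexer omitted, so this is illustrative rather than runnable on its own):

    import ply.yacc as yacc

    def p_expression_plus(p):
        'expression : expression PLUS term'   # the docstring IS the grammar rule
        p[0] = p[1] + p[3]                    # p indexes the matched symbols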
def do_request(json_payload): <NEW_LINE> <INDENT> try: <NEW_LINE> <INDENT> response = requests.post(CONNECT_URL, json=json_payload) <NEW_LINE> <DEDENT> except requests.exceptions.ConnectionError as err: <NEW_LINE> <INDENT> msg = 'Connection error to `{}`, ' 'check that RocketWS is started'.format(CONNECT_URL) <NEW_LINE> logger.error(err) <NEW_LINE> raise requests.exceptions.ConnectionError(msg) <NEW_LINE> <DEDENT> return response | Request helper
:param json_payload: JSON-serializable payload sent as the POST body
:return: the HTTP response
:raise requests.exceptions.ConnectionError: if RocketWS is not reachable at CONNECT_URL | 625941b4796e427e537b03a3
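A hedged usage sketch: the payload shape is invented, and only the ConnectionError re-raise behavior comes from the row above:

    try:
        response = do_request({"method": "notify", "params": {"msg": "hi"}})
        response.raise_for_status()
    except requests.exceptions.ConnectionError as err:
        # do_request re-raises with a friendlier message when RocketWS is down.
        print(err)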
def uninstallProducts(products, cascade=['types', 'skins', 'actions', 'portalobjects', 'workflows', 'slots', 'registrypredicates'], REQUEST=None): <NEW_LINE> <INDENT> pass | Removes a list of products; `cascade` defines which items created by
the install shall be uninstalled | 625941b4ab23a570cc24ff61
def proportion_jefferson(arrays, seats): <NEW_LINE> <INDENT> local_count = len(arrays) <NEW_LINE> print('number of locals', local_count) <NEW_LINE> proportion = round(sum(arrays) / seats, 0) <NEW_LINE> print('exact num. of ppl per seat:', proportion) <NEW_LINE> area = 0 <NEW_LINE> seats_result = 0 <NEW_LINE> while seats_result != seats: <NEW_LINE> <INDENT> seats_result = 0 <NEW_LINE> for local in arrays: <NEW_LINE> <INDENT> area += 1 <NEW_LINE> share = int(local / proportion) <NEW_LINE> print('area {} pop: {} | share of seats under proportion of {} : {}'.format(area, local, proportion, share)) <NEW_LINE> seats_result += share <NEW_LINE> <DEDENT> print('total seats:', seats_result) <NEW_LINE> if seats_result > seats: <NEW_LINE> <INDENT> break <NEW_LINE> <DEDENT> elif seats_result < seats: <NEW_LINE> <INDENT> proportion -= 1 <NEW_LINE> <DEDENT> <DEDENT> return '1' | :param arrays: population counts, one per local area
:param seats: total number of seats to apportion
:return: '1' once the divisor search terminates (the per-area shares are printed, not returned) | 625941b4dc8b845886cb5315
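As a cross-check, a small self-contained sketch of the same divisor idea, using bisection instead of the unit decrement above (the function name and search bounds are choices of this sketch):

    def jefferson_shares(populations, seats):
        # Bisect the divisor until the floored shares sum to `seats`.
        lo, hi = 1e-9, float(sum(populations))
        for _ in range(200):
            divisor = (lo + hi) / 2
            shares = [int(p // divisor) for p in populations]
            allocated = sum(shares)
            if allocated == seats:
                return shares
            if allocated > seats:
                lo = divisor   # divisor too small: too many seats handed out
            else:
                hi = divisor   # divisor too large: too few seats handed out
        return None            # ties between areas can prevent an exact fit

    print(jefferson_shares([100, 80, 30], 10))   # -> [5, 4, 1]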
def read_files(self): <NEW_LINE> <INDENT> pass | Read and parse locally downloaded metadata files and
populates
:attr:`Bcfg2.Server.Plugins.Packages.Source.Source.pkgnames`. Should
call
:func:`Bcfg2.Server.Plugins.Packages.Source.Source.process_files`
as its final step. | 625941b4cdde0d52a9e52e0f |
def test_utc_time_control(self): <NEW_LINE> <INDENT> answer = self.datetime_utc <NEW_LINE> self.assertEqual(TimeUtils.parse_datetime(self.datetime_utc, utc=True), answer) | This should just return itself | 625941b46aa9bd52df036b83 |
def p_type_args_expr_epsilon( p ): <NEW_LINE> <INDENT> p[0] = [] | type_expr_many : epsilon | 625941b43346ee7daa2b2b4a |
def get_renumber_starting_ids_from_model(model): <NEW_LINE> <INDENT> starting_id_dict = { 'cid' : max(model.coords.keys()) + 1, 'nid' : max(model.point_ids) + 1, 'eid' : max([max(model.elements.keys()), max(model.masses.keys()) if model.masses else 0, max(model.rigid_elements.keys()) if model.rigid_elements else 0, ]) + 1, 'pid' : max([max(model.properties.keys()), 0 if len(model.properties_mass) == 0 else max(model.properties_mass.keys()), ]) + 1, 'mid' : max(model.material_ids) + 1, 'set_id' : max(model.sets.keys()) + 1 if model.sets else 1, 'spline_id' : max(model.splines.keys()) + 1 if model.splines else 1, 'caero_id' : max(caero.box_ids[-1, -1] for caero in itervalues(model.caeros)) + 1 if model.caeros else 1, } <NEW_LINE> return starting_id_dict | Get the starting ids dictionary used for renumbering with ids greater than those in model.
Parameters
-----------
model : BDF
BDF object to get maximum ids from.
Returns
--------
starting_id_dict : dict {str : int, ...}
Dictionary from id type to starting id. | 625941b466673b3332b91e78 |
def check(self, coordinate_list): <NEW_LINE> <INDENT> action = webdriver.ActionChains(self.driver) <NEW_LINE> image = self.driver.find_element_by_xpath('/html/body/div[5]/div[2]/div[1]/div/div/div[2]/div[1]/div/div[2]/img') <NEW_LINE> for coordinate in coordinate_list: <NEW_LINE> <INDENT> x, y = coordinate.split(",") <NEW_LINE> action.move_to_element_with_offset(image, int(x), int(y)) <NEW_LINE> action.click() <NEW_LINE> <DEDENT> action.perform() <NEW_LINE> self.driver.find_element_by_xpath('/html/body/div[5]/div[2]/div[1]/div/div/div[3]/a/div').click() | Click the given coordinates on a click-type CAPTCHA image | 625941b4a8370b7717052683
def rados_write_objects(self, pool, num_objects, size, timelimit, threads, cleanup=False): <NEW_LINE> <INDENT> args = [ '-p', pool, '--num-objects', num_objects, '-b', size, 'bench', timelimit, 'write' ] <NEW_LINE> if not cleanup: <NEW_LINE> <INDENT> args.append('--no-cleanup') <NEW_LINE> <DEDENT> return self.do_rados(self.controller, map(str, args)) | Write rados objects
Threads not used yet. | 625941b476d4e153a657e911 |
def setUp(self): <NEW_LINE> <INDENT> self.filename = get_path(fixtures, '1912_11_10_galen.xml') <NEW_LINE> issue = Issue(self.filename) <NEW_LINE> self.article = issue.articles[0] <NEW_LINE> self.article_id = 'NID123-1912-1110-0001-001' <NEW_LINE> self.page_id = '0001' | Creates Issue from test file fixtures/1912_11_10_galen.xml then
retrieves the first Article. | 625941b44a966d76dd550ded