Dataset columns: language (string, 6 classes); original_string (string, 25 to 887k characters); text (string, 25 to 887k characters, a verbatim duplicate of original_string).
Python
def enumerate_droppables(conn):
    """Return the names of all droppable indexes.

    Only indexes that do not back a unique or other constraint are returned.
    """
    index_name_result = run_query(conn, """
        SELECT s.indexrelname AS indexname
        FROM pg_catalog.pg_stat_user_indexes s
        JOIN pg_catalog.pg_index i ON s.indexrelid = i.indexrelid
        WHERE NOT i.indisunique
          AND NOT EXISTS (SELECT 1 FROM pg_catalog.pg_constraint c
                          WHERE c.conindid = s.indexrelid)
          AND s.schemaname = 'public'
    """)
    return {tup[0] for tup in index_name_result}
Python
def recommend_index(queries, conn, hypo_added_index):
    """A heavily simplified Dexter.

    Enumerate all candidate indexes that do not yet exist, over the columns
    used by the queries, with up to three columns and three index types.
    For each candidate, use hypopg to create a hypothetical index and
    EXPLAIN all queries, then pick the candidate with the smallest
    cumulative cost. The improvement on the total cost, or on a portion of
    the queries, must exceed a threshold; otherwise the recommendation is
    empty.
    """
    index_candidates = enumerate_index(conn, queries) - hypo_added_index
    # Get initial costs without any hypothetical indexes.
    original_total_cost, original_cost_per_query = get_workload_costs(queries, conn)
    minimum_cost = original_total_cost
    recommendation = None
    new_cost_per_query = original_cost_per_query
    for index_candidate in index_candidates:
        hypo_result = run_query(
            conn,
            f"select indexrelid from hypopg_create_index('{get_create_index_sql(index_candidate)}')")
        conn.commit()
        oid = int(hypo_result[0][0])
        total_cost, cost_per_query = get_workload_costs(queries, conn)
        if total_cost < minimum_cost:
            minimum_cost = total_cost
            new_cost_per_query = cost_per_query
            recommendation = index_candidate
        # Remove the hypopg index for the current candidate.
        run_query(conn, f"select * from hypopg_drop_index({oid})")
        conn.commit()
    # Check whether the best candidate speeds up some query template significantly.
    is_significant_query_template = False
    for idx in range(len(original_cost_per_query)):
        if new_cost_per_query[idx] <= PER_QUERY_THRESHOLD * original_cost_per_query[idx]:
            is_significant_query_template = True
    # If the improvement is not significant, the recommendation is discarded.
    if minimum_cost < (1.0 - IMPROVE_THRESHOLD) * original_total_cost \
            or (is_significant_query_template and minimum_cost < original_total_cost):
        # Register the recommendation with hypopg so later calls in this
        # iteration see it.
        hypo_added_index.add(recommendation)
        run_query(
            conn,
            f"select indexrelid from hypopg_create_index('{get_create_index_sql(recommendation)}')")
        conn.commit()
        return [f"{get_create_index_sql(recommendation)};"]
    else:
        return []
Python
def drop_index(queries, conn, hypo_dropped_index):
    """Recommend an index to drop, in addition to recommending new indexes.

    Works like Dexter, but more simply than hypopg: booleans in pg_index
    are flipped to disable indexes, which hypothetically drops them.
    """
    drop_candidates = enumerate_droppables(conn) - hypo_dropped_index
    original_total_cost, original_cost_per_query = get_workload_costs(queries, conn)
    minimum_cost = original_total_cost + 1.0
    recommendation = None
    new_cost_per_query = original_cost_per_query
    for drop_candidate in drop_candidates:
        # Hypothetically disable the index.
        run_query(conn,
                  f"UPDATE pg_index SET indisvalid=false, indisready=false "
                  f"WHERE indexrelid='{drop_candidate}'::regclass")
        total_cost, cost_per_query = get_workload_costs(queries, conn)
        if total_cost < minimum_cost:
            minimum_cost = total_cost
            new_cost_per_query = cost_per_query
            recommendation = drop_candidate
        # Re-enable the index.
        run_query(conn,
                  f"UPDATE pg_index SET indisvalid=true, indisready=true "
                  f"WHERE indexrelid='{drop_candidate}'::regclass")
    # Check that the best drop candidate does not noticeably degrade any
    # query template.
    is_significant_query_template = False
    for idx in range(len(original_cost_per_query)):
        if new_cost_per_query[idx] * PER_QUERY_THRESHOLD >= original_cost_per_query[idx]:
            is_significant_query_template = True
    # Only choose the drop candidate if the total cost is the same or better
    # and no individual query degrades noticeably.
    if minimum_cost <= original_total_cost and not is_significant_query_template:
        # Add it to the hypothetically-dropped set.
        hypo_dropped_index.add(recommendation)
        run_query(conn,
                  f"UPDATE pg_index SET indisvalid=false, indisready=false "
                  f"WHERE indexrelid='{recommendation}'::regclass")
        return [f"DROP INDEX {recommendation};"]
    else:
        return []
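Both recommend_index and drop_index call a get_workload_costs helper that is not shown in this excerpt. A minimal sketch of what it might look like, assuming run_query returns fetched rows from a psycopg2-style cursor with JSON columns already decoded; the real helper may differ:

def get_workload_costs(queries, conn):
    """Hypothetical helper: EXPLAIN each query and sum the planner's cost estimates.

    Nothing is executed; only the optimizer's estimated total costs are collected.
    """
    cost_per_query = []
    for query in queries:
        # EXPLAIN (FORMAT JSON) returns one row whose single column holds the plan tree.
        plan_rows = run_query(conn, f"EXPLAIN (FORMAT JSON) {query}")
        plan = plan_rows[0][0][0]["Plan"]
        cost_per_query.append(plan["Total Cost"])
    return sum(cost_per_query), cost_per_query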
Python
def connect(self):
    """Establish a connection to the chat server.

    Returns when an error has occurred, or Client.disconnect has been
    called.
    """
    initial_data = yield from self._initialize_chat()
    self._channel = channel.Channel(self._cookies, self._connector)

    @asyncio.coroutine
    def _on_connect():
        """Wrapper to fire on_connect with initial_data."""
        yield from self.on_connect.fire(initial_data)

    self._channel.on_connect.add_observer(_on_connect)
    self._channel.on_reconnect.add_observer(self.on_reconnect.fire)
    self._channel.on_disconnect.add_observer(self.on_disconnect.fire)
    self._channel.on_message.add_observer(self._on_push_data)
    # ensure_future replaces the asyncio.async alias, which is a syntax
    # error on Python 3.7+.
    self._listen_future = asyncio.ensure_future(self._channel.listen())
    try:
        yield from self._listen_future
    except asyncio.CancelledError:
        pass
    logger.info('disconnecting gracefully')
Python
def disconnect(self):
    """Gracefully disconnect from the server.

    When disconnection is complete, Client.connect will return.
    """
    self._listen_future.cancel()
    self._connector.close()
Python
def _initialize_chat(self):
    """Request push channel creation and initial chat data.

    Returns an instance of InitialData.

    The response body is an HTML document containing a series of script
    tags containing JavaScript objects. We need to parse the objects to
    get at the data.
    """
    # We first need to fetch the 'pvt' token, which is required for the
    # initialization request (otherwise it will return 400).
    try:
        res = yield from http_utils.fetch(
            'get', PVT_TOKEN_URL, cookies=self._cookies,
            connector=self._connector
        )
        CHAT_INIT_PARAMS['pvt'] = javascript.loads(res.body.decode())[1]
        logger.info('Found PVT token: {}'.format(CHAT_INIT_PARAMS['pvt']))
    except (exceptions.NetworkError, ValueError) as e:
        raise exceptions.HangupsError('Failed to fetch PVT token: {}'
                                      .format(e))

    # Now make the actual initialization request:
    try:
        res = yield from http_utils.fetch(
            'get', CHAT_INIT_URL, cookies=self._cookies,
            params=CHAT_INIT_PARAMS, connector=self._connector
        )
    except exceptions.NetworkError as e:
        raise exceptions.HangupsError('Initialize chat request failed: {}'
                                      .format(e))

    # Parse the response by using a regex to find all the JS objects, and
    # parsing them. Not everything will be parsable, but we don't care if
    # an object we don't need can't be parsed.
    data_dict = {}
    for data in CHAT_INIT_REGEX.findall(res.body.decode()):
        try:
            logger.debug("Attempting to load javascript: {}..."
                         .format(repr(data[:100])))
            data = javascript.loads(data)
            # pylint: disable=invalid-sequence-index
            data_dict[data['key']] = data['data']
        except ValueError as e:
            try:
                data = data.replace("data:function(){return", "data:")
                data = data.replace("}}", "}")
                data = javascript.loads(data)
                data_dict[data['key']] = data['data']
            except ValueError as e:
                pass
                # logger.debug('Failed to parse initialize chat object: {}\n{}'
                #              .format(e, data))

    # Extract various values that we will need.
    try:
        self._api_key = data_dict['ds:7'][0][2]
        self._email = data_dict['ds:36'][0][2]
        self._header_date = data_dict['ds:2'][0][4]
        self._header_version = data_dict['ds:2'][0][6]
        self._header_id = data_dict['ds:4'][0][7]
        _sync_timestamp = parsers.from_timestamp(
            # cgserp?
            # data_dict['ds:35'][0][1][4]
            data_dict['ds:21'][0][1][4]
        )
    except KeyError as e:
        raise exceptions.HangupsError('Failed to get initialize chat '
                                      'value: {}'.format(e))

    # Parse the entity representing the current user.
    self_entity = schemas.CLIENT_GET_SELF_INFO_RESPONSE.parse(
        # cgsirp?
        # data_dict['ds:20'][0]
        # data_dict['ds:35'][0]
        data_dict['ds:21'][0]
    ).self_entity

    # Parse every existing conversation's state, including participants.
    initial_conv_states = schemas.CLIENT_CONVERSATION_STATE_LIST.parse(
        # csrcrp?
        # data_dict['ds:19'][0][3]
        # data_dict['ds:36'][0][3]
        data_dict['ds:20'][0][3]
    )
    initial_conv_parts = []
    for conv_state in initial_conv_states:
        initial_conv_parts.extend(conv_state.conversation.participant_data)

    # Parse the entities for the user's contacts (doesn't include users
    # not in contacts). If this fails, continue without the rest of the
    # entities.
    initial_entities = []
    try:
        entities = schemas.INITIAL_CLIENT_ENTITIES.parse(
            # cgserp?
            # data_dict['ds:21'][0]
            # data_dict['ds:37'][0]
            # data_dict['ds:22'][0]
            data_dict['ds:21'][0]
        )
    except ValueError as e:
        logger.warning('Failed to parse initial client entities: {}'
                       .format(e))
    else:
        initial_entities.extend(entities.entities)
        initial_entities.extend(e.entity for e in itertools.chain(
            entities.group1.entity, entities.group2.entity,
            entities.group3.entity, entities.group4.entity,
            entities.group5.entity
        ))

    return InitialData(initial_conv_states, self_entity, initial_entities,
                       initial_conv_parts, _sync_timestamp)
Python
def _get_request_header(self):
    """Return request header for chat API request."""
    return [
        [6, 3, self._header_version, self._header_date],
        [self._client_id, self._header_id],
        None,
        "en"
    ]
Python
def _on_push_data(self, submission):
    """Parse ClientStateUpdate and call the appropriate events."""
    for state_update in parsers.parse_submission(submission):
        if isinstance(state_update, dict) and 'client_id' in state_update:
            # Hack to receive client ID:
            self._client_id = state_update['client_id']
            logger.info('Received new client_id: {}'
                        .format(self._client_id))
        else:
            self._active_client_state = (
                state_update.state_update_header.active_client_state
            )
            yield from self.on_state_update.fire(state_update)
Python
def _request(self, endpoint, body_json, use_json=True):
    """Make chat API request.

    Raises hangups.NetworkError if the request fails.
    """
    url = 'https://clients6.google.com/chat/v1/{}'.format(endpoint)
    res = yield from self._base_request(
        url, 'application/json+protobuf', json.dumps(body_json),
        use_json=use_json
    )
    return res
Python
def _base_request(self, url, content_type, data, use_json=True):
    """Make API request.

    Raises hangups.NetworkError if the request fails.
    """
    headers = channel.get_authorization_headers(
        self._get_cookie('SAPISID')
    )
    headers['content-type'] = content_type
    required_cookies = ['SAPISID', 'HSID', 'SSID', 'APISID', 'SID']
    cookies = {cookie: self._get_cookie(cookie)
               for cookie in required_cookies}
    params = {
        'key': self._api_key,
        'alt': 'json' if use_json else 'protojson',
    }
    res = yield from http_utils.fetch(
        'post', url, headers=headers, cookies=cookies, params=params,
        data=data, connector=self._connector
    )
    logger.debug('Response to request for {} was {}:\n{}'
                 .format(url, res.code, res.body))
    return res
Python
def syncallnewevents(self, timestamp):
    """List all events occurring at or after timestamp.

    This method requests protojson rather than json so we have one chat
    message parser rather than two.

    timestamp: datetime.datetime instance specifying the time after which
    to return all events.

    Raises hangups.NetworkError if the request fails.

    Returns a ClientSyncAllNewEventsResponse.
    """
    res = yield from self._request('conversations/syncallnewevents', [
        self._get_request_header(),
        # last_sync_timestamp
        parsers.to_timestamp(timestamp),
        [], None, [], False, [],
        1048576  # max_response_size_bytes
    ], use_json=False)
    try:
        res = schemas.CLIENT_SYNC_ALL_NEW_EVENTS_RESPONSE.parse(
            javascript.loads(res.body.decode())
        )
    except ValueError as e:
        raise exceptions.NetworkError('Response failed to parse: {}'
                                      .format(e))
    # can return 200 but still contain an error
    status = res.response_header.status
    if status != 1:
        raise exceptions.NetworkError('Response status is \'{}\''
                                      .format(status))
    return res
Python
def sendchatmessage(
        self, conversation_id, segments, image_id=None,
        otr_status=schemas.OffTheRecordStatus.ON_THE_RECORD
):
    """Send a chat message to a conversation.

    conversation_id must be a valid conversation ID. segments must be a
    list of message segments to send, in pblite format.

    otr_status determines whether the message will be saved in the
    server's chat history. Note that the OTR status of the conversation is
    irrelevant; clients may send messages with whatever OTR status they
    like.

    image_id is an optional ID of an image retrieved from
    Client.upload_image. If provided, the image will be attached to the
    message.

    Raises hangups.NetworkError if the request fails.
    """
    client_generated_id = random.randint(0, 2**32)
    body = [
        self._get_request_header(),
        None, None, None, [],
        [
            segments, []
        ],
        [[image_id, False]] if image_id else None,
        [
            [conversation_id],
            client_generated_id,
            otr_status.value,
        ],
        None, None, None, []
    ]
    res = yield from self._request('conversations/sendchatmessage', body)
    # sendchatmessage can return 200 but still contain an error
    res = json.loads(res.body.decode())
    res_status = res['response_header']['status']
    if res_status != 'OK':
        raise exceptions.NetworkError('Unexpected status: {}'
                                      .format(res_status))
Python
def upload_image(self, image_file, filename=None):
    """Upload an image that can later be attached to a chat message.

    image_file is a file-like object containing an image.

    The name of the uploaded file may be changed by specifying the
    filename argument.

    Raises hangups.NetworkError if the request fails.

    Returns the ID of the uploaded image.
    """
    image_filename = (filename if filename
                      else os.path.basename(image_file.name))
    image_data = image_file.read()
    # Create image and request upload URL
    res1 = yield from self._base_request(
        IMAGE_UPLOAD_URL,
        'application/x-www-form-urlencoded;charset=UTF-8',
        json.dumps({
            "protocolVersion": "0.8",
            "createSessionRequest": {
                "fields": [{
                    "external": {
                        "name": "file",
                        "filename": image_filename,
                        "put": {},
                        "size": len(image_data),
                    }
                }]
            }
        }))
    upload_url = (json.loads(res1.body.decode())['sessionStatus']
                  ['externalFieldTransfers'][0]['putInfo']['url'])
    # Upload image data and get image ID
    res2 = yield from self._base_request(
        upload_url, 'application/octet-stream', image_data
    )
    return (json.loads(res2.body.decode())['sessionStatus']
            ['additionalInfo']
            ['uploader_service.GoogleRupioAdditionalInfo']
            ['completionInfo']['customerSpecificInfo']['photoid'])
Python
def removeuser(self, conversation_id):
    """Leave group conversation.

    conversation_id must be a valid conversation ID.

    Raises hangups.NetworkError if the request fails.
    """
    client_generated_id = random.randint(0, 2**32)
    res = yield from self._request('conversations/removeuser', [
        self._get_request_header(),
        None, None, None,
        [
            [conversation_id], client_generated_id, 2
        ],
    ])
    res = json.loads(res.body.decode())
    res_status = res['response_header']['status']
    if res_status != 'OK':
        raise exceptions.NetworkError('Unexpected status: {}'
                                      .format(res_status))
Python
def deleteconversation(self, conversation_id):
    """Delete one-to-one conversation.

    conversation_id must be a valid conversation ID.

    Raises hangups.NetworkError if the request fails.
    """
    res = yield from self._request('conversations/deleteconversation', [
        self._get_request_header(),
        [conversation_id],
        # Not sure what timestamp should go here; the last time I tried,
        # the Hangouts client in Gmail sent something like now() - 5 hours.
        parsers.to_timestamp(
            datetime.datetime.now(tz=datetime.timezone.utc)
        ),
        None, [],
    ])
    res = json.loads(res.body.decode())
    res_status = res['response_header']['status']
    if res_status != 'OK':
        raise exceptions.NetworkError('Unexpected status: {}'
                                      .format(res_status))
Python
def updatewatermark(self, conv_id, read_timestamp):
    """Update the watermark (read timestamp) for a conversation.

    Raises hangups.NetworkError if the request fails.
    """
    res = yield from self._request('conversations/updatewatermark', [
        self._get_request_header(),
        # conversation_id
        [conv_id],
        # latest_read_timestamp
        parsers.to_timestamp(read_timestamp),
    ])
    res = json.loads(res.body.decode())
    res_status = res['response_header']['status']
    if res_status != 'OK':
        raise exceptions.NetworkError('Unexpected status: {}'
                                      .format(res_status))
Python
def searchentities(self, search_string, max_results):
    """Search for people.

    Raises hangups.NetworkError if the request fails.
    """
    res = yield from self._request('contacts/searchentities', [
        self._get_request_header(),
        [],
        search_string,
        max_results
    ])
    return json.loads(res.body.decode())
Python
def querypresence(self, chat_id):
    """Check someone's presence status.

    Raises hangups.NetworkError if the request fails.
    """
    res = yield from self._request('presence/querypresence', [
        self._get_request_header(),
        [
            [chat_id]
        ],
        [1, 2, 5, 7, 8]
    ])
    return json.loads(res.body.decode())
Python
def syncrecentconversations(self):
    """List the contents of recent conversations, including messages.

    Similar to syncallnewevents, but appears to return a limited number of
    conversations (20) rather than all conversations in a given date range.

    Raises hangups.NetworkError if the request fails.
    """
    res = yield from self._request('conversations/syncrecentconversations',
                                   [self._get_request_header()])
    return json.loads(res.body.decode())
Python
def sendeasteregg(self, conversation_id, easteregg):
    """Send an easteregg to a conversation.

    easteregg may not be empty.

    Raises hangups.NetworkError if the request fails.
    """
    body = [
        self._get_request_header(),
        [conversation_id],
        [easteregg, None, 1]
    ]
    res = yield from self._request('conversations/easteregg', body)
    res = json.loads(res.body.decode())
    res_status = res['response_header']['status']
    if res_status != 'OK':
        logger.warning('easteregg returned status {}'
                       .format(res_status))
        raise exceptions.NetworkError()
Python
def createconversation(self, chat_id_list, force_group=False):
    """Create a new conversation.

    chat_id_list is a list of the users to invite to the conversation
    (apart from yourself).

    The new conversation ID is returned as res['conversation']['id']['id'].

    Raises hangups.NetworkError if the request fails.
    """
    client_generated_id = random.randint(0, 2**32)
    body = [
        self._get_request_header(),
        1 if len(chat_id_list) == 1 and not force_group else 2,
        client_generated_id,
        None,
        [[str(chat_id), None, None, "unknown", None, []]
         for chat_id in chat_id_list]
    ]
    res = yield from self._request('conversations/createconversation',
                                   body)
    # can return 200 but still contain an error
    res = json.loads(res.body.decode())
    res_status = res['response_header']['status']
    if res_status != 'OK':
        raise exceptions.NetworkError('Unexpected status: {}'
                                      .format(res_status))
    return res
Python
def adduser(self, conversation_id, chat_id_list):
    """Add users to an existing conversation.

    conversation_id must be a valid conversation ID.
    chat_id_list is a list of the users to invite to the conversation.

    Raises hangups.NetworkError if the request fails.
    """
    client_generated_id = random.randint(0, 2**32)
    body = [
        self._get_request_header(),
        None,
        [[str(chat_id), None, None, "unknown", None, []]
         for chat_id in chat_id_list],
        None,
        [
            [conversation_id], client_generated_id, 2, None, 4
        ]
    ]
    res = yield from self._request('conversations/adduser', body)
    # can return 200 but still contain an error
    res = json.loads(res.body.decode())
    res_status = res['response_header']['status']
    if res_status != 'OK':
        raise exceptions.NetworkError('Unexpected status: {}'
                                      .format(res_status))
    return res
Python
def main(loadData: str = None, target: str = 'data/schrijverskabinet.trig'):
    """Main function that starts the scraping and conversion to RDF.

    Args:
        loadData (str, optional): Path to a JSON file with earlier scraped
            data. If supplied, the data will not be fetched again.
            Defaults to None.
        target (str, optional): Destination file location. Defaults to
            'data/schrijverskabinet.trig'.
    """
    if loadData:
        with open(loadData, 'r', encoding='utf-8') as infile:
            DATA = json.load(infile)
    else:
        DATA = {
            'portrets': {},
            'metadata': {
                'date': datetime.datetime.now().isoformat()
            }
        }

        # fetch all urls to scrape
        pages = fetchUrls(url=PORTRETURL)

        # fetch from individual pages
        for n, (url, img) in enumerate(pages, 1):
            print(f"{n}/{len(pages)}\tFetching {url}")
            pageData = fetchPortretPage(url, img)
            DATA['portrets'][url] = pageData

        # dump file
        with open('data/data.json', 'w', encoding='utf-8') as outfile:
            json.dump(DATA, outfile, indent=4)

    #######
    # RDF #
    #######

    toRDF(DATA, target=target)
Python
def fetchUrls(url: str):
    """Fetches portrait data (info + image) from an overview portrait page.

    Args:
        url (str): The url to download

    Returns:
        list: List of tuples with an url + img src for the portraits on
            the overview page.
    """
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    portraits = soup.findAll('a', class_='portrait')

    pagedata = []
    for portrait in portraits:
        url = portrait['href']
        el_img = portrait.find('img', recursive=False)
        img = el_img.get('data-lazy-src', el_img['src'])
        pagedata.append((url, img))

    return pagedata
Python
def normalize_name(name: str):
    """Normalize a name for usage in a URI by replacing spaces with
    hyphens, lowercasing it, transforming it to ASCII, and by stripping it
    of non-alpha characters.

    Args:
        name (str): An entity's name

    Returns:
        str: Normalized name that can be used in a URI

    >>> normalize_name("Arnoud van Halen")
    'arnoud-van-halen'
    """
    name = name.lower().replace(' ', '-')
    name = unidecode(name)
    name = "".join([i for i in name if i in 'abcdefghijklmnopqrstuvwxyz-'])

    return name
Python
def person2uri(name: str, data: dict):
    """Convert a reference to a person (str) to a URIRef.

    Keeps one URI per unique person reference (based on the uniqueness of
    the normalized string). The data argument (dict) is used to store the
    references.

    Args:
        name (str): A person's name
        data (dict): Dictionary to store the reference for reuse

    Returns:
        tuple: URI or BNode to identify a person, and the dictionary
    """
    name = normalize_name(name)
    if name == "onbekend":
        return BNode(), data

    uri = data.get(name, None)
    if uri:
        return URIRef(uri), data
    else:
        data[name] = nsPerson.term(name)
        return data[name], data
Python
def datePortretParser(date: str):
    """Return a PublicationEvent with filled dates for a date string.

    Args:
        date (str): Date reference from the portrait page

    Returns:
        PublicationEvent: PublicationEvent with hasEarliestBeginTimeStamp
            and hasLatestEndTimeStamp properties filled for the
            publication year.
    """
    date = date.strip()

    if date.isdigit():
        begin = date
        end = date
    elif ' en ' in date:
        dateCreated, dateModified = date.split(' en ')
        dateCreated = dateCreated.strip().replace('(', '').replace(')', '')
        dateModified = dateModified.strip().replace('(', '').replace(')', '')

        # for now only creation
        begin, end = dateCreated.split(' – ')
        begin = begin.strip()
        end = end.strip()
    elif ' – ' in date:
        begin, end = date.split(' – ')
        begin = begin.strip()
        end = end.strip()
    else:
        return []

    return [
        PublicationEvent(
            None,
            hasEarliestBeginTimeStamp=Literal(f"{begin}-01-01",
                                              datatype=XSD.date),
            hasLatestEndTimeStamp=Literal(f"{end}-12-31",
                                          datatype=XSD.date))
    ]
Python
def lazy_loads(rawtext: str) -> EntryPointIterator:
    """
    Parse the entry points from the given text lazily.

    :param rawtext:

    :returns: An iterator over ``(group, entry_point)`` tuples, where
        ``entry_point`` is an iterator over ``(name, object)`` tuples.
    """
    lines = filter(None, map(str.strip, rawtext.splitlines()))

    for section, values in itertools.groupby(lines, _Section()):
        if section is not None:
            yield section, map(_parse_value, values)
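lazy_loads drives itertools.groupby with a stateful _Section key object that is defined elsewhere in the module. A minimal sketch of how such a key might behave; the class body below is an assumption, not the library's actual implementation:

class _Section:
    """Hypothetical groupby key: remembers the most recent '[group]' header."""

    def __init__(self):
        self.section = None

    def __call__(self, line: str):
        if line.startswith('[') and line.endswith(']'):
            # Header line: record the new group and return None so the
            # caller's "if section is not None" check skips this group.
            self.section = line[1:-1].strip()
            return None
        return self.section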
Python
def lazy_load(filename: PathLike) -> EntryPointIterator:
    """
    Parse the entry points from the given file lazily.

    :param filename:

    :returns: An iterator over ``(group, entry_point)`` tuples, where
        ``entry_point`` is an iterator over ``(name, object)`` tuples.
    """
    filename = PathPlus(filename)
    return lazy_loads(filename.read_text())
Python
def loads(rawtext: str) -> EntryPointMap:
    """
    Parse the entry points from the given text.

    :param rawtext:

    :returns: A mapping of entry point groups to entry points.

        Entry points in each group are contained in a dictionary mapping
        entry point names to objects.

        :class:`dist_meta.entry_points.EntryPoint` objects can be
        constructed as follows:

        .. code-block:: python

            for name, epstr in distro.get_entry_points().get("console_scripts", {}).items():
                EntryPoint(name, epstr)
    """
    eps = lazy_loads(rawtext)
    return {k: dict(v) for k, v in eps}
Python
def load(filename: PathLike) -> EntryPointMap:
    """
    Parse the entry points from the given file.

    :param filename:

    :returns: A mapping of entry point groups to entry points.

        Entry points in each group are contained in a dictionary mapping
        entry point names to objects.

        :class:`dist_meta.entry_points.EntryPoint` objects can be
        constructed as follows:

        .. code-block:: python

            for name, epstr in distro.get_entry_points().get("console_scripts", {}).items():
                EntryPoint(name, epstr)
    """
    filename = PathPlus(filename)
    return loads(filename.read_text())
Python
def load(self):
    """
    Load the object referred to by this entry point.

    If only a module is indicated by the value, return that module.
    Otherwise, return the named object.
    """
    match = _entry_point_pattern.match(self.value)
    if not match:
        raise ValueError(f"Malformed entry point {self.value!r}")

    module_name, object_name = match.group("modulename", "objectname")
    obj = importlib.import_module(module_name)

    if object_name:
        for attr in object_name.split('.'):
            obj = getattr(obj, attr)

    return obj
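Both load() and extras() match the entry point value against an _entry_point_pattern regex defined elsewhere in the module. A rough sketch of a pattern providing the named groups these methods rely on (modulename, objectname, extras); the library's actual expression may be stricter:

import re

# Hypothetical pattern for values such as "pkg.module:obj.attr [extra1, extra2]".
_entry_point_pattern = re.compile(
    r"(?P<modulename>[\w.]+)"              # dotted module path
    r"(?:\s*:\s*(?P<objectname>[\w.]+))?"  # optional ":object.attr"
    r"\s*(?:\[(?P<extras>[^\]]*)\])?"      # optional "[extra1, extra2]"
    r"\s*$"
)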
Python
def extras(self) -> List[str]:
    """
    Returns the list of extras associated with the entry point.
    """
    match = _entry_point_pattern.match(self.value)
    if not match:
        raise ValueError(f"Malformed entry point {self.value!r}")

    extras = match.group("extras")
    if extras is not None:
        return re.split(r',\s*', extras)

    return []
Python
def copy_asset_files(app: Sphinx, exception: Optional[Exception] = None) -> None:
    """
    Copy asset files to the output.

    :param app: The Sphinx application.
    :param exception: Any exception which occurred and caused Sphinx to abort.
    """
    if exception:  # pragma: no cover
        return

    if app.builder.format.lower() != "html":
        return

    source_dir = PathPlus(sphinx_inline_tabs.__file__).parent / "static"

    css_static_dir = PathPlus(app.builder.outdir) / "_static" / "css"
    css_static_dir.maybe_make(parents=True)
    shutil.copy2(source_dir / "tabs.css", css_static_dir / "inline-tabs.css")

    js_static_dir = PathPlus(app.builder.outdir) / "_static" / "js"
    js_static_dir.maybe_make(parents=True)
    shutil.copy2(source_dir / "tabs.js", js_static_dir / "inline-tabs.js")
Python
def loads(rawtext: str) -> MetadataMapping:
    """
    Parse Python core metadata from the given string.

    :param rawtext:

    :returns: A mapping of the metadata fields, and the long description.
    """
    if DELIMITER in rawtext:
        rawtext, body = rawtext.split(DELIMITER, maxsplit=1)
    else:
        body = ''

    # unfold per RFC 5322 § 2.2.3
    rawtext = _unfold_re.sub(fr"{NEWLINE_MARK}\1", rawtext)

    file_content: List[str] = rawtext.split('\n')
    file_content.reverse()

    fields: MetadataMapping = MetadataMapping()

    while file_content:
        line = file_content.pop()

        if not line:
            continue

        field_name, field_value = divide(line, ':')

        if field_name.lower() != "description":
            fields[field_name] = field_value.replace(NEWLINE_MARK, '').lstrip()
        else:
            # Unwrap
            description_lines = field_value.split(NEWLINE_MARK)
            description_lines = _clean_desc(description_lines, ' ')
            description_lines = _clean_desc(description_lines, '\t')
            description_lines = _clean_desc(description_lines, '|')

            # Remove any trailing or leading blank lines.
            while description_lines and not description_lines[-1]:
                description_lines.pop()
            while description_lines and not description_lines[0]:
                description_lines.pop(0)

            field_value = '\n'.join(description_lines).strip() + '\n'
            fields["Description"] = field_value

    if body.strip():
        if "Description" in fields:
            raise ValueError(
                "A value was given for the 'Description' field "
                "but the body of the file is not empty."
            )
        else:
            fields["Description"] = body.strip() + '\n'

    for required_field in ["Metadata-Version", "Name", "Version"]:
        if required_field not in fields:
            raise MissingFieldError(f"No {required_field!r} field was provided.")

    return fields
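As a quick illustration of loads, a sketch with made-up field values; the required Metadata-Version, Name, and Version fields must all be present or a MissingFieldError is raised:

raw = (
    "Metadata-Version: 2.1\n"
    "Name: example-project\n"
    "Version: 1.2.3\n"
    "Summary: An example package\n"
)
fields = loads(raw)
assert fields["Name"] == "example-project"
assert fields["Version"] == "1.2.3"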
Python
def load(filename: PathLike) -> MetadataMapping:
    """
    Parse Python core metadata from the given file.

    :param filename:

    :returns: A mapping of the metadata fields, and the long description.
    """
    filename = PathPlus(filename)
    return loads(filename.read_text())
Python
def dumps(fields: MetadataMapping) -> str:
    """
    Construct Python core metadata from the given fields.

    :param fields:
    """
    output = MetadataEmitter(fields)

    if "Metadata-Version" in fields:
        version = float(fields["Metadata-Version"])
        output.append(f"Metadata-Version: {fields['Metadata-Version']}")
    else:
        raise MissingFieldError("No 'Metadata-Version' field was provided.")

    if version < 2.1:
        raise ValueError("'dump_metadata' only supports metadata version 2.1 and above.")

    for required_field in ["Name", "Version"]:
        if required_field in fields:
            output.append(f"{required_field}: {fields[required_field]}")
        else:
            raise MissingFieldError(f"No {required_field!r} field was provided.")

    if version >= 2.2:
        output.add_multiple("Dynamic")

    # General Meta
    output.add_single("Summary")
    output.add_single("Author")
    output.add_single("Author-email")
    output.add_single("Maintainer")
    output.add_single("Maintainer-email")
    output.add_single("License")
    output.add_single("Keywords")

    # URLs
    output.add_single("Home-page")
    output.add_single("Download-URL")
    output.add_multiple("Project-URL")

    # Platforms
    output.add_multiple("Platform")
    output.add_multiple("Supported-Platform")
    output.add_multiple("Classifier")

    # Requirements
    output.add_single("Requires-Python")
    output.add_multiple("Requires-Dist")
    output.add_multiple("Provides-Extra")
    output.add_multiple("Requires-External")
    output.add_multiple("Provides-Dist")
    output.add_multiple("Obsoletes-Dist")

    # Description
    output.add_single("Description-Content-Type")
    if "Description" in fields:
        output.add_body(fields["Description"])

    return str(output)
Python
def dump(fields: MetadataMapping, filename: PathLike) -> int: """ Construct Python core metadata from the given fields, and write it to ``filename``. :param fields: :param filename: """ filename = PathPlus(filename) return filename.write_text(dumps(fields))
Python
def read_text(
        self,
        encoding: Optional[str] = "UTF-8",
        errors: Optional[str] = None,
        ) -> str:
    """
    Open the file in text mode, read it, and close the file.

    :param encoding: The encoding to open the file with.
    :param errors:

    :return: The content of the file.

    .. attention::

        This operation requires a value for :attr:`self.distro <.RecordEntry.distro>`.
    """

    if self.distro is None:
        raise ValueError("Cannot read files with 'self.distro = None'")

    return (self.distro.path.parent / self).read_text(encoding=encoding, errors=errors)
Python
def read_bytes(self): """ Open the file in bytes mode, read it, and close the file. :return: The content of the file. .. attention:: This operation requires a value for :attr:`self.distro <.RecordEntry.distro>`. """ if self.distro is None: raise ValueError("Cannot read files with 'self.distro = None'") return (self.distro.path.parent / self).read_bytes()
Python
def digest(self) -> bytes: """ Returns the digest of the hash. This is a bytes object which may contain bytes in the whole range from 0 to 255. """ return urlsafe_b64decode(f"{self.value}==".encode("latin1"))
Python
def hexdigest(self) -> str:
    """
    Like :meth:`self.digest() <.FileHash.digest>` except the digest is returned as a string object of double length, containing only hexadecimal digits.

    This may be used to exchange the value safely in email or other non-binary environments.
    """  # noqa: D400

    return ''.join(f"{x:02x}" for x in self.digest())
Python
def keys(self) -> List[str]: # type: ignore """ Return a list of all field *names*. These will be sorted by insertion order, and may contain duplicates. Any fields deleted and re-inserted are always appended to the field list. """ return [k for k, v in self._fields]
Python
def values(self) -> List[str]: # type: ignore """ Return a list of all field *values*. These will be sorted by insertion order, and may contain duplicates. Any fields deleted and re-inserted are always appended to the field list. """ return [v for k, v in self._fields]
Python
def items(self) -> List[Tuple[str, str]]: # type: ignore """ Get all the fields and their values. These will be sorted by insertion order, and may contain duplicates. Any fields deleted and re-inserted are always appended to the field list. """ return self._fields[:]
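A small sketch of the duplicate-preserving behaviour described above; it assumes item assignment appends a new (name, value) pair rather than replacing an existing one:

# Hedged example; the append-on-assignment behaviour is an assumption.
fields = MetadataMapping()
fields["Classifier"] = "Programming Language :: Python :: 3"
fields["Classifier"] = "Operating System :: OS Independent"
print(fields.keys())   # ['Classifier', 'Classifier']
print(fields.items())  # both (name, value) pairs, in insertion order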
Python
def replace(self, name: str, value: str): """ Replace the value of the first matching field, retaining header order and case. :raises KeyError: If no matching field was found. """ for i, (key, val) in enumerate(self._fields): if key.lower() == name.lower(): self._fields[i] = (name, value) break else: raise KeyError(name)
Python
def add_single(self, field_name: str): """ Add a single value for the field with the given name. :param field_name: """ if field_name in self.fields: self.append(f"{field_name}: {self.fields[field_name]}")
Python
def add_multiple(self, field_name: str): """ Add all values for the "multiple use" field with the given name. :param field_name: """ if field_name in self.fields: for value in self.fields.get_all(field_name, ()): self.append(f"{field_name}: {value}")
Python
def add_body(self, body: str): """ Add a body to the file. In an email message this is the message content itself. :param body: """ self.blankline(ensure_single=True) self.blankline() self.append(body) self.blankline(ensure_single=True)
Python
def url(self) -> str: """ The URL of the remote wheel. """ return "https://foo.bar/wheel.whl"
Python
def _asdict(self) -> Dict[str, Any]: """ Return a new dict which maps field names to their values. """ return dict(zip(self._fields, self))
Python
def _replace(self: _DT, **kwargs) -> _DT: """ Make a new :class:`~.DistributionType` object, of the same type as this one, replacing the specified fields with new values. :param iterable: """ # noqa: D400 result = self._make(map(kwargs.pop, self._fields, self)) if kwargs: raise ValueError(f'Got unexpected field names: {list(kwargs)!r}') return result
Python
def iter_distributions(path: Optional[Iterable[PathLike]] = None) -> Iterator[Distribution]: """ Returns an iterator over installed distributions on ``path``. :param path: The directories entries to search for distributions in. :default path: :py:data:`sys.path` """ if path is None: # pragma: no cover path = sys.path # Distributions found earlier in path will shadow those with the same name found later. # If these distributions used different module names, it may actually be possible to import both, # but in most cases this shadowing will be correct. distro_names_seen = set() for folder in map(PathPlus, path): if not folder.is_dir(): continue for subdir in _iter_dist_infos(folder): distro = Distribution.from_path(subdir) normalized_name = _canonicalize(distro.name) if normalized_name in distro_names_seen: continue distro_names_seen.add(normalized_name) yield distro
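A short usage sketch; it assumes Distribution exposes a name attribute (as used by the shadowing logic above):

# Hedged example: list the distributions visible on sys.path.
for distro in iter_distributions():
    print(distro.name)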
Python
def check_stability(self): ''' check stability of process (max alpha eigenvalue < 1)''' w, v = np.linalg.eig(self.alpha) me = np.amax(np.abs(w)) print('Max eigenvalue: %1.5f' % me) if me >= 1.: print('(WARNING) Unstable.')
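The same spectral-radius check can be reproduced standalone with NumPy; the excitation matrix below is illustrative only:

import numpy as np

# Stable iff the spectral radius of alpha is < 1.
alpha = np.array([[0.5, 0.1],
                  [0.2, 0.3]])
w, v = np.linalg.eig(alpha)
me = np.amax(np.abs(w))
print('Max eigenvalue: %1.5f' % me)  # ~0.57, so the process is stable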
Python
def generate_seq_daily_weekly(self, horizon): '''Generate a sequence based on mu, alpha, omega values. Uses Ogata's thinning method, with some speedups, noted below''' self.data = [] # clear history M = np.sum(self.mu) Dstar = np.sum(self.mu_day) mu_day_max = np.max(self.mu_day) mu_hour_max = np.max(self.mu_hour) while True: s = np.random.exponential(scale=1. / M) day = int(np.floor(s) % 7) hour = int(24*(s - day)) # attribute (weighted random sample, since sum(self.mu)==M) U = np.random.uniform() if U <= self.mu_day[day]/Dstar: event_type = np.random.choice(np.arange(self.dim), 1, p=(self.mu / M)) #[0] self.data.append([s, event_type]) break last_rates = self.mu * self.mu_day[day] * self.mu_hour[hour] last_day = day last_hour = hour event_rejected = False while True: tj, uj = self.data[-1][0], int(self.data[-1][1]) if event_rejected: M = np.sum(rates) + np.sum(self.mu) * \ (mu_day_max*mu_hour_max - self.mu_day[day]*self.mu_hour[hour]) event_rejected = False else: # recalculate M (inclusive of last event) M = mu_day_max*mu_hour_max*np.sum(self.mu) + \ np.sum(last_rates) + self.omega * np.sum(self.alpha[:, uj]) # generate new event s += np.random.exponential(scale=1. / M) day = int(np.floor(s) % 7) hour = int(24*(s - day)) # calc rates at time s (use trick to take advantage of rates at last event) rates = self.mu*self.mu_day[day]*self.mu_hour[hour] + np.exp(-self.omega * (s - tj)) * \ (self.alpha[:, uj].flatten() * self.omega + last_rates \ - self.mu*self.mu_day[last_day]*self.mu_hour[last_hour]) # attribution/rejection test # handle attribution and thinning in one step as weighted random sample diff = M - np.sum(rates) event_type = np.random.choice(np.arange(self.dim + 1), 1, p=(np.append(rates, diff) / M)) if event_type < self.dim: self.data.append([s, event_type]) last_day = day last_hour = hour last_rates = rates.copy() else: event_rejected = True # if past horizon, done if s >= horizon: self.data = np.array(self.data) self.data = self.data[self.data[:, 0] < horizon] return self.data
Python
def check_stability(self, verbose):
    ''' check stability of process (max alpha eigenvalue < 1)'''
    w, v = np.linalg.eig(self.alpha)
    me = np.amax(np.abs(w))
    if verbose:
        print('Max eigenvalue: %1.5f' % me)
        if me >= 1.:
            print('(WARNING) Unstable.')
Python
def generate_seq(self, window=np.inf, N_events=np.inf): '''Generate a sequence based on mu values. ''' def simulate_window(window, seq, t, mu, mu_day, mu_day_max): dim = mu.shape[0] for event_type in range(dim): while t[event_type] < window: t[event_type] += np.random.exponential(scale=1. / (mu_day_max*mu[event_type])) day = int(np.floor(t[event_type]) % 7) p_accept = mu_day[day] / mu_day_max U = np.random.uniform() if p_accept > U: seq.append([t[event_type], event_type]) return t, seq seq = [] mu_day_max = float(np.max(self.mu_day)) t = np.zeros((self.dim,)) if N_events < np.inf: window = 2*np.ceil(float(N_events)/np.sum(self.mu)) while len(seq) < N_events: t, seq = simulate_window(window, seq, t, self.mu, self.mu_day, mu_day_max) window = 2*np.ceil(float(N_events - len(seq))/np.sum(self.mu)) else: t, seq = simulate_window(window, seq, t, self.mu, self.mu_day, mu_day_max) seq = np.array(seq) if len(seq) > 0: seq = seq[seq[:, 0].argsort()] self.data = seq if N_events < np.inf: seq = seq[:N_events,] return seq
Python
def nutrition(POM_data):
    Nutrition_values = pd.read_csv(r"data/nutritionvalues.csv", sep = ";")
    Nutrition_values = Nutrition_values.rename(columns = {"type": "group"})
    N_to_P_conversion = pd.read_csv(data+"FAOnitrogen_protein_conversion_factors.csv", sep = ";")
    Nutrition_values["nitrogen(%)"] = np.where(Nutrition_values["item number"].eq(N_to_P_conversion["item number"]),\
                   Nutrition_values["protein(%)"]/N_to_P_conversion["conversion factor"], 0)
    Protein_percent = Nutrition_values.groupby(["group"]).apply(lambda x: x["protein(%)"].mean())
    Nutrient_percent = Protein_percent.reset_index(level = ["group"])
    Nutrient_percent = Nutrient_percent.fillna(value = 0)
    Nutrient_percent = Nutrient_percent.rename(columns = {0: "%protein"})

    Calorie_percent = Nutrition_values.groupby(["group"]).apply(lambda x: x["calories (100g)"].mean()/100)
    Calorie_percent = Calorie_percent.reset_index(level = ["group"])
    Calorie_percent = Calorie_percent.fillna(value = 0)
    Calorie_percent = Calorie_percent.rename(columns = {0: "calories per g"})

    Fat_percent = Nutrition_values.groupby(["group"]).apply(lambda x: x["fat(%)"].mean())
    Fat_percent = Fat_percent.reset_index(level = ["group"])
    Fat_percent = Fat_percent.fillna(value = 0)
    Fat_percent = Fat_percent.rename(columns = {0: "%fat"})

    Nutrient_percent["calories per g"] = Calorie_percent["calories per g"]
    Nutrient_percent["%fat"] = Fat_percent['%fat']

    POM_data = pd.merge(POM_data, Nutrient_percent, on = ["group"])
    POM_data["% Protein"].fillna(POM_data["%protein"], inplace = True)
    POM_data = POM_data.drop(["%protein"], axis = 1)
    POM_data = POM_data.dropna(subset = ['POM'])

    """*** Calculate protein and calorie demand of each nation ***"""
    POM_data["Protein Needed (g)"] = POM_data["avg weight"] * 1.6
    POM_data["Calories Needed (cal)"] = POM_data["avg weight"] * 15 + 587.5

    """*** Determine the ratio of what people eat based on EAT Lancet Categories ***"""
    POM_data["EAT_group"] = POM_data.apply(lambda x: regions.EAT_Lancet_Group(x["group"]), axis =1)
    POM_data["POM CAL (no waste)"] = POM_data['POM (no waste)']*POM_data['calories per g']
    POM_data["POM fat (no waste)"] = POM_data['POM (no waste)']*POM_data['%fat']/100
    #POM_data["POM EAT Group %"] = POM_data["EAT_group"]
    POM_eat_group = POM_data.groupby(["Area", "EAT_group"]).apply(lambda x: (x["POM CAL (no waste)"])/(x["POM CAL (no waste)"]).sum()) #fix the last definition of POM group %
    POM_eat_group = POM_eat_group.to_frame() #set_index("Index", inplace = True)
    POM_eat_group = POM_eat_group.reset_index(level = ["Area", "EAT_group"])
    POM_eat_group = POM_eat_group.rename(columns={0 : "POM CAL (no waste)"})
    #POM_eat_group.set_index("Index", inplace = True)
    POM_data = POM_data.merge(POM_eat_group["POM CAL (no waste)"], left_index=True, right_index = True)
    POM_data = POM_data.rename(columns={"POM CAL (no waste)_x" : "POM CAL (no waste)",\
                                        "POM CAL (no waste)_y" : "POM EAT Group cal %"})

    POM_eat_group = POM_data.groupby(["Area", "EAT_group"]).apply(lambda x: x["POM (no waste)"]/x["POM (no waste)"].sum()) #fix the last definition of POM group %
    POM_eat_group = POM_eat_group.to_frame() #set_index("Index", inplace = True)
    POM_eat_group = POM_eat_group.reset_index(level = ["Area", "EAT_group"])
    POM_data = POM_data.merge(POM_eat_group["POM (no waste)"], left_index=True, right_index = True)
    POM_data = POM_data.rename(columns={"POM (no waste)_x" : "POM (no waste)",\
                                        "POM (no waste)_y" : "POM EAT Group g %"})
    POM_data["POM EAT Group cal %"] = POM_data["POM EAT Group cal %"] * 100
    POM_data["POM EAT Group g %"] = POM_data["POM EAT Group g %"] * 100

    return POM_data
Python
def _default_name(): """Default Guerilla transform node name when created. This name is used at transform node creation, when no name is provided. Returns: str: Default Guerilla transform node name when created. """ raise NotImplementedError
Python
def create(cls, node, mod, name=None): """Create default typed transform on given Guerilla `node`. Args: node (guerilla.SceneGraphNode): Parent scene graph node. mod (guerilla.Modifier): name (str, optional): Transform node name. Returns: Transform: The created transform object. """ if name is None: name = cls._default_name() transform_node = mod.createnode(name, cls.guerilla_type_name(), node) top_plug = get_top_transform_plug(node) mod.connect(top_plug, transform_node.Out) return cls(transform_node)
Python
def move_top(self): """Move Guerilla transform on top. """ while not self.is_on_top: self.node.movetop()
Python
def move_bottom(self): """Move Guerilla transform on bottom. """ while not self.is_on_bottom: self.node.movedown()
Python
def is_alone(self): """Return if transform is alone. Returns: bool: True if transform is alone. """ return self.is_on_top and self.is_on_bottom
Python
def is_on_top(self): """Return if transform is on top. Returns: bool: True if transform is on top. """ return self.node.In.get() is None
Python
def is_on_bottom(self): """Return if transform is on bottom. Returns: bool: True if transform is on bottom. """ return any(out_plug.name == 'Transform' for out_plug in self.node.Out.getoutputs())
Python
def is_on_middle(self): """Return if transform is in between top and bottom transform. Returns: bool: True if transform is between top and bottom transform. """ return (self.node.In.getinput() is not None and all(out_plug.name != 'Transform' for out_plug in self.node.Out.getoutputs()))
Python
def create(cls, node, mod, name=None): """Create default euler transform on given Guerilla `node`. Args: node (guerilla.SceneGraphNode): Parent scene graph node. mod (guerilla.Modifier): name (str, optional): Transform node name. Returns: Transform: The created euler transform object. """ if name is None: name = cls._default_name() transform_node = mod.createnode(name, cls.guerilla_type_name(), node) # If node has no transform, create transform from current # transformation. if node.Transform.getinput() is None: (sx, sy, sz, rx, ry, rz, tx, ty, tz) = node.getmatrix().decompose() transform_node.SX.set(sx) transform_node.SY.set(sy) transform_node.SZ.set(sz) transform_node.RX.set((rx*180.0)/math.pi) transform_node.RY.set((ry*180.0)/math.pi) transform_node.RZ.set((rz*180.0)/math.pi) transform_node.TX.set(tx) transform_node.TY.set(ty) transform_node.TZ.set(tz) # Get transform node on the top of the stack top_plug = get_top_transform_plug(node) mod.connect(top_plug, transform_node.Out) return cls(transform_node)
Python
def create(cls, node, mod, name=None): """Create default target transform on given Guerilla `node`. Args: node (guerilla.SceneGraphNode): Parent scene graph node. mod (guerilla.Modifier): name (str, optional): Transform node name. Returns: Transform: The created target transform object. """ if name is None: name = cls._default_name() transform_node = mod.createnode(name, cls.guerilla_type_name(), node) target_node = mod.createnode(transform_node.path.replace('|', ''), 'Target', guerilla.Document()) top_plug = get_top_transform_plug(node) # Offset target one in Z. mtx = top_plug.parent.getmatrix() transform = guerilla.transform(mtx.asarray()) transform.translate(guerilla.point3(0.0, 0.0, 1.0)) target_node.Transform.set(transform) mod.connect(transform_node.TargetWorldTransform, target_node._WorldTransform) mod.connect(top_plug, transform_node.Out) return cls(transform_node)
Python
def target(self): """Return the target node connected to the transform. Returns: guerilla.Node: """ return self.node.TargetWorldTransform.getinput().parent
Python
def add(self, node): """Add given node as a constraint to transform (like 'Add' button). Args: node (guerilla.SceneGraphNode): Node to add as constraint. """ self.node.createplug("Weight{}".format(node.path.replace('|', '')), "ConstraintTransformWeightPlug", guerilla.types('float', desc={'min': 0, 'max': 1}), 4, 1) self.node.Objects.adddependency(node.Transform)
Python
def __node_to_class(node): """Return transform class from Guerilla transform node. Args: node (guerilla.Transform): Guerilla transform node. Returns: type: Transform class to instantiate transform object. """ class_name = guerilla.getclassname(node) try: return guerilla_type_name_to_class[class_name] except KeyError: raise TypeError("invalid Guerilla transform type '{}'" .format(class_name))
Python
def _node_to_transform(self, node): """Return transform object from Guerilla transform node. Args: node (guerilla.Transform): Guerilla transform node. Returns: Transform: Transform object. """ return self.__node_to_class(node)(node)
Python
def add(self, type_, name=None): """Add Guerilla transform node with given `type_`. Args: type_ (str): Transform type ('euler', 'target', 'baked', 'contraint', 'shake'). name (str, optional): Guerilla transform node name. Returns: Transform: """ try: cls = type_name_to_class[type_] except KeyError: raise ValueError("invalid transform type argument") with guerilla.Modifier() as mod: return cls.create(self.node, mod, name)
Python
def top(self): """Return the transformation node at the top of the stack. Returns: Transform: Transformation node at the top of the stack. Raises: ValueError: If transform stack is empty. """ transform_node = self.node.gettransform() if transform_node.path == self.node.path: raise ValueError("transform stack is empty for node '{}'" .format(self.node.path)) return self._node_to_transform(transform_node)
Python
def bottom(self): """Return the transformation node at the bottom of the stack. Returns: Transform: Transformation node at the bottom of the stack. Raises: ValueError: If transform stack is empty. """ cur_plug = self.node.Transform in_plug = cur_plug.getinput() if in_plug is None: raise ValueError("transform stack is empty for node '{}'" .format(self.node.path)) return self._node_to_transform(in_plug.parent)
Python
def is_empty(self): """Return if transform stack is empty. Returns: bool: True if transform stack is empty. """ return self.node.gettransform().path == self.node.path
Python
def read_local_market_info_json_file(self, exchange_str): """ Load data from json file in temp path. """ filepath = f"{self.cur_market_info_json_path}/{exchange_str}_market_code_info.json" if os.path.exists(filepath): with open(filepath, mode="r", encoding="UTF-8") as f: data = json.load(f) return data else: print(f"{filepath} 不存在!") return None
Python
def save_csv(self) -> None: """ Save table data into a csv file """ path, _ = QtWidgets.QFileDialog.getSaveFileName( self, "保存数据", "", "CSV(*.csv)") if not path: return with open(path, "w") as f: writer = csv.writer(f, lineterminator="\n") writer.writerow(["参数", self.target_display]) for tp in self.result_values: setting, target_value, _ = tp row_data = [str(setting), str(target_value)] writer.writerow(row_data)
Python
def c_int_types(module) -> List: """Figure out what int types need to be imported from ctypes""" c_types = [] for namespace in _NAMESPACE_TO_TYPE.keys(): for t in module.__fbs_meta__[namespace]: if namespace == "unions": continue if namespace == "enums": py_type = c_int_from_fbs_type(t._FBSType) if py_type: c_types.append(py_type) continue for _, mtype in t._fspec.items(): fbs_type = mtype[1] py_type = c_int_from_fbs_type(fbs_type) if py_type: c_types.append(py_type) return c_types
Python
def check_fbs_unsupported(tree): """Throw ThriftParserError if features unsupported by fbs are used.""" meta = tree.__thrift_meta__ for feature in ["services", "consts", "exceptions"]: if meta[feature]: raise ThriftParserError("%s not supported" % feature)
Python
def load_fp(source, module_name): """Load fbs file like object as a module. """ fbs = parse_fp(source, module_name) sys.modules[module_name] = fbs return fbs
Python
def check_enum(ftype, classes) -> bool: "Check if ftype is in the list of classes" if not classes: return False for c in classes: if c.__name__ == ftype: return True return False
Python
def json_loads(cls, json_string: str) -> "JSONDeSerializable":
    """Deserialize from JSON document string.

    :param json_string: The json string to be deserialized.

    :return: The deserialized object whose type is a subclass of :class:`josepy.JSONDeSerializable`.
    """
    try:
        if len(json_string) == 0:
            # An empty document deserializes to an empty JSON object.
            loads = {}
        else:
            loads = json.loads(json_string)
    except ValueError as error:
        raise josepy.errors.DeserializationError(error)

    return cls.from_json(loads)
Python
def from_data( cls, identifiers: typing.Union[ typing.List[typing.Dict[str, str]], typing.List[str] ] = None, not_before: "datetime.datetime" = None, not_after: "datetime.datetime" = None, ) -> "NewOrder": """Class factory that takes care of parsing the list of *identifiers*. :param identifiers: Either a :class:`list` of :class:`dict` where each dict consists of the keys *type* \ and *value*, or a :class:`list` of :class:`str` that represent the DNS names. :param not_before: The requested *notBefore* field in the certificate. :param not_after: The requested *notAfter* field in the certificate. :return: The new order object. """ kwargs = {} if type(identifiers[0]) is dict: kwargs["identifiers"] = identifiers elif type(identifiers[0]) is str: kwargs["identifiers"] = [ dict(type="dns", value=identifier) for identifier in identifiers ] else: raise ValueError( "Could not decode identifiers list. Must be either List(str) or List(dict) where " "the dict has two keys 'type' and 'value'" ) kwargs["not_before"] = not_before kwargs["not_after"] = not_after return cls(**kwargs)
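A usage sketch based only on the signature above; the domain names are placeholders:

# Hedged example; both accepted identifier styles are shown.
order = NewOrder.from_data(identifiers=["example.com", "www.example.com"])

order = NewOrder.from_data(
    identifiers=[
        {"type": "dns", "value": "example.com"},
        {"type": "dns", "value": "www.example.com"},
    ]
)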
Python
def generate_csr( CN: str, private_key: rsa.RSAPrivateKey, path: Path, names: typing.List[str] ): """Generates a certificate signing request. :param CN: The requested common name. :param private_key: The private key to sign the CSR with. :param path: The path to write the PEM-serialized CSR to. :param names: The requested names in the CSR. :return: The generated CSR. """ csr = ( x509.CertificateSigningRequestBuilder() .subject_name(x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, CN)])) .add_extension( x509.SubjectAlternativeName([x509.DNSName(name) for name in names]), critical=False, ) .sign(private_key, hashes.SHA256()) ) with open(path, "wb") as pem_out: pem_out.write(csr.public_bytes(serialization.Encoding.PEM)) return csr
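A minimal sketch tying the key and CSR helpers together; the file names and domain names are placeholders, and names_of is the helper defined further below:

from pathlib import Path

# Hedged example; generate_rsa_key is defined below in this module.
key = generate_rsa_key(Path("example.key"))
csr = generate_csr(
    "example.com",
    key,
    Path("example.csr"),
    names=["example.com", "www.example.com"],
)
print(names_of(csr))  # {'example.com', 'www.example.com'}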
Python
def generate_rsa_key(path: Path, key_size=2048) -> rsa.RSAPrivateKey: """Generates an RSA private key and saves it to the given path as PEM. :param path: The path to write the PEM-serialized key to. :param key_size: The RSA key size. :return: The generated private key. """ private_key = rsa.generate_private_key(public_exponent=65537, key_size=key_size) pem = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) path.touch(KEY_FILE_MODE) if not path.exists() else path.chmod(KEY_FILE_MODE) with open(path, "wb") as pem_out: pem_out.write(pem) return private_key
Python
def generate_ec_key(path: Path, key_size=256) -> ec.EllipticCurvePrivateKey: """Generates an EC private key and saves it to the given path as PEM. :param path: The path to write the PEM-serialized key to. :param key_size: The EC key size. :return: The generated private key. """ curve = getattr(ec, f"SECP{key_size}R1") private_key = ec.generate_private_key(curve()) pem = private_key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption(), ) path.touch(KEY_FILE_MODE) if not path.exists() else path.chmod(KEY_FILE_MODE) with open(path, "wb") as pem_out: pem_out.write(pem) return private_key
Python
def forwarded_url(request) -> "yarl.URL": """Returns the URL with the correct protocol scheme. Looks for the X-Forwarded-Proto header and replaces the request URL's protocol scheme if applicable. :param request: The request needed to build the URL. :return: The corrected URL. """ if forwarded_protocol := request.headers.get("X-Forwarded-Proto"): return request.url.with_scheme(forwarded_protocol) else: return request.url
Python
def url_for(request, path: str, **kwargs) -> str: """Builds a URL for a given path and optional parameters. :param request: The request needed to build the URL. :param path: The path for which to build a URL. :param kwargs: Optional parameters for URL construction, such as an account ID. :return: The constructed URL. """ return str( forwarded_url(request).with_path( str(request.app.router[path].url_for(**kwargs)) ) )
Python
def next_url(url: str, current_cursor: int) -> str:
    """Returns the URL with its cursor query incremented, given the cursor's current value.

    :param url: The URL whose cursor is to be incremented.
    :param current_cursor: The cursor's current value.
    :return: The URL with its cursor query value incremented.
    """
    return str(yarl.URL(url) % {"cursor": current_cursor + 1})
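For illustration, incrementing the cursor of a hypothetical listing URL:

# Hedged example; the URL is a placeholder.
print(next_url("https://example.com/orders?cursor=0", 0))
# https://example.com/orders?cursor=1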
Python
def generate_cert_from_csr( csr: "cryptography.x509.CertificateSigningRequest", root_cert: "cryptography.x509.Certificate", root_key: rsa.RSAPrivateKey, ) -> "cryptography.x509.Certificate": """Generates a signed certificate from a certificate signing request given the certificate authority's certificate and private key. :param csr: The certificate signing request to generate a certificate from. :param root_cert: The signing CA's root certificate. :param root_key: The signing CA's root key. :return: The generated certificate. """ names = list(names_of(csr)) subject = csr.subject or x509.Name( [x509.NameAttribute(NameOID.COMMON_NAME, names[0])] ) cert = ( x509.CertificateBuilder() .subject_name(subject) .issuer_name(root_cert.issuer) .public_key(csr.public_key()) .serial_number(x509.random_serial_number()) .not_valid_before(datetime.utcnow() - timedelta(days=1)) .not_valid_after(datetime.utcnow() + timedelta(days=29)) .add_extension( x509.SubjectAlternativeName([x509.DNSName(i) for i in names]), critical=False, ) .sign(root_key, hashes.SHA256()) ) return cert
Python
def generate_root_cert(
    path: Path, country: str, state: str, locality: str, org_name: str, common_name: str
) -> typing.Tuple["cryptography.x509.Certificate", rsa.RSAPrivateKey]:
    """Generates a self-signed CA root certificate (RSA).

    :param path: The path of the generated private key. The resulting certificate will be saved to
        the same directory as :code:`root.crt`.
    :param country: The requested *country name* in the certificate.
    :param state: The requested *state or province name* in the certificate.
    :param locality: The requested *locality name* in the certificate.
    :param org_name: The requested *organization name* in the certificate.
    :param common_name: The requested *common name* in the certificate.
    :return: The resulting root certificate and corresponding private key.
    """
    root_key = generate_rsa_key(path)

    subject = x509.Name(
        [
            x509.NameAttribute(NameOID.COUNTRY_NAME, country),
            x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, state),
            x509.NameAttribute(NameOID.LOCALITY_NAME, locality),
            x509.NameAttribute(NameOID.ORGANIZATION_NAME, org_name),
            x509.NameAttribute(NameOID.COMMON_NAME, common_name),
        ]
    )

    root_cert_builder = (
        x509.CertificateBuilder()
        .subject_name(subject)
        .issuer_name(subject)
        .public_key(root_key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(datetime.utcnow())
        .not_valid_after(datetime.utcnow() + timedelta(days=365 * 4))
        .add_extension(x509.BasicConstraints(ca=True, path_length=None), critical=True)
    )

    root_cert = root_cert_builder.sign(root_key, hashes.SHA256())

    pem = root_cert.public_bytes(serialization.Encoding.PEM)
    with open(path.parent / "root.crt", "wb") as pem_out:
        pem_out.write(pem)

    return root_cert, root_key
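A sketch chaining the CA helpers with generate_cert_from_csr above; every name and path is a placeholder and the target directory is assumed to exist:

from pathlib import Path

# Hedged example; issues a leaf certificate signed by the generated root.
root_cert, root_key = generate_root_cert(
    Path("ca/root.key"), "US", "CA", "San Francisco", "Example Org", "Example Root CA"
)
leaf_key = generate_rsa_key(Path("leaf.key"))
csr = generate_csr("example.com", leaf_key, Path("leaf.csr"), names=["example.com"])
leaf_cert = generate_cert_from_csr(csr, root_cert, root_key)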
def names_of(
    csr: "cryptography.x509.CertificateSigningRequest", lower: bool = False
) -> typing.Set[str]:
    """Returns all names contained in the given CSR.
    :param csr: The CSR whose names to extract.
    :param lower: True if the names should be returned in lowercase.
    :return: Set of the contained identifier strings.
    """
    names = [
        v.value
        for v in csr.subject.get_attributes_for_oid(x509.oid.NameOID.COMMON_NAME)
    ]
    names.extend(
        csr.extensions.get_extension_for_class(
            x509.SubjectAlternativeName
        ).value.get_values_for_type(x509.DNSName)
    )
    return set([name.lower() if lower else name for name in names])
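A short usage sketch, assuming csr is a parsed CertificateSigningRequest (for example one returned by pem_split() below):

identifiers = names_of(csr, lower=True)
# For a CSR with CN example.org and SAN entries example.org and www.example.org,
# this yields {"example.org", "www.example.org"}.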
def pem_split( pem: str, ) -> typing.List[ typing.Union[ "cryptography.x509.CertificateSigningRequest", "cryptography.x509.Certificate" ] ]: """Parses a PEM encoded string and returns all contained CSRs and certificates. :param pem: The concatenated PEM encoded CSRs and certificates. :return: List of all certificate signing requests and certificates found in the PEM string. """ _PEM_TO_CLASS = { b"CERTIFICATE": x509.load_pem_x509_certificate, b"CERTIFICATE REQUEST": x509.load_pem_x509_csr, b"EC PRIVATE KEY": lambda x: serialization.load_pem_private_key( x, password=None ), b"RSA PRIVATE KEY": lambda x: serialization.load_pem_private_key( x, password=None ), } _PEM_RE = re.compile( b"-----BEGIN (?P<cls>" + b"|".join(_PEM_TO_CLASS.keys()) + b""")-----""" + b"""\r? .+?\r? -----END \\1-----\r?\n?""", re.DOTALL, ) return [ _PEM_TO_CLASS[match.groupdict()["cls"]](match.group(0)) for match in _PEM_RE.finditer(pem.encode()) ]
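A minimal usage sketch for pem_split; the file name is illustrative only:

with open("fullchain.pem") as f:
    objects = pem_split(f.read())

certs = [o for o in objects if isinstance(o, x509.Certificate)]
csrs = [o for o in objects if isinstance(o, x509.CertificateSigningRequest)]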
async def connect(self):
    """Handles connecting the solver implementation to its remote API.
    Does not need to be overridden if no initial connection is required.
    """
    pass
async def complete_challenge( self, key: josepy.jwk.JWK, identifier: acme.messages.Identifier, challenge: acme.messages.ChallengeBody, ): """Complete the given challenge. This method should complete the given challenge and then delay returning until the server is allowed to check for completion. :param key: The client's account key. :param identifier: The identifier that is associated with the challenge. :param challenge: The challenge to be completed. :raises: :class:`~acmetk.client.exceptions.CouldNotCompleteChallenge` If the challenge completion attempt failed. """ pass
async def cleanup_challenge( self, key: josepy.jwk.JWK, identifier: acme.messages.Identifier, challenge: acme.messages.ChallengeBody, ): """Performs cleanup for the given challenge. This method should de-provision the resource that was provisioned for the given challenge. It is called once the challenge is complete, i.e. its status has transitioned to :class:`~acmetk.models.challenge.ChallengeStatus.VALID` or :class:`~acmetk.models.challenge.ChallengeStatus.INVALID`. This method should not assume that the challenge was successfully completed, meaning it should silently return if there is nothing to clean up. :param key: The client's account key. :param identifier: The identifier that is associated with the challenge. :param challenge: The challenge to clean up after. """ pass
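A hedged sketch of what a concrete dns-01 solver implementing the coroutines above might look like. The class name and the _set_txt_record/_delete_txt_record helpers are assumptions for illustration, not part of the actual solvers; only the acme library calls used to derive the TXT record name and value are standard.

import asyncio


class ExampleDns01Solver:
    """Illustrative solver; assumes the base class requires only the three coroutines above."""

    async def connect(self):
        # No remote session is needed in this sketch.
        pass

    async def complete_challenge(self, key, identifier, challenge):
        # For dns-01, the TXT record name and value follow from the challenge
        # and the account key via the acme library.
        name = challenge.chall.validation_domain_name(identifier.value)
        value = challenge.chall.validation(key)
        await self._set_txt_record(name, value)  # hypothetical DNS API helper
        await asyncio.sleep(30)  # crude wait for DNS propagation before the CA checks

    async def cleanup_challenge(self, key, identifier, challenge):
        name = challenge.chall.validation_domain_name(identifier.value)
        await self._delete_txt_record(name)  # hypothetical DNS API helper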
async def complete_challenge(
    self,
    key: josepy.jwk.JWK,
    identifier: acme.messages.Identifier,
    challenge: acme.messages.ChallengeBody,
):
    """Does not complete the given challenge.
    Instead, this method only logs the mock completion attempt and returns immediately.
    :param key: The client's account key.
    :param identifier: The identifier that is associated with the challenge.
    :param challenge: The challenge to be completed.
    """
    logger.debug(
        f"(not) solving challenge {challenge.uri}, type {challenge.chall.typ}, identifier {identifier}"
    )
    # await asyncio.sleep(1)
async def cleanup_challenge(
    self,
    key: josepy.jwk.JWK,
    identifier: acme.messages.Identifier,
    challenge: acme.messages.ChallengeBody,
):
    """Performs cleanup for the given challenge.
    Does not actually do any cleanup; it just logs the mock cleanup attempt and returns immediately.
    :param key: The client's account key.
    :param identifier: The identifier that is associated with the challenge.
    :param challenge: The challenge to clean up after.
    """
    logger.debug(
        f"(not) cleaning up after challenge {challenge.uri}, type {challenge.chall.typ}"
    )
async def connect(self): """Connects to the InfoBlox API. This method must be called before attempting to complete challenges. """ self._conn = await self._loop.run_in_executor( None, connector.Connector, self._creds )
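The Connector constructor of infoblox-client is blocking, which is presumably why it is wrapped in run_in_executor here so the event loop is not stalled during the initial WAPI handshake. A sketch of the credential mapping it expects, with placeholder values; the exact keys passed via self._creds are an assumption based on infoblox-client's documentation:

creds = {
    "host": "infoblox.example.org",  # WAPI grid master, placeholder
    "username": "acme-svc",          # placeholder service account
    "password": "********",
}
# Equivalent blocking call that run_in_executor defers to a worker thread:
# connector.Connector(creds)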