Python
def _generate_front_and_back_keyword_lists(
        sorted_keywords: List[Dict[str, Any]], title_to_process: str,
        title_words: List[str], description_words: List[str],
        product_types_words: List[str],
        language: str) -> Tuple[List[str], List[str], str]:
    """Generates two lists of keywords: those in the front and back of the title.

    Args:
        sorted_keywords: A list of dictionaries of keywords and weights, sorted
            by descending weight.
        title_to_process: The product title being optimized.
        title_words: A list of semantically tokenized words in the title.
        description_words: A list of semantically tokenized description words.
        product_types_words: A list of semantically tokenized product types
            words.
        language: The configured language code.

    Returns:
        A list of matching keywords near the front of the title, a list of
        matching keywords near the back of the title, and a title with the
        matching keywords removed.
    """
    keywords_visible_to_user = []
    keywords_not_visible_to_user = []
    title_without_keywords = title_to_process

    # Identify matching keywords in the title and populate separate lists
    # of keywords near the front and keywords not near the front.
    for keyword_and_weight in sorted_keywords:
        try:
            keyword = keyword_and_weight['keyword']
        except KeyError:
            continue

        # Creates a title for "moved" keywords, i.e. keywords removed from the
        # title and added to the front, used in the case of too-long titles.
        if (keyword in title_words or keyword in description_words
                or keyword in product_types_words):
            title_without_keywords = title_without_keywords.replace(keyword, '')

            if language == constants.LANGUAGE_CODE_JA:
                user_visible_text = title_to_process[:_TITLE_CHARS_VISIBLE_TO_USER_JA]
            else:
                user_visible_text = title_to_process[:_TITLE_CHARS_VISIBLE_TO_USER_EN]

            if keyword in user_visible_text:
                keywords_visible_to_user.append(keyword)
            else:
                keywords_not_visible_to_user.append(keyword)

        if _num_keywords_to_prepend_meets_or_exceeds_limit(
                keywords_not_visible_to_user):
            break

    return (keywords_visible_to_user, keywords_not_visible_to_user,
            title_without_keywords)
Python
def _generate_list_of_keywords_to_prepend(
        keywords_visible_to_user: List[str],
        keywords_not_visible_to_user: List[str], title: str,
        language: str) -> List[str]:
    """Determines which performant keywords need to be prepended and sorts them.

    Args:
        keywords_visible_to_user: keywords that were found near the front of
            the given title.
        keywords_not_visible_to_user: keywords that were not found near the
            front of the given title.
        title: the title to append performant keywords to.
        language: The configured language code.

    Returns:
        A list of high-performing keywords.
    """
    keywords_to_be_prepended = keywords_not_visible_to_user

    for skipped_keyword in keywords_visible_to_user:
        temp_prepended_title = _generate_prepended_title(
            keywords_to_be_prepended, title)

        if language == constants.LANGUAGE_CODE_JA:
            front_of_title = temp_prepended_title[:_TITLE_CHARS_VISIBLE_TO_USER_JA]
        else:
            front_of_title = temp_prepended_title[:_TITLE_CHARS_VISIBLE_TO_USER_EN]

        # The skipped keyword was pushed out too far due to the prepend, so
        # include it in the list of to-be-prepended keywords.
        if skipped_keyword not in front_of_title:
            keywords_to_be_prepended.append(skipped_keyword)
            keywords_visible_to_user.remove(skipped_keyword)

    return keywords_to_be_prepended
Python
def _generate_prepended_title(performant_keywords_to_prepend: List[str],
                              title: str) -> str:
    """Prepends keywords in square brackets to the title.

    Args:
        performant_keywords_to_prepend: keywords to prepend to the title.
        title: the original title.

    Returns:
        The title with keywords in square brackets prepended to it.
    """
    formatted_keywords = [
        '[' + keyword + ']'
        for keyword in performant_keywords_to_prepend[:_MAX_KEYWORDS_PER_TITLE]
    ]
    prepended_title = f'{"".join(formatted_keywords)} {title}'
    return ' '.join(prepended_title.split())
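For illustration, a quick check of the formatting behaviour. This is a hypothetical call: the keyword list and title are invented, and it assumes _MAX_KEYWORDS_PER_TITLE is 3 in this module.

# Hypothetical example; assumes _MAX_KEYWORDS_PER_TITLE = 3 in this module.
# Only the first three keywords survive the slice, each is wrapped in square
# brackets, and runs of whitespace in the result are collapsed.
title = 'Acme  Wireless  Mouse'
keywords = ['ergonomic', 'bluetooth', 'rechargeable', 'silent']
print(_generate_prepended_title(keywords, title))
# -> '[ergonomic][bluetooth][rechargeable] Acme Wireless Mouse'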
Python
def validate_wheel(self, whl_path):
    """
    Verify that the given wheel can safely be included in the current installer.
    If so, the given wheel info will be included in the given wheel info array.
    If not, an exception will be raised.
    """
    wheel_name = os.path.basename(whl_path)
    distribution = wheel_name.split('-', 1)[0]

    # Check that a distribution of same name has not been included before
    if distribution in self.got_distributions:
        prev_path = self.got_distributions[distribution]
        raise ValueError('Multiple wheels specified for {}:\n {}\n {}'.format(
            distribution, prev_path, whl_path))

    # Check that the wheel is compatible with the installer environment
    scores = self.scorer.score(wheel_name)
    if any(s == 0 for s in scores):
        raise ValueError('Wheel {} is not compatible with Python {}, {}'
                         .format(wheel_name, self.scorer.py_version,
                                 self.scorer.platform))

    self.got_distributions[distribution] = whl_path
Python
def check(self, config, section_name):
    """
    validates the section, if this is the correct validator for it
    returns True if this is the correct validator for this section
    raises InvalidConfig if something inside the section is wrong
    """
    self._check_mandatory_fields(section_name, config[section_name])
    self._check_invalid_keys(section_name, config[section_name])
Python
def read_extra_files(cfg):
    """Read the list of extra files from the config file.

    Returns a list of 2-tuples: (file, destination_directory), which can be
    passed as the ``extra_files`` parameter to :class:`nsist.InstallerBuilder`.
    """
    lines = cfg.get('Include', 'files', fallback='').splitlines()
    pairs = []
    for line in lines:
        if '>' in line:
            file, dest = line.rsplit('>', 1)
            pairs.append((file.strip(), dest.strip()))
        else:
            pairs.append((line, '$INSTDIR'))

    return pairs
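A hypothetical round trip, using a plain configparser.ConfigParser in place of the project's config object; the file names and destination are invented:

# Hypothetical usage sketch; file names are invented.
import configparser

cfg = configparser.ConfigParser()
cfg.read_string("""
[Include]
files = README.txt
    data/config.json > $INSTDIR\\etc
""")
print(read_extra_files(cfg))
# -> [('README.txt', '$INSTDIR'), ('data/config.json', '$INSTDIR\\etc')]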
Python
def read_shortcuts_config(cfg):
    """Read and verify the shortcut definitions from the config file.

    There is one shortcut per 'Shortcut <name>' section, and one for the
    Application section.

    Returns a dict of dicts with the fields from the shortcut sections.
    The optional 'icon' and 'console' fields will be filled with their
    default values if not supplied.
    """
    shortcuts = {}

    def _check_shortcut(name, sc, section):
        alternatives = ['entry_point', 'script', 'target']
        has_alternatives = sum(1 for k in alternatives if k in sc)
        if has_alternatives < 1:
            raise InvalidConfig('Section [{}] has none of {}.'.format(
                section, ', '.join(alternatives)))
        elif has_alternatives > 1:
            raise InvalidConfig('Section [{}] has more than one of {}.'.format(
                section, ', '.join(alternatives)))

        # Copy to a regular dict so it can hold a boolean value
        sc2 = dict(sc)
        if 'icon' not in sc2:
            from . import DEFAULT_ICON
            sc2['icon'] = DEFAULT_ICON
        sc2['console'] = sc.getboolean('console', fallback=False)
        sc2['parameters'] = sc.get('parameters', fallback='')
        if 'extra_preamble' in sc2:
            if 'entry_point' not in sc2:
                raise InvalidConfig('extra_preamble is only valid with entry_point')
            preamb_file = sc2['extra_preamble']
            if not os.path.isfile(preamb_file):
                raise InvalidConfig('extra_preamble file %r does not exist' %
                                    preamb_file)
        shortcuts[name] = sc2

    for section in cfg.sections():
        if section.startswith("Shortcut "):
            name = section[len("Shortcut "):]
            _check_shortcut(name, cfg[section], section)

    appcfg = cfg['Application']
    _check_shortcut(appcfg['name'], appcfg, 'Application')

    return shortcuts
Python
def read_commands_config(cfg):
    """Read and verify the command definitions from the config file.

    Returns a dict of dicts, keyed by command name, containing the values from
    the command sections of the config file.
    """
    commands = {}
    for section in cfg.sections():
        if section.startswith("Command "):
            name = section[len("Command "):]
            commands[name] = cc = dict(cfg[section])
            cc['console'] = cfg[section].getboolean('console', fallback=True)
            if ('extra_preamble' in cc) and \
                    not os.path.isfile(cc['extra_preamble']):
                raise InvalidConfig('extra_preamble file %r does not exist' %
                                    cc['extra_preamble'])

    return commands
Python
def _get_devices(hass, discovery_type, keys, interface):
    """Get the HomeMatic devices for given discovery_type."""
    device_arr = []

    for key in keys:
        device = hass.data[DATA_HOMEMATIC].devices[interface][key]
        class_name = device.__class__.__name__
        metadata = {}

        # Class not supported by discovery type
        if class_name not in HM_DEVICE_TYPES[discovery_type]:
            continue

        # Load metadata needed to generate a parameter list
        if discovery_type == DISCOVER_SENSORS:
            metadata.update(device.SENSORNODE)
        elif discovery_type == DISCOVER_BINARY_SENSORS:
            metadata.update(device.BINARYNODE)
        else:
            metadata.update({None: device.ELEMENT})

        # Generate options for 1...n elements with 1...n parameters
        for param, channels in metadata.items():
            if param in HM_IGNORE_DISCOVERY_NODE and class_name not in \
                    HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS.get(param, []):
                continue

            # Add devices
            _LOGGER.debug("%s: Handling %s: %s: %s",
                          discovery_type, key, param, channels)
            for channel in channels:
                name = _create_ha_id(
                    name=device.NAME, channel=channel, param=param,
                    count=len(channels))
                unique_id = _create_ha_id(
                    name=key, channel=channel, param=param,
                    count=len(channels))
                device_dict = {
                    CONF_PLATFORM: "homematic",
                    ATTR_ADDRESS: key,
                    ATTR_INTERFACE: interface,
                    ATTR_NAME: name,
                    ATTR_CHANNEL: channel,
                    ATTR_UNIQUE_ID: unique_id,
                }
                if param is not None:
                    device_dict[ATTR_PARAM] = param

                # Add new device
                try:
                    DEVICE_SCHEMA(device_dict)
                    device_arr.append(device_dict)
                except vol.MultipleInvalid as err:
                    _LOGGER.error("Invalid device config: %s", str(err))

    return device_arr
Python
def _create_ha_id(name, channel, param, count):
    """Generate a unique entity id."""
    # HMDevice is a simple device
    if count == 1 and param is None:
        return name

    # Has multiple elements/channels
    if count > 1 and param is None:
        return "{} {}".format(name, channel)

    # With multiple parameters on first channel
    if count == 1 and param is not None:
        return "{} {}".format(name, param)

    # Multiple parameters with multiple channels
    if count > 1 and param is not None:
        return "{} {} {}".format(name, channel, param)
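The four branches above correspond to the following hypothetical calls; the device names are invented:

# Hypothetical calls covering the four cases (device names invented).
print(_create_ha_id("Kitchen Switch", 1, None, 1))        # 'Kitchen Switch'
print(_create_ha_id("Kitchen Switch", 2, None, 3))        # 'Kitchen Switch 2'
print(_create_ha_id("Climate Sensor", 1, "HUMIDITY", 1))  # 'Climate Sensor HUMIDITY'
print(_create_ha_id("Climate Sensor", 2, "HUMIDITY", 3))  # 'Climate Sensor 2 HUMIDITY'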
Python
def id(self) -> Optional[str]:  # pylint: disable=invalid-name
    """Return id of the auth provider.

    Optional, can be None.
    """
    return self.config.get(CONF_ID)
Python
async def async_credentials(self) -> List[Credentials]:
    """Return all credentials of this provider."""
    users = await self.store.async_get_users()
    return [
        credentials
        for user in users
        for credentials in user.credentials
        if (credentials.auth_provider_type == self.type and
            credentials.auth_provider_id == self.id)
    ]
Python
async def auth_provider_from_config(
        hass: HomeAssistant, store: AuthStore,
        config: Dict[str, Any]) -> Optional[AuthProvider]:
    """Initialize an auth provider from a config."""
    provider_name = config[CONF_TYPE]
    module = await load_auth_provider_module(hass, provider_name)

    if module is None:
        return None

    try:
        config = module.CONFIG_SCHEMA(config)  # type: ignore
    except vol.Invalid as err:
        _LOGGER.error('Invalid configuration for auth provider %s: %s',
                      provider_name, humanize_error(config, err))
        return None

    return AUTH_PROVIDERS[provider_name](hass, store, config)
Python
async def async_credential_flow(
        self, context: Optional[Dict]) -> 'LoginFlow':
    """Return a flow to login."""
    assert context is not None
    users = await self.store.async_get_users()
    available_users = {user.id: user.name
                       for user in users
                       if not user.system_generated and user.is_active}

    return LoginFlow(self, cast(str, context.get('ip_address')),
                     available_users)
Python
def async_validate_access(self, ip_address: str) -> None:
    """Make sure the access is from trusted networks.

    Raise InvalidAuthError if not.
    Raise InvalidAuthError if trusted_networks is not configured.
    """
    hass_http = getattr(self.hass, 'http', None)  # type: HomeAssistantHTTP

    if not hass_http or not hass_http.trusted_networks:
        raise InvalidAuthError('trusted_networks is not configured')

    if not any(ip_address in trusted_network for trusted_network
               in hass_http.trusted_networks):
        raise InvalidAuthError('Not in trusted_networks')
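The membership test relies on the containment semantics of the standard ipaddress module (an address object is "in" a network object). Below is a standalone sketch of just that check with invented addresses; it is not the provider's own code:

# Standalone sketch of the containment test; addresses are invented.
import ipaddress

trusted_networks = [ipaddress.ip_network('192.168.0.0/24'),
                    ipaddress.ip_network('10.0.0.0/8')]
addr = ipaddress.ip_address('192.168.0.17')
print(any(addr in network for network in trusted_networks))  # True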
Python
async def async_step_init(
        self, user_input: Optional[Dict[str, str]] = None) \
        -> Dict[str, Any]:
    """Handle the step of the form."""
    errors = {}

    try:
        self._auth_provider.async_validate_access(self._ip_address)
    except InvalidAuthError:
        errors['base'] = 'invalid_auth'
        return self.async_show_form(
            step_id='init',
            data_schema=None,
            errors=errors,
        )

    if user_input is not None:
        user_id = user_input['user']
        if user_id not in self._available_users:
            errors['base'] = 'invalid_auth'

        if not errors:
            return self.async_create_entry(
                title=self._auth_provider.name,
                data=user_input
            )

    schema = {'user': vol.In(self._available_users)}

    return self.async_show_form(
        step_id='init',
        data_schema=vol.Schema(schema),
        errors=errors,
    )
Python
def shared_dataset(data_xy, borrow=True):
    """Function that loads the dataset into shared variables

    The reason we store our dataset in shared variables is to allow
    Theano to copy it into the GPU memory (when code is run on GPU).
    Since copying data into the GPU is slow, copying a minibatch every time
    it is needed (the default behaviour if the data is not in a shared
    variable) would lead to a large decrease in performance.
    """
    data_x, data_y = data_xy
    shared_x = theano.shared(numpy.asarray(data_x,
                                           dtype=theano.config.floatX),
                             borrow=borrow)
    shared_y = theano.shared(numpy.asarray(data_y,
                                           dtype=theano.config.floatX),
                             borrow=borrow)
    # When storing data on the GPU it has to be stored as floats,
    # therefore we will store the labels as ``floatX`` as well
    # (``shared_y`` does exactly that). But during our computations
    # we need them as ints (we use labels as indices, and if they are
    # floats it doesn't make sense), therefore instead of returning
    # ``shared_y`` we will have to cast it to int. This little hack
    # lets us get around this issue.
    return shared_x, T.cast(shared_y, 'int32')
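A minimal usage sketch with synthetic data; it assumes Theano is installed and that the tutorial's usual imports (numpy, theano, and theano.tensor as T) are in scope:

# Synthetic data; assumes `import numpy`, `import theano`, and
# `import theano.tensor as T` as in the surrounding tutorial code.
data_x = numpy.random.rand(100, 784)        # 100 fake flattened 28x28 images
data_y = numpy.random.randint(0, 10, 100)   # 100 fake class labels
train_x, train_y = shared_dataset((data_x, data_y))
# train_x holds floatX data on the device; train_y is an int32 view for indexing.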
Python
def write_raw_mne(nfname, mneraw,
                  split_data_channels=False, split_stimuli=False):
    """
    Writes the provided Raw MNE structure to a NIX file with the given name.

    :param nfname: Name for the NIX file to write to. Existing file will be
                   overwritten.
    :param mneraw: An MNE Raw structure (any mne.io.BaseRaw subclass).
    :param split_data_channels: If True, each raw data channel will be stored
                                in a separate DataArray.
    :param split_stimuli: If True, stimuli will be split into separate
                          MultiTags based on the stimulus type (label).
    :rtype: None
    """
    mneinfo = mneraw.info
    extrainfo = mneraw._raw_extras

    # Create NIX file
    nf = nix.File(nfname, nix.FileMode.Overwrite)

    # Write Data to NIX
    block = nf.create_block(DATA_BLOCK_NAME, DATA_BLOCK_TYPE,
                            compression=nix.Compression.DeflateNormal)
    block.create_group(RAW_DATA_GROUP_NAME, RAW_DATA_GROUP_TYPE)

    if split_data_channels:
        write_multi_da(mneraw, block)
    else:
        write_single_da(mneraw, block)

    if mneraw.annotations:
        write_stim_tags(mneraw, block, split_stimuli)

    # Write metadata to NIX
    # info dictionary
    infomd = nf.create_section("Info", "File metadata")
    create_md_tree(infomd, mneinfo, block)

    # extras
    if len(extrainfo) > 1:
        for idx, emd_i in enumerate(extrainfo):
            extrasmd = nf.create_section(f"Extras-{idx}",
                                         "Raw Extras metadata")
            create_md_tree(extrasmd, emd_i, block)
    elif extrainfo:
        extrasmd = nf.create_section("Extras", "Raw Extras metadata")
        create_md_tree(extrasmd, extrainfo[0], block)

    # all done
    nf.close()
    print(f"Created NIX file at '{nfname}'")
    print("Done")
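A hedged usage sketch; the file names are invented, and mne plus this module's nixio dependency must be installed:

# Usage sketch; file names are invented.
import mne

raw = mne.io.read_raw_fif("sample_raw.fif", preload=True)
write_raw_mne("sample_raw.nix", raw,
              split_data_channels=False, split_stimuli=False)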
Python
def import_nix(nixfilename):
    """
    Import a NIX file (generated with mnetonix.py) into an MNE Raw structure.

    :param nixfilename: Path to the NIX file to be loaded.
    :rtype: mne.io.RawArray
    """
    nixfile = nix.File(nixfilename, mode=nix.FileMode.ReadOnly)

    # root, ext = os.path.splitext(nixfilename)
    # bvfilename = root + os.extsep + "vhdr"
    # bvfile = mne.io.read_raw_brainvision(bvfilename, stim_channel=False)

    # Create MNE Info object
    infosec = nixfile.sections["Info"]
    nchan = infosec["nchan"]
    sfreq = infosec["sfreq"]
    info = mne.create_info(nchan, sfreq)

    nixinfodict = md_to_dict(infosec)
    info.update(nixinfodict)

    # Read raw data into MNE objects
    datagroup = nixfile.blocks[DATA_BLOCK_NAME].groups[RAW_DATA_GROUP_NAME]
    if len(datagroup.data_arrays) > 1:
        # Data split: One DataArray per channel. Merging
        nixrawdata = merge_data_arrays(datagroup.data_arrays)
    else:
        nixrawdata = datagroup.data_arrays[0][:]

    # Create MNE RawArray
    mnerawdata = mne.io.RawArray(nixrawdata, info)

    # Add annotations: Stimuli from MultiTags
    mtags = datagroup.multi_tags
    annotations = create_mne_annotations(mtags)
    mnerawdata.set_annotations(annotations)

    nixfile.close()

    return mnerawdata
Python
def parse_record(raw_record, _mode, dtype):
    """Parse CIFAR-10 image and label from a raw record."""
    image = tf.reshape(raw_record, [_HEIGHT, _WIDTH, _NUM_CHANNELS])

    # normalise images to range 0-1
    image = tf.cast(image, dtype)
    image = tf.divide(image, 255.0)

    return image, image
Python
def maybe_download_and_extract(data_dir):
    """Download the Kodak test images if the data directory does not exist."""
    if os.path.exists(data_dir):
        print("dir: ", data_dir, " EXIST!!")
        return
    else:
        os.makedirs(data_dir)

    filepath = data_dir
    url = "http://www.cs.albany.edu/~xypan/research/img/Kodak/kodim{}.png"

    def _progress(count, block_size, total_size):
        sys.stdout.write('\r>> Downloading %s %.1f%%' % (
            filepath, 100.0 * count * block_size / total_size))
        sys.stdout.flush()

    for i in range(25):
        print(i)
        print(url.format(str(i + 1).zfill(2)))
        filepath, _ = urllib.request.urlretrieve(
            url.format(str(i + 1).zfill(2)),
            "kodim{}.png".format(str(i + 1).zfill(2)), _progress)
        print(filepath, _)

    print()
    statinfo = os.stat(filepath)
    print('Successfully downloaded', filepath, statinfo.st_size, 'bytes.')
Python
def parse_record(raw_record, _mode, dtype):
    """Parse CIFAR-10 image and label from a raw record."""
    # Convert bytes to a vector of uint8 that is record_bytes long.
    record_vector = tf.io.decode_raw(raw_record, tf.uint8)

    # The first byte represents the label, which we convert from uint8 to
    # int32 and then to one-hot.
    label = tf.cast(record_vector[0], tf.int32)

    # The remaining bytes after the label represent the image, which we
    # reshape from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(record_vector[1:_RECORD_BYTES],
                             [_NUM_CHANNELS, _HEIGHT, _WIDTH])

    # Convert from [depth, height, width] to [height, width, depth], and cast
    # as float32.
    image = tf.cast(tf.transpose(depth_major, [1, 2, 0]), tf.float32)

    # normalise images to range 0-1
    image = image / 255.0

    image = tf.cast(image, dtype)

    return image, image
Python
def maybe_download_and_extract(data_dir):
    """Download and extract the tarball from Alex's website."""
    if not os.path.exists(data_dir):
        os.makedirs(data_dir)

    filename = DATA_URL.split('/')[-1]
    filepath = os.path.join(data_dir, filename)

    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (
                filename, 100.0 * count * block_size / total_size))
            sys.stdout.flush()

        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')

    extracted_dir_path = os.path.join(data_dir, 'cifar-10-batches-bin')
    if not os.path.exists(extracted_dir_path):
        tarfile.open(filepath, 'r:gz').extractall(data_dir)
Python
def real_awgn(x, stddev):
    """Implements the real additive white gaussian noise channel.

    Args:
        x: channel input symbols
        stddev: standard deviation of noise

    Returns:
        y: noisy channel output symbols
    """
    # additive white gaussian noise
    awgn = tf.random.normal(tf.shape(x), 0, stddev, dtype=tf.float32)
    y = x + awgn

    return y
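The noise standard deviation is usually derived from a target SNR. A common conversion, assuming the channel input has unit average power; the helper below is an illustration, not part of the module:

# Hypothetical helper: target SNR in dB -> noise stddev, assuming the
# channel input has unit average power.
import numpy as np
import tensorflow as tf

def stddev_from_snr_db(snr_db):
    noise_power = 10 ** (-snr_db / 10)
    return np.sqrt(noise_power)

x = tf.random.normal([4, 8])                 # dummy unit-power symbols
y = real_awgn(x, stddev_from_snr_db(10.0))   # output of a ~10 dB SNR channel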
Python
def fading(x, stddev, h=None):
    """Implements the fading channel with multiplicative fading and
    additive white gaussian noise.

    Args:
        x: channel input symbols
        stddev: standard deviation of noise

    Returns:
        y: noisy channel output symbols
    """
    # channel gain
    if h is None:
        h = tf.complex(
            tf.random.normal([tf.shape(x)[0], 1], 0, 1 / np.sqrt(2)),
            tf.random.normal([tf.shape(x)[0], 1], 0, 1 / np.sqrt(2)),
        )

    # additive white gaussian noise
    awgn = tf.complex(
        tf.random.normal(tf.shape(x), 0, 1 / np.sqrt(2)),
        tf.random.normal(tf.shape(x), 0, 1 / np.sqrt(2)),
    )

    return (h * x + stddev * awgn), h
Python
def phase_invariant_fading(x, stddev, h=None):
    """Implements the fading channel with multiplicative fading and
    additive white gaussian noise. Also assumes that phase shift
    introduced by the fading channel is known at the receiver, making
    the model equivalent to a real slow fading channel.

    Args:
        x: channel input symbols
        stddev: standard deviation of noise

    Returns:
        y: noisy channel output symbols
    """
    # channel gain
    if h is None:
        n1 = tf.random.normal([tf.shape(x)[0], 1], 0, 1 / np.sqrt(2),
                              dtype=tf.float32)
        n2 = tf.random.normal([tf.shape(x)[0], 1], 0, 1 / np.sqrt(2),
                              dtype=tf.float32)
        h = tf.sqrt(tf.square(n1) + tf.square(n2))

    # additive white gaussian noise
    awgn = tf.random.normal(tf.shape(x), 0, stddev / np.sqrt(2),
                            dtype=tf.float32)

    return (h * x + awgn), h
Python
def _parse_example_proto(example_serialized):
    """Parses an Example proto containing a training example of an image.

    The output of the build_image_data.py image preprocessing script is a
    dataset containing serialized Example protocol buffers. Each Example proto
    contains the following fields (values are included as examples):

        image/height: 462
        image/width: 581
        image/colorspace: 'RGB'
        image/channels: 3
        image/class/label: 615
        image/class/synset: 'n03623198'
        image/class/text: 'knee pad'
        image/object/bbox/xmin: 0.1
        image/object/bbox/xmax: 0.9
        image/object/bbox/ymin: 0.2
        image/object/bbox/ymax: 0.6
        image/object/bbox/label: 615
        image/format: 'JPEG'
        image/filename: 'ILSVRC2012_val_00041207.JPEG'
        image/encoded: <JPEG encoded string>

    Args:
        example_serialized: scalar Tensor tf.string containing a serialized
            Example protocol buffer.

    Returns:
        image_buffer: Tensor tf.string containing the contents of a JPEG file.
        label: Tensor tf.int32 containing the label.
        bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
            where each coordinate is [0, 1) and the coordinates are arranged as
            [ymin, xmin, ymax, xmax].
    """
    # Dense features in Example proto.
    feature_map = {
        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                            default_value=''),
        'image/class/label': tf.FixedLenFeature([], dtype=tf.int64,
                                                default_value=-1),
        'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                               default_value=''),
    }
    sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
    # Sparse features in Example proto.
    feature_map.update(
        {k: sparse_float32 for k in ['image/object/bbox/xmin',
                                     'image/object/bbox/ymin',
                                     'image/object/bbox/xmax',
                                     'image/object/bbox/ymax']})

    features = tf.parse_single_example(example_serialized, feature_map)
    label = tf.cast(features['image/class/label'], dtype=tf.int32)

    xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
    ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
    xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
    ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)

    # Note that we impose an ordering of (y, x) just to make life difficult.
    bbox = tf.concat([ymin, xmin, ymax, xmax], 0)

    # Force the variable number of bounding boxes into the shape
    # [1, num_boxes, coords].
    bbox = tf.expand_dims(bbox, 0)
    bbox = tf.transpose(bbox, [0, 2, 1])

    return features['image/encoded'], label, bbox
Python
def parse_record(raw_record, _mode, dtype):
    """Parses a record containing a training example of an image.

    The input record is parsed into a label and image, and the image is passed
    through preprocessing steps (cropping, flipping, and so on).

    Args:
        raw_record: scalar Tensor tf.string containing a serialized Example
            protocol buffer.
        is_training: A boolean denoting whether the input is for training.
        dtype: data type to use for images/features.

    Returns:
        Tuple with processed image tensor and one-hot-encoded label tensor.
    """
    image_buffer, label, bbox = _parse_example_proto(raw_record)

    image = imgnet_preprocessing.preprocess_image(
        image_buffer=image_buffer,
        bbox=bbox,
        output_height=_DEFAULT_IMAGE_SIZE,
        output_width=_DEFAULT_IMAGE_SIZE,
        num_channels=_NUM_CHANNELS,
        is_training=False)  # as we are not classifying, do minimal processing
    image = tf.cast(image, dtype)

    return image, image
Python
def _ConvertArgsDictToList(statement, args):
    """Convert a given args mapping to a list of positional arguments.

    Takes a statement written in 'pyformat' style which uses mapping keys from
    the given args mapping, and returns the list of args values that would be
    used for interpolation if the statement were written in a positional
    'format' style instead.

    For example, consider the following pyformat string and a mapping used for
    interpolation:

        '%(foo)s %(bar)s' % {'foo': 1, 'bar': 2}

    Given these parameters, this function would return the following output:

        [1, 2]

    This could then be used for interpolation if the given string were instead
    expressed using a positional format style:

        '%s %s' % (1, 2)

    Args:
        statement: The statement, possibly containing pyformat style tokens.
        args: Mapping to pull values from.

    Returns:
        A list containing values from the given args mapping.
    """
    access_logger = _AccessLogger()
    statement % access_logger
    return [args[key] for key in access_logger.accessed_keys]
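_AccessLogger is defined elsewhere in this module; conceptually it only has to record which keys the % operator asks for, in order. A minimal sketch of such a class follows (an assumption about its shape, not the module's actual implementation):

# Minimal sketch; the module's real _AccessLogger may differ. The % operator
# calls __getitem__ once per '%(key)s' token, in order, so accessed_keys ends
# up in the positional order of the statement.
class _AccessLoggerSketch:
    def __init__(self):
        self.accessed_keys = []

    def __getitem__(self, key):
        self.accessed_keys.append(key)
        return ''  # the interpolated string itself is discarded

logger = _AccessLoggerSketch()
'%(foo)s %(bar)s' % logger
print(logger.accessed_keys)  # ['foo', 'bar']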
Python
def _GetJdbcTypeForArg(self, arg):
    """Get the JDBC type which corresponds to the given Python object type."""
    arg_jdbc_type = _PYTHON_TYPE_TO_JDBC_TYPE.get(type(arg))
    if arg_jdbc_type:
        return arg_jdbc_type
    for python_t, jdbc_t in _PYTHON_TYPE_TO_JDBC_TYPE.items():
        if isinstance(arg, python_t):
            return jdbc_t
    try:
        return self._GetJdbcTypeForArg(arg[0])
    except TypeError:
        raise TypeError('unknown type')
Python
def _EncodeVariable(self, arg):
    """Converts a variable to a type and value.

    Args:
        arg: Any tuple, string, numeric, or datetime object.

    Returns:
        A (int, str) tuple, representing a JDBC type and encoded value.

    Raises:
        TypeError: The argument is not a recognized type.
    """
    arg_jdbc_type = self._GetJdbcTypeForArg(arg)
    value = self._conn.encoders[type(arg)](arg, self._conn.encoders)
    return arg_jdbc_type, value
Python
def _DecodeVariable(self, datatype, value):
    """Converts a type and value to a variable.

    Args:
        datatype: An integer.
        value: A string.

    Returns:
        An object of some appropriate type.

    Raises:
        InterfaceError: datatype is not a recognized JDBC type.
        ValueError: The value could not be parsed.
    """
    converter = self._conn.converter.get(datatype)
    if converter is None:
        raise InterfaceError('unknown JDBC type %d' % datatype)
    return converter(value)
Python
def _AddBindVariablesToRequest(self, statement, args,
                               bind_variable_factory):
    """Add args to the request BindVariableProto list.

    Args:
        statement: The SQL statement.
        args: Sequence of arguments to turn into BindVariableProtos.
        bind_variable_factory: A callable which returns new BindVariableProtos.

    Raises:
        InterfaceError: Unknown type used as a bind variable.
    """
    if isinstance(args, dict):
        args = _ConvertArgsDictToList(statement, args)
    for i, arg in enumerate(args):
        bv = bind_variable_factory()
        bv.position = i + 1
        if arg is None:
            bv.type = jdbc_type.NULL
        else:
            try:
                bv.type, bv.value = self._EncodeVariable(arg)
            except TypeError:
                raise InterfaceError('unknown type %s for arg %d' %
                                     (type(arg), i))
Python
def _DoExec(self, request):
    """Send an ExecRequest and handle the response.

    Args:
        request: The sql_pb2.ExecRequest to send.

    Returns:
        The client_pb2.ResultProto returned by the server.

    Raises:
        DatabaseError: A SQL exception occurred.
        OperationalError: RPC problem.
    """
    response = self._conn.MakeRequest('Exec', request)
    result = response.result
    if result.HasField('sql_exception'):
        raise DatabaseError('%d: %s' % (result.sql_exception.code,
                                        result.sql_exception.message))

    self._rows = collections.deque()

    if result.rows.columns:
        self._description = []
        for column in result.rows.columns:
            self._description.append(
                (column.label, column.type, column.display_size, None,
                 column.precision, column.scale, column.nullable))
    else:
        self._description = None

    if result.rows.tuples:
        assert self._description, 'Column descriptions do not exist.'
        column_names = [col[0] for col in self._description]
        self._rowcount = len(result.rows.tuples)
        for tuple_proto in result.rows.tuples:
            row = []
            nulls = set(tuple_proto.nulls)
            value_index = 0
            for i, column_descr in enumerate(self._description):
                if i in nulls:
                    row.append(None)
                else:
                    row.append(self._DecodeVariable(
                        column_descr[1], tuple_proto.values[value_index]))
                    value_index += 1
            if self._use_dict_cursor:
                assert len(column_names) == len(row)
                row = dict(zip(column_names, row))
            else:
                row = tuple(row)
            self._rows.append(row)
    else:
        self._rowcount = result.rows_updated

    if result.generated_keys:
        self.lastrowid = long(result.generated_keys[-1])

    return result
Python
def execute(self, statement, args=None):
    """Prepares and executes a database operation (query or command).

    Args:
        statement: A string, a SQL statement.
        args: A sequence or mapping of arguments matching the statement's
            bind variables, if any.

    Raises:
        InterfaceError: Unknown type used as a bind variable.
        DatabaseError: A SQL exception occurred.
        OperationalError: RPC problem.
    """
    self._CheckOpen()

    request = sql_pb2.ExecRequest()
    request.options.include_generated_keys = True
    if args is not None:
        if not hasattr(args, '__iter__'):
            args = [args]
        self._AddBindVariablesToRequest(
            statement, args, request.bind_variable.add)
    request.statement = _ConvertFormatToQmark(statement, args)
    self._DoExec(request)
def executemany(self, statement, seq_of_args):
  """Prepares and executes a database operation for given parameter sequences.

  Args:
    statement: A string, a SQL statement.
    seq_of_args: A sequence, each entry of which is a sequence or mapping of
      arguments matching the statement's bind variables, if any.

  Raises:
    InterfaceError: Unknown type used as a bind variable.
    DatabaseError: A SQL exception occurred.
    OperationalError: RPC problem.
  """
  self._CheckOpen()
  request = sql_pb2.ExecRequest()
  request.options.include_generated_keys = True
  args = None
  for args in seq_of_args:
    if not hasattr(args, '__iter__'):
      args = [args]
    bbv = request.batch.batch_bind_variable.add()
    self._AddBindVariablesToRequest(
        statement, args, bbv.bind_variable.add)
  request.statement = _ConvertFormatToQmark(statement, args)
  result = self._DoExec(request)
  self._rowcount = sum(result.batch_rows_updated)
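# Illustrative usage sketch, not part of the original module. It assumes a
# DB-API style connect() callable for this package is passed in, and that the
# instance/database names and the 'greetings' table are placeholders.
def _BatchInsertExample(connect):
  conn = connect(instance='example-instance', database='guestbook')
  try:
    cursor = conn.cursor()
    # executemany() adds one batch bind-variable group per parameter row and
    # sums batch_rows_updated into the cursor's row count.
    cursor.executemany(
        'INSERT INTO greetings (author, body) VALUES (%s, %s)',
        [('alice', 'hello'), ('bob', 'hi there')])
    conn.commit()
  finally:
    conn.close()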
def fetchone(self):
  """Fetches the next row of a query result set.

  Returns:
    A sequence, or None when no more data is available.

  Raises:
    InternalError: The cursor has been closed, or no statement has been
      executed yet.
  """
  self._CheckOpen()
  if self._rowcount == -1:
    raise InternalError('fetchone() called before execute')
  try:
    return self._rows.popleft()
  except IndexError:
    return None
def fetchmany(self, size=None):
  """Fetches the next set of rows of a query result.

  Args:
    size: The maximum number of rows to return; by default, self.arraysize.

  Returns:
    A sequence of sequences, or an empty sequence when no more data is
    available.

  Raises:
    InternalError: The cursor has been closed, or no statement has been
      executed yet.
  """
  self._CheckOpen()
  if self._rowcount == -1:
    raise InternalError('fetchmany() called before execute')
  if size is None:
    size = self.arraysize
  if size >= len(self._rows):
    return self.fetchall()
  else:
    result = []
    for _ in xrange(size):
      result.append(self._rows.popleft())
    return tuple(result)
def fetchall(self):
  """Fetches all remaining rows of a query result.

  Returns:
    A sequence of sequences, or an empty sequence when no more data is
    available.

  Raises:
    InternalError: The cursor has been closed, or no statement has been
      executed yet.
  """
  self._CheckOpen()
  if self._rowcount == -1:
    raise InternalError('fetchall() called before execute')
  rows = self._rows
  self._rows = collections.deque()
  return tuple(rows)
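# Illustrative usage sketch, not part of the original module. 'conn' is
# assumed to be an already-open connection whose cursor() method returns the
# Cursor type defined here; the table and column names are placeholders.
def _QueryExample(conn):
  cursor = conn.cursor()
  cursor.execute('SELECT author, body FROM greetings WHERE author = %s',
                 ('alice',))
  first = cursor.fetchone()       # a single row, or None
  next_two = cursor.fetchmany(2)  # up to two more rows
  the_rest = cursor.fetchall()    # whatever remains
  return first, next_two, the_rest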
def OpenConnection(self):
  """Opens a connection to SQL Service."""
  request = sql_pb2.OpenConnectionRequest()
  request.client_type = client_pb2.CLIENT_TYPE_PYTHON_DBAPI
  prop = request.property.add()
  prop.key = 'autoCommit'
  prop.value = 'false'
  if self._user:
    prop = request.property.add()
    prop.key = 'user'
    prop.value = self._user
  if self._password:
    prop = request.property.add()
    prop.key = 'password'
    prop.value = self._password
  if self._database:
    prop = request.property.add()
    prop.key = 'database'
    prop.value = self._database

  self.SetupClient()
  response = self.MakeRequest('OpenConnection', request)
  self._connection_id = response.connection_id
def SetupClient(self):
  """Sets up a transport client to communicate with rdbms.

  This is a template method to provide subclasses with a hook to perform any
  necessary client initialization while opening a connection to rdbms.
  """
  pass
def close(self):
  """Makes the connection and all its cursors unusable.

  The connection will be unusable from this point forward; an Error (or
  subclass) exception will be raised if any operation is attempted with the
  connection.
  """
  self.CheckOpen()
  request = sql_pb2.CloseConnectionRequest()
  try:
    self.MakeRequest('CloseConnection', request)
  except DatabaseError:
    pass
  self._connection_id = None
def commit(self):
  """Commits any pending transaction to the database.

  Raises:
    DatabaseError: A SQL exception occurred.
    OperationalError: RPC problem.
  """
  self.CheckOpen()
  request = sql_pb2.ExecOpRequest()
  request.op.type = client_pb2.OpProto.COMMIT
  self.MakeRequest('ExecOp', request)
def rollback(self):
  """Rolls back any pending transaction to the database.

  Raises:
    DatabaseError: A SQL exception occurred.
    OperationalError: RPC problem.
  """
  self.CheckOpen()
  request = sql_pb2.ExecOpRequest()
  request.op.type = client_pb2.OpProto.ROLLBACK
  self.MakeRequest('ExecOp', request)
def autocommit(self, value):
  """Changes whether there is an implicit commit after each statement.

  By default, transactions must be explicitly committed.

  Args:
    value: A boolean.

  Raises:
    DatabaseError: A SQL exception occurred.
    OperationalError: RPC problem.
  """
  self.CheckOpen()
  request = sql_pb2.ExecOpRequest()
  request.op.type = client_pb2.OpProto.SET_AUTO_COMMIT
  request.op.auto_commit = value
  self.MakeRequest('ExecOp', request)
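# Illustrative usage sketch, not part of the original module. Because
# OpenConnection() sets autoCommit to 'false', changes are only visible after
# an explicit commit(); rollback() discards them. The table is a placeholder,
# and DatabaseError is the exception type used throughout this module.
def _TransferExample(conn, amount):
  cursor = conn.cursor()
  try:
    cursor.execute('UPDATE accounts SET balance = balance - %s WHERE id = 1',
                   (amount,))
    cursor.execute('UPDATE accounts SET balance = balance + %s WHERE id = 2',
                   (amount,))
    conn.commit()
  except DatabaseError:
    conn.rollback()
    raise
  # Alternatively, conn.autocommit(True) makes each statement commit
  # implicitly, at the cost of losing multi-statement atomicity.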
def cursor(self, **kwargs):
  """Returns a cursor for the current connection.

  Args:
    **kwargs: Optional keyword args to pass into cursor.

  Returns:
    A Cursor object.
  """
  return Cursor(self, **kwargs)
def MakeRequest(self, stub_method, request):
  """Makes an ApiProxy request, and possibly raises an appropriate exception.

  Args:
    stub_method: A string, the name of the method to call.
    request: A protobuf; 'instance' and 'connection_id' will be set when
      available.

  Returns:
    A protobuf.

  Raises:
    DatabaseError: Error from SQL Service server.
  """
  request.instance = self._instance
  if self._connection_id is not None:
    request.connection_id = self._connection_id
  if stub_method in ('Exec', 'ExecOp', 'GetMetadata'):
    self._idempotent_request_id += 1
    request.request_id = self._idempotent_request_id
    response = self._MakeRetriableRequest(stub_method, request)
  else:
    response = self.MakeRequestImpl(stub_method, request)
  if (hasattr(response, 'sql_exception') and
      response.HasField('sql_exception')):
    raise DatabaseError('%d: %s' % (response.sql_exception.code,
                                    response.sql_exception.message))
  return response
def _Retry(self, stub_method, request_id, absolute_deadline_seconds):
  """Retries request with the given request id.

  Continues to retry until either the deadline has expired or the response
  has been received.

  Args:
    stub_method: A string, the name of the original method that triggered
      the retry.
    request_id: An integer, the request id used in the original request.
    absolute_deadline_seconds: An integer, absolute deadline in seconds.

  Returns:
    A protobuf.

  Raises:
    DatabaseError: If the ExecOpResponse contains a SqlException that is not
      related to retry.
    InternalError: If the ExecOpResponse is not valid.
  """
  request = sql_pb2.ExecOpRequest()
  request.op.type = client_pb2.OpProto.RETRY
  request.op.request_id = request_id
  request.connection_id = self._connection_id
  request.instance = self._instance
  while True:
    seconds_remaining = absolute_deadline_seconds - time.clock()
    if seconds_remaining <= 0:
      raise InternalError('Request [%d] timed out' % (request_id))
    time.sleep(min(self._retry_interval_seconds, seconds_remaining))
    self._idempotent_request_id += 1
    request.request_id = self._idempotent_request_id
    response = self.MakeRequestImpl('ExecOp', request)
    if not response.HasField('sql_exception'):
      return self._ConvertCachedResponse(stub_method, response)
    sql_exception = response.sql_exception
    if (sql_exception.application_error_code !=
        client_error_code_pb2.SqlServiceClientError.ERROR_RESPONSE_PENDING):
      raise DatabaseError('%d: %s' % (response.sql_exception.code,
                                      response.sql_exception.message))
def _ConvertCachedResponse(self, stub_method, exec_op_response):
  """Converts the cached response or RPC error.

  Args:
    stub_method: A string, the name of the original method that triggered
      the retry.
    exec_op_response: A protobuf, the retry response that contains either the
      RPC error or the cached response.

  Returns:
    A protobuf, the cached response.

  Raises:
    DatabaseError: If the cached response contains SqlException.
    InternalError: If a cached RpcErrorProto exists.
  """
  if exec_op_response.HasField('cached_rpc_error'):
    raise InternalError('%d: %s' % (
        exec_op_response.cached_rpc_error.error_code,
        exec_op_response.cached_rpc_error.error_message))
  if not exec_op_response.HasField('cached_payload'):
    raise InternalError('Invalid exec op response for retry request')
  if stub_method == 'Exec':
    response = sql_pb2.ExecResponse()
  elif stub_method == 'ExecOp':
    response = sql_pb2.ExecOpResponse()
  elif stub_method == 'GetMetadata':
    response = sql_pb2.MetadataResponse()
  else:
    raise InternalError('Found unexpected stub_method: %s' % (stub_method))
  response.ParseFromString(exec_op_response.cached_payload)
  if response.HasField('sql_exception'):
    raise DatabaseError('%d: %s' % (response.sql_exception.code,
                                    response.sql_exception.message))
  return response
def ping(self, reconnect=False):
  """Checks whether or not the connection to the server is working.

  If it has gone down, an automatic reconnection is attempted.

  This function can be used by clients that remain idle for a long while, to
  check whether or not the server has closed the connection and reconnect if
  necessary.

  Non-standard. You should assume that ping() performs an implicit rollback;
  use only when starting a new transaction. You have been warned.

  Args:
    reconnect: Whether to perform an automatic reconnection.

  Raises:
    DatabaseError: The connection to the server is not working.
  """
  self.CheckOpen()
  request = sql_pb2.ExecOpRequest()
  request.op.type = client_pb2.OpProto.PING
  try:
    self.MakeRequest('ExecOp', request)
  except DatabaseError:
    if not reconnect:
      raise
    self._connection_id = None
    self.OpenConnection()
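# Illustrative usage sketch, not part of the original module. ping() may
# perform an implicit rollback, so call it only between transactions, for
# example after a long idle period and before starting new work.
def _EnsureConnectionAlive(conn):
  # With reconnect=True a failed PING triggers OpenConnection() on the same
  # object; with the default reconnect=False the DatabaseError propagates.
  conn.ping(reconnect=True)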
def SetOAuthUser(self,
                 email=_OAUTH_EMAIL,
                 domain=_OAUTH_AUTH_DOMAIN,
                 user_id=_OAUTH_USER_ID,
                 is_admin=False,
                 scopes=None):
  """Set test OAuth user.

  Determines what user is returned by requests to GetOAuthUser.

  Args:
    email: Email address of oauth user. None indicates that no oauth user
      is authenticated.
    domain: Domain of oauth user.
    user_id: User ID of oauth user.
    is_admin: Whether the user is an admin.
    scopes: List of scopes that user is authenticated against.
  """
  self.__email = email
  self.__domain = domain
  self.__user_id = user_id
  self.__is_admin = is_admin
  self.__scopes = scopes
def _AddHostToContinueURL(self, continue_url, request_id):
  """Adds the request host to the continue url if no host is specified.

  Args:
    continue_url: the URL which may or may not have a host specified
    request_id: A unique string identifying the request associated with the
      API call.

  Returns:
    string
  """
  (protocol, host, path, parameters, query, fragment) = (
      urlparse.urlparse(continue_url))

  if host and protocol:
    return continue_url

  protocol, host, _, _, _, _ = urlparse.urlparse(
      self.request_data.get_request_url(request_id))

  if path == '':
    path = '/'

  return urlparse.urlunparse(
      (protocol, host, path, parameters, query, fragment))
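# Standalone sketch, not part of the original module: the same
# urlparse/urlunparse recombination as above, but taking the request URL as a
# plain argument instead of looking it up through request_data.
import urlparse

def _AddHostToContinueURLSketch(continue_url, request_url):
  scheme, host, path, params, query, fragment = urlparse.urlparse(
      continue_url)
  if host and scheme:
    return continue_url
  scheme, host, _, _, _, _ = urlparse.urlparse(request_url)
  if path == '':
    path = '/'
  return urlparse.urlunparse((scheme, host, path, params, query, fragment))

# Example:
#   _AddHostToContinueURLSketch('/done?x=1', 'http://app.example.com/_ah/login')
#   returns 'http://app.example.com/done?x=1'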
def AddFieldsToDocumentPb(doc_id, fields, document):
  """Add the id and fields to document.

  Args:
    doc_id: The document id.
    fields: List of tuples of field name, value and optionally type.
    document: The document to add the fields to.
  """
  if doc_id is not None:
    document.set_id(doc_id)
  for field_tuple in fields:
    name = field_tuple[0]
    value = field_tuple[1]
    field = document.add_field()
    field.set_name(name)
    field_value = field.mutable_value()
    field_value.set_string_value(value)
    if len(field_tuple) > 2:
      field_value.set_type(field_tuple[2])
def GetFieldValue(field):
  """Returns the value of a field as the correct type."""
  value = field.value().string_value()
  value_type = field.value().type()

  if value_type in TEXT_DOCUMENT_FIELD_TYPES:
    return value
  if value_type is document_pb.FieldValue.DATE:
    return DeserializeDate(value)
  if value_type is document_pb.FieldValue.NUMBER:
    return float(value)
  raise TypeError('No conversion defined for type %s' % value_type)
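# Illustrative sketch, not part of the original module. The 'fields' argument
# to AddFieldsToDocumentPb is a list of (name, value[, type]) tuples.
# document_pb.FieldValue is referenced above; document_pb.Document is assumed
# here to be the document message class from the same proto module, and all
# names and values are placeholders.
def _BuildExampleDocument():
  fields = [
      ('subject', 'hello world'),  # no explicit type: stored as text
      ('published', '2011-03-28', document_pb.FieldValue.DATE),
      ('score', '42', document_pb.FieldValue.NUMBER),
  ]
  document = document_pb.Document()
  AddFieldsToDocumentPb('doc-1', fields, document)
  # GetFieldValue() converts on the way back out: the 'score' field above
  # would be returned as the float 42.0.
  return document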
def _MatchPhrase(self, field, match, document):
  """Match a textual field with a phrase query node."""
  phrase = self._SplitPhrase(query_parser.GetQueryNodeText(match))
  if not phrase:
    return True
  field_text = self._parser.TokenizeText(field.value().string_value())

  posting = None
  for post in self._PostingsForFieldToken(field.name(), phrase[0].chars):
    if post.doc_id == document.id():
      posting = post
      break
  if not posting:
    return False

  def ExtractWords(token_list):
    return (token.chars for token in token_list)

  for position in posting.positions:
    match_words = zip(ExtractWords(field_text[position:]),
                      ExtractWords(phrase))
    if len(match_words) != len(phrase):
      continue

    match = True
    for doc_word, match_word in match_words:
      if doc_word != match_word:
        match = False

    if match:
      return True
  return False
def _MatchTextField(self, field, match, document):
  """Check if a textual field matches a query tree node."""
  if (match.getType() in (QueryParser.TEXT, QueryParser.NAME) or
      match.getType() in search_util.NUMBER_QUERY_TYPES):
    matching_docids = [
        post.doc_id for post in self._PostingsForFieldToken(
            field.name(), query_parser.GetQueryNodeText(match))]
    return document.id() in matching_docids

  if match.getType() is QueryParser.PHRASE:
    return self._MatchPhrase(field, match, document)

  if match.getType() is QueryParser.CONJUNCTION:
    return all(self._MatchTextField(field, child, document)
               for child in match.children)

  if match.getType() is QueryParser.DISJUNCTION:
    return any(self._MatchTextField(field, child, document)
               for child in match.children)

  if match.getType() is QueryParser.NEGATION:
    return not self._MatchTextField(field, match.children[0], document)

  return False
def _MatchDateField(self, field, match, document):
  """Check if a date field matches a query tree node."""
  return self._MatchComparableField(
      field, match, search_util.DeserializeDate,
      search_util.TEXT_QUERY_TYPES, document)
def _MatchComparableField(
    self, field, match, cast_to_type, query_node_types, document):
  """A generic method to test matching for comparable types.

  Comparable types are defined to be anything that supports <, >, <=, >=,
  == and !=. For our purposes, this is numbers and dates.

  Args:
    field: The document_pb.Field to test
    match: The query node to match against
    cast_to_type: The type to cast the node string values to
    query_node_types: The query node types that would be valid matches
    document: The document that the field is in

  Returns:
    True iff the field matches the query.

  Raises:
    UnsupportedOnDevError: Raised when an unsupported operator is used, or
      when the query node is of the wrong type.
  """
  field_val = cast_to_type(field.value().string_value())

  op = QueryParser.EQ
  if match.getType() in query_node_types:
    try:
      match_val = cast_to_type(query_parser.GetQueryNodeText(match))
    except ValueError:
      return False
  elif match.children:
    op = match.getType()
    try:
      match_val = cast_to_type(
          query_parser.GetQueryNodeText(match.children[0]))
    except ValueError:
      return False
  else:
    return False

  if op is QueryParser.EQ:
    return field_val == match_val
  if op is QueryParser.NE:
    return field_val != match_val
  if op is QueryParser.GT:
    return field_val > match_val
  if op is QueryParser.GE:
    return field_val >= match_val
  if op is QueryParser.LT:
    return field_val < match_val
  if op is QueryParser.LE:
    return field_val <= match_val
  raise search_util.UnsupportedOnDevError(
      'Operator %s not supported for numerical fields on development server.'
      % match.getText())
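# Standalone sketch, not part of the original module: the same comparison
# dispatch as _MatchComparableField, written over plain Python values with
# operator-name strings standing in for the QueryParser token types.
import operator

_COMPARISON_SKETCH = {
    'EQ': operator.eq,
    'NE': operator.ne,
    'GT': operator.gt,
    'GE': operator.ge,
    'LT': operator.lt,
    'LE': operator.le,
}

def _CompareSketch(field_val, op_name, match_val):
  if op_name not in _COMPARISON_SKETCH:
    raise ValueError('Operator %s not supported' % op_name)
  return _COMPARISON_SKETCH[op_name](field_val, match_val)

# Example: _CompareSketch(3.0, 'GE', 2.5) returns True.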
def _MatchField(self, field_query_node, match, document):
  """Check if a field matches a query tree."""
  if isinstance(field_query_node, str):
    field = search_util.GetFieldInDocument(document, field_query_node)
  else:
    field = search_util.GetFieldInDocument(
        document, field_query_node.getText())
  if not field:
    return False

  if field.value().type() in search_util.TEXT_DOCUMENT_FIELD_TYPES:
    return self._MatchTextField(field, match, document)

  if field.value().type() in search_util.NUMBER_DOCUMENT_FIELD_TYPES:
    return self._MatchNumericField(field, match, document)

  if field.value().type() == document_pb.FieldValue.DATE:
    return self._MatchDateField(field, match, document)

  raise search_util.UnsupportedOnDevError(
      'Matching to field type of field "%s" (type=%d) is unsupported on '
      'dev server' % (field.name(), field.value().type()))
def _CheckMatch(self, node, document):
  """Check if a document matches a query tree."""
  if node.getType() is QueryParser.CONJUNCTION:
    return all(self._CheckMatch(child, document) for child in node.children)
  if node.getType() is QueryParser.DISJUNCTION:
    return any(self._CheckMatch(child, document) for child in node.children)
  if node.getType() is QueryParser.NEGATION:
    return not self._CheckMatch(node.children[0], document)
  if node.getType() is QueryParser.RESTRICTION:
    field, match = node.children
    return self._MatchField(field, match, document)
  return self._MatchGlobal(node, document)
def Normalize(filters, orders, properties):
  """Normalizes filter and order query components.

  The resulting components have the same effect as the given components if
  used in a query.

  Returns:
    (filters, orders) the reduced set of filters and orders
  """
  eq_properties = set()
  inequality_properties = set()

  for f in filters:
    if f.op() == datastore_pb.Query_Filter.IN and f.property_size() == 1:
      f.set_op(datastore_pb.Query_Filter.EQUAL)
    if f.op() in EQUALITY_OPERATORS:
      eq_properties.add(f.property(0).name())
    elif f.op() in INEQUALITY_OPERATORS:
      inequality_properties.add(f.property(0).name())

  eq_properties -= inequality_properties

  remove_set = eq_properties.copy()
  new_orders = []
  for o in orders:
    if o.property() not in remove_set:
      remove_set.add(o.property())
      new_orders.append(o)
  orders = new_orders

  remove_set.update(inequality_properties)

  new_filters = []
  for f in filters:
    if f.op() not in EXISTS_OPERATORS:
      new_filters.append(f)
      continue
    name = f.property(0).name()
    if name not in remove_set:
      remove_set.add(name)
      new_filters.append(f)

  for prop in properties:
    if prop not in remove_set:
      remove_set.add(prop)
      new_filter = datastore_pb.Query_Filter()
      new_filter.set_op(datastore_pb.Query_Filter.EXISTS)
      new_prop = new_filter.add_property()
      new_prop.set_name(prop)
      new_prop.set_multiple(False)
      new_prop.mutable_value()
      new_filters.append(new_filter)

  filters = new_filters

  if datastore_types.KEY_SPECIAL_PROPERTY in eq_properties:
    orders = []

  new_orders = []
  for o in orders:
    if o.property() == datastore_types.KEY_SPECIAL_PROPERTY:
      new_orders.append(o)
      break
    new_orders.append(o)
  orders = new_orders

  return (filters, orders)
def RemoveNativelySupportedComponents(filters, orders, properties):
  """Removes query components that are natively supported by the datastore.

  The resulting filters and orders should not be used in an actual query.

  Returns:
    (filters, orders) the reduced set of filters and orders
  """
  (filters, orders) = Normalize(filters, orders, properties)

  for f in filters:
    if f.op() in EXISTS_OPERATORS:
      return (filters, orders)

  has_key_desc_order = False
  if orders and orders[-1].property() == datastore_types.KEY_SPECIAL_PROPERTY:
    if orders[-1].direction() == ASCENDING:
      orders = orders[:-1]
    else:
      has_key_desc_order = True

  if not has_key_desc_order:
    for f in filters:
      if (f.op() in INEQUALITY_OPERATORS and
          f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY):
        break
    else:
      filters = [
          f for f in filters
          if f.property(0).name() != datastore_types.KEY_SPECIAL_PROPERTY]

  return (filters, orders)
def CompositeIndexForQuery(query):
  """Return the composite index needed for a query.

  A query is translated into a tuple, as follows:

  - The first item is the kind string, or None if we're not filtering on kind
    (see below).

  - The second item is a bool giving whether the query specifies an ancestor.

  - After that come (property, ASCENDING) pairs for those Filter entries
    whose operator is EQUAL or IN. Since the order of these doesn't matter,
    they are sorted by property name to normalize them in order to avoid
    duplicates.

  - After that comes at most one (property, ASCENDING) pair for a Filter
    entry whose operator is one of the four inequalities. There can be at
    most one of these.

  - After that come all the (property, direction) pairs for the Order
    entries, in the order given in the query. Exceptions:
      (a) if there is a Filter entry with an inequality operator that matches
          the first Order entry, the first order pair is omitted (or,
          equivalently, in this case the inequality pair is omitted).
      (b) if an Order entry corresponds to an equality filter, it is ignored
          (since there will only ever be one value returned).
      (c) if there is an equality filter on __key__ all orders are dropped
          (since there will be at most one result returned).
      (d) if there is an order on __key__ all further orders are dropped
          (since keys are unique).
      (e) orders on __key__ ASCENDING are dropped (since this is supported
          natively by the datastore).

  - Finally, if there are Filter entries whose operator is EXISTS, and whose
    property names are not already listed, they are added, with the direction
    set to ASCENDING.

  This algorithm should consume all Filter and Order entries.

  Additional notes:

  - The low-level implementation allows queries that don't specify a kind;
    but the Python API doesn't support this yet.

  - If there's an inequality filter and one or more sort orders, the first
    sort order *must* match the inequality filter.

  - The following indexes are always built in and should be suppressed:
    - query on kind only;
    - query on kind and one filter *or* one order;
    - query on ancestor only, without kind (not exposed in Python yet);
    - query on kind and equality filters only, no order (with or without
      ancestor).

  - While the protocol buffer allows a Filter to contain multiple properties,
    we don't use this. It is only needed for the IN operator but this is
    (currently) handled on the client side, so in practice each Filter is
    expected to have exactly one property.

  Args:
    query: A datastore_pb.Query instance.

  Returns:
    A tuple of the form (required, kind, ancestor, properties).
      required: boolean, whether the index is required;
      kind: the kind or None;
      ancestor: True if this is an ancestor query;
      properties: A tuple consisting of any number of:
        - Sets of property names: Indicates these properties can appear in
          any order with any direction.
        - Tuples of (property name, direction) tuples. Indicating the
          properties must appear in the exact order with the given direction.
          direction can be None if direction does not matter.
  """
  required = True

  kind = query.kind()
  ancestor = query.has_ancestor()
  filters = query.filter_list()
  orders = query.order_list()

  for filter in filters:
    assert filter.op() != datastore_pb.Query_Filter.IN, 'Filter.op()==IN'
    nprops = len(filter.property_list())
    assert nprops == 1, 'Filter has %s properties, expected 1' % nprops

  if not kind:
    required = False

  filters, orders = RemoveNativelySupportedComponents(
      filters, orders, query.property_name_list())

  eq_filters = [f for f in filters if f.op() in EQUALITY_OPERATORS]
  ineq_filters = [f for f in filters if f.op() in INEQUALITY_OPERATORS]
  exists_filters = [f for f in filters if f.op() in EXISTS_OPERATORS]
  assert (len(eq_filters) + len(ineq_filters) +
          len(exists_filters)) == len(filters), 'Not all filters used'

  if (kind and not ineq_filters and not exists_filters and
      not orders):
    names = set(f.property(0).name() for f in eq_filters)
    if not names.intersection(datastore_types._SPECIAL_PROPERTIES):
      required = False

  ineq_property = None
  if ineq_filters:
    for filter in ineq_filters:
      if (filter.property(0).name() ==
          datastore_types._UNAPPLIED_LOG_TIMESTAMP_SPECIAL_PROPERTY):
        continue
      if not ineq_property:
        ineq_property = filter.property(0).name()
      else:
        assert filter.property(0).name() == ineq_property

  prefix = frozenset(f.property(0).name() for f in eq_filters)

  postfix_ordered = [(order.property(), order.direction()) for order in orders]

  postfix_unordered = frozenset(f.property(0).name() for f in exists_filters)

  if ineq_property:
    if orders:
      assert ineq_property == orders[0].property()
    else:
      postfix_ordered.append((ineq_property, None))

  property_count = len(prefix) + len(postfix_ordered) + len(postfix_unordered)
  if kind and not ancestor and property_count <= 1:
    required = False

    if postfix_ordered:
      prop, dir = postfix_ordered[0]
      if prop == datastore_types.KEY_SPECIAL_PROPERTY and dir is DESCENDING:
        required = True

  return (required, kind, ancestor,
          (prefix, tuple(postfix_ordered), postfix_unordered))
def GetRecommendedIndexProperties(properties):
  """Converts the properties returned by datastore_index.CompositeIndexForQuery
  into a recommended list of index properties and directions.

  All unordered components are sorted and assigned an ASCENDING direction. All
  ordered components without a direction are assigned an ASCENDING direction.

  Args:
    properties: See datastore_index.CompositeIndexForQuery

  Returns:
    A tuple of (name, direction) tuples where:
      name: a property name
      direction: datastore_pb.Query_Order.ASCENDING or ...DESCENDING
  """
  result = []
  for sub_list in properties:
    if isinstance(sub_list, (frozenset, set)):
      for prop in sorted(sub_list):
        result.append((prop, ASCENDING))
    else:
      for prop, dir in sub_list:
        result.append((prop, dir if dir is not None else ASCENDING))

  return tuple(result)
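# Worked example (assuming ASCENDING and DESCENDING are the module-level
# direction constants used above): unordered prefix properties are sorted and
# made ascending; ordered postfix properties keep their direction, with None
# defaulting to ascending.
#
#   GetRecommendedIndexProperties(
#       (frozenset(['b', 'a']),
#        (('date', DESCENDING), ('tag', None)),
#        frozenset()))
#   returns
#   (('a', ASCENDING), ('b', ASCENDING), ('date', DESCENDING),
#    ('tag', ASCENDING))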
def MinimalCompositeIndexForQuery(query, index_defs):
  """Computes the minimal composite index for this query.

  Unlike datastore_index.CompositeIndexForQuery, this function takes into
  account indexes that already exist in the system.

  Args:
    query: the datastore_pb.Query to compute suggestions for
    index_defs: a list of datastore_index.Index objects that already exist.

  Returns:
    None if no index is needed, otherwise the minimal index in the form
    (is_most_efficient, kind, ancestor, properties). Where is_most_efficient
    is a boolean denoting if the suggested index is the most efficient (i.e.
    the one returned by datastore_index.CompositeIndexForQuery). kind,
    ancestor, and properties are the same variables returned by
    datastore_index.CompositeIndexForQuery.
  """
  required, kind, ancestor, props = CompositeIndexForQuery(query)

  if not required:
    return None

  prefix, postfix_ordered, postfix_unordered = props

  remaining_dict = {}

  for definition in index_defs:
    if (kind != definition.kind or
        (not ancestor and definition.ancestor)):
      continue

    _, _, index_props = IndexToKey(definition)

    postfix_split = len(index_props) - 1
    while (postfix_split >= 0 and
           index_props[postfix_split][0] in postfix_unordered):
      postfix_split -= 1
    postfix_split += 1

    index_postfix_unordered = index_props[postfix_split:]
    if (set(prop for prop, _ in index_postfix_unordered) != postfix_unordered
        or len(index_postfix_unordered) != len(postfix_unordered)):
      continue

    postfix_start = postfix_split - len(postfix_ordered)
    if postfix_start < 0:
      continue

    index_postfix_ordered = index_props[postfix_start:postfix_split]
    match = True
    for (index_prop, index_dir), (prop, dir) in zip(index_postfix_ordered,
                                                    postfix_ordered):
      if index_prop != prop or (dir is not None and index_dir != dir):
        match = False
        break
    if not match:
      continue

    index_prefix = set(prop for prop, dir in index_props[:postfix_start])
    if index_prefix - prefix:
      continue

    index_postfix = tuple(index_postfix_ordered + index_postfix_unordered)
    remaining = remaining_dict.get(index_postfix)
    if remaining is None:
      remaining = prefix.copy(), ancestor

    props_remaining, ancestor_remaining = remaining
    props_remaining = props_remaining - index_prefix
    if definition.ancestor:
      ancestor_remaining = False

    if not (props_remaining or ancestor_remaining):
      return None

    if (props_remaining, ancestor_remaining) == remaining:
      continue

    remaining_dict[index_postfix] = (props_remaining, ancestor_remaining)

  if not remaining_dict:
    return (True, kind, ancestor, props)

  def calc_cost(minimal_props, minimal_ancestor):
    result = len(minimal_props)
    if minimal_ancestor:
      result += 2
    return result

  minimal_postfix, remaining = remaining_dict.popitem()
  minimal_props, minimal_ancestor = remaining
  minimal_cost = calc_cost(minimal_props, minimal_ancestor)
  for index_postfix, (props_remaining, ancestor_remaining) in (
      remaining_dict.items()):
    cost = calc_cost(props_remaining, ancestor_remaining)
    if cost < minimal_cost:
      minimal_cost = cost
      minimal_postfix = index_postfix
      minimal_props = props_remaining
      minimal_ancestor = ancestor_remaining

  return False, kind, minimal_ancestor, (frozenset(minimal_props),
                                         minimal_postfix,
                                         frozenset())
def IndexYamlForQuery(kind, ancestor, props):
  """Return the composite index definition YAML needed for a query.

  Given a query, the arguments for this method can be computed with:
    _, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query)
    props = datastore_index.GetRecommendedIndexProperties(props)

  Args:
    kind: the kind or None
    ancestor: True if this is an ancestor query, False otherwise
    props: tuples of the form (name, direction) where:
      name - a property name;
      direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING;

  Returns:
    A string with the YAML for the composite index needed by the query.
  """
  yaml = []
  yaml.append('- kind: %s' % kind)
  if ancestor:
    yaml.append('  ancestor: yes')
  if props:
    yaml.append('  properties:')
    for name, direction in props:
      yaml.append('  - name: %s' % name)
      if direction == DESCENDING:
        yaml.append('    direction: desc')
  return '\n'.join(yaml)
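# Worked example (ASCENDING and DESCENDING are the module-level direction
# constants used above):
#
#   IndexYamlForQuery('Greeting', True,
#                     [('author', ASCENDING), ('date', DESCENDING)])
#
# returns the YAML snippet:
#
#   - kind: Greeting
#     ancestor: yes
#     properties:
#     - name: author
#     - name: date
#       direction: desc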
def IndexXmlForQuery(kind, ancestor, props): """Return the composite index definition XML needed for a query. Given a query, the arguments for this method can be computed with: _, kind, ancestor, props = datastore_index.CompositeIndexForQuery(query) props = datastore_index.GetRecommendedIndexProperties(props) Args: kind: the kind or None ancestor: True if this is an ancestor query, False otherwise props: tuples of the form (name, direction) where: name - a property name; direction - datastore_pb.Query_Order.ASCENDING or ...DESCENDING; Returns: A string with the XML for the composite index needed by the query. """ xml = [] xml.append('<datastore-index kind="%s" ancestor="%s">' % (kind, 'true' if ancestor else 'false')) for name, direction in props: xml.append(' <property name="%s" direction="%s" />' % (name, 'asc' if direction == ASCENDING else 'desc')) xml.append('</datastore-index>') return '\n'.join(xml)
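# Editor's usage sketch (not part of the original module), mirroring the YAML
# example above with illustrative values; the direction strings follow from
# the ASCENDING comparison in the function.
example_xml = IndexXmlForQuery(
    kind='Greeting',
    ancestor=False,
    props=[('content', ASCENDING), ('date', DESCENDING)])
# example_xml is now:
# <datastore-index kind="Greeting" ancestor="false">
#   <property name="content" direction="asc" />
#   <property name="date" direction="desc" />
# </datastore-index>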
def ProtoToIndexDefinition(proto): """Transform individual index protocol buffer to index definition. Args: proto: An instance of entity_pb.CompositeIndex to transform. Returns: A new instance of datastore_index.Index. """ properties = [] proto_index = proto.definition() for prop_proto in proto_index.property_list(): prop_definition = Property(name=prop_proto.name()) if prop_proto.direction() == entity_pb.Index_Property.DESCENDING: prop_definition.direction = 'descending' properties.append(prop_definition) index = Index(kind=proto_index.entity_type(), properties=properties) if proto_index.ancestor(): index.ancestor = True return index
def _schedule_slice(cls, shard_state, transient_shard_state, queue_name=None, eta=None, countdown=None): """Schedule slice scanning by adding it to the task queue. Args: shard_state: An instance of ShardState. transient_shard_state: An instance of TransientShardState. queue_name: Optional queue to run on; uses the current queue of execution or the default queue if unspecified. eta: Absolute time when the MR should execute. May not be specified if 'countdown' is also supplied. This may be timezone-aware or timezone-naive. countdown: Time in seconds into the future that this MR should execute. Defaults to zero. """ base_path = transient_shard_state.base_path mapreduce_spec = transient_shard_state.mapreduce_spec task_name = MapperWorkerCallbackHandler.get_task_name( transient_shard_state.shard_id, transient_shard_state.slice_id) queue_name = queue_name or os.environ.get("HTTP_X_APPENGINE_QUEUENAME", "default") worker_task = util.HugeTask(url=base_path + "/worker_callback", params=transient_shard_state.to_dict(), name=task_name, eta=eta, countdown=countdown) if not _run_task_hook(mapreduce_spec.get_hooks(), "enqueue_worker_task", worker_task, queue_name): try: worker_task.add(queue_name, parent=shard_state) except (taskqueue.TombstonedTaskError, taskqueue.TaskAlreadyExistsError), e: logging.warning("Task %r with params %r already exists. %s: %s", task_name, transient_shard_state.to_dict(), e.__class__, e)
def _finalize_job(mapreduce_spec, mapreduce_state, base_path):
  """Finalize job execution.

  Finalizes output writer, invokes done callback and schedules
  finalize job execution.

  Args:
    mapreduce_spec: an instance of MapreduceSpec
    mapreduce_state: an instance of MapreduceState
    base_path: handler base path.
  """
  config = util.create_datastore_write_config(mapreduce_spec)

  if (mapreduce_spec.mapper.output_writer_class() and
      mapreduce_state.result_status == model.MapreduceState.RESULT_SUCCESS):
    mapreduce_spec.mapper.output_writer_class().finalize_job(mapreduce_state)

  def put_state(state):
    state.put(config=config)
    done_callback = mapreduce_spec.params.get(
        model.MapreduceSpec.PARAM_DONE_CALLBACK)
    if done_callback:
      done_task = taskqueue.Task(
          url=done_callback,
          headers={"Mapreduce-Id": mapreduce_spec.mapreduce_id},
          method=mapreduce_spec.params.get("done_callback_method", "POST"))
      queue_name = mapreduce_spec.params.get(
          model.MapreduceSpec.PARAM_DONE_CALLBACK_QUEUE,
          "default")
      if not _run_task_hook(mapreduce_spec.get_hooks(),
                            "enqueue_done_task",
                            done_task,
                            queue_name):
        done_task.add(queue_name, transactional=True)
    FinalizeJobHandler.schedule(base_path, mapreduce_spec)

  db.run_in_transaction(put_state, mapreduce_state)
def _schedule_shards(cls, spec, input_readers, output_writers, queue_name, base_path):
  """Prepares shard states and schedules their execution.

  Args:
    spec: mapreduce specification as MapreduceSpec.
    input_readers: list of InputReaders describing shard splits.
    output_writers: list of OutputWriters, one per input reader.
    queue_name: The queue to run this job on.
    base_path: The base url path of mapreduce callbacks.
  """
  assert len(input_readers) == len(output_writers)

  shard_states = []
  for shard_number, input_reader in enumerate(input_readers):
    shard_state = model.ShardState.create_new(spec.mapreduce_id, shard_number)
    shard_state.shard_description = str(input_reader)
    shard_states.append(shard_state)

  existing_shard_states = db.get(shard.key() for shard in shard_states)
  existing_shard_keys = set(shard.key() for shard in existing_shard_states
                            if shard is not None)

  db.put((shard for shard in shard_states
          if shard.key() not in existing_shard_keys),
         config=util.create_datastore_write_config(spec))

  processing_rate = int(spec.mapper.params.get(
      "processing_rate") or model._DEFAULT_PROCESSING_RATE_PER_SEC)
  quota_refill = processing_rate / len(shard_states)
  quota_manager = quota.QuotaManager(memcache.Client())
  for shard_state in shard_states:
    quota_manager.put(shard_state.shard_id, quota_refill)

  for shard_number, (input_reader, output_writer) in enumerate(
      zip(input_readers, output_writers)):
    shard_id = model.ShardState.shard_id_from_number(
        spec.mapreduce_id, shard_number)
    MapperWorkerCallbackHandler._schedule_slice(
        shard_states[shard_number],
        model.TransientShardState(
            base_path, spec, shard_id, 0, input_reader,
            output_writer=output_writer),
        queue_name=queue_name)
def TokenizeValue(self, field_value, token_position=0): """Tokenizes a document_pb.FieldValue into a sequence of Tokens.""" if field_value.type() is document_pb.FieldValue.GEO: return self._TokenizeForType(field_type=field_value.type(), value=field_value.geo(), token_position=token_position) return self._TokenizeForType(field_type=field_value.type(), value=field_value.string_value(), token_position=token_position)
def _TokenizeForType(self, field_type, value, token_position=0): """Tokenizes value into a sequence of Tokens.""" if field_type is document_pb.FieldValue.NUMBER: return [tokens.Token(chars=value, position=token_position)] if field_type is document_pb.FieldValue.GEO: return [tokens.GeoPoint(latitude=value.lat(), longitude=value.lng(), position=token_position)] tokens_found = [] token_strings = [] if not self._split_restricts: token_strings = self.SetCase(value).split() else: token_strings = self._TokenizeString(value, field_type) for token in token_strings: if ':' in token and self._split_restricts: for subtoken in token.split(':'): tokens_found.append( tokens.Token(chars=subtoken, position=token_position)) token_position += 1 elif '"' in token: for subtoken in token.split('"'): if not subtoken: tokens_found.append( tokens.Quote(chars='"', position=token_position)) else: tokens_found.append( tokens.Token(chars=subtoken, position=token_position)) token_position += 1 else: tokens_found.append(tokens.Token(chars=token, position=token_position)) token_position += 1 return tokens_found
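# Editor's sketch (not part of the original module) of the colon and quote
# handling in the loop above, using plain strings instead of tokens.Token and
# tokens.Quote objects and ignoring the SetCase/_TokenizeString steps; the
# sample value is illustrative only.
value = 'author:smith "red shoes"'
split_restricts = True
found = []
for token in value.split():
  if ':' in token and split_restricts:
    found.extend(token.split(':'))
  elif '"' in token:
    for subtoken in token.split('"'):
      found.append(subtoken if subtoken else '"')
  else:
    found.append(token)
assert found == ['author', 'smith', '"', 'red', 'shoes', '"']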
def ParseApiConfigResponse(self, body): """Parses a json api config and registers methods for dispatch. Side effects: Parses method name, etc for all methods and updates the indexing datastructures with the information. Args: body: body of getApiConfigs response """ try: response_obj = json.loads(body) except ValueError, unused_err: logging.error('Can not parse BackendService.getApiConfigs response: %s', body) else: for api_config_json in response_obj.get('items', []): try: config = json.loads(api_config_json) except ValueError, unused_err: logging.error('Can not parse API config: %s', api_config_json) else: version = config.get('version', '') if config.has_key('methods'): for method_name, method in config.get('methods', {}).iteritems(): method['api'] = config self.SaveRpcMethod(method_name, version, method) self.SaveRestMethod(method_name, version, method)
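# Editor's sketch (not part of the original module) of the response shape the
# parser above expects: a JSON object with an 'items' list whose entries are
# themselves JSON-encoded API configs. All names and values are illustrative.
import json

example_config = {
    'version': 'v1',
    'methods': {
        'guestbook.greetings.list': {
            'httpMethod': 'GET',
            'path': 'greetings',
        },
    },
}
example_body = json.dumps({'items': [json.dumps(example_config)]})
# ParseApiConfigResponse(example_body) would register the method above for
# both RPC-style and REST-style dispatch.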
def CompilePathPattern(pattern): """Generates a compiled regex pattern for a path pattern. e.g. '/{!name}/{!version}/notes/{id}' returns re.compile(r'/([^:/?#\[\]{}]*)' r'/([^:/?#\[\]{}]*)' r'/notes/(?P<id>[^:/?#\[\]{}]*)') Note in this example that !name and !version are reserved variable names used to match the API name and version that should not be migrated into the method argument namespace. As such they are not named in the regex, so groupdict() excludes them. Args: pattern: parameterized path pattern to be checked Returns: compiled regex to match this path pattern """ def ReplaceReservedVariable(match): """Replaces a {!variable} with a regex to match it not by name. Args: match: The matching regex group as sent by re.sub() Returns: Regex to match the variable by name, if the full pattern was matched. """ if match.lastindex > 1: return '%s(%s)' % (match.group(1), _PATH_VALUE_PATTERN) return match.group(0) def ReplaceVariable(match): """Replaces a {variable} with a regex to match it by name. Args: match: The matching regex group as sent by re.sub() Returns: Regex to match the variable by name, if the full pattern was matched. """ if match.lastindex > 1: return '%s(?P<%s>%s)' % (match.group(1), match.group(2), _PATH_VALUE_PATTERN) return match.group(0) pattern = re.sub('(/|^){(%s)}(?=/|$)' % _RESERVED_PATH_VARIABLE_PATTERN, ReplaceReservedVariable, pattern, 2) pattern = re.sub('(/|^){(%s)}(?=/|$)' % _PATH_VARIABLE_PATTERN, ReplaceVariable, pattern) return re.compile(pattern + '$')
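# Editor's sketch (not part of the original module): a self-contained check of
# the behaviour described in the docstring above, using the example regex the
# docstring gives (plus the trailing '$' the function appends) instead of the
# module's private pattern constants, which are not shown in this excerpt.
import re

compiled = re.compile(r'/([^:/?#\[\]{}]*)'
                      r'/([^:/?#\[\]{}]*)'
                      r'/notes/(?P<id>[^:/?#\[\]{}]*)$')
match = compiled.match('/guestbook/v1/notes/note123')
assert match is not None
assert match.group(1) == 'guestbook'            # !name: matched but unnamed
assert match.group(2) == 'v1'                   # !version: matched but unnamed
assert match.groupdict() == {'id': 'note123'}   # only {id} is a named group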
def ReplaceVariable(match): """Replaces a {variable} with a regex to match it by name. Args: match: The matching regex group as sent by re.sub() Returns: Regex to match the variable by name, if the full pattern was matched. """ if match.lastindex > 1: return '%s(?P<%s>%s)' % (match.group(1), match.group(2), _PATH_VALUE_PATTERN) return match.group(0)
def SaveRpcMethod(self, method_name, version, method): """Store JsonRpc api methods in a map for lookup at call time. (rpcMethodName, apiVersion) => method. Args: method_name: Name of the API method version: Version of the API method: method descriptor (as in the api config file). """ self.__rpc_method_dict[(method_name, version)] = method
def LookupRpcMethod(self, method_name, version): """Lookup the JsonRPC method at call time. The method is looked up in self.__rpc_method_dict, the dictionary that it is saved in for SaveRpcMethod(). Args: method_name: String name of the method version: String version of the API Returns: Method descriptor as specified in the API configuration. """ method = self.__rpc_method_dict.get((method_name, version)) return method
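# Editor's sketch (not part of the original module) of the (method name,
# version) keying used by SaveRpcMethod/LookupRpcMethod above, with a plain
# dict standing in for self.__rpc_method_dict; names are illustrative.
rpc_methods = {}
rpc_methods[('guestbook.greetings.list', 'v1')] = {'httpMethod': 'POST'}
assert rpc_methods.get(('guestbook.greetings.list', 'v1')) is not None
assert rpc_methods.get(('guestbook.greetings.list', 'v2')) is None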
def SaveRestMethod(self, method_name, version, method):
  """Store Rest api methods in a list for lookup at call time.

  The list is self.__rest_methods, a list of tuples:
    [(<compiled_path>, <path_pattern>, <method_dict>), ...]
  where:
    <compiled_path> is a compiled regex to match against the incoming URL
    <path_pattern> is a string representing the original path pattern,
      checked on insertion to prevent duplicates.
    -and-
    <method_dict> is a dict (httpMethod, apiVersion) => (method_name, method)

  This structure is a bit complex; it supports use in two contexts:
    Creation time:
      - SaveRestMethod is called repeatedly, each method will have a path,
        which we want to be compiled for fast lookup at call time
      - We want to prevent duplicate incoming path patterns, so store the
        un-compiled path, not counting on a compiled regex being a stable
        comparison as it is not documented as being stable for this use.
      - Need to store the method that will be mapped at call time.
      - Different methods may have the same path but different http methods
        and/or API versions.
    Call time:
      - Quickly scan through the list attempting .match(path) on each
        compiled regex to find the path that matches.
      - When a path is matched, look up the API version and method from the
        request and get the method name and method config for the matching
        API method and method name.

  Args:
    method_name: Name of the API method
    version: Version of the API
    method: method descriptor (as in the api config file).
  """
  path_pattern = _API_REST_PATH_FORMAT % method.get('path', '')
  http_method = method.get('httpMethod', '').lower()
  for _, path, methods in self.__rest_methods:
    if path == path_pattern:
      methods[(http_method, version)] = method_name, method
      break
  else:
    self.__rest_methods.append(
        (self.CompilePathPattern(path_pattern),
         path_pattern,
         {(http_method, version): (method_name, method)}))
def LookupRestMethod(self, path, http_method): """Look up the rest method at call time. The method is looked up in self.__rest_methods, the list it is saved in for SaveRestMethod. Args: path: Path from the URL of the request. http_method: HTTP method of the request. Returns: Tuple of (<method name>, <method>, <params>) Where: <method name> is the string name of the method that was matched. <method> is the descriptor as specified in the API configuration. -and- <params> is a dict of path parameters matched in the rest request. """ for compiled_path_pattern, unused_path, methods in self.__rest_methods: match = compiled_path_pattern.match(path) if match: params = match.groupdict() version = match.group(2) method_key = (http_method.lower(), version) method_name, method = methods.get(method_key, (None, None)) if method is not None: break else: method_name = None method = None params = None return method_name, method, params
def CreateApiserverDispatcher(config_manager=None): """Function to create Apiserver dispatcher. Args: config_manager: Allow setting of ApiConfigManager for testing. Returns: New dispatcher capable of handling requests to the built-in apiserver handlers. """ from google.appengine.tools import dev_appserver class ApiserverDispatcher(dev_appserver.URLDispatcher): """Dispatcher that handles requests to the built-in apiserver handlers.""" class RequestState(object): """Enum tracking request state.""" INIT = 0 GET_API_CONFIGS = 1 SPI_CALL = 2 END = 3 def __init__(self, config_manager=None, *args, **kwargs): self._request_stage = self.RequestState.INIT if config_manager is None: config_manager = ApiConfigManager() self.config_manager = config_manager dev_appserver.URLDispatcher.__init__(self, *args, **kwargs) def Dispatch(self, request, outfile, base_env_dict=None): """Handles dispatch to apiserver handlers. base_env_dict should contain at least: REQUEST_METHOD, REMOTE_ADDR, SERVER_SOFTWARE, SERVER_NAME, SERVER_PROTOCOL, SERVER_PORT Args: request: AppServerRequest. outfile: The response file. base_env_dict: Dictionary of CGI environment parameters if available. Defaults to None. Returns: AppServerRequest internal redirect for normal calls or None for error conditions (e.g. method not found -> 404) """ if self._request_stage != self.RequestState.INIT: return self.FailRequest('Dispatch in unexpected state', outfile) if not base_env_dict: return self.FailRequest('CGI Environment Not Available', outfile) self.request = ApiRequest(base_env_dict, dev_appserver, request) self._request_stage = self.RequestState.GET_API_CONFIGS return self.GetApiConfigs(base_env_dict, dev_appserver) def EndRedirect(self, dispatched_output, outfile): """Handle the end of getApiConfigs and SPI complete notification. This EndRedirect is called twice. The first time is upon completion of the BackendService.getApiConfigs() call. After this call, the set of all available methods and their parameters / paths / config is contained in dispatched_output. This is parsed and used to dispatch the request to the SPI backend itself. In order to cause a second dispatch and EndRedirect, this EndRedirect will return an AppServerRequest filled out with the SPI backend request. The second time it is called is upon completion of the call to the SPI backend. After this call, if the initial request (sent in Dispatch, prior to getApiConfigs) is used to reformat the response as needed. This currently only results in changes for JsonRPC requests, where the response body is moved into {'result': response_body_goes_here} and the request id is copied back into the response. Args: dispatched_output: resulting output from the SPI outfile: final output file for this handler Returns: An AppServerRequest for redirect or None for an immediate response. """ if self._request_stage == self.RequestState.GET_API_CONFIGS: if self.HandleGetApiConfigsResponse(dispatched_output, outfile): return self.CallSpi(outfile) elif self._request_stage == self.RequestState.SPI_CALL: return self.HandleSpiResponse(dispatched_output, outfile) else: return self.FailRequest('EndRedirect in unexpected state', outfile) def GetApiConfigs(self, cgi_env, dev_appserver): """Makes a call to BackendService.getApiConfigs and parses result. Args: cgi_env: CGI environment dictionary as passed in by the framework dev_appserver: dev_appserver instance used to generate AppServerRequest. 
Returns: AppServerRequest to be returned as an internal redirect to getApiConfigs """ request = ApiRequest(cgi_env, dev_appserver) request.path = 'BackendService.getApiConfigs' request.body = '{}' return BuildCGIRequest(cgi_env, request, dev_appserver) @staticmethod def VerifyResponse(response, status_code, content_type=None): """Verifies that a response has the expected status and content type. Args: response: Response to be checked. status_code: HTTP status code to be compared with response status. content_type: acceptable Content-Type: header value, None allows any. Returns: True if both status_code and content_type match, else False. """ if response.status_code != status_code: return False if content_type is None: return True for header in response.headers: if header.lower() == 'content-type': return response.headers[header].lower() == content_type else: return False @staticmethod def ParseCgiResponse(response): """Parses a CGI response, returning a headers dict and body. Args: response: a CGI response Returns: tuple of ({header: header_value, ...}, body) """ header_dict = {} for header in response.headers.headers: header_name, header_value = header.split(':', 1) header_dict[header_name.strip()] = header_value.strip() if response.body: body = response.body.read() else: body = '' return header_dict, body def HandleGetApiConfigsResponse(self, dispatched_output, outfile): """Parses the result of getApiConfigs, returning True on success. Args: dispatched_output: Output from the getApiConfigs call handler. outfile: CGI output handle, used for error conditions. Returns: True on success, False on failure """ response = dev_appserver.RewriteResponse(dispatched_output) if self.VerifyResponse(response, 200, 'application/json'): self.config_manager.ParseApiConfigResponse(response.body.read()) return True else: self.FailRequest('BackendService.getApiConfigs Error', outfile) return False def CallSpi(self, outfile): """Generate SPI call (from earlier-saved request). Side effects: self.request is modified from Rest/JsonRPC format to apiserving format. Args: outfile: File to write out CGI-style response in case of error. Returns: AppServerRequest for redirect or None to send immediate CGI response. """ if self.request.IsRpc(): method = self.LookupRpcMethod() params = None else: method, params = self.LookupRestMethod() if method: self.TransformRequest(params) self._request_stage = self.RequestState.SPI_CALL return BuildCGIRequest(self.request.cgi_env, self.request, dev_appserver) else: self._request_stage = self.RequestState.END return SendCGIResponse('404', {'Content-Type': 'text/plain'}, 'Not Found', outfile) def HandleSpiResponse(self, dispatched_output, outfile): """Handle SPI response, transforming output as needed. Args: dispatched_output: Response returned by SPI backend. outfile: File-like object to write transformed result. Returns: None """ response = dev_appserver.AppServerResponse( response_file=dispatched_output) headers, body = self.ParseCgiResponse(response) if self.request.IsRpc(): body = self.TransformJsonrpcResponse(body) self._request_stage = self.RequestState.END return SendCGIResponse(response.status_code, headers, body, outfile) def FailRequest(self, message, outfile): """Write an immediate failure response to outfile, no redirect. Args: message: Error message to be displayed to user (plain text). outfile: File-like object to write CGI response to. 
Returns: None """ self._request_stage = self.RequestState.END return SendCGIResponse('500', {'Content-Type': 'text/plain'}, message, outfile) def LookupRestMethod(self): """Looks up and returns rest method for the currently-pending request. This method uses self.request as the currently-pending request. Returns: tuple of (method, parameters) """ method_name, method, params = self.config_manager.LookupRestMethod( self.request.path, self.request.http_method) self.request.method_name = method_name return method, params def LookupRpcMethod(self): """Looks up and returns RPC method for the currently-pending request. This method uses self.request as the currently-pending request. Returns: RPC method that was found for the current request. """ if not self.request.body_obj: return None method_name = self.request.body_obj.get('method', '') version = self.request.body_obj.get('apiVersion', '') self.request.method_name = method_name return self.config_manager.LookupRpcMethod(method_name, version) def TransformRequest(self, params): """Transforms self.request to apiserving request. This method uses self.request to determint the currently-pending request. This method accepts a rest-style or RPC-style request. Side effects: Updates self.request to apiserving format. (e.g. updating path to be the method name, and moving request parameters to the body.) Args: params: Path parameters dictionary for rest request """ if self.request.IsRpc(): self.TransformJsonrpcRequest() else: self.TransformRestRequest(params) self.request.path = self.request.method_name def TransformRestRequest(self, params): """Translates a Rest request/response into an apiserving request/response. Side effects: Updates self.request to apiserving format. (e.g. updating path to be the method name, and moving request parameters to the body.) Args: params: URL path parameter dict extracted by config_manager lookup. """ body_obj = json.loads(self.request.body or '{}') if params: body_obj.update(params) self.request.body = json.dumps(body_obj) def TransformJsonrpcRequest(self): """Translates a JsonRpc request/response into apiserving request/response. Side effects: Updates self.request to apiserving format. (e.g. updating path to be the method name, and moving request parameters to the body.) """ body_obj = json.loads(self.request.body) if self.request.body else {} self.request.request_id = body_obj.get('id') body_obj = body_obj.get('params', {}) self.request.body = json.dumps(body_obj) def TransformJsonrpcResponse(self, response_body): """Translates a apiserving response to a JsonRpc response. Side effects: Updates self.request to JsonRpc format. (e.g. restoring request id and moving body object into {'result': body_obj} Args: response_body: Backend response to transform back to JsonRPC Returns: Updated, JsonRPC-formatted request body """ body_obj = {'result': json.loads(response_body)} if self.request.request_id is not None: body_obj['id'] = self.request.request_id return json.dumps(body_obj) return ApiserverDispatcher(config_manager)
def Dispatch(self, request, outfile, base_env_dict=None): """Handles dispatch to apiserver handlers. base_env_dict should contain at least: REQUEST_METHOD, REMOTE_ADDR, SERVER_SOFTWARE, SERVER_NAME, SERVER_PROTOCOL, SERVER_PORT Args: request: AppServerRequest. outfile: The response file. base_env_dict: Dictionary of CGI environment parameters if available. Defaults to None. Returns: AppServerRequest internal redirect for normal calls or None for error conditions (e.g. method not found -> 404) """ if self._request_stage != self.RequestState.INIT: return self.FailRequest('Dispatch in unexpected state', outfile) if not base_env_dict: return self.FailRequest('CGI Environment Not Available', outfile) self.request = ApiRequest(base_env_dict, dev_appserver, request) self._request_stage = self.RequestState.GET_API_CONFIGS return self.GetApiConfigs(base_env_dict, dev_appserver)
def CallSpi(self, outfile): """Generate SPI call (from earlier-saved request). Side effects: self.request is modified from Rest/JsonRPC format to apiserving format. Args: outfile: File to write out CGI-style response in case of error. Returns: AppServerRequest for redirect or None to send immediate CGI response. """ if self.request.IsRpc(): method = self.LookupRpcMethod() params = None else: method, params = self.LookupRestMethod() if method: self.TransformRequest(params) self._request_stage = self.RequestState.SPI_CALL return BuildCGIRequest(self.request.cgi_env, self.request, dev_appserver) else: self._request_stage = self.RequestState.END return SendCGIResponse('404', {'Content-Type': 'text/plain'}, 'Not Found', outfile)
def HandleSpiResponse(self, dispatched_output, outfile): """Handle SPI response, transforming output as needed. Args: dispatched_output: Response returned by SPI backend. outfile: File-like object to write transformed result. Returns: None """ response = dev_appserver.AppServerResponse( response_file=dispatched_output) headers, body = self.ParseCgiResponse(response) if self.request.IsRpc(): body = self.TransformJsonrpcResponse(body) self._request_stage = self.RequestState.END return SendCGIResponse(response.status_code, headers, body, outfile)
def FailRequest(self, message, outfile): """Write an immediate failure response to outfile, no redirect. Args: message: Error message to be displayed to user (plain text). outfile: File-like object to write CGI response to. Returns: None """ self._request_stage = self.RequestState.END return SendCGIResponse('500', {'Content-Type': 'text/plain'}, message, outfile)
def LookupRpcMethod(self): """Looks up and returns RPC method for the currently-pending request. This method uses self.request as the currently-pending request. Returns: RPC method that was found for the current request. """ if not self.request.body_obj: return None method_name = self.request.body_obj.get('method', '') version = self.request.body_obj.get('apiVersion', '') self.request.method_name = method_name return self.config_manager.LookupRpcMethod(method_name, version)
def TransformRequest(self, params):
  """Transforms self.request to apiserving request.

  This method uses self.request to determine the currently-pending request.
  This method accepts a rest-style or RPC-style request.

  Side effects:
    Updates self.request to apiserving format. (e.g. updating path to be
    the method name, and moving request parameters to the body.)

  Args:
    params: Path parameters dictionary for rest request
  """
  if self.request.IsRpc():
    self.TransformJsonrpcRequest()
  else:
    self.TransformRestRequest(params)
  self.request.path = self.request.method_name
def TransformRestRequest(self, params): """Translates a Rest request/response into an apiserving request/response. Side effects: Updates self.request to apiserving format. (e.g. updating path to be the method name, and moving request parameters to the body.) Args: params: URL path parameter dict extracted by config_manager lookup. """ body_obj = json.loads(self.request.body or '{}') if params: body_obj.update(params) self.request.body = json.dumps(body_obj)
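# Editor's sketch (not part of the original module) of the merge performed
# above, with literal stand-ins for self.request.body and the extracted path
# parameters; all values are illustrative.
import json

rest_body = '{"content": "hello"}'
path_params = {'id': 'note123'}
body_obj = json.loads(rest_body or '{}')
if path_params:
  body_obj.update(path_params)
merged_body = json.dumps(body_obj)
assert json.loads(merged_body) == {'content': 'hello', 'id': 'note123'}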
def TransformJsonrpcRequest(self): """Translates a JsonRpc request/response into apiserving request/response. Side effects: Updates self.request to apiserving format. (e.g. updating path to be the method name, and moving request parameters to the body.) """ body_obj = json.loads(self.request.body) if self.request.body else {} self.request.request_id = body_obj.get('id') body_obj = body_obj.get('params', {}) self.request.body = json.dumps(body_obj)
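# Editor's sketch (not part of the original module) of the unwrapping above:
# the JSON-RPC envelope's 'id' is saved for the response and only 'params' is
# forwarded to the SPI backend; the request body is illustrative.
import json

envelope = ('{"method": "guestbook.greetings.list", "apiVersion": "v1", '
            '"id": 42, "params": {"limit": 10}}')
body_obj = json.loads(envelope)
saved_request_id = body_obj.get('id')
spi_body = json.dumps(body_obj.get('params', {}))
assert saved_request_id == 42
assert json.loads(spi_body) == {'limit': 10}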
def TransformJsonrpcResponse(self, response_body):
  """Translates an apiserving response to a JsonRpc response.

  Side effects:
    Updates self.request to JsonRpc format. (e.g. restoring request id
    and moving body object into {'result': body_obj})

  Args:
    response_body: Backend response to transform back to JsonRPC

  Returns:
    Updated, JsonRPC-formatted request body
  """
  body_obj = {'result': json.loads(response_body)}
  if self.request.request_id is not None:
    body_obj['id'] = self.request.request_id
  return json.dumps(body_obj)
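The matching sketch for the response direction, again illustrative rather than taken from the source: the backend body is wrapped under 'result' and the previously remembered request id is restored.

import json

backend_response = '{"items": []}'
request_id = 'gapiRpc'                    # saved earlier by the request transform

body_obj = {'result': json.loads(backend_response)}
if request_id is not None:
  body_obj['id'] = request_id

assert json.loads(json.dumps(body_obj)) == {'result': {'items': []}, 'id': 'gapiRpc'}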
Python
def SendCGIResponse(status, headers, content, outfile):
  """Dump reformatted response to CGI outfile.

  Args:
    status: HTTP status code to send
    headers: Headers dictionary {header_name: header_value, ...}
    content: Body content to write
    outfile: File-like object where response will be written.

  Returns:
    None
  """
  outfile.write('Status: %s\r\n' % status)
  WriteHeaders(headers, outfile, len(content))
  outfile.write('\r\n')
  outfile.write(content)
  outfile.seek(0)
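A minimal sketch of the CGI-style byte stream the function above produces, built by hand rather than by calling the real helpers. It assumes WriteHeaders emits one 'Name: value' line per header plus a Content-Length line; that behavior is an assumption, not shown in the source.

# Hand-built equivalent of the layout: Status line, headers, blank line, body.
status = '200 OK'
headers = {'Content-Type': 'application/json'}
content = '{"ok": true}'

lines = ['Status: %s\r\n' % status]
for name, value in sorted(headers.items()):
  lines.append('%s: %s\r\n' % (name, value))
lines.append('Content-Length: %d\r\n' % len(content))   # assumed to come from WriteHeaders
lines.append('\r\n')
lines.append(content)
response = ''.join(lines)

assert response.startswith('Status: 200 OK\r\n')
assert response.endswith('\r\n\r\n{"ok": true}')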
Python
def validate(cls, mapper_spec):
  """Validates mapper spec and all mapper parameters.

  Args:
    mapper_spec: The MapperSpec for this InputReader.

  Raises:
    BadReaderParamsError: required parameters are missing or invalid.
  """
  if mapper_spec.input_reader_class() != cls:
    raise BadReaderParamsError("Input reader class mismatch")
  params = _get_params(mapper_spec)
  if cls.ENTITY_KIND_PARAM not in params:
    raise BadReaderParamsError("Missing mapper parameter 'entity_kind'")
  if cls.BATCH_SIZE_PARAM in params:
    try:
      batch_size = int(params[cls.BATCH_SIZE_PARAM])
      if batch_size < 1:
        raise BadReaderParamsError("Bad batch size: %s" % batch_size)
    except ValueError, e:
      raise BadReaderParamsError("Bad batch size: %s" % e)
  if cls.NAMESPACE_PARAM in params:
    if not isinstance(params[cls.NAMESPACE_PARAM],
                      (str, unicode, type(None))):
      raise BadReaderParamsError(
          "Expected a single namespace string")
  if cls.NAMESPACES_PARAM in params:
    raise BadReaderParamsError("Multiple namespaces are no longer supported")
  if cls.FILTERS_PARAM in params:
    filters = params[cls.FILTERS_PARAM]
    if not isinstance(filters, list):
      raise BadReaderParamsError("Expected list for filters parameter")
    for f in filters:
      if not isinstance(f, tuple):
        raise BadReaderParamsError("Filter should be a tuple: %s" % (f,))
      if len(f) != 3:
        raise BadReaderParamsError("Filter should be a 3-tuple: %s" % (f,))
      if not isinstance(f[0], basestring):
        raise BadReaderParamsError("First element should be string: %s" % (f,))
      if f[1] != "=":
        raise BadReaderParamsError(
            "Only equality filters are supported: %s" % (f,))
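An illustrative mapper-parameter dict that would satisfy the checks above: an entity kind, a positive integer batch size, a single namespace (or None), and equality-only 3-tuple filters. The key names other than 'entity_kind' (which the error message confirms) are assumptions, as are the values.

# Hypothetical parameters; only 'entity_kind' is confirmed by the error text above.
params = {
    'entity_kind': 'models.Guestbook',        # hypothetical datastore kind
    'batch_size': '50',                       # must parse as an int >= 1
    'namespace': None,                        # a single namespace string or None
    'filters': [('author', '=', 'alice')],    # only equality 3-tuples are accepted
}

batch_size = int(params['batch_size'])
assert batch_size >= 1
for f in params['filters']:
  assert isinstance(f, tuple) and len(f) == 3 and f[1] == '='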
Python
def __LookupErrorBlob(config, filename):
  """Looks up the mime type and error_code for 'filename'.

  Uses the error handlers in 'config' to determine if the file should
  be treated as an error blob.

  Args:
    config: The app.yaml object to check the filename against.
    filename: The name of the file.

  Returns:
    A tuple of (mime_type, error_code), or (None, None) if this is not
    an error blob. For example, ('text/plain', 'default') or
    ('image/gif', 'timeout') or (None, None).
  """
  if not config.error_handlers:
    return (None, None)
  for error_handler in config.error_handlers:
    if error_handler.file == filename:
      error_code = error_handler.error_code
      error_code = error_code or 'default'
      if error_handler.mime_type:
        return (error_handler.mime_type, error_code)
      else:
        return (FileClassification.__MimeType(filename), error_code)
  return (None, None)
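A self-contained sketch of the lookup logic, using namedtuples as stand-ins for the real app.yaml objects and a fixed fallback mime type in place of FileClassification.__MimeType. Filenames, error codes, and mime types are invented for illustration.

import collections

# Stand-ins for the real configuration objects, for illustration only.
ErrorHandler = collections.namedtuple('ErrorHandler', 'file error_code mime_type')
Config = collections.namedtuple('Config', 'error_handlers')

config = Config(error_handlers=[
    ErrorHandler(file='errors/timeout.html', error_code='timeout',
                 mime_type='text/html'),
    ErrorHandler(file='errors/default.gif', error_code=None, mime_type=None),
])

def lookup(config, filename, fallback_mime='application/octet-stream'):
  if not config.error_handlers:
    return (None, None)
  for handler in config.error_handlers:
    if handler.file == filename:
      error_code = handler.error_code or 'default'
      return (handler.mime_type or fallback_mime, error_code)
  return (None, None)

assert lookup(config, 'errors/timeout.html') == ('text/html', 'timeout')
assert lookup(config, 'errors/default.gif') == ('application/octet-stream', 'default')
assert lookup(config, 'main.py') == (None, None)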
Python
def GetResourceLimits(rpcserver, config):
  """Gets the resource limits.

  Gets the resource limits that should be applied to apps. Any values
  that the server does not know about will have their default value
  reported (although it is also possible for the server to report
  values we don't know about).

  Args:
    rpcserver: The RPC server to use.
    config: The appyaml configuration.

  Returns:
    A dictionary.
  """
  resource_limits = DEFAULT_RESOURCE_LIMITS.copy()
  resource_limits.update(GetRemoteResourceLimits(rpcserver, config))
  logging.debug('Using resource limits: %s' % resource_limits)
  return resource_limits
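A short sketch of the merge semantics: local defaults come first, server-reported values override them, and unrecognized server keys simply pass through. The key names and numbers below are made up.

# Defaults first, server-reported values win; unknown keys are kept as-is.
DEFAULT_LIMITS = {'max_file_size': 10485760, 'max_files_to_clone': 100}       # hypothetical
remote_limits = {'max_files_to_clone': 400, 'max_total_file_size': 157286400}  # hypothetical

resource_limits = DEFAULT_LIMITS.copy()
resource_limits.update(remote_limits)

assert resource_limits == {'max_file_size': 10485760,
                           'max_files_to_clone': 400,
                           'max_total_file_size': 157286400}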
Python
def CheckForUpdates(self):
  """Queries the server for updates and nags the user if appropriate.

  Queries the server for the latest SDK version at the same time
  reporting the local SDK version. The server will respond with a
  yaml document containing the fields:
    'release': The name of the release (e.g. 1.2).
    'timestamp': The time the release was created (YYYY-MM-DD HH:MM AM/PM TZ).
    'api_versions': A list of api_version strings (e.g. ['1', 'beta']).

  We will nag the user with increasing severity if:
  - There is a new release.
  - There is a new release with a new api_version.
  - There is a new release that does not support the api_version
    named in self.config.
  """
  version = self._ParseVersionFile()
  if version is None:
    logging.info('Skipping update check')
    return
  logging.info('Checking for updates to the SDK.')

  try:
    response = self.rpcserver.Send('/api/updatecheck',
                                   timeout=UPDATE_CHECK_TIMEOUT,
                                   release=version['release'],
                                   timestamp=version['timestamp'],
                                   api_versions=version['api_versions'],
                                   runtime=self.config.runtime)
  except urllib2.URLError, e:
    logging.info('Update check failed: %s', e)
    return

  latest = yaml.safe_load(response)
  if version['release'] == latest['release']:
    logging.info('The SDK is up to date.')
    return

  try:
    this_release = _VersionList(version['release'])
  except ValueError:
    logging.warn('Could not parse this release version (%r)',
                 version['release'])
  else:
    try:
      advertised_release = _VersionList(latest['release'])
    except ValueError:
      logging.warn('Could not parse advertised release version (%r)',
                   latest['release'])
    else:
      if this_release > advertised_release:
        logging.info('This SDK release is newer than the advertised release.')
        return

  api_versions = latest['api_versions']
  if self.config.api_version not in api_versions:
    self._Nag(
        'The api version you are using (%s) is obsolete! You should\n'
        'upgrade your SDK and test that your code works with the new\n'
        'api version.' % self.config.api_version,
        latest, version, force=True)
    return

  if self.config.api_version != api_versions[len(api_versions) - 1]:
    self._Nag(
        'The api version you are using (%s) is deprecated. You should\n'
        'upgrade your SDK to try the new functionality.' %
        self.config.api_version, latest, version)
    return

  self._Nag('There is a new release of the SDK available.',
            latest, version)
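A sketch of the release comparison used above, assuming _VersionList simply splits a dotted release string into a list of integers so that ordinary sequence comparison orders versions; that splitting behavior is an assumption, not shown here.

def version_list(release):
  """Assumed behaviour of _VersionList: '1.7.4' -> [1, 7, 4]."""
  return [int(part) for part in release.split('.')]

this_release = version_list('1.7.4')
advertised_release = version_list('1.6.9')

# List comparison is element-wise, so 1.7.4 correctly ranks above 1.6.9.
assert this_release > advertised_release
assert version_list('1.10.0') > version_list('1.9.9')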
Python
def MigratePython27Notice():
  """Encourages the user to migrate from Python 2.5 to Python 2.7.

  Prints a message to sys.stdout. The caller should have tested that the user
  is using Python 2.5, so as not to spuriously display this message.
  """
  print (
      'Notice: The Python 2.7 runtime is now available, and comes with a '
      'range of new features including concurrent requests and more '
      'libraries. Learn how simple it is to migrate your application to '
      'Python 2.7 at '
      'https://developers.google.com/appengine/docs/python/python25/migrate27.')
Python
def DoDownloadApp(rpcserver, out_dir, app_id, app_version):
  """Downloads the files associated with a particular app version.

  Args:
    rpcserver: The RPC server to use to download.
    out_dir: The directory the files should be downloaded to.
    app_id: The app ID of the app whose files we want to download.
    app_version: The version number we want to download. Can be:
      - None: We'll download the latest default version.
      - <major>: We'll download the latest minor version.
      - <major>/<minor>: We'll download that exact version.
  """
  StatusUpdate('Fetching file list...')

  url_args = {'app_id': app_id}
  if app_version is not None:
    url_args['version_match'] = app_version

  result = rpcserver.Send('/api/files/list', **url_args)

  StatusUpdate('Fetching files...')

  lines = result.splitlines()
  if len(lines) < 1:
    logging.error('Invalid response from server: empty')
    return
  full_version = lines[0]
  file_lines = lines[1:]

  current_file_number = 0
  num_files = len(file_lines)

  num_errors = 0

  for line in file_lines:
    parts = line.split('|', 2)
    if len(parts) != 3:
      logging.error('Invalid response from server: expecting '
                    '"<id>|<size>|<path>", found: "%s"\n', line)
      return

    current_file_number += 1

    file_id, size_str, path = parts
    try:
      size = int(size_str)
    except ValueError:
      logging.error('Invalid file list entry from server: invalid size: '
                    '"%s"', size_str)
      return

    StatusUpdate('[%d/%d] %s' % (current_file_number, num_files, path))

    def TryGet():
      """A request to /api/files/get which works with the RetryWithBackoff."""
      try:
        contents = rpcserver.Send('/api/files/get', app_id=app_id,
                                  version=full_version, id=file_id)
        return True, contents
      except urllib2.HTTPError, exc:
        if exc.code == 503:
          return False, exc
        else:
          raise

    def PrintRetryMessage(_, delay):
      StatusUpdate('Server busy. Will try again in %d seconds.' % delay)

    success, contents = RetryWithBackoff(TryGet, PrintRetryMessage)
    if not success:
      logging.error('Unable to download file "%s".', path)
      num_errors += 1
      continue

    if len(contents) != size:
      logging.error('File "%s": server listed as %d bytes but served '
                    '%d bytes.', path, size, len(contents))
      num_errors += 1

    full_path = os.path.join(out_dir, path)

    if os.path.exists(full_path):
      logging.error('Unable to create file "%s": path conflicts with '
                    'an existing file or directory', path)
      num_errors += 1
      continue

    full_dir = os.path.dirname(full_path)
    try:
      EnsureDir(full_dir)
    except OSError, exc:
      logging.error('Couldn\'t create directory "%s": %s', full_dir, exc)
      num_errors += 1
      continue

    try:
      out_file = open(full_path, 'wb')
    except IOError, exc:
      logging.error('Couldn\'t open file "%s": %s', full_path, exc)
      num_errors += 1
      continue

    try:
      try:
        out_file.write(contents)
      except IOError, exc:
        logging.error('Couldn\'t write to file "%s": %s', full_path, exc)
        num_errors += 1
        continue
    finally:
      out_file.close()

  if num_errors > 0:
    logging.error('Number of errors: %d. See output for details.', num_errors)
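A small sketch of the '<id>|<size>|<path>' file-list format the download loop parses; the response lines are fabricated. Note that split('|', 2) keeps any '|' characters inside the path intact.

# Fabricated /api/files/list response: a version line, then one line per file.
result = '\n'.join([
    '1.371727132649315826',            # full_version (made up)
    'abc123|11|app.yaml',
    'def456|2048|static/logo.png',
])

lines = result.splitlines()
full_version, file_lines = lines[0], lines[1:]

parsed = []
for line in file_lines:
  file_id, size_str, path = line.split('|', 2)
  parsed.append((file_id, int(size_str), path))

assert parsed == [('abc123', 11, 'app.yaml'), ('def456', 2048, 'static/logo.png')]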
Python
def Describe(self):
  """Returns a string describing the object being updated."""
  result = 'app: %s' % self.app_id
  if self.backend:
    result += ', backend: %s' % self.backend
  elif self.version:
    result += ', version: %s' % self.version
  return result
Python
def Begin(self):
  """Begins the transaction, returning a list of files that need uploading.

  All calls to AddFile must be made before calling Begin().

  Returns:
    A list of pathnames for files that should be uploaded using UploadFile()
    before Commit() can be called.
  """
  assert not self.in_transaction, 'Already in a transaction.'

  self.Send('/api/appversion/create', payload=self.config.ToYAML())
  self.in_transaction = True

  files_to_clone = []
  blobs_to_clone = []
  errorblobs = {}
  for path, content_hash in self.files.iteritems():
    file_classification = FileClassification(self.config, path)

    if file_classification.IsStaticFile():
      blobs_to_clone.append((path, content_hash,
                             file_classification.StaticMimeType()))

    if file_classification.IsErrorFile():
      errorblobs[path] = content_hash

    if file_classification.IsApplicationFile():
      files_to_clone.append((path, content_hash))

  files_to_upload = {}

  def CloneFiles(url, files, file_type):
    """Sends files to the given url.

    Args:
      url: the server URL to use.
      files: a list of files
      file_type: the type of the files
    """
    if not files:
      return

    StatusUpdate('Cloning %d %s file%s.' %
                 (len(files), file_type, len(files) != 1 and 's' or ''))

    max_files = self.resource_limits['max_files_to_clone']
    for i in xrange(0, len(files), max_files):
      if i > 0 and i % max_files == 0:
        StatusUpdate('Cloned %d files.' % i)

      chunk = files[i:min(len(files), i + max_files)]
      result = self.Send(url, payload=BuildClonePostBody(chunk))
      if result:
        files_to_upload.update(dict(
            (f, self.files[f]) for f in result.split(LIST_DELIMITER)))

  CloneFiles('/api/appversion/cloneblobs', blobs_to_clone, 'static')
  CloneFiles('/api/appversion/clonefiles', files_to_clone, 'application')

  logging.debug('Files to upload: %s', files_to_upload)

  for (path, content_hash) in errorblobs.iteritems():
    files_to_upload[path] = content_hash
  self.files = files_to_upload
  return sorted(files_to_upload.iterkeys())
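A standalone sketch of the chunking performed by the nested CloneFiles helper above: files are sent in batches of at most max_files_to_clone per clone request. The file list and the limit value are invented for illustration.

# Batch a (path, hash) list into clone requests of at most max_files entries each.
files = [('f%d' % i, 'hash%d' % i) for i in range(7)]   # hypothetical (path, hash) pairs
max_files = 3                                           # stand-in for max_files_to_clone

chunks = [files[i:i + max_files] for i in range(0, len(files), max_files)]

assert [len(c) for c in chunks] == [3, 3, 1]
assert chunks[0][0] == ('f0', 'hash0')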
Python
def CloneFiles(url, files, file_type):
  """Sends files to the given url.

  Args:
    url: the server URL to use.
    files: a list of files
    file_type: the type of the files
  """
  if not files:
    return

  StatusUpdate('Cloning %d %s file%s.' %
               (len(files), file_type, len(files) != 1 and 's' or ''))

  max_files = self.resource_limits['max_files_to_clone']
  for i in xrange(0, len(files), max_files):
    if i > 0 and i % max_files == 0:
      StatusUpdate('Cloned %d files.' % i)

    chunk = files[i:min(len(files), i + max_files)]
    result = self.Send(url, payload=BuildClonePostBody(chunk))
    if result:
      files_to_upload.update(dict(
          (f, self.files[f]) for f in result.split(LIST_DELIMITER)))