Dataset columns:

  column                   type               min                          max
  body                     string (length)    26                           98.2k
  body_hash                int64              -9,222,864,604,528,158,000   9,221,803,474B
  docstring                string (length)    1                            16.8k
  path                     string (length)    5                            230
  name                     string (length)    1                            96
  repository_name          string (length)    7                            89
  lang                     string (classes)   1 value
  body_without_docstring   string (length)    20                           98.2k
def getFonts(self, filterByFontFormat=[], variableFont=None): '\n Calculate list of fonts of this package by applying filters for\n font.format and font.variableFont (possibly more in the future)\n ' def passedFilter(font): passed1 = ((not filterByFontFormat) or (filterByFontFormat and (font.format in filterByFontFormat))) passed2 = ((variableFont is None) or ((variableFont is not None) and (font.variableFont == variableFont))) return (passed1 and passed2) return [x for x in self.fonts if passedFilter(x)]
7,343,162,613,261,906,000
Calculate list of fonts of this package by applying filters for font.format and font.variableFont (possibly more in the future)
Lib/typeworld/api/__init__.py
getFonts
typeWorld/api
python
def getFonts(self, filterByFontFormat=[], variableFont=None): '\n Calculate list of fonts of this package by applying filters for\n font.format and font.variableFont (possibly more in the future)\n ' def passedFilter(font): passed1 = ((not filterByFontFormat) or (filterByFontFormat and (font.format in filterByFontFormat))) passed2 = ((variableFont is None) or ((variableFont is not None) and (font.variableFont == variableFont))) return (passed1 and passed2) return [x for x in self.fonts if passedFilter(x)]
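A minimal usage sketch of the filter logic above, with a hypothetical stand-in for the font objects (the real Package/Font classes come from typeworld.api; the names and values here are invented):

import types

fonts = [types.SimpleNamespace(format='otf', variableFont=False),
         types.SimpleNamespace(format='woff2', variableFont=True)]
# Same predicate as passedFilter: keep listed formats with a matching variableFont flag
picked = [f for f in fonts if f.format in ['woff2'] and f.variableFont is True]
print(len(picked))  # 1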
def getLicense(self): ' Returns the ::License:: object that this font references.\n ' return self.parent.parent.parent.getLicenseByKeyword(self.keyword)
-8,534,528,834,294,439,000
Returns the ::License:: object that this font references.
Lib/typeworld/api/__init__.py
getLicense
typeWorld/api
python
def getLicense(self): ' \n ' return self.parent.parent.parent.getLicenseByKeyword(self.keyword)
def isFontSpecific(self): ' Returns True if this version is defined at the font level.\n Returns False if this version is defined at the family level.\n ' return issubclass(self.parent.__class__, Font)
-7,631,871,248,832,800,000
Returns True if this version is defined at the font level. Returns False if this version is defined at the family level.
Lib/typeworld/api/__init__.py
isFontSpecific
typeWorld/api
python
def isFontSpecific(self): ' Returns True if this version is defined at the font level.\n Returns False if this version is defined at the family level.\n ' return issubclass(self.parent.__class__, Font)
def filename(self, version): ' Returns the recommended font file name to be used to store the font on disk.\n\n It is composed of the font’s uniqueID, its version string and the file\n extension. Together, they must not exceed 220 characters.\n ' if (not (type(version) in (str, int, float))): raise ValueError('Supplied version must be str or int or float') if self.format: return ('%s_%s.%s' % (self.uniqueID, version, self.format)) else: return ('%s_%s' % (self.uniqueID, version))
-8,905,214,114,146,957,000
Returns the recommended font file name to be used to store the font on disk. It is composed of the font’s uniqueID, its version string and the file extension. Together, they must not exceed 220 characters.
Lib/typeworld/api/__init__.py
filename
typeWorld/api
python
def filename(self, version): ' Returns the recommended font file name to be used to store the font on disk.\n\n It is composed of the font’s uniqueID, its version string and the file\n extension. Together, they must not exceed 220 characters.\n ' if (not (type(version) in (str, int, float))): raise ValueError('Supplied version must be str or int or float') if self.format: return ('%s_%s.%s' % (self.uniqueID, version, self.format)) else: return ('%s_%s' % (self.uniqueID, version))
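The composition itself is a plain string join of the uniqueID, the version, and the format; illustrated with made-up values:

uniqueID, version, fmt = 'foundry-family-bold', '1.2.0', 'otf'
print('%s_%s.%s' % (uniqueID, version, fmt))  # foundry-family-bold_1.2.0.otf
print('%s_%s' % (uniqueID, version))          # fallback when no format is set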
def getBillboardURLs(self): ' Returns a list of billboardURLs compiled from ::Font.billboardURLs::\n and ::Family.billboardURLs::, giving the font-level definitions priority\n over family-level definitions.\n ' return (self.billboardURLs or self.parent.billboardURLs)
5,772,845,123,857,825,000
Returns a list of billboardURLs compiled from ::Font.billboardURLs:: and ::Family.billboardURLs::, giving the font-level definitions priority over family-level definitions.
Lib/typeworld/api/__init__.py
getBillboardURLs
typeWorld/api
python
def getBillboardURLs(self): ' Returns a list of billboardURLs compiled from ::Font.billboardURLs::\n and ::Family.billboardURLs::, giving the font-level definitions priority\n over family-level definitions.\n ' return (self.billboardURLs or self.parent.billboardURLs)
def getVersions(self): ' Returns list of ::Version:: objects.\n\n This is the final list based on the version information in this font object as\n well as in its parent ::Family:: object. Please read the section about\n [versioning](#versioning) above.\n ' if (not self.hasVersionInformation()): raise ValueError(('%s has no version information, and neither has its family %s. Either one needs to carry version information.' % (self, self.parent))) def compare(a, b): return semver.VersionInfo.parse(makeSemVer(a.number)).compare(makeSemVer(b.number)) versions = [] haveVersionNumbers = [] for version in self.versions: versions.append(version) haveVersionNumbers.append(makeSemVer(version.number)) for version in self.parent.versions: if (version.number not in haveVersionNumbers): versions.append(version) haveVersionNumbers.append(makeSemVer(version.number)) versions = sorted(versions, key=functools.cmp_to_key(compare)) return versions
4,009,952,125,952,453,600
Returns list of ::Version:: objects. This is the final list based on the version information in this font object as well as in its parent ::Family:: object. Please read the section about [versioning](#versioning) above.
Lib/typeworld/api/__init__.py
getVersions
typeWorld/api
python
def getVersions(self): ' Returns list of ::Version:: objects.\n\n This is the final list based on the version information in this font object as\n well as in its parent ::Family:: object. Please read the section about\n [versioning](#versioning) above.\n ' if (not self.hasVersionInformation()): raise ValueError(('%s has no version information, and neither has its family %s. Either one needs to carry version information.' % (self, self.parent))) def compare(a, b): return semver.VersionInfo.parse(makeSemVer(a.number)).compare(makeSemVer(b.number)) versions = [] haveVersionNumbers = [] for version in self.versions: versions.append(version) haveVersionNumbers.append(makeSemVer(version.number)) for version in self.parent.versions: if (version.number not in haveVersionNumbers): versions.append(version) haveVersionNumbers.append(makeSemVer(version.number)) versions = sorted(versions, key=functools.cmp_to_key(compare)) return versions
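The sort at the end relies on a semver comparator wrapped with functools.cmp_to_key; the same pattern in isolation (requires the semver package). Note that '1.10.0' sorts after '1.2.0', unlike a plain string sort:

import functools
import semver

numbers = ['1.10.0', '1.2.0', '2.0.0']
cmp = lambda a, b: semver.VersionInfo.parse(a).compare(b)
print(sorted(numbers, key=functools.cmp_to_key(cmp)))  # ['1.2.0', '1.10.0', '2.0.0']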
def getDesigners(self): ' Returns a list of ::Designer:: objects that this font references.\n These are the combination of family-level designers and font-level designers.\n The same logic as for versioning applies.\n Please read the section about [versioning](#versioning) above.\n ' if (not hasattr(self, '_designers')): self._designers = [] if self.parent.designerKeywords: for designerKeyword in self.parent.designerKeywords: self._designers.append(self.parent.parent.parent.getDesignerByKeyword(designerKeyword)) if self.designerKeywords: for designerKeyword in self.designerKeywords: self._designers.append(self.parent.parent.parent.getDesignerByKeyword(designerKeyword)) return self._designers
-3,829,965,687,416,815,600
Returns a list of ::Designer:: objects that this font references. These are the combination of family-level designers and font-level designers. The same logic as for versioning applies. Please read the section about [versioning](#versioning) above.
Lib/typeworld/api/__init__.py
getDesigners
typeWorld/api
python
def getDesigners(self): ' Returns a list of ::Designer:: objects that this font references.\n These are the combination of family-level designers and font-level designers.\n The same logic as for versioning applies.\n Please read the section about [versioning](#versioning) above.\n ' if (not hasattr(self, '_designers')): self._designers = [] if self.parent.designerKeywords: for designerKeyword in self.parent.designerKeywords: self._designers.append(self.parent.parent.parent.getDesignerByKeyword(designerKeyword)) if self.designerKeywords: for designerKeyword in self.designerKeywords: self._designers.append(self.parent.parent.parent.getDesignerByKeyword(designerKeyword)) return self._designers
def getAllDesigners(self): ' Returns a list of ::Designer:: objects that represent all of the designers\n referenced both at the family level as well as with all the family’s fonts,\n in case the fonts carry specific designers. This could be used to give a\n one-glance overview of all designers involved.\n ' if (not hasattr(self, '_allDesigners')): self._allDesigners = [] self._allDesignersKeywords = [] for designerKeyword in self.designerKeywords: self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword)) self._allDesignersKeywords.append(designerKeyword) for font in self.fonts: for designerKeyword in font.designerKeywords: if (designerKeyword not in self._allDesignersKeywords): self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword)) self._allDesignersKeywords.append(designerKeyword) return self._allDesigners
-1,701,254,645,675,247,600
Returns a list of ::Designer:: objects that represent all of the designers referenced both at the family level as well as with all the family’s fonts, in case the fonts carry specific designers. This could be used to give a one-glance overview of all designers involved.
Lib/typeworld/api/__init__.py
getAllDesigners
typeWorld/api
python
def getAllDesigners(self): ' Returns a list of ::Designer:: objects that represent all of the designers\n referenced both at the family level as well as with all the family’s fonts,\n in case the fonts carry specific designers. This could be used to give a\n one-glance overview of all designers involved.\n ' if (not hasattr(self, '_allDesigners')): self._allDesigners = [] self._allDesignersKeywords = [] for designerKeyword in self.designerKeywords: self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword)) self._allDesignersKeywords.append(designerKeyword) for font in self.fonts: for designerKeyword in font.designerKeywords: if (designerKeyword not in self._allDesignersKeywords): self._allDesigners.append(self.parent.parent.getDesignerByKeyword(designerKeyword)) self._allDesignersKeywords.append(designerKeyword) return self._allDesigners
def forward(self, s, state=None, info={}): 's -> Q(s, \\*)' (logits, h) = self.preprocess(s, state) logits = F.softmax(self.last(logits), dim=(- 1)) return (logits, h)
6,090,292,889,799,149,000
s -> Q(s, \*)
tianshou/utils/net/discrete.py
forward
FightingSrain/tianshou
python
def forward(self, s, state=None, info={}): 's -> Q(s, \\*)' (logits, h) = self.preprocess(s, state) logits = F.softmax(self.last(logits), dim=(- 1)) return (logits, h)
def forward(self, s, **kwargs): 's -> V(s)' (logits, h) = self.preprocess(s, state=kwargs.get('state', None)) logits = self.last(logits) return logits
-5,762,130,964,247,468,000
s -> V(s)
tianshou/utils/net/discrete.py
forward
FightingSrain/tianshou
python
def forward(self, s, **kwargs): (logits, h) = self.preprocess(s, state=kwargs.get('state', None)) logits = self.last(logits) return logits
def forward(self, x, state=None, info={}): 'x -> Q(x, \\*)' if (not isinstance(x, torch.Tensor)): x = torch.tensor(x, device=self.device, dtype=torch.float32) x = x.permute(0, 3, 1, 2) x = F.relu(self.bn1(self.conv1(x))) x = F.relu(self.bn2(self.conv2(x))) x = F.relu(self.bn3(self.conv3(x))) x = self.fc(x.reshape(x.size(0), (- 1))) return (self.head(x), state)
-6,293,112,128,924,184,000
x -> Q(x, \*)
tianshou/utils/net/discrete.py
forward
FightingSrain/tianshou
python
def forward(self, x, state=None, info={}): 'x -> Q(x, \\*)' if (not isinstance(x, torch.Tensor)): x = torch.tensor(x, device=self.device, dtype=torch.float32) x = x.permute(0, 3, 1, 2) x = F.relu(self.bn1(self.conv1(x))) x = F.relu(self.bn2(self.conv2(x))) x = F.relu(self.bn3(self.conv3(x))) x = self.fc(x.reshape(x.size(0), (- 1))) return (self.head(x), state)
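The permute converts channel-last observations into the channel-first layout Conv2d expects, and the reshape flattens each sample before the fully connected head; the shape flow, assuming 84x84 frames with 4 channels (sizes invented for illustration):

import torch

x = torch.zeros(32, 84, 84, 4)   # channel-last batch (assumed shape)
x = x.permute(0, 3, 1, 2)        # -> (32, 4, 84, 84) for Conv2d
flat = x.reshape(x.size(0), -1)  # flatten per sample for the fc head
print(flat.shape)                # torch.Size([32, 28224])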
def test_set_tokens_credentials(client): 'Test setting the tokens using credentials' client.refresh_token = None del client.session.headers['Authorization'] client._set_tokens() assert client.refresh_token assert ('Authorization' in client.session.headers)
-8,691,619,730,239,429,000
Test setting the tokens using credentials
tests/test_client.py
test_set_tokens_credentials
MuckRock/python-documentcloud
python
def test_set_tokens_credentials(client): client.refresh_token = None del client.session.headers['Authorization'] client._set_tokens() assert client.refresh_token assert ('Authorization' in client.session.headers)
def test_set_tokens_refresh(client): 'Test setting the tokens using refresh token' client.refresh_token = None del client.session.headers['Authorization'] client._set_tokens() client._set_tokens() assert client.refresh_token assert ('Authorization' in client.session.headers)
2,197,476,233,905,266,400
Test setting the tokens using refresh token
tests/test_client.py
test_set_tokens_refresh
MuckRock/python-documentcloud
python
def test_set_tokens_refresh(client): client.refresh_token = None del client.session.headers['Authorization'] client._set_tokens() client._set_tokens() assert client.refresh_token assert ('Authorization' in client.session.headers)
def test_set_tokens_none(public_client): 'Test setting the tokens with no credentials' public_client._set_tokens() assert (public_client.refresh_token is None) assert ('Authorization' not in public_client.session.headers)
-8,181,585,842,802,609,000
Test setting the tokens with no credentials
tests/test_client.py
test_set_tokens_none
MuckRock/python-documentcloud
python
def test_set_tokens_none(public_client): public_client._set_tokens() assert (public_client.refresh_token is None) assert ('Authorization' not in public_client.session.headers)
def test_get_tokens(client): 'Test getting access and refresh tokens using valid credentials' (access, refresh) = client._get_tokens(client.username, client.password) assert access assert refresh
1,387,383,972,610,002,000
Test getting access and refresh tokens using valid credentials
tests/test_client.py
test_get_tokens
MuckRock/python-documentcloud
python
def test_get_tokens(client): (access, refresh) = client._get_tokens(client.username, client.password) assert access assert refresh
def test_get_tokens_bad_credentials(client): 'Test getting access and refresh tokens using invalid credentials' with pytest.raises(CredentialsFailedError): client._get_tokens(client.username, 'foo')
-6,442,918,922,323,646,000
Test getting access and refresh tokens using invalid credentials
tests/test_client.py
test_get_tokens_bad_credentials
MuckRock/python-documentcloud
python
def test_get_tokens_bad_credentials(client): with pytest.raises(CredentialsFailedError): client._get_tokens(client.username, 'foo')
def test_refresh_tokens(client): 'Test refreshing the tokens' (access, refresh) = client._refresh_tokens(client.refresh_token) assert access assert refresh
-4,345,584,591,935,057,000
Test refreshing the tokens
tests/test_client.py
test_refresh_tokens
MuckRock/python-documentcloud
python
def test_refresh_tokens(client): (access, refresh) = client._refresh_tokens(client.refresh_token) assert access assert refresh
def _parse_response(self, response): '\n Parse raw http response into a python\n dictionary object.\n\n :param str response: http response\n :returns: response dict\n :rtype: dict\n ' response_dict = {} for line in response.splitlines(): (key, value) = line.split('=', 1) response_dict[key] = value return response_dict
-4,143,355,550,308,036,600
Parse raw http response into a python dictionary object. :param str response: http response :returns: response dict :rtype: dict
sendsms/backends/esendex.py
_parse_response
codesankalp/django-sendsms
python
def _parse_response(self, response): '\n Parse raw http response into a python\n dictionary object.\n\n :param str response: http response\n :returns: response dict\n :rtype: dict\n ' response_dict = {} for line in response.splitlines(): (key, value) = line.split('=', 1) response_dict[key] = value return response_dict
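With the per-line split in place, the parser reduces to a few lines; a standalone sketch on a made-up Esendex-style body:

raw = 'Result=OK\nMessageIDs=abc123'  # hypothetical response body
parsed = {}
for line in raw.splitlines():
    key, value = line.split('=', 1)   # split each line, not the whole response
    parsed[key] = value
print(parsed)  # {'Result': 'OK', 'MessageIDs': 'abc123'}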
def _send(self, message): '\n Private method to send one message.\n\n :param SmsMessage message: SmsMessage class instance.\n :returns: True if message is sent else False\n :rtype: bool\n ' params = {'EsendexUsername': self.get_username(), 'EsendexPassword': self.get_password(), 'EsendexAccount': self.get_account(), 'EsendexOriginator': message.from_phone, 'EsendexRecipient': ','.join(message.to), 'EsendexBody': message.body, 'EsendexPlainText': '1'} if ESENDEX_SANDBOX: params['EsendexTest'] = '1' response = requests.post(ESENDEX_API_URL, params) if (response.status_code != 200): if (not self.fail_silently): raise Exception('Bad status code') else: return False if (not response.content.startswith(b'Result')): if (not self.fail_silently): raise Exception('Bad result') else: return False response = self._parse_response(response.content.decode('utf8')) if (ESENDEX_SANDBOX and (response['Result'] == 'Test')): return True elif response['Result'].startswith('OK'): return True elif (not self.fail_silently): raise Exception('Bad result') return False
7,000,168,824,280,457,000
Private method to send one message. :param SmsMessage message: SmsMessage class instance. :returns: True if message is sent else False :rtype: bool
sendsms/backends/esendex.py
_send
codesankalp/django-sendsms
python
def _send(self, message): '\n Private method to send one message.\n\n :param SmsMessage message: SmsMessage class instance.\n :returns: True if message is sent else False\n :rtype: bool\n ' params = {'EsendexUsername': self.get_username(), 'EsendexPassword': self.get_password(), 'EsendexAccount': self.get_account(), 'EsendexOriginator': message.from_phone, 'EsendexRecipient': ','.join(message.to), 'EsendexBody': message.body, 'EsendexPlainText': '1'} if ESENDEX_SANDBOX: params['EsendexTest'] = '1' response = requests.post(ESENDEX_API_URL, params) if (response.status_code != 200): if (not self.fail_silently): raise Exception('Bad status code') else: return False if (not response.content.startswith(b'Result')): if (not self.fail_silently): raise Exception('Bad result') else: return False response = self._parse_response(response.content.decode('utf8')) if (ESENDEX_SANDBOX and (response['Result'] == 'Test')): return True elif response['Result'].startswith('OK'): return True elif (not self.fail_silently): raise Exception('Bad result') return False
def send_messages(self, messages): '\n Send messages.\n\n :param list messages: List of SmsMessage instances.\n :returns: number of messages sent successfully.\n :rtype: int\n ' counter = 0 for message in messages: res = self._send(message) if res: counter += 1 return counter
5,847,916,432,418,928,000
Send messages. :param list messages: List of SmsMessage instances. :returns: number of messages sent successfully. :rtype: int
sendsms/backends/esendex.py
send_messages
codesankalp/django-sendsms
python
def send_messages(self, messages): '\n Send messages.\n\n :param list messages: List of SmsMessage instances.\n :returns: number of messages sent successfully.\n :rtype: int\n ' counter = 0 for message in messages: res = self._send(message) if res: counter += 1 return counter
@commands.slash_command(name='layer') async def _layer(self, inter: disnake.AppCmdInter, templates: str): 'Layer several templates.\n\n Parameters\n ----------\n templates: List of templates (URL or name) separated by a space (last goes above).\n ' (await inter.response.defer()) template_uris = templates.split(' ') (await self.layer(inter, template_uris))
-8,327,733,782,223,394,000
Layer several templates. Parameters ---------- templates: List of templates (URL or name) separated by a space (last goes above).
src/cogs/pxls_template/layer.py
_layer
GrayTurtles/Clueless
python
@commands.slash_command(name='layer') async def _layer(self, inter: disnake.AppCmdInter, templates: str): 'Layer several templates.\n\n Parameters\n ----------\n templates: List of templates (URL or name) separated by a space (last goes above).\n ' (await inter.response.defer()) template_uris = templates.split(' ') (await self.layer(inter, template_uris))
@conf def find_cuda_libs(self): "\n\tfind the cuda include and library folders\n\n\tuse ctx.program(source='main.c', target='app', use='CUDA CUDART')\n\t" if (not self.env.NVCC): self.fatal('check for nvcc first') d = self.root.find_node(self.env.NVCC[0]).parent.parent node = d.find_node('include') _includes = ((node and node.abspath()) or '') _libpath = [] for x in ('lib64', 'lib'): try: _libpath.append(d.find_node(x).abspath()) except: pass self.check_cxx(header='cuda.h', lib='cuda', libpath=_libpath, includes=_includes) self.check_cxx(header='cuda.h', lib='cudart', libpath=_libpath, includes=_includes)
7,435,091,890,794,082,000
find the cuda include and library folders use ctx.program(source='main.c', target='app', use='CUDA CUDART')
Firmware/ardupilot/modules/waf/playground/cuda/cuda.py
find_cuda_libs
eanswer/LearningToFly
python
@conf def find_cuda_libs(self): "\n\tfind the cuda include and library folders\n\n\tuse ctx.program(source='main.c', target='app', use='CUDA CUDART')\n\t" if (not self.env.NVCC): self.fatal('check for nvcc first') d = self.root.find_node(self.env.NVCC[0]).parent.parent node = d.find_node('include') _includes = ((node and node.abspath()) or '') _libpath = [] for x in ('lib64', 'lib'): try: _libpath.append(d.find_node(x).abspath()) except: pass self.check_cxx(header='cuda.h', lib='cuda', libpath=_libpath, includes=_includes) self.check_cxx(header='cuda.h', lib='cudart', libpath=_libpath, includes=_includes)
def arguments_window(args: Namespace) -> ArgumentsResults: 'Window interface\n\n :param args: the arguments passed from the command line\n :return: Tuple[Union[str, None], Namespace] - The new arguments\n ' filename: str = ((C.SAVE_FILE_DIR + args.lottery_type) + C.SAVE_FILE_TYPE) layout = [[sg.Text(text='Lottery type:'), sg.InputCombo(values=tuple(C.LOTTERY_TYPES), default_value=args.lottery_type, readonly=True, enable_events=True, size=(10, 1), tooltip='Choose a lottery type', key=C.ELEMENT_NAMES['LOTTO']), sg.Frame(layout=[[sg.Text(text='Number of lines'), sg.InputText(default_text=args.number_of_lines, enable_events=True, size=(3, 1), justification='right', key=C.ELEMENT_NAMES['COUNT'])]], title='', tooltip='Choose the number of lines to generate', relief=sg.RELIEF_FLAT, key=C.ELEMENT_NAMES['LINES'])], [sg.Frame(layout=[[sg.Radio(text='Save', group_id='R', default=(not args.no_save), tooltip='Save the generated numbers', enable_events=True, key=C.ELEMENT_NAMES['SAVE']), sg.Radio(text='Do NOT save', group_id='R', default=args.no_save, tooltip='Do not save the generated numbers', enable_events=True, key=C.ELEMENT_NAMES['NOSAVE']), sg.Radio(text='Delete', group_id='R', default=args.delete, enable_events=True, tooltip='Delete a saved file', key=C.ELEMENT_NAMES['DELETE']), sg.Radio(text='Show', group_id='R', default=args.print, tooltip='Display a previously saved file', enable_events=True, key=C.ELEMENT_NAMES['SHOW'])]], title='Saved file options', relief=sg.RELIEF_SOLID, size=(0, 40))], [sg.Text(text=('File name: ' + filename), key=C.ELEMENT_NAMES['FILENAME'], size=(50, 2), tooltip='The name of the file to save or to display', justification='left')], [sg.OK(key='OK', focus=True), sg.Quit(key='Cancel', tooltip='Do nothing and quit')]] window = sg.Window(title='Lottery number Generator Arguments', layout=layout, text_justification=C.GUI_JUSTIFY, font=(C.GUI_FONT_NAME, C.GUI_FONT_SIZE)) while True: (event, values) = window.Read() if (event == C.ELEMENT_NAMES['DELETE']): window.Element(key='OK').Update('Delete Saved File') window.Element(key=C.ELEMENT_NAMES['LINES']).Update(visible=False) window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('File to delete: ' + filename)) elif (event == C.ELEMENT_NAMES['SHOW']): window.Element(key='OK').Update('Show Saved File') window.Element(key=C.ELEMENT_NAMES['LINES']).Update(visible=False) window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('File to display: ' + filename)) elif (event in (C.ELEMENT_NAMES['NOSAVE'], C.ELEMENT_NAMES['SAVE'])): window.Element(key='OK').Update('Generate Numbers') window.Element(key=C.ELEMENT_NAMES['LINES']).Update(visible=True) if (event == C.ELEMENT_NAMES['NOSAVE']): window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update('File will not be saved') elif (event == C.ELEMENT_NAMES['SAVE']): window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('Will be saved as: ' + filename)) if (event == C.ELEMENT_NAMES['LOTTO']): filename = ((C.SAVE_FILE_DIR + values[C.ELEMENT_NAMES['LOTTO']]) + C.SAVE_FILE_TYPE) window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('File name: ' + filename)) elif (event == C.ELEMENT_NAMES['COUNT']): if values[C.ELEMENT_NAMES['COUNT']].isnumeric(): temp = int(values[C.ELEMENT_NAMES['COUNT']]) else: temp = False if ((temp < C.MIN_LINES) or (temp > C.MAX_LINES)): elem = window.Element(key=C.ELEMENT_NAMES['COUNT']) elem.Update(C.DEFAULT_LINES) msg = 'number of lines must be in the range 1-100' popup_window(text=msg) elif ((event == 'OK') or (event == 'Cancel') or (event is None)): break if ((event != 'Cancel') and (event is not None)): args.lottery_type = values[C.ELEMENT_NAMES['LOTTO']] args.number_of_lines = int(values[C.ELEMENT_NAMES['COUNT']]) args.delete = values[C.ELEMENT_NAMES['DELETE']] args.print = values[C.ELEMENT_NAMES['SHOW']] args.no_save = values[C.ELEMENT_NAMES['NOSAVE']] window.Close() return (event, args)
910,027,326,538,831,600
Window interface :param args: the arguments passed from the command line :return: Tuple[Union[str, None], Namespace] - The new arguments
gui_arguments.py
arguments_window
bernduwiesner/GenLottery
python
def arguments_window(args: Namespace) -> ArgumentsResults: 'Window interface\n\n :param args: the arguments passed from the command line\n :return: Tuple[Union[str, None], Namespace] - The new arguments\n ' filename: str = ((C.SAVE_FILE_DIR + args.lottery_type) + C.SAVE_FILE_TYPE) layout = [[sg.Text(text='Lottery type:'), sg.InputCombo(values=tuple(C.LOTTERY_TYPES), default_value=args.lottery_type, readonly=True, enable_events=True, size=(10, 1), tooltip='Choose a lottery type', key=C.ELEMENT_NAMES['LOTTO']), sg.Frame(layout=[[sg.Text(text='Number of lines'), sg.InputText(default_text=args.number_of_lines, enable_events=True, size=(3, 1), justification='right', key=C.ELEMENT_NAMES['COUNT'])]], title='', tooltip='Choose the number of lines to generate', relief=sg.RELIEF_FLAT, key=C.ELEMENT_NAMES['LINES'])], [sg.Frame(layout=[[sg.Radio(text='Save', group_id='R', default=(not args.no_save), tooltip='Save the generated numbers', enable_events=True, key=C.ELEMENT_NAMES['SAVE']), sg.Radio(text='Do NOT save', group_id='R', default=args.no_save, tooltip='Do not save the generated numbers', enable_events=True, key=C.ELEMENT_NAMES['NOSAVE']), sg.Radio(text='Delete', group_id='R', default=args.delete, enable_events=True, tooltip='Delete a saved file', key=C.ELEMENT_NAMES['DELETE']), sg.Radio(text='Show', group_id='R', default=args.print, tooltip='Display a previously saved file', enable_events=True, key=C.ELEMENT_NAMES['SHOW'])]], title='Saved file options', relief=sg.RELIEF_SOLID, size=(0, 40))], [sg.Text(text=('File name: ' + filename), key=C.ELEMENT_NAMES['FILENAME'], size=(50, 2), tooltip='The name of the file to save or to display', justification='left')], [sg.OK(key='OK', focus=True), sg.Quit(key='Cancel', tooltip='Do nothing and quit')]] window = sg.Window(title='Lottery number Generator Arguments', layout=layout, text_justification=C.GUI_JUSTIFY, font=(C.GUI_FONT_NAME, C.GUI_FONT_SIZE)) while True: (event, values) = window.Read() if (event == C.ELEMENT_NAMES['DELETE']): window.Element(key='OK').Update('Delete Saved File') window.Element(key=C.ELEMENT_NAMES['LINES']).Update(visible=False) window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('File to delete: ' + filename)) elif (event == C.ELEMENT_NAMES['SHOW']): window.Element(key='OK').Update('Show Saved File') window.Element(key=C.ELEMENT_NAMES['LINES']).Update(visible=False) window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('File to display: ' + filename)) elif (event in (C.ELEMENT_NAMES['NOSAVE'], C.ELEMENT_NAMES['SAVE'])): window.Element(key='OK').Update('Generate Numbers') window.Element(key=C.ELEMENT_NAMES['LINES']).Update(visible=True) if (event == C.ELEMENT_NAMES['NOSAVE']): window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update('File will not be saved') elif (event == C.ELEMENT_NAMES['SAVE']): window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('Will be saved as: ' + filename)) if (event == C.ELEMENT_NAMES['LOTTO']): filename = ((C.SAVE_FILE_DIR + values[C.ELEMENT_NAMES['LOTTO']]) + C.SAVE_FILE_TYPE) window.Element(key=C.ELEMENT_NAMES['FILENAME']).Update(('File name: ' + filename)) elif (event == C.ELEMENT_NAMES['COUNT']): if values[C.ELEMENT_NAMES['COUNT']].isnumeric(): temp = int(values[C.ELEMENT_NAMES['COUNT']]) else: temp = False if ((temp < C.MIN_LINES) or (temp > C.MAX_LINES)): elem = window.Element(key=C.ELEMENT_NAMES['COUNT']) elem.Update(C.DEFAULT_LINES) msg = 'number of lines must be in the range 1-100' popup_window(text=msg) elif ((event == 'OK') or (event == 'Cancel') or (event is None)): break if ((event != 'Cancel') and (event is not None)): args.lottery_type = values[C.ELEMENT_NAMES['LOTTO']] args.number_of_lines = int(values[C.ELEMENT_NAMES['COUNT']]) args.delete = values[C.ELEMENT_NAMES['DELETE']] args.print = values[C.ELEMENT_NAMES['SHOW']] args.no_save = values[C.ELEMENT_NAMES['NOSAVE']] window.Close() return (event, args)
def calculate_perplexity(models, coefs, data): '\n Calculate perplexity with given model\n :param models: language models\n :param coefs: coefficients\n :param data: test data\n :return: perplexity\n ' pp = 0 uniform_prob = [] unigram_prob = [] bigram_prob = [] trigram_prob = [] prob_table_unifrom = None prob_table_1gram = None prob_table_2gram = None prob_table_3gram = None min_freq = models[0].min_freq train_vocabulary = models[0].vocabulary (word_to_idx, idx_to_word) = (models[0].word_to_idx, models[0].idx_to_word) test_infrequent_words = find_infrequent_words(data, min_freq) replace_infrequent_words(data, test_infrequent_words) for i in range(len(data)): for j in range(len(data[i])): if (data[i][j] not in train_vocabulary): data[i][j] = 'UNK' (corpus_1gram, vocabulary, V, N) = get_vocabulary(data) corpus_2gram = [(corpus_1gram[i], corpus_1gram[(i + 1)]) for i in range((len(corpus_1gram) - 1))] corpus_3gram = [(corpus_1gram[i], corpus_1gram[(i + 1)], corpus_1gram[(i + 2)]) for i in range((len(corpus_1gram) - 2))] for i in range(len(models)): model = models[i] if model.uniform: prob_table_unifrom = model.uniform_table for word in corpus_1gram: uniform_prob.append((prob_table_unifrom[0][word_to_idx[word]] * coefs[0])) elif (model.ngram == 1): prob_table_1gram = model.unigram_table for word in corpus_1gram: unigram_prob.append((prob_table_1gram[0][word_to_idx[word]] * coefs[1])) elif (model.ngram == 2): prob_table_2gram = model.bigram_table bigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_2gram[0][0]]]) for words in corpus_2gram: word1 = words[0] word2 = words[1] prob_1gram = prob_table_1gram[0][word_to_idx[word2]] prob_2gram = prob_table_2gram[word_to_idx[word1]][word_to_idx[word2]] if (prob_2gram != 0): bigram_prob.append((prob_2gram * coefs[2])) else: bigram_prob.append((prob_1gram * coefs[2])) elif (model.ngram == 3): prob_table_3gram = model.trigram_table train_corpus_3gram = set(model.corpus_3gram) trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][0]]]) trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][1]]]) for words in corpus_3gram: word1 = words[0] word2 = words[1] word3 = words[2] if (words in train_corpus_3gram): prob_3gram = prob_table_3gram[(word1, word2, word3)] trigram_prob.append((prob_3gram * coefs[3])) else: prob_1gram = prob_table_1gram[0][word_to_idx[word3]] prob_2gram = prob_table_2gram[word_to_idx[word2]][word_to_idx[word3]] if (prob_2gram != 0): trigram_prob.append((prob_2gram * coefs[3])) else: trigram_prob.append((prob_1gram * coefs[3])) prob = np.zeros((N,), dtype=np.float64) for i in range(len(prob)): prob[i] += uniform_prob[i] prob[i] += unigram_prob[i] prob[i] += bigram_prob[i] prob[i] += trigram_prob[i] for p in prob: pp += np.log2(p) pp /= (- N) pp = np.power(2, pp) return pp
-6,578,478,251,837,124,000
Calculate perplexity with given model :param models: language models :param coefs: coefficients :param data: test data :return: perplexity
lm.py
calculate_perplexity
alvisdeng/NLP-Language-Model
python
def calculate_perplexity(models, coefs, data): '\n Calculate perplexity with given model\n :param models: language models\n :param coefs: coefficients\n :param data: test data\n :return: perplexity\n ' pp = 0 uniform_prob = [] unigram_prob = [] bigram_prob = [] trigram_prob = [] prob_table_unifrom = None prob_table_1gram = None prob_table_2gram = None prob_table_3gram = None min_freq = models[0].min_freq train_vocabulary = models[0].vocabulary (word_to_idx, idx_to_word) = (models[0].word_to_idx, models[0].idx_to_word) test_infrequent_words = find_infrequent_words(data, min_freq) replace_infrequent_words(data, test_infrequent_words) for i in range(len(data)): for j in range(len(data[i])): if (data[i][j] not in train_vocabulary): data[i][j] = 'UNK' (corpus_1gram, vocabulary, V, N) = get_vocabulary(data) corpus_2gram = [(corpus_1gram[i], corpus_1gram[(i + 1)]) for i in range((len(corpus_1gram) - 1))] corpus_3gram = [(corpus_1gram[i], corpus_1gram[(i + 1)], corpus_1gram[(i + 2)]) for i in range((len(corpus_1gram) - 2))] for i in range(len(models)): model = models[i] if model.uniform: prob_table_unifrom = model.uniform_table for word in corpus_1gram: uniform_prob.append((prob_table_unifrom[0][word_to_idx[word]] * coefs[0])) elif (model.ngram == 1): prob_table_1gram = model.unigram_table for word in corpus_1gram: unigram_prob.append((prob_table_1gram[0][word_to_idx[word]] * coefs[1])) elif (model.ngram == 2): prob_table_2gram = model.bigram_table bigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_2gram[0][0]]]) for words in corpus_2gram: word1 = words[0] word2 = words[1] prob_1gram = prob_table_1gram[0][word_to_idx[word2]] prob_2gram = prob_table_2gram[word_to_idx[word1]][word_to_idx[word2]] if (prob_2gram != 0): bigram_prob.append((prob_2gram * coefs[2])) else: bigram_prob.append((prob_1gram * coefs[2])) elif (model.ngram == 3): prob_table_3gram = model.trigram_table train_corpus_3gram = set(model.corpus_3gram) trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][0]]]) trigram_prob.append(prob_table_1gram[0][word_to_idx[corpus_3gram[0][1]]]) for words in corpus_3gram: word1 = words[0] word2 = words[1] word3 = words[2] if (words in train_corpus_3gram): prob_3gram = prob_table_3gram[(word1, word2, word3)] trigram_prob.append((prob_3gram * coefs[3])) else: prob_1gram = prob_table_1gram[0][word_to_idx[word3]] prob_2gram = prob_table_2gram[word_to_idx[word2]][word_to_idx[word3]] if (prob_2gram != 0): trigram_prob.append((prob_2gram * coefs[3])) else: trigram_prob.append((prob_1gram * coefs[3])) prob = np.zeros((N,), dtype=np.float64) for i in range(len(prob)): prob[i] += uniform_prob[i] prob[i] += unigram_prob[i] prob[i] += bigram_prob[i] prob[i] += trigram_prob[i] for p in prob: pp += np.log2(p) pp /= (- N) pp = np.power(2, pp) return pp
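The interpolation itself is a coefficient-weighted sum of per-token probabilities, and the perplexity is 2 raised to the negative mean log2 probability; a toy sketch with invented numbers:

import numpy as np

coefs = np.array([0.1, 0.3, 0.3, 0.3])     # uniform, unigram, bigram, trigram weights
per_model = np.array([[0.25, 0.25, 0.25],  # invented per-token probabilities
                      [0.30, 0.10, 0.20],
                      [0.40, 0.05, 0.30],
                      [0.50, 0.02, 0.35]])
p = coefs @ per_model                      # interpolated probability per token
pp = 2 ** (-np.log2(p).mean())             # matches pp /= -N; pp = 2**pp above
print(pp)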
def parse_args(): '\n Parse input positional arguments from command line\n :return: args - parsed arguments\n ' parser = argparse.ArgumentParser('N-gram Language Model') parser.add_argument('coef_unif', help='coefficient for the uniform model.', type=float) parser.add_argument('coef_uni', help='coefficient for the unigram model.', type=float) parser.add_argument('coef_bi', help='coefficient for the bigram model.', type=float) parser.add_argument('coef_tri', help='coefficient for the trigram model.', type=float) parser.add_argument('min_freq', type=int, help='minimum frequency threshold for substituting a word with the UNK token; set to 1 to disable this threshold') parser.add_argument('testfile', help='test text file.') parser.add_argument('trainfile', help='training text file.', nargs='+') return parser.parse_args()
-2,590,715,522,766,917,000
Parse input positional arguments from command line :return: args - parsed arguments
lm.py
parse_args
alvisdeng/NLP-Language-Model
python
def parse_args(): '\n Parse input positional arguments from command line\n :return: args - parsed arguments\n ' parser = argparse.ArgumentParser('N-gram Language Model') parser.add_argument('coef_unif', help='coefficient for the uniform model.', type=float) parser.add_argument('coef_uni', help='coefficient for the unigram model.', type=float) parser.add_argument('coef_bi', help='coefficient for the bigram model.', type=float) parser.add_argument('coef_tri', help='coefficient for the trigram model.', type=float) parser.add_argument('min_freq', type=int, help='minimum frequency threshold for substituting a word with the UNK token; set to 1 to disable this threshold') parser.add_argument('testfile', help='test text file.') parser.add_argument('trainfile', help='training text file.', nargs='+') return parser.parse_args()
def __init__(self, corpus, ngram, min_freq, uniform=False): '\n Initialize language model\n :param corpus: input text corpus to build LM on\n :param ngram: number of n-gram, e.g. 1, 2, 3, ...\n :param min_freq: minimum frequency threshold to set a word to UNK placeholder\n set to 1 to not use this threshold\n :param uniform: boolean flag, set to True to indicate this model is a simple uniform LM\n otherwise will be an N-gram model\n ' self.corpus = corpus self.ngram = ngram self.min_freq = min_freq self.uniform = uniform self.uniform_table = None self.unigram_table = None self.bigram_table = None self.trigram_table = None self.infrequent_words = find_infrequent_words(self.corpus, self.min_freq) replace_infrequent_words(self.corpus, self.infrequent_words) (self.corpus_1gram, self.vocabulary, self.V, self.N) = get_vocabulary(self.corpus) (self.word_to_idx, self.idx_to_word) = get_word_mappings(self.vocabulary) self.counter_1gram = get_counter(self.corpus_1gram) self.build()
3,727,620,931,636,181,500
Initialize language model :param corpus: input text corpus to build LM on :param ngram: number of n-gram, e.g. 1, 2, 3, ... :param min_freq: minimum frequency threshold to set a word to UNK placeholder set to 1 to not use this threshold :param uniform: boolean flag, set to True to indicate this model is a simple uniform LM otherwise will be an N-gram model
lm.py
__init__
alvisdeng/NLP-Language-Model
python
def __init__(self, corpus, ngram, min_freq, uniform=False): '\n Initialize language model\n :param corpus: input text corpus to build LM on\n :param ngram: number of n-gram, e.g. 1, 2, 3, ...\n :param min_freq: minimum frequency threshold to set a word to UNK placeholder\n set to 1 to not use this threshold\n :param uniform: boolean flag, set to True to indicate this model is a simple uniform LM\n otherwise will be an N-gram model\n ' self.corpus = corpus self.ngram = ngram self.min_freq = min_freq self.uniform = uniform self.uniform_table = None self.unigram_table = None self.bigram_table = None self.trigram_table = None self.infrequent_words = find_infrequent_words(self.corpus, self.min_freq) replace_infrequent_words(self.corpus, self.infrequent_words) (self.corpus_1gram, self.vocabulary, self.V, self.N) = get_vocabulary(self.corpus) (self.word_to_idx, self.idx_to_word) = get_word_mappings(self.vocabulary) self.counter_1gram = get_counter(self.corpus_1gram) self.build()
def build(self): '\n Build LM from text corpus\n ' if self.uniform: self.uniform_table = get_uniform_tables(self.V) elif (self.ngram == 1): self.unigram_table = get_unigram_tables(self.V, self.N, self.counter_1gram, self.word_to_idx) elif (self.ngram == 2): self.corpus_2gram = [(self.corpus_1gram[i], self.corpus_1gram[(i + 1)]) for i in range((len(self.corpus_1gram) - 1))] self.counter_2gram = get_counter(self.corpus_2gram) self.bigram_table = get_bigram_tables(self.V, self.counter_1gram, self.counter_2gram, self.word_to_idx, self.idx_to_word) elif (self.ngram == 3): self.corpus_2gram = [(self.corpus_1gram[i], self.corpus_1gram[(i + 1)]) for i in range((len(self.corpus_1gram) - 1))] self.counter_2gram = get_counter(self.corpus_2gram) self.corpus_3gram = [(self.corpus_1gram[i], self.corpus_1gram[(i + 1)], self.corpus_1gram[(i + 2)]) for i in range((len(self.corpus_1gram) - 2))] self.counter_3gram = get_counter(self.corpus_3gram) self.trigram_table = get_trigram_tables(self.V, self.counter_2gram, self.counter_3gram, self.word_to_idx)
-3,804,000,836,486,489,000
Build LM from text corpus
lm.py
build
alvisdeng/NLP-Language-Model
python
def build(self): '\n \n ' if self.uniform: self.uniform_table = get_uniform_tables(self.V) elif (self.ngram == 1): self.unigram_table = get_unigram_tables(self.V, self.N, self.counter_1gram, self.word_to_idx) elif (self.ngram == 2): self.corpus_2gram = [(self.corpus_1gram[i], self.corpus_1gram[(i + 1)]) for i in range((len(self.corpus_1gram) - 1))] self.counter_2gram = get_counter(self.corpus_2gram) self.bigram_table = get_bigram_tables(self.V, self.counter_1gram, self.counter_2gram, self.word_to_idx, self.idx_to_word) elif (self.ngram == 3): self.corpus_2gram = [(self.corpus_1gram[i], self.corpus_1gram[(i + 1)]) for i in range((len(self.corpus_1gram) - 1))] self.counter_2gram = get_counter(self.corpus_2gram) self.corpus_3gram = [(self.corpus_1gram[i], self.corpus_1gram[(i + 1)], self.corpus_1gram[(i + 2)]) for i in range((len(self.corpus_1gram) - 2))] self.counter_3gram = get_counter(self.corpus_3gram) self.trigram_table = get_trigram_tables(self.V, self.counter_2gram, self.counter_3gram, self.word_to_idx)
def most_common_words(self, k): '\n Return the top-k most frequent n-grams and their frequencies in sorted order.\n For uniform models, the frequency should be "1" for each token.\n\n Your return should be sorted in descending order of frequency.\n Sort according to ascending alphabet order when multiple words have same frequency.\n :return: list[tuple(token, freq)] of top k most common tokens\n ' if self.uniform: return [(word, 1) for word in sorted(self.vocabulary)[0:k]] elif (self.ngram == 1): return sorted(self.counter_1gram.most_common(), key=(lambda x: ((- x[1]), x[0])))[0:k] elif (self.ngram == 2): return [(((token[0] + ' ') + token[1]), num) for (token, num) in sorted(self.counter_2gram.most_common(), key=(lambda x: ((- x[1]), x[0])))[0:k]] elif (self.ngram == 3): return [(((((token[0] + ' ') + token[1]) + ' ') + token[2]), num) for (token, num) in sorted(self.counter_3gram.most_common(), key=(lambda x: ((- x[1]), x[0])))[0:k]] return
-1,959,674,398,925,063,700
Return the top-k most frequent n-grams and their frequencies in sorted order. For uniform models, the frequency should be "1" for each token. Your return should be sorted in descending order of frequency. Sort according to ascending alphabet order when multiple words have same frequency. :return: list[tuple(token, freq)] of top k most common tokens
lm.py
most_common_words
alvisdeng/NLP-Language-Model
python
def most_common_words(self, k): '\n Return the top-k most frequent n-grams and their frequencies in sorted order.\n For uniform models, the frequency should be "1" for each token.\n\n Your return should be sorted in descending order of frequency.\n Sort according to ascending alphabet order when multiple words have same frequency.\n :return: list[tuple(token, freq)] of top k most common tokens\n ' if self.uniform: return [(word, 1) for word in sorted(self.vocabulary)[0:k]] elif (self.ngram == 1): return sorted(self.counter_1gram.most_common(), key=(lambda x: ((- x[1]), x[0])))[0:k] elif (self.ngram == 2): return [(((token[0] + ' ') + token[1]), num) for (token, num) in sorted(self.counter_2gram.most_common(), key=(lambda x: ((- x[1]), x[0])))[0:k]] elif (self.ngram == 3): return [(((((token[0] + ' ') + token[1]) + ' ') + token[2]), num) for (token, num) in sorted(self.counter_3gram.most_common(), key=(lambda x: ((- x[1]), x[0])))[0:k]] return
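The key=(lambda x: ((- x[1]), x[0])) idiom sorts by descending frequency and breaks ties alphabetically; the same pattern in isolation:

from collections import Counter

counts = Counter('to be or not to be'.split())
top = sorted(counts.most_common(), key=lambda x: (-x[1], x[0]))[:3]
print(top)  # [('be', 2), ('to', 2), ('not', 1)]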
@option(Configs.model) def autoregressive_model(c: Configs): '\n ### Initialize the auto-regressive model\n ' from labml_nn.transformers.xl import RelativeMultiHeadAttention from labml_nn.transformers.feed_forward import FeedForward m = AutoregressiveModel(c.n_tokens, c.d_model, CompressiveTransformer(CompressiveTransformerLayer(d_model=c.d_model, self_attn=RelativeMultiHeadAttention(c.heads, c.d_model, c.dropout), feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout), dropout_prob=c.dropout, compress=Conv1dCompression(c.compression_rate, c.d_model)), c.n_layers)) return m.to(c.device)
5,798,183,949,863,375,000
### Initialize the auto-regressive model
labml_nn/transformers/compressive/experiment.py
autoregressive_model
Aarsh2001/annotated_deep_learning_paper_implementations
python
@option(Configs.model) def autoregressive_model(c: Configs): '\n \n ' from labml_nn.transformers.xl import RelativeMultiHeadAttention from labml_nn.transformers.feed_forward import FeedForward m = AutoregressiveModel(c.n_tokens, c.d_model, CompressiveTransformer(CompressiveTransformerLayer(d_model=c.d_model, self_attn=RelativeMultiHeadAttention(c.heads, c.d_model, c.dropout), feed_forward=FeedForward(c.d_model, c.d_ff, c.dropout), dropout_prob=c.dropout, compress=Conv1dCompression(c.compression_rate, c.d_model)), c.n_layers)) return m.to(c.device)
@option(Configs.attention_reconstruction_loss) def attention_reconstruction_loss(c: Configs): '\n ### Initialize the attention reconstruction loss\n ' return AttentionReconstructionLoss(c.model.transformer.layers)
-6,834,483,399,557,530,000
### Initialize the attention reconstruction loss
labml_nn/transformers/compressive/experiment.py
attention_reconstruction_loss
Aarsh2001/annotated_deep_learning_paper_implementations
python
@option(Configs.attention_reconstruction_loss) def attention_reconstruction_loss(c: Configs): '\n \n ' return AttentionReconstructionLoss(c.model.transformer.layers)
def main(): '\n ### Run the experiment\n ' experiment.create(name='compressive_transformer', comment='') conf = Configs() experiment.configs(conf, {'tokenizer': 'character', 'text': 'tiny_shakespeare', 'optimizer.learning_rate': 0.00025, 'optimizer.optimizer': 'AdamW', 'prompt': 'It is', 'prompt_separator': '', 'train_loader': 'sequential_train_loader', 'valid_loader': 'sequential_valid_loader', 'seq_len': 8, 'mem_len': 8, 'epochs': 128, 'batch_size': 32, 'inner_iterations': 25, 'compression_rate': 2}) experiment.add_pytorch_models({'model': conf.model}) with experiment.start(): conf.run()
6,623,430,949,124,413,000
### Run the experiment
labml_nn/transformers/compressive/experiment.py
main
Aarsh2001/annotated_deep_learning_paper_implementations
python
def main(): '\n \n ' experiment.create(name='compressive_transformer', comment='') conf = Configs() experiment.configs(conf, {'tokenizer': 'character', 'text': 'tiny_shakespeare', 'optimizer.learning_rate': 0.00025, 'optimizer.optimizer': 'AdamW', 'prompt': 'It is', 'prompt_separator': '', 'train_loader': 'sequential_train_loader', 'valid_loader': 'sequential_valid_loader', 'seq_len': 8, 'mem_len': 8, 'epochs': 128, 'batch_size': 32, 'inner_iterations': 25, 'compression_rate': 2}) experiment.add_pytorch_models({'model': conf.model}) with experiment.start(): conf.run()
@torch.no_grad() def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) -> Tuple[(CompressedMemory, List[torch.Tensor])]: '\n Concatenate new memories and compress the oldest memories.\n ' if ((self.mem_len == 0) and (self.c_mem_len == 0)): return (CompressedMemory([], []), []) if (mem is not None): (mem, c_mem) = (mem.mem, mem.c_mem) else: (mem, c_mem) = ([], []) if mem: mem = [torch.cat((m, x), dim=0) for (m, x) in zip(mem, new_mem)] else: mem = new_mem if (len(mem[0]) > self.mem_len): n_c_mem = ((((len(mem[0]) - self.mem_len) + self.compression_rate) - 1) // self.compression_rate) n_old = (n_c_mem * self.compression_rate) mem_to_compress = [] uncompressed_mem = [] for m in mem: (cm, m) = torch.split(m, [n_old, (len(m) - n_old)]) mem_to_compress.append(cm) uncompressed_mem.append(m) mem = uncompressed_mem new_c_mem = [] for (i, layer) in enumerate(self.model.transformer.layers): new_c_mem.append(layer.compress(mem_to_compress[i])) if c_mem: c_mem = [torch.cat((m, nm), dim=0) for (m, nm) in zip(c_mem, new_c_mem)] else: c_mem = new_c_mem if (len(c_mem[0]) > self.c_mem_len): c_mem = [m[(- self.c_mem_len):] for m in c_mem] else: mem_to_compress = [] return (CompressedMemory(mem, c_mem), mem_to_compress)
1,156,103,335,448,820,500
Concatenate new memories and compress the oldest memories.
labml_nn/transformers/compressive/experiment.py
merge_compress_memory
Aarsh2001/annotated_deep_learning_paper_implementations
python
@torch.no_grad() def merge_compress_memory(self, mem: CompressedMemory, new_mem: List[torch.Tensor]) -> Tuple[(CompressedMemory, List[torch.Tensor])]: '\n \n ' if ((self.mem_len == 0) and (self.c_mem_len == 0)): return (CompressedMemory([], []), []) if (mem is not None): (mem, c_mem) = (mem.mem, mem.c_mem) else: (mem, c_mem) = ([], []) if mem: mem = [torch.cat((m, x), dim=0) for (m, x) in zip(mem, new_mem)] else: mem = new_mem if (len(mem[0]) > self.mem_len): n_c_mem = ((((len(mem[0]) - self.mem_len) + self.compression_rate) - 1) // self.compression_rate) n_old = (n_c_mem * self.compression_rate) mem_to_compress = [] uncompressed_mem = [] for m in mem: (cm, m) = torch.split(m, [n_old, (len(m) - n_old)]) mem_to_compress.append(cm) uncompressed_mem.append(m) mem = uncompressed_mem new_c_mem = [] for (i, layer) in enumerate(self.model.transformer.layers): new_c_mem.append(layer.compress(mem_to_compress[i])) if c_mem: c_mem = [torch.cat((m, nm), dim=0) for (m, nm) in zip(c_mem, new_c_mem)] else: c_mem = new_c_mem if (len(c_mem[0]) > self.c_mem_len): c_mem = [m[(- self.c_mem_len):] for m in c_mem] else: mem_to_compress = [] return (CompressedMemory(mem, c_mem), mem_to_compress)
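The n_c_mem expression is a ceiling division that decides how many of the oldest memory slots get handed to the compressor; worked through with sample sizes:

mem_total, mem_len, rate = 21, 8, 2
n_c_mem = (mem_total - mem_len + rate - 1) // rate  # ceil((21 - 8) / 2) = 7
n_old = n_c_mem * rate                              # the 14 oldest slots get compressed
print(n_c_mem, n_old)                               # 7 14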
def step(self, batch: any, batch_idx: BatchIndex): '\n ### Training/validation step\n ' (data, target) = (batch[0].to(self.device), batch[1].to(self.device)) if self.mode.is_train: tracker.add_global_step((data.shape[0] * data.shape[1])) with self.mode.update(is_log_activations=batch_idx.is_last): mem = self.memory.get() (output, new_mem) = self.model(data, mem) (mem, mem_to_compress) = self.merge_compress_memory(mem, new_mem) self.memory.set(mem) loss = self.loss_func(output, target) tracker.add('loss.', loss) if mem_to_compress: ar_loss = self.attention_reconstruction_loss(new_mem, mem_to_compress) tracker.add('ar_loss.', ar_loss) loss = (loss + ar_loss) self.accuracy(output, target) self.accuracy.track() if self.mode.is_train: loss.backward() torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip) self.optimizer.step() if batch_idx.is_last: tracker.add('model', self.model) self.optimizer.zero_grad() tracker.save()
-1,364,598,047,004,983,000
### Training/validation step
labml_nn/transformers/compressive/experiment.py
step
Aarsh2001/annotated_deep_learning_paper_implementations
python
def step(self, batch: any, batch_idx: BatchIndex): '\n \n ' (data, target) = (batch[0].to(self.device), batch[1].to(self.device)) if self.mode.is_train: tracker.add_global_step((data.shape[0] * data.shape[1])) with self.mode.update(is_log_activations=batch_idx.is_last): mem = self.memory.get() (output, new_mem) = self.model(data, mem) (mem, mem_to_compress) = self.merge_compress_memory(mem, new_mem) self.memory.set(mem) loss = self.loss_func(output, target) tracker.add('loss.', loss) if mem_to_compress: ar_loss = self.attention_reconstruction_loss(new_mem, mem_to_compress) tracker.add('ar_loss.', ar_loss) loss = (loss + ar_loss) self.accuracy(output, target) self.accuracy.track() if self.mode.is_train: loss.backward() torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=self.grad_norm_clip) self.optimizer.step() if batch_idx.is_last: tracker.add('model', self.model) self.optimizer.zero_grad() tracker.save()
def sample(self): '\n ### Sampling function to generate samples periodically while training\n ' prompt = self.prompt log = [(prompt, Text.subtle)] mem = CompressedMemory([], []) for i in monit.iterate('Sample', 25): data = self.text.text_to_i(prompt).unsqueeze((- 1)) data = data.to(self.device) (output, new_mem) = self.model(data, mem) output = output.argmax(dim=(- 1)).squeeze(1) prompt += (self.prompt_separator + self.text.itos[output[(- 1)]]) prompt = prompt[(- 1):] log += [((self.prompt_separator + self.text.itos[output[(- 1)]]), Text.value)] (mem, _) = self.merge_compress_memory(mem, new_mem) logger.log(log)
6,554,604,733,984,911,000
### Sampling function to generate samples periodically while training
labml_nn/transformers/compressive/experiment.py
sample
Aarsh2001/annotated_deep_learning_paper_implementations
python
def sample(self): '\n \n ' prompt = self.prompt log = [(prompt, Text.subtle)] mem = CompressedMemory([], []) for i in monit.iterate('Sample', 25): data = self.text.text_to_i(prompt).unsqueeze((- 1)) data = data.to(self.device) (output, new_mem) = self.model(data, mem) output = output.argmax(dim=(- 1)).squeeze(1) prompt += (self.prompt_separator + self.text.itos[output[(- 1)]]) prompt = prompt[(- 1):] log += [((self.prompt_separator + self.text.itos[output[(- 1)]]), Text.value)] (mem, _) = self.merge_compress_memory(mem, new_mem) logger.log(log)
def scores(self, scaling): "Compute site and species scores for different scalings.\n\n Parameters\n ----------\n scaling : int\n\n For a more detailed explanation of the interpretation, check\n Legendre & Legendre 1998, section 9.4.3. The notes that\n follow are quick recommendations.\n\n Scaling type 1 maintains :math:`\\chi^2` distances between\n rows (sites): in the transformed space, the euclidean\n distances between rows are equal to the :math:`\\chi^2`\n distances between rows in the original space. It should be\n used when studying the ordination of sites. Rows (sites)\n that are near a column (species) have high contributions\n from it.\n\n Scaling type 2 preserves :math:`\\chi^2` distances between\n columns (species), so euclidean distance between columns\n after transformation is equal to :math:`\\chi^2` distance\n between columns in the original space. It is best used\n when we are interested in the ordination of species. A\n column (species) that is next to a row (site) means that\n it is more abundant there.\n\n Other types of scalings are currently not implemented, as\n they're less used by ecologists (Legendre & Legendre 1998,\n p. 456).\n\n In general, species appearing far from the center of the\n biplot and far from its edges will probably exhibit better\n relationships than species either in the center (may be\n multimodal species, not related to the shown ordination\n axes...) or the edges (sparse species...).\n\n Returns\n -------\n OrdinationResults\n Object that stores the computed eigenvalues, the\n proportion explained by each of them (per unit),\n transformed coordinates, etc.\n\n See Also\n --------\n OrdinationResults\n " if (scaling not in {1, 2}): raise NotImplementedError('Scaling {0} not implemented.'.format(scaling)) V = ((self.column_marginals[:, None] ** (- 0.5)) * self.U) V_hat = ((self.row_marginals[:, None] ** (- 0.5)) * self.U_hat) F = (V_hat * self.W) F_hat = (V * self.W) eigvals = (self.W ** 2) species_scores = [V, F_hat][(scaling - 1)] site_scores = [F, V_hat][(scaling - 1)] return OrdinationResults(eigvals=eigvals, species=species_scores, site=site_scores, site_ids=self.row_ids, species_ids=self.column_ids)
-3,771,814,060,755,760,600
Compute site and species scores for different scalings. Parameters ---------- scaling : int For a more detailed explanation of the interpretation, check Legendre & Legendre 1998, section 9.4.3. The notes that follow are quick recommendations. Scaling type 1 maintains :math:`\chi^2` distances between rows (sites): in the transformed space, the euclidean distances between rows are equal to the :math:`\chi^2` distances between rows in the original space. It should be used when studying the ordination of sites. Rows (sites) that are near a column (species) have high contributions from it. Scaling type 2 preserves :math:`\chi^2` distances between columns (species), so euclidean distance between columns after transformation is equal to :math:`\chi^2` distance between columns in the original space. It is best used when we are interested in the ordination of species. A column (species) that is next to a row (site) means that it is more abundant there. Other types of scalings are currently not implemented, as they're less used by ecologists (Legendre & Legendre 1998, p. 456). In general, species appearing far from the center of the biplot and far from its edges will probably exhibit better relationships than species either in the center (may be multimodal species, not related to the shown ordination axes...) or the edges (sparse species...). Returns ------- OrdinationResults Object that stores the computed eigenvalues, the proportion explained by each of them (per unit), transformed coordinates, etc. See Also -------- OrdinationResults
skbio/stats/ordination/_correspondence_analysis.py
scores
JWDebelius/scikit-bio
python
def scores(self, scaling): "Compute site and species scores for different scalings.\n\n Parameters\n ----------\n scaling : int\n\n For a more detailed explanation of the interpretation, check\n Legendre & Legendre 1998, section 9.4.3. The notes that\n follow are quick recommendations.\n\n Scaling type 1 maintains :math:`\\chi^2` distances between\n rows (sites): in the transformed space, the euclidean\n distances between rows are equal to the :math:`\\chi^2`\n distances between rows in the original space. It should be\n used when studying the ordination of sites. Rows (sites)\n that are near a column (species) have high contributions\n from it.\n\n Scaling type 2 preserves :math:`\\chi^2` distances between\n columns (species), so euclidean distance between columns\n after transformation is equal to :math:`\\chi^2` distance\n between columns in the original space. It is best used\n when we are interested in the ordination of species. A\n column (species) that is next to a row (site) means that\n it is more abundant there.\n\n Other types of scalings are currently not implemented, as\n they're less used by ecologists (Legendre & Legendre 1998,\n p. 456).\n\n In general, species appearing far from the center of the\n biplot and far from its edges will probably exhibit better\n relationships than species either in the center (may be\n multimodal species, not related to the shown ordination\n axes...) or the edges (sparse species...).\n\n Returns\n -------\n OrdinationResults\n Object that stores the computed eigenvalues, the\n proportion explained by each of them (per unit),\n transformed coordinates, etc.\n\n See Also\n --------\n OrdinationResults\n " if (scaling not in {1, 2}): raise NotImplementedError('Scaling {0} not implemented.'.format(scaling)) V = ((self.column_marginals[:, None] ** (- 0.5)) * self.U) V_hat = ((self.row_marginals[:, None] ** (- 0.5)) * self.U_hat) F = (V_hat * self.W) F_hat = (V * self.W) eigvals = (self.W ** 2) species_scores = [V, F_hat][(scaling - 1)] site_scores = [F, V_hat][(scaling - 1)] return OrdinationResults(eigvals=eigvals, species=species_scores, site=site_scores, site_ids=self.row_ids, species_ids=self.column_ids)
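The scaling arithmetic in the record above can be reproduced directly from an SVD of the chi-square standardized contingency table. The following is a minimal NumPy sketch, not scikit-bio's actual pipeline: the table X and the truncation to non-trivial axes are illustrative assumptions, while the variable names (U, U_hat, W, V, V_hat, F, F_hat) mirror the record.

import numpy as np

# Toy sites-by-species contingency table (rows: sites, columns: species).
X = np.array([[10.0, 10.0, 20.0],
              [10.0, 15.0, 10.0],
              [15.0,  5.0,  5.0]])
Q = X / X.sum()
row = Q.sum(axis=1)                       # row (site) marginals
col = Q.sum(axis=0)                       # column (species) marginals
# Chi-square standardized residuals, then SVD.
Q_bar = (Q - np.outer(row, col)) / np.sqrt(np.outer(row, col))
U_hat, W, Ut = np.linalg.svd(Q_bar)
U = Ut.T
r = min(X.shape) - 1                      # keep only the non-trivial axes
U_hat, W, U = U_hat[:, :r], W[:r], U[:, :r]

V = col[:, None] ** -0.5 * U              # species scores, scaling 1
V_hat = row[:, None] ** -0.5 * U_hat      # site scores, scaling 2
F = V_hat * W                             # site scores, scaling 1
F_hat = V * W                             # species scores, scaling 2
eigvals = W ** 2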
def testStatementResponse(self): 'Test StatementResponse' pass
-677,246,967,601,330,200
Test StatementResponse
test/test_statement_response.py
testStatementResponse
mxenabled/mx-platform-python
python
def testStatementResponse(self): pass
def AIC(N, rho, k): 'Akaike Information Criterion\n\n :param rho: rho at order k\n :param N: sample size\n :param k: AR order.\n\n If k is the AR order and N the size of the sample, then Akaike criterion is\n\n .. math:: AIC(k) = \\log(\\rho_k) + 2\\frac{k+1}{N}\n\n ::\n\n AIC(64, [0.5,0.3,0.2], [1,2,3])\n\n :validation: double checked versus octave.\n ' from numpy import log, array res = ((N * log(array(rho))) + (2.0 * (array(k) + 1))) return res
-8,007,909,734,390,670,000
Akaike Information Criterion

:param rho: rho at order k
:param N: sample size
:param k: AR order.

If k is the AR order and N the size of the sample, then the Akaike criterion, as implemented here, is

.. math:: AIC(k) = N\log(\rho_k) + 2(k+1)

::

    AIC(64, [0.5,0.3,0.2], [1,2,3])

:validation: double checked versus octave.
src/spectrum/criteria.py
AIC
butala/spectrum
python
def AIC(N, rho, k): 'Akaike Information Criterion\n\n :param rho: rho at order k\n :param N: sample size\n :param k: AR order.\n\n If k is the AR order and N the size of the sample, then Akaike criterion is\n\n .. math:: AIC(k) = \\log(\\rho_k) + 2\\frac{k+1}{N}\n\n ::\n\n AIC(64, [0.5,0.3,0.2], [1,2,3])\n\n :validation: double checked versus octave.\n ' from numpy import log, array res = ((N * log(array(rho))) + (2.0 * (array(k) + 1))) return res
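A minimal usage sketch for AIC-based AR order selection; the same argmin pattern applies to the AICc, KIC, AKICc, FPE, MDL and CAT records that follow. The rho sequence (prediction error versus candidate order) is invented for illustration, and the import path is assumed from the record's file path.

import numpy as np
from spectrum.criteria import AIC

N = 64
rho = [1.0, 0.31, 0.28, 0.275, 0.274]   # made-up prediction errors per order
k = np.arange(1, len(rho) + 1)          # candidate AR orders
values = AIC(N, rho, k)
best_order = int(k[np.argmin(values)])  # order minimizing the criterion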
def AICc(N, rho, k, norm=True): 'corrected Akaike information criterion\n\n .. math:: AICc(k) = log(\\rho_k) + 2 \\frac{k+1}{N-k-2}\n\n\n :validation: double checked versus octave.\n ' from numpy import log, array p = k res = (log(rho) + ((2.0 * (p + 1)) / ((N - p) - 2))) return res
7,617,045,710,475,825,000
corrected Akaike information criterion

.. math:: AICc(k) = \log(\rho_k) + 2 \frac{k+1}{N-k-2}

:validation: double checked versus octave.
src/spectrum/criteria.py
AICc
butala/spectrum
python
def AICc(N, rho, k, norm=True): 'corrected Akaike information criterion\n\n .. math:: AICc(k) = log(\\rho_k) + 2 \\frac{k+1}{N-k-2}\n\n\n :validation: double checked versus octave.\n ' from numpy import log, array p = k res = (log(rho) + ((2.0 * (p + 1)) / ((N - p) - 2))) return res
def KIC(N, rho, k): 'Kullback information criterion\n\n .. math:: KIC(k) = log(\\rho_k) + 3 \\frac{k+1}{N}\n\n :validation: double checked versus octave.\n ' from numpy import log, array res = (log(rho) + ((3.0 * (k + 1.0)) / float(N))) return res
-1,878,092,060,176,235,500
Kullback information criterion

.. math:: KIC(k) = \log(\rho_k) + 3 \frac{k+1}{N}

:validation: double checked versus octave.
src/spectrum/criteria.py
KIC
butala/spectrum
python
def KIC(N, rho, k): 'Kullback information criterion\n\n .. math:: KIC(k) = log(\\rho_k) + 3 \\frac{k+1}{N}\n\n :validation: double checked versus octave.\n ' from numpy import log, array res = (log(rho) + ((3.0 * (k + 1.0)) / float(N))) return res
def AKICc(N, rho, k): 'approximate corrected Kullback information\n\n .. math:: AKICc(k) = log(rho_k) + \\frac{p}{N*(N-k)} + (3-\\frac{k+2}{N})*\\frac{k+1}{N-k-2}\n\n ' from numpy import log, array p = k res = ((log(rho) + ((p / N) / (N - p))) + (((3.0 - ((p + 2.0) / N)) * (p + 1.0)) / ((N - p) - 2.0))) return res
3,753,835,065,684,638,700
approximate corrected Kullback information

.. math:: AKICc(k) = \log(\rho_k) + \frac{k}{N(N-k)} + \Big(3-\frac{k+2}{N}\Big)\frac{k+1}{N-k-2}
src/spectrum/criteria.py
AKICc
butala/spectrum
python
def AKICc(N, rho, k): 'approximate corrected Kullback information\n\n .. math:: AKICc(k) = log(rho_k) + \\frac{p}{N*(N-k)} + (3-\\frac{k+2}{N})*\\frac{k+1}{N-k-2}\n\n ' from numpy import log, array p = k res = ((log(rho) + ((p / N) / (N - p))) + (((3.0 - ((p + 2.0) / N)) * (p + 1.0)) / ((N - p) - 2.0))) return res
def FPE(N, rho, k=None): 'Final prediction error criterion\n\n .. math:: FPE(k) = \\frac{N + k + 1}{N - k - 1} \\rho_k\n\n :validation: double checked versus octave.\n\n ' fpe = ((rho * ((N + k) + 1.0)) / ((N - k) - 1)) return fpe
6,012,714,880,795,500,000
Final prediction error criterion .. math:: FPE(k) = \frac{N + k + 1}{N - k - 1} \rho_k :validation: double checked versus octave.
src/spectrum/criteria.py
FPE
butala/spectrum
python
def FPE(N, rho, k=None): 'Final prediction error criterion\n\n .. math:: FPE(k) = \\frac{N + k + 1}{N - k - 1} \\rho_k\n\n :validation: double checked versus octave.\n\n ' fpe = ((rho * ((N + k) + 1.0)) / ((N - k) - 1)) return fpe
def MDL(N, rho, k): 'Minimum Description Length\n\n .. math:: MDL(k) = N log \\rho_k + p \\log N\n\n :validation: results\n ' from numpy import log mdl = ((N * log(rho)) + (k * log(N))) return mdl
-8,109,778,770,536,789,000
Minimum Description Length

.. math:: MDL(k) = N \log \rho_k + k \log N

:validation: results
src/spectrum/criteria.py
MDL
butala/spectrum
python
def MDL(N, rho, k): 'Minimum Description Length\n\n .. math:: MDL(k) = N log \\rho_k + p \\log N\n\n :validation: results\n ' from numpy import log mdl = ((N * log(rho)) + (k * log(N))) return mdl
def CAT(N, rho, k): 'Criterion Autoregressive Transfer Function :\n\n .. math:: CAT(k) = \\frac{1}{N} \\sum_{i=1}^k \\frac{1}{\\rho_i} - \\frac{\\rho_i}{\\rho_k}\n\n .. todo:: validation\n ' from numpy import zeros, arange cat = zeros(len(rho)) for p in arange(1, (len(rho) + 1)): rho_p = ((float(N) / (N - p)) * rho[(p - 1)]) s = 0 for j in range(1, (p + 1)): rho_j = ((float(N) / (N - j)) * rho[(j - 1)]) s = (s + (1.0 / rho_j)) cat[(p - 1)] = ((s / float(N)) - (1.0 / rho_p)) return cat
-2,045,143,425,089,680,600
Criterion Autoregressive Transfer Function:

.. math:: CAT(k) = \frac{1}{N} \sum_{j=1}^{k} \frac{1}{\bar{\rho}_j} - \frac{1}{\bar{\rho}_k}, \qquad \bar{\rho}_j = \frac{N}{N-j}\rho_j

.. todo:: validation
src/spectrum/criteria.py
CAT
butala/spectrum
python
def CAT(N, rho, k): 'Criterion Autoregressive Transfer Function :\n\n .. math:: CAT(k) = \\frac{1}{N} \\sum_{i=1}^k \\frac{1}{\\rho_i} - \\frac{\\rho_i}{\\rho_k}\n\n .. todo:: validation\n ' from numpy import zeros, arange cat = zeros(len(rho)) for p in arange(1, (len(rho) + 1)): rho_p = ((float(N) / (N - p)) * rho[(p - 1)]) s = 0 for j in range(1, (p + 1)): rho_j = ((float(N) / (N - j)) * rho[(j - 1)]) s = (s + (1.0 / rho_j)) cat[(p - 1)] = ((s / float(N)) - (1.0 / rho_p)) return cat
def aic_eigen(s, N): 'AIC order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n Given :math:`n` sorted eigen values :math:`\\lambda_i` with\n :math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)\n is:\n\n .. math:: AIC(k) = -2(n-k)N \\ln \\frac{g(k)}{a(k)} + 2k(2n-k)\n\n where the arithmetic sum :math:`a(k)` is:\n\n .. math:: a(k) = \\sum_{i=k+1}^{n}\\lambda_i\n\n and the geometric sum :math:`g(k)` is:\n\n .. math:: g(k) = \\prod_{i=k+1}^{n} \\lambda_i^{-(n-k)}\n\n The number of relevant sinusoids in the signal subspace is determined by\n selecting the minimum of `AIC`.\n\n .. seealso:: :func:`~spectrum.eigenfreq.eigen`\n .. todo:: define precisely the input parameter N. Should be the input\n data length but when using correlation matrix (SVD), I suspect it\n should be the length of the correlation matrix rather than the\n original data.\n\n :References:\n * [Marple]_ Chap 13,\n * [Wax]_\n ' import numpy as np kaic = [] n = len(s) for k in range(0, (n - 1)): ak = ((1.0 / (n - k)) * np.sum(s[(k + 1):])) gk = np.prod((s[(k + 1):] ** (1.0 / (n - k)))) kaic.append((((((- 2.0) * (n - k)) * N) * np.log((gk / ak))) + ((2.0 * k) * ((2.0 * n) - k)))) return kaic
7,812,365,131,708,917,000
AIC order-selection using eigenvalues

:param s: a list of `p` sorted eigenvalues
:param N: the size of the input data. To be defined precisely.

:return:
    * an array containing the AIC values

Given :math:`n` sorted eigenvalues :math:`\lambda_i` with :math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985) is:

.. math:: AIC(k) = -2(n-k)N \ln \frac{g(k)}{a(k)} + 2k(2n-k)

where the arithmetic mean :math:`a(k)` is:

.. math:: a(k) = \frac{1}{n-k}\sum_{i=k+1}^{n}\lambda_i

and the geometric mean :math:`g(k)` is:

.. math:: g(k) = \prod_{i=k+1}^{n} \lambda_i^{1/(n-k)}

The number of relevant sinusoids in the signal subspace is determined by selecting the minimum of `AIC`.

.. seealso:: :func:`~spectrum.eigenfreq.eigen`
.. todo:: define precisely the input parameter N. Should be the input data length, but when using the correlation matrix (SVD), I suspect it should be the length of the correlation matrix rather than the original data.

:References:
    * [Marple]_ Chap 13,
    * [Wax]_
src/spectrum/criteria.py
aic_eigen
butala/spectrum
python
def aic_eigen(s, N): 'AIC order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n Given :math:`n` sorted eigen values :math:`\\lambda_i` with\n :math:`0 <= i < n`, the proposed criterion from Wax and Kailath (1985)\n is:\n\n .. math:: AIC(k) = -2(n-k)N \\ln \\frac{g(k)}{a(k)} + 2k(2n-k)\n\n where the arithmetic sum :math:`a(k)` is:\n\n .. math:: a(k) = \\sum_{i=k+1}^{n}\\lambda_i\n\n and the geometric sum :math:`g(k)` is:\n\n .. math:: g(k) = \\prod_{i=k+1}^{n} \\lambda_i^{-(n-k)}\n\n The number of relevant sinusoids in the signal subspace is determined by\n selecting the minimum of `AIC`.\n\n .. seealso:: :func:`~spectrum.eigenfreq.eigen`\n .. todo:: define precisely the input parameter N. Should be the input\n data length but when using correlation matrix (SVD), I suspect it\n should be the length of the correlation matrix rather than the\n original data.\n\n :References:\n * [Marple]_ Chap 13,\n * [Wax]_\n ' import numpy as np kaic = [] n = len(s) for k in range(0, (n - 1)): ak = ((1.0 / (n - k)) * np.sum(s[(k + 1):])) gk = np.prod((s[(k + 1):] ** (1.0 / (n - k)))) kaic.append((((((- 2.0) * (n - k)) * N) * np.log((gk / ak))) + ((2.0 * k) * ((2.0 * n) - k)))) return kaic
def mdl_eigen(s, N): 'MDL order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n .. math:: MDL(k) = (n-k)N \\ln \\frac{g(k)}{a(k)} + 0.5k(2n-k) log(N)\n\n .. seealso:: :func:`aic_eigen` for details\n\n :References:\n * [Marple]_ Chap 13,\n * [Wax]_\n ' import numpy as np kmdl = [] n = len(s) for k in range(0, (n - 1)): ak = ((1.0 / (n - k)) * np.sum(s[(k + 1):])) gk = np.prod((s[(k + 1):] ** (1.0 / (n - k)))) kmdl.append(((((- (n - k)) * N) * np.log((gk / ak))) + (((0.5 * k) * ((2.0 * n) - k)) * np.log(N)))) return kmdl
-6,247,924,153,286,262,000
MDL order-selection using eigenvalues

:param s: a list of `p` sorted eigenvalues
:param N: the size of the input data. To be defined precisely.

:return:
    * an array containing the MDL values

.. math:: MDL(k) = -(n-k)N \ln \frac{g(k)}{a(k)} + 0.5\,k(2n-k) \log(N)

.. seealso:: :func:`aic_eigen` for details

:References:
    * [Marple]_ Chap 13,
    * [Wax]_
src/spectrum/criteria.py
mdl_eigen
butala/spectrum
python
def mdl_eigen(s, N): 'MDL order-selection using eigen values\n\n :param s: a list of `p` sorted eigen values\n :param N: the size of the input data. To be defined precisely.\n\n :return:\n * an array containing the AIC values\n\n .. math:: MDL(k) = (n-k)N \\ln \\frac{g(k)}{a(k)} + 0.5k(2n-k) log(N)\n\n .. seealso:: :func:`aic_eigen` for details\n\n :References:\n * [Marple]_ Chap 13,\n * [Wax]_\n ' import numpy as np kmdl = [] n = len(s) for k in range(0, (n - 1)): ak = ((1.0 / (n - k)) * np.sum(s[(k + 1):])) gk = np.prod((s[(k + 1):] ** (1.0 / (n - k)))) kmdl.append(((((- (n - k)) * N) * np.log((gk / ak))) + (((0.5 * k) * ((2.0 * n) - k)) * np.log(N)))) return kmdl
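The two eigen-based criteria above are typically applied to the sorted eigenvalues of a correlation matrix to estimate the signal-subspace dimension. A hedged sketch with synthetic eigenvalues (two strong components over a near-flat noise floor); the import path is assumed from the record's file path.

import numpy as np
from spectrum.criteria import aic_eigen, mdl_eigen

s = np.sort(np.array([9.0, 7.5, 0.11, 0.10, 0.10, 0.09]))[::-1]
# Entry k of the returned list treats s[k+1:] as noise, so the estimated
# number of signal components is argmin + 1 with this implementation.
k_aic = int(np.argmin(aic_eigen(s, N=100))) + 1
k_mdl = int(np.argmin(mdl_eigen(s, N=100))) + 1
print(k_aic, k_mdl)   # expect 2 for both with these values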
def __init__(self, name, N): "Create a criteria object\n\n :param name: a string or list of strings containing valid criteria\n method's name\n :param int N: size of the data sample.\n\n " self.__name = None self.name = name self.__N = N self.__rho = 0 self.__k = None self.__old_data = None self.__data = None self.__norm = True
-6,479,387,712,756,581,000
Create a criteria object :param name: a string or list of strings containing valid criteria method's name :param int N: size of the data sample.
src/spectrum/criteria.py
__init__
butala/spectrum
python
def __init__(self, name, N): "Create a criteria object\n\n :param name: a string or list of strings containing valid criteria\n method's name\n :param int N: size of the data sample.\n\n " self.__name = None self.name = name self.__N = N self.__rho = 0 self.__k = None self.__old_data = None self.__data = None self.__norm = True
def __call__(self, rho=None, k=None, N=None, norm=True): 'Call the criteria function corresponding to :attr:`name`.' self.__norm = norm if (N is not None): self.N = N if (rho is not None): self.rho = rho if (k is not None): self.__k = k self.__norm = norm f = eval(self.name) self.data = f(self.N, self.rho, self.k) if ((self.old_data is not None) and (self.data is not None)): if (self.data > self.old_data): return False else: return True return True
-6,886,929,687,545,804,000
Call the criteria function corresponding to :attr:`name`.
src/spectrum/criteria.py
__call__
butala/spectrum
python
def __call__(self, rho=None, k=None, N=None, norm=True): self.__norm = norm if (N is not None): self.N = N if (rho is not None): self.rho = rho if (k is not None): self.__k = k self.__norm = norm f = eval(self.name) self.data = f(self.N, self.rho, self.k) if ((self.old_data is not None) and (self.data is not None)): if (self.data > self.old_data): return False else: return True return True
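A hedged usage sketch for the Criteria object assembled from the two records above, assuming it is importable as the file path suggests and that the rho/data properties behave the way __init__ and __call__ imply (the data setter keeping the previous value in old_data). __call__ returns False once the criterion starts increasing, which lets an AR order recursion stop early.

from spectrum.criteria import Criteria

crit = Criteria(name='AIC', N=64)
# Made-up prediction errors per order; with these values the AIC bottoms
# out at order 3, so the call for order 4 should return False.
for order, rho in enumerate([1.0, 0.31, 0.28, 0.275, 0.274], start=1):
    if not crit(rho=rho, k=order):
        print('criterion increased; keep order', order - 1)
        break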
def stop(self, force=False): '\n Stop seed workers by sending None-sentinel and joining the workers.\n\n :param force: Skip sending None-sentinel and join with a timeout.\n For use when workers might be shutdown already by KeyboardInterrupt.\n ' if (not force): alives = 0 for proc in self.procs: if proc.is_alive(): alives += 1 while alives: try: self.tiles_queue.put(None, timeout=1) alives -= 1 except Queue.Full: alives = 0 for proc in self.procs: if proc.is_alive(): alives += 1 if force: timeout = 1.0 else: timeout = None for proc in self.procs: proc.join(timeout)
4,690,955,062,984,198,000
Stop seed workers by sending a None sentinel and joining the workers.

:param force: Skip sending the None sentinel and join with a timeout.
    For use when workers might already have been shut down by KeyboardInterrupt.
mapproxy/seed/seeder.py
stop
GeoplexGIS/mapproxy
python
def stop(self, force=False): '\n Stop seed workers by sending None-sentinel and joining the workers.\n\n :param force: Skip sending None-sentinel and join with a timeout.\n For use when workers might be shutdown already by KeyboardInterrupt.\n ' if (not force): alives = 0 for proc in self.procs: if proc.is_alive(): alives += 1 while alives: try: self.tiles_queue.put(None, timeout=1) alives -= 1 except Queue.Full: alives = 0 for proc in self.procs: if proc.is_alive(): alives += 1 if force: timeout = 1.0 else: timeout = None for proc in self.procs: proc.join(timeout)
@staticmethod def can_skip(old_progress, current_progress): "\n Return True if the `current_progress` is behind the `old_progress` -\n when it isn't as far as the old progress.\n\n >>> SeedProgress.can_skip(None, [(0, 4)])\n False\n >>> SeedProgress.can_skip([], [(0, 4)])\n True\n >>> SeedProgress.can_skip([(0, 4)], None)\n False\n >>> SeedProgress.can_skip([(0, 4)], [(0, 4)])\n False\n >>> SeedProgress.can_skip([(1, 4)], [(0, 4)])\n True\n >>> SeedProgress.can_skip([(0, 4)], [(0, 4), (0, 4)])\n False\n\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (1, 4)])\n True\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (2, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (3, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4), (0, 4)])\n False\n " if (current_progress is None): return False if (old_progress is None): return False if (old_progress == []): return True for (old, current) in izip_longest(old_progress, current_progress, fillvalue=None): if (old is None): return False if (current is None): return False if (old < current): return False if (old > current): return True return False
1,151,653,813,984,509,800
Return True if the `current_progress` is behind the `old_progress`, i.e. when it has not advanced as far as the old progress.

>>> SeedProgress.can_skip(None, [(0, 4)])
False
>>> SeedProgress.can_skip([], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], None)
False
>>> SeedProgress.can_skip([(0, 4)], [(0, 4)])
False
>>> SeedProgress.can_skip([(1, 4)], [(0, 4)])
True
>>> SeedProgress.can_skip([(0, 4)], [(0, 4), (0, 4)])
False

>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (1, 4)])
True
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (2, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (3, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4)])
False
>>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4), (0, 4)])
False
mapproxy/seed/seeder.py
can_skip
GeoplexGIS/mapproxy
python
@staticmethod def can_skip(old_progress, current_progress): "\n Return True if the `current_progress` is behind the `old_progress` -\n when it isn't as far as the old progress.\n\n >>> SeedProgress.can_skip(None, [(0, 4)])\n False\n >>> SeedProgress.can_skip([], [(0, 4)])\n True\n >>> SeedProgress.can_skip([(0, 4)], None)\n False\n >>> SeedProgress.can_skip([(0, 4)], [(0, 4)])\n False\n >>> SeedProgress.can_skip([(1, 4)], [(0, 4)])\n True\n >>> SeedProgress.can_skip([(0, 4)], [(0, 4), (0, 4)])\n False\n\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (1, 4)])\n True\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (2, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (0, 4), (3, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4)])\n False\n >>> SeedProgress.can_skip([(0, 4), (0, 4), (2, 4)], [(0, 4), (1, 4), (0, 4)])\n False\n " if (current_progress is None): return False if (old_progress is None): return False if (old_progress == []): return True for (old, current) in izip_longest(old_progress, current_progress, fillvalue=None): if (old is None): return False if (current is None): return False if (old < current): return False if (old > current): return True return False
def _walk(self, cur_bbox, levels, current_level=0, all_subtiles=False): '\n :param cur_bbox: the bbox to seed in this call\n :param levels: list of levels to seed\n :param all_subtiles: seed all subtiles and do not check for\n intersections with bbox/geom\n ' (bbox_, tiles, subtiles) = self.grid.get_affected_level_tiles(cur_bbox, current_level) total_subtiles = (tiles[0] * tiles[1]) if (len(levels) < self.skip_geoms_for_last_levels): all_subtiles = True subtiles = self._filter_subtiles(subtiles, all_subtiles) if ((current_level in levels) and (current_level <= self.report_till_level)): self.report_progress(current_level, cur_bbox) if (not self.seed_progress.running()): if (current_level in levels): self.report_progress(current_level, cur_bbox) self.tile_mgr.cleanup() raise StopProcess() process = False if (current_level in levels): levels = levels[1:] process = True for (i, (subtile, sub_bbox, intersection)) in enumerate(subtiles): if (subtile is None): self.seed_progress.step_forward(total_subtiles) continue if levels: sub_bbox = limit_sub_bbox(cur_bbox, sub_bbox) if (intersection == CONTAINS): all_subtiles = True else: all_subtiles = False with self.seed_progress.step_down(i, total_subtiles): if self.seed_progress.already_processed(): self.seed_progress.step_forward() else: self._walk(sub_bbox, levels, current_level=(current_level + 1), all_subtiles=all_subtiles) if (not process): continue if (subtile in self.seeded_tiles[current_level]): if (not levels): self.seed_progress.step_forward(total_subtiles) continue self.seeded_tiles[current_level].appendleft(subtile) if (not self.work_on_metatiles): handle_tiles = self.grid.tile_list(subtile) else: handle_tiles = [subtile] if self.handle_uncached: handle_tiles = [t for t in handle_tiles if ((t is not None) and (not self.tile_mgr.is_cached(t)))] elif self.handle_stale: handle_tiles = [t for t in handle_tiles if ((t is not None) and self.tile_mgr.is_stale(t))] if handle_tiles: self.count += 1 self.worker_pool.process(handle_tiles, self.seed_progress) if (not levels): self.seed_progress.step_forward(total_subtiles) if (len(levels) >= 4): self.tile_mgr.cleanup()
7,811,925,727,793,627,000
:param cur_bbox: the bbox to seed in this call :param levels: list of levels to seed :param all_subtiles: seed all subtiles and do not check for intersections with bbox/geom
mapproxy/seed/seeder.py
_walk
GeoplexGIS/mapproxy
python
def _walk(self, cur_bbox, levels, current_level=0, all_subtiles=False): '\n :param cur_bbox: the bbox to seed in this call\n :param levels: list of levels to seed\n :param all_subtiles: seed all subtiles and do not check for\n intersections with bbox/geom\n ' (bbox_, tiles, subtiles) = self.grid.get_affected_level_tiles(cur_bbox, current_level) total_subtiles = (tiles[0] * tiles[1]) if (len(levels) < self.skip_geoms_for_last_levels): all_subtiles = True subtiles = self._filter_subtiles(subtiles, all_subtiles) if ((current_level in levels) and (current_level <= self.report_till_level)): self.report_progress(current_level, cur_bbox) if (not self.seed_progress.running()): if (current_level in levels): self.report_progress(current_level, cur_bbox) self.tile_mgr.cleanup() raise StopProcess() process = False if (current_level in levels): levels = levels[1:] process = True for (i, (subtile, sub_bbox, intersection)) in enumerate(subtiles): if (subtile is None): self.seed_progress.step_forward(total_subtiles) continue if levels: sub_bbox = limit_sub_bbox(cur_bbox, sub_bbox) if (intersection == CONTAINS): all_subtiles = True else: all_subtiles = False with self.seed_progress.step_down(i, total_subtiles): if self.seed_progress.already_processed(): self.seed_progress.step_forward() else: self._walk(sub_bbox, levels, current_level=(current_level + 1), all_subtiles=all_subtiles) if (not process): continue if (subtile in self.seeded_tiles[current_level]): if (not levels): self.seed_progress.step_forward(total_subtiles) continue self.seeded_tiles[current_level].appendleft(subtile) if (not self.work_on_metatiles): handle_tiles = self.grid.tile_list(subtile) else: handle_tiles = [subtile] if self.handle_uncached: handle_tiles = [t for t in handle_tiles if ((t is not None) and (not self.tile_mgr.is_cached(t)))] elif self.handle_stale: handle_tiles = [t for t in handle_tiles if ((t is not None) and self.tile_mgr.is_stale(t))] if handle_tiles: self.count += 1 self.worker_pool.process(handle_tiles, self.seed_progress) if (not levels): self.seed_progress.step_forward(total_subtiles) if (len(levels) >= 4): self.tile_mgr.cleanup()
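A stripped-down, self-contained illustration of the descent pattern used by _walk above: recurse level by level, prune subtiles that do not intersect the seed geometry, and skip further intersection tests once a parent tile is fully contained. The quad_split/handle helpers and the intersects predicate are stand-ins for mapproxy's grid and task machinery, not its real API.

CONTAINS, INTERSECTS, NONE = 2, 1, 0

def walk(bbox, level, max_level, intersects, all_subtiles=False):
    if level > max_level:
        return
    for sub_bbox in quad_split(bbox):
        relation = CONTAINS if all_subtiles else intersects(sub_bbox)
        if relation == NONE:
            continue                      # nothing to seed under this tile
        handle(sub_bbox, level)           # seed/refresh this (meta)tile
        walk(sub_bbox, level + 1, max_level, intersects,
             all_subtiles=(relation == CONTAINS))

def quad_split(bbox):
    # Split a bbox into its four quadrants (one level down).
    x0, y0, x1, y1 = bbox
    mx, my = (x0 + x1) / 2, (y0 + y1) / 2
    return [(x0, y0, mx, my), (mx, y0, x1, my),
            (x0, my, mx, y1), (mx, my, x1, y1)]

def handle(bbox, level):
    print(level, bbox)

walk((0.0, 0.0, 1.0, 1.0), 0, 2, lambda b: CONTAINS)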
def _filter_subtiles(self, subtiles, all_subtiles): '\n Return an iterator with all sub tiles.\n Yields (None, None, None) for non-intersecting tiles,\n otherwise (subtile, subtile_bbox, intersection).\n ' for subtile in subtiles: if (subtile is None): (yield (None, None, None)) else: sub_bbox = self.grid.meta_tile(subtile).bbox if all_subtiles: intersection = CONTAINS else: intersection = self.task.intersects(sub_bbox) if intersection: (yield (subtile, sub_bbox, intersection)) else: (yield (None, None, None))
-8,990,287,347,691,366,000
Return an iterator with all sub tiles. Yields (None, None, None) for non-intersecting tiles, otherwise (subtile, subtile_bbox, intersection).
mapproxy/seed/seeder.py
_filter_subtiles
GeoplexGIS/mapproxy
python
def _filter_subtiles(self, subtiles, all_subtiles): '\n Return an iterator with all sub tiles.\n Yields (None, None, None) for non-intersecting tiles,\n otherwise (subtile, subtile_bbox, intersection).\n ' for subtile in subtiles: if (subtile is None): (yield (None, None, None)) else: sub_bbox = self.grid.meta_tile(subtile).bbox if all_subtiles: intersection = CONTAINS else: intersection = self.task.intersects(sub_bbox) if intersection: (yield (subtile, sub_bbox, intersection)) else: (yield (None, None, None))
def testVnicEthAdapterPolicyList(self): 'Test VnicEthAdapterPolicyList' pass
-3,592,398,631,652,934,700
Test VnicEthAdapterPolicyList
test/test_vnic_eth_adapter_policy_list.py
testVnicEthAdapterPolicyList
CiscoUcs/intersight-python
python
def testVnicEthAdapterPolicyList(self): pass
def timestamp(): 'Get a precise timestamp' return _clock_func()
-8,491,789,232,951,088,000
Get a precise timestamp
auto1/venv/Lib/site-packages/pywinauto/timings.py
timestamp
snakyhuman/auto-tests
python
def timestamp(): return _clock_func()
def always_wait_until(timeout, retry_interval, value=True, op=operator.eq): 'Decorator to call wait_until(...) every time for a decorated function/method' def wait_until_decorator(func): 'Callable object that must be returned by the @always_wait_until decorator' @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs) return wrapper return wait_until_decorator
1,975,085,348,732,868,400
Decorator to call wait_until(...) every time for a decorated function/method
auto1/venv/Lib/site-packages/pywinauto/timings.py
always_wait_until
snakyhuman/auto-tests
python
def always_wait_until(timeout, retry_interval, value=True, op=operator.eq): def wait_until_decorator(func): 'Callable object that must be returned by the @always_wait_until decorator' @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs) return wrapper return wait_until_decorator
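A hedged usage sketch for the decorator above, importing it from pywinauto.timings as the record's path suggests. The job_finished check is invented for illustration; the decorated call retries until it returns True (the default value/op) or the timeout elapses, then raises TimeoutError.

import random
from pywinauto.timings import always_wait_until

@always_wait_until(timeout=5, retry_interval=0.1)
def job_finished():
    # Stand-in for a real readiness check (e.g. polling a window state).
    return random.random() > 0.9

job_finished()   # returns True eventually, or raises TimeoutError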
def wait_until(timeout, retry_interval, func, value=True, op=operator.eq, *args, **kwargs): '\n Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n * **func** the function that will be executed\n * **value** the value to be compared against (defaults to True)\n * **op** the comparison function (defaults to equality)\\\n * **args** optional arguments to be passed to func when called\n * **kwargs** optional keyword arguments to be passed to func when called\n\n Returns the return value of the function\n If the operation times out then the return value of the the function\n is in the \'function_value\' attribute of the raised exception.\n\n e.g. ::\n\n try:\n # wait a maximum of 10.5 seconds for the\n # the objects item_count() method to return 10\n # in increments of .5 of a second\n wait_until(10.5, .5, self.item_count, 10)\n except TimeoutError as e:\n print("timed out")\n ' start = timestamp() func_val = func(*args, **kwargs) while (not op(func_val, value)): time_left = (timeout - (timestamp() - start)) if (time_left > 0): time.sleep(min(retry_interval, time_left)) func_val = func(*args, **kwargs) else: err = TimeoutError('timed out') err.function_value = func_val raise err return func_val
8,987,653,373,818,958,000
Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires

* **timeout** how long to keep retrying the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **value** the value to be compared against (defaults to True)
* **op** the comparison function (defaults to equality)
* **args** optional arguments to be passed to func when called
* **kwargs** optional keyword arguments to be passed to func when called

Returns the return value of the function.
If the operation times out then the return value of the function is in the 'function_value' attribute of the raised exception.

e.g. ::

    try:
        # wait a maximum of 10.5 seconds for the object's
        # item_count() method to return 10,
        # in increments of .5 of a second
        wait_until(10.5, .5, self.item_count, 10)
    except TimeoutError as e:
        print("timed out")
auto1/venv/Lib/site-packages/pywinauto/timings.py
wait_until
snakyhuman/auto-tests
python
def wait_until(timeout, retry_interval, func, value=True, op=operator.eq, *args, **kwargs): '\n Wait until ``op(function(*args, **kwargs), value)`` is True or until timeout expires\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n * **func** the function that will be executed\n * **value** the value to be compared against (defaults to True)\n * **op** the comparison function (defaults to equality)\\\n * **args** optional arguments to be passed to func when called\n * **kwargs** optional keyword arguments to be passed to func when called\n\n Returns the return value of the function\n If the operation times out then the return value of the the function\n is in the \'function_value\' attribute of the raised exception.\n\n e.g. ::\n\n try:\n # wait a maximum of 10.5 seconds for the\n # the objects item_count() method to return 10\n # in increments of .5 of a second\n wait_until(10.5, .5, self.item_count, 10)\n except TimeoutError as e:\n print("timed out")\n ' start = timestamp() func_val = func(*args, **kwargs) while (not op(func_val, value)): time_left = (timeout - (timestamp() - start)) if (time_left > 0): time.sleep(min(retry_interval, time_left)) func_val = func(*args, **kwargs) else: err = TimeoutError('timed out') err.function_value = func_val raise err return func_val
def always_wait_until_passes(timeout, retry_interval, exceptions=Exception): 'Decorator to call wait_until_passes(...) every time for a decorated function/method' def wait_until_passes_decorator(func): 'Callable object that must be returned by the @always_wait_until_passes decorator' @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs) return wrapper return wait_until_passes_decorator
4,447,544,560,799,387,000
Decorator to call wait_until_passes(...) every time for a decorated function/method
auto1/venv/Lib/site-packages/pywinauto/timings.py
always_wait_until_passes
snakyhuman/auto-tests
python
def always_wait_until_passes(timeout, retry_interval, exceptions=Exception): def wait_until_passes_decorator(func): 'Callable object that must be returned by the @always_wait_until_passes decorator' @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs) return wrapper return wait_until_passes_decorator
def wait_until_passes(timeout, retry_interval, func, exceptions=Exception, *args, **kwargs): '\n Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n * **func** the function that will be executed\n * **exceptions** list of exceptions to test against (default: Exception)\n * **args** optional arguments to be passed to func when called\n * **kwargs** optional keyword arguments to be passed to func when called\n\n Returns the return value of the function\n If the operation times out then the original exception raised is in\n the \'original_exception\' attribute of the raised exception.\n\n e.g. ::\n\n try:\n # wait a maximum of 10.5 seconds for the\n # window to be found in increments of .5 of a second.\n # P.int a message and re-raise the original exception if never found.\n wait_until_passes(10.5, .5, self.Exists, (ElementNotFoundError))\n except TimeoutError as e:\n print("timed out")\n raise e.\n ' start = timestamp() while True: try: func_val = func(*args, **kwargs) break except exceptions as e: time_left = (timeout - (timestamp() - start)) if (time_left > 0): time.sleep(min(retry_interval, time_left)) else: err = TimeoutError() err.original_exception = e raise err return func_val
-3,044,909,421,197,461,000
Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions

* **timeout** how long to keep retrying the function
* **retry_interval** how long to wait between retries
* **func** the function that will be executed
* **exceptions** an exception class or tuple of exception classes to test against (default: Exception)
* **args** optional arguments to be passed to func when called
* **kwargs** optional keyword arguments to be passed to func when called

Returns the return value of the function.
If the operation times out then the original exception raised is in the 'original_exception' attribute of the raised exception.

e.g. ::

    try:
        # wait a maximum of 10.5 seconds for the
        # window to be found, in increments of .5 of a second.
        # Print a message and re-raise the original exception if never found.
        wait_until_passes(10.5, .5, self.Exists, (ElementNotFoundError))
    except TimeoutError as e:
        print("timed out")
        raise e
auto1/venv/Lib/site-packages/pywinauto/timings.py
wait_until_passes
snakyhuman/auto-tests
python
def wait_until_passes(timeout, retry_interval, func, exceptions=Exception, *args, **kwargs): '\n Wait until ``func(*args, **kwargs)`` does not raise one of the exceptions\n\n * **timeout** how long the function will try the function\n * **retry_interval** how long to wait between retries\n * **func** the function that will be executed\n * **exceptions** list of exceptions to test against (default: Exception)\n * **args** optional arguments to be passed to func when called\n * **kwargs** optional keyword arguments to be passed to func when called\n\n Returns the return value of the function\n If the operation times out then the original exception raised is in\n the \'original_exception\' attribute of the raised exception.\n\n e.g. ::\n\n try:\n # wait a maximum of 10.5 seconds for the\n # window to be found in increments of .5 of a second.\n # P.int a message and re-raise the original exception if never found.\n wait_until_passes(10.5, .5, self.Exists, (ElementNotFoundError))\n except TimeoutError as e:\n print("timed out")\n raise e.\n ' start = timestamp() while True: try: func_val = func(*args, **kwargs) break except exceptions as e: time_left = (timeout - (timestamp() - start)) if (time_left > 0): time.sleep(min(retry_interval, time_left)) else: err = TimeoutError() err.original_exception = e raise err return func_val
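A hedged runnable sketch for wait_until_passes; the flaky() helper is invented for illustration. The call is retried while it raises ValueError, and the value of the first successful call is returned.

from pywinauto.timings import wait_until_passes

attempts = {'n': 0}

def flaky():
    # Fails twice, then succeeds.
    attempts['n'] += 1
    if attempts['n'] < 3:
        raise ValueError('not ready yet')
    return 'ready'

result = wait_until_passes(2.0, 0.05, flaky, ValueError)
print(result, attempts['n'])   # 'ready' after the third call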
def __getattribute__(self, attr): 'Get the value for a particular timing' if (attr in ['__dict__', '__members__', '__methods__', '__class__']): return object.__getattribute__(self, attr) if (attr in dir(TimeConfig)): return object.__getattribute__(self, attr) if (attr in self.__default_timing): return self._timings[attr] else: raise AttributeError('Unknown timing setting: {0}'.format(attr))
5,758,114,577,032,665,000
Get the value for a particular timing
auto1/venv/Lib/site-packages/pywinauto/timings.py
__getattribute__
snakyhuman/auto-tests
python
def __getattribute__(self, attr): if (attr in ['__dict__', '__members__', '__methods__', '__class__']): return object.__getattribute__(self, attr) if (attr in dir(TimeConfig)): return object.__getattribute__(self, attr) if (attr in self.__default_timing): return self._timings[attr] else: raise AttributeError('Unknown timing setting: {0}'.format(attr))
def __setattr__(self, attr, value): 'Set a particular timing' if (attr == '_timings'): object.__setattr__(self, attr, value) elif (attr in self.__default_timing): self._timings[attr] = value else: raise AttributeError('Unknown timing setting: {0}'.format(attr))
6,694,533,967,756,782,000
Set a particular timing
auto1/venv/Lib/site-packages/pywinauto/timings.py
__setattr__
snakyhuman/auto-tests
python
def __setattr__(self, attr, value): if (attr == '_timings'): object.__setattr__(self, attr, value) elif (attr in self.__default_timing): self._timings[attr] = value else: raise AttributeError('Unknown timing setting: {0}'.format(attr))
def Fast(self): 'Set fast timing values\n\n Currently this changes the timing in the following ways:\n timeouts = 1 second\n waits = 0 seconds\n retries = .001 seconds (minimum!)\n\n (if existing times are faster then keep existing times)\n ' for setting in self.__default_timing: if ('_timeout' in setting): self._timings[setting] = min(1, self._timings[setting]) if ('_wait' in setting): self._timings[setting] = (self._timings[setting] / 2) elif setting.endswith('_retry'): self._timings[setting] = 0.001
-8,164,920,440,071,308,000
Set fast timing values

Currently this changes the timing in the following ways:
timeouts = 1 second at most (existing faster timeouts are kept)
waits = half of the existing wait
retries = .001 seconds
auto1/venv/Lib/site-packages/pywinauto/timings.py
Fast
snakyhuman/auto-tests
python
def Fast(self): 'Set fast timing values\n\n Currently this changes the timing in the following ways:\n timeouts = 1 second\n waits = 0 seconds\n retries = .001 seconds (minimum!)\n\n (if existing times are faster then keep existing times)\n ' for setting in self.__default_timing: if ('_timeout' in setting): self._timings[setting] = min(1, self._timings[setting]) if ('_wait' in setting): self._timings[setting] = (self._timings[setting] / 2) elif setting.endswith('_retry'): self._timings[setting] = 0.001
def Slow(self): 'Set slow timing values\n\n Currently this changes the timing in the following ways:\n timeouts = default timeouts * 10\n waits = default waits * 3\n retries = default retries * 3\n\n (if existing times are slower then keep existing times)\n ' for setting in self.__default_timing: if ('_timeout' in setting): self._timings[setting] = max((self.__default_timing[setting] * 10), self._timings[setting]) if ('_wait' in setting): self._timings[setting] = max((self.__default_timing[setting] * 3), self._timings[setting]) elif setting.endswith('_retry'): self._timings[setting] = max((self.__default_timing[setting] * 3), self._timings[setting]) if (self._timings[setting] < 0.2): self._timings[setting] = 0.2
-2,657,467,507,185,467,000
Set slow timing values

Currently this changes the timing in the following ways:
timeouts = default timeouts * 10
waits = default waits * 3
retries = default retries * 3

(if existing times are slower, the existing times are kept)
auto1/venv/Lib/site-packages/pywinauto/timings.py
Slow
snakyhuman/auto-tests
python
def Slow(self): 'Set slow timing values\n\n Currently this changes the timing in the following ways:\n timeouts = default timeouts * 10\n waits = default waits * 3\n retries = default retries * 3\n\n (if existing times are slower then keep existing times)\n ' for setting in self.__default_timing: if ('_timeout' in setting): self._timings[setting] = max((self.__default_timing[setting] * 10), self._timings[setting]) if ('_wait' in setting): self._timings[setting] = max((self.__default_timing[setting] * 3), self._timings[setting]) elif setting.endswith('_retry'): self._timings[setting] = max((self.__default_timing[setting] * 3), self._timings[setting]) if (self._timings[setting] < 0.2): self._timings[setting] = 0.2
def Defaults(self): 'Set all timings to the default time' self._timings = self.__default_timing.copy()
2,807,953,678,227,924,500
Set all timings to the default time
auto1/venv/Lib/site-packages/pywinauto/timings.py
Defaults
snakyhuman/auto-tests
python
def Defaults(self): self._timings = self.__default_timing.copy()
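A hedged usage sketch for the TimeConfig machinery defined by the records above, via pywinauto's module-level Timings instance; window_find_timeout is one of pywinauto's standard settings, and the Fast/Defaults method names are taken from the records.

from pywinauto.timings import Timings

Timings.Fast()                      # clamp timeouts/retries for quick runs
Timings.window_find_timeout = 10    # routed through __setattr__
print(Timings.window_find_timeout)  # routed through __getattribute__
Timings.Defaults()                  # restore the shipped defaults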
def wait_until_decorator(func): 'Callable object that must be returned by the @always_wait_until decorator' @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs) return wrapper
2,517,495,585,548,496,000
Callable object that must be returned by the @always_wait_until decorator
auto1/venv/Lib/site-packages/pywinauto/timings.py
wait_until_decorator
snakyhuman/auto-tests
python
def wait_until_decorator(func): @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs) return wrapper
def wait_until_passes_decorator(func): 'Callable object that must be returned by the @always_wait_until_passes decorator' @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs) return wrapper
-9,218,570,426,858,793,000
Callable object that must be returned by the @always_wait_until_passes decorator
auto1/venv/Lib/site-packages/pywinauto/timings.py
wait_until_passes_decorator
snakyhuman/auto-tests
python
def wait_until_passes_decorator(func): @wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs) return wrapper
@wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs)
-3,897,649,944,343,418,000
pre-callback, target function call and post-callback
auto1/venv/Lib/site-packages/pywinauto/timings.py
wrapper
snakyhuman/auto-tests
python
@wraps(func) def wrapper(*args, **kwargs): return wait_until(timeout, retry_interval, func, value, op, *args, **kwargs)
@wraps(func) def wrapper(*args, **kwargs): 'pre-callback, target function call and post-callback' return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs)
-1,675,487,642,385,341,200
pre-callback, target function call and post-callback
auto1/venv/Lib/site-packages/pywinauto/timings.py
wrapper
snakyhuman/auto-tests
python
@wraps(func) def wrapper(*args, **kwargs): return wait_until_passes(timeout, retry_interval, func, exceptions, *args, **kwargs)
@pytest.fixture(autouse=True) def mpl_test_settings(qt_module, mpl_test_settings): '\n Ensure qt_module fixture is *first* fixture.\n\n We override the `mpl_test_settings` fixture and depend on the `qt_module`\n fixture first. It is very important that it is first, because it skips\n tests when Qt is not available, and if not, then the main\n `mpl_test_settings` fixture will try to switch backends before the skip can\n be triggered.\n ' pass
-4,220,199,978,093,792,000
Ensure the qt_module fixture is the *first* fixture.

We override the `mpl_test_settings` fixture and depend on the `qt_module` fixture first. It is very important that it comes first, because it skips tests when Qt is not available; otherwise, the main `mpl_test_settings` fixture would try to switch backends before the skip could be triggered.
tests/test_backend_qt.py
mpl_test_settings
qiujiangkun/mplopengl
python
@pytest.fixture(autouse=True) def mpl_test_settings(qt_module, mpl_test_settings): '\n Ensure qt_module fixture is *first* fixture.\n\n We override the `mpl_test_settings` fixture and depend on the `qt_module`\n fixture first. It is very important that it is first, because it skips\n tests when Qt is not available, and if not, then the main\n `mpl_test_settings` fixture will try to switch backends before the skip can\n be triggered.\n ' pass
@pytest.mark.parametrize('qt_key, qt_mods, answer', [('Key_A', ['ShiftModifier'], 'A'), ('Key_A', [], 'a'), ('Key_A', ['ControlModifier'], 'ctrl+a'), ('Key_Aacute', ['ShiftModifier'], 'Á'), ('Key_Aacute', [], 'á'), ('ControlKey', ['AltModifier'], 'alt+control'), ('AltKey', ['ControlModifier'], 'ctrl+alt'), ('Key_Aacute', ['ControlModifier', 'AltModifier', 'SuperModifier'], 'ctrl+alt+super+á'), ('Key_Backspace', [], 'backspace'), ('Key_Backspace', ['ControlModifier'], 'ctrl+backspace'), ('Key_Play', [], None)], indirect=['qt_key', 'qt_mods'], ids=['shift', 'lower', 'control', 'unicode_upper', 'unicode_lower', 'alt_control', 'control_alt', 'modifier_order', 'backspace', 'backspace_mod', 'non_unicode_key']) @pytest.mark.parametrize('backend', [pytest.param('Qt4Agg', marks=pytest.mark.backend('Qt4Agg')), pytest.param('Qt5Agg', marks=pytest.mark.backend('Qt5Agg'))]) def test_correct_key(backend, qt_key, qt_mods, answer): '\n Make a figure\n Send a key_press_event event (using non-public, qtX backend specific api)\n Catch the event\n Assert sent and caught keys are the same\n ' qt_canvas = plt.figure().canvas event = mock.Mock() event.isAutoRepeat.return_value = False event.key.return_value = qt_key event.modifiers.return_value = qt_mods def receive(event): assert (event.key == answer) qt_canvas.mpl_connect('key_press_event', receive) qt_canvas.keyPressEvent(event)
6,353,541,293,830,967,000
Make a figure Send a key_press_event event (using non-public, qtX backend specific api) Catch the event Assert sent and caught keys are the same
tests/test_backend_qt.py
test_correct_key
qiujiangkun/mplopengl
python
@pytest.mark.parametrize('qt_key, qt_mods, answer', [('Key_A', ['ShiftModifier'], 'A'), ('Key_A', [], 'a'), ('Key_A', ['ControlModifier'], 'ctrl+a'), ('Key_Aacute', ['ShiftModifier'], 'Á'), ('Key_Aacute', [], 'á'), ('ControlKey', ['AltModifier'], 'alt+control'), ('AltKey', ['ControlModifier'], 'ctrl+alt'), ('Key_Aacute', ['ControlModifier', 'AltModifier', 'SuperModifier'], 'ctrl+alt+super+á'), ('Key_Backspace', [], 'backspace'), ('Key_Backspace', ['ControlModifier'], 'ctrl+backspace'), ('Key_Play', [], None)], indirect=['qt_key', 'qt_mods'], ids=['shift', 'lower', 'control', 'unicode_upper', 'unicode_lower', 'alt_control', 'control_alt', 'modifier_order', 'backspace', 'backspace_mod', 'non_unicode_key']) @pytest.mark.parametrize('backend', [pytest.param('Qt4Agg', marks=pytest.mark.backend('Qt4Agg')), pytest.param('Qt5Agg', marks=pytest.mark.backend('Qt5Agg'))]) def test_correct_key(backend, qt_key, qt_mods, answer): '\n Make a figure\n Send a key_press_event event (using non-public, qtX backend specific api)\n Catch the event\n Assert sent and caught keys are the same\n ' qt_canvas = plt.figure().canvas event = mock.Mock() event.isAutoRepeat.return_value = False event.key.return_value = qt_key event.modifiers.return_value = qt_mods def receive(event): assert (event.key == answer) qt_canvas.mpl_connect('key_press_event', receive) qt_canvas.keyPressEvent(event)
@pytest.mark.backend('Qt5Agg') def test_dpi_ratio_change(): '\n Make sure that if _dpi_ratio changes, the figure dpi changes but the\n widget remains the same physical size.\n ' prop = 'matplotlib.backends.backend_qt5.FigureCanvasQT._dpi_ratio' with mock.patch(prop, new_callable=mock.PropertyMock) as p: p.return_value = 3 fig = plt.figure(figsize=(5, 2), dpi=120) qt_canvas = fig.canvas qt_canvas.show() from matplotlib.backends.backend_qt5 import qApp assert (qt_canvas._dpi_ratio == 3) size = qt_canvas.size() qt_canvas.manager.show() qt_canvas.draw() qApp.processEvents() assert (fig.dpi == 360) assert (qt_canvas.renderer.width == 1800) assert (qt_canvas.renderer.height == 720) assert (size.width() == 600) assert (size.height() == 240) p.return_value = 2 assert (qt_canvas._dpi_ratio == 2) qt_canvas.draw() qApp.processEvents() qApp.processEvents() assert (size.width() == 600) assert (size.height() == 240)
-8,263,528,494,087,251,000
Make sure that if _dpi_ratio changes, the figure dpi changes but the widget remains the same physical size.
tests/test_backend_qt.py
test_dpi_ratio_change
qiujiangkun/mplopengl
python
@pytest.mark.backend('Qt5Agg') def test_dpi_ratio_change(): '\n Make sure that if _dpi_ratio changes, the figure dpi changes but the\n widget remains the same physical size.\n ' prop = 'matplotlib.backends.backend_qt5.FigureCanvasQT._dpi_ratio' with mock.patch(prop, new_callable=mock.PropertyMock) as p: p.return_value = 3 fig = plt.figure(figsize=(5, 2), dpi=120) qt_canvas = fig.canvas qt_canvas.show() from matplotlib.backends.backend_qt5 import qApp assert (qt_canvas._dpi_ratio == 3) size = qt_canvas.size() qt_canvas.manager.show() qt_canvas.draw() qApp.processEvents() assert (fig.dpi == 360) assert (qt_canvas.renderer.width == 1800) assert (qt_canvas.renderer.height == 720) assert (size.width() == 600) assert (size.height() == 240) p.return_value = 2 assert (qt_canvas._dpi_ratio == 2) qt_canvas.draw() qApp.processEvents() qApp.processEvents() assert (size.width() == 600) assert (size.height() == 240)
def __init__(self, max_depth=10): '\n Initializes attributes and checks the maximum depth provided.\n\n Parameters\n ----------\n max_depth : int\n The maximum depth to look in.\n ' if (max_depth < 1): raise Exception('max_depth must be greater than or equal to 1.') self._max_depth = max_depth self._analyzers = [] self._current_depth = 0 self._rules = {}
-1,162,637,250,902,903,000
Initializes attributes and checks the maximum depth provided. Parameters ---------- max_depth : int The maximum depth to look in.
src/indexing/indexer.py
__init__
pgecsenyi/piepy
python
def __init__(self, max_depth=10): '\n Initializes attributes and checks the maximum depth provided.\n\n Parameters\n ----------\n max_depth : int\n The maximum depth to look in.\n ' if (max_depth < 1): raise Exception('max_depth must be greater than or equal to 1.') self._max_depth = max_depth self._analyzers = [] self._current_depth = 0 self._rules = {}
def add_rule(self, directory, policy): '\n Registers a new directory to index. Does nothing if the given directory is already added.\n\n Parameters\n ----------\n directory : str\n The directory to be indexed.\n policy : IndexerPolicy\n A policy that applies to this directory.\n ' analyzer = self._create_analyzer(policy) analyzer_store = self._create_analyzerstore(directory) analyzer_store.add_analyzer(policy.extensions, analyzer)
-6,603,331,465,312,089,000
Registers a new directory to index. Does nothing if the given directory is already added. Parameters ---------- directory : str The directory to be indexed. policy : IndexerPolicy A policy that applies to this directory.
src/indexing/indexer.py
add_rule
pgecsenyi/piepy
python
def add_rule(self, directory, policy): '\n Registers a new directory to index. Does nothing if the given directory is already added.\n\n Parameters\n ----------\n directory : str\n The directory to be indexed.\n policy : IndexerPolicy\n A policy that applies to this directory.\n ' analyzer = self._create_analyzer(policy) analyzer_store = self._create_analyzerstore(directory) analyzer_store.add_analyzer(policy.extensions, analyzer)
def index(self): '\n Initializes filters, initiates indexing and after the indexing process has finished, cleans filters.\n ' for analyzer in self._analyzers: analyzer.init_filters() for (directory, analyzer_store) in self._rules.items(): if os.path.exists(directory): self._scan_directory(directory, analyzer_store) for analyzer in self._analyzers: analyzer.clean_filters()
3,595,322,224,351,321,000
Initializes filters, runs the indexing process, and cleans up the filters once indexing has finished.
src/indexing/indexer.py
index
pgecsenyi/piepy
python
def index(self): '\n \n ' for analyzer in self._analyzers: analyzer.init_filters() for (directory, analyzer_store) in self._rules.items(): if os.path.exists(directory): self._scan_directory(directory, analyzer_store) for analyzer in self._analyzers: analyzer.clean_filters()
def _enter(self, directory): '\n Indicates for the analyzers that we entered into the given directory.\n\n Parameters\n ----------\n directory : str\n The directory we entered.\n ' for analyzer in self._analyzers: analyzer.enter(directory) self._current_depth = (self._current_depth + 1)
5,542,440,193,222,605,000
Indicates for the analyzers that we entered into the given directory. Parameters ---------- directory : str The directory we entered.
src/indexing/indexer.py
_enter
pgecsenyi/piepy
python
def _enter(self, directory): '\n Indicates for the analyzers that we entered into the given directory.\n\n Parameters\n ----------\n directory : str\n The directory we entered.\n ' for analyzer in self._analyzers: analyzer.enter(directory) self._current_depth = (self._current_depth + 1)
def _leave(self): '\n Indicates for the analyzers that we are leaving the last directory.\n ' for analyzer in self._analyzers: analyzer.leave() self._current_depth = (self._current_depth - 1)
-5,838,119,474,221,757,000
Indicates for the analyzers that we are leaving the last directory.
src/indexing/indexer.py
_leave
pgecsenyi/piepy
python
def _leave(self): '\n \n ' for analyzer in self._analyzers: analyzer.leave() self._current_depth = (self._current_depth - 1)
def _scan_directory(self, path, analyzer_store): '\n Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to\n analyze and store the data.\n\n Parameters\n ----------\n path : str\n The path to enumerate.\n analyzers : PathAnalyzerStore\n The PathAnalyzerStore to use.\n ' for current_file in os.listdir(path): current_path = os.path.join(path, current_file) if (self._current_depth >= self._max_depth): return if os.path.isdir(current_path): self._enter(current_file) self._scan_directory(current_path, analyzer_store) self._leave() else: self._analyze_file(current_path, analyzer_store)
-7,786,856,763,598,390,000
Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to analyze and store the data.

Parameters
----------
path : str
    The path to enumerate.
analyzer_store : PathAnalyzerStore
    The PathAnalyzerStore to use.
src/indexing/indexer.py
_scan_directory
pgecsenyi/piepy
python
def _scan_directory(self, path, analyzer_store): '\n Does the real indexing. Iterates through the directory using DFS, and invokes the registered analyzers to\n analyze and store the data.\n\n Parameters\n ----------\n path : str\n The path to enumerate.\n analyzers : PathAnalyzerStore\n The PathAnalyzerStore to use.\n ' for current_file in os.listdir(path): current_path = os.path.join(path, current_file) if (self._current_depth >= self._max_depth): return if os.path.isdir(current_path): self._enter(current_file) self._scan_directory(current_path, analyzer_store) self._leave() else: self._analyze_file(current_path, analyzer_store)
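The depth-limited DFS at the heart of _scan_directory can be illustrated standalone; this sketch drops the analyzer plumbing and just visits files, bounding the recursion depth the same way.

import os

def scan(path, max_depth, depth=0, visit=print):
    # Stop descending once the configured depth is reached.
    if depth >= max_depth:
        return
    for name in sorted(os.listdir(path)):
        full = os.path.join(path, name)
        if os.path.isdir(full):
            scan(full, max_depth, depth + 1, visit)
        else:
            visit(full)

scan('.', max_depth=3)   # print files up to three directory levels deep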
@property def local_epoch(self) -> int: "\n This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will\n automatically re-synchronize by downloading state from another peer.\n An epoch corresponds to accumulating target_batch_size across all active devices.\n " return self.state_averager.local_epoch
-610,526,349,330,534,100
This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will automatically re-synchronize by downloading state from another peer. An epoch corresponds to accumulating target_batch_size across all active devices.
hivemind/optim/optimizer.py
local_epoch
MeshchaninovViacheslav/hivemind
python
@property def local_epoch(self) -> int: "\n This worker's current epoch, kept synchronized with peers. If peer's local_epoch lags behind others, it will\n automatically re-synchronize by downloading state from another peer.\n An epoch corresponds to accumulating target_batch_size across all active devices.\n " return self.state_averager.local_epoch
def step(self, closure: Optional[Callable[([], torch.Tensor)]]=None, batch_size: Optional[int]=None, grad_scaler: Optional[GradScaler]=None): '\n Update training progress after accumulating another local batch size. Depending on the configuration, this will\n report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.\n\n :param closure: A closure that reevaluates the model and returns the loss.\n :param batch_size: optional override for batch_size_per_step from init.\n :param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler.\n :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.\n ' if ((grad_scaler is not None) and (not isinstance(grad_scaler, GradScaler))): raise ValueError('hivemind.Optimizer requires a hivemind-aware gradient scaler (hivemind.GradScaler)') if ((self.batch_size_per_step is None) and (batch_size is None) and (not self.auxiliary)): raise ValueError('Please either set batch_size_per_step parameter at init or when calling .step') if (self.auxiliary and ((closure is not None) or (batch_size is not None) or (grad_scaler is not None))): raise ValueError('Auxiliary peers should not have batch size, run closures, or use grad_scaler') batch_size = (batch_size if (batch_size is not None) else self.batch_size_per_step) self.state_averager.step(apply_delayed_updates=True) loss = None if (closure is not None): with torch.enable_grad(): loss = closure() if ((not self.auxiliary) and self._should_load_state_from_peers()): logger.log(self.status_loglevel, 'Peer is out of sync') self.load_state_from_peers() return loss if self.use_gradient_averaging: if (not self.auxiliary): grads_are_valid = self._check_and_accumulate_gradients(batch_size, grad_scaler) if (not grads_are_valid): return loss self._maybe_schedule_gradient_averaging() self._maybe_schedule_state_averaging() elif (not self.auxiliary): if (grad_scaler is not None): with grad_scaler.running_global_step(): assert grad_scaler.unscale_(self) new_samples_accumulated = (self.tracker.local_progress.samples_accumulated + batch_size) self.tracker.report_local_progress(self.local_epoch, new_samples_accumulated) self._maybe_schedule_state_averaging() self.state_averager.step(increment_epoch=False, optimizer_step=True, delay_optimizer_step=self.delay_optimizer_step, grad_scaler=grad_scaler) if self.tracker.ready_to_update_epoch: self._update_global_epoch(grad_scaler) return loss
7,173,218,560,361,957,000
Update training progress after accumulating another local batch size. Depending on the configuration, this will report progress to peers, run global or local optimizer step, average parameters or schedule background tasks. :param closure: A closure that reevaluates the model and returns the loss. :param batch_size: optional override for batch_size_per_step from init. :param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler. :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.
hivemind/optim/optimizer.py
step
MeshchaninovViacheslav/hivemind
python
def step(self, closure: Optional[Callable[([], torch.Tensor)]]=None, batch_size: Optional[int]=None, grad_scaler: Optional[GradScaler]=None): '\n Update training progress after accumulating another local batch size. Depending on the configuration, this will\n report progress to peers, run global or local optimizer step, average parameters or schedule background tasks.\n\n :param closure: A closure that reevaluates the model and returns the loss.\n :param batch_size: optional override for batch_size_per_step from init.\n :param grad_scaler: if amp is enabled, this **must** be a hivemind-aware gradient scaler.\n :note: this .step is different from normal pytorch optimizers in several key ways. See __init__ for details.\n ' if ((grad_scaler is not None) and (not isinstance(grad_scaler, GradScaler))): raise ValueError('hivemind.Optimizer requires a hivemind-aware gradient scaler (hivemind.GradScaler)') if ((self.batch_size_per_step is None) and (batch_size is None) and (not self.auxiliary)): raise ValueError('Please either set batch_size_per_step parameter at init or when calling .step') if (self.auxiliary and ((closure is not None) or (batch_size is not None) or (grad_scaler is not None))): raise ValueError('Auxiliary peers should not have batch size, run closures, or use grad_scaler') batch_size = (batch_size if (batch_size is not None) else self.batch_size_per_step) self.state_averager.step(apply_delayed_updates=True) loss = None if (closure is not None): with torch.enable_grad(): loss = closure() if ((not self.auxiliary) and self._should_load_state_from_peers()): logger.log(self.status_loglevel, 'Peer is out of sync') self.load_state_from_peers() return loss if self.use_gradient_averaging: if (not self.auxiliary): grads_are_valid = self._check_and_accumulate_gradients(batch_size, grad_scaler) if (not grads_are_valid): return loss self._maybe_schedule_gradient_averaging() self._maybe_schedule_state_averaging() elif (not self.auxiliary): if (grad_scaler is not None): with grad_scaler.running_global_step(): assert grad_scaler.unscale_(self) new_samples_accumulated = (self.tracker.local_progress.samples_accumulated + batch_size) self.tracker.report_local_progress(self.local_epoch, new_samples_accumulated) self._maybe_schedule_state_averaging() self.state_averager.step(increment_epoch=False, optimizer_step=True, delay_optimizer_step=self.delay_optimizer_step, grad_scaler=grad_scaler) if self.tracker.ready_to_update_epoch: self._update_global_epoch(grad_scaler) return loss
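A hedged sketch of the calling pattern that step() expects, based on its own checks (batch_size_per_step must be set at init or passed per call; grad_scaler must be hivemind-aware). The constructor arguments below follow hivemind's documented quickstart shape, but are assumptions as far as this excerpt goes:

import torch
import hivemind

dht = hivemind.DHT(start=True)
model = torch.nn.Linear(4, 2)
opt = hivemind.Optimizer(
    dht=dht,
    run_id='demo-run',                                       # hypothetical experiment id
    optimizer=torch.optim.SGD(model.parameters(), lr=0.1),
    target_batch_size=1024,                                  # epoch advances after this many samples
    batch_size_per_step=32,                                  # or pass batch_size=... to each step()
)

for _ in range(3):
    x, y = torch.randn(32, 4), torch.randn(32, 2)
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()
    opt.step()       # reports progress to peers; may trigger a global optimizer step
    opt.zero_grad()  # not allowed when reuse_grad_buffers=True (see zero_grad below)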
def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None: 'Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step' assert (self._schema_hash == self._compute_schema_hash()), 'parameters or gradients changed during iteration' _epoch_start_time = time.perf_counter() with self.tracker.pause_updates(): wait_for_trigger = None if self.use_gradient_averaging: logger.log(self.status_loglevel, f'Beginning optimizer step #{self.local_epoch}') if self.delay_optimizer_step: self.state_averager.step(wait_for_delayed_updates=True) began_averaging_gradients = self._begin_averaging_gradients(grad_scaler) if (not began_averaging_gradients): self.grad_averager.load_accumulators_into_averager_() elif self.delay_grad_averaging: wait_for_trigger = partial(self._average_gradients_and_load_into_optimizer, self.scheduled_grads) else: self._average_gradients_and_load_into_optimizer(self.scheduled_grads) next_epoch = max((self.local_epoch + 1), self.tracker.global_epoch) swarm_not_empty = (self.tracker.global_progress.num_peers > 1) should_perform_optimizer_step = ((not self.auxiliary) and (not self.use_local_updates)) should_average_state = (swarm_not_empty and ((next_epoch % self.average_state_every) == 0) and (not self.state_averager.averaging_in_progress)) if (should_average_state and (self.scheduled_state is not None)): if (self.scheduled_state.triggered or self.scheduled_state.done()): logger.log(self.status_loglevel, f'Not using pre-scheduled group for state averaging because it was already used elsewhere: {self.scheduled_state}') self.scheduled_state = None self.delay_before_state_averaging.update(task_size=1, interval=(time.perf_counter() - _epoch_start_time)) self.state_averager.step(increment_epoch=True, wait_for_trigger=wait_for_trigger, optimizer_step=should_perform_optimizer_step, delay_optimizer_step=(self.delay_optimizer_step and should_perform_optimizer_step), grad_scaler=grad_scaler, averaging_round=should_average_state, delay_averaging=(self.delay_state_averaging and (not self.auxiliary)), averaging_control=(self.scheduled_state if should_average_state else None), averaging_opts=(dict(timeout=self.averaging_timeout) if should_average_state else None)) if ((not should_average_state) and (self.scheduled_state is not None) and (not self.scheduled_state.done())): self.scheduled_state.cancel() self.scheduled_state = None self.tracker.update_epoch(new_epoch=self.state_averager.local_epoch) self._should_check_synchronization_on_update = True if (not self.client_mode): self.state_averager.state_sharing_priority = self.local_epoch if (self.use_gradient_averaging and (not self.auxiliary)): self.grad_averager.reset_accumulated_grads_() if (not self.client_mode): self.grad_averager.state_sharing_priority = self.local_epoch logger.log(self.status_loglevel, f'Transitioning to epoch {self.local_epoch}')
-2,510,677,156,005,104,600
Depending on the configuration: aggregate gradients and/or parameters, perform global optimizer step
hivemind/optim/optimizer.py
_update_global_epoch
MeshchaninovViacheslav/hivemind
python
def _update_global_epoch(self, grad_scaler: Optional[GradScaler]) -> None: assert (self._schema_hash == self._compute_schema_hash()), 'parameters or gradients changed during iteration' _epoch_start_time = time.perf_counter() with self.tracker.pause_updates(): wait_for_trigger = None if self.use_gradient_averaging: logger.log(self.status_loglevel, f'Beginning optimizer step #{self.local_epoch}') if self.delay_optimizer_step: self.state_averager.step(wait_for_delayed_updates=True) began_averaging_gradients = self._begin_averaging_gradients(grad_scaler) if (not began_averaging_gradients): self.grad_averager.load_accumulators_into_averager_() elif self.delay_grad_averaging: wait_for_trigger = partial(self._average_gradients_and_load_into_optimizer, self.scheduled_grads) else: self._average_gradients_and_load_into_optimizer(self.scheduled_grads) next_epoch = max((self.local_epoch + 1), self.tracker.global_epoch) swarm_not_empty = (self.tracker.global_progress.num_peers > 1) should_perform_optimizer_step = ((not self.auxiliary) and (not self.use_local_updates)) should_average_state = (swarm_not_empty and ((next_epoch % self.average_state_every) == 0) and (not self.state_averager.averaging_in_progress)) if (should_average_state and (self.scheduled_state is not None)): if (self.scheduled_state.triggered or self.scheduled_state.done()): logger.log(self.status_loglevel, f'Not using pre-scheduled group for state averaging because it was already used elsewhere: {self.scheduled_state}') self.scheduled_state = None self.delay_before_state_averaging.update(task_size=1, interval=(time.perf_counter() - _epoch_start_time)) self.state_averager.step(increment_epoch=True, wait_for_trigger=wait_for_trigger, optimizer_step=should_perform_optimizer_step, delay_optimizer_step=(self.delay_optimizer_step and should_perform_optimizer_step), grad_scaler=grad_scaler, averaging_round=should_average_state, delay_averaging=(self.delay_state_averaging and (not self.auxiliary)), averaging_control=(self.scheduled_state if should_average_state else None), averaging_opts=(dict(timeout=self.averaging_timeout) if should_average_state else None)) if ((not should_average_state) and (self.scheduled_state is not None) and (not self.scheduled_state.done())): self.scheduled_state.cancel() self.scheduled_state = None self.tracker.update_epoch(new_epoch=self.state_averager.local_epoch) self._should_check_synchronization_on_update = True if (not self.client_mode): self.state_averager.state_sharing_priority = self.local_epoch if (self.use_gradient_averaging and (not self.auxiliary)): self.grad_averager.reset_accumulated_grads_() if (not self.client_mode): self.grad_averager.state_sharing_priority = self.local_epoch logger.log(self.status_loglevel, f'Transitioning to epoch {self.local_epoch}')
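The decision flags at the top of _update_global_epoch reduce to a few lines of arithmetic; a standalone sketch with illustrative numbers:

def epoch_step_flags(local_epoch, global_epoch, num_peers, average_state_every,
                     auxiliary, use_local_updates, averaging_in_progress):
    # Mirror the flag computation in _update_global_epoch.
    next_epoch = max(local_epoch + 1, global_epoch)
    swarm_not_empty = num_peers > 1
    should_step = (not auxiliary) and (not use_local_updates)
    should_average = (swarm_not_empty
                      and next_epoch % average_state_every == 0
                      and not averaging_in_progress)
    return next_epoch, should_step, should_average

# A peer one epoch behind a 3-peer swarm, averaging state every 2 epochs:
print(epoch_step_flags(4, 5, num_peers=3, average_state_every=2,
                       auxiliary=False, use_local_updates=False,
                       averaging_in_progress=False))  # (5, True, False)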
def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool: 'Begin an all-reduce round to average gradients; return True if succeeded, False if failed' if (grad_scaler is not None): with grad_scaler.running_global_step(): assert grad_scaler.unscale_(self) began_averaging_gradients = False if ((self.scheduled_grads is not None) and (self.scheduled_grads.triggered or self.scheduled_grads.done())): logger.log(self.status_loglevel, f'Not using pre-scheduled group for gradient averaging because it was already used elsewhere: {self.scheduled_grads}') self.scheduled_grads = None elif (self.tracker.global_progress.num_peers > 1): try: self.scheduled_grads = self.grad_averager.step(control=self.scheduled_grads, reset_accumulators=True, wait=False) began_averaging_gradients = True except BaseException as e: logger.exception(e) if ((not began_averaging_gradients) and (self.scheduled_grads is not None) and (not self.scheduled_grads.done())): if (self.tracker.global_progress.num_peers > 1): logger.log(self.status_loglevel, f'Tagging along for a pre-scheduled gradient averaging round') self._tag_along_with_zero_weight(self.scheduled_grads) else: logger.log(self.status_loglevel, f'Skipping pre-scheduled averaging round: there are no other peers') self._load_local_gradients_into_optimizer() self.scheduled_grads.cancel() self.scheduled_grads = None return began_averaging_gradients
-3,321,637,960,115,376,600
Begin an all-reduce round to average gradients; return True if succeeded, False if failed
hivemind/optim/optimizer.py
_begin_averaging_gradients
MeshchaninovViacheslav/hivemind
python
def _begin_averaging_gradients(self, grad_scaler: Optional[GradScaler]) -> bool: if (grad_scaler is not None): with grad_scaler.running_global_step(): assert grad_scaler.unscale_(self) began_averaging_gradients = False if ((self.scheduled_grads is not None) and (self.scheduled_grads.triggered or self.scheduled_grads.done())): logger.log(self.status_loglevel, f'Not using pre-scheduled group for gradient averaging because it was already used elsewhere: {self.scheduled_grads}') self.scheduled_grads = None elif (self.tracker.global_progress.num_peers > 1): try: self.scheduled_grads = self.grad_averager.step(control=self.scheduled_grads, reset_accumulators=True, wait=False) began_averaging_gradients = True except BaseException as e: logger.exception(e) if ((not began_averaging_gradients) and (self.scheduled_grads is not None) and (not self.scheduled_grads.done())): if (self.tracker.global_progress.num_peers > 1): logger.log(self.status_loglevel, f'Tagging along for a pre-scheduled gradient averaging round') self._tag_along_with_zero_weight(self.scheduled_grads) else: logger.log(self.status_loglevel, f'Skipping pre-scheduled averaging round: there are no other peers') self._load_local_gradients_into_optimizer() self.scheduled_grads.cancel() self.scheduled_grads = None return began_averaging_gradients
def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool: 'Check if gradients are valid, accumulate and return True; otherwise, reset and return False' assert ((not self.use_local_updates) and (not self.auxiliary)) if ((grad_scaler is not None) and (not grad_scaler.are_grads_finite(self))): logger.log(self.status_loglevel, 'Encountered incorrect value in fp16 grads, resetting local gradients') self.tracker.report_local_progress(self.local_epoch, samples_accumulated=0) self.grad_averager.reset_accumulated_grads_() return False self.grad_averager.accumulate_grads_(batch_size) self.tracker.report_local_progress(self.local_epoch, self.grad_averager.local_samples_accumulated) return True
-3,769,334,914,421,445,000
Check if gradients are valid, accumulate and return True; otherwise, reset and return False
hivemind/optim/optimizer.py
_check_and_accumulate_gradients
MeshchaninovViacheslav/hivemind
python
def _check_and_accumulate_gradients(self, batch_size: int, grad_scaler: Optional[GradScaler]) -> bool: assert ((not self.use_local_updates) and (not self.auxiliary)) if ((grad_scaler is not None) and (not grad_scaler.are_grads_finite(self))): logger.log(self.status_loglevel, 'Encountered incorrect value in fp16 grads, resetting local gradients') self.tracker.report_local_progress(self.local_epoch, samples_accumulated=0) self.grad_averager.reset_accumulated_grads_() return False self.grad_averager.accumulate_grads_(batch_size) self.tracker.report_local_progress(self.local_epoch, self.grad_averager.local_samples_accumulated) return True
def _maybe_schedule_gradient_averaging(self) -> None: 'If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch' assert self.use_gradient_averaging if ((self.tracker.estimated_next_update_time - get_dht_time()) <= self.matchmaking_time): if ((self.scheduled_grads is None) or self.scheduled_grads.triggered or self.scheduled_grads.done()): eta_seconds = (self.tracker.estimated_next_update_time - get_dht_time()) eta_seconds = max(eta_seconds, self.grad_averager.matchmaking_kwargs['min_matchmaking_time']) logger.log(self.status_loglevel, f'Pre-scheduling gradient averaging round in {eta_seconds:.2f} sec') self.scheduled_grads = self.grad_averager.schedule_step(timeout=self.averaging_timeout)
-7,469,730,981,344,683,000
If next epoch is coming soon, schedule the next gradient averaging round at the estimated end of epoch
hivemind/optim/optimizer.py
_maybe_schedule_gradient_averaging
MeshchaninovViacheslav/hivemind
python
def _maybe_schedule_gradient_averaging(self) -> None: assert self.use_gradient_averaging if ((self.tracker.estimated_next_update_time - get_dht_time()) <= self.matchmaking_time): if ((self.scheduled_grads is None) or self.scheduled_grads.triggered or self.scheduled_grads.done()): eta_seconds = (self.tracker.estimated_next_update_time - get_dht_time()) eta_seconds = max(eta_seconds, self.grad_averager.matchmaking_kwargs['min_matchmaking_time']) logger.log(self.status_loglevel, f'Pre-scheduling gradient averaging round in {eta_seconds:.2f} sec') self.scheduled_grads = self.grad_averager.schedule_step(timeout=self.averaging_timeout)
def _maybe_schedule_state_averaging(self) -> None: 'If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start' next_epoch = max((self.local_epoch + 1), self.tracker.global_epoch) if ((next_epoch % self.average_state_every) != 0): return if self.state_averager.averaging_in_progress: return if (self.delay_before_state_averaging.num_updates == 0): return estimated_time = self.tracker.estimated_next_update_time estimated_time += self.delay_before_state_averaging.ema_seconds_per_sample estimated_time += self.state_averager.delay_before_averaging.ema_seconds_per_sample eta_seconds_to_averaging = (estimated_time - get_dht_time()) if (eta_seconds_to_averaging <= self.matchmaking_time): if ((self.scheduled_state is None) or self.scheduled_state.triggered or self.scheduled_state.done()): min_matchmaking_time = self.state_averager.matchmaking_kwargs['min_matchmaking_time'] actual_seconds = max(eta_seconds_to_averaging, min_matchmaking_time) logger.log(self.status_loglevel, f'Pre-scheduling state averaging round in {actual_seconds:.2f} sec') self.scheduled_state = self.state_averager.schedule_step(gather=next_epoch, timeout=self.averaging_timeout)
4,311,566,516,886,041,600
If next epoch is coming soon, schedule the next state averaging at estimated parameter averaging start
hivemind/optim/optimizer.py
_maybe_schedule_state_averaging
MeshchaninovViacheslav/hivemind
python
def _maybe_schedule_state_averaging(self) -> None: next_epoch = max((self.local_epoch + 1), self.tracker.global_epoch) if ((next_epoch % self.average_state_every) != 0): return if self.state_averager.averaging_in_progress: return if (self.delay_before_state_averaging.num_updates == 0): return estimated_time = self.tracker.estimated_next_update_time estimated_time += self.delay_before_state_averaging.ema_seconds_per_sample estimated_time += self.state_averager.delay_before_averaging.ema_seconds_per_sample eta_seconds_to_averaging = (estimated_time - get_dht_time()) if (eta_seconds_to_averaging <= self.matchmaking_time): if ((self.scheduled_state is None) or self.scheduled_state.triggered or self.scheduled_state.done()): min_matchmaking_time = self.state_averager.matchmaking_kwargs['min_matchmaking_time'] actual_seconds = max(eta_seconds_to_averaging, min_matchmaking_time) logger.log(self.status_loglevel, f'Pre-scheduling state averaging round in {actual_seconds:.2f} sec') self.scheduled_state = self.state_averager.schedule_step(gather=next_epoch, timeout=self.averaging_timeout)
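The pre-scheduling test in both _maybe_schedule_* methods above is a simple ETA comparison; a standalone sketch (all numbers illustrative):

import time

def should_preschedule(estimated_next_update, extra_delays, matchmaking_time, now=None):
    # Project when averaging would actually start, then pre-schedule once
    # that moment falls inside the matchmaking window.
    now = time.time() if now is None else now
    eta = (estimated_next_update + sum(extra_delays)) - now
    return eta <= matchmaking_time

# Epoch expected at t=103, ~2s of measured pipeline delays, 10s window:
print(should_preschedule(103.0, [1.2, 0.8], matchmaking_time=10.0, now=100.0))  # True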
def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]): 'Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients' assert ((self.use_gradient_averaging and (maybe_step_control is None)) or maybe_step_control.triggered) averaged_gradients = False try: if (maybe_step_control is not None): group_info = maybe_step_control.result(self.averaging_timeout) logger.log(self.status_loglevel, f'Averaged gradients with {len(group_info)} peers') self._load_averaged_gradients_into_optimizer_() averaged_gradients = True else: logger.log(self.status_loglevel, f'Skipped averaging: there are no other peers') except BaseException as e: logger.log(self.status_loglevel, f'Averaging gradients failed with {repr(e)}') if (not averaged_gradients): self._load_local_gradients_into_optimizer()
8,258,748,080,675,111,000
Run gradient averaging; on success, feed averaged gradients into optimizer; else, use local gradients
hivemind/optim/optimizer.py
_average_gradients_and_load_into_optimizer
MeshchaninovViacheslav/hivemind
python
def _average_gradients_and_load_into_optimizer(self, maybe_step_control: Optional[StepControl]): assert ((self.use_gradient_averaging and (maybe_step_control is None)) or maybe_step_control.triggered) averaged_gradients = False try: if (maybe_step_control is not None): group_info = maybe_step_control.result(self.averaging_timeout) logger.log(self.status_loglevel, f'Averaged gradients with {len(group_info)} peers') self._load_averaged_gradients_into_optimizer_() averaged_gradients = True else: logger.log(self.status_loglevel, f'Skipped averaging: there are no other peers') except BaseException as e: logger.log(self.status_loglevel, f'Averaging gradients failed with {repr(e)}') if (not averaged_gradients): self._load_local_gradients_into_optimizer()
def _load_averaged_gradients_into_optimizer_(self): 'If required, load averaged gradients into optimizer; otherwise simply notify grad averager' assert self.use_gradient_averaging if self.offload_optimizer: pass else: optimized_param_groups = self.state_averager.optimizer.param_groups optimized_parameters = [param for group in optimized_param_groups for param in group['params']] with torch.no_grad(), self.grad_averager.get_tensors() as averaged_gradients: assert (len(averaged_gradients) == len(optimized_parameters)) for (opt_param, averaged_grad) in zip(optimized_parameters, averaged_gradients): opt_param.grad.copy_(averaged_grad, non_blocking=True) self.grad_averager.notify_used_averaged_gradients()
-1,556,350,358,990,898,400
If required, load averaged gradients into optimizer; otherwise simply notify grad averager
hivemind/optim/optimizer.py
_load_averaged_gradients_into_optimizer_
MeshchaninovViacheslav/hivemind
python
def _load_averaged_gradients_into_optimizer_(self): assert self.use_gradient_averaging if self.offload_optimizer: pass else: optimized_param_groups = self.state_averager.optimizer.param_groups optimized_parameters = [param for group in optimized_param_groups for param in group['params']] with torch.no_grad(), self.grad_averager.get_tensors() as averaged_gradients: assert (len(averaged_gradients) == len(optimized_parameters)) for (opt_param, averaged_grad) in zip(optimized_parameters, averaged_gradients): opt_param.grad.copy_(averaged_grad, non_blocking=True) self.grad_averager.notify_used_averaged_gradients()
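The inner step of _load_averaged_gradients_into_optimizer_ is an in-place tensor copy under no_grad; a minimal torch illustration (the surrounding averager machinery is replaced by plain lists):

import torch

params = [torch.zeros(2, requires_grad=True)]
for p in params:
    p.grad = torch.zeros_like(p)          # pretend backward() already ran
averaged = [torch.ones(2)]                # stand-in for the averager's tensors

with torch.no_grad():
    assert len(averaged) == len(params)
    for opt_param, averaged_grad in zip(params, averaged):
        opt_param.grad.copy_(averaged_grad, non_blocking=True)

print(params[0].grad)  # tensor([1., 1.])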
def _load_local_gradients_into_optimizer(self): 'Fallback to using local gradients in the optimizer (instead of averaged gradients)' logger.log(self.status_loglevel, f'Proceeding with local gradients') self.grad_averager.load_accumulators_into_averager_() self._load_averaged_gradients_into_optimizer_()
-2,611,091,218,565,431,300
Fallback to using local gradients in the optimizer (instead of averaged gradients)
hivemind/optim/optimizer.py
_load_local_gradients_into_optimizer
MeshchaninovViacheslav/hivemind
python
def _load_local_gradients_into_optimizer(self): logger.log(self.status_loglevel, f'Proceeding with local gradients') self.grad_averager.load_accumulators_into_averager_() self._load_averaged_gradients_into_optimizer_()
def zero_grad(self, set_to_none: bool=False): 'Reset gradients from model. If reuse_grad_buffers=True, this will raise an error.' if (self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers): raise ValueError(f'When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never call zero_grad manually. Gradients will be refreshed internally') for param_group in self.param_groups: for param in param_group['params']: if (param.grad is None): pass elif set_to_none: param.grad = None else: param.grad.zero_()
5,026,697,121,043,981,000
Reset gradients from model. If reuse_grad_buffers=True, this will raise an error.
hivemind/optim/optimizer.py
zero_grad
MeshchaninovViacheslav/hivemind
python
def zero_grad(self, set_to_none: bool=False): if (self.use_gradient_averaging and self.grad_averager.reuse_grad_buffers): raise ValueError(f'When running {self.__class__.__name__} with reuse_grad_buffers=True, user should never call zero_grad manually. Gradients will be refreshed internally') for param_group in self.param_groups: for param in param_group['params']: if (param.grad is None): pass elif set_to_none: param.grad = None else: param.grad.zero_()
def _should_load_state_from_peers(self) -> bool: '\n If true, peer will discard local progress and attempt to download state from peers.\n This method allows peer to continue training in two cases:\n - peer is on the same epoch as other collaborators - keep training normally\n - peer was on the same epoch and accumulated some grads, but some collaborators\n have just transitioned to the next epoch - this peer should also transition.\n\n :note: The latter case occurs due to the lack of network synchrony: the first peer that\n detects enough samples will transition to the next step and start counting samples anew.\n Some other peers may take time before they check with DHT and observe that\n - the global epoch is technically one epoch ahead of the current one and\n - the remaining (non-transitioned) peers no longer have target_batch_size between them\n If this is the case, peer should transition to the next epoch and does *not* need to re-load state.\n ' if (self._should_check_synchronization_on_update and self.tracker.fetched_global_progress_this_epoch.is_set()): self._should_check_synchronization_on_update = False return (self.local_epoch != self.tracker.global_epoch) return (self.local_epoch < (self.tracker.global_epoch - 1))
-4,292,192,955,225,625,600
If true, peer will discard local progress and attempt to download state from peers. This method allows peer to continue training in two cases: - peer is on the same epoch as other collaborators - keep training normally - peer was on the same epoch and accumulated some grads, but some collaborators have just transitioned to the next epoch - this peer should also transition. :note: The latter case occurs due to the lack of network synchrony: the first peer that detects enough samples will transition to the next step and start counting samples anew. Some other peers may take time before they check with DHT and observe that - the global epoch is technically one epoch ahead of the current one and - the remaining (non-transitioned) peers no longer have target_batch_size between them If this is the case, peer should transition to the next epoch and does *not* need to re-load state.
hivemind/optim/optimizer.py
_should_load_state_from_peers
MeshchaninovViacheslav/hivemind
python
def _should_load_state_from_peers(self) -> bool: '\n If true, peer will discard local progress and attempt to download state from peers.\n This method allows peer to continue training in two cases:\n - peer is on the same epoch as other collaborators - keep training normally\n - peer was on the same epoch and accumulated some grads, but some collaborators\n have just transitioned to the next epoch - this peer should also transition.\n\n :note: The latter case occurs due to the lack of network synchrony: the first peer that\n detects enough samples will transition to the next step and start counting samples anew.\n Some other peers may take time before they check with DHT and observe that\n - the global epoch is technically one epoch ahead of the current one and\n - the remaining (non-transitioned) peers no longer have target_batch_size between them\n If this is the case, peer should transition to the next epoch and does *not* need to re-load state.\n ' if (self._should_check_synchronization_on_update and self.tracker.fetched_global_progress_this_epoch.is_set()): self._should_check_synchronization_on_update = False return (self.local_epoch != self.tracker.global_epoch) return (self.local_epoch < (self.tracker.global_epoch - 1))
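The two-branch rule described in that docstring fits in a few lines; a standalone sketch of the decision, with the tracked state passed as plain arguments for illustration:

def should_load_state(local_epoch, global_epoch, pending_check, fetched_this_epoch):
    # Right after an epoch transition, any mismatch means reload;
    # otherwise tolerate being at most one epoch behind.
    if pending_check and fetched_this_epoch:
        return local_epoch != global_epoch
    return local_epoch < global_epoch - 1

print(should_load_state(5, 6, pending_check=False, fetched_this_epoch=False))  # False: one epoch behind is fine
print(should_load_state(4, 6, pending_check=False, fetched_this_epoch=False))  # True: two epochs behind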
def is_synchronized_with_peers(self) -> bool: 'Checks whether the current peer is up-to-date with others in terms of the epoch (step) number.' return (self.local_epoch >= (self.tracker.global_epoch - 1))
8,538,689,893,884,570,000
Checks whether the current peer is up-to-date with others in terms of the epoch (step) number.
hivemind/optim/optimizer.py
is_synchronized_with_peers
MeshchaninovViacheslav/hivemind
python
def is_synchronized_with_peers(self) -> bool: return (self.local_epoch >= (self.tracker.global_epoch - 1))
def load_state_from_peers(self, **kwargs): '\n Attempt to load the newest collaboration state from other peers within the same run_id.\n\n If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.\n ' if ((self.scheduled_grads is not None) and (not self.scheduled_grads.done())): self._tag_along_with_zero_weight(self.scheduled_grads) self.scheduled_grads = None self.state_averager.step(wait_for_delayed_updates=True) with self.tracker.pause_updates(): while True: try: self.state_averager.load_state_from_peers(timeout=self.load_state_timeout, **kwargs) break except KeyboardInterrupt: raise except BaseException as e: logger.exception(f'Failed to load state from peers: {e}, retrying ...') continue if ((self.tracker.global_epoch - 1) <= self.local_epoch < self.tracker.global_epoch): logger.log(self.status_loglevel, f'Catching up with collaboration step {self.tracker.global_epoch}') self.state_averager.local_epoch = self.tracker.global_epoch self.tracker.report_local_progress(local_epoch=self.local_epoch, samples_accumulated=0) if (not self.client_mode): self.state_averager.state_sharing_priority = self.local_epoch if self.use_gradient_averaging: self.grad_averager.reset_accumulated_grads_() if (not self.client_mode): self.grad_averager.state_sharing_priority = self.local_epoch
-3,688,123,011,394,910,000
Attempt to load the newest collaboration state from other peers within the same run_id. If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.
hivemind/optim/optimizer.py
load_state_from_peers
MeshchaninovViacheslav/hivemind
python
def load_state_from_peers(self, **kwargs): '\n Attempt to load the newest collaboration state from other peers within the same run_id.\n\n If successful, this will update parameters, optimizer state, local epoch and learning rate schedule in-place.\n ' if ((self.scheduled_grads is not None) and (not self.scheduled_grads.done())): self._tag_along_with_zero_weight(self.scheduled_grads) self.scheduled_grads = None self.state_averager.step(wait_for_delayed_updates=True) with self.tracker.pause_updates(): while True: try: self.state_averager.load_state_from_peers(timeout=self.load_state_timeout, **kwargs) break except KeyboardInterrupt: raise except BaseException as e: logger.exception(f'Failed to load state from peers: {e}, retrying ...') continue if ((self.tracker.global_epoch - 1) <= self.local_epoch < self.tracker.global_epoch): logger.log(self.status_loglevel, f'Catching up with collaboration step {self.tracker.global_epoch}') self.state_averager.local_epoch = self.tracker.global_epoch self.tracker.report_local_progress(local_epoch=self.local_epoch, samples_accumulated=0) if (not self.client_mode): self.state_averager.state_sharing_priority = self.local_epoch if self.use_gradient_averaging: self.grad_averager.reset_accumulated_grads_() if (not self.client_mode): self.grad_averager.state_sharing_priority = self.local_epoch
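The retry loop inside load_state_from_peers has a reusable shape: re-raise KeyboardInterrupt, log and retry everything else. A self-contained sketch with a deliberately flaky action:

def retry_until_success(action):
    # Keep trying until the action succeeds, just like the while-loop
    # in load_state_from_peers.
    while True:
        try:
            return action()
        except KeyboardInterrupt:
            raise
        except BaseException as e:
            print(f'Failed to load state from peers: {e}, retrying ...')

calls = {'n': 0}
def flaky_download():
    calls['n'] += 1
    if calls['n'] < 3:
        raise TimeoutError('no peers responded')
    return 'downloaded state'

print(retry_until_success(flaky_download))  # succeeds on the third attempt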
def _tag_along_with_zero_weight(self, control: StepControl): 'Wait for a running averaging round to finish with zero weight.' if (not control.triggered): control.weight = 0 control.allow_allreduce() if (not control.done()): try: control.result(self.averaging_timeout) except BaseException as e: logger.exception(e) if (not control.done()): control.cancel()
6,806,298,440,358,946,000
Wait for a running averaging round to finish with zero weight.
hivemind/optim/optimizer.py
_tag_along_with_zero_weight
MeshchaninovViacheslav/hivemind
python
def _tag_along_with_zero_weight(self, control: StepControl): if (not control.triggered): control.weight = 0 control.allow_allreduce() if (not control.done()): try: control.result(self.averaging_timeout) except BaseException as e: logger.exception(e) if (not control.done()): control.cancel()
def visualize(model: Model, structural_part=True, measurement_part=False, view=True, filename=None, title=''): 'Visualization of SEM model via graphviz library.\n\n Keyword arguments:\n model -- A SEM model.\n structural_part -- Should structural part be visualised?\n measurement_part -- Should measurement part be visualised?\n view -- Should graph be displayed?\n filename -- Filename/path.\n title -- Title.\n ' g = gv.Digraph(format='jpg', graph_attr={'label': title}) if structural_part: g.node_attr.update(color='red', shape='box') for (i, j) in model.parameters['Beta']: (lval, rval) = (model.beta_names[0][i], model.beta_names[0][j]) g.edge(rval, lval) if measurement_part: g.node_attr.update(color='black', shape='circle') for (i, j) in model.parameters['Lambda']: (lval, rval) = (model.lambda_names[0][i], model.lambda_names[0][j]) g.edge(lval, rval) g.render(filename, view=view)
6,237,103,513,191,551,000
Visualization of SEM model via graphviz library. Keyword arguments: model -- A SEM model. structural_part -- Should structural part be visualised? measurement_part -- Should measurement part be visualised? view -- Should graph be displayed? filename -- Filename/path. title -- Title.
semopy/visualization.py
visualize
YoungjuneKwon/forked-semopy
python
def visualize(model: Model, structural_part=True, measurement_part=False, view=True, filename=None, title=''): 'Visualization of SEM model via graphviz library.\n\n Keyword arguments:\n model -- A SEM model.\n structural_part -- Should structural part be visualised?\n measurement_part -- Should measurement part be visualised?\n view -- Should graph be displayed?\n filename -- Filename/path.\n title -- Title.\n ' g = gv.Digraph(format='jpg', graph_attr={'label': title}) if structural_part: g.node_attr.update(color='red', shape='box') for (i, j) in model.parameters['Beta']: (lval, rval) = (model.beta_names[0][i], model.beta_names[0][j]) g.edge(rval, lval) if measurement_part: g.node_attr.update(color='black', shape='circle') for (i, j) in model.parameters['Lambda']: (lval, rval) = (model.lambda_names[0][i], model.lambda_names[0][j]) g.edge(lval, rval) g.render(filename, view=view)
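A hedged usage sketch for visualize(); the lavaan-style model description and the Model construction shown here are assumptions about this semopy fork (depending on the version, a dataset may need to be loaded before the parameter matrices exist):

from semopy import Model
from semopy.visualization import visualize

# Hypothetical model: a two-indicator latent variable regressed on one observed variable.
desc = '''eta =~ y1 + y2
eta ~ x1'''
model = Model(desc)
visualize(model, structural_part=True, measurement_part=True,
          view=False, filename='sem_graph', title='Demo SEM')
# graphviz's render() writes sem_graph.jpg in the working directory.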
def __len__(self): 'Returns the length of the coding, use len(my_coding).' return len(self._data)
423,562,225,722,425,860
Returns the length of the coding, use len(my_coding).
Bio/Nexus/StandardData.py
__len__
EnjoyLifeFund/macHighSierra-py36-pkgs
python
def __len__(self): return len(self._data)
def raw(self): 'Returns the full coding as a python list.' return self._data
6,180,843,810,050,607,000
Returns the full coding as a python list.
Bio/Nexus/StandardData.py
raw
EnjoyLifeFund/macHighSierra-py36-pkgs
python
def raw(self): return self._data
def __str__(self): 'Returns the full coding as a python string, use str(my_coding).' str_return = '' for coding in self._data: if (coding['t'] == 'multi'): str_return += (('(' + ''.join(coding['d'])) + ')') elif (coding['t'] == 'uncer'): str_return += (('{' + ''.join(coding['d'])) + '}') else: str_return += coding['d'][0] return str_return
-8,687,247,177,936,933,000
Returns the full coding as a python string, use str(my_coding).
Bio/Nexus/StandardData.py
__str__
EnjoyLifeFund/macHighSierra-py36-pkgs
python
def __str__(self): str_return = '' for coding in self._data: if (coding['t'] == 'multi'): str_return += (('(' + ''.join(coding['d'])) + ')') elif (coding['t'] == 'uncer'): str_return += (('{' + ''.join(coding['d'])) + '}') else: str_return += coding['d'][0] return str_return
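The formatting rule in __str__ can be restated as a standalone function; the record layout ({'t': type, 'd': symbols}) is taken from the code above, while the sample values are made up:

def format_coding(data):
    # 'multi' codings render in parentheses, 'uncer' in braces,
    # anything else as its single symbol.
    out = ''
    for coding in data:
        if coding['t'] == 'multi':
            out += '(' + ''.join(coding['d']) + ')'
        elif coding['t'] == 'uncer':
            out += '{' + ''.join(coding['d']) + '}'
        else:
            out += coding['d'][0]
    return out

print(format_coding([{'t': 'std', 'd': ['1']},
                     {'t': 'multi', 'd': ['0', '1']},
                     {'t': 'uncer', 'd': ['2', '3']}]))  # 1(01){23}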
def next(self): 'Deprecated Python 2 style alias for Python 3 style __next__ method.' return self.__next__()
-8,216,947,362,407,348,000
Deprecated Python 2 style alias for Python 3 style __next__ method.
Bio/Nexus/StandardData.py
next
EnjoyLifeFund/macHighSierra-py36-pkgs
python
def next(self): return self.__next__()
def display(wa): 'Display all the statistics on the screen' print('Total word count: {}'.format(len(wa.words_list(wa.normalized_text)))) print('Number of different words: {}'.format(len(wa.differents_words_list(wa.normalized_text)))) print('Total number of characters: {}'.format(len(wa.normal_text))) print('Number of characters without spaces: {}'.format(wa.all_characters_without_spaces(wa.normal_text))) print('Number of spaces: {}'.format(wa.number_spaces(wa.normal_text))) print('Sentence count: {}'.format(len(wa.sentence_split(wa.normalized_text)))) print('Average sentence length (Words): {0:.2f}'.format(wa.average_sentence_length(wa.normalized_text))) print('Max sentence length (Characters): {}'.format(wa.max_sentence_length(wa.normalized_text))) print('Min sentence length (Characters): {}'.format(wa.min_sentence_length(wa.normalized_text))) print('Lexical density: {0:.2f} %'.format(lexical_density(wa.words_list(wa.normalized_text), FILE_LEXI))) print('Language: {} \n'.format(deduce_language(wa.words_list(wa.normalized_text), FILE_LEXI)))
-2,072,599,764,001,801,500
Display all the statistics on the screen
words.py
display
Layto888/Words-Analysis
python
def display(wa): print('Total word count: {}'.format(len(wa.words_list(wa.normalized_text)))) print('Number of different words: {}'.format(len(wa.differents_words_list(wa.normalized_text)))) print('Total number of characters: {}'.format(len(wa.normal_text))) print('Number of characters without spaces: {}'.format(wa.all_characters_without_spaces(wa.normal_text))) print('Number of spaces: {}'.format(wa.number_spaces(wa.normal_text))) print('Sentence count: {}'.format(len(wa.sentence_split(wa.normalized_text)))) print('Average sentence length (Words): {0:.2f}'.format(wa.average_sentence_length(wa.normalized_text))) print('Max sentence length (Characters): {}'.format(wa.max_sentence_length(wa.normalized_text))) print('Min sentence length (Characters): {}'.format(wa.min_sentence_length(wa.normalized_text))) print('Lexical density: {0:.2f} %'.format(lexical_density(wa.words_list(wa.normalized_text), FILE_LEXI))) print('Language: {} \n'.format(deduce_language(wa.words_list(wa.normalized_text), FILE_LEXI)))
def lexical_density(words_list, lexi_file_name): " calculates the lexical density.\n L_d = (N_lex / N) * 100\n Where:\n\n L_d = the analyzed text's lexical density\n\n N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs)\n in the analyzed text.\n\n N = the number of all tokens (total number of words) in the analyzed text.\n " l_d = 0 n_lex = 0 n = 0 with open(lexi_file_name, 'r', encoding=DEFAULT_CODEC) as fp: lexical_words = fp.read() lexical_words = lexical_words.split(',') for word in lexical_words: counter = words_list.count(word) n_lex += counter counter = 0 n = len(words_list) l_d = ((n_lex / n) * 100) return l_d
1,663,756,399,478,105,900
calculates the lexical density. L_d = (N_lex / N) * 100 Where: L_d = the analyzed text's lexical density N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs) in the analyzed text. N = the number of all tokens (total number of words) in the analyzed text.
words.py
lexical_density
Layto888/Words-Analysis
python
def lexical_density(words_list, lexi_file_name): " calculates the lexical density.\n L_d = (N_lex / N) * 100\n Where:\n\n L_d = the analyzed text's lexical density\n\n N_lex = the number of lexical word tokens (nouns,adjectives,verbs,adverbs)\n in the analyzed text.\n\n N = the number of all tokens (total number of words) in the analyzed text.\n " l_d = 0 n_lex = 0 n = 0 with open(lexi_file_name, 'r', encoding=DEFAULT_CODEC) as fp: lexical_words = fp.read() lexical_words = lexical_words.split(',') for word in lexical_words: counter = words_list.count(word) n_lex += counter counter = 0 n = len(words_list) l_d = ((n_lex / n) * 100) return l_d
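A worked instance of the L_d = (N_lex / N) * 100 formula from that docstring, with the lexicon inlined instead of read from FILE_LEXI (toy data, not the real lexicon file):

words = ['the', 'quick', 'fox', 'jumps', 'over', 'the', 'lazy', 'dog']
lexical_words = ['quick', 'fox', 'jumps', 'lazy', 'dog']  # hypothetical lexicon

n_lex = sum(words.count(w) for w in lexical_words)  # 5 lexical tokens
n = len(words)                                      # 8 tokens in total
print(n_lex / n * 100)                              # 62.5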
def deduce_language(words_list, lexi_file_name): '\n Deduces whether the text is in French or English,\n using the lexical words found in the text.\n ' with open(lexi_file_name, 'r', encoding=DEFAULT_CODEC) as fp: lexical_words = fp.read() lexical_words = lexical_words.split(',') for word in words_list: if (word in lexical_words): if (lexical_words.index(word) <= FRENCH_LIST_LENGTH): return 'French' else: return 'English' return 'Not found'
-2,685,445,105,616,604,000
Deduces whether the text is in French or English, using the lexical words found in the text.
words.py
deduce_language
Layto888/Words-Analysis
python
def deduce_language(words_list, lexi_file_name): '\n Deduces whether the text is in French or English,\n using the lexical words found in the text.\n ' with open(lexi_file_name, 'r', encoding=DEFAULT_CODEC) as fp: lexical_words = fp.read() lexical_words = lexical_words.split(',') for word in words_list: if (word in lexical_words): if (lexical_words.index(word) <= FRENCH_LIST_LENGTH): return 'French' else: return 'English' return 'Not found'
def show_process_time(t1_start, t1_stop, t2_start, t2_stop): '\n function to show elapsed time.\n ' print('\n') print('Elapsed time: {0:.4f} [sec]'.format((t1_stop - t1_start))) print('CPU process time: {0:.4f} [sec]'.format((t2_stop - t2_start))) print('Done.')
-4,703,739,508,452,082,000
function to show elapsed time.
words.py
show_process_time
Layto888/Words-Analysis
python
def show_process_time(t1_start, t1_stop, t2_start, t2_stop): '\n \n ' print('\n') print('Elapsed time: {0:.4f} [sec]'.format((t1_stop - t1_start))) print('CPU process time: {0:.4f} [sec]'.format((t2_stop - t2_start))) print('Done.')
def __init__(self, filename): '\n Input : text file name\n Do some operations to a text and return results.\n ' with open(filename, 'r', encoding=DEFAULT_CODEC) as fp: self.normal_text = fp.read().strip() self.normalized_text = self.normalize_text(self.normal_text)
135,094,893,725,222,100
Input: a text file name. Reads the file, normalizes the text, and exposes analysis operations on it.
words.py
__init__
Layto888/Words-Analysis
python
def __init__(self, filename): '\n Input: a text file name.\n Reads the file, normalizes the text, and exposes analysis operations on it.\n ' with open(filename, 'r', encoding=DEFAULT_CODEC) as fp: self.normal_text = fp.read().strip() self.normalized_text = self.normalize_text(self.normal_text)