text
stringlengths 4
1.02M
| meta
dict |
---|---|
from __future__ import print_function, absolute_import, unicode_literals
import codecs
import datetime
import os
try:
from urlparse import urljoin, urlparse
except ImportError:
from urllib.parse import urljoin, urlparse # NOQA
from nikola.plugin_categories import LateTask
from nikola.utils import config_changed
# XML prologue and <urlset> opener for the sitemap (sitemaps.org 0.9 schema).
header = """<?xml version="1.0" encoding="UTF-8"?>
<urlset
xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.sitemaps.org/schemas/sitemap/0.9
http://www.sitemaps.org/schemas/sitemap/0.9/sitemap.xsd">
"""
# Template for one <url> entry: {0} = absolute URL, {1} = last-modified ISO date.
url_format = """ <url>
<loc>{0}</loc>
<lastmod>{1}</lastmod>
<priority>0.5000</priority>
</url>
"""
def get_lastmod(p):
    """Return the mtime of path *p* as an ISO date string (YYYY-MM-DD)."""
    # isoformat() yields 'YYYY-MM-DDTHH:MM:SS'; keep only the date part.
    return datetime.datetime.fromtimestamp(os.stat(p).st_mtime).isoformat().split('T')[0]
def get_base_path(base):
    """Return the path component of a base URL, normalized to end with '/'.

    An URL without a path component yields '/'.

    >>> get_base_path('http://some.site') == '/'
    True
    >>> get_base_path('http://some.site/') == '/'
    True
    >>> get_base_path('http://some.site/some/sub-path') == '/some/sub-path/'
    True
    >>> get_base_path('http://some.site/some/sub-path/') == '/some/sub-path/'
    True
    """
    # The original branched on `not base_parsed.path` only to assign the same
    # value ('' either way) — the branch was redundant and is removed.
    sub_path = urlparse(base).path
    if sub_path.endswith('/'):
        return sub_path
    # Covers both the empty path ('' -> '/') and a path missing its slash.
    return sub_path + '/'
class Sitemap(LateTask):
    """Generate a Google sitemap (sitemap.xml) for the rendered site."""

    # doit task basename.
    name = "sitemap"

    def gen_tasks(self):
        """Generate Google sitemap.

        Yields a single doit task that walks the output folder and writes
        sitemap.xml listing every indexed folder and mapped file.
        """
        # Configuration this task depends on; also feeds the uptodate check.
        kw = {
            "base_url": self.site.config["BASE_URL"],
            "site_url": self.site.config["SITE_URL"],
            "output_folder": self.site.config["OUTPUT_FOLDER"],
            "strip_indexes": self.site.config["STRIP_INDEXES"],
            "index_file": self.site.config["INDEX_FILE"],
            "sitemap_include_fileless_dirs": self.site.config["SITEMAP_INCLUDE_FILELESS_DIRS"],
            "mapped_extensions": self.site.config.get('MAPPED_EXTENSIONS', ['.html', '.htm'])
        }
        output_path = kw['output_folder']
        sitemap_path = os.path.join(output_path, "sitemap.xml")
        base_path = get_base_path(kw['base_url'])
        # loc -> rendered <url> entry. Shared mutable state: scan_locs()
        # (called both now and again at task execution time) fills it in.
        locs = {}
        output = kw['output_folder']
        base_url = kw['base_url']
        mapped_exts = kw['mapped_extensions']

        def scan_locs():
            # Walk the rendered output tree and record a sitemap entry for
            # every indexed folder and every file with a mapped extension.
            for root, dirs, files in os.walk(output):
                if not dirs and not files and not kw['sitemap_include_fileless_dirs']:
                    continue  # Totally empty, not on sitemap
                path = os.path.relpath(root, output)
                # ignore the current directory.
                path = (path.replace(os.sep, '/') + '/').replace('./', '')
                lastmod = get_lastmod(root)
                loc = urljoin(base_url, base_path + path)
                if 'index.html' in files:  # Only map folders with indexes
                    locs[loc] = url_format.format(loc, lastmod)
                for fname in files:
                    if kw['strip_indexes'] and fname == kw['index_file']:
                        continue  # We already mapped the folder
                    if os.path.splitext(fname)[-1] in mapped_exts:
                        real_path = os.path.join(root, fname)
                        path = os.path.relpath(real_path, output)
                        post = self.site.post_per_file.get(path)
                        # Unpublished content must not leak into the sitemap.
                        if post and (post.is_draft or post.is_retired or post.publish_later):
                            continue
                        path = path.replace(os.sep, '/')
                        lastmod = get_lastmod(real_path)
                        loc = urljoin(base_url, base_path + path)
                        locs[loc] = url_format.format(loc, lastmod)

        def write_sitemap():
            # Have to rescan, because files may have been added between
            # task dep scanning and task execution
            scan_locs()
            with codecs.open(sitemap_path, 'wb+', 'utf8') as outf:
                outf.write(header)
                # Sorted output keeps the file deterministic across runs,
                # independent of os.walk ordering.
                for k in sorted(locs.keys()):
                    outf.write(locs[k])
                outf.write("</urlset>")
            return True

        # Initial scan so `locs` participates in the uptodate fingerprint.
        scan_locs()
        task = {
            "basename": "sitemap",
            "name": sitemap_path,
            "targets": [sitemap_path],
            "actions": [(write_sitemap,)],
            "uptodate": [config_changed({1: kw, 2: locs})],
            "clean": True,
            "task_dep": ["render_site"],
        }
        yield task
if __name__ == '__main__':
    # Run this module's doctests (the get_base_path examples) when executed
    # directly as a script.
    import doctest
    doctest.testmod()
| {
"content_hash": "f8b624e720e0e70ff17442fd4819aa2d",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 102,
"avg_line_length": 36.166666666666664,
"alnum_prop": 0.543778801843318,
"repo_name": "servalproject/nikola",
"id": "b9f2c7f36238762498c5c734e0ae3f9dae36d50a",
"size": "5884",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nikola/plugins/task_sitemap/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "156379"
},
{
"name": "Python",
"bytes": "429762"
},
{
"name": "Shell",
"bytes": "28"
}
],
"symlink_target": ""
} |
"""
__graph_MT_pre__Pattern.py___________________________________________________________
Automatically generated graphical appearance ---> MODIFY DIRECTLY WITH CAUTION
_____________________________________________________________________________
"""
import tkFont
from graphEntity import *
from GraphicalForm import *
from ATOM3Constraint import *
class graph_MT_pre__Pattern(graphEntity):
    """Auto-generated graphical appearance of the MT_pre__Pattern entity.

    Draws a cyan rectangle with a title label and an edge-connector point on
    a Tkinter canvas. Coordinates are generated — modify with caution.
    """

    def __init__(self, x, y, semObject = None):
        # Semantic (model) object this icon represents; may be None.
        self.semanticObject = semObject
        # Default bounding-box size of the icon, in canvas units.
        self.sizeX, self.sizeY = 173, 91
        graphEntity.__init__(self, x, y)
        self.ChangesAtRunTime = 0
        self.constraintList = []
        if self.semanticObject: atribs = self.semanticObject.attributesToDraw()
        else: atribs = None
        self.graphForms = []
        self.imageDict = self.getImageDict()

    def DrawObject(self, drawing, showGG = 0):
        """Draw the icon on *drawing* (a Tkinter canvas).

        When showGG is true and a semantic object exists, also draw the
        graph-grammar label.
        """
        self.dc = drawing
        if showGG and self.semanticObject: self.drawGGLabel(drawing)
        # Degenerate (zero-size) oval used purely as an edge connector point.
        h = drawing.create_oval(self.translate([209.0, 88.0, 209.0, 88.0]), tags = (self.tag, 'connector'), outline = '', fill = '' )
        self.connectors.append( h )
        # Main body rectangle of the icon.
        h = drawing.create_rectangle(self.translate([38.0, 38.0, 209.0, 127.0]), tags = self.tag, stipple = '', width = 1, outline = 'black', fill = 'cyan')
        self.gf5 = GraphicalForm(drawing, h, "gf5")
        self.graphForms.append(self.gf5)
        font = tkFont.Font( family='Arial', size=12, weight='normal', slant='roman', underline=0)
        # Title text; translate() returns coords, [:2] keeps only x, y.
        h = drawing.create_text(self.translate([126.0, 59.0, 126.0, 12.0])[:2], tags = self.tag, font=font, fill = 'black', anchor = 'center', text = 'MT_pre__Pattern', width = '0', justify= 'left', stipple='' )
        self.gf23 = GraphicalForm(drawing, h, 'gf23', fontObject=font)
        self.graphForms.append(self.gf23)
        helv12 = tkFont.Font ( family="Helvetica", size=12, weight="bold" )
        # Graph-grammar label bound to the semantic object's MT_label__.
        h = drawing.create_text(self.translate([-3, -3]), font=helv12,
                                tags = (self.tag, self.semanticObject.getClass()),
                                fill = "black",
                                text=self.semanticObject.MT_label__.toString())
        self.attr_display["MT_label__"] = h
        self.gf_label = GraphicalForm(drawing, h, 'gf_label', fontObject=helv12)
        self.graphForms.append(self.gf_label)

    def postCondition( self, actionID, * params):
        """Editor hook invoked after an action; no post-condition defined."""
        return None

    def preCondition( self, actionID, * params):
        """Editor hook invoked before an action; no pre-condition defined."""
        return None

    def getImageDict( self ):
        """Return the mapping of image resources used by this icon (none)."""
        imageDict = dict()
        return imageDict
# ATOM3 discovers the generated class through this module-level alias.
new_class = graph_MT_pre__Pattern
| {
"content_hash": "6f26bbca59c303e008f1b80b27fc59a2",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 211,
"avg_line_length": 41.42857142857143,
"alnum_prop": 0.575095785440613,
"repo_name": "levilucio/SyVOLT",
"id": "c6d96f7619f97baffd512e5dde351c9048c6caf9",
"size": "2610",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "UMLRT2Kiltera_MM/graph_MT_pre__Pattern.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "166159"
},
{
"name": "Python",
"bytes": "34207588"
},
{
"name": "Shell",
"bytes": "1118"
}
],
"symlink_target": ""
} |
"""
Performance PubsubIO streaming test for Write/Read operations.
Caution: only test runners (e.g. TestDataflowRunner) support matchers
Example for TestDataflowRunner:
python -m apache_beam.io.gcp.pubsub_io_perf_test \
--test-pipeline-options="
--runner=TestDataflowRunner
--sdk_location=.../dist/apache-beam-x.x.x.dev0.tar.gz
--project=<GCP_PROJECT_ID>
--temp_location=gs://<BUCKET_NAME>/tmp
--staging_location=gs://<BUCKET_NAME>/staging
--wait_until_finish_duration=<TIME_IN_MS>
--pubsub_namespace=<PUBSUB_NAMESPACE>
--publish_to_big_query=<OPTIONAL><true/false>
--metrics_dataset=<OPTIONAL>
--metrics_table=<OPTIONAL>
--dataflow_worker_jar=<OPTIONAL>
--input_options='{
\"num_records\": <SIZE_OF_INPUT>
\"key_size\": 1
\"value_size\": <SIZE_OF_EACH_MESSAGE>
}'"
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import sys
from hamcrest import all_of
import apache_beam as beam
from apache_beam.io import Read
from apache_beam.io import ReadFromPubSub
from apache_beam.io.gcp.tests.pubsub_matcher import PubSubMessageMatcher
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.options.pipeline_options import SetupOptions
from apache_beam.options.pipeline_options import StandardOptions
from apache_beam.testing.load_tests.load_test import LoadTest
from apache_beam.testing.load_tests.load_test_metrics_utils import MeasureTime
from apache_beam.testing.synthetic_pipeline import SyntheticSource
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.transforms import trigger
from apache_beam.transforms import window
# pylint: disable=wrong-import-order, wrong-import-position
try:
from google.cloud import pubsub
except ImportError:
pubsub = None
# pylint: enable=wrong-import-order, wrong-import-position
# Metrics namespaces reported by the write and read halves of the test.
WRITE_METRICS_NAMESPACE = 'pubsub_io_perf_write'
READ_METRICS_NAMESPACE = 'pubsub_io_perf_read'
# Seconds the matcher waits overall / per pull for the expected message.
MATCHER_TIMEOUT = 60 * 15
MATCHER_PULL_TIMEOUT = 60 * 5
class PubsubIOPerfTest(LoadTest):
    """Shared setup for the Pub/Sub write and read performance tests."""

    def _setup_env(self):
        """Validate required pipeline options and cache test parameters."""
        if not self.pipeline.get_option('pubsub_namespace'):
            logging.error('--pubsub_namespace argument is required.')
            sys.exit(1)
        if not self.pipeline.get_option('wait_until_finish_duration'):
            logging.error('--wait_until_finish_duration argument is required.')
            sys.exit(1)
        self.pubsub_namespace = self.pipeline.get_option('pubsub_namespace')
        self.num_of_messages = int(self.input_options.get('num_records'))

    def _setup_pubsub(self):
        """Derive topic and subscription paths for the test and its matcher."""
        namespace = self.pubsub_namespace
        self.pub_client = pubsub.PublisherClient()
        self.sub_client = pubsub.SubscriberClient()
        self.topic_name = self.pub_client.topic_path(
            self.project_id, namespace)
        self.matcher_topic_name = self.pub_client.topic_path(
            self.project_id, namespace + '_matcher')
        self.read_sub_name = self.sub_client.subscription_path(
            self.project_id, namespace + '_read')
        self.read_matcher_sub_name = self.sub_client.subscription_path(
            self.project_id, namespace + '_read_matcher')
class PubsubWritePerfTest(PubsubIOPerfTest):
    """Streaming load test that publishes synthetic records to a topic."""

    def __init__(self):
        super(PubsubWritePerfTest, self).__init__(WRITE_METRICS_NAMESPACE)
        self._setup_env()
        self._setup_pubsub()
        self._setup_pipeline()

    def test(self):
        """Build the write pipeline: synthetic source -> PubsubMessage -> topic."""
        def to_pubsub_message(element):
            # Imports are local so the function can be pickled for workers.
            import uuid
            from apache_beam.io import PubsubMessage
            # element is (key, value); only the value becomes the payload.
            # A uuid attribute enables id_label-based deduplication.
            return PubsubMessage(
                data=element[1],
                attributes={'id': str(uuid.uuid1()).encode('utf-8')},
            )

        _ = (
            self.pipeline
            | 'Create input' >> Read(
                SyntheticSource(self.parse_synthetic_source_options()))
            | 'Format to pubsub message in bytes' >> beam.Map(to_pubsub_message)
            | 'Measure time' >> beam.ParDo(MeasureTime(self.metrics_namespace))
            | 'Write to Pubsub' >> beam.io.WriteToPubSub(
                self.topic_name,
                with_attributes=True,
                id_label='id',
            ))

    def _setup_pipeline(self):
        """Force streaming mode and main-session pickling on the pipeline."""
        options = PipelineOptions(self.pipeline.get_full_options_as_args())
        options.view_as(SetupOptions).save_main_session = True
        options.view_as(StandardOptions).streaming = True
        self.pipeline = TestPipeline(options=options)

    def _setup_pubsub(self):
        """Create the write topic plus the subscription the read test uses."""
        super(PubsubWritePerfTest, self)._setup_pubsub()
        _ = self.pub_client.create_topic(self.topic_name)
        _ = self.sub_client.create_subscription(
            self.read_sub_name,
            self.topic_name,
        )
class PubsubReadPerfTest(PubsubIOPerfTest):
    """Streaming load test that consumes messages and verifies the count."""

    def __init__(self):
        super(PubsubReadPerfTest, self).__init__(READ_METRICS_NAMESPACE)
        self._setup_env()
        self._setup_pubsub()
        self._setup_pipeline()

    def test(self):
        """Count consumed messages and publish the total to the matcher topic."""
        _ = (
            self.pipeline
            | 'Read from pubsub' >> ReadFromPubSub(
                subscription=self.read_sub_name,
                with_attributes=True,
                id_label='id',
            )
            # Payload content is irrelevant for counting; shrink each element.
            | beam.Map(lambda x: bytes(1)).with_output_types(bytes)
            | 'Measure time' >> beam.ParDo(MeasureTime(self.metrics_namespace))
            # Global window with a trigger that fires once the expected
            # number of messages has arrived.
            | 'Window' >> beam.WindowInto(
                window.GlobalWindows(),
                trigger=trigger.Repeatedly(
                    trigger.AfterCount(self.num_of_messages)),
                accumulation_mode=trigger.AccumulationMode.DISCARDING)
            | 'Count messages' >> beam.CombineGlobally(
                beam.combiners.CountCombineFn()).without_defaults().
            with_output_types(int)
            | 'Convert to bytes' >>
            beam.Map(lambda count: str(count).encode('utf-8'))
            | 'Write to Pubsub' >> beam.io.WriteToPubSub(self.matcher_topic_name))

    def _setup_pubsub(self):
        """Create the matcher topic and its verification subscription."""
        super(PubsubReadPerfTest, self)._setup_pubsub()
        _ = self.pub_client.create_topic(self.matcher_topic_name)
        _ = self.sub_client.create_subscription(
            self.read_matcher_sub_name,
            self.matcher_topic_name,
        )

    def _setup_pipeline(self):
        """Attach a PubSub matcher so the runner verifies the final count."""
        pubsub_msg_verifier = PubSubMessageMatcher(
            self.project_id,
            self.read_matcher_sub_name,
            expected_msg=[str(self.num_of_messages).encode('utf-8')],
            timeout=MATCHER_TIMEOUT,
            pull_timeout=MATCHER_PULL_TIMEOUT,
        )
        extra_opts = {
            'on_success_matcher': all_of(pubsub_msg_verifier),
            'streaming': True,
            'save_main_session': True
        }
        args = self.pipeline.get_full_options_as_args(**extra_opts)
        self.pipeline = TestPipeline(options=PipelineOptions(args))

    def cleanup(self):
        """Delete the Pub/Sub resources created by both halves of the test."""
        self.sub_client.delete_subscription(self.read_sub_name)
        self.sub_client.delete_subscription(self.read_matcher_sub_name)
        self.pub_client.delete_topic(self.topic_name)
        self.pub_client.delete_topic(self.matcher_topic_name)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # Run the write test first so the read test has messages to consume.
    PubsubWritePerfTest().run()
    PubsubReadPerfTest().run()
| {
"content_hash": "0929b7872d1c1e77ed36942f20f47860",
"timestamp": "",
"source": "github",
"line_count": 203,
"max_line_length": 78,
"avg_line_length": 33.99014778325123,
"alnum_prop": 0.6782608695652174,
"repo_name": "iemejia/incubator-beam",
"id": "a0457c0a4f257faf568ac9d54e3092403c3058e5",
"size": "7685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdks/python/apache_beam/io/gcp/pubsub_io_perf_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "22216"
},
{
"name": "Java",
"bytes": "9687045"
},
{
"name": "Protocol Buffer",
"bytes": "1407"
},
{
"name": "Shell",
"bytes": "10104"
}
],
"symlink_target": ""
} |
from goldsberry.masterclass import NbaDataProvider
from goldsberry.apiparams import *
class daily_scoreboard(NbaDataProvider):
    """Scoreboard tables for a single date ('scoreboardV2' endpoint)."""

    def __init__(self, date, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='scoreboardV2',
                                 default_params=p_league_sb, gameDate=date,
                                 **kwargs)

    def _table(self, index):
        # All public accessors are thin views over the positional result tables.
        return self._get_table_from_data(self._data_tables, index)

    def game_header(self):
        return self._table(0)

    def linescore(self):
        return self._table(1)

    def series_standings(self):
        return self._table(2)

    def last_meeting(self):
        return self._table(3)

    def eastern_conference_standings(self):
        return self._table(4)

    def western_conference_standings(self):
        return self._table(5)

    def available(self):
        return self._table(6)

    def team_leaders(self):
        return self._table(7)

    def _ticket_links(self):
        return self._table(8)

    def win_probability(self):
        return self._table(9)
class franchise_history(NbaDataProvider):
    """Current and defunct franchise histories ('franchisehistory')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='franchisehistory',
                                 default_params=p_league_history, **kwargs)

    def current_teams(self):
        """Table of active franchises."""
        return self._get_table_from_data(self._data_tables, 0)

    def defunct_teams(self):
        """Table of defunct franchises."""
        return self._get_table_from_data(self._data_tables, 1)
# This one might not work because it's the key 'resultSet', not 'resultSets'
# Confirmed does not work
class league_leaders(NbaDataProvider):
    """League statistical leaders ('leagueleaders').

    NOTE(review): the upstream comment reports this endpoint returns
    'resultSet' (singular) rather than 'resultSets', so the provider may
    fail to parse the response — confirm before relying on it.
    """

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leagueleaders',
                                 default_params=p_league_leaders, **kwargs)

    def leaders(self):
        return self._get_table_from_data(self._data_tables, 0)
class lineups(NbaDataProvider):
    """Lineup statistics ('leaguedashlineups')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leaguedashlineups',
                                 default_params=p_game_ids, **kwargs)

    def lineups(self):
        return self._get_table_from_data(self._data_tables, 0)
# Double Check Stem
class playoff_picture(NbaDataProvider):
    """Playoff picture tables ('playoffpicture')."""

    def __init__(self, **kwargs):
        # NOTE(review): default_params=p_game_ids mirrors the original code;
        # verify this is the intended parameter set for the endpoint.
        NbaDataProvider.__init__(self, url_modifier='playoffpicture',
                                 default_params=p_game_ids, **kwargs)

    def _table(self, index):
        # Accessors below are positional views over the response tables.
        return self._get_table_from_data(self._data_tables, index)

    def eastern_conf_playoff_picture(self):
        return self._table(0)

    def western_conf_playoff_picture(self):
        return self._table(1)

    def eastern_conf_standings(self):
        return self._table(2)

    def western_conf_standings(self):
        return self._table(3)

    def eastern_conf_remaining_games(self):
        return self._table(4)

    def western_conf_remaining_games(self):
        return self._table(5)
class team_stats_classic(NbaDataProvider):
    """Classic team statistics ('leaguedashteamstats')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leaguedashteamstats',
                                 default_params=p_league_classic, **kwargs)

    def stats(self):
        return self._get_table_from_data(self._data_tables, 0)
class player_stats_classic(NbaDataProvider):
    """Classic player statistics ('leaguedashplayerstats')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leaguedashplayerstats',
                                 default_params=p_league_classic, **kwargs)

    def stats(self):
        return self._get_table_from_data(self._data_tables, 0)
class team_stats_clutch(NbaDataProvider):
    """Clutch-time team statistics ('leaguedashteamclutch')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leaguedashteamclutch',
                                 default_params=p_league_clutch, **kwargs)

    def clutch_stats(self):
        return self._get_table_from_data(self._data_tables, 0)
class player_stats_clutch(NbaDataProvider):
    """Clutch-time player statistics ('leaguedashplayerclutch')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leaguedashplayerclutch',
                                 default_params=p_league_clutch, **kwargs)

    def clutch_stats(self):
        return self._get_table_from_data(self._data_tables, 0)
class player_stats_hustle(NbaDataProvider):
    """Hustle-stat player statistics ('leaguehustlestatsplayer')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leaguehustlestatsplayer',
                                 default_params=p_league_leaders, **kwargs)

    def hustle_stats(self):
        return self._get_table_from_data(self._data_tables, 0)
class team_stats_hustle(NbaDataProvider):
    """Hustle-stat team statistics ('leaguehustlestatsteam')."""

    def __init__(self, **kwargs):
        NbaDataProvider.__init__(self, url_modifier='leaguehustlestatsteam',
                                 default_params=p_league_leaders, **kwargs)

    def hustle_stats(self):
        return self._get_table_from_data(self._data_tables, 0)
# class transactions(BASE):
# _pull_url = "http://stats.nba.com/feeds/NBAPlayerTransactions-559107/json.js"
# def transactions(self):
# return #self._pull.json()['ListItems']
# # Shooting class needs some further study of the data because it classifies shots in two levels.
# # This class will be used for Player & Team as well as Self & Opponent
# class shooting(object):
# def __init__(self,team=False, measure=1, season=2015, datefrom='', dateto='',distancerange=1,
# gamescope=1, gamesegment=1, lastngames=0, league="NBA", location=1, month=0, opponentteamid=0,
# outcome=1, paceadjust=1, permode=1, period=0, playerexperience=1, playerposition=1, plusminus=1,
# rank=1, seasonsegment=1, seasontype=1, starterbench=1, vsconference=1, vsdivision=1):
# if team:
# self._url = "http://stats.nba.com/stats/leaguedashteamshotlocations?"
# else: self._url = "http://stats.nba.com/stats/leaguedashplayershotlocations?"
# if measure == 2:
# measure="Opponent"
# else: measure='Base'
# self._api_param = {
# 'DateFrom':datefrom,
# 'DateTo':dateto,
# 'DistanceRange':distance_range(distancerange),
# 'GameScope':game_scope(gamescope),
# 'GameSegment':game_segment(gamesegment),
# 'LastNGames':lastngames,
# 'LeagueID':_nbaLeague(league),
# 'Location':location(location),
# 'MeasureType':measure,
# 'Month':month,
# 'OpponentTeamID':opponentteamid,
# 'Outcome':outcome(outcome),
# 'PaceAdjust':pace_adjust(paceadjust),
# 'PerMode':per_mode_large(permode),
# 'Period':period,
# 'PlayerExperience':player_experience(playerexperience),
# 'PlayerPosition':player_position(playerposition),
# 'PlusMinus':plus_minus(plusminus),
# 'Rank':rank(rank),
# 'Season':_nbaSeason(season),
# 'SeasonSegment':season_segment(seasonsegment),
# 'SeasonType':season_type(seasontype),
# 'StarterBench':starter_bench(starterbench),
# 'VsConference':vs_conference(vsconference),
# 'VsDivision':vs_division(vsdivision)
# }
# self._pull = _requests.get(self._url, params=self._api_param)
# def headers(self):
# _skip = self._pull.json()['resultSets']['headers'][0]['columnsToSkip']
# _span = self._pull.json()['resultSets']['headers'][0]['columnSpan']
# _headers = []
# for i in self._pull.json()['resultSets']['headers'][0]['columnNames']:
# for j in self._pull.json()['resultSets']['headers'][1]['columnNames'][_skip:_skip+_span]:
# _headers.append(j + " " + i)
# _headers = self._pull.json()['resultSets']['headers'][1]['columnNames'][:_skip] + _headers
# return _headers
# def shooting(self):
# _headers = self.headers()
# _values = self._pull.json()['resultSets']['rowSet']
# return [dict(zip(_headers, value)) for value in _values]
# Public API of this module.
__all__ = [
    'daily_scoreboard',
    'franchise_history',
    'playoff_picture',
    'team_stats_classic',
    'player_stats_classic',
    'lineups',
    'team_stats_clutch',
    'player_stats_clutch',
    'league_leaders',
    # Fix: the hustle classes are defined above but were missing here,
    # so `from ... import *` silently omitted them.
    'player_stats_hustle',
    'team_stats_hustle',
]
"content_hash": "af5ae2ac367d59b6f89cabaf9b7db2ee",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 118,
"avg_line_length": 40.225352112676056,
"alnum_prop": 0.6434407096171803,
"repo_name": "bradleyfay/py-Goldsberry",
"id": "c8ee19f79fa201f8b81089479cf72e4ccf7b1948",
"size": "8568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "goldsberry/league/league.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "210"
},
{
"name": "Python",
"bytes": "217060"
}
],
"symlink_target": ""
} |
"""
WSGI config for Webinterface project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os

# Django must know the settings module before the application object is
# built; setdefault preserves any value already set in the environment.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Webinterface.settings")

from django.core.wsgi import get_wsgi_application

# Module-level WSGI callable imported by servers (gunicorn, uWSGI, mod_wsgi).
application = get_wsgi_application()
| {
"content_hash": "54ed27404ec4b426e42ae7ce5dfaf0a4",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.5,
"alnum_prop": 0.7794486215538847,
"repo_name": "elnappo/Baulicht",
"id": "eb437cc28cc5407180bcd46b5ff2c60f879ab018",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Webinterface/Baulicht/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "21843"
},
{
"name": "CSS",
"bytes": "1444"
},
{
"name": "Groovy",
"bytes": "1145"
},
{
"name": "Java",
"bytes": "21611"
},
{
"name": "Python",
"bytes": "14729"
},
{
"name": "Shell",
"bytes": "141"
}
],
"symlink_target": ""
} |
"""Support for exposing NX584 elements as sensors."""
from __future__ import annotations
import logging
import threading
import time
from nx584 import client as nx584_client
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA as BINARY_SENSOR_DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorDeviceClass,
BinarySensorEntity,
)
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)

# Configuration keys for this platform.
CONF_EXCLUDE_ZONES = "exclude_zones"
CONF_ZONE_TYPES = "zone_types"

# Defaults for reaching the pynx584 HTTP gateway.
DEFAULT_HOST = "localhost"
DEFAULT_PORT = "5007"
DEFAULT_SSL = False

# Maps a zone number to a binary_sensor device-class override.
ZONE_TYPES_SCHEMA = vol.Schema({cv.positive_int: BINARY_SENSOR_DEVICE_CLASSES_SCHEMA})

PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Optional(CONF_EXCLUDE_ZONES, default=[]): vol.All(
            cv.ensure_list, [cv.positive_int]
        ),
        vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
        vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
        vol.Optional(CONF_ZONE_TYPES, default={}): ZONE_TYPES_SCHEMA,
    }
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the NX584 binary sensor platform.

    Connects to the pynx584 gateway, creates one sensor per non-excluded
    zone, and starts a watcher thread that pushes zone state changes.
    """
    host = config[CONF_HOST]
    port = config[CONF_PORT]
    exclude = config[CONF_EXCLUDE_ZONES]
    zone_types = config[CONF_ZONE_TYPES]

    try:
        client = nx584_client.Client(f"http://{host}:{port}")
        zones = client.list_zones()
    except requests.exceptions.ConnectionError as ex:
        _LOGGER.error("Unable to connect to NX584: %s", str(ex))
        return

    version = [int(v) for v in client.get_version().split(".")]
    if version < [1, 1]:
        # Fix: the message previously claimed ">=0.2 required", which
        # contradicted the actual `< [1, 1]` check above.
        _LOGGER.error("NX584 is too old to use for sensors (>=1.1 required)")
        return

    # One sensor per zone, honoring per-zone device-class overrides.
    zone_sensors = {
        zone["number"]: NX584ZoneSensor(
            zone, zone_types.get(zone["number"], BinarySensorDeviceClass.OPENING)
        )
        for zone in zones
        if zone["number"] not in exclude
    }
    if zone_sensors:
        add_entities(zone_sensors.values())
        # The watcher mutates the sensors' zone dicts as events arrive.
        watcher = NX584Watcher(client, zone_sensors)
        watcher.start()
    else:
        _LOGGER.warning("No zones found on NX584")
class NX584ZoneSensor(BinarySensorEntity):
    """Representation of a NX584 zone as a sensor."""

    def __init__(self, zone, zone_type):
        """Initialize the nx584 binary sensor."""
        # NOTE: NX584Watcher writes self._zone["state"] directly, so this
        # dict is shared, live state — not a private snapshot.
        self._zone = zone
        self._zone_type = zone_type

    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        return self._zone_type

    @property
    def should_poll(self):
        """No polling needed."""
        # Updates are pushed by NX584Watcher via schedule_update_ha_state().
        return False

    @property
    def name(self):
        """Return the name of the binary sensor."""
        return self._zone["name"]

    @property
    def is_on(self):
        """Return true if the binary sensor is on."""
        # True means "faulted" or "open" or "abnormal state"
        return self._zone["state"]

    @property
    def extra_state_attributes(self):
        """Return the state attributes."""
        return {"zone_number": self._zone["number"]}
class NX584Watcher(threading.Thread):
    """Event listener thread to process NX584 events."""

    def __init__(self, client, zone_sensors):
        """Initialize NX584 watcher thread."""
        super().__init__()
        # Daemon thread: must not keep Home Assistant alive at shutdown.
        self.daemon = True
        self._client = client
        # Mapping of zone number -> NX584ZoneSensor, shared with the platform.
        self._zone_sensors = zone_sensors

    def _process_zone_event(self, event):
        # Update the matching sensor's shared zone dict in place, then ask
        # Home Assistant to refresh the entity.
        zone = event["zone"]
        # pylint: disable=protected-access
        if not (zone_sensor := self._zone_sensors.get(zone)):
            return
        zone_sensor._zone["state"] = event["zone_state"]
        zone_sensor.schedule_update_ha_state()

    def _process_events(self, events):
        # Only zone status changes matter; other event types are ignored.
        for event in events:
            if event.get("type") == "zone_status":
                self._process_zone_event(event)

    def _run(self):
        """Throw away any existing events so we don't replay history."""
        self._client.get_events()
        # get_events() blocks until events arrive; loop forever applying them.
        while True:
            if events := self._client.get_events():
                self._process_events(events)

    def run(self):
        """Run the watcher."""
        while True:
            try:
                self._run()
            except requests.exceptions.ConnectionError:
                # Server unreachable: log, back off briefly, reconnect.
                _LOGGER.error("Failed to reach NX584 server")
                time.sleep(10)
| {
"content_hash": "e85d98acb3efde77bc2f860f932ab3d9",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 86,
"avg_line_length": 30.264150943396228,
"alnum_prop": 0.6319617622610141,
"repo_name": "rohitranjan1991/home-assistant",
"id": "cbd1796b768336c6c463f17a6b168ae00b03e4b9",
"size": "4812",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/nx584/binary_sensor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
import unittest
from unittest import mock
from google.api_core.exceptions import AlreadyExists
from google.cloud.vision_v1.types import Product, ProductSet, ReferenceImage
from airflow.providers.google.cloud.operators.vision import (
CloudVisionAddProductToProductSetOperator,
CloudVisionCreateProductOperator,
CloudVisionCreateProductSetOperator,
CloudVisionCreateReferenceImageOperator,
CloudVisionDeleteProductOperator,
CloudVisionDeleteProductSetOperator,
CloudVisionDeleteReferenceImageOperator,
CloudVisionDetectImageLabelsOperator,
CloudVisionDetectImageSafeSearchOperator,
CloudVisionDetectTextOperator,
CloudVisionGetProductOperator,
CloudVisionGetProductSetOperator,
CloudVisionImageAnnotateOperator,
CloudVisionRemoveProductFromProductSetOperator,
CloudVisionTextDetectOperator,
CloudVisionUpdateProductOperator,
CloudVisionUpdateProductSetOperator,
)
# Shared fixtures for the Cloud Vision operator tests below.
PRODUCTSET_TEST = ProductSet(display_name='Test Product Set')
PRODUCTSET_ID_TEST = 'my-productset'
PRODUCT_TEST = Product(display_name='My Product 1', product_category='toys')
PRODUCT_ID_TEST = 'my-product'
REFERENCE_IMAGE_TEST = ReferenceImage(uri='gs://bucket_name/file.txt')
REFERENCE_IMAGE_ID_TEST = 'my-reference-image'
# Single and batched annotate requests.
ANNOTATE_REQUEST_TEST = {'image': {'source': {'image_uri': 'https://foo.com/image.jpg'}}}
ANNOTATE_REQUEST_BATCH_TEST = [
    {'image': {'source': {'image_uri': 'https://foo.com/image1.jpg'}}},
    {'image': {'source': {'image_uri': 'https://foo.com/image2.jpg'}}},
]
LOCATION_TEST = 'europe-west1'
GCP_CONN_ID = 'google_cloud_default'
DETECT_TEST_IMAGE = {"source": {"image_uri": "test_uri"}}
class TestCloudVisionProductSetCreate(unittest.TestCase):
    """Tests for CloudVisionCreateProductSetOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        # The hook must be built with the default connection and no
        # impersonation, and create_product_set forwarded with defaults.
        mock_hook.return_value.create_product_set.return_value = {}
        op = CloudVisionCreateProductSetOperator(
            location=LOCATION_TEST, product_set=PRODUCTSET_TEST, task_id='id'
        )
        op.execute(context=None)
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.create_product_set.assert_called_once_with(
            location=LOCATION_TEST,
            product_set=PRODUCTSET_TEST,
            product_set_id=None,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_already_exists(self, mock_hook):
        mock_hook.return_value.create_product_set.side_effect = AlreadyExists(message='')
        # Exception AlreadyExists not raised, caught in the operator's execute() - idempotence
        op = CloudVisionCreateProductSetOperator(
            location=LOCATION_TEST,
            product_set=PRODUCTSET_TEST,
            product_set_id=PRODUCTSET_ID_TEST,
            project_id='mock-project-id',
            task_id='id',
        )
        result = op.execute(None)
        # The operator falls back to returning the pre-existing id.
        assert PRODUCTSET_ID_TEST == result
class TestCloudVisionProductSetUpdate(unittest.TestCase):
    """Tests for CloudVisionUpdateProductSetOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        # Verify hook construction and that update_product_set receives the
        # minimal arguments with all optional parameters left at defaults.
        mock_hook.return_value.update_product_set.return_value = {}
        op = CloudVisionUpdateProductSetOperator(
            location=LOCATION_TEST, product_set=PRODUCTSET_TEST, task_id='id'
        )
        op.execute(context=None)
        mock_hook.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        mock_hook.return_value.update_product_set.assert_called_once_with(
            location=LOCATION_TEST,
            product_set=PRODUCTSET_TEST,
            product_set_id=None,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
            update_mask=None,
        )
class TestCloudVisionProductSetGet(unittest.TestCase):
    """Tests for CloudVisionGetProductSetOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, hook_class_mock):
        hook = hook_class_mock.return_value
        hook.get_product_set.return_value = {}

        operator = CloudVisionGetProductSetOperator(
            location=LOCATION_TEST, product_set_id=PRODUCTSET_ID_TEST, task_id='id'
        )
        operator.execute(context=None)

        # Hook constructed with default connection, no impersonation.
        hook_class_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        # Call forwarded with all optional parameters at their defaults.
        hook.get_product_set.assert_called_once_with(
            location=LOCATION_TEST,
            product_set_id=PRODUCTSET_ID_TEST,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionProductSetDelete(unittest.TestCase):
    """Tests for CloudVisionDeleteProductSetOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, hook_class_mock):
        hook = hook_class_mock.return_value
        hook.delete_product_set.return_value = {}

        operator = CloudVisionDeleteProductSetOperator(
            location=LOCATION_TEST, product_set_id=PRODUCTSET_ID_TEST, task_id='id'
        )
        operator.execute(context=None)

        # Hook constructed with default connection, no impersonation.
        hook_class_mock.assert_called_once_with(
            gcp_conn_id=GCP_CONN_ID,
            impersonation_chain=None,
        )
        # Delete forwarded with all optional parameters at their defaults.
        hook.delete_product_set.assert_called_once_with(
            location=LOCATION_TEST,
            product_set_id=PRODUCTSET_ID_TEST,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionProductCreate(unittest.TestCase):
    """Tests for CloudVisionCreateProductOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards the product and all defaults to the hook."""
        hook = mock_hook.return_value
        hook.create_product.return_value = {}
        operator = CloudVisionCreateProductOperator(
            task_id='id', location=LOCATION_TEST, product=PRODUCT_TEST
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        hook.create_product.assert_called_once_with(
            location=LOCATION_TEST,
            product=PRODUCT_TEST,
            product_id=None,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_already_exists(self, mock_hook):
        """AlreadyExists from the hook is swallowed: the operator is idempotent."""
        mock_hook.return_value.create_product.side_effect = AlreadyExists(message='')
        operator = CloudVisionCreateProductOperator(
            task_id='id',
            project_id='mock-project-id',
            location=LOCATION_TEST,
            product=PRODUCT_TEST,
            product_id=PRODUCT_ID_TEST,
        )
        # execute() catches AlreadyExists and still returns the requested id.
        assert operator.execute(None) == PRODUCT_ID_TEST
class TestCloudVisionProductGet(unittest.TestCase):
    """Tests for CloudVisionGetProductOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards location/id and all defaults to the hook."""
        hook = mock_hook.return_value
        hook.get_product.return_value = {}
        operator = CloudVisionGetProductOperator(
            task_id='id', location=LOCATION_TEST, product_id=PRODUCT_ID_TEST
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        hook.get_product.assert_called_once_with(
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionProductUpdate(unittest.TestCase):
    """Tests for CloudVisionUpdateProductOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards the product and all defaults to the hook."""
        hook = mock_hook.return_value
        hook.update_product.return_value = {}
        operator = CloudVisionUpdateProductOperator(
            task_id='id', location=LOCATION_TEST, product=PRODUCT_TEST
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        hook.update_product.assert_called_once_with(
            location=LOCATION_TEST,
            product=PRODUCT_TEST,
            product_id=None,
            project_id=None,
            update_mask=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionProductDelete(unittest.TestCase):
    """Tests for CloudVisionDeleteProductOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards location/id and all defaults to the hook."""
        hook = mock_hook.return_value
        hook.delete_product.return_value = {}
        operator = CloudVisionDeleteProductOperator(
            task_id='id', location=LOCATION_TEST, product_id=PRODUCT_ID_TEST
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        hook.delete_product.assert_called_once_with(
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionReferenceImageCreate(unittest.TestCase):
    """Tests for CloudVisionCreateReferenceImageOperator."""

    @mock.patch(
        'airflow.providers.google.cloud.operators.vision.CloudVisionHook',
    )
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards the reference image and all defaults to the hook."""
        hook = mock_hook.return_value
        hook.create_reference_image.return_value = {}
        operator = CloudVisionCreateReferenceImageOperator(
            task_id='id',
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image=REFERENCE_IMAGE_TEST,
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        hook.create_reference_image.assert_called_once_with(
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image=REFERENCE_IMAGE_TEST,
            reference_image_id=None,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )

    @mock.patch(
        'airflow.providers.google.cloud.operators.vision.CloudVisionHook',
        **{'return_value.create_reference_image.side_effect': AlreadyExists("MESSAGe")},
    )
    def test_already_exists(self, mock_hook):
        """AlreadyExists from the hook is swallowed: the operator is idempotent."""
        operator = CloudVisionCreateReferenceImageOperator(
            task_id='id',
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image=REFERENCE_IMAGE_TEST,
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.create_reference_image.assert_called_once_with(
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image=REFERENCE_IMAGE_TEST,
            reference_image_id=None,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionReferenceImageDelete(unittest.TestCase):
    """Tests for CloudVisionDeleteReferenceImageOperator."""

    @mock.patch(
        'airflow.providers.google.cloud.operators.vision.CloudVisionHook',
    )
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards product/reference-image ids and defaults to the hook."""
        hook = mock_hook.return_value
        hook.delete_reference_image.return_value = {}
        operator = CloudVisionDeleteReferenceImageOperator(
            task_id='id',
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image_id=REFERENCE_IMAGE_ID_TEST,
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        hook.delete_reference_image.assert_called_once_with(
            location=LOCATION_TEST,
            product_id=PRODUCT_ID_TEST,
            reference_image_id=REFERENCE_IMAGE_ID_TEST,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionAddProductToProductSetOperator(unittest.TestCase):
    """Tests for CloudVisionAddProductToProductSetOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards both ids and all defaults to the hook."""
        operator = CloudVisionAddProductToProductSetOperator(
            task_id='id',
            location=LOCATION_TEST,
            product_set_id=PRODUCTSET_ID_TEST,
            product_id=PRODUCT_ID_TEST,
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.add_product_to_product_set.assert_called_once_with(
            product_set_id=PRODUCTSET_ID_TEST,
            product_id=PRODUCT_ID_TEST,
            location=LOCATION_TEST,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionRemoveProductFromProductSetOperator(unittest.TestCase):
    """Tests for CloudVisionRemoveProductFromProductSetOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path(self, mock_hook):
        """Operator forwards both ids and all defaults to the hook."""
        operator = CloudVisionRemoveProductFromProductSetOperator(
            task_id='id',
            location=LOCATION_TEST,
            product_set_id=PRODUCTSET_ID_TEST,
            product_id=PRODUCT_ID_TEST,
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.remove_product_from_product_set.assert_called_once_with(
            product_set_id=PRODUCTSET_ID_TEST,
            product_id=PRODUCT_ID_TEST,
            location=LOCATION_TEST,
            project_id=None,
            retry=None,
            timeout=None,
            metadata=(),
        )
class TestCloudVisionAnnotateImageOperator(unittest.TestCase):
    """Tests for CloudVisionImageAnnotateOperator."""

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path_for_one_image(self, mock_hook):
        """A single request dict is routed to annotate_image."""
        operator = CloudVisionImageAnnotateOperator(task_id='id', request=ANNOTATE_REQUEST_TEST)
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.annotate_image.assert_called_once_with(
            request=ANNOTATE_REQUEST_TEST, retry=None, timeout=None
        )

    @mock.patch('airflow.providers.google.cloud.operators.vision.CloudVisionHook')
    def test_minimal_green_path_for_batch(self, mock_hook):
        """A list of requests is routed to batch_annotate_images instead."""
        operator = CloudVisionImageAnnotateOperator(
            task_id='id', request=ANNOTATE_REQUEST_BATCH_TEST
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.batch_annotate_images.assert_called_once_with(
            requests=ANNOTATE_REQUEST_BATCH_TEST, retry=None, timeout=None
        )
class TestCloudVisionDetectTextOperator(unittest.TestCase):
    """Tests for CloudVisionDetectTextOperator."""

    @mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
    def test_minimal_green_path(self, mock_hook):
        """With no extras, text_detection gets the image and all-None options."""
        operator = CloudVisionDetectTextOperator(task_id="id", image=DETECT_TEST_IMAGE)
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.text_detection.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
        )

    @mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
    def test_additional_params(self, mock_hook):
        """language_hints / web_detection_params are merged into image_context."""
        operator = CloudVisionDetectTextOperator(
            task_id="id",
            image=DETECT_TEST_IMAGE,
            language_hints="pl",
            web_detection_params={'param': 'test'},
            additional_properties={
                'image_context': {'additional_property_1': 'add_1'},
                'additional_property_2': 'add_2',
            },
        )
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.text_detection.assert_called_once_with(
            image=DETECT_TEST_IMAGE,
            max_results=None,
            retry=None,
            timeout=None,
            additional_properties={
                'additional_property_2': 'add_2',
                'image_context': {
                    'language_hints': 'pl',
                    'additional_property_1': 'add_1',
                    'web_detection_params': {'param': 'test'},
                },
            },
        )
class TestCloudVisionDetectDocumentTextOperator(unittest.TestCase):
    """Tests for CloudVisionTextDetectOperator (document text detection)."""

    @mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
    def test_minimal_green_path(self, mock_hook):
        """Operator routes to document_text_detection with all-None options."""
        operator = CloudVisionTextDetectOperator(task_id="id", image=DETECT_TEST_IMAGE)
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.document_text_detection.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
        )
class TestCloudVisionDetectImageLabelsOperator(unittest.TestCase):
    """Tests for CloudVisionDetectImageLabelsOperator."""

    @mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
    def test_minimal_green_path(self, mock_hook):
        """Operator routes to label_detection with all-None options."""
        operator = CloudVisionDetectImageLabelsOperator(task_id="id", image=DETECT_TEST_IMAGE)
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.label_detection.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
        )
class TestCloudVisionDetectImageSafeSearchOperator(unittest.TestCase):
    """Tests for CloudVisionDetectImageSafeSearchOperator."""

    @mock.patch("airflow.providers.google.cloud.operators.vision.CloudVisionHook")
    def test_minimal_green_path(self, mock_hook):
        """Operator routes to safe_search_detection with all-None options."""
        operator = CloudVisionDetectImageSafeSearchOperator(task_id="id", image=DETECT_TEST_IMAGE)
        operator.execute(context=None)
        mock_hook.assert_called_once_with(gcp_conn_id=GCP_CONN_ID, impersonation_chain=None)
        mock_hook.return_value.safe_search_detection.assert_called_once_with(
            image=DETECT_TEST_IMAGE, max_results=None, retry=None, timeout=None, additional_properties=None
        )
| {
"content_hash": "005b0cd4203e890ea87fc094d7ff8a04",
"timestamp": "",
"source": "github",
"line_count": 493,
"max_line_length": 108,
"avg_line_length": 39.302231237322516,
"alnum_prop": 0.6381606110652354,
"repo_name": "bolkedebruin/airflow",
"id": "3c256febd6c9bedfa9e8cb9f8a853e77da81d936",
"size": "20164",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/providers/google/cloud/operators/test_vision.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25286"
},
{
"name": "Dockerfile",
"bytes": "40459"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "157840"
},
{
"name": "JavaScript",
"bytes": "167972"
},
{
"name": "Jinja",
"bytes": "33382"
},
{
"name": "Jupyter Notebook",
"bytes": "2933"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "19287942"
},
{
"name": "Shell",
"bytes": "645244"
},
{
"name": "TypeScript",
"bytes": "173854"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import

from .base import *

import dj_database_url

ENV = 'HEROKU'

# NOTE(review): DEBUG is enabled in a settings module that serves public
# hosts (neurobrush.com) — confirm this is intentional before deploying.
DEBUG = True

ALLOWED_HOSTS = [
    'neurobrush.com',
    'www.neurobrush.com'
]

# Heroku supplies the database via the DATABASE_URL environment variable;
# dj_database_url.config() parses it into a Django DATABASES entry.
# The previous hand-written entry had an invalid ENGINE
# ('django.db.backends.' is not a backend) and empty credentials, so it
# could never connect, while dj_database_url was imported but unused.
DATABASES = {
    'default': dj_database_url.config()
}

# Serve the app with gunicorn on Heroku.
INSTALLED_APPS += (
    'gunicorn',
)

STATICFILES_ROOT = PROJECT_DIR.child('static')
# Static files are served from the S3 bucket (protocol-relative URL).
STATIC_URL = '//' + AWS_STORAGE_BUCKET_NAME + '.s3.amazonaws.com/'
| {
"content_hash": "b13aff40e859e3dc60b14b8ea8262f6e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 127,
"avg_line_length": 26.1,
"alnum_prop": 0.5699233716475096,
"repo_name": "rootux/neurobrush",
"id": "f89c283076729d7159bd26375b6bbcc924843715",
"size": "1044",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neurobrush_web/settings/heroku.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "7002"
},
{
"name": "CSS",
"bytes": "1200"
},
{
"name": "Java",
"bytes": "67287"
},
{
"name": "JavaScript",
"bytes": "902751"
},
{
"name": "Perl",
"bytes": "872"
},
{
"name": "Python",
"bytes": "13837"
}
],
"symlink_target": ""
} |
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def DistributedVirtualSwitchManagerHostArrayFilter(vim, *args, **kwargs):
    '''Check host compatibility against all hosts specified in the array.

    Factory for the ns0:DistributedVirtualSwitchManagerHostArrayFilter data
    object. Positional arguments are assigned to the required then optional
    attribute names in order; keyword arguments are validated against the
    known attribute names.

    :param vim: connection object exposing ``client.factory.create``
    :raises IndexError: if fewer than the 2 required arguments are supplied
    :raises InvalidArgumentError: on an unrecognised keyword argument
    '''

    obj = vim.client.factory.create('ns0:DistributedVirtualSwitchManagerHostArrayFilter')

    # do some validation checking...
    # The guard counts positional and keyword arguments together; report the
    # same total, and the number actually required (the original message
    # claimed "at least 3" and only counted len(args), while the check
    # enforced 2 over args + kwargs).
    if (len(args) + len(kwargs)) < 2:
        raise IndexError('Expected at least 2 arguments got: %d' % (len(args) + len(kwargs)))

    required = [ 'host', 'inclusive' ]
    optional = [ 'dynamicProperty', 'dynamicType' ]

    # Positional arguments fill the attribute names in declaration order.
    for name, arg in zip(required+optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must match a known attribute name exactly.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s.  Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
| {
"content_hash": "58b438e19fb11e836f92ec54021d275c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 124,
"avg_line_length": 33.71875,
"alnum_prop": 0.6181649675625579,
"repo_name": "xuru/pyvisdk",
"id": "32423dec7bf1ff95ea74a9578d60efe80bc328e9",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyvisdk/do/distributed_virtual_switch_manager_host_array_filter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "369"
},
{
"name": "Python",
"bytes": "3037849"
},
{
"name": "Shell",
"bytes": "4517"
}
],
"symlink_target": ""
} |
import socket

# Minimal blocking TCP server (Python 2 syntax: "print" statements).
# Listens on localhost:5555 and serves one client at a time.
tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
orig = ("127.0.0.1",5555)
tcp.bind(orig)
tcp.listen(1)

# Outer loop: accept the next client after the previous one disconnects.
while True:
    conn, cliente = tcp.accept()
    print 'Conectado por ', cliente
    # Inner loop: echo an acknowledgement for every chunk received.
    while True:
        msg = conn.recv(1024)
        # recv() returning an empty string means the peer closed the socket.
        if not msg: break
        print cliente, msg
        conn.send("Recebido")
    print 'Finalizando conexao do cliente', cliente
    conn.close()
"content_hash": "d424a84ab8c75221a5aa6c537403b2c5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 23.823529411764707,
"alnum_prop": 0.6395061728395062,
"repo_name": "ufgup/lab-redes-socket-python",
"id": "313454224e1eb0decf8790e5419f52523c6bcd00",
"size": "423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "863"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
# add to readme starting from:
from django.core.urlresolvers import reverse
from skd_smoke import SmokeTestCase
class RedirectToSmokeTestCase(SmokeTestCase):
    # Declarative configuration read by skd_smoke's SmokeTestCase:
    # (url name, expected status code, HTTP method, extra params).
    # Here: an anonymous GET of the protected 'is_authenticated' view must
    # 302-redirect to the login page with ?next= pointing back at it.
    TESTS_CONFIGURATION = (
        ('is_authenticated', 302, 'GET', {
            'redirect_to': '%s?next=%s' % (reverse('login'),
                                           reverse('is_authenticated')),
            'comment': 'Anonymous profile access with check of redirect url'
        }),
    )
| {
"content_hash": "56e4a655f91788486e0df9496081547a",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 76,
"avg_line_length": 31.375,
"alnum_prop": 0.6075697211155379,
"repo_name": "steelkiwi/django-skd-smoke",
"id": "460d5900d3872a5853cda871d347affb99d0313b",
"size": "526",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "example_project/test_example_3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "61791"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import json
import random
import contextlib
import shutil
import pytest
import tempfile
from pathlib import Path
from thinc.neural.optimizers import Adam
from ...gold import GoldParse
from ...pipeline import EntityRecognizer
from ...lang.en import English
# Python 2/3 compatibility: Python 3 has no `unicode` builtin, so alias it
# to `str` there; on Python 2 the name already exists and is left alone.
try:
    unicode
except NameError:
    unicode = str
@pytest.fixture
def train_data():
    """NER training samples as [text, [[start_char, end_char, label], ...]]."""
    return [
        ["hey",[]],
        ["howdy",[]],
        ["hey there",[]],
        ["hello",[]],
        ["hi",[]],
        ["i'm looking for a place to eat",[]],
        ["i'm looking for a place in the north of town",[[31,36,"location"]]],
        ["show me chinese restaurants",[[8,15,"cuisine"]]],
        ["show me chines restaurants",[[8,14,"cuisine"]]],
        ["yes",[]],
        ["yep",[]],
        ["yeah",[]],
        ["show me a mexican place in the centre",[[31,37,"location"], [10,17,"cuisine"]]],
        ["bye",[]],["goodbye",[]],
        ["good bye",[]],
        ["stop",[]],
        ["end",[]],
        ["i am looking for an indian spot",[[20,26,"cuisine"]]],
        ["search for restaurants",[]],
        ["anywhere in the west",[[16,20,"location"]]],
        ["central indian restaurant",[[0,7,"location"],[8,14,"cuisine"]]],
        ["indeed",[]],
        ["that's right",[]],
        ["ok",[]],
        ["great",[]]
    ]
@pytest.fixture
def additional_entity_types():
    """Entity labels not present in the pretrained model's label set."""
    return ['cuisine', 'location']
@contextlib.contextmanager
def temp_save_model(model):
    """Persist *model* to a temporary directory and remove it afterwards.

    Yields the directory path (a plain ``str``, as returned by
    ``tempfile.mkdtemp``). Cleanup runs even if the ``with`` body raises.
    """
    model_dir = tempfile.mkdtemp()
    try:
        model.to_disk(model_dir)
        yield model_dir
    finally:
        # mkdtemp() returns a str, not a pathlib.Path, so the original
        # `model_dir.as_posix()` raised AttributeError on exit and the
        # directory was also leaked whenever the body raised.
        shutil.rmtree(model_dir)
@pytest.mark.xfail
@pytest.mark.models('en')
def test_issue910(EN, train_data, additional_entity_types):
    '''Test that adding entities and resuming training works passably OK.

    There are two issues here:

    1) We have to readd labels. This isn't very nice.
    2) There's no way to set the learning rate for the weight update, so we
       end up out-of-scale, causing it to learn too fast.
    '''
    nlp = EN
    doc = nlp(u"I am looking for a restaurant in Berlin")
    # Snapshot of pre-training entity predictions (currently unused).
    ents_before_train = [(ent.label_, ent.text) for ent in doc.ents]
    # Fine tune the ner model
    # New labels must be registered before training can produce them.
    for entity_type in additional_entity_types:
        nlp.entity.add_label(entity_type)
    sgd = Adam(nlp.entity.model[0].ops, 0.001)
    for itn in range(10):
        random.shuffle(train_data)
        for raw_text, entity_offsets in train_data:
            # Run tagger and tensorizer first so the NER update sees the
            # same upstream annotations it would get in the full pipeline.
            doc = nlp.make_doc(raw_text)
            nlp.tagger(doc)
            nlp.tensorizer(doc)
            gold = GoldParse(doc, entities=entity_offsets)
            loss = nlp.entity.update(doc, gold, sgd=sgd, drop=0.5)
    with temp_save_model(nlp.entity) as model_dir:
        # Load the fine tuned model
        loaded_ner = EntityRecognizer(nlp.vocab)
        loaded_ner.from_disk(model_dir)
    # The reloaded model must reproduce every annotated span exactly.
    for raw_text, entity_offsets in train_data:
        doc = nlp.make_doc(raw_text)
        nlp.tagger(doc)
        loaded_ner(doc)
        ents = {(ent.start_char, ent.end_char): ent.label_ for ent in doc.ents}
        for start, end, label in entity_offsets:
            assert ents[(start, end)] == label
| {
"content_hash": "b350c316d759de2ff6a68c1a260bf32e",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 94,
"avg_line_length": 31.54368932038835,
"alnum_prop": 0.5743305632502308,
"repo_name": "ryfeus/lambda-packs",
"id": "94a2562fd5ad60d37ecd1361b83381ea876b8ede",
"size": "3264",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Spacy/source2.7/spacy/tests/regression/test_issue910.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9768343"
},
{
"name": "C++",
"bytes": "76566960"
},
{
"name": "CMake",
"bytes": "191097"
},
{
"name": "CSS",
"bytes": "153538"
},
{
"name": "Cuda",
"bytes": "61768"
},
{
"name": "Cython",
"bytes": "3110222"
},
{
"name": "Fortran",
"bytes": "110284"
},
{
"name": "HTML",
"bytes": "248658"
},
{
"name": "JavaScript",
"bytes": "62920"
},
{
"name": "MATLAB",
"bytes": "17384"
},
{
"name": "Makefile",
"bytes": "152150"
},
{
"name": "Python",
"bytes": "549307737"
},
{
"name": "Roff",
"bytes": "26398"
},
{
"name": "SWIG",
"bytes": "142"
},
{
"name": "Shell",
"bytes": "7790"
},
{
"name": "Smarty",
"bytes": "4090"
},
{
"name": "TeX",
"bytes": "152062"
},
{
"name": "XSLT",
"bytes": "305540"
}
],
"symlink_target": ""
} |
import unittest
import signal
import os
import stackimpact
from stackimpact.runtime import runtime_info, register_signal
class RuntimeTestCase(unittest.TestCase):
    """Tests for signal registration helpers in stackimpact.runtime."""

    def test_register_signal(self):
        """register_signal installs a handler that fires on every delivery."""
        if runtime_info.OS_WIN:
            # SIGUSR1 does not exist on Windows.
            return

        result = {'handler': 0}

        def _handler(signum, frame):
            result['handler'] += 1

        register_signal(signal.SIGUSR1, _handler)

        os.kill(os.getpid(), signal.SIGUSR1)
        os.kill(os.getpid(), signal.SIGUSR1)

        # Restore the default disposition so later tests are unaffected.
        signal.signal(signal.SIGUSR1, signal.SIG_DFL)

        self.assertEqual(result['handler'], 2)

    # A disabled variant covering register_signal(..., once=True) used to sit
    # here as a commented-out block; it was dead code and has been removed.
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "3b1d023d0a4ec4e662d9e2845d0bda5c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 62,
"avg_line_length": 21.617021276595743,
"alnum_prop": 0.610236220472441,
"repo_name": "stackimpact/stackimpact-python",
"id": "e97ee1111cbf59e3e5688e6a95c844a5aa77073d",
"size": "1016",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/runtime_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "93356"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
} |
import unittest
import os
from conans.client.conf.config_installer import tmp_config_install_folder
from conans.test.utils.tools import TestClient
class InstallFolderTests(unittest.TestCase):
    def test_unique_install_folder(self):
        """tmp_config_install_folder must wipe the folder before reuse.

        Entering the context manager again (while the first context is
        still open, matching the original test) must yield the same path,
        but any file written during the first session must be gone.
        """
        client = TestClient()
        with tmp_config_install_folder(client.cache) as first_folder:
            marker = os.path.join(first_folder, "foobar.txt")
            open(marker, "w+")
            with tmp_config_install_folder(client.cache) as second_folder:
                self.assertEqual(first_folder, second_folder)
                self.assertFalse(os.path.exists(marker))
| {
"content_hash": "c1e25c914167ea83133b040167f416d7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 99,
"avg_line_length": 39.04347826086956,
"alnum_prop": 0.6837416481069042,
"repo_name": "conan-io/conan",
"id": "47a7fe319f5843f9db872cdb39c0f1462f49da98",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "conans/test/unittests/client/conf/config_installer/test_install_folder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "264"
},
{
"name": "C++",
"bytes": "425"
},
{
"name": "CMake",
"bytes": "447"
},
{
"name": "Python",
"bytes": "8209945"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from future.builtins import filter, str
try:
from urllib.parse import urljoin
except ImportError: # Python 2
from urlparse import urljoin
from django.core.urlresolvers import resolve, reverse
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from mezzanine.conf import settings
from mezzanine.core.models import Displayable, Orderable, RichText
from mezzanine.pages.fields import MenusField
from mezzanine.pages.managers import PageManager
from mezzanine.utils.urls import path_to_slug
class BasePage(Orderable, Displayable):
    """
    Exists solely to store ``PageManager`` as the main manager.
    If it's defined on ``Page``, a concrete model, then each
    ``Page`` subclass loses the custom manager.
    """

    # Custom manager providing page-tree-aware queries (e.g. slug lookups).
    objects = PageManager()

    class Meta:
        abstract = True
@python_2_unicode_compatible
class Page(BasePage):
    """
    A page in the page tree. This is the base class that custom content types
    need to subclass.
    """

    # Self-referencing FK forms the page tree; a NULL parent means top level.
    parent = models.ForeignKey("Page", blank=True, null=True,
        related_name="children")
    in_menus = MenusField(_("Show in menus"), blank=True, null=True)
    # Cached "Parent / Child / ..." breadcrumb string, rebuilt in save().
    titles = models.CharField(editable=False, max_length=1000, null=True)
    # Lower-cased model name of the concrete subclass (e.g. "richtextpage"),
    # set on first save; used by get_content_model() and get_absolute_url().
    content_model = models.CharField(editable=False, max_length=50, null=True)
    login_required = models.BooleanField(_("Login required"), default=False,
        help_text=_("If checked, only logged in users can view this page"))

    class Meta:
        verbose_name = _("Page")
        verbose_name_plural = _("Pages")
        ordering = ("titles",)
        order_with_respect_to = "parent"

    def __str__(self):
        return self.titles

    def get_absolute_url(self):
        """
        URL for a page - for ``Link`` page types, simply return its
        slug since these don't have an actual URL pattern. Also handle
        the special case of the homepage being a page object.
        """
        slug = self.slug
        if self.content_model == "link":
            # Ensure the URL is absolute.
            slug = urljoin('/', slug)
            return slug
        if slug == "/":
            return reverse("home")
        else:
            return reverse("page", kwargs={"slug": slug})

    def save(self, *args, **kwargs):
        """
        Create the titles field using the titles up the parent chain
        and set the initial value for ordering.
        """
        if self.id is None:
            # First save: record which concrete subclass this row represents.
            self.content_model = self._meta.object_name.lower()
        # Rebuild the cached breadcrumb string from the root down to here.
        titles = [self.title]
        parent = self.parent
        while parent is not None:
            titles.insert(0, parent.title)
            parent = parent.parent
        self.titles = " / ".join(titles)
        super(Page, self).save(*args, **kwargs)

    def description_from_content(self):
        """
        Override ``Displayable.description_from_content`` to load the
        content type subclass for when ``save`` is called directly on a
        ``Page`` instance, so that all fields defined on the subclass
        are available for generating the description.
        """
        if self.__class__ == Page:
            content_model = self.get_content_model()
            if content_model:
                return content_model.description_from_content()
        return super(Page, self).description_from_content()

    def get_ascendants(self, for_user=None):
        """
        Returns the ascendants for the page. Ascendants are cached in
        the ``_ascendants`` attribute, which is populated when the page
        is loaded via ``Page.objects.with_ascendants_for_slug``.
        """
        if not self.parent_id:
            # No parents at all, bail out.
            return []
        if not hasattr(self, "_ascendants"):
            # ``_ascendants`` has not been assigned yet (neither by a prior
            # call to this method nor by
            # ``Page.objects.with_ascendants_for_slug``), so try to retrieve
            # all parents in a single query, which will succeed if the slugs
            # for each of the pages have not been customised.
            if self.slug:
                kwargs = {"for_user": for_user}
                pages = Page.objects.with_ascendants_for_slug(self.slug,
                                                              **kwargs)
                self._ascendants = pages[0]._ascendants
            else:
                self._ascendants = []
        if not self._ascendants:
            # Page has a parent but with_ascendants_for_slug failed to
            # find them due to custom slugs, so retrieve the parents
            # recursively.
            child = self
            while child.parent_id is not None:
                self._ascendants.append(child.parent)
                child = child.parent
        return self._ascendants

    @classmethod
    def get_content_models(cls):
        """
        Return all Page subclasses.
        """
        # NOTE(review): ``models.get_models`` is the old app-cache API —
        # confirm it still exists in the Django version this targets.
        is_content_model = lambda m: m is not Page and issubclass(m, Page)
        return list(filter(is_content_model, models.get_models()))

    def get_content_model(self):
        """
        Provides a generic method of retrieving the instance of the custom
        content type's model for this page.
        """
        # ``content_model`` holds the subclass's lowercased name, which is
        # also the accessor for the one-to-one child row.
        return getattr(self, self.content_model, None)

    def get_slug(self):
        """
        Recursively build the slug from the chain of parents.
        """
        slug = super(Page, self).get_slug()
        if self.parent is not None:
            return "%s/%s" % (self.parent.slug, slug)
        return slug

    def set_slug(self, new_slug):
        """
        Changes this page's slug, and all other pages whose slugs
        start with this page's slug.
        """
        # Rewrite the slug prefix for every descendant that hasn't been
        # given its own urlpattern.
        for page in Page.objects.filter(slug__startswith=self.slug):
            if not page.overridden():
                page.slug = new_slug + page.slug[len(self.slug):]
                page.save()
        self.slug = new_slug

    def set_parent(self, new_parent):
        """
        Change the parent of this page, changing this page's slug to match
        the new parent if necessary.
        """
        self_slug = self.slug
        old_parent_slug = self.parent.slug if self.parent else ""
        new_parent_slug = new_parent.slug if new_parent else ""

        # Make sure setting the new parent won't cause a cycle.
        parent = new_parent
        while parent is not None:
            if parent.pk == self.pk:
                raise AttributeError("You can't set a page or its child as"
                                     " a parent.")
            parent = parent.parent

        self.parent = new_parent
        self.save()

        # Keep the slug in sync with the new position in the tree.
        if self_slug:
            if not old_parent_slug:
                # Was top-level: prefix the new parent's slug.
                self.set_slug("/".join((new_parent_slug, self.slug)))
            elif self.slug.startswith(old_parent_slug):
                # Swap the old parent's slug prefix for the new one.
                new_slug = self.slug.replace(old_parent_slug,
                                             new_parent_slug, 1)
                self.set_slug(new_slug.strip("/"))

    def overridden(self):
        """
        Returns ``True`` if the page's slug has an explicitly defined
        urlpattern and is therefore considered to be overridden.
        """
        from mezzanine.pages.views import page
        page_url = reverse("page", kwargs={"slug": self.slug})
        resolved_view = resolve(page_url)[0]
        # If the slug resolves to anything other than the generic page
        # view, a custom urlpattern has claimed it.
        return resolved_view != page

    def can_add(self, request):
        """
        Dynamic ``add`` permission for content types to override.
        """
        # Adding children under the homepage object is disallowed.
        return self.slug != "/"

    def can_change(self, request):
        """
        Dynamic ``change`` permission for content types to override.
        """
        return True

    def can_delete(self, request):
        """
        Dynamic ``delete`` permission for content types to override.
        """
        return True

    def set_helpers(self, context):
        """
        Called from the ``page_menu`` template tag and assigns a
        handful of properties based on the current page, that are used
        within the various types of menus.
        """
        current_page = context["_current_page"]
        current_page_id = getattr(current_page, "id", None)
        current_parent_id = getattr(current_page, "parent_id", None)
        # Am I a child of the current page?
        self.is_current_child = self.parent_id == current_page_id
        self.is_child = self.is_current_child  # Backward compatibility
        # Is my parent the same as the current page's?
        self.is_current_sibling = self.parent_id == current_parent_id
        # Am I the current page?
        try:
            request = context["request"]
        except KeyError:
            # No request context, most likely when tests are run.
            self.is_current = False
        else:
            self.is_current = self.slug == path_to_slug(request.path_info)

        # Is the current page me or any page up the parent chain?
        def is_c_or_a(page_id):
            parent_id = context.get("_parent_page_ids", {}).get(page_id)
            return self.id == page_id or (parent_id and is_c_or_a(parent_id))
        self.is_current_or_ascendant = lambda: bool(is_c_or_a(current_page_id))
        self.is_current_parent = self.id == current_parent_id

        # Am I a primary page?
        self.is_primary = self.parent_id is None
        # What's an ID I can use in HTML?
        self.html_id = self.slug.replace("/", "-")
        # Default branch level - gets assigned in the page_menu tag.
        self.branch_level = 0

    def in_menu_template(self, template_name):
        """
        Return ``False`` only when this page is explicitly excluded from
        the menu rendered by ``template_name`` via ``in_menus``.
        """
        if self.in_menus is not None:
            for i, l, t in settings.PAGE_MENU_TEMPLATES:
                if not str(i) in self.in_menus and t == template_name:
                    return False
        return True

    def get_template_name(self):
        """
        Subclasses can implement this to provide a template to use
        in ``mezzanine.pages.views.page``.
        """
        return None
class RichTextPage(Page, RichText):
    """
    Implements the default type of page with a single Rich Text
    content field.
    """
    # Translated display names used by the Django admin.
    class Meta:
        verbose_name = _("Rich text page")
        verbose_name_plural = _("Rich text pages")
class Link(Page):
    """
    A general content type for creating external links in the page
    menu.
    """
    # Translated display names used by the Django admin.
    class Meta:
        verbose_name = _("Link")
        verbose_name_plural = _("Links")
| {
"content_hash": "1a437bc07bef9c35bdf9c674ac77862c",
"timestamp": "",
"source": "github",
"line_count": 294,
"max_line_length": 79,
"avg_line_length": 35.945578231292515,
"alnum_prop": 0.5879068887206662,
"repo_name": "cccs-web/mezzanine",
"id": "0b5a0d3a3c60260756991404ff1d8f35c0cde7e8",
"size": "10568",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mezzanine/pages/models.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "108170"
},
{
"name": "JavaScript",
"bytes": "228868"
},
{
"name": "Python",
"bytes": "1084061"
}
],
"symlink_target": ""
} |
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
import os
import shutil
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
class MultiWalletTest(BitcoinTestFramework):
def set_test_params(self):
    """Configure a single clean-chain node that loads four wallets."""
    self.setup_clean_chain = True
    self.num_nodes = 1
    self.supports_cli = True
    self.extra_args = [['-wallet=w1', '-wallet=w2', '-wallet=w3', '-wallet=w']]
def run_test(self):
    """End-to-end checks: duplicate/invalid wallet arguments are rejected,
    walletdir fallbacks work, and per-wallet RPC endpoints are isolated."""
    node = self.nodes[0]
    # Path helpers relative to this node's regtest data directory.
    data_dir = lambda *p: os.path.join(node.datadir, 'regtest', *p)
    wallet_dir = lambda *p: data_dir('wallets', *p)
    wallet = lambda name: node.get_wallet_rpc(name)
    assert_equal(set(node.listwallets()), {"w1", "w2", "w3", "w"})
    self.stop_node(0)
    # should not initialize if there are duplicate wallets
    self.assert_start_raises_init_error(0, ['-wallet=w1', '-wallet=w1'], 'Error loading wallet w1. Duplicate -wallet filename specified.')
    # should not initialize if wallet file is a directory
    os.mkdir(wallet_dir('w11'))
    self.assert_start_raises_init_error(0, ['-wallet=w11'], 'Error loading wallet w11. -wallet filename must be a regular file.')
    # should not initialize if one wallet is a copy of another
    shutil.copyfile(wallet_dir('w2'), wallet_dir('w22'))
    self.assert_start_raises_init_error(0, ['-wallet=w2', '-wallet=w22'], 'duplicates fileid')
    # should not initialize if wallet file is a symlink
    os.symlink(wallet_dir('w1'), wallet_dir('w12'))
    self.assert_start_raises_init_error(0, ['-wallet=w12'], 'Error loading wallet w12. -wallet filename must be a regular file.')
    # should not initialize if the specified walletdir does not exist
    self.assert_start_raises_init_error(0, ['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
    # should not initialize if the specified walletdir is not a directory
    not_a_dir = wallet_dir('notadir')
    open(not_a_dir, 'a').close()
    self.assert_start_raises_init_error(0, ['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')
    # if wallets/ doesn't exist, datadir should be the default wallet dir
    wallet_dir2 = data_dir('walletdir')
    os.rename(wallet_dir(), wallet_dir2)
    self.start_node(0, ['-wallet=w4', '-wallet=w5'])
    assert_equal(set(node.listwallets()), {"w4", "w5"})
    w5 = wallet("w5")
    # Mine one block to w5 so we can recognize its balance later.
    w5.generate(1)
    self.stop_node(0)
    # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
    os.rename(wallet_dir2, wallet_dir())
    self.start_node(0, ['-wallet=w4', '-wallet=w5', '-walletdir=' + data_dir()])
    assert_equal(set(node.listwallets()), {"w4", "w5"})
    w5 = wallet("w5")
    w5_info = w5.getwalletinfo()
    assert_equal(w5_info['immature_balance'], 50)
    self.stop_node(0)
    self.start_node(0, self.extra_args[0])
    w1 = wallet("w1")
    w2 = wallet("w2")
    w3 = wallet("w3")
    w4 = wallet("w")
    wallet_bad = wallet("bad")
    w1.generate(1)
    # accessing invalid wallet fails
    assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)
    # accessing wallet RPC without using wallet endpoint fails
    assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
    # check w1 wallet balance
    w1_info = w1.getwalletinfo()
    assert_equal(w1_info['immature_balance'], 50)
    w1_name = w1_info['walletname']
    assert_equal(w1_name, "w1")
    # check w2 wallet balance
    w2_info = w2.getwalletinfo()
    assert_equal(w2_info['immature_balance'], 0)
    w2_name = w2_info['walletname']
    assert_equal(w2_name, "w2")
    w3_name = w3.getwalletinfo()['walletname']
    assert_equal(w3_name, "w3")
    w4_name = w4.getwalletinfo()['walletname']
    assert_equal(w4_name, "w")
    # Mature w1's coinbase so it has spendable funds.
    w1.generate(101)
    assert_equal(w1.getbalance(), 100)
    assert_equal(w2.getbalance(), 0)
    assert_equal(w3.getbalance(), 0)
    assert_equal(w4.getbalance(), 0)
    w1.sendtoaddress(w2.getnewaddress(), 1)
    w1.sendtoaddress(w3.getnewaddress(), 2)
    w1.sendtoaddress(w4.getnewaddress(), 3)
    w1.generate(1)
    assert_equal(w2.getbalance(), 1)
    assert_equal(w3.getbalance(), 2)
    assert_equal(w4.getbalance(), 3)
    # Batched RPC calls should route to the wallet endpoint as well.
    batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
    assert_equal(batch[0]["result"]["chain"], "regtest")
    assert_equal(batch[1]["result"]["walletname"], "w1")
# Standard functional-test entry point: run directly as a script.
if __name__ == '__main__':
    MultiWalletTest().main()
| {
"content_hash": "c587450ad2a76da96264022d8952ad1d",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 145,
"avg_line_length": 40.073170731707314,
"alnum_prop": 0.6195982958003652,
"repo_name": "brandonrobertz/namecoin-core",
"id": "12d9e9f48d4b3bbf85c26cd79eef210d8a29ec3d",
"size": "5138",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/functional/multiwallet.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "688659"
},
{
"name": "C++",
"bytes": "5733984"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "194059"
},
{
"name": "Makefile",
"bytes": "114172"
},
{
"name": "Objective-C",
"bytes": "5737"
},
{
"name": "Objective-C++",
"bytes": "6763"
},
{
"name": "Python",
"bytes": "1363751"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "67429"
}
],
"symlink_target": ""
} |
# IronPython node script for Dynamo: load the Revit API assembly.
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
# Dynamo node output: the Revit "Medium" view detail level enum member.
OUT = ViewDetailLevel.Medium
"content_hash": "ddaa724fbebb52a3658cbf97f5f30960",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 31,
"avg_line_length": 20.2,
"alnum_prop": 0.801980198019802,
"repo_name": "CAAD-RWTH/ClockworkForDynamo",
"id": "e317e6763d622724e9c380e7062c19e378666b59",
"size": "101",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nodes/0.9.x/python/DetailLevel.Medium.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "316146"
}
],
"symlink_target": ""
} |
class DummyMiddleware:
    """Minimal pass-through middleware: forwards every request unchanged."""

    def __init__(self, get_response):
        # Remember the next callable in the middleware chain.
        self.get_response = get_response

    def __call__(self, request):
        # Delegate straight to the wrapped callable with no processing.
        return self.get_response(request)
| {
"content_hash": "3a5674f7268d2190ccc563022f509ea9",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 41,
"avg_line_length": 25.571428571428573,
"alnum_prop": 0.6368715083798883,
"repo_name": "digitalocean/netbox",
"id": "97592c3b21ce01b9f12739a7e2eb4be7ec875ba7",
"size": "179",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "netbox/extras/tests/dummy_plugin/middleware.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "189339"
},
{
"name": "HTML",
"bytes": "570800"
},
{
"name": "JavaScript",
"bytes": "326125"
},
{
"name": "Python",
"bytes": "1815170"
},
{
"name": "Shell",
"bytes": "2786"
}
],
"symlink_target": ""
} |
import os
import pytest
from mock import Mock
import pip.req.req_uninstall
from pip.req.req_uninstall import UninstallPathSet, uninstallation_paths
# Pretend all files are local, so UninstallPathSet accepts files in the
# tmpdir, outside the virtualenv.
def mock_is_local(path):
    """Replacement for ``is_local`` that treats every path as local."""
    return True
def test_uninstallation_paths():
    """RECORD entries should expand to include missing .pyc companions."""
    class dist(object):
        def get_metadata_lines(self, record):
            return ['file.py,,',
                    'file.pyc,,',
                    'file.so,,',
                    'nopyc.py']
        location = ''

    d = dist()
    expected = ['file.py',
                'file.pyc',
                'file.so',
                'nopyc.py',
                'nopyc.pyc']
    assert list(uninstallation_paths(d)) == expected
    # Running it twice guards against a one-shot ("unique generator") bug.
    assert list(uninstallation_paths(d)) == expected
class TestUninstallPathSet(object):
    """Tests for ``UninstallPathSet`` path bookkeeping."""

    def test_add(self, tmpdir, monkeypatch):
        monkeypatch.setattr(pip.req.req_uninstall, 'is_local', mock_is_local)
        # Fix case for windows tests
        file_extant = os.path.normcase(os.path.join(tmpdir, 'foo'))
        file_nonexistent = os.path.normcase(
            os.path.join(tmpdir, 'nonexistent'))
        with open(file_extant, 'w'):
            pass
        ups = UninstallPathSet(dist=Mock())
        assert ups.paths == set()
        ups.add(file_extant)
        assert ups.paths == set([file_extant])
        # Paths that do not exist on disk are ignored rather than recorded.
        ups.add(file_nonexistent)
        assert ups.paths == set([file_extant])

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_add_symlink(self, tmpdir, monkeypatch):
        monkeypatch.setattr(pip.req.req_uninstall, 'is_local', mock_is_local)
        f = os.path.join(tmpdir, 'foo')
        with open(f, 'w'):
            pass
        l = os.path.join(tmpdir, 'foo_link')
        os.symlink(f, l)
        ups = UninstallPathSet(dist=Mock())
        # The symlink itself (not its target) should be recorded.
        ups.add(l)
        assert ups.paths == set([l])

    def test_compact_shorter_path(self, monkeypatch):
        monkeypatch.setattr(pip.req.req_uninstall, 'is_local', lambda p: True)
        monkeypatch.setattr('os.path.exists', lambda p: True)
        # This deals with nt/posix path differences
        short_path = os.path.normcase(os.path.abspath(
            os.path.join(os.path.sep, 'path')))
        ups = UninstallPathSet(dist=Mock())
        ups.add(short_path)
        ups.add(os.path.join(short_path, 'longer'))
        # compact() should drop entries nested under another recorded path.
        assert ups.compact(ups.paths) == set([short_path])

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_detect_symlink_dirs(self, monkeypatch, tmpdir):
        monkeypatch.setattr(pip.req.req_uninstall, 'is_local', lambda p: True)
        # construct 2 paths:
        #   tmpdir/dir/file
        #   tmpdir/dirlink/file (where dirlink is a link to dir)
        d = tmpdir.join('dir')
        d.mkdir()
        dlink = tmpdir.join('dirlink')
        os.symlink(d, dlink)
        d.join('file').touch()
        path1 = str(d.join('file'))
        path2 = str(dlink.join('file'))
        ups = UninstallPathSet(dist=Mock())
        ups.add(path1)
        # Both names resolve to the same file; only one should be kept.
        ups.add(path2)
        assert ups.paths == set([path1])
| {
"content_hash": "b92c0a83b0739491c30f05e263927dca",
"timestamp": "",
"source": "github",
"line_count": 103,
"max_line_length": 79,
"avg_line_length": 31,
"alnum_prop": 0.5875352333228938,
"repo_name": "atdaemon/pip",
"id": "7097ded9d7841a85580568c7298f26a8d8a3555d",
"size": "3193",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/unit/test_req_uninstall.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2342"
},
{
"name": "Python",
"bytes": "972228"
},
{
"name": "Shell",
"bytes": "1885"
}
],
"symlink_target": ""
} |
import Queue
import curses
import logging
import subprocess
import textwrap
import threading
import traceback
from curses import ascii
logger = logging.getLogger('assertEquals.interactive.utils')
class Bucket:
    """An empty placeholder class — presumably used as an ad-hoc attribute
    namespace; it defines no behaviour of its own.
    """
class RefreshError(StandardError):
    """An error refreshing the summary.

    Instances carry the traceback text captured from the child process.
    """

    def __init__(self, traceback):
        """Store the remote traceback text on the instance."""
        StandardError.__init__(self)
        self.traceback = traceback
class CommunicationProblem(StandardError):
    """Wrap a Process that wants to talk.
    """

    def __init__(self, proc):
        """Keep a reference to the child process that wants to interact."""
        StandardError.__init__(self)
        self.proc = proc
class Process(subprocess.Popen):
    """Represent a child process that might want to interact with us.
    """

    prompt = '(Pdb) '    # The signal that it wants to talk.
    intro = ''           # If it wants to talk, this will be the first thing it said.
    interactive = False  # whether or not we are interacting with the child

    def __init__(self, *args, **kwargs):
        """Extend to capture I/O streams.
        """
        # Force pipes for all three streams, folding stderr into stdout so
        # Pdb prompts and program output arrive on a single channel.
        _kwargs = { 'stdin':subprocess.PIPE
                  , 'stdout':subprocess.PIPE
                  , 'stderr':subprocess.STDOUT
                   }
        kwargs.update(_kwargs)
        subprocess.Popen.__init__(self, *args, **kwargs)

    def __str__(self):
        return "<Process #%d>" % self.pid
    __repr__ = __str__

    def communicate(self, input=None):
        """Override to support Pdb interaction.

        If input is None, then we will raise ourselves if the process wants to
        interact. Otherwise, we will return the last thing it said. To see if
        the conversation is over, use self.poll().
        """
        if input is not None:
            self.stdin.write(input + '\n')
        output = []
        i = len(self.prompt)
        while 1:
            retcode = self.poll()
            if retcode is None:
                # Conversation not done; check to see if it's our turn to talk.
                # Compare the last len(prompt) single-character reads against
                # the prompt string.
                if len(output) >= i:
                    latest = ''.join(output[-i:])
                    if latest == self.prompt:
                        self.interactive = True
                        break
                # Read one character at a time so the prompt is detected as
                # soon as it appears (the child does not emit a newline).
                output.append(self.stdout.read(1))
            else:
                # The process is done; assume we can read to EOF.
                output.append(self.stdout.read())
                break
        output = ''.join(output)
        if self.interactive and (input is None):
            # First contact: stash what was said and hand the caller this
            # process wrapped in an exception.
            self.intro = output
            raise CommunicationProblem(self)
        else:
            return output
class Spinner:
    """A work indicator that runs in its own thread.
    """

    def __init__(self, spin):
        """Store the callable that actually draws/undraws the indicator."""
        self.spin = spin
        self.flag = Queue.Queue(1)

    def start(self):
        """Launch the spinner in a background thread."""
        self.thread = threading.Thread(target=self.spin)
        self.thread.start()

    def stop(self):
        """Signal the spinner to finish and wait for its thread to exit."""
        self.flag.put(True)
        self.thread.join()

    def __call__(self, call, *args, **kwargs):
        """Run *call* while the spinner is shown; return its result."""
        self.start()
        try:
            return call(*args, **kwargs)
        finally:
            self.stop()
class DoneScrolling(StandardError):
    """Raised when a scroll request runs off the edge of a scrolling area.
    """
class ScrollArea:
    """Represents a scrollable portion of a screen.
    """

    numrows = 0       # number of viewable rows; len semantics
    cursor = 0        # index of the currently curitem row; 0-indexed
    toprow = 0        # index of our top row within the window; 0-indexed
    numitems = 0      # the total number of items in the list; len semantics
    curitem = 0       # index of the currently curitem item; 0-indexed
    start = end_ = 0  # coordinates in your list of items; slice semantics
    bar = None        # a range() within range(numrows) for which a scrollbar
                      # should be drawn

    def __init__(self, numrows, numitems, toprow):
        """Size the viewport and compute the initial visible slice.
        """
        self.numrows = numrows
        self.numitems = numitems
        self.toprow = toprow
        if self.numitems < self.numrows:
            self.end_ = self.numitems
        else:
            self.end_ = self.numrows
        self.update()

    def __repr__(self):
        return "<ScrollArea %s>" % str(self.stat())
    __str__ = __repr__

    # Container emulation
    # ===================
    # As a container, we are a list of 2-tuples: (index, rownum)
    #   index -- an index of an item currently being displayed
    #   rownum -- a row number relative to the current window object

    def __list(self):
        def rownum(i):
            return self.toprow + i - self.start
        return [(i, rownum(i)) for i in range(self.start, self.end_)]

    def __iter__(self):
        return iter(self.__list())

    def __len__(self):
        return len(self.__list())

    # Basic API
    # =========

    def scroll_one(self, up=False):
        """Scroll the viewport by one row.
        """
        if self.numitems == 0:  # short-circuit
            raise DoneScrolling
        if up:  # scroll up
            if self.cursor == 0:  # top of viewport
                if self.start == 0:  # top of list
                    raise DoneScrolling
                else:  # not top of list
                    self.start -= 1
                    # Shrink the slice if it would exceed the viewport.
                    if self.end_ - self.start > self.numrows:
                        self.end_ -= 1
            else:  # not top of viewport
                self.cursor -= 1
        else:  # scroll down
            if self.curitem + 1 == self.numitems:  # bottom of list
                raise DoneScrolling
            else:  # not bottom of list
                if self.cursor + 1 == self.numrows:  # bottom of viewport
                    self.start += 1
                    self.end_ += 1
                else:  # not bottom of viewport
                    self.cursor += 1
        self.update()

    def scroll(self, delta):
        """Support multi-line scrolling.
        """
        up = delta < 0
        delta = abs(delta)
        try:
            for i in range(delta):
                self.scroll_one(up)
        except DoneScrolling:
            # Hitting an edge mid-scroll beeps but keeps progress made so far.
            self._refuse()

    # Extended API
    # ============

    def page_up(self):
        """Scroll up one page.
        """
        if self.numitems == 0:  # empty page
            self._refuse()
        elif self.numitems <= self.numrows:  # partial/single page
            self.cursor = 0
            self._refuse()
        elif self.numitems > self.numrows:  # multiple pages
            # already at top
            if self.curitem == 0:
                self.cursor = 0
                self._refuse()
            # less than a full page above
            elif self.start+1 - self.numrows < 0:
                self.start = 0
                self.end_ = self.numrows
                self.cursor = 0
                self._refuse()
            # exactly one page above
            elif self.start+1 - self.numrows == 0:
                self.start = 0
                self.end_ = self.numrows
                self.cursor = 0
            # more than one page above
            else:
                self.start -= self.numrows
                self.end_ = self.start + self.numrows
        self.update()

    def page_down(self):
        """Scroll down one page.
        """
        if self.numitems == 0:  # empty page
            self._refuse()
        elif self.numitems <= self.numrows:  # partial/single page
            self.cursor = self.numitems - 1
            self._refuse()
        elif self.numitems > self.numrows:  # multiple pages
            #if hasattr(self, 'flag'):
            #    import pdb; pdb.set_trace()
            # already on the last page (exact or partial)
            if self.numitems - self.start <= self.numrows:
                self.start = self.numitems - 1
                self.end_ = self.numitems
                self.cursor = 0
                self._refuse()
            # less than a full page left
            elif self.numitems - self.end_ < self.numrows:
                self.start = self.end_
                self.end_ = self.numitems
                # Clamp the cursor to the shorter final page.
                rows_displayed = self.end_ - self.start
                if self.cursor > rows_displayed:
                    self.cursor = rows_displayed - 1
            # one full page or more left
            else:
                self.start += self.numrows
                self.end_ += self.numrows
        self.update()

    def home(self):
        """Jump to the very first item.
        """
        if self.numitems == 0:  # empty page
            self._refuse()
        elif self.numitems <= self.numrows:  # partial/single page
            if self.cursor == 0:
                self._refuse()
            else:
                self.cursor = 0
        elif self.numitems > self.numrows:  # multiple pages
            self.start = 0
            self.end_ = self.start + self.numrows
            self.cursor = 0
            # NOTE(review): curitem is stale here (update() runs below), so
            # this beeps only when we were already at the top — confirm.
            if self.curitem == 0:
                self._refuse()
        self.update()

    def end(self):
        """Jump to the very last item.
        """
        if self.numitems == 0:  # empty page
            self._refuse()
        elif self.numitems <= self.numrows:  # partial/single page
            if self.cursor == self.numitems - 1:
                self._refuse()
            else:
                self.cursor = self.numitems - 1
        elif self.numitems > self.numrows:  # multiple pages
            self.cursor = self.numrows - 1
            self.end_ = self.numitems
            self.start = self.end_ - self.numrows
            if self.curitem == self.numitems - 1:
                self._refuse()
        self.update()

    # Helpers
    # =======

    def _refuse(self):
        """Factored out for easier testing.
        """
        self.update()
        self.refuse()

    def refuse(self):
        """Factored out for easier testing.
        """
        curses.beep()

    def update(self):
        """Update self.bar and self.curitem.
        """
        if self.numrows > self.numitems:
            bar = None
        else:
            # Scale the visible window into scrollbar coordinates.
            numitems_f = float(self.numitems)
            size = int((self.numrows/numitems_f) * self.numrows)
            start = int((self.start/numitems_f) * self.numrows)
            end = start + size + 1
            if end > self.numrows:
                end = self.numrows
            bar = range(start+self.toprow, end+self.toprow)
        self.bar = bar
        self.curitem = self.start + self.cursor

    def stat(self):
        return ( self.numrows   # 1-indexed
               , self.cursor    # 0-indexed
               , self.numitems  # 1-indexed
               , self.start     # 0-indexed
               , self.end_      # 0-indexed
               , self.curitem   # 0-indexed
               , self.bar       # 0-indexed
                )

    def move_cursor(self, rownum):
        """Move the cursor to a specific row, selecting the item there.
        """
        if (self.numrows < self.numitems) and (rownum in range(self.numrows)):
            self.cursor = rownum
            # Refuse if the requested row has no item behind it.
            if rownum not in [i[1] for i in self]:
                self._refuse()
            else:
                self.update()
        else:
            self._refuse()
wrapper_1 = textwrap.TextWrapper(initial_indent='',
                                 subsequent_indent='',
                                 break_long_words=True)
wrapper_2 = textwrap.TextWrapper(initial_indent='  ',
                                 subsequent_indent='  ',
                                 break_long_words=True)


def format_tb(width, traceback_):
    """Wrap a formatted traceback to *width* columns; return display lines.

    The header and final exception line stay flush left; intermediate
    frame lines are indented two spaces, with a blank line after every
    line that is not a ``File`` location.
    """
    wrapper_1.width = wrapper_2.width = width
    raw = traceback_.splitlines()
    out = wrapper_1.wrap(raw[0])
    out.append('')
    for middle in raw[1:-1]:
        middle = middle.strip()
        out.extend(wrapper_2.wrap(middle))
        if not middle.startswith('File'):
            out.append('')
    out.extend(wrapper_1.wrap(raw[-1]))
    return out
| {
"content_hash": "78eceb6830cf5d80093d3536e81d3fcc",
"timestamp": "",
"source": "github",
"line_count": 424,
"max_line_length": 79,
"avg_line_length": 29.35377358490566,
"alnum_prop": 0.504419090470834,
"repo_name": "whit537/assertEquals",
"id": "81e1e51666b10249f63a2c787bc05b553a42f07c",
"size": "12446",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assertEquals/interactive/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "134311"
}
],
"symlink_target": ""
} |
"""Save and restore variables."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import re
import time
import uuid
import numpy as np
import six
from google.protobuf import text_format
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.core.protobuf import saver_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import errors
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpointable
from tensorflow.python.training import training_util
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
from tensorflow.python.util import compat
from tensorflow.python.util.tf_export import tf_export
# Op names which identify variable reads which should be saved.
# NOTE(review): VarHandleOp/ReadVariableOp presumably cover resource
# variables — confirm against resource_variable_ops.
_VARIABLE_OPS = set(["Variable",
                     "VariableV2",
                     "AutoReloadVariable",
                     "VarHandleOp",
                     "ReadVariableOp"])
def _set_cpu0(device_string):
  """Creates a new device string based on `device_string` but using /CPU:0.

  If the device is already on /CPU:0, this is a no-op.

  Args:
    device_string: A device string.

  Returns:
    A device string.
  """
  spec = pydev.DeviceSpec.from_string(device_string)
  # Retarget the parsed spec at the first CPU on the same job/task/host.
  spec.device_type = "CPU"
  spec.device_index = 0
  return spec.to_string()
class BaseSaverBuilder(object):
"""Base class for Savers.
Can be extended to create different Ops.
"""
class SaveSpec(object):
  """Class used to describe tensor slices that need to be saved."""

  def __init__(self, tensor, slice_spec, name, dtype=None):
    """Creates a `SaveSpec` object.

    Args:
      tensor: the tensor to save or callable that produces a tensor to save.
      slice_spec: the slice to be saved. See `Variable.SaveSliceInfo`.
      name: the name to save the tensor under.
      dtype: The data type of the Tensor. Required if `tensor` is callable.
        Used for error checking in the restore op.
    """
    self._tensor = tensor
    self.slice_spec = slice_spec
    self.name = name
    if not callable(tensor):
      # A concrete tensor carries its own dtype.
      self.dtype = tensor.dtype
    elif dtype is not None:
      self.dtype = dtype
    else:
      raise AssertionError(
          "When passing a callable `tensor` to a SaveSpec, an explicit "
          "dtype must be provided.")

  @property
  def tensor(self):
    # Callables are invoked lazily so the tensor is produced on demand.
    return self._tensor() if callable(self._tensor) else self._tensor
class SaveableObject(object):
  """Base class for saving and restoring saveable objects."""

  def __init__(self, op, specs, name):
    """Creates a `SaveableObject` object.

    Args:
      op: the "producer" object that this class wraps; it produces a list of
        tensors to save. E.g., a "Variable" object saving its backing tensor.
      specs: a list of SaveSpec, each element of which describes one tensor to
        save under this object. All Tensors must be on the same device.
      name: the name to save the object under.
    """
    self.op = op
    self.specs = specs
    self.name = name
    # Resolved lazily by the `device` property below.
    self._device = None

  @property
  def device(self):
    """The device for SaveSpec Tensors."""
    # Note that SaveSpec.tensor runs Tensor-gathering ops when executing
    # eagerly, making this call potentially very expensive.
    #
    # TODO(allenl): Consider another way to gather device information. Lower
    # priority since this property isn't part of the normal save()/restore()
    # workflow, but does come up when some alternative builders are passed to
    # the Saver.
    if self._device is None:
      self._device = self.specs[0].tensor.device
    return self._device

  def restore(self, restored_tensors, restored_shapes):
    """Restores this object from 'restored_tensors'.

    Args:
      restored_tensors: the tensors that were loaded from a checkpoint
      restored_shapes: the shapes this object should conform to after
        restore, or None.

    Returns:
      An operation that restores the state of the object.

    Raises:
      ValueError: If the object cannot be restored using the provided
        parameters.
    """
    # pylint: disable=unused-argument
    raise ValueError("Calling an abstract method.")
class VariableSaveable(SaveableObject):
  """SaveableObject implementation that handles Variables."""

  def __init__(self, var, slice_spec, name):
    """Wraps `var` in a single SaveSpec carrying its dtype."""
    spec = BaseSaverBuilder.SaveSpec(var, slice_spec, name, dtype=var.dtype)
    super(BaseSaverBuilder.VariableSaveable, self).__init__(var, [spec], name)

  def restore(self, restored_tensors, restored_shapes):
    """Assigns the (optionally reshaped) restored tensor to the variable."""
    restored_tensor = restored_tensors[0]
    if restored_shapes is not None:
      restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
    # Shape validation only makes sense when no reshape was requested and
    # the variable's static shape is fully known.
    return state_ops.assign(
        self.op,
        restored_tensor,
        validate_shape=restored_shapes is None and
        self.op.get_shape().is_fully_defined())
class ResourceVariableSaveable(SaveableObject):
  """SaveableObject implementation that handles ResourceVariables."""

  def __init__(self, var, slice_spec, name):
    """Accepts either a ResourceVariable or a graph-mode read of one."""
    self._var_device = var.device
    self._var_shape = var.shape
    if isinstance(var, ops.Tensor):
      # `var` is a read of a variable; its op's first input is the handle.
      self.handle_op = var.op.inputs[0]
      tensor = var
    elif isinstance(var, resource_variable_ops.ResourceVariable):

      def _read_variable_closure(v):
        # Deferred read: pulls the value on the variable's device, then
        # copies it to the CPU so the save op can consume it there.
        def f():
          with ops.device(v.device):
            x = v.read_value()
          with ops.device("/device:CPU:0"):
            return array_ops.identity(x)
        return f

      self.handle_op = var.handle
      tensor = _read_variable_closure(var)
    else:
      raise ValueError(
          "Saveable is neither a resource variable nor a read operation."
          " Got: %s" % repr(var))
    spec = BaseSaverBuilder.SaveSpec(tensor, slice_spec, name,
                                     dtype=var.dtype)
    super(BaseSaverBuilder.ResourceVariableSaveable, self).__init__(
        var, [spec], name)

  def restore(self, restored_tensors, restored_shapes):
    """Assigns the restored tensor back through the variable's handle."""
    restored_tensor = restored_tensors[0]
    if restored_shapes is not None:
      restored_tensor = array_ops.reshape(restored_tensor, restored_shapes[0])
    # Copy the restored tensor to the variable's device.
    with ops.device(self._var_device):
      restored_tensor = array_ops.identity(restored_tensor)
    return resource_variable_ops.shape_safe_assign_variable_handle(
        self.handle_op, self._var_shape, restored_tensor)
def __init__(self, write_version=saver_pb2.SaverDef.V2):
  """Creates a builder.

  Args:
    write_version: the checkpoint format to emit, one of the
      `saver_pb2.SaverDef` checkpoint-version enum values (defaults to V2).
  """
  self._write_version = write_version
def save_op(self, filename_tensor, saveables):
  """Create an Op to save 'saveables'.

  This is intended to be overridden by subclasses that want to generate
  different Ops.

  Args:
    filename_tensor: String Tensor.
    saveables: A list of BaseSaverBuilder.SaveableObject objects.

  Returns:
    An Operation that save the variables.

  Raises:
    RuntimeError: (implementation detail) if "self._write_version" is an
      unexpected value.
  """
  # pylint: disable=protected-access
  # Flatten every saveable's specs into parallel name/tensor/slice lists.
  tensor_names = []
  tensors = []
  tensor_slices = []
  for saveable in saveables:
    for spec in saveable.specs:
      tensor_names.append(spec.name)
      tensors.append(spec.tensor)
      tensor_slices.append(spec.slice_spec)
  if self._write_version == saver_pb2.SaverDef.V1:
    return io_ops._save(
        filename=filename_tensor,
        tensor_names=tensor_names,
        tensors=tensors,
        tensor_slices=tensor_slices)
  elif self._write_version == saver_pb2.SaverDef.V2:
    # "filename_tensor" is interpreted *NOT AS A FILENAME*, but as a prefix
    # of a V2 checkpoint: e.g. "/fs/train/ckpt-<step>/tmp/worker<i>-<step>".
    return io_ops.save_v2(filename_tensor, tensor_names, tensor_slices,
                          tensors)
  else:
    # BUGFIX: _write_version is a SaverDef enum value (an int); concatenating
    # it directly to a str raised TypeError, masking the intended error.
    raise RuntimeError("Unexpected write_version: " + str(self._write_version))
def bulk_restore(self, filename_tensor, saveables, preferred_shard,
                 restore_sequentially):
  """Restore all tensors contained in saveables.

  By default, this issues separate calls to `restore_op` for each saveable.
  Subclasses may override to load multiple saveables in a single call.

  Args:
    filename_tensor: String Tensor.
    saveables: List of BaseSaverBuilder.SaveableObject objects.
    preferred_shard: Int. Shard to open first when loading a sharded file.
    restore_sequentially: Bool. If true, each restore is sequential.

  Returns:
    A list of Tensors resulting from reading 'saveable' from
      'filename'.
  """
  all_tensors = []
  assign_ops = []
  for saveable in saveables:
    # NOTE(review): assign_ops is never appended to in this method, so the
    # sequential control input is always empty here — presumably a hook for
    # subclasses; confirm intended behavior.
    restore_control_inputs = assign_ops[-1:] if restore_sequentially else []
    # Restore ops are placed on the CPU of the saveable's device, since
    # string tensors are involved.
    with ops.device(_set_cpu0(saveable.device) if saveable.device else None):
      with ops.control_dependencies(restore_control_inputs):
        all_tensors.extend(
            self.restore_op(filename_tensor, saveable, preferred_shard))
  return all_tensors
# pylint: disable=unused-argument
def restore_op(self, filename_tensor, saveable, preferred_shard):
  """Create ops to restore 'saveable'.

  This is intended to be overridden by subclasses that want to generate
  different Ops.

  Args:
    filename_tensor: String Tensor.
    saveable: A BaseSaverBuilder.SaveableObject object.
    preferred_shard: Int. Shard to open first when loading a sharded file.

  Returns:
    A list of Tensors resulting from reading 'saveable' from
      'filename'.
  """
  # One RestoreV2 op per spec, in spec order, each reading a single tensor.
  return [
      io_ops.restore_v2(filename_tensor,
                        [spec.name],
                        [spec.slice_spec],
                        [spec.dtype])[0]
      for spec in saveable.specs
  ]
# pylint: enable=unused-argument
def sharded_filename(self, filename_tensor, shard, num_shards):
  """Append sharding information to a filename.

  Args:
    filename_tensor: A string tensor.
    shard: Integer.  The shard for the filename.
    num_shards: An int Tensor for the number of shards.

  Returns:
    A string tensor.
  """
  # Delegates to the generated ShardedFilename kernel — presumably producing
  # a "<file>-nnnnn-of-mmmmm" style name; confirm against the op's docs.
  return gen_io_ops.sharded_filename(filename_tensor, shard, num_shards)
def _AddSaveOps(self, filename_tensor, saveables):
  """Add ops to save variables that are on the same shard.

  Args:
    filename_tensor: String Tensor.
    saveables: A list of SaveableObject objects.

  Returns:
    A tensor with the filename used to save.
  """
  save_op = self.save_op(filename_tensor, saveables)
  # Return the filename, gated on the save op having completed.
  return control_flow_ops.with_dependencies([save_op], filename_tensor)
def _AddShardedSaveOpsForV2(self, checkpoint_prefix, per_device):
  """Add ops to save the params per shard, for the V2 format.

  Note that the sharded save procedure for the V2 format is different from
  V1: there is a special "merge" step that merges the small metadata produced
  from each device.

  Args:
    checkpoint_prefix: scalar String Tensor.  Interpreted *NOT AS A
      FILENAME*, but as a prefix of a V2 checkpoint;
    per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs,
      as returned by _GroupByDevices().

  Returns:
    An op to save the variables, which, when evaluated, returns the prefix
      "<user-fed prefix>" only and does not include the sharded spec suffix.
  """
  # IMPLEMENTATION DETAILS: most clients should skip.
  #
  # Suffix for any well-formed "checkpoint_prefix", when sharded.
  # Transformations:
  # * Users pass in "save_path" in save() and restore().  Say "myckpt".
  # * checkpoint_prefix gets fed <save_path><_SHARDED_SUFFIX>.
  #
  # Example:
  #   During runtime, a temporary directory is first created, which contains
  #   files
  #
  #     <train dir>/myckpt_temp/
  #        part-?????-of-?????{.index, .data-00000-of-00001}
  #
  #   Before .save() finishes, they will be (hopefully, atomically) renamed to
  #
  #     <train dir>/
  #        myckpt{.index, .data-?????-of-?????}
  #
  #   Users only need to interact with the user-specified prefix, which is
  #   "<train dir>/myckpt" in this case.  Save() and Restore() work with the
  #   prefix directly, instead of any physical pathname.  (On failure and
  #   subsequent restore, an outdated and orphaned temporary directory can be
  #   safely removed.)
  _SHARDED_SUFFIX = "_temp_%s/part" % uuid.uuid4().hex
  tmp_checkpoint_prefix = string_ops.string_join(
      [checkpoint_prefix, _SHARDED_SUFFIX])
  num_shards = len(per_device)
  sharded_saves = []
  sharded_prefixes = []
  num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
  last_device = None
  for shard, (device, saveables) in enumerate(per_device):
    last_device = device
    # Each shard's save runs on the CPU of its own device.
    with ops.device(_set_cpu0(device)):
      sharded_filename = self.sharded_filename(tmp_checkpoint_prefix, shard,
                                               num_shards_tensor)
      sharded_prefixes.append(sharded_filename)
      sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
  with ops.control_dependencies([x.op for x in sharded_saves]):
    # Co-locates the merge step with the last device.
    with ops.device(_set_cpu0(last_device)):
      # V2 format write path consists of a metadata merge step.  Once merged,
      # attempts to delete the temporary directory, "<user-fed prefix>_temp".
      merge_step = gen_io_ops.merge_v2_checkpoints(
          sharded_prefixes, checkpoint_prefix, delete_old_dirs=True)
      with ops.control_dependencies([merge_step]):
        # Returns the prefix "<user-fed prefix>" only.  DOES NOT include the
        # sharded spec suffix.
        return array_ops.identity(checkpoint_prefix)
  def _AddShardedSaveOps(self, filename_tensor, per_device):
    """Add ops to save the params per shard.

    Dispatches to the V2 path when this builder writes V2-format checkpoints;
    otherwise builds the legacy (V1) per-shard save ops below.

    Args:
      filename_tensor: a scalar String Tensor.
      per_device: A list of (device, BaseSaverBuilder.SaveableObject) pairs, as
        returned by _GroupByDevices().

    Returns:
      An op to save the variables.
    """
    if self._write_version == saver_pb2.SaverDef.V2:
      return self._AddShardedSaveOpsForV2(filename_tensor, per_device)
    num_shards = len(per_device)
    sharded_saves = []
    num_shards_tensor = constant_op.constant(num_shards, name="num_shards")
    for shard, (device, saveables) in enumerate(per_device):
      # Place each shard's save ops on the device that owns its tensors.
      with ops.device(device):
        sharded_filename = self.sharded_filename(filename_tensor, shard,
                                                 num_shards_tensor)
        sharded_saves.append(self._AddSaveOps(sharded_filename, saveables))
    # Return the sharded name for the save path.
    with ops.control_dependencies([x.op for x in sharded_saves]):
      return gen_io_ops.sharded_filespec(filename_tensor, num_shards_tensor)
  def _AddRestoreOps(self,
                     filename_tensor,
                     saveables,
                     restore_sequentially,
                     reshape,
                     preferred_shard=-1,
                     name="restore_all"):
    """Add operations to restore saveables.

    Restores all tensors in one bulk read, then hands each saveable its
    contiguous slice of the flat result.

    Args:
      filename_tensor: Tensor for the path of the file to load.
      saveables: A list of SaveableObject objects.
      restore_sequentially: True if we want to restore variables sequentially
        within a shard.
      reshape: True if we want to reshape loaded tensors to the shape of
        the corresponding variable.
      preferred_shard: Shard to open first when loading a sharded file.
      name: Name for the returned op.

    Returns:
      An Operation that restores the variables.
    """
    all_tensors = self.bulk_restore(filename_tensor, saveables, preferred_shard,
                                    restore_sequentially)
    assign_ops = []
    idx = 0  # Running offset into `all_tensors`.
    # Load and optionally reshape on the CPU, as string tensors are not
    # available on the GPU.
    # TODO(touts): Re-enable restore on GPU when we can support annotating
    # string tensors as "HostMemory" inputs.
    for saveable in saveables:
      shapes = None
      if reshape:
        # Compute the shapes, let the restore op decide if and how to do
        # the reshape.
        shapes = []
        for spec in saveable.specs:
          v = spec.tensor
          shape = v.get_shape()
          if not shape.is_fully_defined():
            # Fall back to the dynamic shape when the static one is unknown.
            shape = array_ops.shape(v)
          shapes.append(shape)
      # Each saveable consumes exactly len(saveable.specs) restored tensors.
      saveable_tensors = all_tensors[idx:idx + len(saveable.specs)]
      idx += len(saveable.specs)
      assign_ops.append(saveable.restore(saveable_tensors, shapes))
    # Create a Noop that has control dependencies from all the updates.
    return control_flow_ops.group(*assign_ops, name=name)
def _AddShardedRestoreOps(self, filename_tensor, per_device,
restore_sequentially, reshape):
"""Add Ops to restore variables from multiple devices.
Args:
filename_tensor: Tensor for the path of the file to load.
per_device: A list of (device, SaveableObject) pairs, as
returned by _GroupByDevices().
restore_sequentially: True if we want to restore variables sequentially
within a shard.
reshape: True if we want to reshape loaded tensors to the shape of
the corresponding variable.
Returns:
An Operation that restores the variables.
"""
sharded_restores = []
for shard, (device, saveables) in enumerate(per_device):
with ops.device(device):
sharded_restores.append(
self._AddRestoreOps(
filename_tensor,
saveables,
restore_sequentially,
reshape,
preferred_shard=shard,
name="restore_shard"))
return control_flow_ops.group(*sharded_restores, name="restore_all")
@staticmethod
def _IsVariable(v):
return isinstance(v, ops.Tensor) and v.op.type in _VARIABLE_OPS
def _GroupByDevices(self, saveables):
"""Group Variable tensor slices per device.
TODO(touts): Make sure that all the devices found are on different
job/replica/task/cpu|gpu. It would be bad if 2 were on the same device.
It can happen if the devices are unspecified.
Args:
saveables: A list of BaseSaverBuilder.SaveableObject objects.
Returns:
A list of tuples: (device_name, BaseSaverBuilder.SaveableObject) tuples.
The list is sorted by ascending device_name.
Raises:
ValueError: If the tensors of a saveable are on different devices.
"""
per_device = collections.defaultdict(lambda: [])
for saveable in saveables:
canonical_device = set(
pydev.canonical_name(spec.tensor.device) for spec in saveable.specs)
if len(canonical_device) != 1:
raise ValueError("All tensors of a saveable object must be "
"on the same device: %s" % saveable.name)
per_device[canonical_device.pop()].append(saveable)
return sorted(per_device.items(), key=lambda t: t[0])
  @staticmethod
  def OpListToDict(op_list, convert_variable_to_tensor=True):
    """Create a dictionary of names to operation lists.

    Args:
      op_list: A list, tuple, or set of Variables or SaveableObjects.
      convert_variable_to_tensor: Whether or not to convert single Variables
        with no slice info into Tensors.

    Returns:
      A dictionary of names to the operations that must be saved under
      that name. Variables with save_slice_info are grouped together under the
      same key in no particular order.

    Raises:
      TypeError: If the type of op_list or its elements is not supported.
      ValueError: If at least two saveables share the same name.
    """
    if not isinstance(op_list, (list, tuple, set)):
      raise TypeError("Variables to save should be passed in a dict or a "
                      "list: %s" % op_list)
    # When ResourceVariables are converted to Tensors, read ops are added to the
    # graph. Sorting the op_list ensures that the resulting graph is always
    # constructed in a deterministic way:
    op_list = sorted(op_list, key=lambda x: x.name)
    names_to_saveables = {}
    # pylint: disable=protected-access
    for var in op_list:
      if isinstance(var, BaseSaverBuilder.SaveableObject):
        # Already a SaveableObject: keyed by its own name, used as-is.
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.PartitionedVariable):
        if var.name in names_to_saveables:
          raise ValueError("At least two variables have the same name: %s" %
                           var.name)
        names_to_saveables[var.name] = var
      elif isinstance(var, variables.Variable) and var._save_slice_info:
        # A slice of a partitioned variable: group all slices of the same
        # full variable into a list under the full variable's name.
        name = var._save_slice_info.full_name
        if name in names_to_saveables:
          if not isinstance(names_to_saveables[name], list):
            raise ValueError("Mixing slices and non-slices with the same name: "
                             "%s" % name)
          names_to_saveables[name].append(var)
        else:
          names_to_saveables[name] = [var]
      elif (isinstance(var, checkpointable.CheckpointableBase)
            and not isinstance(var, variables.Variable)):
        # Checkpointable (non-Variable) objects supply saveable factories;
        # expand them (factories may be values or callables) and recurse.
        checkpointable_saveables = [
            (factory() if callable(factory) else factory)
            for factory in var._gather_saveables_for_checkpoint().values()]
        names_to_saveables.update(
            BaseSaverBuilder.OpListToDict(checkpointable_saveables))
      else:
        if context.executing_eagerly():
          if not isinstance(var, resource_variable_ops.ResourceVariable):
            raise ValueError(
                "Can only save/restore ResourceVariables when eager execution "
                "is enabled, type: %s." % type(var))
          # Deduplicate by shared_name; if a *different* object already
          # claimed this name, the two cannot be checkpointed together.
          set_var = names_to_saveables.setdefault(var._shared_name, var)
          if set_var is not var:
            raise ValueError(
                ("Two different ResourceVariable objects with the same "
                 "shared_name '%s' were passed to the Saver. This likely means "
                 "that they were created in different Graphs or isolation "
                 "contexts, and may not be checkpointed together.") %
                (var._shared_name,))
        else:
          if convert_variable_to_tensor:
            if isinstance(var, resource_variable_ops.ResourceVariable):
              var = var._graph_element  # pylint: disable=protected-access
            else:
              var = ops.internal_convert_to_tensor(var, as_ref=True)
            if not BaseSaverBuilder._IsVariable(var):
              raise TypeError("Variable to save is not a Variable: %s" % var)
          # For resource variables the graph element is a ReadVariableOp;
          # key by the op producing the handle so names match the variable.
          if var.op.type == "ReadVariableOp":
            name = var.op.inputs[0].op.name
          else:
            name = var.op.name
          if name in names_to_saveables:
            raise ValueError("At least two variables have the same name: %s" %
                             name)
          names_to_saveables[name] = var
    # pylint: enable=protected-access
    return names_to_saveables
  def _ValidateAndSliceInputs(self, names_to_saveables):
    """Returns the variables and names that will be used for a Saver.

    Args:
      names_to_saveables: A dict (k, v) where k is the name of an operation and
        v is an operation to save or a BaseSaverBuilder.Saver.

    Returns:
      A list of BaseSaverBuilder.SaveableObject objects.

    Raises:
      TypeError: If any of the keys are not strings or any of the
        values are not one of Tensor or Variable or a checkpointable operation.
      ValueError: If the same operation is given in more than one value
        (this also applies to slices of SlicedVariables).
    """
    if not isinstance(names_to_saveables, dict):
      # Lists/tuples/sets are accepted too; normalize them to a dict first.
      names_to_saveables = BaseSaverBuilder.OpListToDict(names_to_saveables)
    saveables = []
    seen_ops = set()
    # Iterate in sorted name order so construction is deterministic.
    for name in sorted(names_to_saveables.keys()):
      if not isinstance(name, six.string_types):
        raise TypeError(
            "names_to_saveables must be a dict mapping string names to "
            "checkpointable operations. Name is not a string: %s" % name)
      op = names_to_saveables[name]
      if isinstance(op, BaseSaverBuilder.SaveableObject):
        self._AddSaveable(saveables, seen_ops, op)
      elif isinstance(op, (list, tuple, variables.PartitionedVariable)):
        if isinstance(op, variables.PartitionedVariable):
          op = list(op)
        # A set of slices.
        slice_name = None
        # pylint: disable=protected-access
        for variable in op:
          if not isinstance(variable, variables.Variable):
            raise ValueError("Slices must all be Variables: %s" % variable)
          if not variable._save_slice_info:
            raise ValueError("Slices must all be slices: %s" % variable)
          if slice_name is None:
            slice_name = variable._save_slice_info.full_name
          elif slice_name != variable._save_slice_info.full_name:
            raise ValueError(
                "Slices must all be from the same tensor: %s != %s" %
                (slice_name, variable._save_slice_info.full_name))
          # Ref-style and resource variables need different saveable types.
          if variable.op.type in ["Variable", "VariableV2",
                                  "AutoReloadVariable"]:
            saveable = BaseSaverBuilder.VariableSaveable(
                variable, variable._save_slice_info.spec, name)
          else:
            saveable = BaseSaverBuilder.ResourceVariableSaveable(
                variable, variable._save_slice_info.spec, name)
          self._AddSaveable(saveables, seen_ops, saveable)
        # pylint: enable=protected-access
      else:
        # A variable or tensor.
        if context.executing_eagerly():
          if not isinstance(op, resource_variable_ops.ResourceVariable):
            raise ValueError("Can only save/restore ResourceVariable eager "
                             "mode is enabled, type: %s." % type(op))
          saveable = BaseSaverBuilder.ResourceVariableSaveable(op, "", name)
        else:
          if isinstance(op, resource_variable_ops.ResourceVariable):
            variable = op._graph_element  # pylint: disable=protected-access
          else:
            variable = ops.internal_convert_to_tensor(op, as_ref=True)
          if not BaseSaverBuilder._IsVariable(variable):
            raise TypeError("names_to_saveables must be a dict mapping string "
                            "names to Tensors/Variables. Not a variable: %s" %
                            variable)
          # Ref-style and resource variables need different saveable types.
          if variable.op.type in ["Variable", "VariableV2",
                                  "AutoReloadVariable"]:
            saveable = BaseSaverBuilder.VariableSaveable(variable, "", name)
          else:
            saveable = BaseSaverBuilder.ResourceVariableSaveable(
                variable, "", name)
        self._AddSaveable(saveables, seen_ops, saveable)
    return saveables
def _AddSaveable(self, saveables, seen_ops, saveable):
"""Adds the saveable to the saveables list.
Args:
saveables: List to append the SaveableObject to.
seen_ops: Set of the ops of the saveables already processed. Used to
check that each saveable is only saved once.
saveable: The saveable.
Raises:
ValueError: If the saveable has already been processed.
"""
if saveable.op in seen_ops:
raise ValueError("The same saveable will be restored with two names: %s" %
saveable.name)
saveables.append(saveable)
seen_ops.add(saveable.op)
def build(self,
names_to_saveables,
reshape=False,
sharded=False,
max_to_keep=5,
keep_checkpoint_every_n_hours=10000.0,
name=None,
restore_sequentially=False,
filename="model"):
"""Builds save/restore graph nodes or runs save/restore in eager mode.
Args:
names_to_saveables: A dictionary mapping name to a Variable or
SaveableObject. Each name will be associated with the
corresponding variable in the checkpoint.
reshape: If True, allow restoring parameters from a checkpoint
that where the parameters have a different shape. This is
only needed when you try to restore from a Dist-Belief checkpoint,
and only some times.
sharded: If True, shard the checkpoints, one per device that has
Variable nodes.
max_to_keep: Maximum number of checkpoints to keep. As new checkpoints
are created, old ones are deleted. If None or 0, no checkpoints are
deleted from the filesystem but only the last one is kept in the
`checkpoint` file. Presently the number is only roughly enforced. For
example in case of restarts more than max_to_keep checkpoints may be
kept.
keep_checkpoint_every_n_hours: How often checkpoints should be kept.
Defaults to 10,000 hours.
name: String. Optional name to use as a prefix when adding operations.
restore_sequentially: A Bool, which if true, causes restore of different
variables to happen sequentially within each device.
filename: If known at graph construction time, filename used for variable
loading/saving. If None, then the default name "model" will be used.
Returns:
A SaverDef proto.
Raises:
TypeError: If 'names_to_saveables' is not a dictionary mapping string
keys to variable Tensors.
ValueError: If any of the keys or values in 'names_to_saveables' is not
unique.
"""
return self._build_internal(
names_to_saveables=names_to_saveables,
reshape=reshape,
sharded=sharded,
max_to_keep=max_to_keep,
keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
name=name,
restore_sequentially=restore_sequentially,
filename=filename)
  def _build_internal(self,
                      names_to_saveables,
                      reshape=False,
                      sharded=False,
                      max_to_keep=5,
                      keep_checkpoint_every_n_hours=10000.0,
                      name=None,
                      restore_sequentially=False,
                      filename="model",
                      build_save=True,
                      build_restore=True):
    """build() with option to only perform save and restore.

    Same contract as build(), plus `build_save`/`build_restore` flags that
    let eager mode construct only one side; in graph mode both must be True.
    """
    if not context.executing_eagerly() and (not build_save or
                                            not build_restore):
      raise ValueError("save and restore operations need to be built together "
                       " when eager execution is not enabled.")
    saveables = self._ValidateAndSliceInputs(names_to_saveables)
    if max_to_keep is None:
      # None is normalized to 0 (keep-all behavior in the SaverDef).
      max_to_keep = 0
    with ops.name_scope(name, "save",
                        [saveable.op for saveable in saveables]) as name:
      # Add the Constant string tensor for the filename.
      filename_tensor = constant_op.constant(filename or "model")
      # Add the save ops.
      if sharded:
        per_device = self._GroupByDevices(saveables)
        if build_save:
          save_tensor = self._AddShardedSaveOps(filename_tensor, per_device)
        if build_restore:
          restore_op = self._AddShardedRestoreOps(filename_tensor, per_device,
                                                  restore_sequentially, reshape)
      else:
        if build_save:
          save_tensor = self._AddSaveOps(filename_tensor, saveables)
        if build_restore:
          restore_op = self._AddRestoreOps(filename_tensor, saveables,
                                           restore_sequentially, reshape)
      # In the following use case, it's possible to have restore_ops be called
      # something else:
      # - Build inference graph and export a meta_graph.
      # - Import the inference meta_graph
      # - Extend the inference graph to a train graph.
      # - Export a new meta_graph.
      # Now the second restore_op will be called "restore_all_1".
      # As such, comment out the assert for now until we know whether supporting
      # such usage model makes sense.
      #
      # assert restore_op.name.endswith("restore_all"), restore_op.name
      if context.executing_eagerly():
        # Store the tensor values to the tensor_names.
        save_tensor_name = save_tensor.numpy() if build_save else ""
        return saver_pb2.SaverDef(
            filename_tensor_name=filename_tensor.numpy(),
            save_tensor_name=save_tensor_name,
            restore_op_name="",
            max_to_keep=max_to_keep,
            sharded=sharded,
            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
            version=self._write_version)
      else:
        # Graph mode: reference the ops by name so the SaverDef can be
        # serialized alongside the graph.
        return saver_pb2.SaverDef(
            filename_tensor_name=filename_tensor.name,
            save_tensor_name=save_tensor.name,
            restore_op_name=restore_op.name,
            max_to_keep=max_to_keep,
            sharded=sharded,
            keep_checkpoint_every_n_hours=keep_checkpoint_every_n_hours,
            version=self._write_version)
class BulkSaverBuilder(BaseSaverBuilder):
  """SaverBuilder with support for bulk restoring multiple saveables."""

  def bulk_restore(self, filename_tensor, saveables, preferred_shard,
                   restore_sequentially):
    """Restores every spec of every saveable with a single restore_v2 op."""
    # Ignored: bulk restore is internally sequential.
    del restore_sequentially
    # Flatten (name, slice_spec, dtype) triples across all saveables, then
    # transpose them into the three parallel sequences restore_v2 expects.
    names, slices, dtypes = zip(
        *[(spec.name, spec.slice_spec, spec.dtype)
          for saveable in saveables
          for spec in saveable.specs])
    # Load all tensors onto CPU 0 for compatibility with existing code.
    with ops.device("cpu:0"):
      return io_ops.restore_v2(filename_tensor, names, slices, dtypes)
def _get_saver_or_default():
  """Returns the saver from SAVERS collection, or creates a default one.

  This method is used by other members of the training module, such as
  `Scaffold`, or `CheckpointSaverHook`.

  Returns:
    `Saver`.

  Raises:
    RuntimeError: If the SAVERS collection already has more than one items.
  """
  collection_key = ops.GraphKeys.SAVERS
  savers = ops.get_collection(collection_key)
  if savers:
    if len(savers) > 1:
      raise RuntimeError(
          "More than one item in collection {}. "
          "Please indicate which one to use by passing it to the constructor.".
          format(collection_key))
    return savers[0]
  # No saver registered yet: create a shared default and register it.
  # (The previous `if saver is not None` guard was dead code -- the Saver
  # constructor either returns an instance or raises -- and has been removed.)
  saver = Saver(sharded=True, allow_empty=True)
  ops.add_to_collection(collection_key, saver)
  return saver
def _GetCheckpointFilename(save_dir, latest_filename):
  """Returns a filename for storing the CheckpointState.

  Args:
    save_dir: The directory for saving and restoring checkpoints.
    latest_filename: Name of the file in 'save_dir' that is used
      to store the CheckpointState.

  Returns:
    The path of the file that contains the CheckpointState proto.
  """
  # Only None selects the default; other falsy values (e.g. "") are kept.
  basename = "checkpoint" if latest_filename is None else latest_filename
  return os.path.join(save_dir, basename)
@tf_export("train.generate_checkpoint_state_proto")
def generate_checkpoint_state_proto(save_dir,
                                    model_checkpoint_path,
                                    all_model_checkpoint_paths=None):
  """Generates a checkpoint state proto.

  Note: `all_model_checkpoint_paths`, when provided, may be mutated in place:
  `model_checkpoint_path` is appended if missing, and relative entries are
  rewritten relative to `save_dir`.

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.

  Returns:
    CheckpointState proto with model_checkpoint_path and
    all_model_checkpoint_paths updated to either absolute paths or
    relative paths to the current save_dir.
  """
  if all_model_checkpoint_paths is None:
    all_model_checkpoint_paths = []
  if (not all_model_checkpoint_paths or
      all_model_checkpoint_paths[-1] != model_checkpoint_path):
    logging.info("%s is not in all_model_checkpoint_paths. Manually adding it.",
                 model_checkpoint_path)
    all_model_checkpoint_paths.append(model_checkpoint_path)
  # Relative paths need to be rewritten to be relative to the "save_dir"
  # if model_checkpoint_path already contains "save_dir".
  if not os.path.isabs(save_dir):
    if not os.path.isabs(model_checkpoint_path):
      model_checkpoint_path = os.path.relpath(model_checkpoint_path, save_dir)
    # enumerate() instead of range(len(...)): same in-place rewrite, clearer.
    for i, p in enumerate(all_model_checkpoint_paths):
      if not os.path.isabs(p):
        all_model_checkpoint_paths[i] = os.path.relpath(p, save_dir)
  coord_checkpoint_proto = CheckpointState(
      model_checkpoint_path=model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths)
  return coord_checkpoint_proto
@tf_export("train.update_checkpoint_state")
def update_checkpoint_state(save_dir,
                            model_checkpoint_path,
                            all_model_checkpoint_paths=None,
                            latest_filename=None):
  """Updates the content of the 'checkpoint' file.

  Thin public wrapper around `_update_checkpoint_state` that rewrites the
  CheckpointState proto file in `save_dir`, always writing the paths as
  given (`save_relative_paths=False`).

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.

  Raises:
    RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
  """
  _update_checkpoint_state(
      save_dir,
      model_checkpoint_path,
      all_model_checkpoint_paths=all_model_checkpoint_paths,
      latest_filename=latest_filename,
      save_relative_paths=False)
def _update_checkpoint_state(save_dir,
                             model_checkpoint_path,
                             all_model_checkpoint_paths=None,
                             latest_filename=None,
                             save_relative_paths=False):
  """Updates the content of the 'checkpoint' file.

  This updates the checkpoint file containing a CheckpointState
  proto.

  Args:
    save_dir: Directory where the model was saved.
    model_checkpoint_path: The checkpoint file.
    all_model_checkpoint_paths: List of strings.  Paths to all not-yet-deleted
      checkpoints, sorted from oldest to newest.  If this is a non-empty list,
      the last element must be equal to model_checkpoint_path.  These paths
      are also saved in the CheckpointState proto.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.
    save_relative_paths: If `True`, will write relative paths to the checkpoint
      state file.

  Raises:
    RuntimeError: If any of the model checkpoint paths conflict with the file
      containing CheckpointState.
  """
  # Writes the "checkpoint" file for the coordinator for later restoration.
  coord_checkpoint_filename = _GetCheckpointFilename(save_dir, latest_filename)
  if save_relative_paths:
    if os.path.isabs(model_checkpoint_path):
      rel_model_checkpoint_path = os.path.relpath(
          model_checkpoint_path, save_dir)
    else:
      rel_model_checkpoint_path = model_checkpoint_path
    rel_all_model_checkpoint_paths = []
    # BUG FIX: `all_model_checkpoint_paths` defaults to None; the old code
    # iterated it unconditionally and raised TypeError in that case. Treat
    # None as empty, matching the non-relative branch (which defers the same
    # normalization to generate_checkpoint_state_proto).
    for p in all_model_checkpoint_paths or []:
      if os.path.isabs(p):
        rel_all_model_checkpoint_paths.append(os.path.relpath(p, save_dir))
      else:
        rel_all_model_checkpoint_paths.append(p)
    ckpt = generate_checkpoint_state_proto(
        save_dir,
        rel_model_checkpoint_path,
        all_model_checkpoint_paths=rel_all_model_checkpoint_paths)
  else:
    ckpt = generate_checkpoint_state_proto(
        save_dir,
        model_checkpoint_path,
        all_model_checkpoint_paths=all_model_checkpoint_paths)
  if coord_checkpoint_filename == ckpt.model_checkpoint_path:
    raise RuntimeError("Save path '%s' conflicts with path used for "
                       "checkpoint state.  Please use a different save path." %
                       model_checkpoint_path)
  # Preventing potential read/write race condition by *atomically* writing to a
  # file.
  file_io.atomic_write_string_to_file(coord_checkpoint_filename,
                                      text_format.MessageToString(ckpt))
@tf_export("train.get_checkpoint_state")
def get_checkpoint_state(checkpoint_dir, latest_filename=None):
  """Returns CheckpointState proto from the "checkpoint" file.

  If the "checkpoint" file contains a valid CheckpointState
  proto, returns it.

  Args:
    checkpoint_dir: The directory of checkpoints.
    latest_filename: Optional name of the checkpoint file.  Default to
      'checkpoint'.

  Returns:
    A CheckpointState if the state was available, None
    otherwise.

  Raises:
    ValueError: if the checkpoint read doesn't have model_checkpoint_path set.
  """
  ckpt = None
  coord_checkpoint_filename = _GetCheckpointFilename(checkpoint_dir,
                                                     latest_filename)
  try:
    # Check that the file exists before opening it to avoid
    # many lines of errors from colossus in the logs.
    if file_io.file_exists(coord_checkpoint_filename):
      file_content = file_io.read_file_to_string(
          coord_checkpoint_filename)
      ckpt = CheckpointState()
      text_format.Merge(file_content, ckpt)
      if not ckpt.model_checkpoint_path:
        # BUG FIX: the message used a spurious comma ("%s", checkpoint_dir),
        # so the placeholder was never interpolated; use % formatting.
        raise ValueError("Invalid checkpoint state loaded from %s" %
                         checkpoint_dir)
      # For relative model_checkpoint_path and all_model_checkpoint_paths,
      # prepend checkpoint_dir.
      if not os.path.isabs(ckpt.model_checkpoint_path):
        ckpt.model_checkpoint_path = os.path.join(checkpoint_dir,
                                                  ckpt.model_checkpoint_path)
      for i, p in enumerate(ckpt.all_model_checkpoint_paths):
        if not os.path.isabs(p):
          ckpt.all_model_checkpoint_paths[i] = os.path.join(checkpoint_dir, p)
  except errors.OpError as e:
    # It's ok if the file cannot be read
    logging.warning("%s: %s", type(e).__name__, e)
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  except text_format.ParseError as e:
    logging.warning("%s: %s", type(e).__name__, e)
    logging.warning("%s: Checkpoint ignored", coord_checkpoint_filename)
    return None
  # NOTE: the old `f = None ... finally: f.close()` bookkeeping was dead code
  # (nothing ever assigned `f`; file_io handles the file internally) and has
  # been removed.
  return ckpt
@tf_export("train.Saver")
class Saver(object):
"""Saves and restores variables.
See @{$variables$Variables}
for an overview of variables, saving and restoring.
The `Saver` class adds ops to save and restore variables to and from
*checkpoints*. It also provides convenience methods to run these ops.
Checkpoints are binary files in a proprietary format which map variable names
to tensor values. The best way to examine the contents of a checkpoint is to
load it using a `Saver`.
Savers can automatically number checkpoint filenames with a provided counter.
This lets you keep multiple checkpoints at different steps while training a
model. For example you can number the checkpoint filenames with the training
step number. To avoid filling up disks, savers manage checkpoint files
automatically. For example, they can keep only the N most recent files, or
one checkpoint for every N hours of training.
You number checkpoint filenames by passing a value to the optional
`global_step` argument to `save()`:
```python
saver.save(sess, 'my-model', global_step=0) ==> filename: 'my-model-0'
...
saver.save(sess, 'my-model', global_step=1000) ==> filename: 'my-model-1000'
```
Additionally, optional arguments to the `Saver()` constructor let you control
the proliferation of checkpoint files on disk:
* `max_to_keep` indicates the maximum number of recent checkpoint files to
keep. As new files are created, older files are deleted. If None or 0,
no checkpoints are deleted from the filesystem but only the last one is
kept in the `checkpoint` file. Defaults to 5 (that is, the 5 most recent
checkpoint files are kept.)
* `keep_checkpoint_every_n_hours`: In addition to keeping the most recent
`max_to_keep` checkpoint files, you might want to keep one checkpoint file
for every N hours of training. This can be useful if you want to later
analyze how a model progressed during a long training session. For
example, passing `keep_checkpoint_every_n_hours=2` ensures that you keep
one checkpoint file for every 2 hours of training. The default value of
10,000 hours effectively disables the feature.
Note that you still have to call the `save()` method to save the model.
Passing these arguments to the constructor will not save variables
automatically for you.
A training program that saves regularly looks like:
```python
...
# Create a saver.
saver = tf.train.Saver(...variables...)
# Launch the graph and train, saving the model every 1,000 steps.
sess = tf.Session()
for step in xrange(1000000):
sess.run(..training_op..)
if step % 1000 == 0:
# Append the step number to the checkpoint name:
saver.save(sess, 'my-model', global_step=step)
```
In addition to checkpoint files, savers keep a protocol buffer on disk with
the list of recent checkpoints. This is used to manage numbered checkpoint
files and by `latest_checkpoint()`, which makes it easy to discover the path
to the most recent checkpoint. That protocol buffer is stored in a file named
'checkpoint' next to the checkpoint files.
If you create several savers, you can specify a different filename for the
protocol buffer file in the call to `save()`.
"""
  def __init__(self,
               var_list=None,
               reshape=False,
               sharded=False,
               max_to_keep=5,
               keep_checkpoint_every_n_hours=10000.0,
               name=None,
               restore_sequentially=False,
               saver_def=None,
               builder=None,
               defer_build=False,
               allow_empty=False,
               write_version=saver_pb2.SaverDef.V2,
               pad_step_number=False,
               save_relative_paths=False,
               filename=None):
    """Creates a `Saver`.

    The constructor adds ops to save and restore variables.

    `var_list` specifies the variables that will be saved and restored. It can
    be passed as a `dict` or a list:

    * A `dict` of names to variables: The keys are the names that will be
      used to save or restore the variables in the checkpoint files.
    * A list of variables: The variables will be keyed with their op name in
      the checkpoint files.

    For example:

    ```python
    v1 = tf.Variable(..., name='v1')
    v2 = tf.Variable(..., name='v2')

    # Pass the variables as a dict:
    saver = tf.train.Saver({'v1': v1, 'v2': v2})

    # Or pass them as a list.
    saver = tf.train.Saver([v1, v2])
    # Passing a list is equivalent to passing a dict with the variable op names
    # as keys:
    saver = tf.train.Saver({v.op.name: v for v in [v1, v2]})
    ```

    The optional `reshape` argument, if `True`, allows restoring a variable from
    a save file where the variable had a different shape, but the same number
    of elements and type. This is useful if you have reshaped a variable and
    want to reload it from an older checkpoint.

    The optional `sharded` argument, if `True`, instructs the saver to shard
    checkpoints per device.

    Args:
      var_list: A list of `Variable`/`SaveableObject`, or a dictionary mapping
        names to `SaveableObject`s. If `None`, defaults to the list of all
        saveable objects.
      reshape: If `True`, allows restoring parameters from a checkpoint
        where the variables have a different shape.
      sharded: If `True`, shard the checkpoints, one per device.
      max_to_keep: Maximum number of recent checkpoints to keep.
        Defaults to 5.
      keep_checkpoint_every_n_hours: How often to keep checkpoints.
        Defaults to 10,000 hours.
      name: String.  Optional name to use as a prefix when adding operations.
      restore_sequentially: A `Bool`, which if true, causes restore of different
        variables to happen sequentially within each device.  This can lower
        memory usage when restoring very large models.
      saver_def: Optional `SaverDef` proto to use instead of running the
        builder. This is only useful for specialty code that wants to recreate
        a `Saver` object for a previously built `Graph` that had a `Saver`.
        The `saver_def` proto should be the one returned by the
        `as_saver_def()` call of the `Saver` that was created for that `Graph`.
      builder: Optional `SaverBuilder` to use if a `saver_def` was not provided.
        Defaults to `BulkSaverBuilder()`.
      defer_build: If `True`, defer adding the save and restore ops to the
        `build()` call. In that case `build()` should be called before
        finalizing the graph or using the saver.
      allow_empty: If `False` (default) raise an error if there are no
        variables in the graph. Otherwise, construct the saver anyway and make
        it a no-op.
      write_version: controls what format to use when saving checkpoints.  It
        also affects certain filepath matching logic.  The V2 format is the
        recommended choice: it is much more optimized than V1 in terms of
        memory required and latency incurred during restore.  Regardless of
        this flag, the Saver is able to restore from both V2 and V1 checkpoints.
      pad_step_number: if True, pads the global step number in the checkpoint
        filepaths to some fixed width (8 by default).  This is turned off by
        default.
      save_relative_paths: If `True`, will write relative paths to the
        checkpoint state file. This is needed if the user wants to copy the
        checkpoint directory and reload from the copied directory.
      filename: If known at graph construction time, filename used for variable
        loading/saving.

    Raises:
      TypeError: If `var_list` is invalid.
      ValueError: If any of the keys or values in `var_list` are not unique.
      RuntimeError: If eager execution is enabled and `var_list` does not
        specify a list of variables to save.

    @compatibility(eager)
    When eager execution is enabled, `var_list` must specify a `list` or `dict`
    of variables to save. Otherwise, a `RuntimeError` will be raised.
    @end_compatibility
    """
    if defer_build and var_list:
      raise ValueError(
          "If `var_list` is provided then build cannot be deferred. "
          "Either set defer_build=False or var_list=None.")
    if context.executing_eagerly() and var_list is None:
      raise RuntimeError(
          "When eager execution is enabled, `var_list` must specify a list or "
          "dict of variables to save")
    # Stash construction parameters; _build() consumes them later.
    self._var_list = var_list
    self._reshape = reshape
    self._sharded = sharded
    self._max_to_keep = max_to_keep
    self._keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
    self._name = name
    self._restore_sequentially = restore_sequentially
    self.saver_def = saver_def
    self._builder = builder
    self._is_built = False
    self._allow_empty = allow_empty
    self._is_empty = None
    self._write_version = write_version
    self._pad_step_number = pad_step_number
    self._filename = filename
    self._last_checkpoints = []
    self._checkpoints_to_be_deleted = []
    if context.executing_eagerly():
      # Eager mode has no graph build step, so the "keep every N hours"
      # clock starts now (graph mode sets it in _build()).
      self._next_checkpoint_time = (
          time.time() + self._keep_checkpoint_every_n_hours * 3600)
    elif not defer_build:
      self.build()
    if self.saver_def:
      # Either passed in, or just produced by build(): validate it and adopt
      # its checkpoint format version.
      self._check_saver_def()
      self._write_version = self.saver_def.version
    self._save_relative_paths = save_relative_paths
  def build(self):
    """Builds saver_def with both save and restore ops (graph mode only).

    Raises:
      RuntimeError: If eager execution is enabled; use `save()`/`restore()`
        directly in that case, which build on demand via `_build_eager()`.
    """
    if context.executing_eagerly():
      raise RuntimeError("Use save/restore instead of build in eager mode.")
    self._build(self._filename, build_save=True, build_restore=True)
  def _build_eager(self, checkpoint_path, build_save, build_restore):
    """Builds the requested save/restore ops for eager execution.

    Args:
      checkpoint_path: Path used as the filename for saving/restoring.
      build_save: If `True`, builds the save op.
      build_restore: If `True`, builds the restore op.
    """
    self._build(
        checkpoint_path, build_save=build_save, build_restore=build_restore)
  def _build(self, checkpoint_path, build_save, build_restore):
    """Builds saver_def.

    Args:
      checkpoint_path: Filename baked into the generated save/restore ops.
      build_save: If `True`, builds the save op.
      build_restore: If `True`, builds the restore op.

    Raises:
      ValueError: If there are no variables to save and `allow_empty` is False.
    """
    if not context.executing_eagerly():
      # Graph mode builds at most once; eager mode rebuilds on every call.
      if self._is_built:
        return
      self._is_built = True
    if not self.saver_def or context.executing_eagerly():
      if self._builder is None:
        self._builder = BulkSaverBuilder(self._write_version)
      if self._var_list is None:
        # Default to all saveable objects in the graph.
        # pylint: disable=protected-access
        self._var_list = variables._all_saveable_objects()
      if not self._var_list:
        if self._allow_empty:
          self._is_empty = True
          return
        else:
          raise ValueError("No variables to save")
      self._is_empty = False
      self.saver_def = self._builder._build_internal(  # pylint: disable=protected-access
          self._var_list,
          reshape=self._reshape,
          sharded=self._sharded,
          max_to_keep=self._max_to_keep,
          keep_checkpoint_every_n_hours=self._keep_checkpoint_every_n_hours,
          name=self._name,
          restore_sequentially=self._restore_sequentially,
          filename=checkpoint_path,
          build_save=build_save, build_restore=build_restore)
    elif self.saver_def and self._name:
      # Since self._name is used as a name_scope by builder(), we are
      # overloading the use of this field to represent the "import_scope" as
      # well.
      self.saver_def.filename_tensor_name = ops.prepend_name_scope(
          self.saver_def.filename_tensor_name, self._name)
      self.saver_def.save_tensor_name = ops.prepend_name_scope(
          self.saver_def.save_tensor_name, self._name)
      self.saver_def.restore_op_name = ops.prepend_name_scope(
          self.saver_def.restore_op_name, self._name)
    self._check_saver_def()
    if not context.executing_eagerly():
      # Updates next checkpoint time.
      # Set in __init__ when executing eagerly.
      self._next_checkpoint_time = (
          time.time() + self.saver_def.keep_checkpoint_every_n_hours * 3600)
def _check_saver_def(self):
if not isinstance(self.saver_def, saver_pb2.SaverDef):
raise ValueError("saver_def must be a saver_pb2.SaverDef: %s" %
self.saver_def)
if not context.executing_eagerly():
if not self.saver_def.save_tensor_name:
raise ValueError("saver_def must specify the save_tensor_name: %s" %
str(self.saver_def))
if not self.saver_def.restore_op_name:
raise ValueError("saver_def must specify the restore_op_name: %s" %
str(self.saver_def))
def _CheckpointFilename(self, p):
"""Returns the checkpoint filename given a `(filename, time)` pair.
Args:
p: (filename, time) pair.
Returns:
Checkpoint file name.
"""
name, _ = p
return name
def _MetaGraphFilename(self, checkpoint_filename, meta_graph_suffix="meta"):
"""Returns the meta graph filename.
Args:
checkpoint_filename: Name of the checkpoint file.
meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
Returns:
MetaGraph file name.
"""
# If the checkpoint_filename is sharded, the checkpoint_filename could
# be of format model.ckpt-step#-?????-of-shard#. For example,
# model.ckpt-123456-?????-of-00005, or model.ckpt-123456-00001-of-00002.
basename = re.sub(r"-[\d\?]+-of-\d+$", "", checkpoint_filename)
meta_graph_filename = ".".join([basename, meta_graph_suffix])
return meta_graph_filename
  def _RecordLastCheckpoint(self, latest_save_path):
    """Manages the list of the latest checkpoints.

    Appends `(latest_save_path, now)` to `self._last_checkpoints`, dropping
    any earlier entry with the same filename, and moves the oldest entry to
    `self._checkpoints_to_be_deleted` once the list exceeds `max_to_keep`.

    Args:
      latest_save_path: Path prefix of the checkpoint that was just written.
    """
    # A falsy max_to_keep disables checkpoint bookkeeping entirely.
    if not self.saver_def.max_to_keep:
      return
    # Remove first from list if the same name was used before.
    for p in self._last_checkpoints:
      if latest_save_path == self._CheckpointFilename(p):
        self._last_checkpoints.remove(p)
    # Append new path to list
    self._last_checkpoints.append((latest_save_path, time.time()))
    # If more than max_to_keep, remove oldest.
    if len(self._last_checkpoints) > self.saver_def.max_to_keep:
      self._checkpoints_to_be_deleted.append(self._last_checkpoints.pop(0))
  def _MaybeDeleteOldCheckpoints(self, meta_graph_suffix="meta"):
    """Deletes old checkpoints if necessary.

    `self._checkpoints_to_be_deleted` is going to contain checkpoints that are
    over `max_to_keep`.  They are going to be deleted.  If
    `keep_checkpoint_every_n_hours` was specified, keep an additional checkpoint
    every `N` hours. For example, if `N` is 0.5, an additional checkpoint is
    kept for every 0.5 hours of training; if `N` is 10, an additional
    checkpoint is kept for every 10 hours of training.

    Args:
      meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
    """
    # At most one checkpoint is processed per call (pop(0), not a loop).
    if self._checkpoints_to_be_deleted:
      p = self._checkpoints_to_be_deleted.pop(0)
      # Do not delete the file if keep_checkpoint_every_n_hours is set and we
      # have reached N hours of training since the last kept checkpoint.
      should_keep = p[1] > self._next_checkpoint_time
      if should_keep:
        self._next_checkpoint_time += (
            self.saver_def.keep_checkpoint_every_n_hours * 3600)
        return

      # Otherwise delete the files.
      try:
        checkpoint_prefix = self._CheckpointFilename(p)
        self._delete_file_if_exists(
            self._MetaGraphFilename(checkpoint_prefix, meta_graph_suffix))
        if self.saver_def.version == saver_pb2.SaverDef.V2:
          # V2 has a metadata file and some data files.
          self._delete_file_if_exists(checkpoint_prefix + ".index")
          self._delete_file_if_exists(checkpoint_prefix +
                                      ".data-?????-of-?????")
        else:
          # V1, Legacy.  Exact match on the data file.
          self._delete_file_if_exists(checkpoint_prefix)
      # Deletion is best-effort; a failure must not abort the save that
      # triggered it.
      except Exception as e:  # pylint: disable=broad-except
        logging.warning("Ignoring: %s", str(e))
def _delete_file_if_exists(self, filespec):
for pathname in file_io.get_matching_files(filespec):
file_io.delete_file(pathname)
  def as_saver_def(self):
    """Generates a `SaverDef` representation of this saver.

    Returns:
      A `SaverDef` proto (the saver's current `saver_def`, not a copy).
    """
    return self.saver_def
def to_proto(self, export_scope=None):
"""Converts this `Saver` to a `SaverDef` protocol buffer.
Args:
export_scope: Optional `string`. Name scope to remove.
Returns:
A `SaverDef` protocol buffer.
"""
if export_scope is None:
return self.saver_def
if not (self.saver_def.filename_tensor_name.startswith(export_scope) and
self.saver_def.save_tensor_name.startswith(export_scope) and
self.saver_def.restore_op_name.startswith(export_scope)):
return None
saver_def = saver_pb2.SaverDef()
saver_def.CopyFrom(self.saver_def)
saver_def.filename_tensor_name = ops.strip_name_scope(
saver_def.filename_tensor_name, export_scope)
saver_def.save_tensor_name = ops.strip_name_scope(
saver_def.save_tensor_name, export_scope)
saver_def.restore_op_name = ops.strip_name_scope(
saver_def.restore_op_name, export_scope)
return saver_def
  @staticmethod
  def from_proto(saver_def, import_scope=None):
    """Returns a `Saver` object created from `saver_def`.

    Args:
      saver_def: a `SaverDef` protocol buffer.
      import_scope: Optional `string`. Name scope to use.

    Returns:
      A `Saver` built from saver_def.
    """
    return Saver(saver_def=saver_def, name=import_scope)
@property
def last_checkpoints(self):
"""List of not-yet-deleted checkpoint filenames.
You can pass any of the returned values to `restore()`.
Returns:
A list of checkpoint filenames, sorted from oldest to newest.
"""
return list(self._CheckpointFilename(p) for p in self._last_checkpoints)
def set_last_checkpoints(self, last_checkpoints):
"""DEPRECATED: Use set_last_checkpoints_with_time.
Sets the list of old checkpoint filenames.
Args:
last_checkpoints: A list of checkpoint filenames.
Raises:
AssertionError: If last_checkpoints is not a list.
"""
assert isinstance(last_checkpoints, list)
# We use a timestamp of +inf so that this checkpoint will never be
# deleted. This is both safe and backwards compatible to a previous
# version of the code which used s[1] as the "timestamp".
self._last_checkpoints = [(s, np.inf) for s in last_checkpoints]
  def set_last_checkpoints_with_time(self, last_checkpoints_with_time):
    """Sets the list of old checkpoint filenames and timestamps.

    Args:
      last_checkpoints_with_time: A list of tuples of checkpoint filenames and
        timestamps.

    Raises:
      AssertionError: If last_checkpoints_with_time is not a list.
    """
    assert isinstance(last_checkpoints_with_time, list)
    self._last_checkpoints = last_checkpoints_with_time
  def recover_last_checkpoints(self, checkpoint_paths):
    """Recovers the internal saver state after a crash.

    This method is useful for recovering the "self._last_checkpoints" state.

    Globs for the checkpoints pointed to by `checkpoint_paths`.  If the files
    exist, use their mtime as the checkpoint timestamp.

    Args:
      checkpoint_paths: a list of checkpoint paths.
    """
    mtimes = get_checkpoint_mtimes(checkpoint_paths)
    self.set_last_checkpoints_with_time(list(zip(checkpoint_paths, mtimes)))
  def save(self,
           sess,
           save_path,
           global_step=None,
           latest_filename=None,
           meta_graph_suffix="meta",
           write_meta_graph=True,
           write_state=True,
           strip_default_attrs=False):
    # pylint: disable=line-too-long
    """Saves variables.

    This method runs the ops added by the constructor for saving variables.
    It requires a session in which the graph was launched.  The variables to
    save must also have been initialized.

    The method returns the path prefix of the newly created checkpoint files.
    This string can be passed directly to a call to `restore()`.

    Args:
      sess: A Session to use to save the variables.
      save_path: String.  Prefix of filenames created for the checkpoint.
      global_step: If provided the global step number is appended to
        `save_path` to create the checkpoint filenames. The optional argument
        can be a `Tensor`, a `Tensor` name or an integer.
      latest_filename: Optional name for the protocol buffer file that will
        contain the list of most recent checkpoints.  That file,
        kept in the same directory as the checkpoint files, is automatically
        managed by the saver to keep track of recent checkpoints.  Defaults to
        'checkpoint'.
      meta_graph_suffix: Suffix for `MetaGraphDef` file. Defaults to 'meta'.
      write_meta_graph: `Boolean` indicating whether or not to write the meta
        graph file.
      write_state: `Boolean` indicating whether or not to write the
        `CheckpointStateProto`.
      strip_default_attrs: Boolean. If `True`, default-valued attributes will be
        removed from the NodeDefs. For a detailed guide, see
        [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).

    Returns:
      A string: path prefix used for the checkpoint files.  If the saver is
        sharded, this string ends with: '-?????-of-nnnnn' where 'nnnnn'
        is the number of shards created.
      If the saver is empty, returns None.

    Raises:
      TypeError: If `sess` is not a `Session`.
      ValueError: If `latest_filename` contains path components, or if it
        collides with `save_path`.
      RuntimeError: If save and restore ops weren't built.
    """
    # pylint: enable=line-too-long
    if not self._is_built and not context.executing_eagerly():
      raise RuntimeError(
          "`build()` should be called before save if defer_build==True")
    if latest_filename is None:
      latest_filename = "checkpoint"
    if self._write_version != saver_pb2.SaverDef.V2:
      logging.warning("*******************************************************")
      logging.warning("TensorFlow's V1 checkpoint format has been deprecated.")
      logging.warning("Consider switching to the more efficient V2 format:")
      logging.warning("   `tf.train.Saver(write_version=tf.train.SaverDef.V2)`")
      logging.warning("now on by default.")
      logging.warning("*******************************************************")
    if os.path.split(latest_filename)[0]:
      raise ValueError("'latest_filename' must not contain path components")

    # Derive the concrete checkpoint file name from save_path/global_step.
    if global_step is not None:
      if not isinstance(global_step, compat.integral_types):
        global_step = training_util.global_step(sess, global_step)
      checkpoint_file = "%s-%d" % (save_path, global_step)
      if self._pad_step_number:
        # Zero-pads the step numbers, so that they are sorted when listed.
        checkpoint_file = "%s-%s" % (save_path, "{:08d}".format(global_step))
    else:
      checkpoint_file = save_path
      if os.path.basename(
          save_path) == latest_filename and not self._sharded:
        # Guard against collision between data file and checkpoint state file.
        raise ValueError(
            "'latest_filename' collides with 'save_path': '%s' and '%s'" %
            (latest_filename, save_path))

    if (not context.executing_eagerly() and
        not isinstance(sess, session.SessionInterface)):
      raise TypeError("'sess' must be a Session; %s" % sess)

    save_path_parent = os.path.dirname(save_path)
    if not self._is_empty:
      try:
        # Run (or, eagerly, build-and-run) the save op to write the files.
        if context.executing_eagerly():
          self._build_eager(
              checkpoint_file, build_save=True, build_restore=False)
          model_checkpoint_path = self.saver_def.save_tensor_name
        else:
          model_checkpoint_path = sess.run(
              self.saver_def.save_tensor_name,
              {self.saver_def.filename_tensor_name: checkpoint_file})

        model_checkpoint_path = compat.as_str(model_checkpoint_path)
        if write_state:
          # Update the "checkpoint" state file and prune old checkpoints.
          self._RecordLastCheckpoint(model_checkpoint_path)
          _update_checkpoint_state(
              save_dir=save_path_parent,
              model_checkpoint_path=model_checkpoint_path,
              all_model_checkpoint_paths=self.last_checkpoints,
              latest_filename=latest_filename,
              save_relative_paths=self._save_relative_paths)
          self._MaybeDeleteOldCheckpoints(meta_graph_suffix=meta_graph_suffix)
      except (errors.FailedPreconditionError, errors.NotFoundError) as exc:
        # A missing parent directory is the common cause; report it clearly.
        if not gfile.IsDirectory(save_path_parent):
          exc = ValueError(
              "Parent directory of {} doesn't exist, can't save.".format(
                  save_path))
        raise exc

    if write_meta_graph:
      meta_graph_filename = self._MetaGraphFilename(
          checkpoint_file, meta_graph_suffix=meta_graph_suffix)
      if not context.executing_eagerly():
        with sess.graph.as_default():
          self.export_meta_graph(
              meta_graph_filename, strip_default_attrs=strip_default_attrs)

    if self._is_empty:
      return None
    else:
      return model_checkpoint_path
  def export_meta_graph(self,
                        filename=None,
                        collection_list=None,
                        as_text=False,
                        export_scope=None,
                        clear_devices=False,
                        clear_extraneous_savers=False,
                        strip_default_attrs=False):
    # pylint: disable=line-too-long
    """Writes `MetaGraphDef` to save_path/filename.

    Args:
      filename: Optional meta_graph filename including the path.
      collection_list: List of string keys to collect.
      as_text: If `True`, writes the meta_graph as an ASCII proto.
      export_scope: Optional `string`. Name scope to remove.
      clear_devices: Whether or not to clear the device field for an `Operation`
        or `Tensor` during export.
      clear_extraneous_savers: Remove any Saver-related information from the
        graph (both Save/Restore ops and SaverDefs) that are not associated
        with this Saver.
      strip_default_attrs: Boolean. If `True`, default-valued attributes will be
        removed from the NodeDefs. For a detailed guide, see
        [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).

    Returns:
      A `MetaGraphDef` proto.
    """
    # pylint: enable=line-too-long
    # Delegates to the module-level export_meta_graph(), attaching this
    # saver's saver_def and the default graph's GraphDef (with shapes).
    return export_meta_graph(
        filename=filename,
        graph_def=ops.get_default_graph().as_graph_def(add_shapes=True),
        saver_def=self.saver_def,
        collection_list=collection_list,
        as_text=as_text,
        export_scope=export_scope,
        clear_devices=clear_devices,
        clear_extraneous_savers=clear_extraneous_savers,
        strip_default_attrs=strip_default_attrs)
def restore(self, sess, save_path):
"""Restores previously saved variables.
This method runs the ops added by the constructor for restoring variables.
It requires a session in which the graph was launched. The variables to
restore do not have to have been initialized, as restoring is itself a way
to initialize variables.
The `save_path` argument is typically a value previously returned from a
`save()` call, or a call to `latest_checkpoint()`.
Args:
sess: A `Session` to use to restore the parameters. None in eager mode.
save_path: Path where parameters were previously saved.
Raises:
ValueError: If save_path is None.
"""
if self._is_empty:
return
if save_path is None:
raise ValueError("Can't load save_path when it is None.")
logging.info("Restoring parameters from %s", save_path)
if context.executing_eagerly():
self._build_eager(save_path, build_save=False, build_restore=True)
else:
sess.run(self.saver_def.restore_op_name,
{self.saver_def.filename_tensor_name: save_path})
  @staticmethod
  def _add_collection_def(meta_graph_def, key, export_scope=None):
    """Adds a collection to MetaGraphDef protocol buffer.

    Args:
      meta_graph_def: MetaGraphDef protocol buffer.
      key: One of the GraphKeys or user-defined string.
      export_scope: Optional `string`. Name scope to remove.
    """
    # Thin wrapper around meta_graph.add_collection_def.
    meta_graph.add_collection_def(meta_graph_def, key,
                                  export_scope=export_scope)
def _prefix_to_checkpoint_path(prefix, format_version):
  """Returns the pathname of a checkpoint file, given the checkpoint prefix.

  For V1 checkpoint, simply returns the prefix itself (the data file). For V2,
  returns the pathname to the index file.

  Args:
    prefix: a string, the prefix of a checkpoint.
    format_version: the checkpoint format version that corresponds to the
      prefix.

  Returns:
    The pathname of a checkpoint file, taking into account the checkpoint
    format version.
  """
  # The index file identifies a V2 checkpoint; a V1 checkpoint is identified
  # by its data file, which is the prefix itself.
  suffix = ".index" if format_version == saver_pb2.SaverDef.V2 else ""
  return prefix + suffix
@tf_export("train.latest_checkpoint")
def latest_checkpoint(checkpoint_dir, latest_filename=None):
  """Finds the filename of latest saved checkpoint file.

  Args:
    checkpoint_dir: Directory where the variables were saved.
    latest_filename: Optional name for the protocol buffer file that
      contains the list of most recent checkpoint filenames.
      See the corresponding argument to `Saver.save()`.

  Returns:
    The full path to the latest checkpoint or `None` if no checkpoint was found.
  """
  # Pick the latest checkpoint based on checkpoint state.
  ckpt = get_checkpoint_state(checkpoint_dir, latest_filename)
  if not ckpt or not ckpt.model_checkpoint_path:
    return None
  # Look for either a V2 path or a V1 path, with priority for V2.
  v2_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
                                       saver_pb2.SaverDef.V2)
  v1_path = _prefix_to_checkpoint_path(ckpt.model_checkpoint_path,
                                       saver_pb2.SaverDef.V1)
  if file_io.get_matching_files(v2_path) or file_io.get_matching_files(v1_path):
    return ckpt.model_checkpoint_path
  logging.error("Couldn't match files for checkpoint %s",
                ckpt.model_checkpoint_path)
  return None
@tf_export("train.import_meta_graph")
def import_meta_graph(meta_graph_or_file, clear_devices=False,
                      import_scope=None, **kwargs):
  """Recreates a Graph saved in a `MetaGraphDef` proto.

  This function takes a `MetaGraphDef` protocol buffer as input. If
  the argument is a file containing a `MetaGraphDef` protocol buffer,
  it constructs a protocol buffer from the file content. The function
  then adds all the nodes from the `graph_def` field to the
  current graph, recreates all the collections, and returns a saver
  constructed from the `saver_def` field.

  In combination with `export_meta_graph()`, this function can be used to

  * Serialize a graph along with other Python objects such as `QueueRunner`,
    `Variable` into a `MetaGraphDef`.

  * Restart training from a saved graph and checkpoints.

  * Run inference from a saved graph and checkpoints.

  ```Python
  ...
  # Create a saver.
  saver = tf.train.Saver(...variables...)
  # Remember the training_op we want to run by adding it to a collection.
  tf.add_to_collection('train_op', train_op)
  sess = tf.Session()
  for step in xrange(1000000):
      sess.run(train_op)
      if step % 1000 == 0:
          # Saves checkpoint, which by default also exports a meta_graph
          # named 'my-model-global_step.meta'.
          saver.save(sess, 'my-model', global_step=step)
  ```

  Later we can continue training from this saved `meta_graph` without building
  the model from scratch.

  ```Python
  with tf.Session() as sess:
    new_saver = tf.train.import_meta_graph('my-save-dir/my-model-10000.meta')
    new_saver.restore(sess, 'my-save-dir/my-model-10000')
    # tf.get_collection() returns a list. In this example we only want the
    # first one.
    train_op = tf.get_collection('train_op')[0]
    for step in xrange(1000000):
      sess.run(train_op)
  ```

  NOTE: Restarting training from saved `meta_graph` only works if the
  device assignments have not changed.

  Args:
    meta_graph_or_file: `MetaGraphDef` protocol buffer or filename (including
      the path) containing a `MetaGraphDef`.
    clear_devices: Whether or not to clear the device field for an `Operation`
      or `Tensor` during import.
    import_scope: Optional `string`. Name scope to add. Only used when
      initializing from protocol buffer.
    **kwargs: Optional keyed arguments.

  Returns:
    A saver constructed from `saver_def` in `MetaGraphDef` or None.

    A None value is returned if no variables exist in the `MetaGraphDef`
    (i.e., there are no variables to restore).

  Raises:
    RuntimeError: If called with eager execution enabled.

  @compatibility(eager)
  Exporting/importing meta graphs is not supported. No graph exists when eager
  execution is enabled.
  @end_compatibility
  """  # pylint: disable=g-doc-exception
  if context.executing_eagerly():
    raise RuntimeError("Exporting/importing meta graphs is not supported when "
                       "eager execution is enabled. No graph exists when eager "
                       "execution is enabled.")
  if not isinstance(meta_graph_or_file, meta_graph_pb2.MetaGraphDef):
    meta_graph_def = meta_graph.read_meta_graph_file(meta_graph_or_file)
  else:
    meta_graph_def = meta_graph_or_file

  imported_vars = meta_graph.import_scoped_meta_graph(
      meta_graph_def,
      clear_devices=clear_devices,
      import_scope=import_scope,
      **kwargs)

  if meta_graph_def.HasField("saver_def"):
    # Infer the scope that is prepended by `import_scoped_meta_graph`.
    scope = import_scope
    var_names = list(imported_vars.keys())
    if var_names:
      # The imported variable's full name is "<prepended scope>/<key>", so the
      # prepended scope is the name minus the key's length.
      sample_key = var_names[0]
      sample_var = imported_vars[sample_key]
      scope = sample_var.name[:-len(sample_key)]

    return Saver(saver_def=meta_graph_def.saver_def, name=scope)
  else:
    if variables._all_saveable_objects():  # pylint: disable=protected-access
      # Return the default saver instance for all graph variables.
      return Saver()
    else:
      # If no graph variables exist, then a Saver cannot be constructed.
      logging.info("Saver not created because there are no variables in the"
                   " graph to restore")
      return None
@tf_export("train.export_meta_graph")
def export_meta_graph(filename=None,
                      meta_info_def=None,
                      graph_def=None,
                      saver_def=None,
                      collection_list=None,
                      as_text=False,
                      graph=None,
                      export_scope=None,
                      clear_devices=False,
                      clear_extraneous_savers=False,
                      strip_default_attrs=False,
                      **kwargs):
  # pylint: disable=line-too-long
  """Returns `MetaGraphDef` proto. Optionally writes it to filename.

  This function exports the graph, saver, and collection objects into
  `MetaGraphDef` protocol buffer with the intention of it being imported
  at a later time or location to restart training, run inference, or be
  a subgraph.

  Args:
    filename: Optional filename including the path for writing the
      generated `MetaGraphDef` protocol buffer.
    meta_info_def: `MetaInfoDef` protocol buffer.
    graph_def: `GraphDef` protocol buffer.
    saver_def: `SaverDef` protocol buffer.
    collection_list: List of string keys to collect.
    as_text: If `True`, writes the `MetaGraphDef` as an ASCII proto.
    graph: The `Graph` to export. If `None`, use the default graph.
    export_scope: Optional `string`. Name scope under which to extract
      the subgraph. The scope name will be stripped from the node definitions
      for easy import later into new name scopes. If `None`, the whole graph
      is exported. graph_def and export_scope cannot both be specified.
    clear_devices: Whether or not to clear the device field for an `Operation`
      or `Tensor` during export.
    clear_extraneous_savers: Remove any Saver-related information from the
      graph (both Save/Restore ops and SaverDefs) that are not associated
      with the provided SaverDef.
    strip_default_attrs: Boolean. If `True`, default-valued attributes will be
      removed from the NodeDefs. For a detailed guide, see
      [Stripping Default-Valued Attributes](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/saved_model/README.md#stripping-default-valued-attributes).
    **kwargs: Optional keyed arguments.

  Returns:
    A `MetaGraphDef` proto.

  Raises:
    ValueError: When the `GraphDef` is larger than 2GB.
    RuntimeError: If called with eager execution enabled.

  @compatibility(eager)
  Exporting/importing meta graphs is not supported. No graph exists when eager
  execution is enabled.
  @end_compatibility
  """
  # pylint: enable=line-too-long
  if context.executing_eagerly():
    raise RuntimeError("Exporting/importing meta graphs is not supported when "
                       "eager execution is enabled. No graph exists when eager "
                       "execution is enabled.")
  # Delegates the actual work to meta_graph.export_scoped_meta_graph; the
  # second element of the returned tuple (the var list) is not needed here.
  meta_graph_def, _ = meta_graph.export_scoped_meta_graph(
      filename=filename,
      meta_info_def=meta_info_def,
      graph_def=graph_def,
      saver_def=saver_def,
      collection_list=collection_list,
      as_text=as_text,
      graph=graph,
      export_scope=export_scope,
      clear_devices=clear_devices,
      clear_extraneous_savers=clear_extraneous_savers,
      strip_default_attrs=strip_default_attrs,
      **kwargs)
  return meta_graph_def
@tf_export("train.checkpoint_exists")
def checkpoint_exists(checkpoint_prefix):
  """Checks whether a V1 or V2 checkpoint exists with the specified prefix.

  This is the recommended way to check if a checkpoint exists, since it takes
  into account the naming difference between V1 and V2 formats.

  Args:
    checkpoint_prefix: the prefix of a V1 or V2 checkpoint, with V2 taking
      priority.  Typically the result of `Saver.save()` or that of
      `tf.train.latest_checkpoint()`, regardless of sharded/non-sharded or
      V1/V2.

  Returns:
    A bool, true iff a checkpoint referred to by `checkpoint_prefix` exists.
  """
  # Try the V2 metadata (index) file first, then fall back to the V1 prefix.
  v2_pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
                                           saver_pb2.SaverDef.V2)
  if file_io.get_matching_files(v2_pathname):
    return True
  return bool(file_io.get_matching_files(checkpoint_prefix))
@tf_export("train.get_checkpoint_mtimes")
def get_checkpoint_mtimes(checkpoint_prefixes):
  """Returns the mtimes (modification timestamps) of the checkpoints.

  Globs for the checkpoints pointed to by `checkpoint_prefixes`.  If the files
  exist, collect their mtime.  Both V2 and V1 checkpoints are considered, in
  that priority.

  This is the recommended way to get the mtimes, since it takes into account
  the naming difference between V1 and V2 formats.

  Args:
    checkpoint_prefixes: a list of checkpoint paths, typically the results of
      `Saver.save()` or those of `tf.train.latest_checkpoint()`, regardless of
      sharded/non-sharded or V1/V2.
  Returns:
    A list of mtimes (in fractional seconds since the epoch) of the found
    checkpoints.
  """
  mtimes = []

  def match_maybe_append(pathname):
    # Appends the mtime of the first file matching `pathname` (if any) and
    # reports whether a match was found.
    fnames = file_io.get_matching_files(pathname)
    if fnames:
      # mtime_nsec is in nanoseconds; divide by 1e9 to get seconds.
      mtimes.append(file_io.stat(fnames[0]).mtime_nsec / 1e9)
      return True
    return False

  for checkpoint_prefix in checkpoint_prefixes:
    # Tries V2's metadata file first.
    pathname = _prefix_to_checkpoint_path(checkpoint_prefix,
                                          saver_pb2.SaverDef.V2)
    if match_maybe_append(pathname):
      continue
    # Otherwise, tries V1, where the prefix is the complete pathname.
    match_maybe_append(checkpoint_prefix)

  return mtimes
# Registers Saver.to_proto/from_proto as the (de)serialization functions for
# `Saver` objects stored in the SAVERS graph collection.
ops.register_proto_function(
    ops.GraphKeys.SAVERS,
    proto_type=saver_pb2.SaverDef,
    to_proto=Saver.to_proto,
    from_proto=Saver.from_proto)
| {
"content_hash": "8f9761fb67a226f38455a884a23d3cba",
"timestamp": "",
"source": "github",
"line_count": 2107,
"max_line_length": 176,
"avg_line_length": 40.16658756525866,
"alnum_prop": 0.659356500573076,
"repo_name": "allenlavoie/tensorflow",
"id": "e40b8d22ed2ab0f4c9ff65e953f0f1cf681c8068",
"size": "85352",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tensorflow/python/training/saver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9274"
},
{
"name": "C",
"bytes": "340645"
},
{
"name": "C++",
"bytes": "40746519"
},
{
"name": "CMake",
"bytes": "198073"
},
{
"name": "Go",
"bytes": "1047216"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "735737"
},
{
"name": "Jupyter Notebook",
"bytes": "2117270"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "48231"
},
{
"name": "Objective-C",
"bytes": "12456"
},
{
"name": "Objective-C++",
"bytes": "94385"
},
{
"name": "PHP",
"bytes": "2140"
},
{
"name": "Perl",
"bytes": "6179"
},
{
"name": "Perl 6",
"bytes": "1357"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "34933340"
},
{
"name": "Ruby",
"bytes": "533"
},
{
"name": "Shell",
"bytes": "426884"
},
{
"name": "Smarty",
"bytes": "6870"
}
],
"symlink_target": ""
} |
import os
from django.test import TestCase
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from mapentity.registry import registry
class AuthentFixturesMixin:
    """Mixin loading the authent fixtures and refreshing Geotrek permissions."""

    fixtures = [
        os.path.join(settings.PROJECT_DIR, 'authent', 'fixtures', fixture)
        for fixture in ('minimal.json', 'basic.json')
    ]

    def _pre_setup(self):
        # Permissions are refreshed per-test only outside `TestCase`
        # (e.g. for `TransactionTestCase`); `TestCase` runs rely on the
        # class-level refresh performed in `setUpClass()`.
        if not isinstance(self, TestCase):
            call_command('update_geotrek_permissions', verbosity=0)
        super()._pre_setup()

    @classmethod
    def setUpClass(cls):
        """
        Override setUpClass() of test to make sure MapEntity models are
        registered when test is setup.

        Indeed since permissions are created on model registering, and since
        models are registered in `urls.py` modules, and since `urls.py` are
        imported after test setup, importing them here allows permissions to be
        available before test `setUp()` methods.
        """
        # Workaround https://code.djangoproject.com/ticket/10827
        ContentType.objects.clear_cache()
        if not registry.registry.keys():
            from geotrek.core import urls  # NOQA
            from geotrek.land import urls  # NOQA
            from geotrek.maintenance import urls  # NOQA
            from geotrek.infrastructure import urls  # NOQA
            from geotrek.signage import urls  # NOQA
            from geotrek.trekking import urls  # NOQA
            from geotrek.tourism import urls  # NOQA
            call_command('update_geotrek_permissions', verbosity=0)
        return super().setUpClass()
# Concrete TestCase flavour of the mixin, for tests that only need the
# authent fixtures without any extra behaviour.
class AuthentFixturesTest(AuthentFixturesMixin, TestCase):
    pass
| {
"content_hash": "11d1b1e602b4749252011a25ae180608",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 90,
"avg_line_length": 35.93877551020408,
"alnum_prop": 0.673480976717774,
"repo_name": "makinacorpus/Geotrek",
"id": "9da9dbc442f4da306250db61b66dc2a457ec4d2a",
"size": "1761",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "geotrek/authent/tests/base.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "30638"
},
{
"name": "HTML",
"bytes": "141008"
},
{
"name": "JavaScript",
"bytes": "184508"
},
{
"name": "Makefile",
"bytes": "4170"
},
{
"name": "PLpgSQL",
"bytes": "85546"
},
{
"name": "Python",
"bytes": "2768434"
},
{
"name": "Shell",
"bytes": "18090"
}
],
"symlink_target": ""
} |
import sys, ptypes
from ptypes import pstruct, parray, ptype, pbinary, pstr, dyn
from ..headers import *
from . import exports, imports, resources, exceptions, relocations, debug, loader, clr, tls, headers
## directory entry base types
# Data-directory entry whose pointer field is resolved as a virtual address.
class AddressEntry(headers.IMAGE_DATA_DIRECTORY): addressing = staticmethod(virtualaddress)
# Data-directory entry whose pointer field is resolved as a file offset
# (used by the Security/certificate directory below).
class OffsetEntry(headers.IMAGE_DATA_DIRECTORY): addressing = staticmethod(fileoffset)
## directory entry list
class IMAGE_DIRECTORY_ENTRY_EXPORT(AddressEntry):
    # Export table.
    _object_ = exports.IMAGE_EXPORT_DIRECTORY

class IMAGE_DIRECTORY_ENTRY_IMPORT(AddressEntry):
    # Import table.
    _object_ = imports.IMAGE_IMPORT_DIRECTORY

class IMAGE_DIRECTORY_ENTRY_RESOURCE(AddressEntry):
    #_object_ = resources.IMAGE_RESOURCE_DIRECTORY
    class _object_(resources.IMAGE_RESOURCE_DIRECTORY):
        # Extends IMAGE_RESOURCE_DIRECTORY with a trailing 'ResourceData'
        # block that covers whatever space remains between the end of the
        # last parsed field and the directory's total blocksize.
        _fields_ = resources.IMAGE_RESOURCE_DIRECTORY._fields_[:]
        _fields_.append((lambda s: dyn.block(s.blocksize() - (s.value[-1].getoffset()+s.value[-1].blocksize()-s.value[0].getoffset())), 'ResourceData'))
class IMAGE_DIRECTORY_ENTRY_EXCEPTION(AddressEntry):
    # Exception table.
    _object_ = exceptions.IMAGE_EXCEPTION_DIRECTORY

class IMAGE_DIRECTORY_ENTRY_SECURITY(OffsetEntry):
    # Certificate table; note this entry is file-offset based (OffsetEntry).
    class _object_(parray.block):
        _object_ = headers.Certificate

class IMAGE_DIRECTORY_ENTRY_BASERELOC(AddressEntry):
    # Base relocation table.
    _object_ = relocations.IMAGE_BASERELOC_DIRECTORY

class IMAGE_DIRECTORY_ENTRY_DEBUG(AddressEntry):
    # Debug data.
    _object_ = debug.IMAGE_DEBUG_DIRECTORY

class IMAGE_DIRECTORY_ENTRY_ARCHITECTURE(AddressEntry):
    '''IMAGE_DIRECTORY_ENTRY_COPYRIGHT'''

class IMAGE_DIRECTORY_ENTRY_GLOBALPTR(AddressEntry):
    # No decoder defined for this entry.
    pass
class IMAGE_DIRECTORY_ENTRY_TLS(AddressEntry):
    def _object_(self):
        # Selects the 32- or 64-bit TLS directory layout according to the
        # optional header of the enclosing PE header.
        res = self.getparent(Header)['OptionalHeader'].li
        return tls.IMAGE_TLS_DIRECTORY64 if res.is64() else tls.IMAGE_TLS_DIRECTORY32

class IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG(AddressEntry):
    def _object_(self):
        # Selects the 32- or 64-bit load-config layout from the optional header.
        res = self.getparent(Header)['OptionalHeader'].li
        res = loader.IMAGE_LOAD_CONFIG_DIRECTORY64 if res.is64() else loader.IMAGE_LOAD_CONFIG_DIRECTORY32
        #return dyn.clone(res, blocksize=lambda s, cb=self['Size'].li.int(): cb)
        return res

class IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT(OffsetEntry):
    # Bound import table; file-offset based (OffsetEntry).
    _object_ = imports.IMAGE_BOUND_IMPORT_DIRECTORY

class IMAGE_DIRECTORY_ENTRY_IAT(AddressEntry):
    def _object_(self):
        # Import address table entry width depends on whether the image is 64-bit.
        res = self.getparent(Header)['OptionalHeader'].li
        return imports.IMAGE_IMPORT_ADDRESS_TABLE64 if res.is64() else imports.IMAGE_IMPORT_ADDRESS_TABLE

class IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT(AddressEntry):
    # Delay-load import table.
    _object_ = imports.IMAGE_DELAYLOAD_DIRECTORY

class IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR(AddressEntry):
    # CLR (COR20) runtime header.
    _object_ = clr.IMAGE_COR20_HEADER

# Reserved (index 15); no decoder defined.
class IMAGE_DIRECTORY_ENTRY_RESERVED(AddressEntry): pass
class DataDirectoryEntry(pint.enum):
    """Names for the sixteen DataDirectory slot indices.

    In addition to the canonical names, a number of alias spellings map to the
    same index so name-based lookups (see DataDirectory.__getindex__) are
    forgiving.
    """
    _values_ = [
        ('Export', 0),
        ('Import', 1),
        ('Resource', 2),
        ('Exception', 3),
        ('Security', 4),
        ('BaseReloc', 5),
        ('Debug', 6),
        ('Architecture', 7),
        ('GlobalPtr', 8),
        ('Tls', 9),
        ('LoadConfig', 10),
        ('BoundImport', 11),
        ('IAT', 12),
        ('DelayLoad', 13),
        ('ClrHeader', 14),
        ('Reserved', 15),

        # aliases
        ('Exports', 0),
        ('Imports', 1),
        ('Resources', 2),
        ('Exceptions', 3),
        ('Certificate', 4),
        ('Reloc', 5),
        ('Relocations', 5),
        ('Relocation', 5),
        ('BaseRelocation', 5),
        ('BaseRelocations', 5),
        ('Global', 8),
        ('Thread', 9),
        ('ThreadLocalStorage', 9),
        ('LoaderConfig', 10),
        ('Loader', 10),
        ('Bound', 11),
        ('BoundImports', 11),
        ('ImportAddress', 12),
        ('DelayImportDescriptor', 13),
        ('DelayImport', 13),
        ('Clr', 14),
        ('COM', 14),
        ('COR20', 14),
    ]
class DataDirectory(parray.type):
    """The optional header's array of 16 IMAGE_DATA_DIRECTORY entries."""
    length = 16

    def __getindex__(self, key):
        # Permit lookup by name as well as by numeric index: names resolve
        # case-insensitively through DataDirectoryEntry's mapping, and a
        # trailing "table" suffix (e.g. "ExportTable") is tolerated.
        string_types = (str, unicode) if sys.version_info.major < 3 else (str,)
        if isinstance(key, string_types):
            # try and be smart in case user tries to be dumb
            mapping = DataDirectoryEntry.mapping()
            key, res = key.lower(), { k.lower() : v for k, v in mapping.items() }
            key = key[:key.rfind('table')] if key.endswith('table') else key[:]
            return res[key]
        return key

    def _object_(self):
        # The element type depends on the slot's position in the array: the
        # next element to instantiate is entries[len(self.value)].
        entries = (
            IMAGE_DIRECTORY_ENTRY_EXPORT,
            IMAGE_DIRECTORY_ENTRY_IMPORT,
            IMAGE_DIRECTORY_ENTRY_RESOURCE,
            IMAGE_DIRECTORY_ENTRY_EXCEPTION,
            IMAGE_DIRECTORY_ENTRY_SECURITY,
            IMAGE_DIRECTORY_ENTRY_BASERELOC,
            IMAGE_DIRECTORY_ENTRY_DEBUG,
            IMAGE_DIRECTORY_ENTRY_ARCHITECTURE,
            IMAGE_DIRECTORY_ENTRY_GLOBALPTR,
            IMAGE_DIRECTORY_ENTRY_TLS,
            IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG,
            IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT,
            IMAGE_DIRECTORY_ENTRY_IAT,
            IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT,
            IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR,
            IMAGE_DIRECTORY_ENTRY_RESERVED,
        )
        return entries[len(self.value)]

    def details(self, **options):
        # One line per entry: offset, entry classname, index, address summary, size.
        if self.initializedQ():
            width = max(len(n.classname()) for n in self.value) if self.value else 0
            return '\n'.join('[{:x}] {:>{}}{:4s} {:s}:+{:#x}'.format(n.getoffset(), n.classname(), width, '{%d}'%i, n['Address'].summary(), n['Size'].int()) for i, n in enumerate(self.value))
        return super(DataDirectory,self).details(**options)

    def repr(self, **options):
        return self.details(**options)
| {
"content_hash": "59aa63723e8622bf51c0432624e09c24",
"timestamp": "",
"source": "github",
"line_count": 152,
"max_line_length": 191,
"avg_line_length": 37.38157894736842,
"alnum_prop": 0.627419922562478,
"repo_name": "arizvisa/syringe",
"id": "110819d8e85935d59bd4852a916051ae46b4a5e2",
"size": "5682",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/pecoff/portable/datadirectory.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Assembly",
"bytes": "22844"
},
{
"name": "C",
"bytes": "11095"
},
{
"name": "HTML",
"bytes": "1761"
},
{
"name": "Makefile",
"bytes": "1228"
},
{
"name": "Perl",
"bytes": "9176"
},
{
"name": "Python",
"bytes": "4312979"
},
{
"name": "Shell",
"bytes": "171"
},
{
"name": "XQuery",
"bytes": "1884"
},
{
"name": "XSLT",
"bytes": "10518"
}
],
"symlink_target": ""
} |
import socket
import ssl
from ansible.module_utils.urls import generic_urlparse
from ansible.module_utils.six.moves.urllib.parse import urlparse
from ansible.module_utils.six.moves import http_client
from ansible.module_utils._text import to_text
# httplib/http.client connection using unix domain socket
HTTPConnection = http_client.HTTPConnection
HTTPSConnection = http_client.HTTPSConnection
try:
import json
except ImportError:
import simplejson as json
class UnixHTTPConnection(HTTPConnection):
    """HTTP connection that talks to a Unix domain socket instead of TCP.

    The hostname handed to the parent class is a placeholder and is never
    resolved; the real endpoint is the filesystem path supplied here.
    """

    def __init__(self, path):
        HTTPConnection.__init__(self, 'localhost')
        self.path = path

    def connect(self):
        # Connect first, then publish the socket where the parent class
        # expects the transport to live.
        channel = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        channel.connect(self.path)
        self.sock = channel
class LXDClientException(Exception):
    """Error raised by the LXD client.

    The human-readable message lives in ``msg`` and arbitrary extra context
    (e.g. the underlying socket error, or request logs) travels in ``kwargs``.
    Note the base ``Exception`` initializer is deliberately not invoked.
    """

    def __init__(self, msg, **kwargs):
        self.kwargs = kwargs
        self.msg = msg
class LXDClient(object):
    def __init__(self, url, key_file=None, cert_file=None, debug=False):
        """LXD Client.
        :param url: The URL of the LXD server. (e.g. unix:/var/lib/lxd/unix.socket or https://127.0.0.1)
        :type url: ``str``
        :param key_file: The path of the client certificate key file.
        :type key_file: ``str``
        :param cert_file: The path of the client certificate file.
        :type cert_file: ``str``
        :param debug: The debug flag. The request and response are stored in logs when debug is true.
        :type debug: ``bool``
        """
        self.url = url
        self.debug = debug
        self.logs = []
        if url.startswith('https:'):
            # TLS endpoint: authenticate with the client certificate pair.
            self.cert_file = cert_file
            self.key_file = key_file
            parts = generic_urlparse(urlparse(self.url))
            ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
            ctx.load_cert_chain(cert_file, keyfile=key_file)
            self.connection = HTTPSConnection(parts.get('netloc'), context=ctx)
        elif url.startswith('unix:'):
            # Local endpoint: everything after the 'unix:' prefix is the
            # socket path.
            unix_socket_path = url[len('unix:'):]
            self.connection = UnixHTTPConnection(unix_socket_path)
        else:
            raise LXDClientException('URL scheme must be unix: or https:')

    def do(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
        """Issue a request; when LXD answers with an async operation, block on
        its /wait endpoint and raise unless the operation ends in 'Success'."""
        resp_json = self._send_request(method, url, body_json=body_json, ok_error_codes=ok_error_codes, timeout=timeout)
        if resp_json['type'] == 'async':
            url = '{0}/wait'.format(resp_json['operation'])
            resp_json = self._send_request('GET', url)
            if resp_json['metadata']['status'] != 'Success':
                self._raise_err_from_json(resp_json)
        return resp_json

    def authenticate(self, trust_password):
        """Register this client's certificate with LXD's trust store."""
        body_json = {'type': 'client', 'password': trust_password}
        return self._send_request('POST', '/1.0/certificates', body_json=body_json)

    def _send_request(self, method, url, body_json=None, ok_error_codes=None, timeout=None):
        """Send one request and decode the JSON response.

        Error-typed responses raise LXDClientException unless the error code
        is whitelisted in ``ok_error_codes`` or the certificate is already in
        the trust store. Every exchange is appended to ``self.logs``.
        """
        try:
            # NOTE(review): a None body_json is serialized to the JSON literal
            # 'null' rather than an empty body -- confirm this is intended.
            body = json.dumps(body_json)
            self.connection.request(method, url, body=body)
            resp = self.connection.getresponse()
            resp_data = resp.read()
            resp_data = to_text(resp_data, errors='surrogate_or_strict')
            resp_json = json.loads(resp_data)
            self.logs.append({
                'type': 'sent request',
                'request': {'method': method, 'url': url, 'json': body_json, 'timeout': timeout},
                'response': {'json': resp_json}
            })
            resp_type = resp_json.get('type', None)
            if resp_type == 'error':
                if ok_error_codes is not None and resp_json['error_code'] in ok_error_codes:
                    return resp_json
                if resp_json['error'] == "Certificate already in trust store":
                    return resp_json
                self._raise_err_from_json(resp_json)
            return resp_json
        except socket.error as e:
            raise LXDClientException('cannot connect to the LXD server', err=e)

    def _raise_err_from_json(self, resp_json):
        """Raise LXDClientException for an error response, attaching the
        request/response logs when debugging is enabled."""
        err_params = {}
        if self.debug:
            err_params['logs'] = self.logs
        raise LXDClientException(self._get_err_from_resp_json(resp_json), **err_params)

    @staticmethod
    def _get_err_from_resp_json(resp_json):
        """Extract the most specific error string from a response payload:
        prefer metadata['err'], fall back to the top-level 'error' field."""
        err = None
        metadata = resp_json.get('metadata', None)
        if metadata is not None:
            err = metadata.get('err', None)
        if err is None:
            err = resp_json.get('error', None)
        return err
| {
"content_hash": "2aec88984b0dbc1640b48d6617400987",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 120,
"avg_line_length": 39.577586206896555,
"alnum_prop": 0.6016118492703115,
"repo_name": "e-gob/plataforma-kioscos-autoatencion",
"id": "f9ec1fd4e299bf93268411cff52f83dc0455fc9e",
"size": "6283",
"binary": false,
"copies": "102",
"ref": "refs/heads/master",
"path": "scripts/ansible-play/.venv/lib/python2.7/site-packages/ansible/module_utils/lxd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "41110"
},
{
"name": "C++",
"bytes": "3804"
},
{
"name": "CSS",
"bytes": "34823"
},
{
"name": "CoffeeScript",
"bytes": "8521"
},
{
"name": "HTML",
"bytes": "61168"
},
{
"name": "JavaScript",
"bytes": "7206"
},
{
"name": "Makefile",
"bytes": "1347"
},
{
"name": "PowerShell",
"bytes": "584344"
},
{
"name": "Python",
"bytes": "25506593"
},
{
"name": "Ruby",
"bytes": "245726"
},
{
"name": "Shell",
"bytes": "5075"
}
],
"symlink_target": ""
} |
import os
import StringIO
import unittest
from telemetry import benchmark
from telemetry import page as page_module
from telemetry.page import page_set
from telemetry.results import html_output_formatter
from telemetry.results import page_test_results
from telemetry.value import scalar
def _MakePageSet():
  """Return a PageSet with three stub pages, rooted at this file's directory."""
  ps = page_set.PageSet(file_path=os.path.dirname(__file__))
  ps.AddUserStory(page_module.Page('http://www.foo.com/', ps, ps.base_dir))
  ps.AddUserStory(page_module.Page('http://www.bar.com/', ps, ps.base_dir))
  ps.AddUserStory(page_module.Page('http://www.baz.com/', ps, ps.base_dir))
  return ps
class DeterministicHtmlOutputFormatter(
    html_output_formatter.HtmlOutputFormatter):
  """Formatter with build time and revision pinned, for stable test output."""
  def _GetBuildTime(self):
    return 'build_time'
  def _GetRevision(self):
    return 'revision'
class FakeMetadataForTest(benchmark.BenchmarkMetadata):
  """Benchmark metadata stub that always reports the name 'test_name'."""
  def __init__(self):
    super(FakeMetadataForTest, self).__init__('test_name')
# Wrap string IO with a .name property so that it behaves more like a file.
# (StringIO here is the Python 2 module imported at the top of the file.)
class StringIOFile(StringIO.StringIO):
  name = 'fake_output_file'
class HtmlOutputFormatterTest(unittest.TestCase):
  """Verifies that repeated Format() calls append result sets to the output
  file, and that the reset flag discards previously written results."""

  def test_basic_summary(self):
    test_page_set = _MakePageSet()
    output_file = StringIOFile()

    # Run the first time and verify the results are written to the HTML file.
    results = page_test_results.PageTestResults()
    results.WillRunPage(test_page_set.pages[0])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[0], 'a', 'seconds', 3))
    results.DidRunPage(test_page_set.pages[0])
    results.WillRunPage(test_page_set.pages[1])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[1], 'a', 'seconds', 7))
    results.DidRunPage(test_page_set.pages[1])
    # Third formatter argument is reset_results=False: existing output kept.
    formatter = DeterministicHtmlOutputFormatter(
        output_file, FakeMetadataForTest(), False, False, 'browser_type')
    formatter.Format(results)
    expected = {
      "platform": "browser_type",
      "buildTime": "build_time",
      "label": None,
      "tests": {
        "test_name": {
          "metrics": {
            "a": {
              "current": [3, 7],
              "units": "seconds",
              "important": True
            },
            "telemetry_page_measurement_results.num_failed": {
              "current": [0],
              "units": "count",
              "important": False
            },
            "a.http://www.bar.com/": {
              "current": [7],
              "units": "seconds",
              "important": False
            },
            "a.http://www.foo.com/": {
              "current": [3],
              "units": "seconds",
              "important": False
            }
          }
        }
      },
      "revision": "revision"
    }
    self.assertEquals(expected, formatter.GetResults())

    # Run the second time and verify the results are appended to the HTML file.
    output_file.seek(0)
    results = page_test_results.PageTestResults()
    results.WillRunPage(test_page_set.pages[0])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[0], 'a', 'seconds', 4))
    results.DidRunPage(test_page_set.pages[0])
    results.WillRunPage(test_page_set.pages[1])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[1], 'a', 'seconds', 8))
    results.DidRunPage(test_page_set.pages[1])
    formatter = DeterministicHtmlOutputFormatter(
        output_file, FakeMetadataForTest(), False, False, 'browser_type')
    formatter.Format(results)
    # Combined results now hold both runs, oldest first.
    expected = [
      {
        "platform": "browser_type",
        "buildTime": "build_time",
        "label": None,
        "tests": {
          "test_name": {
            "metrics": {
              "a": {
                "current": [3, 7],
                "units": "seconds",
                "important": True
              },
              "telemetry_page_measurement_results.num_failed": {
                "current": [0],
                "units": "count",
                "important": False
              },
              "a.http://www.bar.com/": {
                "current": [7],
                "units": "seconds",
                "important": False
              },
              "a.http://www.foo.com/": {
                "current": [3],
                "units": "seconds",
                "important": False
              }
            }
          }
        },
        "revision": "revision"
      },
      {
        "platform": "browser_type",
        "buildTime": "build_time",
        "label": None,
        "tests": {
          "test_name": {
            "metrics": {
              "a": {
                "current": [4, 8],
                "units": "seconds",
                "important": True
              },
              "telemetry_page_measurement_results.num_failed": {
                "current": [0],
                "units": "count",
                "important": False,
              },
              "a.http://www.bar.com/": {
                "current": [8],
                "units": "seconds",
                "important": False
              },
              "a.http://www.foo.com/": {
                "current": [4],
                "units": "seconds",
                "important": False
              }
            }
          }
        },
        "revision": "revision"
      }]
    self.assertEquals(expected, formatter.GetCombinedResults())
    last_output_len = len(output_file.getvalue())

    # Now reset the results and verify the old ones are gone.
    output_file.seek(0)
    results = page_test_results.PageTestResults()
    results.WillRunPage(test_page_set.pages[0])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[0], 'a', 'seconds', 5))
    results.DidRunPage(test_page_set.pages[0])
    results.WillRunPage(test_page_set.pages[1])
    results.AddValue(scalar.ScalarValue(
        test_page_set.pages[1], 'a', 'seconds', 9))
    results.DidRunPage(test_page_set.pages[1])
    # reset_results=True: previous result sets are dropped from the file.
    formatter = DeterministicHtmlOutputFormatter(
        output_file, FakeMetadataForTest(), True, False, 'browser_type')
    formatter.Format(results)
    expected = [{
      "platform": "browser_type",
      "buildTime": "build_time",
      "label": None,
      "tests": {
        "test_name": {
          "metrics": {
            "a": {
              "current": [5, 9],
              "units": "seconds",
              "important": True
            },
            "telemetry_page_measurement_results.num_failed": {
              "current": [0],
              "units": "count",
              "important": False
            },
            "a.http://www.bar.com/": {
              "current": [9],
              "units": "seconds",
              "important": False
            },
            "a.http://www.foo.com/": {
              "current": [5],
              "units": "seconds",
              "important": False
            }
          }
        }
      },
      "revision": "revision"
    }]
    self.assertEquals(expected, formatter.GetCombinedResults())
    self.assertTrue(len(output_file.getvalue()) < last_output_len)
| {
"content_hash": "02ffe1a990d44c4729852e1f870d839a",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 79,
"avg_line_length": 31.168141592920353,
"alnum_prop": 0.5276831345826235,
"repo_name": "yury-s/v8-inspector",
"id": "4fdf206d414512f32dd18104f8af84e5a03d3b7e",
"size": "7206",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Source/chrome/tools/telemetry/telemetry/results/html_output_formatter_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "25707"
},
{
"name": "Assembly",
"bytes": "17448"
},
{
"name": "Batchfile",
"bytes": "11285"
},
{
"name": "C",
"bytes": "439305"
},
{
"name": "C#",
"bytes": "81308"
},
{
"name": "C++",
"bytes": "88101426"
},
{
"name": "CMake",
"bytes": "25626"
},
{
"name": "CSS",
"bytes": "23512"
},
{
"name": "Emacs Lisp",
"bytes": "32553"
},
{
"name": "Go",
"bytes": "6913"
},
{
"name": "Groff",
"bytes": "29030"
},
{
"name": "HTML",
"bytes": "1716402"
},
{
"name": "Java",
"bytes": "597533"
},
{
"name": "JavaScript",
"bytes": "19046972"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "25796"
},
{
"name": "Objective-C",
"bytes": "108132"
},
{
"name": "Objective-C++",
"bytes": "254920"
},
{
"name": "Protocol Buffer",
"bytes": "2991"
},
{
"name": "Python",
"bytes": "12018021"
},
{
"name": "Ruby",
"bytes": "937"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "342224"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "VimL",
"bytes": "14968"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
'''
Vector space model bases
'''
import os
import cPickle as pickle
import numpy as np
from scipy.stats import spearmanr
from .sim import cosine
class Space(object):
    """A vector space model mapping labels to vector representations."""

    def __init__(self, descrs):
        """Build the space from a dict of descriptors or a pickle file path."""
        self.reportMissing = True
        if isinstance(descrs, dict):
            self.space = descrs
        elif isinstance(descrs, str):
            self.space = pickle.load(open(descrs, 'rb'))
        else:
            raise TypeError('Expecting file name or dictionary of descriptors')

    def __getitem__(self, key):
        return self.space[key]

    def __contains__(self, key):
        return key in self.space

    def keys(self):
        return self.space.keys()

    def sim(self, x, y):
        """Cosine similarity between the vectors stored under x and y."""
        return cosine(self.space[x], self.space[y])

    def spearman(self, dataset):
        """Spearman rank correlation between gold-standard scores and model
        similarities over a list of (label, label, score) triples."""
        malformed = (not isinstance(dataset, list)
                     or len(dataset) == 0
                     or len(dataset[0]) != 3
                     or not isinstance(dataset[0][2], float))
        if malformed:
            raise TypeError('Dataset is not of correct type, list of [str, str, float] triples expected.')
        gold, predicted = [], []
        for first, second, gold_score in dataset:
            try:
                model_score = self.sim(first, second)
            except KeyError:
                # Pairs with out-of-vocabulary labels are skipped entirely.
                if self.reportMissing:
                    print('Warning: Missing pair %s-%s - skipping' % (first, second))
                continue
            gold.append(gold_score)
            predicted.append(model_score)
        return spearmanr(gold, predicted)

    def neighbours(self, key, n=None):
        """Return the n most similar labels to key; all of them when n is None."""
        scored = [(candidate, self.sim(key, candidate))
                  for candidate in self.space if candidate != key]
        scored.sort(key=lambda pair: pair[1], reverse=True)
        return scored if n is None else scored[:n]
class AggSpace(Space):
    """A Space whose vectors are aggregated from multiple descriptors per key.

    ``descrs`` maps each key to a dict of per-item vectors (e.g. one vector
    per image); the aggregated space maps each key to a single vector obtained
    with the chosen aggregation function ('mean', 'median' or 'max').
    """
    def __init__(self, descrs, aggFunc='mean', caching=True):
        self.reportMissing = True
        self.caching = caching
        self.cached_file_name = None
        if isinstance(descrs, str):
            self.descrs_file = descrs
            self.descrs = pickle.load(open(self.descrs_file, 'rb'))
            # Aggregated spaces are cached alongside the descriptor file.
            self.cached_file_name = '%s-%s.pkl' % (self.descrs_file, aggFunc)
        elif isinstance(descrs, dict):
            self.descrs = descrs
        if self.caching and self.cached_file_name is not None and os.path.exists(self.cached_file_name):
            self.space = pickle.load(open(self.cached_file_name, 'rb'))
        elif aggFunc in ['mean', 'median', 'max']:
            if aggFunc == 'mean':
                f = self.aggMean
            elif aggFunc == 'median':
                f = self.aggMedian
            elif aggFunc == 'max':
                f = self.aggMax
            self.space = {}
            for k in self.descrs.keys():
                # NOTE(review): indexing .values() below assumes Python 2
                # list semantics; Python 3 would need list(...) -- confirm.
                vecs = self.descrs[k].values()
                if len(vecs) == 0:
                    if self.reportMissing:
                        print('Warning: Not enough vectors for key %s - skipping' % k)
                    continue
                if len(vecs) == 1:
                    self.space[k] = vecs[0]
                else:
                    self.space[k] = f(vecs)
            if self.caching and self.cached_file_name is not None:
                pickle.dump(self.space, open(self.cached_file_name, 'wb'))

    def aggMean(self, m):
        """Element-wise mean of the vectors in m, with NaNs treated as zero."""
        return np.mean(np.nan_to_num(m), axis=0, dtype=np.float64)

    def aggMedian(self, m):
        """Element-wise median of the vectors in m, with NaNs treated as zero."""
        return np.median(np.nan_to_num(m), axis=0)

    def aggMax(self, m):
        """Element-wise maximum of the vectors in m, with NaNs treated as zero."""
        return np.max(np.nan_to_num(m), axis=0)

    def getDispersions(self, rescale=True, n_images=None):
        """Compute per-key dispersion (mean pairwise cosine distance among a
        key's descriptors), optionally rescaled to [0, 1] across keys."""
        self.cached_dispersions_file = None
        if self.caching and hasattr(self, 'descrs_file'):
            self.cached_dispersions_file = '%s-dispersions.pkl' % (self.descrs_file)
            if os.path.exists(self.cached_dispersions_file):
                self.dispersions = pickle.load(open(self.cached_dispersions_file, 'rb'))
                return
        def disp(M):
            # Average (1 - cosine) over all ordered pairs i != j.
            # NOTE(review): range(i) + range(i+1, l) concatenates lists and is
            # Python-2-only syntax -- confirm this module targets Python 2.
            l = len(M)
            d, cnt = 0, 0
            for i in range(l):
                for j in range(i) + range(i+1, l):
                    d += (1 - cosine(M[i], M[j]))
                    cnt += 1
            return d / cnt if cnt != 0 else 0
        self.dispersions = {}
        min_disp, max_disp = 1, 0
        for k in self.descrs:
            image_reps = self.descrs[k].values()
            if n_images is not None:
                image_reps = image_reps[:n_images]
            imgdisp = disp(image_reps)
            self.dispersions[k] = imgdisp
            if imgdisp > max_disp:
                max_disp, max_key = imgdisp, k
            if imgdisp < min_disp:
                min_disp, min_key = imgdisp, k
        # rescale
        if rescale:
            for k in self.dispersions:
                self.dispersions[k] = max(0, min(1, (self.dispersions[k] - min_disp) / (max_disp - min_disp)))
        if self.caching and self.cached_dispersions_file is not None:
            pickle.dump(self.dispersions, open(self.cached_dispersions_file, 'wb'))

    def nearest_neighbours(self, key, n=None):
        '''Return the nearest neighbours to the centroid.'''
        sims = []
        for k, v in self.descrs[key].items():
            sims.append(((k, v), cosine(v, self.space[key])))
        if n is None:
            n = len(sims)
        return dict(map(lambda s: s[0], sorted(sims, key = lambda x: x[1], reverse=True)[:n]))

    def filter_nearest_neighbours(self, n):
        '''Filter nearest neighbours and only aggregate these.'''
        for k in self.descrs:
            self.descrs[k] = self.nearest_neighbours(k, n)

    def update_space(self, aggFunc='mean', caching=True):
        """Re-aggregate the space in place with a new aggregation function."""
        self.__init__(self.descrs, aggFunc=aggFunc, caching=caching)
| {
"content_hash": "39f8ec76b3429b5625341a23ba9b24cc",
"timestamp": "",
"source": "github",
"line_count": 163,
"max_line_length": 110,
"avg_line_length": 35.83435582822086,
"alnum_prop": 0.5386064030131826,
"repo_name": "douwekiela/mmfeat",
"id": "69eb2ebd32f71ad5e238c10f65bcac5f6a164ed5",
"size": "5841",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mmfeat/space/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "60886"
}
],
"symlink_target": ""
} |
from PySide import QtCore, QtGui
class Ui_Dialog(object):
    """UI scaffolding for a two-field configure dialog (identifier, filename).

    NOTE(review): this follows the shape of pyside-uic generated code --
    prefer regenerating from the .ui source over hand-editing.
    """
    def setupUi(self, Dialog):
        """Create the widgets, lay them out, and wire the OK/Cancel buttons."""
        Dialog.setObjectName("Dialog")
        Dialog.resize(418, 303)
        self.gridLayout = QtGui.QGridLayout(Dialog)
        self.gridLayout.setObjectName("gridLayout")
        self.configGroupBox = QtGui.QGroupBox(Dialog)
        self.configGroupBox.setTitle("")
        self.configGroupBox.setObjectName("configGroupBox")
        self.formLayout = QtGui.QFormLayout(self.configGroupBox)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setObjectName("formLayout")
        self.label0 = QtGui.QLabel(self.configGroupBox)
        self.label0.setObjectName("label0")
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label0)
        self.lineEdit0 = QtGui.QLineEdit(self.configGroupBox)
        self.lineEdit0.setObjectName("lineEdit0")
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.lineEdit0)
        self.label1 = QtGui.QLabel(self.configGroupBox)
        self.label1.setObjectName("label1")
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label1)
        self.lineEdit1 = QtGui.QLineEdit(self.configGroupBox)
        self.lineEdit1.setObjectName("lineEdit1")
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.lineEdit1)
        self.gridLayout.addWidget(self.configGroupBox, 0, 0, 1, 1)
        self.buttonBox = QtGui.QDialogButtonBox(Dialog)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName("buttonBox")
        self.gridLayout.addWidget(self.buttonBox, 1, 0, 1, 1)

        self.retranslateUi(Dialog)
        # Accept/reject the dialog from the standard button box.
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), Dialog.accept)
        QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), Dialog.reject)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def retranslateUi(self, Dialog):
        """Set all translatable strings (title and field labels)."""
        Dialog.setWindowTitle(QtGui.QApplication.translate("Dialog", "ConfigureDialog", None, QtGui.QApplication.UnicodeUTF8))
        self.label0.setText(QtGui.QApplication.translate("Dialog", "identifier: ", None, QtGui.QApplication.UnicodeUTF8))
        self.label1.setText(QtGui.QApplication.translate("Dialog", "Filename: ", None, QtGui.QApplication.UnicodeUTF8))
| {
"content_hash": "978241454487a4b7723d63d7768cdf42",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 126,
"avg_line_length": 56.86046511627907,
"alnum_prop": 0.7239263803680982,
"repo_name": "MusculoskeletalAtlasProject/mapclient-tests",
"id": "b60e0965fb54d0c8c7529ebefadfa95522285478",
"size": "2693",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test_resources/updater_test/loadstlstep-master/mapclientplugins/loadstlstep/ui_configuredialog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "924193"
}
],
"symlink_target": ""
} |
"""
Test that LLDB doesn't crash if the std module we load is empty.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
import os
class ImportStdModule(TestBase):

    mydir = TestBase.compute_mydir(__file__)

    # We only emulate a fake libc++ in this test and don't use the real libc++,
    # but we still add the libc++ category so that this test is only run in
    # test configurations where libc++ is actually supposed to be tested.
    @add_test_categories(["libc++"])
    @skipIfRemote
    @skipIf(compiler=no_match("clang"))
    def test(self):
        """Expression evaluation should fail cleanly -- not crash LLDB -- when
        the std module loaded from the sysroot is empty."""
        self.build()

        sysroot = os.path.join(os.getcwd(), "root")

        # Set the sysroot.
        self.runCmd("platform select --sysroot '" + sysroot + "' host", CURRENT_EXECUTABLE_SET)

        lldbutil.run_to_source_breakpoint(self,
                                          "// Set break point at this line.", lldb.SBFileSpec("main.cpp"))

        self.runCmd("settings set target.import-std-module true")

        # Use the typedef that is only defined in our 'empty' module. If this fails, then LLDB
        # somehow figured out the correct define for the header and compiled the right
        # standard module that actually contains the std::vector template.
        self.expect("expr MissingContent var = 3; var", substrs=['$0 = 3'])
        # Try to access our mock std::vector. This should fail but not crash LLDB as the
        # std::vector template should be missing from the std module.
        self.expect("expr (size_t)v.size()", substrs=["Couldn't lookup symbols"], error=True)
| {
"content_hash": "738d0fe8772e21795bc725059333fe89",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 95,
"avg_line_length": 41.07692307692308,
"alnum_prop": 0.6697877652933832,
"repo_name": "endlessm/chromium-browser",
"id": "2b1cb100a3251366ee93f296b6fdf225c9e4ae6a",
"size": "1602",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/llvm/lldb/test/API/commands/expression/import-std-module/empty-module/TestEmptyStdModule.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art

# Generated benchmark case: build a 1024-point daily synthetic series with a
# MovingMedian trend, a 12-step cycle, a Quantization transform, zero noise,
# 20 exogenous variables and no autoregressive terms, then run the pipeline.
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "MovingMedian", cycle_length = 12, transform = "Quantization", sigma = 0.0, exog_count = 20, ar_order = 0);
"content_hash": "401b26766636bcd0083c7a0c7e8d0442",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 172,
"avg_line_length": 38.857142857142854,
"alnum_prop": 0.7132352941176471,
"repo_name": "antoinecarme/pyaf",
"id": "d83ad7e1c427d3f418ef134040b7d794a45740fe",
"size": "272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/artificial/transf_Quantization/trend_MovingMedian/cycle_12/ar_/test_artificial_1024_Quantization_MovingMedian_12__20.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
} |
''' Provide 2013 Warsaw daylight hours from http://www.sunrisesunset.com
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from ..util.sampledata import package_csv
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'daylight_warsaw_2013',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
def _read_data():
    ''' Load the bundled Warsaw 2013 daylight CSV and normalize its columns.

    The Date column is reduced to ``datetime.date`` objects and the
    Sunrise/Sunset columns to ``datetime.time`` objects.
    '''
    frame = package_csv('daylight', 'daylight_warsaw_2013.csv', parse_dates=["Date", "Sunrise", "Sunset"])
    frame["Date"] = frame["Date"].map(lambda stamp: stamp.date())
    for column in ("Sunrise", "Sunset"):
        frame[column] = frame[column].map(lambda stamp: stamp.time())
    return frame

#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------

daylight_warsaw_2013 = _read_data()
| {
"content_hash": "8847ecb5bef5ac6bab3f8952c27efce3",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 103,
"avg_line_length": 31.95,
"alnum_prop": 0.27856025039123633,
"repo_name": "timsnyder/bokeh",
"id": "f467f2d905c93b241eb06d1f12a143864ac3b34c",
"size": "2248",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/sampledata/daylight.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "24877"
},
{
"name": "Dockerfile",
"bytes": "4099"
},
{
"name": "HTML",
"bytes": "54062"
},
{
"name": "JavaScript",
"bytes": "27797"
},
{
"name": "Makefile",
"bytes": "886"
},
{
"name": "PowerShell",
"bytes": "713"
},
{
"name": "Python",
"bytes": "3827067"
},
{
"name": "Roff",
"bytes": "495"
},
{
"name": "Shell",
"bytes": "9953"
},
{
"name": "TypeScript",
"bytes": "2145262"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, absolute_import
import json
import unittest
import responses
from linebot import (
LineBotApi
)
from linebot.models import (
LocationSendMessage
)
class TestLineBotApi(unittest.TestCase):
    """Exercises the push/reply/multicast endpoints with a location message,
    checking the URL hit and the exact JSON payload sent."""

    def setUp(self):
        # The channel secret is a dummy; all HTTP traffic is mocked below.
        self.tested = LineBotApi('channel_secret')

        self.location_message = LocationSendMessage(
            title='my location',
            address='Tokyo',
            latitude=35.65910807942215,
            longitude=139.70372892916203
        )

        # Expected serialized form of the message above.
        self.message = [{
            "type": "location",
            "title": "my location",
            "address": "Tokyo",
            "latitude": 35.65910807942215,
            "longitude": 139.70372892916203
        }]

    @responses.activate
    def test_push_location_message(self):
        """POST /v2/bot/message/push carries 'to' plus the serialized message."""
        responses.add(
            responses.POST,
            LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push',
            json={}, status=200
        )

        self.tested.push_message('to', self.location_message)

        request = responses.calls[0].request
        self.assertEqual(request.method, 'POST')
        self.assertEqual(
            request.url,
            LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/push')
        self.assertEqual(
            json.loads(request.body),
            {
                "to": "to",
                "messages": self.message
            }
        )

    @responses.activate
    def test_reply_location_message(self):
        """POST /v2/bot/message/reply carries the reply token plus the message."""
        responses.add(
            responses.POST,
            LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply',
            json={}, status=200
        )

        self.tested.reply_message('replyToken', self.location_message)

        request = responses.calls[0].request
        self.assertEqual(request.method, 'POST')
        self.assertEqual(
            request.url,
            LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/reply')
        self.assertEqual(
            json.loads(request.body),
            {
                "replyToken": "replyToken",
                "messages": self.message
            }
        )

    @responses.activate
    def test_multicast_location_message(self):
        """POST /v2/bot/message/multicast carries a recipient list plus the message."""
        responses.add(
            responses.POST,
            LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast',
            json={}, status=200
        )

        self.tested.multicast(['to1', 'to2'], self.location_message)

        request = responses.calls[0].request
        self.assertEqual(request.method, 'POST')
        self.assertEqual(
            request.url,
            LineBotApi.DEFAULT_API_ENDPOINT + '/v2/bot/message/multicast')
        self.assertEqual(
            json.loads(request.body),
            {
                "to": ['to1', 'to2'],
                "messages": self.message
            }
        )


if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "40a6eea2736d7f6bdb841dc36261e34f",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 74,
"avg_line_length": 27.31132075471698,
"alnum_prop": 0.5512953367875648,
"repo_name": "monhustla/line-bot-sdk-python",
"id": "03fa5c993ec74de2b3f4c857eacd5bf100b35dfd",
"size": "3476",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/api/test_send_location_message.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "191401"
}
],
"symlink_target": ""
} |
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import logging
from django.core.management.base import BaseCommand
from django.utils import timezone
from snisi_malaria import (DOMAIN_SLUG,
ROUTINE_REPORTING_END_DAY,
ROUTINE_EXTENDED_REPORTING_END_DAY,
ROUTINE_DISTRICT_AGG_DAY,
ROUTINE_REGION_AGG_DAY)
from snisi_core.models.PeriodicTasks import PeriodicTask
from snisi_malaria.aggregations import (generate_district_reports,
generate_region_country_reports,
generate_weekly_reports)
from snisi_malaria.notifications import (
end_of_reporting_period_notifications,
end_of_extended_reporting_period_notifications)
from snisi_core.models.Periods import MonthPeriod
from snisi_core.models.FixedWeekPeriods import (
FixedMonthWeek,
FixedMonthFirstWeek,
FixedMonthSecondWeek,
FixedMonthThirdWeek,
FixedMonthFourthWeek,
FixedMonthFifthWeek)
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Daily cron entry point for snisi_malaria periodic processing.

    Depending on the current day of month, triggers (at most once per
    period, guarded by PeriodicTask) notifications, weekly report
    generation and district/region/country aggregations.
    """
    def handle(self, *args, **options):
        logger.info("snisi_malaria daily-checkups")
        # Day-of-month drives which tasks below are eligible to run.
        day = timezone.now().day
        # `period` is last month; `this_month` is the current month.
        period = MonthPeriod.current().previous()
        this_month = period.following()
        # period_str = period.strid()
        wperiod = FixedMonthWeek.previous_week(FixedMonthWeek.current())
        # wperiod_str = MonthPeriod.current().strid()
        logger.debug("{} -- {}".format(period, wperiod))
        # Maps a task category slug to the callable that performs it.
        category_matrix = {
            'end_of_reporting_period': end_of_reporting_period_notifications,
            'end_of_extended_reporting_period':
                end_of_extended_reporting_period_notifications,
            'end_of_district_period': generate_district_reports,
            'end_of_region_period': generate_region_country_reports,
            'end_of_first_week_period_reporting': generate_weekly_reports,
            'end_of_second_week_period_reporting': generate_weekly_reports,
            'end_of_third_week_period_reporting': generate_weekly_reports,
            'end_of_fourth_week_period_reporting': generate_weekly_reports,
            'end_of_fifth_week_period_reporting': generate_weekly_reports,
        }
        def handle_category(category, nperiod=None, wperiod=None):
            """Run `category`'s callable once per (period, category).

            A PeriodicTask keyed on the slug guarantees idempotence: the
            task is only marked triggered if the callable did not raise.
            """
            if nperiod is None:
                nperiod = period
            slug = "{domain}_{period}_{category}".format(
                domain=DOMAIN_SLUG, period=nperiod.strid(), category=category)
            # NOTE(review): `created` is unused.
            task, created = PeriodicTask.get_or_create(slug, category)
            if task.can_trigger():
                logger.debug("triggering {}".format(task))
                try:
                    category_matrix.get(category)(period=nperiod,
                                                 wperiod=wperiod)
                except Exception as e:
                    logger.exception(e)
                else:
                    # Only record success; a failed run can retry tomorrow.
                    task.trigger()
            else:
                logger.info("{} already triggered".format(task))
        # On 1st
        if day >= 1:
            # in case we had only 28 days last month
            wperiod = FixedMonthFourthWeek.find_create_from(
                period.middle().year, period.middle().month)
            handle_category("end_of_fourth_week_period_reporting",
                            period, wperiod)
            wperiod = FixedMonthFifthWeek.find_create_from(
                period.middle().year, period.middle().month)
            handle_category("end_of_fifth_week_period_reporting",
                            period, wperiod)
        # On 6th
        if day >= ROUTINE_REPORTING_END_DAY:
            # send warning notice to non-satisfied HC person
            handle_category("end_of_reporting_period")
        # On 11th
        if day >= ROUTINE_EXTENDED_REPORTING_END_DAY:
            # send summary notification and validation invitation to districts
            handle_category("end_of_extended_reporting_period")
        # On 13th
        if day >= 13:
            wperiod = FixedMonthFirstWeek.find_create_from(
                period.following().middle().year,
                period.following().middle().month)
            handle_category("end_of_first_week_period_reporting",
                            this_month, wperiod)
        # On 16th
        if day >= ROUTINE_DISTRICT_AGG_DAY:
            # validate all HC reports
            # create aggregated for district
            # create expected-validation for district
            # send notification to regions
            handle_category("end_of_district_period")
        # On 20th
        if day >= 20:
            wperiod = FixedMonthSecondWeek.find_create_from(
                period.following().middle().year,
                period.following().middle().month)
            handle_category("end_of_second_week_period_reporting",
                            this_month, wperiod)
        # On 26th
        if day >= ROUTINE_REGION_AGG_DAY:
            # validate all district reports
            # create aggregated for region
            # create aggregated for country
            # send notification to central/national
            handle_category("end_of_region_period")
        # On 27th
        if day >= 27:
            wperiod = FixedMonthThirdWeek.find_create_from(
                period.following().middle().year,
                period.following().middle().month)
            handle_category("end_of_third_week_period_reporting",
                            this_month, wperiod)
| {
"content_hash": "7f034d16d1f024b174d8b876d5c01120",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 78,
"avg_line_length": 41.108695652173914,
"alnum_prop": 0.5818790763264586,
"repo_name": "yeleman/snisi",
"id": "f9652f4bd3426501eb920ac9d7a56830aadd8e2f",
"size": "5752",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "snisi_malaria/management/commands/snisi_malaria_daily.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "410022"
},
{
"name": "HTML",
"bytes": "1007275"
},
{
"name": "Java",
"bytes": "7211"
},
{
"name": "JavaScript",
"bytes": "292583"
},
{
"name": "Python",
"bytes": "2237855"
},
{
"name": "Shell",
"bytes": "111"
}
],
"symlink_target": ""
} |
"""Mice Protocols."""
| {
"content_hash": "22d9659cde1259760a98004946944d94",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 21,
"avg_line_length": 23,
"alnum_prop": 0.5652173913043478,
"repo_name": "hlzz/dotfiles",
"id": "a76cb1a57eff9439e5f373d46e6465c611ab30f8",
"size": "23",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "graphics/VTK-7.0.0/ThirdParty/Twisted/twisted/protocols/mice/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "1240"
},
{
"name": "Arc",
"bytes": "38"
},
{
"name": "Assembly",
"bytes": "449468"
},
{
"name": "Batchfile",
"bytes": "16152"
},
{
"name": "C",
"bytes": "102303195"
},
{
"name": "C++",
"bytes": "155056606"
},
{
"name": "CMake",
"bytes": "7200627"
},
{
"name": "CSS",
"bytes": "179330"
},
{
"name": "Cuda",
"bytes": "30026"
},
{
"name": "D",
"bytes": "2152"
},
{
"name": "Emacs Lisp",
"bytes": "14892"
},
{
"name": "FORTRAN",
"bytes": "5276"
},
{
"name": "Forth",
"bytes": "3637"
},
{
"name": "GAP",
"bytes": "14495"
},
{
"name": "GLSL",
"bytes": "438205"
},
{
"name": "Gnuplot",
"bytes": "327"
},
{
"name": "Groff",
"bytes": "518260"
},
{
"name": "HLSL",
"bytes": "965"
},
{
"name": "HTML",
"bytes": "2003175"
},
{
"name": "Haskell",
"bytes": "10370"
},
{
"name": "IDL",
"bytes": "2466"
},
{
"name": "Java",
"bytes": "219109"
},
{
"name": "JavaScript",
"bytes": "1618007"
},
{
"name": "Lex",
"bytes": "119058"
},
{
"name": "Lua",
"bytes": "23167"
},
{
"name": "M",
"bytes": "1080"
},
{
"name": "M4",
"bytes": "292475"
},
{
"name": "Makefile",
"bytes": "7112810"
},
{
"name": "Matlab",
"bytes": "1582"
},
{
"name": "NSIS",
"bytes": "34176"
},
{
"name": "Objective-C",
"bytes": "65312"
},
{
"name": "Objective-C++",
"bytes": "269995"
},
{
"name": "PAWN",
"bytes": "4107117"
},
{
"name": "PHP",
"bytes": "2690"
},
{
"name": "Pascal",
"bytes": "5054"
},
{
"name": "Perl",
"bytes": "485508"
},
{
"name": "Pike",
"bytes": "1338"
},
{
"name": "Prolog",
"bytes": "5284"
},
{
"name": "Python",
"bytes": "16799659"
},
{
"name": "QMake",
"bytes": "89858"
},
{
"name": "Rebol",
"bytes": "291"
},
{
"name": "Ruby",
"bytes": "21590"
},
{
"name": "Scilab",
"bytes": "120244"
},
{
"name": "Shell",
"bytes": "2266191"
},
{
"name": "Slash",
"bytes": "1536"
},
{
"name": "Smarty",
"bytes": "1368"
},
{
"name": "Swift",
"bytes": "331"
},
{
"name": "Tcl",
"bytes": "1911873"
},
{
"name": "TeX",
"bytes": "11981"
},
{
"name": "Verilog",
"bytes": "3893"
},
{
"name": "VimL",
"bytes": "595114"
},
{
"name": "XSLT",
"bytes": "62675"
},
{
"name": "Yacc",
"bytes": "307000"
},
{
"name": "eC",
"bytes": "366863"
}
],
"symlink_target": ""
} |
from datetime import date
from django.test import TestCase, RequestFactory, Client
from test_app.models import TestModel
class ViewsTest(TestCase):
    """Exercise the detail/list views, including AJAX-aware template choice."""

    def setUp(self):
        self.factory = RequestFactory()
        self.m = TestModel()
        self.m.date = date(2015, 2, 21)
        # FIX: Django's Model.save() returns None, so the previous
        # ``self.id = self.m.save()`` only ever stored None. The saved
        # primary key is available as ``self.m.id``.
        self.m.save()
        self.n = TestModel()
        self.n.date = date.today()
        self.n.save()

    def test_get_context_data(self):
        """AJAX requests get the octopus ajax base template; plain ones get base.html."""
        response = self.client.get(f'/detail/{self.m.id}', {},
                                   HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        base_template = response.context['base_template']
        self.assertTrue("octopus/ajax.html" == base_template)
        response = self.client.get(f'/detail/{self.m.id}', {})
        base_template = response.context['base_template']
        self.assertTrue("base.html" == base_template)

    def test_fragment_response(self):
        """AJAX detail responses contain only the fragment content."""
        response = self.client.get(f'/detail/{self.m.id}', {},
                                   HTTP_X_REQUESTED_WITH="XMLHttpRequest")
        conversation = response.content.strip()
        self.assertTrue(
            conversation.startswith(b'Fancy people talking about fancy dances'))
        self.assertTrue(b'The truth' not in conversation)
        self.assertTrue(
            conversation.endswith(b'Loneliness, violence, and peanut butter'))

    def test_full_response(self):
        """Non-AJAX list view renders the complete page."""
        response = self.client.get('/list/', {}).content.strip()
        self.assertTrue(b'too much milksteak' in response)
| {
"content_hash": "455a0f167dfeb27425792cb35e7d40ee",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 80,
"avg_line_length": 34.72727272727273,
"alnum_prop": 0.6132198952879581,
"repo_name": "brmc/django-octopus",
"id": "ba63f186ae470c31a50804ddbc8363cd0586cd2c",
"size": "1528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3097"
},
{
"name": "JavaScript",
"bytes": "16290"
},
{
"name": "Python",
"bytes": "19299"
},
{
"name": "Shell",
"bytes": "275"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import apps.submission.models
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: alters SubmissionProcess.archive to a
    # FileField with max_length=255, a model-defined upload_to callable and
    # the verbose name 'Pixels submitted archive'.
    dependencies = [
        ('submission', '0004_auto_20171129_1346'),
    ]
    operations = [
        migrations.AlterField(
            model_name='submissionprocess',
            name='archive',
            field=models.FileField(max_length=255, upload_to=apps.submission.models.SubmissionProcess.archive_upload_to, verbose_name='Pixels submitted archive'),
        ),
    ]
| {
"content_hash": "fcafc6151bfb3c2a8869470ecc875624",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 162,
"avg_line_length": 28.157894736842106,
"alnum_prop": 0.6691588785046729,
"repo_name": "Candihub/pixel",
"id": "d5a6882809c7cb7bd598394822a2897e52c6dc47",
"size": "608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/submission/migrations/0005_auto_20180110_1045.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15017"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HTML",
"bytes": "58864"
},
{
"name": "JavaScript",
"bytes": "1180"
},
{
"name": "Makefile",
"bytes": "4184"
},
{
"name": "Python",
"bytes": "414705"
},
{
"name": "R",
"bytes": "3817"
},
{
"name": "Shell",
"bytes": "2928"
}
],
"symlink_target": ""
} |
import logging
import mongo_db
import proj_constants as const
log = logging.getLogger('app')
def get_collection():
    """Return the news collection, or None when no DB client is available."""
    client = mongo_db.get_client()
    return client[const.NEWS_DB][const.NEWS_COLLECTION] if client else None
# Re-export so callers can close the shared client via this module.
close = mongo_db.close
| {
"content_hash": "7bd2c3178dadd3f8c5b3f4a718bf0ab6",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 55,
"avg_line_length": 15.882352941176471,
"alnum_prop": 0.6962962962962963,
"repo_name": "andre487/news487",
"id": "8d197d082524a05ee18553a558a764643c86cd5e",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "collector/util/db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "990"
},
{
"name": "HTML",
"bytes": "8147"
},
{
"name": "JavaScript",
"bytes": "81939"
},
{
"name": "Python",
"bytes": "86335"
},
{
"name": "Shell",
"bytes": "4284"
}
],
"symlink_target": ""
} |
import argparse
import logging
from math import ceil
from multiprocessing import Pool
import os
import time
import urlparse
import boto
from boto.s3.connection import OrdinaryCallingFormat
# CLI definition for the parallel S3 downloader.
parser = argparse.ArgumentParser(description="Download a file from S3 in parallel",
                                 prog="s3-mp-download")
parser.add_argument("src", help="The S3 key to download")
parser.add_argument("dest", help="The destination file")
parser.add_argument("-np", "--num-processes", help="Number of processors to use",
                    type=int, default=2)
parser.add_argument("-s", "--split", help="Split size, in Mb", type=int, default=32)
parser.add_argument("-f", "--force", help="Overwrite an existing file",
                    action="store_true")
parser.add_argument("--insecure", dest='secure', help="Use HTTP for connection",
                    default=True, action="store_false")
parser.add_argument("-t", "--max-tries", help="Max allowed retries for http timeout", type=int, default=5)
parser.add_argument("-v", "--verbose", help="Be more verbose", default=False, action="store_true")
parser.add_argument("-q", "--quiet", help="Be less verbose (for use in cron jobs)",
                    default=False, action="store_true")
# Module-level logger; its level is adjusted by -v/-q in the __main__ block.
logger = logging.getLogger("s3-mp-download")
def do_part_download(args):
"""
Download a part of an S3 object using Range header
We utilize the existing S3 GET request implemented by Boto and tack on the
Range header. We then read in 1Mb chunks of the file and write out to the
correct position in the target file
:type args: tuple of (string, string, int, int)
:param args: The actual arguments of this method. Due to lameness of
multiprocessing, we have to extract these outside of the
function definition.
The arguments are: S3 Bucket name, S3 key, local file name,
chunk size, and part number
"""
bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries = args
conn = boto.connect_s3(calling_format=OrdinaryCallingFormat())
conn.is_secure = secure
# Make the S3 request
resp = conn.make_request("GET", bucket=bucket_name,
key=key_name, headers={'Range':"bytes=%d-%d" % (min_byte, max_byte)})
# Open the target file, seek to byte offset
fd = os.open(fname, os.O_WRONLY)
logger.debug("Opening file descriptor %d, seeking to %d" % (fd, min_byte))
os.lseek(fd, min_byte, os.SEEK_SET)
chunk_size = min((max_byte-min_byte), split*1024*1024)
logger.debug("Reading HTTP stream in %dM chunks" % (chunk_size/1024./1024))
t1 = time.time()
s = 0
try:
while True:
data = resp.read(chunk_size)
if data == "":
break
os.write(fd, data)
s += len(data)
t2 = time.time() - t1
os.close(fd)
s = s / 1024 / 1024.
logger.debug("Downloaded %0.2fM in %0.2fs at %0.2fMBps" % (s, t2, s/t2))
except Exception, err:
logger.debug("Retry request %d of max %d times" % (current_tries, max_tries))
if (current_tries > max_tries):
logger.error(err)
else:
time.sleep(3)
current_tries += 1
do_part_download(bucket_name, key_name, fname, min_byte, max_byte, split, secure, max_tries, current_tries)
def gen_byte_ranges(size, num_parts):
    """Yield inclusive (first_byte, last_byte) pairs partitioning `size` bytes."""
    part_size = int(ceil(1. * size / num_parts))
    start = 0
    for _ in range(num_parts):
        yield (start, min(start + part_size - 1, size - 1))
        start += part_size
def main(src, dest, num_processes=2, split=32, force=False, verbose=False, quiet=False, secure=True, max_tries=5):
# Check that src is a valid S3 url
split_rs = urlparse.urlsplit(src)
if split_rs.scheme != "s3":
raise ValueError("'%s' is not an S3 url" % src)
# Check that dest does not exist
if os.path.isdir(dest):
filename = split_rs.path.split('/')[-1]
dest = os.path.join(dest, filename)
if os.path.exists(dest):
if force:
os.remove(dest)
else:
raise ValueError("Destination file '%s' exists, specify -f to"
" overwrite" % dest)
# Split out the bucket and the key
s3 = boto.connect_s3()
s3 = boto.connect_s3(calling_format=OrdinaryCallingFormat())
s3.is_secure = secure
logger.debug("split_rs: %s" % str(split_rs))
bucket = s3.lookup(split_rs.netloc)
if bucket == None:
raise ValueError("'%s' is not a valid bucket" % split_rs.netloc)
key = bucket.get_key(split_rs.path)
if key is None:
raise ValueError("'%s' does not exist." % split_rs.path)
# Determine the total size and calculate byte ranges
resp = s3.make_request("HEAD", bucket=bucket, key=key)
if resp is None:
raise ValueError("response is invalid.")
size = int(resp.getheader("content-length"))
logger.debug("Got headers: %s" % resp.getheaders())
# Skipping multipart if file is less than 1mb
if size < 1024 * 1024:
t1 = time.time()
key.get_contents_to_filename(dest)
t2 = time.time() - t1
size_mb = size / 1024 / 1024
logger.info("Finished single-part download of %0.2fM in %0.2fs (%0.2fMBps)" %
(size_mb, t2, size_mb/t2))
else:
# Touch the file
fd = os.open(dest, os.O_CREAT)
os.close(fd)
size_mb = size / 1024 / 1024
num_parts = (size_mb+(-size_mb%split))//split
def arg_iterator(num_parts):
for min_byte, max_byte in gen_byte_ranges(size, num_parts):
yield (bucket.name, key.name, dest, min_byte, max_byte, split, secure, max_tries, 0)
s = size / 1024 / 1024.
try:
t1 = time.time()
pool = Pool(processes=num_processes)
pool.map_async(do_part_download, arg_iterator(num_parts)).get(9999999)
t2 = time.time() - t1
logger.info("Finished downloading %0.2fM in %0.2fs (%0.2fMBps)" %
(s, t2, s/t2))
except KeyboardInterrupt:
logger.warning("User terminated")
except Exception, err:
logger.error(err)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
args = parser.parse_args()
arg_dict = vars(args)
if arg_dict['quiet'] == True:
logger.setLevel(logging.WARNING)
if arg_dict['verbose'] == True:
logger.setLevel(logging.DEBUG)
logger.debug("CLI args: %s" % args)
main(**arg_dict)
| {
"content_hash": "b9247d40c84bbdefbe6a5caa1665e669",
"timestamp": "",
"source": "github",
"line_count": 169,
"max_line_length": 119,
"avg_line_length": 38.50887573964497,
"alnum_prop": 0.6118623232944069,
"repo_name": "mumrah/s3-multipart",
"id": "9fae8d98b9efe6c404d5288980a804e7796227af",
"size": "6530",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "s3-mp-download.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18333"
}
],
"symlink_target": ""
} |
import hashlib
from oslo_log import log as logging
from oslo_utils import timeutils
from nova.api.openstack import api_version_request
from nova.api.openstack import common
from nova.api.openstack.compute.views import addresses as views_addresses
from nova.api.openstack.compute.views import flavors as views_flavors
from nova.api.openstack.compute.views import images as views_images
from nova.i18n import _LW
from nova.objects import base as obj_base
from nova import utils
LOG = logging.getLogger(__name__)
class ViewBuilder(common.ViewBuilder):
    """Model a server API response as a python dictionary."""
    _collection_name = "servers"
    # Statuses for which a "progress" field is added to the response.
    _progress_statuses = (
        "ACTIVE",
        "BUILD",
        "REBUILD",
        "RESIZE",
        "VERIFY_RESIZE",
        "MIGRATING",
    )
    # Statuses for which fault details may be attached to the response.
    _fault_statuses = (
        "ERROR", "DELETED"
    )
    # These are the lazy-loadable instance attributes required for showing
    # details about an instance. Add to this list as new things need to be
    # shown.
    _show_expected_attrs = ['flavor', 'info_cache', 'metadata']
    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilder, self).__init__()
        self._address_builder = views_addresses.ViewBuilder()
        self._flavor_builder = views_flavors.ViewBuilder()
        self._image_builder = views_images.ViewBuilder()
    def create(self, request, instance):
        """View that should be returned when an instance is created."""
        return {
            "server": {
                "id": instance["uuid"],
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }
    def basic(self, request, instance):
        """Generic, non-detailed view of an instance."""
        return {
            "server": {
                "id": instance["uuid"],
                "name": instance["display_name"],
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }
    def get_show_expected_attrs(self, expected_attrs=None):
        """Returns a list of lazy-loadable expected attributes used by show
        This should be used when getting the instances from the database so
        that the necessary attributes are pre-loaded before needing to build
        the show response where lazy-loading can fail if an instance was
        deleted.
        :param list expected_attrs: The list of expected attributes that will
            be requested in addition to what this view builder requires. This
            method will merge the two lists and return what should be
            ultimately used when getting an instance from the database.
        :returns: merged and sorted list of expected attributes
        """
        if expected_attrs is None:
            expected_attrs = []
        # NOTE(mriedem): We sort the list so we can have predictable test
        # results.
        return sorted(list(set(self._show_expected_attrs + expected_attrs)))
    def show(self, request, instance):
        """Detailed view of a single instance."""
        ip_v4 = instance.get('access_ip_v4')
        ip_v6 = instance.get('access_ip_v6')
        server = {
            "server": {
                "id": instance["uuid"],
                "name": instance["display_name"],
                "status": self._get_vm_status(instance),
                "tenant_id": instance.get("project_id") or "",
                "user_id": instance.get("user_id") or "",
                "metadata": self._get_metadata(instance),
                "hostId": self._get_host_id(instance) or "",
                "image": self._get_image(request, instance),
                "flavor": self._get_flavor(request, instance),
                "created": timeutils.isotime(instance["created_at"]),
                "updated": timeutils.isotime(instance["updated_at"]),
                "addresses": self._get_addresses(request, instance),
                "accessIPv4": str(ip_v4) if ip_v4 is not None else '',
                "accessIPv6": str(ip_v6) if ip_v6 is not None else '',
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }
        # Fault details only for ERROR/DELETED; progress only for
        # transitional/active states.
        if server["server"]["status"] in self._fault_statuses:
            _inst_fault = self._get_fault(request, instance)
            if _inst_fault:
                server['server']['fault'] = _inst_fault
        if server["server"]["status"] in self._progress_statuses:
            server["server"]["progress"] = instance.get("progress", 0)
        return server
    def index(self, request, instances):
        """Show a list of servers without many details."""
        coll_name = self._collection_name
        return self._list_view(self.basic, request, instances, coll_name)
    def detail(self, request, instances):
        """Detailed view of a list of instance."""
        coll_name = self._collection_name + '/detail'
        return self._list_view(self.show, request, instances, coll_name)
    def _list_view(self, func, request, servers, coll_name):
        """Provide a view for a list of servers.
        :param func: Function used to format the server data
        :param request: API request
        :param servers: List of servers in dictionary format
        :param coll_name: Name of collection, used to generate the next link
                          for a pagination query
        :returns: Server data in dictionary format
        """
        server_list = [func(request, server)["server"] for server in servers]
        servers_links = self._get_collection_links(request,
                                                   servers,
                                                   coll_name)
        servers_dict = dict(servers=server_list)
        if servers_links:
            servers_dict["servers_links"] = servers_links
        return servers_dict
    @staticmethod
    def _get_metadata(instance):
        """Return the instance metadata dict for both object and dict forms."""
        # FIXME(danms): Transitional support for objects
        metadata = instance.get('metadata')
        if isinstance(instance, obj_base.NovaObject):
            return metadata or {}
        else:
            return utils.instance_meta(instance)
    @staticmethod
    def _get_vm_status(instance):
        """Map vm/task state to the API status string."""
        # If the instance is deleted the vm and task states don't really matter
        if instance.get("deleted"):
            return "DELETED"
        return common.status_from_state(instance.get("vm_state"),
                                        instance.get("task_state"))
    @staticmethod
    def _get_host_id(instance):
        """Return sha224(project_id + host) to obfuscate the real host name.

        Implicitly returns None when the instance has no host.
        """
        host = instance.get("host")
        project = str(instance.get("project_id"))
        if host:
            sha_hash = hashlib.sha224(project + host)
            return sha_hash.hexdigest()
    def _get_addresses(self, request, instance, extend_address=False):
        """Build the "addresses" sub-document from the instance's networks."""
        context = request.environ["nova.context"]
        networks = common.get_networks_for_instance(context, instance)
        return self._address_builder.index(networks,
                                           extend_address)["addresses"]
    def _get_image(self, request, instance):
        """Return {id, links} for the instance's image, or "" if imageless."""
        image_ref = instance["image_ref"]
        if image_ref:
            image_id = str(common.get_id_from_href(image_ref))
            bookmark = self._image_builder._get_bookmark_link(request,
                                                              image_id,
                                                              "images")
            return {
                "id": image_id,
                "links": [{
                    "rel": "bookmark",
                    "href": bookmark,
                }],
            }
        else:
            return ""
    def _get_flavor(self, request, instance):
        """Return {id, links} for the instance's flavor, or {} if missing."""
        instance_type = instance.get_flavor()
        if not instance_type:
            LOG.warning(_LW("Instance has had its instance_type removed "
                            "from the DB"), instance=instance)
            return {}
        flavor_id = instance_type["flavorid"]
        flavor_bookmark = self._flavor_builder._get_bookmark_link(request,
                                                                  flavor_id,
                                                                  "flavors")
        return {
            "id": str(flavor_id),
            "links": [{
                "rel": "bookmark",
                "href": flavor_bookmark,
            }],
        }
    def _get_fault(self, request, instance):
        """Build the fault sub-document; details hidden from non-admins on 500s."""
        # This can result in a lazy load of the fault information
        fault = instance.fault
        if not fault:
            return None
        fault_dict = {
            "code": fault["code"],
            "created": timeutils.isotime(fault["created_at"]),
            "message": fault["message"],
        }
        if fault.get('details', None):
            is_admin = False
            context = request.environ["nova.context"]
            if context:
                is_admin = getattr(context, 'is_admin', False)
            if is_admin or fault['code'] != 500:
                fault_dict['details'] = fault["details"]
        return fault_dict
class ViewBuilderV3(ViewBuilder):
    """Model a server V3 API response as a python dictionary."""
    def __init__(self):
        """Initialize view builder."""
        super(ViewBuilderV3, self).__init__()
        # V3 uses the extended address view; everything else is inherited.
        self._address_builder = views_addresses.ViewBuilderV3()
        # TODO(alex_xu): In V3 API, we correct the image bookmark link to
        # use glance endpoint. We revert back it to use nova endpoint for v2.1.
        self._image_builder = views_images.ViewBuilder()
    def show(self, request, instance, extend_address=True):
        """Detailed view of a single instance."""
        server = {
            "server": {
                "id": instance["uuid"],
                "name": instance["display_name"],
                "status": self._get_vm_status(instance),
                "tenant_id": instance.get("project_id") or "",
                "user_id": instance.get("user_id") or "",
                "metadata": self._get_metadata(instance),
                "hostId": self._get_host_id(instance) or "",
                # TODO(alex_xu): '_get_image' return {} when there image_ref
                # isn't existed in V3 API, we revert it back to return "" in
                # V2.1.
                "image": self._get_image(request, instance),
                "flavor": self._get_flavor(request, instance),
                "created": timeutils.isotime(instance["created_at"]),
                "updated": timeutils.isotime(instance["updated_at"]),
                "addresses": self._get_addresses(request, instance,
                                                 extend_address),
                "links": self._get_links(request,
                                         instance["uuid"],
                                         self._collection_name),
            },
        }
        if server["server"]["status"] in self._fault_statuses:
            _inst_fault = self._get_fault(request, instance)
            if _inst_fault:
                server['server']['fault'] = _inst_fault
        if server["server"]["status"] in self._progress_statuses:
            server["server"]["progress"] = instance.get("progress", 0)
        # Microversion 2.9 and later expose the "locked" boolean.
        if (request.api_version_request >=
            api_version_request.APIVersionRequest("2.9")):
            server["server"]["locked"] = (True if instance["locked_by"]
                                          else False)
        return server
| {
"content_hash": "ffaf9660991e5006bbd53366dcd11679",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 79,
"avg_line_length": 39.68456375838926,
"alnum_prop": 0.5362760020294267,
"repo_name": "nikesh-mahalka/nova",
"id": "5eac17c0bb679b97d15ef98384f904b0a32afbf4",
"size": "12513",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "nova/api/openstack/compute/views/servers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16554867"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259485"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import django
from django.db.models.aggregates import Sum
from django.db.models.expressions import F
from django.test import TestCase
from .models import Company, Employee
class ValuesExpressionsTests(TestCase):
    """Tests for expressions passed directly to values()/values_list().

    Expression support in values()/values_list() arrived in Django 1.11,
    so every test skips itself on older versions.
    """

    @classmethod
    def setUpTestData(cls):
        Company.objects.create(
            name='Example Inc.', num_employees=2300, num_chairs=5,
            ceo=Employee.objects.create(firstname='Joe', lastname='Smith', salary=10)
        )
        Company.objects.create(
            name='Foobar Ltd.', num_employees=3, num_chairs=4,
            ceo=Employee.objects.create(firstname='Frank', lastname='Meyer', salary=20)
        )
        Company.objects.create(
            name='Test GmbH', num_employees=32, num_chairs=1,
            ceo=Employee.objects.create(firstname='Max', lastname='Mustermann', salary=30)
        )

    def _require_values_expressions(self):
        # DRY: this version check was duplicated in every test method.
        if django.VERSION < (1, 11, 0):
            self.skipTest("does not work on older django")

    def test_values_expression(self):
        self._require_values_expressions()
        self.assertSequenceEqual(
            Company.objects.values(salary=F('ceo__salary')),
            [{'salary': 10}, {'salary': 20}, {'salary': 30}],
        )

    def test_values_expression_group_by(self):
        # values() applies annotate() first, so values selected are grouped by
        # id, not firstname.
        self._require_values_expressions()
        Employee.objects.create(firstname='Joe', lastname='Jones', salary=2)
        joes = Employee.objects.filter(firstname='Joe')
        self.assertSequenceEqual(
            joes.values('firstname', sum_salary=Sum('salary')).order_by('sum_salary'),
            [{'firstname': 'Joe', 'sum_salary': 2}, {'firstname': 'Joe', 'sum_salary': 10}],
        )
        self.assertSequenceEqual(
            joes.values('firstname').annotate(sum_salary=Sum('salary')),
            [{'firstname': 'Joe', 'sum_salary': 12}]
        )

    def test_chained_values_with_expression(self):
        self._require_values_expressions()
        Employee.objects.create(firstname='Joe', lastname='Jones', salary=2)
        joes = Employee.objects.filter(firstname='Joe').values('firstname')
        self.assertSequenceEqual(
            joes.values('firstname', sum_salary=Sum('salary')),
            [{'firstname': 'Joe', 'sum_salary': 12}]
        )
        self.assertSequenceEqual(
            joes.values(sum_salary=Sum('salary')),
            [{'sum_salary': 12}]
        )

    def test_values_list_expression(self):
        self._require_values_expressions()
        companies = Company.objects.values_list('name', F('ceo__salary'))
        self.assertSequenceEqual(companies, [('Example Inc.', 10), ('Foobar Ltd.', 20), ('Test GmbH', 30)])

    def test_values_list_expression_flat(self):
        self._require_values_expressions()
        companies = Company.objects.values_list(F('ceo__salary'), flat=True)
        self.assertSequenceEqual(companies, (10, 20, 30))
| {
"content_hash": "ffaa302ed61062ee254b7c031ef23f53",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 107,
"avg_line_length": 42.29333333333334,
"alnum_prop": 0.6097099621689785,
"repo_name": "denisenkom/django-sqlserver",
"id": "5975c2076f008b1dee4c9804b2c659938487526e",
"size": "3172",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/expressions/test_queryset_values.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1988672"
}
],
"symlink_target": ""
} |
import pygame
from pygame.locals import *
import sys
SCREEN_SIZE = (1280, 720)  # resolution of the game

HORIZ_MOV_INCR = 10  # horizontal speed of movement, in pixels per frame

# FIX: the original declared ``global HORIZ_MOV_INCR`` / ``global FPS`` /
# ``global clock`` / ``global time_spent`` here.  A ``global`` statement at
# module level is a no-op (it neither creates nor binds a name), so those
# declarations were removed.  FPS, clock and time_spent are presumably
# assigned elsewhere at runtime -- TODO confirm before relying on them.
def RelRect(actor, camera):
    """Return the actor's rect translated into camera-relative coordinates."""
    offset_x = actor.rect.x - camera.rect.x
    offset_y = actor.rect.y - camera.rect.y
    return pygame.Rect(offset_x, offset_y, actor.rect.w, actor.rect.h)
class Camera(object):
    """Keeps the visible screen rect centred (with 25px slack) on the player."""
    def __init__(self, screen, player, level_width, level_height):
        self.player = player
        self.rect = screen.get_rect()
        self.rect.center = self.player.center
        self.world_rect = Rect(0, 0, level_width, level_height)
    def update(self):
        """Follow the player, allowing 25px of slack, clamped to the world."""
        dx = self.player.centerx - self.rect.centerx
        if dx > 25:
            self.rect.centerx = self.player.centerx - 25
        elif dx < -25:
            self.rect.centerx = self.player.centerx + 25
        dy = self.player.centery - self.rect.centery
        if dy > 25:
            self.rect.centery = self.player.centery - 25
        elif dy < -25:
            self.rect.centery = self.player.centery + 25
        self.rect.clamp_ip(self.world_rect)
    def draw_sprites(self, surf, sprites):
        """Blit every sprite that intersects the camera's view onto surf."""
        visible = (s for s in sprites if s.rect.colliderect(self.rect))
        for sprite in visible:
            surf.blit(sprite.image, RelRect(sprite, self))
class Obstacle(pygame.sprite.Sprite):
    """A static solid tile loaded from world/obstacle.png."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        self.x = x
        self.y = y
        self.image = pygame.image.load("world/obstacle.png").convert()
        self.rect = self.image.get_rect()
        self.rect.topleft = [self.x, self.y]
class Crashman(pygame.sprite.Sprite):
    '''Player sprite: input handling, run/jump animation, gravity and
    collision against the obstacle list.'''
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        # movx/movy: current velocity; contact: standing on something;
        # jump: currently airborne from a jump.
        self.movy = 0
        self.movx = 0
        self.x = x
        self.y = y
        self.contact = False
        self.jump = False
        self.image = pygame.image.load('actions/idle_right.png').convert()
        self.rect = self.image.get_rect()
        # Animation frame file lists for each facing direction.
        self.run_left = ["actions/run_left000.png","actions/run_left001.png",
                    "actions/run_left002.png", "actions/run_left003.png",
                    "actions/run_left004.png", "actions/run_left005.png",
                    "actions/run_left006.png", "actions/run_left007.png"]
        self.run_right = ["actions/run_right000.png","actions/run_right001.png",
                    "actions/run_right002.png", "actions/run_right003.png",
                    "actions/run_right004.png", "actions/run_right005.png",
                    "actions/run_right006.png", "actions/run_right007.png"]
        self.direction = "right"
        self.rect.topleft = [x, y]
        self.frame = 0
    def update(self, up, down, left, right):
        """Advance one tick given the four pressed-key booleans.

        NOTE(review): this method reads the module-level name ``world``
        (the obstacle list) which is not defined in this class -- it is
        presumably created elsewhere at module scope; confirm before reuse.
        """
        if up:
            # Only jump when standing on something.
            if self.contact:
                if self.direction == "right":
                    self.image = pygame.image.load("actions/jump_right.png")
                self.jump = True
                self.movy -= 20
        if down:
            if self.contact and self.direction == "right":
                self.image = pygame.image.load('actions/down_right.png').convert_alpha()
            if self.contact and self.direction == "left":
                self.image = pygame.image.load('actions/down_left.png').convert_alpha()
        if not down and self.direction == "right":
            self.image = pygame.image.load('actions/idle_right.png').convert_alpha()
        if not down and self.direction == "left":
            self.image = pygame.image.load('actions/idle_left.png').convert_alpha()
        if left:
            self.direction = "left"
            self.movx = -HORIZ_MOV_INCR
            if self.contact:
                # Cycle the run animation while grounded.
                self.frame += 1
                self.image = pygame.image.load(self.run_left[self.frame]).convert_alpha()
                if self.frame == 6: self.frame = 0
            else:
                # NOTE(review): redundant double assignment kept as-is.
                self.image = self.image = pygame.image.load("actions/jump_left.png").convert_alpha()
        if right:
            self.direction = "right"
            self.movx = +HORIZ_MOV_INCR
            if self.contact:
                self.frame += 1
                self.image = pygame.image.load(self.run_right[self.frame]).convert_alpha()
                if self.frame == 6: self.frame = 0
            else:
                # NOTE(review): redundant double assignment kept as-is.
                self.image = self.image = pygame.image.load("actions/jump_right.png").convert_alpha()
        if not (left or right):
            self.movx = 0
        # Horizontal move then horizontal collision resolution.
        self.rect.right += self.movx
        self.collide(self.movx, 0, world)
        if not self.contact:
            # Gravity, capped at a terminal velocity of 10.
            self.movy += 0.3
            if self.movy > 10:
                self.movy = 10
            self.rect.top += self.movy
        if self.jump:
            self.movy += 2
            self.rect.top += self.movy
            if self.contact == True:
                self.jump = False
        # Vertical move then vertical collision resolution (sets contact).
        self.contact = False
        self.collide(0, self.movy, world)
    def collide(self, movx, movy, world):
        """Push the player out of any obstacle along the axis just moved."""
        self.contact = False
        for o in world:
            if self.rect.colliderect(o):
                if movx > 0:
                    self.rect.right = o.rect.left
                if movx < 0:
                    self.rect.left = o.rect.right
                if movy > 0:
                    # Landed on top of an obstacle.
                    self.rect.bottom = o.rect.top
                    self.movy = 0
                    self.contact = True
                if movy < 0:
                    # Bumped head on the underside of an obstacle.
                    self.rect.top = o.rect.bottom
                    self.movy = 0
class Level(object):
    '''Read a map file and build the level's obstacles and player.'''
    def __init__(self, open_level):
        self.level1 = []
        self.world = []
        self.all_sprite = pygame.sprite.Group()
        # Read the whole map eagerly so the file handle is closed promptly
        # (the original kept the open file object for the object's lifetime).
        with open(open_level, "r") as map_file:
            self.level = map_file.readlines()
    def create_level(self, x, y):
        '''Instantiate an Obstacle for each "X" and the player for "P".

        Tiles are laid out on a 25x25 pixel grid starting at (x, y).
        Sets self.crashman when a "P" tile is found.
        '''
        for l in self.level:
            self.level1.append(l)
        for row in self.level1:
            for col in row:
                if col == "X":
                    obstacle = Obstacle(x, y)
                    self.world.append(obstacle)
                    self.all_sprite.add(self.world)
                if col == "P":
                    self.crashman = Crashman(x, y)
                    self.all_sprite.add(self.crashman)
                x += 25
            y += 25
            x = 0
    def get_size(self):
        '''Return the pixel (width, height) of the level, 25px per tile.'''
        lines = self.level1
        # Width is driven by the longest map row.
        line = max(lines, key=len)
        self.width = len(line) * 25
        self.height = len(lines) * 25
        return (self.width, self.height)
def tps(orologio, fps):
    '''Advance the clock capped at *fps* and return the frame time in seconds.'''
    return orologio.tick(fps) / 1000.
# ---------------------------------------------------------------------------
# Main game: initialise pygame, build the level, then run the event loop.
# NOTE(review): SCREEN_SIZE, FULLSCREEN, QUIT, KEYDOWN, K_* and sys are
# presumably brought in earlier in the file (e.g. `from pygame.locals
# import *`) — confirm against the top of the file.
# ---------------------------------------------------------------------------
pygame.init()
screen = pygame.display.set_mode(SCREEN_SIZE, FULLSCREEN, 32)
screen_rect = screen.get_rect()
background = pygame.image.load("world/background2.jpg").convert_alpha()
background_rect = background.get_rect()
# Build the level from the map file; exposes world (obstacles) and crashman.
level = Level("level/level1")
level.create_level(0,0)
world = level.world
crashman = level.crashman
pygame.mouse.set_visible(0)
camera = Camera(screen, crashman.rect, level.get_size()[0], level.get_size()[1])
all_sprite = level.all_sprite
FPS = 30
clock = pygame.time.Clock()
# Current held state of the four movement keys.
up = down = left = right = False
x, y = 0, 0
while True:
    for event in pygame.event.get():
        # Quit on window close or ESC.
        if event.type == QUIT or event.type == KEYDOWN and event.key == K_ESCAPE:
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN and event.key == K_UP:
            up = True
        if event.type == KEYDOWN and event.key == K_DOWN:
            down = True
        if event.type == KEYDOWN and event.key == K_LEFT:
            left = True
        if event.type == KEYDOWN and event.key == K_RIGHT:
            right = True
        if event.type == KEYUP and event.key == K_UP:
            up = False
        if event.type == KEYUP and event.key == K_DOWN:
            down = False
        if event.type == KEYUP and event.key == K_LEFT:
            left = False
        if event.type == KEYUP and event.key == K_RIGHT:
            right = False
    # Smallest tile grid (in pixels) that covers the whole screen.
    asize = ((screen_rect.w // background_rect.w + 1) * background_rect.w, (screen_rect.h // background_rect.h + 1) * background_rect.h)
    # NOTE(review): `bg` is allocated every frame but never used — the tiling
    # below blits directly onto `screen`; this looks like dead code.
    bg = pygame.Surface(asize)
    # Tile the background across the window (x, y are reused as loop vars).
    for x in range(0, asize[0], background_rect.w):
        for y in range(0, asize[1], background_rect.h):
            screen.blit(background, (x, y))
    time_spent = tps(clock, FPS)
    camera.draw_sprites(screen, all_sprite)
    crashman.update(up, down, left, right)
    camera.update()
    pygame.display.flip()
| {
"content_hash": "0341ada1e3902f625437955fbc8b19b9",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 136,
"avg_line_length": 34.155378486055774,
"alnum_prop": 0.5518488277149189,
"repo_name": "owattenmaker/PythonFighter",
"id": "1eb654f9a937c63f289e292d30031ffc32d3cd95",
"size": "9597",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "idk/crashman.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "53"
},
{
"name": "Python",
"bytes": "68611"
}
],
"symlink_target": ""
} |
__author__ = 'Krishna Mudragada'

from setuptools import setup

# Packaging metadata for the FeedTester distribution.
setup(name='FeedTester',
      version='0.1',
      description='Test your Data feeds for validity',
      url='http://github.com/mudragada/Feeds',
      author='Krishna Mudragada',
      license='MIT',
      # Fixed: 'logging' and 're' are part of the Python standard library;
      # listing them in install_requires made "pip install" try (and fail)
      # to fetch them from PyPI. The package has no third-party dependencies.
      install_requires=[])
| {
"content_hash": "7c452c32040ad0e13cb2d7fe6a7a3310",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 54,
"avg_line_length": 34.333333333333336,
"alnum_prop": 0.6472491909385113,
"repo_name": "mudragada/util-scripts",
"id": "1e20e116b32e501eed5110109b248651e37e8f13",
"size": "309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FTPUtils/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "79980"
}
],
"symlink_target": ""
} |
# Sphinx build configuration for the "Ckan API client" documentation.
import os
# import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings.
# They can be extensions coming with Sphinx (named 'sphinx.ext.*')
# or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.todo',
              'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Ckan API client'
# Sphinx reads the module-level name "copyright"; shadowing the builtin is
# expected in a conf.py.
copyright = u'2014, Trento RISE'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1a'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for
# all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
## -- Autodoc configuration ---------------------------------------------------
# Preserve order in the source code
autodoc_member_order = 'bysource'
## -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Only switch to sphinx_rtd_theme for local builds; on Read the Docs the
# READTHEDOCS env var is 'True' and the default theme is kept — presumably
# because RTD applies its own theme there (NOTE(review): confirm).
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'CkanAPIclientdoc'
# -- Options for LaTeX output -------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual])
latex_documents = [
    ('index', 'CkanAPIclient.tex', u'Ckan API client Documentation',
     u'Samuele Santi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output -------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'ckanapiclient', u'Ckan API client Documentation',
     [u'Samuele Santi'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'CkanAPIclient', u'Ckan API client Documentation',
     u'Samuele Santi', 'CkanAPIclient', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "27e68a49479ddd8ddf54477627016343",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 79,
"avg_line_length": 31.93089430894309,
"alnum_prop": 0.6934436664544876,
"repo_name": "opendatatrentino/ckan-api-client",
"id": "51190e0986a99c5e4db7df44f324d8af22492970",
"size": "8283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "7801"
},
{
"name": "Python",
"bytes": "250289"
},
{
"name": "Shell",
"bytes": "11387"
}
],
"symlink_target": ""
} |
import copy
import mock
from oslo_config import cfg
import oslo_messaging
from neutron.common import config as base_config
from neutron.common import constants as l3_constants
from neutron.openstack.common import uuidutils
from neutron.tests import base
from networking_cisco.plugins.cisco.cfg_agent import cfg_agent
from networking_cisco.plugins.cisco.cfg_agent import cfg_exceptions
from networking_cisco.plugins.cisco.cfg_agent.service_helpers import (
routing_svc_helper)
# Shorthand for generating fresh UUID strings in the fixtures below.
_uuid = uuidutils.generate_uuid
# Host name the fake agent reports itself under.
HOST = 'myhost'
# A stable router id reused by the notification tests.
FAKE_ID = _uuid()
def prepare_router_data(enable_snat=None, num_internal_ports=1):
    """Build a fake router dict plus its internal port list for the tests.

    Returns a ``(router, internal_ports)`` tuple wired to a fake hosting
    device; ``enable_snat`` is only set on the router when it is not None.
    """
    router_id = _uuid()
    ex_gw_port = {'id': _uuid(),
                  'network_id': _uuid(),
                  'fixed_ips': [{'ip_address': '19.4.4.4',
                                 'subnet_id': _uuid()}],
                  'subnet': {'cidr': '19.4.4.0/24',
                             'gateway_ip': '19.4.4.1'}}
    # One internal port per requested index, each on its own /24.
    int_ports = [{'id': _uuid(),
                  'network_id': _uuid(),
                  'admin_state_up': True,
                  'fixed_ips': [{'ip_address': '35.4.%s.4' % i,
                                 'subnet_id': _uuid()}],
                  'mac_address': 'ca:fe:de:ad:be:ef',
                  'subnet': {'cidr': '35.4.%s.0/24' % i,
                             'gateway_ip': '35.4.%s.1' % i}}
                 for i in range(num_internal_ports)]
    hosting_device = {'id': _uuid(),
                      "name": "CSR1kv_template",
                      "booting_time": 300,
                      "host_category": "VM",
                      'management_ip_address': '20.0.0.5',
                      'protocol_port': 22,
                      "credentials": {"username": "user",
                                      "password": "4getme"},
                      }
    router = {'id': router_id,
              'admin_state_up': True,
              l3_constants.INTERFACE_KEY: int_ports,
              'routes': [],
              'gw_port': ex_gw_port,
              'hosting_device': hosting_device}
    if enable_snat is not None:
        router['enable_snat'] = enable_snat
    return router, int_ports
class TestRouterInfo(base.BaseTestCase):
    """Unit tests for the RouterInfo bookkeeping object."""

    def setUp(self):
        super(TestRouterInfo, self).setUp()
        self.ex_gw_port = {'id': _uuid(),
                           'network_id': _uuid(),
                           'fixed_ips': [{'ip_address': '19.4.4.4',
                                          'subnet_id': _uuid()}],
                           'subnet': {'cidr': '19.4.4.0/24',
                                      'gateway_ip': '19.4.4.1'}}
        self.router = {'id': _uuid(),
                       'enable_snat': True,
                       'routes': [],
                       'gw_port': self.ex_gw_port}

    def test_router_info_create(self):
        """The generated router name embeds the router id."""
        rid = _uuid()
        info = routing_svc_helper.RouterInfo(rid, {})
        self.assertTrue(info.router_name().endswith(rid))

    def test_router_info_create_with_router(self):
        """Creating with a router dict exposes it and enables SNAT."""
        rid = _uuid()
        info = routing_svc_helper.RouterInfo(rid, self.router)
        self.assertTrue(info.router_name().endswith(rid))
        self.assertEqual(info.router, self.router)
        self.assertEqual(info._router, self.router)
        self.assertTrue(info.snat_enabled)
        self.assertIsNone(info.ex_gw_port)

    def test_router_info_create_snat_disabled(self):
        """enable_snat=False on the router dict turns SNAT off."""
        rid = _uuid()
        self.router['enable_snat'] = False
        info = routing_svc_helper.RouterInfo(rid, self.router)
        self.assertFalse(info.snat_enabled)
class TestBasicRoutingOperations(base.BaseTestCase):
    def setUp(self):
        """Build a fake router and a RoutingServiceHelper with all of its
        plugin/RPC/driver collaborators replaced by mocks."""
        super(TestBasicRoutingOperations, self).setUp()
        self.conf = cfg.ConfigOpts()
        self.conf.register_opts(base_config.core_opts)
        self.conf.register_opts(cfg_agent.CiscoCfgAgent.OPTS)
        self.ex_gw_port = {'id': _uuid(),
                           'network_id': _uuid(),
                           'fixed_ips': [{'ip_address': '19.4.4.4',
                                          'subnet_id': _uuid()}],
                           'subnet': {'cidr': '19.4.4.0/24',
                                      'gateway_ip': '19.4.4.1'}}
        self.hosting_device = {'id': "100",
                               'name': "CSR1kv_template",
                               'booting_time': 300,
                               'host_category': "VM",
                               'management_ip_address': '20.0.0.5',
                               'protocol_port': 22,
                               'credentials': {'username': 'user',
                                               "password": '4getme'},
                               }
        self.router = {
            'id': _uuid(),
            'enable_snat': True,
            'routes': [],
            'gw_port': self.ex_gw_port,
            'hosting_device': self.hosting_device}
        self.agent = mock.Mock()
        # Patches & Mocks
        # Replace the routing plugin RPC API with a mock so no real RPC
        # connection is ever attempted.
        self.l3pluginApi_cls_p = mock.patch(
            'networking_cisco.plugins.cisco.cfg_agent.service_helpers.'
            'routing_svc_helper.CiscoRoutingPluginApi')
        l3plugin_api_cls = self.l3pluginApi_cls_p.start()
        self.plugin_api = mock.Mock()
        l3plugin_api_cls.return_value = self.plugin_api
        self.plugin_api.get_routers = mock.MagicMock()
        # Prevent the helper's periodic loop from actually running.
        self.looping_call_p = mock.patch(
            'neutron.openstack.common.loopingcall.FixedIntervalLoopingCall')
        self.looping_call_p.start()
        mock.patch('neutron.common.rpc.create_connection').start()
        self.routing_helper = routing_svc_helper.RoutingServiceHelper(
            HOST, self.conf, self.agent)
        # Stub the four network-plumbing hooks so tests can assert on the
        # exact calls _process_router makes.
        self.routing_helper._internal_network_added = mock.Mock()
        self.routing_helper._external_gateway_added = mock.Mock()
        self.routing_helper._internal_network_removed = mock.Mock()
        self.routing_helper._external_gateway_removed = mock.Mock()
        self.driver = self._mock_driver_and_hosting_device(
            self.routing_helper)
def _mock_driver_and_hosting_device(self, svc_helper):
svc_helper._dev_status.is_hosting_device_reachable = mock.MagicMock(
return_value=True)
driver = mock.MagicMock()
svc_helper._drivermgr.get_driver = mock.Mock(return_value=driver)
svc_helper._drivermgr.set_driver = mock.Mock(return_value=driver)
return driver
def _reset_mocks(self):
self.routing_helper._process_router_floating_ips.reset_mock()
self.routing_helper._internal_network_added.reset_mock()
self.routing_helper._external_gateway_added.reset_mock()
self.routing_helper._internal_network_removed.reset_mock()
self.routing_helper._external_gateway_removed.reset_mock()
def test_process_router_throw_config_error(self):
snip_name = 'CREATE_SUBINTERFACE'
e_type = 'Fake error'
e_tag = 'Fake error tag'
params = {'snippet': snip_name, 'type': e_type, 'tag': e_tag}
self.routing_helper._internal_network_added.side_effect = (
cfg_exceptions.CSR1kvConfigException(**params))
router, ports = prepare_router_data()
ri = routing_svc_helper.RouterInfo(router['id'], router)
self.assertRaises(cfg_exceptions.CSR1kvConfigException,
self.routing_helper._process_router, ri)
    def test_process_router(self):
        """Walk a router through its lifecycle (add, floating-ip remap,
        floating-ip removal, teardown) and assert which plumbing hooks
        _process_router invokes at each step."""
        router, ports = prepare_router_data()
        # Setup mock for call to process floating ips
        self.routing_helper._process_router_floating_ips = mock.Mock()
        # Template floating ip; deep-copied (and only then attached to the
        # router) further below.
        fake_floatingips1 = {'floatingips': [
            {'id': _uuid(),
             'floating_ip_address': '8.8.8.8',
             'fixed_ip_address': '7.7.7.7',
             'port_id': _uuid()}]}
        ri = routing_svc_helper.RouterInfo(router['id'], router=router)
        # Process with initial values
        self.routing_helper._process_router(ri)
        ex_gw_port = ri.router.get('gw_port')
        # Assert that process_floating_ips, internal_network & external network
        # added were all called with the right params
        self.routing_helper._process_router_floating_ips.assert_called_with(
            ri, ex_gw_port)
        self.routing_helper._internal_network_added.assert_called_with(
            ri, ports[0], ex_gw_port)
        self.routing_helper._external_gateway_added.assert_called_with(
            ri, ex_gw_port)
        self._reset_mocks()
        # remap floating IP to a new fixed ip
        fake_floatingips2 = copy.deepcopy(fake_floatingips1)
        fake_floatingips2['floatingips'][0]['fixed_ip_address'] = '7.7.7.8'
        router[l3_constants.FLOATINGIP_KEY] = fake_floatingips2['floatingips']
        # Process again and check that this time only process_floating_ips
        # was called.
        self.routing_helper._process_router(ri)
        ex_gw_port = ri.router.get('gw_port')
        self.routing_helper._process_router_floating_ips.assert_called_with(
            ri, ex_gw_port)
        self.assertFalse(self.routing_helper._internal_network_added.called)
        self.assertFalse(self.routing_helper._external_gateway_added.called)
        self._reset_mocks()
        # remove just the floating ips
        del router[l3_constants.FLOATINGIP_KEY]
        # Process again and check that this time also only the
        # process_floating_ips and external_network remove was called
        self.routing_helper._process_router(ri)
        ex_gw_port = ri.router.get('gw_port')
        self.routing_helper._process_router_floating_ips.assert_called_with(
            ri, ex_gw_port)
        self.assertFalse(self.routing_helper._internal_network_added.called)
        self.assertFalse(self.routing_helper._external_gateway_added.called)
        self._reset_mocks()
        # now no ports so state is torn down
        del router[l3_constants.INTERFACE_KEY]
        del router['gw_port']
        # Update router_info object
        ri.router = router
        # Keep a copy of the ex_gw_port before it's gone after processing.
        ex_gw_port = ri.ex_gw_port
        # Process router and verify that internal and external network removed
        # were called and floating_ips_process was called
        self.routing_helper._process_router(ri)
        self.assertFalse(self.routing_helper.
                         _process_router_floating_ips.called)
        self.assertFalse(self.routing_helper._external_gateway_added.called)
        self.assertTrue(self.routing_helper._internal_network_removed.called)
        self.assertTrue(self.routing_helper._external_gateway_removed.called)
        self.routing_helper._internal_network_removed.assert_called_with(
            ri, ports[0], ex_gw_port)
        self.routing_helper._external_gateway_removed.assert_called_with(
            ri, ex_gw_port)
    def test_routing_table_update(self):
        """Static-route changes translate into driver.routes_updated calls
        with 'replace' for added/changed routes and 'delete' for removed
        ones."""
        router = self.router
        fake_route1 = {'destination': '135.207.0.0/16',
                       'nexthop': '1.2.3.4'}
        fake_route2 = {'destination': '135.207.111.111/32',
                       'nexthop': '1.2.3.4'}
        # First we set the routes to fake_route1 and see if the
        # driver.routes_updated was called with 'replace'(==add or replace)
        # and fake_route1
        router['routes'] = [fake_route1]
        ri = routing_svc_helper.RouterInfo(router['id'], router)
        self.routing_helper._process_router(ri)
        self.driver.routes_updated.assert_called_with(ri, 'replace',
                                                      fake_route1)
        # Now we replace fake_route1 with fake_route2. This should cause driver
        # to be invoked to delete fake_route1 and 'replace'(==add or replace)
        self.driver.reset_mock()
        router['routes'] = [fake_route2]
        ri.router = router
        self.routing_helper._process_router(ri)
        self.driver.routes_updated.assert_called_with(ri, 'delete',
                                                      fake_route1)
        self.driver.routes_updated.assert_any_call(ri, 'replace', fake_route2)
        # Now we add back fake_route1 as a new route, this should cause driver
        # to be invoked to 'replace'(==add or replace) fake_route1
        self.driver.reset_mock()
        router['routes'] = [fake_route2, fake_route1]
        ri.router = router
        self.routing_helper._process_router(ri)
        self.driver.routes_updated.assert_any_call(ri, 'replace', fake_route1)
        # Now we delete all routes. This should cause driver
        # to be invoked to delete fake_route1 and fake_route2
        self.driver.reset_mock()
        router['routes'] = []
        ri.router = router
        self.routing_helper._process_router(ri)
        self.driver.routes_updated.assert_any_call(ri, 'delete', fake_route2)
        self.driver.routes_updated.assert_any_call(ri, 'delete', fake_route1)
    def test_process_router_internal_network_added_unexpected_error(self):
        """A failure while adding an internal network leaves the port
        untracked, so a later retry can pick it up again."""
        router, ports = prepare_router_data()
        ri = routing_svc_helper.RouterInfo(router['id'], router=router)
        # raise RuntimeError to simulate that an unexpected exception occurs
        self.routing_helper._internal_network_added.side_effect = RuntimeError
        self.assertRaises(RuntimeError,
                          self.routing_helper._process_router,
                          ri)
        self.assertNotIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
        # The unexpected exception has been fixed manually
        self.routing_helper._internal_network_added.side_effect = None
        # Failure will cause a retry next time, then we're able to add the
        # port to ri.internal_ports
        self.routing_helper._process_router(ri)
        self.assertIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
    def test_process_router_internal_network_removed_unexpected_error(self):
        """A failure while removing an internal network keeps the port
        tracked, so a later retry can finish the removal."""
        router, ports = prepare_router_data()
        ri = routing_svc_helper.RouterInfo(router['id'], router=router)
        # add an internal port
        self.routing_helper._process_router(ri)
        # raise RuntimeError to simulate that an unexpected exception occurs
        # (side_effect is a callable Mock that itself raises when invoked)
        self.routing_helper._internal_network_removed.side_effect = mock.Mock(
            side_effect=RuntimeError)
        ri.internal_ports[0]['admin_state_up'] = False
        # The above port is set to down state, remove it.
        self.assertRaises(RuntimeError,
                          self.routing_helper._process_router,
                          ri)
        self.assertIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
        # The unexpected exception has been fixed manually
        self.routing_helper._internal_network_removed.side_effect = None
        # Failure will cause a retry next time,
        # We were able to add the port to ri.internal_ports
        self.routing_helper._process_router(ri)
        # We were able to remove the port from ri.internal_ports
        self.assertNotIn(
            router[l3_constants.INTERFACE_KEY][0], ri.internal_ports)
def test_routers_with_admin_state_down(self):
self.plugin_api.get_external_network_id.return_value = None
routers = [
{'id': _uuid(),
'admin_state_up': False,
'external_gateway_info': {}}]
self.routing_helper._process_routers(routers, None)
self.assertNotIn(routers[0]['id'], self.routing_helper.router_info)
def test_router_deleted(self):
self.routing_helper.router_deleted(None, [FAKE_ID])
self.assertIn(FAKE_ID, self.routing_helper.removed_routers)
def test_routers_updated(self):
self.routing_helper.routers_updated(None, [FAKE_ID])
self.assertIn(FAKE_ID, self.routing_helper.updated_routers)
def test_removed_from_agent(self):
self.routing_helper.router_removed_from_agent(None,
{'router_id': FAKE_ID})
self.assertIn(FAKE_ID, self.routing_helper.removed_routers)
def test_added_to_agent(self):
self.routing_helper.router_added_to_agent(None, [FAKE_ID])
self.assertIn(FAKE_ID, self.routing_helper.updated_routers)
def test_process_router_delete(self):
router = self.router
router['gw_port'] = self.ex_gw_port
self.routing_helper._router_added(router['id'], router)
self.assertIn(router['id'], self.routing_helper.router_info)
# Now we remove the router
self.routing_helper._router_removed(router['id'], deconfigure=True)
self.assertNotIn(router['id'], self.routing_helper.router_info)
def test_collect_state(self):
router, ports = prepare_router_data(enable_snat=True,
num_internal_ports=2)
self.routing_helper._router_added(router['id'], router)
configurations = {}
configurations = self.routing_helper.collect_state(configurations)
hd_exp_result = {
router['hosting_device']['id']: {'routers': 1}}
self.assertEqual(1, configurations['total routers'])
self.assertEqual(1, configurations['total ex_gw_ports'])
self.assertEqual(2, configurations['total interfaces'])
self.assertEqual(0, configurations['total floating_ips'])
self.assertEqual(hd_exp_result, configurations['hosting_devices'])
self.assertEqual([], configurations['non_responding_hosting_devices'])
def test_sort_resources_per_hosting_device(self):
router1, port = prepare_router_data()
router2, port = prepare_router_data()
router3, port = prepare_router_data()
router4, port = prepare_router_data()
hd1_id = router1['hosting_device']['id']
hd2_id = router4['hosting_device']['id']
#Setting router2 and router3 device id same as router1's device id
router2['hosting_device']['id'] = hd1_id
router3['hosting_device']['id'] = hd1_id
resources = {'routers': [router1, router2, router4],
'removed_routers': [router3]}
devices = self.routing_helper._sort_resources_per_hosting_device(
resources)
self.assertEqual(2, len(devices.keys())) # Two devices
hd1_routers = [router1, router2]
self.assertEqual(hd1_routers, devices[hd1_id]['routers'])
self.assertEqual([router3], devices[hd1_id]['removed_routers'])
self.assertEqual([router4], devices[hd2_id]['routers'])
def test_get_router_ids_from_removed_devices_info(self):
removed_devices_info = {
'hosting_data': {'device_1': {'routers': ['id1', 'id2']},
'device_2': {'routers': ['id3', 'id4'],
'other_key': ['value1', 'value2']}}
}
resp = self.routing_helper._get_router_ids_from_removed_devices_info(
removed_devices_info)
self.assertEqual(sorted(resp), sorted(['id1', 'id2', 'id3', 'id4']))
    @mock.patch("eventlet.GreenPool.spawn_n")
    def test_process_services_full_sync_different_devices(self, mock_spawn):
        """Full sync spawns one worker per distinct hosting device."""
        router1, port = prepare_router_data()
        router2, port = prepare_router_data()
        self.plugin_api.get_routers = mock.Mock(
            return_value=[router1, router2])
        self.routing_helper.process_service()
        self.assertEqual(2, mock_spawn.call_count)
        call1 = mock.call(self.routing_helper._process_routers, [router1],
                          None, router1['hosting_device']['id'],
                          all_routers=True)
        call2 = mock.call(self.routing_helper._process_routers, [router2],
                          None, router2['hosting_device']['id'],
                          all_routers=True)
        mock_spawn.assert_has_calls([call1, call2], any_order=True)
    @mock.patch("eventlet.GreenPool.spawn_n")
    def test_process_services_full_sync_same_device(self, mock_spawn):
        """Routers sharing a hosting device are handled by a single worker."""
        router1, port = prepare_router_data()
        router2, port = prepare_router_data()
        router2['hosting_device']['id'] = router1['hosting_device']['id']
        self.plugin_api.get_routers = mock.Mock(return_value=[router1,
                                                              router2])
        self.routing_helper.process_service()
        self.assertEqual(1, mock_spawn.call_count)
        mock_spawn.assert_called_with(self.routing_helper._process_routers,
                                      [router1, router2],
                                      None,
                                      router1['hosting_device']['id'],
                                      all_routers=True)
    @mock.patch("eventlet.GreenPool.spawn_n")
    def test_process_services_with_updated_routers(self, mock_spawn):
        """An updated-router id triggers a targeted (non-full) fetch and a
        single worker spawn for that router."""
        router1, port = prepare_router_data()
        # Fake plugin: only answer when queried by explicit router ids.
        def routers_data(context, router_ids=None, hd_ids=None):
            if router_ids:
                return [router1]
        self.plugin_api.get_routers.side_effect = routers_data
        self.routing_helper.fullsync = False
        self.routing_helper.updated_routers.add(router1['id'])
        self.routing_helper.process_service()
        self.assertEqual(1, self.plugin_api.get_routers.call_count)
        self.plugin_api.get_routers.assert_called_with(
            self.routing_helper.context,
            router_ids=[router1['id']])
        self.assertEqual(1, mock_spawn.call_count)
        mock_spawn.assert_called_with(self.routing_helper._process_routers,
                                      [router1],
                                      None,
                                      router1['hosting_device']['id'],
                                      all_routers=False)
    @mock.patch("eventlet.GreenPool.spawn_n")
    def test_process_services_with_deviceid(self, mock_spawn):
        """Passing device_ids fetches routers by hosting-device id and
        spawns one worker for that device."""
        router, port = prepare_router_data()
        device_id = router['hosting_device']['id']
        # Fake plugin: only answer when queried by hosting-device ids.
        def routers_data(context, router_ids=None, hd_ids=None):
            if hd_ids:
                self.assertEqual([device_id], hd_ids)
                return [router]
        self.plugin_api.get_routers.side_effect = routers_data
        self.routing_helper.fullsync = False
        self.routing_helper.process_service(device_ids=[device_id])
        self.assertEqual(1, self.plugin_api.get_routers.call_count)
        self.plugin_api.get_routers.assert_called_with(
            self.routing_helper.context,
            hd_ids=[device_id])
        self.assertEqual(1, mock_spawn.call_count)
        mock_spawn.assert_called_with(self.routing_helper._process_routers,
                                      [router],
                                      None,
                                      device_id,
                                      all_routers=False)
    @mock.patch("eventlet.GreenPool.spawn_n")
    def test_process_services_with_removed_routers(self, mock_spawn):
        """A queued removed router is dispatched to a worker via the
        removed-routers argument (second positional slot)."""
        router, port = prepare_router_data()
        device_id = router['hosting_device']['id']
        self._mock_driver_and_hosting_device(self.routing_helper)
        self.routing_helper.fullsync = False
        # Emulate router added for setting up internal structures
        self.routing_helper._router_added(router['id'], router)
        # Add router to removed routers list and process it
        self.routing_helper.removed_routers.add(router['id'])
        self.routing_helper.process_service()
        self.assertEqual(1, mock_spawn.call_count)
        mock_spawn.assert_called_with(self.routing_helper._process_routers,
                                      None,
                                      [router],
                                      device_id,
                                      all_routers=False)
    @mock.patch("eventlet.GreenPool.spawn_n")
    def test_process_services_with_removed_routers_info(self, mock_spawn):
        """Routers removed via removed_devices_info and via the
        removed_routers set are processed in separate _process_routers
        calls, one per hosting device.
        """
        router1, port = prepare_router_data()
        device_id = router1['hosting_device']['id']
        router2, port = prepare_router_data()
        # Put router2 on a different hosting device than router1.
        router2['hosting_device']['id'] = _uuid()
        # hosting_data maps the removed device to the routers it hosted.
        removed_devices_info = {
            'hosting_data': {device_id: {'routers': [router1['id']]}},
            'deconfigure': True
        }
        self._mock_driver_and_hosting_device(self.routing_helper)
        self.routing_helper.fullsync = False
        # Emulate router added for setting up internal structures
        self.routing_helper._router_added(router1['id'], router1)
        self.routing_helper._router_added(router2['id'], router2)
        # Add router to removed routers list and process it
        self.routing_helper.removed_routers.add(router2['id'])
        self.routing_helper.process_service(
            removed_devices_info=removed_devices_info)
        # One spawn per hosting device; order is not guaranteed.
        self.assertEqual(2, mock_spawn.call_count)
        call1 = mock.call(self.routing_helper._process_routers,
                          None,
                          [router1],
                          router1['hosting_device']['id'],
                          all_routers=False)
        call2 = mock.call(self.routing_helper._process_routers,
                          None,
                          [router2],
                          router2['hosting_device']['id'],
                          all_routers=False)
        mock_spawn.assert_has_calls([call1, call2], any_order=True)
@mock.patch("eventlet.GreenPool.spawn_n")
def test_process_services_with_rpc_error(self, mock_spawn):
router, port = prepare_router_data()
self.plugin_api.get_routers.side_effect = (
oslo_messaging.MessagingException)
self.routing_helper.fullsync = False
self.routing_helper.updated_routers.add(router['id'])
self.routing_helper.process_service()
self.assertEqual(1, self.plugin_api.get_routers.call_count)
self.plugin_api.get_routers.assert_called_with(
self.routing_helper.context,
router_ids=[router['id']])
self.assertFalse(mock_spawn.called)
self.assertTrue(self.routing_helper.fullsync)
    def test_process_routers(self):
        """_process_routers registers the router with the driver and then
        runs per-router processing on the resulting RouterInfo.
        """
        router, port = prepare_router_data()
        driver = self._mock_driver_and_hosting_device(self.routing_helper)
        self.routing_helper._process_router = mock.Mock()
        self.routing_helper._process_routers([router], None)
        # The helper must have created a RouterInfo entry for the router.
        ri = self.routing_helper.router_info[router['id']]
        driver.router_added.assert_called_with(ri)
        self.routing_helper._process_router.assert_called_with(ri)
    def _process_routers_floatingips(self, action='add'):
        """Drive _process_router_floating_ips through an add, followed by an
        optional remove or remap, and verify the driver callbacks.

        :param action: one of 'add', 'remove' or 'remap'
        """
        router, port = prepare_router_data()
        driver = self._mock_driver_and_hosting_device(self.routing_helper)
        ex_gw_port = router['gw_port']
        floating_ip_address = '19.4.4.10'
        fixed_ip_address = '35.4.1.10'
        fixed_ip_address_2 = '35.4.1.15'
        port_id = 'fake_port_id'
        floating_ip = {'fixed_ip_address': fixed_ip_address,
                       'floating_ip_address': floating_ip_address,
                       'id': 'floating_ip_id',
                       'port_id': port_id,
                       'status': 'ACTIVE', }
        router[l3_constants.FLOATINGIP_KEY] = [floating_ip]
        ri = routing_svc_helper.RouterInfo(router['id'], router=router)
        # Default add action
        self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
        driver.floating_ip_added.assert_called_with(
            ri, ex_gw_port, floating_ip_address, fixed_ip_address)
        if action == 'remove':
            # Dropping the floating IP from the router triggers removal.
            router[l3_constants.FLOATINGIP_KEY] = []
            self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
            driver.floating_ip_removed.assert_called_with(
                ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address)
        if action == 'remap':
            # Same floating IP re-associated to a new fixed IP: both the
            # removal of the old mapping and the add of the new one must
            # be reported to the driver.
            driver.reset_mock()
            floating_ip_2 = copy.deepcopy(floating_ip)
            floating_ip_2['fixed_ip_address'] = fixed_ip_address_2
            ri.router[l3_constants.FLOATINGIP_KEY] = [floating_ip_2]
            self.routing_helper._process_router_floating_ips(ri, ex_gw_port)
            driver.floating_ip_added.assert_called_with(
                ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address_2)
            driver.floating_ip_removed.assert_called_with(
                ri, ri.ex_gw_port, floating_ip_address, fixed_ip_address)
    def test_process_routers_floatingips_add(self):
        # Association of a floating IP must invoke floating_ip_added.
        self._process_routers_floatingips(action="add")

    def test_process_routers_floatingips_remove(self):
        # Disassociation must invoke floating_ip_removed.
        self._process_routers_floatingips(action="remove")

    def test_process_routers_floatingips_remap(self):
        # Re-association to a new fixed IP must invoke both callbacks.
        self._process_routers_floatingips(action="remap")
| {
"content_hash": "97a39409426761df8d34d89fecaa72ea",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 79,
"avg_line_length": 45.654574132492115,
"alnum_prop": 0.5852133356365521,
"repo_name": "cisco-openstack/networking-cisco",
"id": "385faa489bbf82eb65964b990d392fe34885b53c",
"size": "29579",
"binary": false,
"copies": "1",
"ref": "refs/heads/staging/libertyplus",
"path": "networking_cisco/tests/unit/cisco/cfg_agent/test_routing_svc_helper.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1180075"
},
{
"name": "Shell",
"bytes": "636"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Remove the ``price`` field from the ``tournament`` model."""

    dependencies = [
        ('registration', '0003_player_slug'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='tournament',
            name='price',
        ),
    ]
| {
"content_hash": "9d347b86a8294d29f5a866f9cdc453ec",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 45,
"avg_line_length": 19.352941176470587,
"alnum_prop": 0.5927051671732523,
"repo_name": "eldruz/tournament_registration",
"id": "22a9d7e0e1d823d2a67c4e22c18740b18e409610",
"size": "353",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tournament_registration/registration/migrations/0004_remove_tournament_price.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1210"
},
{
"name": "JavaScript",
"bytes": "44"
},
{
"name": "Python",
"bytes": "100432"
},
{
"name": "Shell",
"bytes": "5129"
}
],
"symlink_target": ""
} |
"""
Functions for torque/pbs scheduler.
"""
from __future__ import division
import re
__author__ = "Ryo KOBAYASHI"
__version__ = "170123"
__email__ = "[email protected]"
_fields = (
'Job Id', 'Job_Name', 'Job_Owner', 'job_state',
'queue', 'server', 'Checkpoint', 'ctime',
'Error_Path', 'exec_host', 'Hold_Types'
'Join_Path', 'Keep_Files', 'Mail_Points'
'mtime', 'Output_Path', 'Priority', 'qtime', 'Rerunable',
'Resource_List.cput', 'Resource_List.nodect',
'Resource_List.nodes', 'session_id',
'Variable_List', 'etime'
'submit_args', 'start_time', 'start_count',
)
_commands = {
'submit' : 'qsub',
'delete' : 'qdel',
'status' : 'qstat',
'full-status' : 'qstat -f',
}
_script_template_single = """#!/bin/bash
#PBS -N {JOB_NAME}
#PBS -o out
#PBS -q {QUEUE}
#PBS -j oe
#PBS -l nodes={NNODES}:ppn={NPROCS_NODE}
#PBS -l walltime={WALLTIME}
cd {WORKDIR}
echo 'started at ' `date`
{COMMANDS}
echo 'ended at ' `date`
"""
def get_command(command_type):
if command_type in _commands :
return _commands[command_type]
else:
return None
def get_jobids():
    """Return the numeric ids of all jobs currently known to the scheduler.

    Runs `qstat -f` and extracts the id of every job record found in
    its output.
    """
    from subprocess import Popen, PIPE
    cmd = get_command('full-status')
    p = Popen(cmd,shell=True,stdout=PIPE,stderr=PIPE)
    command_out,err = p.communicate()
    jobdata = parse_jobdata(command_out)
    jobids = []
    for job in jobdata:
        jobids.append(int(job['Job Id']))
    return jobids
def parse_jobdata(command_out):
    """
    Parse job data from output of `qstat -f` command.
    `command_out` should be passed to this function as argument.
    Returns a list of dicts, one per job record.
    """
    jobs = []
    current = {}
    for raw_line in command_out.splitlines():
        if 'Job Id' in raw_line:
            # A new job record starts here; flush the one collected so far.
            if current:
                jobs.append(current)
                current = {}
            jobid_str = raw_line.split()[2]
            # Keep only the numeric part before the server suffix.
            current['Job Id'] = jobid_str.split('.')[0]
            continue
        tokens = raw_line.split()
        for field in _fields:
            if field in raw_line:
                value = tokens[1]
                if value == '=':
                    value = tokens[2]
                current[field] = value
    if current:
        jobs.append(current)
    return jobs
def script_single(job_info):
    """
    Create job script context using given `job_info`.

    The `job_info` dict has to contain the following keys (all of them are
    substituted into the script template):

    - JOB_NAME
    - QUEUE
    - NNODES
    - NPROCS_NODE
    - WALLTIME
    - WORKDIR
    - COMMANDS

    This makes a script for only one calculation.
    A job script that can run more than 1 calculation is now under
    construction...
    """
    return _script_template_single.format(**job_info)
def submit(script_path):
    """Submit the job script at *script_path* with qsub.

    Returns the numeric job id parsed from the scheduler's reply
    (qsub prints e.g. '1234.server-name').
    Raises ValueError if the reply does not start with an integer id.
    """
    from subprocess import Popen, PIPE
    command = '{0} {1}'.format(get_command('submit'), script_path)
    # NOTE: the original wrapped this in `try: ... except: raise`, which is
    # a no-op that only obscured the code; any exception still propagates.
    p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    # NOTE(review): on Python 3 `out` is bytes and would need decoding
    # before the str split below — confirm if this module is ever ported.
    jobid = int(out.split('.')[0])
    return jobid
def delete(jobid):
    """Delete job *jobid* with qdel and return the command's stdout.

    Any failure to launch the command propagates to the caller.
    """
    from subprocess import Popen, PIPE
    command = '{0} {1:d}'.format(get_command('delete'), jobid)
    # NOTE: the original wrapped this in `try: ... except: raise`, which is
    # a no-op; exceptions propagate unchanged either way.
    p = Popen(command, shell=True, stdout=PIPE, stderr=PIPE)
    out, err = p.communicate()
    return out
| {
"content_hash": "6ed5c5c46429bb95eb17ef3058e5d026",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 84,
"avg_line_length": 24.51824817518248,
"alnum_prop": 0.5718963977374218,
"repo_name": "ryokbys/nap",
"id": "13af938a6e78fa80a406d2c4cb7d5f468850b7b6",
"size": "3383",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nappy/scheduler/pbs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "51018"
},
{
"name": "Clojure",
"bytes": "260"
},
{
"name": "Fortran",
"bytes": "2419017"
},
{
"name": "M4",
"bytes": "738"
},
{
"name": "Makefile",
"bytes": "723"
},
{
"name": "Python",
"bytes": "891385"
},
{
"name": "Ruby",
"bytes": "12395"
},
{
"name": "Shell",
"bytes": "38722"
},
{
"name": "TeX",
"bytes": "19262"
}
],
"symlink_target": ""
} |
from google.cloud import speech_v1
import io
def sample_recognize(local_file_path):
    """
    Transcribe a short audio file with multiple channels

    Args:
      local_file_path Path to local audio file, e.g. /path/audio.wav
    """

    client = speech_v1.SpeechClient()

    # local_file_path = 'resources/multi.wav'

    # The number of channels in the input audio file (optional)
    audio_channel_count = 2

    # When set to true, each audio channel will be recognized separately.
    # The recognition result will contain a channel_tag field to state which
    # channel that result belongs to
    enable_separate_recognition_per_channel = True

    # The language of the supplied audio
    language_code = "en-US"
    config = {
        "audio_channel_count": audio_channel_count,
        "enable_separate_recognition_per_channel": enable_separate_recognition_per_channel,
        "language_code": language_code,
    }
    # Load the whole file into memory; recognize() is meant for short audio.
    with io.open(local_file_path, "rb") as f:
        content = f.read()
    audio = {"content": content}

    response = client.recognize(config, audio)
    for result in response.results:
        # channel_tag to recognize which audio channel this result is for
        print(u"Channel tag: {}".format(result.channel_tag))
        # First alternative is the most probable result
        alternative = result.alternatives[0]
        print(u"Transcript: {}".format(alternative.transcript))

# [END speech_transcribe_multichannel]
def main():
    """CLI entry point: transcribe the audio file given on the command line."""
    import argparse

    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--local_file_path", type=str, default="resources/multi.wav")
    parsed = arg_parser.parse_args()
    sample_recognize(parsed.local_file_path)


if __name__ == "__main__":
    main()
| {
"content_hash": "36bf19838d2a160ca812e788c78259f5",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 91,
"avg_line_length": 29.47457627118644,
"alnum_prop": 0.6728004600345026,
"repo_name": "tswast/google-cloud-python",
"id": "790835f22ff39732dc36c4e09154deb638a51290",
"size": "2839",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "speech/samples/v1/speech_transcribe_multichannel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the integer ``rodada`` (round) field to the ``partida`` model."""

    dependencies = [
        ('core', '0004_auto_20170721_1851'),
    ]

    operations = [
        migrations.AddField(
            model_name='partida',
            name='rodada',
            field=models.IntegerField(default=0),
            # preserve_default=False: per Django docs, the default of 0 is
            # only used to backfill existing rows during this migration.
            preserve_default=False,
        ),
    ]
| {
"content_hash": "4afa43c5988af15897a65462971136f5",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 21.526315789473685,
"alnum_prop": 0.5843520782396088,
"repo_name": "schiller/cartolafc",
"id": "c673416b49817058866fd6437980bb10a96ff4d4",
"size": "482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "core/migrations/0005_partida_rodada.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "119464"
},
{
"name": "Python",
"bytes": "53180"
}
],
"symlink_target": ""
} |
from nose.tools import *
from dateutil.parser import parse as time_parse
import yawhois
class TestWhoisIisSeSeStatusRegistered(object):
    """Parser tests for a whois.iis.se response on a registered .se domain.

    Each test asserts one parsed attribute of the record built from the
    fixture file in setUp().
    """

    def setUp(self):
        fixture_path = "spec/fixtures/responses/whois.iis.se/se/status_registered.txt"
        host   = "whois.iis.se"
        part = yawhois.record.Part(open(fixture_path, "r").read(), host)
        self.record = yawhois.record.Record(None, [part])

    def test_status(self):
        eq_(self.record.status, 'registered')

    def test_available(self):
        eq_(self.record.available, False)

    def test_domain(self):
        eq_(self.record.domain, "google.se")

    def test_nameservers(self):
        eq_(self.record.nameservers.__class__.__name__, 'list')
        eq_(len(self.record.nameservers), 4)
        eq_(self.record.nameservers[0].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[0].name, "ns1.google.com")
        eq_(self.record.nameservers[1].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[1].name, "ns2.google.com")
        eq_(self.record.nameservers[2].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[2].name, "ns3.google.com")
        eq_(self.record.nameservers[3].__class__.__name__, 'Nameserver')
        eq_(self.record.nameservers[3].name, "ns4.google.com")

    def test_admin_contacts(self):
        # .se responses expose no admin contacts for this fixture.
        eq_(self.record.admin_contacts.__class__.__name__, 'list')
        eq_(self.record.admin_contacts, [])

    def test_registered(self):
        eq_(self.record.registered, True)

    def test_created_on(self):
        eq_(self.record.created_on.__class__.__name__, 'datetime')
        eq_(self.record.created_on, time_parse('2008-10-20'))

    def test_registrar(self):
        eq_(self.record.registrar.__class__.__name__, 'Registrar')
        eq_(self.record.registrar.name, "MarkMonitor Inc")

    def test_registrant_contacts(self):
        # Only the registrant handle is present; all other contact
        # attributes are expected to be None for this registry.
        eq_(self.record.registrant_contacts.__class__.__name__, 'list')
        eq_(len(self.record.registrant_contacts), 1)
        eq_(self.record.registrant_contacts[0].__class__.__name__, 'Contact')
        eq_(self.record.registrant_contacts[0].type, yawhois.record.Contact.TYPE_REGISTRANT)
        eq_(self.record.registrant_contacts[0].id, "mmr8008-53808")
        eq_(self.record.registrant_contacts[0].name, None)
        eq_(self.record.registrant_contacts[0].organization, None)
        eq_(self.record.registrant_contacts[0].address, None)
        eq_(self.record.registrant_contacts[0].city, None)
        eq_(self.record.registrant_contacts[0].zip, None)
        eq_(self.record.registrant_contacts[0].state, None)
        eq_(self.record.registrant_contacts[0].country_code, None)
        eq_(self.record.registrant_contacts[0].phone, None)
        eq_(self.record.registrant_contacts[0].fax, None)
        eq_(self.record.registrant_contacts[0].email, None)

    def test_technical_contacts(self):
        eq_(self.record.technical_contacts.__class__.__name__, 'list')
        eq_(self.record.technical_contacts, [])

    def test_updated_on(self):
        eq_(self.record.updated_on.__class__.__name__, 'datetime')
        eq_(self.record.updated_on, time_parse('2013-09-18'))

    def test_domain_id(self):
        # domain_id is passed uncalled; assert_raises invokes it and expects
        # AttributeNotSupported (the .se parser does not expose an id).
        assert_raises(yawhois.exceptions.AttributeNotSupported, self.record.domain_id)

    def test_expires_on(self):
        eq_(self.record.expires_on.__class__.__name__, 'datetime')
        eq_(self.record.expires_on, time_parse('2014-10-20'))

    def test_disclaimer(self):
        eq_(self.record.disclaimer, "Copyright (c) 1997- .SE (The Internet Infrastructure Foundation). All rights reserved. The information obtained through searches, or otherwise, is protected by the Swedish Copyright Act (1960:729) and international conventions. It is also subject to database protection according to the Swedish Copyright Act. Any use of this material to target advertising or similar activities is forbidden and will be prosecuted. If any of the information below is transferred to a third party, it must be done in its entirety. This server must not be used as a backend for a search engine. Result of search for registered domain names under the .SE top level domain. This whois printout is printed with UTF-8 encoding.")
| {
"content_hash": "37452ba5847e43bbc571d29b0d6be856",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 744,
"avg_line_length": 51.951219512195124,
"alnum_prop": 0.6650234741784038,
"repo_name": "huyphan/pyyawhois",
"id": "667f286d13ff9e2e9be0887d5035fe0eca4f519d",
"size": "4524",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/record/parser/test_response_whois_iis_se_se_status_registered.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1859653"
}
],
"symlink_target": ""
} |
"""Tests NODE_NETWORK_LIMITED.
Tests that a node configured with -prune=550 signals NODE_NETWORK_LIMITED correctly
and that it responds to getdata requests for blocks correctly:
- send a block within 288 + 2 of the tip
- disconnect peers who request blocks older than that."""
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata, msg_verack, NODE_NETWORK_LIMITED, NODE_WITNESS
from test_framework.p2p import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
)
class P2PIgnoreInv(P2PInterface):
    """P2P connection that ignores invs and records the first addr's services."""
    firstAddrnServices = 0

    def on_inv(self, message):
        """Ignore block invs announced by the node; blocks are pulled explicitly."""
        pass

    def on_addr(self, message):
        # Remember the service bits advertised in the first relayed address.
        self.firstAddrnServices = message.addrs[0].nServices

    def wait_for_addr(self, timeout=5):
        self.wait_until(lambda: self.last_message.get("addr"), timeout=timeout)

    def send_getdata_for_block(self, blockhash):
        request = msg_getdata()
        request.inv.append(CInv(MSG_BLOCK, int(blockhash, 16)))
        self.send_message(request)
class NodeNetworkLimitedTest(BitcoinTestFramework):
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
        self.extra_args = [['-prune=550', '-addrmantest'], [], []]

    def disconnect_all(self):
        """Tear down every p2p link between the three nodes."""
        self.disconnect_nodes(0, 1)
        self.disconnect_nodes(0, 2)
        self.disconnect_nodes(1, 2)

    def setup_network(self):
        # Start the nodes without connecting them; each scenario below
        # establishes exactly the topology it needs.
        self.add_nodes(self.num_nodes, self.extra_args)
        self.start_nodes()

    def run_test(self):
        node = self.nodes[0].add_p2p_connection(P2PIgnoreInv())

        expected_services = NODE_WITNESS | NODE_NETWORK_LIMITED

        self.log.info("Check that node has signalled expected services.")
        assert_equal(node.nServices, expected_services)

        self.log.info("Check that the localservices is as expected.")
        assert_equal(int(self.nodes[0].getnetworkinfo()['localservices'], 16), expected_services)

        self.log.info("Mine enough blocks to reach the NODE_NETWORK_LIMITED range.")
        self.connect_nodes(0, 1)
        blocks = self.nodes[1].generatetoaddress(292, self.nodes[1].get_deterministic_priv_key().address)
        self.sync_blocks([self.nodes[0], self.nodes[1]])

        self.log.info("Make sure we can max retrieve block at tip-288.")
        node.send_getdata_for_block(blocks[1])  # last block in valid range
        node.wait_for_block(int(blocks[1], 16), timeout=3)

        self.log.info("Requesting block at height 2 (tip-289) must fail (ignored).")
        node.send_getdata_for_block(blocks[0])  # first block outside of the 288+2 limit
        node.wait_for_disconnect(5)

        self.log.info("Check local address relay, do a fresh connection.")
        self.nodes[0].disconnect_p2ps()
        node1 = self.nodes[0].add_p2p_connection(P2PIgnoreInv())
        node1.send_message(msg_verack())

        node1.wait_for_addr()
        # must relay address with NODE_NETWORK_LIMITED
        assert_equal(node1.firstAddrnServices, expected_services)

        self.nodes[0].disconnect_p2ps()

        # connect unsynced node 2 with pruned NODE_NETWORK_LIMITED peer
        # because node 2 is in IBD and node 0 is a NODE_NETWORK_LIMITED peer, sync must not be possible
        self.connect_nodes(0, 2)
        try:
            self.sync_blocks([self.nodes[0], self.nodes[2]], timeout=5)
        except Exception:
            # Timing out here is the expected outcome.  BUG FIX: this was a
            # bare `except:`, which would also have swallowed SystemExit and
            # KeyboardInterrupt.
            pass
        # node2 must remain at height 0
        assert_equal(self.nodes[2].getblockheader(self.nodes[2].getbestblockhash())['height'], 0)

        # now connect also to node 1 (non pruned)
        self.connect_nodes(1, 2)

        # sync must be possible
        self.sync_blocks()

        # disconnect all peers
        self.disconnect_all()

        # mine 10 blocks on node 0 (pruned node)
        self.nodes[0].generatetoaddress(10, self.nodes[0].get_deterministic_priv_key().address)

        # connect node1 (non pruned) with node0 (pruned) and check if the can sync
        self.connect_nodes(0, 1)

        # sync must be possible, node 1 is no longer in IBD and should therefore connect to node 0 (NODE_NETWORK_LIMITED)
        self.sync_blocks([self.nodes[0], self.nodes[1]])


if __name__ == '__main__':
    NodeNetworkLimitedTest().main()
| {
"content_hash": "7002acdeb8f72b2d02178ba26707ab71",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 121,
"avg_line_length": 40.43119266055046,
"alnum_prop": 0.6643975493533015,
"repo_name": "alecalve/bitcoin",
"id": "b1a7ef6877f194eea44948b3180064a7de55bff8",
"size": "4621",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/p2p_node_network_limited.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "695632"
},
{
"name": "C++",
"bytes": "6008562"
},
{
"name": "HTML",
"bytes": "21860"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "197255"
},
{
"name": "Makefile",
"bytes": "117105"
},
{
"name": "Objective-C",
"bytes": "123749"
},
{
"name": "Objective-C++",
"bytes": "6594"
},
{
"name": "Python",
"bytes": "1469100"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "88183"
}
],
"symlink_target": ""
} |
"""
Copyright 2013 Jérémie BOUTOILLE
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from struct import unpack
from modules.pymacho.MachOLoadCommand import MachOLoadCommand
from modules.pymacho.Utils import green
class MachORPathCommand(MachOLoadCommand):
path_offset = 0
path = ""
def __init__(self, macho_file=None, cmd=0):
self.cmd = cmd
if macho_file is not None:
self.parse(macho_file)
def parse(self, macho_file):
# get cmdsize
macho_file.seek(-4, 1)
cmdsize = unpack('<I', macho_file.read(4))[0]
# get string offset
self.path_offset = unpack('<I', macho_file.read(4))[0]
strlen = cmdsize - self.path_offset
# get path
extract = "<%s" % ('s'*strlen)
self.path = "".join(unpack(extract, macho_file.read(strlen)))
def write(self, macho_file):
before = macho_file.tell()
macho_file.write(pack('<II', self.cmd, 0x0))
macho_file.write(pack('<I', self.path_offset))
extract = "<"+str(len(self.path))+"s"
macho_file.write(pack(extract, self.path))
after = macho_file.tell()
macho_file.seek(before+4)
macho_file.write(pack('<I', after-before))
macho_file.seek(after)
def display(self, before=''):
print before + green("[+]")+" LC_RPATH"
print before + "\t- path : %s" % repr(self.path)
| {
"content_hash": "51790a185dc438a89f347c8f3c0873ea",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 69,
"avg_line_length": 34.578947368421055,
"alnum_prop": 0.6549974632166413,
"repo_name": "jack51706/viper",
"id": "65f03cf9892af11465d26a0de05d82b44c431575",
"size": "1992",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "modules/pymacho/MachORPathCommand.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1306"
},
{
"name": "JavaScript",
"bytes": "9295"
},
{
"name": "Python",
"bytes": "1373991"
},
{
"name": "Smarty",
"bytes": "28210"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import jubatus
import jubatus.embedded
from .base import GenericSchema, BaseDataset, BaseService, GenericConfig, Utils
from .loader.array import ArrayLoader, ZipArrayLoader
from .loader.sparse import SparseMatrixLoader
from .loader.chain import MergeChainLoader
from .compat import *
class Schema(GenericSchema):
    """
    Schema for Regression service.

    Exactly one column must be mapped to ``Schema.TARGET``; it holds the
    numeric value to be estimated.
    """

    TARGET = 't'

    def __init__(self, mapping, fallback=None):
        # Locate the unique, mandatory target column before delegating
        # the rest of the mapping to the generic schema.
        self._target_key = self._get_unique_mapping(
            mapping, fallback, self.TARGET, 'TARGET', True)
        super(Schema, self).__init__(mapping, fallback)

    def transform(self, row):
        """
        Transform ``row`` into a ``(target, datum)`` pair.  ``target`` is a
        float, or None when the row has no target column.
        """
        raw_target = row.get(self._target_key)
        target = float(raw_target) if raw_target is not None else None
        datum = self._transform_as_datum(row, None, [self._target_key])
        return (target, datum)
class Dataset(BaseDataset):
    """
    Dataset for Regression service.
    """

    @classmethod
    def _predict(cls, row):
        # Delegate schema prediction for an unmapped row; the False flag is
        # forwarded to Schema.predict -- presumably "no target expected"
        # (TODO confirm against GenericSchema.predict).
        return Schema.predict(row, False)

    @classmethod
    def _from_loader(cls, data_loader, targets, static):
        """Build a Dataset from a data loader and an optional target array."""
        if targets is None:
            loader = data_loader
            schema = Schema({}, Schema.NUMBER)
        else:
            # target is feeded with '_target' key from Loader.
            target_loader = ZipArrayLoader(_target=targets)
            loader = MergeChainLoader(data_loader, target_loader)
            schema = Schema({'_target': Schema.TARGET}, Schema.NUMBER)
        return Dataset(loader, schema, static)

    @classmethod
    def from_data(cls, data, targets=None, feature_names=None, static=True):
        """
        Converts two arrays or a sparse matrix data and its associated target array to Dataset.

        Parameters
        ----------
        data : array or scipy 2-D sparse matrix of shape [n_samples, n_features]
        targets : array of shape [n_samples], optional
        feature_names : array of shape [n_features], optional
        """
        # Dispatch on the presence of `todense`, which duck-types scipy
        # sparse matrices.
        if hasattr(data, 'todense'):
            return cls.from_matrix(data, targets, feature_names, static)
        else:
            return cls.from_array(data, targets, feature_names, static)

    @classmethod
    def from_array(cls, data, targets=None, feature_names=None, static=True):
        """
        Converts two arrays (data and its associated targets) to Dataset.

        Parameters
        ----------
        data : array of shape [n_samples, n_features]
        targets : array of shape [n_samples], optional
        feature_names : array of shape [n_features], optional
        """
        data_loader = ArrayLoader(data, feature_names)
        return cls._from_loader(data_loader, targets, static)

    @classmethod
    def from_matrix(cls, data, targets=None, feature_names=None, static=True):
        """
        Converts a sparse matrix data and its associated target array to Dataset.

        Parameters
        ----------
        data : scipy 2-D sparse matrix of shape [n_samples, n_features]
        targets : array of shape [n_samples], optional
        feature_names : array of shape [n_features], optional
        """
        data_loader = SparseMatrixLoader(data, feature_names)
        return cls._from_loader(data_loader, targets, static)
class Regression(BaseService):
    """
    Regression service.
    """

    @classmethod
    def name(cls):
        # Service name as registered with Jubatus.
        return 'regression'

    @classmethod
    def _client_class(cls):
        return jubatus.regression.client.Regression

    @classmethod
    def _embedded_class(cls):
        return jubatus.embedded.Regression

    def train(self, dataset):
        """
        Trains the regression using the given dataset.

        Yields ``(idx, target)`` for each record as it is trained.
        Raises RuntimeError if a record has no target value.
        """
        cli = self._client()
        for (idx, (target, d)) in dataset:
            if target is None:
                raise RuntimeError('Dataset without target column cannot be used for training')
            result = cli.train([jubatus.regression.types.ScoredDatum(target, d)])
            # The server reports the number of records trained; we send one.
            assert result == 1
            yield (idx, target)

    def estimate(self, dataset):
        """
        Estimate target values of the given dataset using this Regression.

        Yields ``(idx, target, estimate)`` per record; ``target`` may be
        None when the dataset carries no target column.
        """
        cli = self._client()
        for (idx, (target, d)) in dataset:
            # Do regression for the record.
            result = cli.estimate([d])
            assert len(result) == 1
            yield (idx, target, result[0])

    @classmethod
    def train_and_estimate(cls, config, train_dataset, test_dataset, metric):
        """
        This is an utility method to perform bulk train-test.
        Run a regression using the given config, train the regression,
        estimate using the regression, then return the calculated metrics.
        """
        regression = cls.run(config)
        # Drain the training generator; per-record yields are ignored here.
        for _ in regression.train(train_dataset):
            pass
        y_true = []
        y_pred = []
        for (idx, target, result) in regression.estimate(test_dataset):
            y_true.append(target)
            y_pred.append(result)
        regression.stop()
        return metric(y_true, y_pred)
class Config(GenericConfig):
    """
    Configuration to run the Regression service.

    (Fixed: the docstring previously said "Classifier service".)
    """

    @classmethod
    def methods(cls):
        """Names of the supported regression methods."""
        return ['perceptron', 'PA', 'PA1', 'PA2', 'CW', 'AROW', 'NHERD', 'NN', 'cosine', 'euclidean']

    @classmethod
    def _default_method(cls):
        return 'AROW'

    @classmethod
    def _default_parameter(cls, method):
        """Return the default hyper-parameters for ``method``.

        Raises RuntimeError for unknown method names.
        """
        # BUG FIX: the original tested `method in ('perceptron')` -- that is
        # a STRING, not a 1-tuple, so any substring of 'perceptron' (e.g.
        # 'ce' or 'tron') was accepted.  A trailing comma makes it a tuple.
        if method in ('perceptron',):
            return {'learning_rate': 1.0}
        elif method in ('PA', 'passive_aggressive'):
            return {'sensitivity': 1.0}
        elif method in ('PA1', 'passive_aggressive_1',
                        'PA2', 'passive_aggressive_2',
                        'CW', 'confidence_weighted',
                        'AROW',
                        'NHERD', 'normal_herd'):
            return {
                'sensitivity': 1.0,
                'regularization_weight': 1.0
            }
        elif method in ('cosine', 'euclidean'):
            return {'nearest_neighbor_num': 128}
        elif method in ('NN', 'nearest_neighbor'):
            return {
                'method': 'euclid_lsh',
                'parameter': {
                    'threads': -1,
                    'hash_num': 64
                },
                'nearest_neighbor_num': 128,
            }
        else:
            raise RuntimeError('unknown method: {0}'.format(method))
| {
"content_hash": "e25cfe5cf90331185cf2f56ff20ebfce",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 99,
"avg_line_length": 30.074626865671643,
"alnum_prop": 0.63904052936311,
"repo_name": "jubatus/jubakit",
"id": "f01ac92cf74dd7146730a009099dac62608abd5f",
"size": "6070",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "jubakit/regression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "398177"
}
],
"symlink_target": ""
} |
import sys
from typing import Any, Callable, Dict, IO, Iterable, Optional, TypeVar, Union, cast, overload
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
from .._serialization import Serializer
from .._vendor import MySQLManagementClientMixinABC, _convert_request, _format_url_section
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Type of the optional `cls` callback that post-processes pipeline responses.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]

_SERIALIZER = Serializer()
# Disable client-side validation of parameters during serialization.
_SERIALIZER.client_side_validation = False
def build_get_request(
    resource_group_name: str,
    server_name: str,
    private_endpoint_connection_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for a single private endpoint connection."""
    request_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    request_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Resolve the API version: an explicit kwarg wins, then any pre-set query
    # parameter, then the hard-coded default for this generated client.
    api_version = kwargs.pop("api_version", request_params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
    accept = request_headers.pop("Accept", "application/json")

    # Expand the URL template with serialized (and validated) path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "privateEndpointConnectionName": _SERIALIZER.url(
            "private_endpoint_connection_name", private_endpoint_connection_name, "str"
        ),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    full_url = _format_url_section(url_template, **path_args)

    request_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    request_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=full_url, params=request_params, headers=request_headers, **kwargs)
def build_create_or_update_request(
    resource_group_name: str,
    server_name: str,
    private_endpoint_connection_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PUT request that creates or updates a private endpoint connection."""
    request_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    request_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Explicit kwarg wins, then any pre-set query parameter, then the default.
    api_version = kwargs.pop("api_version", request_params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
    content_type = kwargs.pop("content_type", request_headers.pop("Content-Type", None))  # type: Optional[str]
    accept = request_headers.pop("Accept", "application/json")

    # Expand the URL template with serialized (and validated) path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "privateEndpointConnectionName": _SERIALIZER.url(
            "private_endpoint_connection_name", private_endpoint_connection_name, "str"
        ),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    full_url = _format_url_section(url_template, **path_args)

    request_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Only emit Content-Type when a body content type was actually supplied.
    if content_type is not None:
        request_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    request_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PUT", url=full_url, params=request_params, headers=request_headers, **kwargs)
def build_delete_request(
    resource_group_name: str,
    server_name: str,
    private_endpoint_connection_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request for a private endpoint connection."""
    request_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    request_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Explicit kwarg wins, then any pre-set query parameter, then the default.
    api_version = kwargs.pop("api_version", request_params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
    accept = request_headers.pop("Accept", "application/json")

    # Expand the URL template with serialized (and validated) path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "privateEndpointConnectionName": _SERIALIZER.url(
            "private_endpoint_connection_name", private_endpoint_connection_name, "str"
        ),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    full_url = _format_url_section(url_template, **path_args)

    request_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    request_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="DELETE", url=full_url, params=request_params, headers=request_headers, **kwargs)
def build_update_tags_request(
    resource_group_name: str,
    server_name: str,
    private_endpoint_connection_name: str,
    subscription_id: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP PATCH request that updates tags on a private endpoint connection."""
    request_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    request_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Explicit kwarg wins, then any pre-set query parameter, then the default.
    api_version = kwargs.pop("api_version", request_params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
    content_type = kwargs.pop("content_type", request_headers.pop("Content-Type", None))  # type: Optional[str]
    accept = request_headers.pop("Accept", "application/json")

    # Expand the URL template with serialized (and validated) path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}",
    )  # pylint: disable=line-too-long
    path_args = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "privateEndpointConnectionName": _SERIALIZER.url(
            "private_endpoint_connection_name", private_endpoint_connection_name, "str"
        ),
    }
    full_url = _format_url_section(url_template, **path_args)

    request_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")

    # Only emit Content-Type when a body content type was actually supplied.
    if content_type is not None:
        request_headers["Content-Type"] = _SERIALIZER.header("content_type", content_type, "str")
    request_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="PATCH", url=full_url, params=request_params, headers=request_headers, **kwargs)
def build_list_by_server_request(
    resource_group_name: str, server_name: str, subscription_id: str, **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request that lists all private endpoint connections on a server."""
    request_headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    request_params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    # Explicit kwarg wins, then any pre-set query parameter, then the default.
    api_version = kwargs.pop("api_version", request_params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
    accept = request_headers.pop("Accept", "application/json")

    # Expand the URL template with serialized (and validated) path arguments.
    url_template = kwargs.pop(
        "template_url",
        "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections",
    )  # pylint: disable=line-too-long
    path_args = {
        "resourceGroupName": _SERIALIZER.url(
            "resource_group_name", resource_group_name, "str", max_length=90, min_length=1
        ),
        "serverName": _SERIALIZER.url("server_name", server_name, "str"),
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, "str", min_length=1),
    }
    full_url = _format_url_section(url_template, **path_args)

    request_params["api-version"] = _SERIALIZER.query("api_version", api_version, "str")
    request_headers["Accept"] = _SERIALIZER.header("accept", accept, "str")

    return HttpRequest(method="GET", url=full_url, params=request_params, headers=request_headers, **kwargs)
class PrivateEndpointConnectionsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.rdbms.mysql.MySQLManagementClient`'s
:attr:`private_endpoint_connections` attribute.
"""
models = _models
def __init__(self, *args, **kwargs):
input_args = list(args)
self._client = input_args.pop(0) if input_args else kwargs.pop("client")
self._config = input_args.pop(0) if input_args else kwargs.pop("config")
self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def get(
        self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any
    ) -> _models.PrivateEndpointConnection:
        """Gets a private endpoint connection.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection. Required.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateEndpointConnection or the result of cls(response)
        :rtype: ~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend/override this via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PrivateEndpointConnection]
        # Build the GET request from the validated path/query/header parameters.
        request = build_get_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        # Adapt to the transport-level request type and resolve the full URL
        # against the client's base endpoint.
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        # Send synchronously through the client pipeline (policies + transport).
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        # 200 is the only success status for this operation.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        # Hand the raw response to the caller-supplied hook, if any.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
    def _create_or_update_initial(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: Union[_models.PrivateEndpointConnection, IO],
        **kwargs: Any
    ) -> Optional[_models.PrivateEndpointConnection]:
        """Send the initial PUT of the create-or-update long-running operation.
        Returns the deserialized connection on 200, or ``None`` on 202
        (the poller in ``begin_create_or_update`` drives the rest).
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend/override this via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[Optional[_models.PrivateEndpointConnection]]
        content_type = content_type or "application/json"
        # Choose the body form: raw streams/bytes pass through unchanged,
        # model objects are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "PrivateEndpointConnection")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        # Send synchronously through the client pipeline (policies + transport).
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # Only a 200 carries a body; 202 leaves ``deserialized`` as None.
        deserialized = None
        if response.status_code == 200:
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: _models.PrivateEndpointConnection,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.PrivateEndpointConnection]:
        """Approve or reject a private endpoint connection with a given name.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        Required.
        :type private_endpoint_connection_name: str
        :param parameters: The private endpoint connection resource. Required.
        :type parameters: ~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
        Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
        operation to not poll, or pass in your own initialized polling object for a personal polling
        strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
        of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload: the @distributed_trace implementation below
        # handles both the model and IO body forms.
    @overload
    def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.PrivateEndpointConnection]:
        """Approve or reject a private endpoint connection with a given name.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        Required.
        :type private_endpoint_connection_name: str
        :param parameters: The private endpoint connection resource. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
        Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
        operation to not poll, or pass in your own initialized polling object for a personal polling
        strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
        of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload: the @distributed_trace implementation below
        # handles both the model and IO body forms.
    @distributed_trace
    def begin_create_or_update(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: Union[_models.PrivateEndpointConnection, IO],
        **kwargs: Any
    ) -> LROPoller[_models.PrivateEndpointConnection]:
        """Approve or reject a private endpoint connection with a given name.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        Required.
        :type private_endpoint_connection_name: str
        :param parameters: Is either a model type or a IO type. Required.
        :type parameters: ~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
        Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
        operation to not poll, or pass in your own initialized polling object for a personal polling
        strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
        of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PrivateEndpointConnection]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller state.
        if cont_token is None:
            raw_result = self._create_or_update_initial(  # type: ignore
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                # Keep the raw PipelineResponse so the poller can drive the LRO.
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):
            # Final-state callback: deserialize the terminal response and
            # apply the caller-supplied ``cls`` hook, if any.
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-provided PollingMethod instance.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
    def _delete_initial(  # pylint: disable=inconsistent-return-statements
        self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any
    ) -> None:
        """Send the initial DELETE of the delete long-running operation.
        Accepts 200/202/204; raises ``HttpResponseError`` otherwise.
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend/override this via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        request = build_delete_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self._delete_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        # Send synchronously through the client pipeline (policies + transport).
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        # No body to deserialize; only invoke the caller-supplied hook.
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
    @distributed_trace
    def begin_delete(
        self, resource_group_name: str, server_name: str, private_endpoint_connection_name: str, **kwargs: Any
    ) -> LROPoller[None]:
        """Deletes a private endpoint connection with a given name.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        Required.
        :type private_endpoint_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
        operation to not poll, or pass in your own initialized polling object for a personal polling
        strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[None]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved state.
        if cont_token is None:
            raw_result = self._delete_initial(  # type: ignore
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                api_version=api_version,
                # Keep the raw PipelineResponse so the poller can drive the LRO.
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)
        def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
            # Final-state callback: no body for delete; only run the hook.
            if cls:
                return cls(pipeline_response, None, {})
        # Select the polling strategy: default ARM polling, no polling, or a
        # caller-provided PollingMethod instance.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
    def _update_tags_initial(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: Union[_models.TagsObject, IO],
        **kwargs: Any
    ) -> _models.PrivateEndpointConnection:
        """Send the initial PATCH of the update-tags long-running operation
        and return the deserialized connection (200 is the only success).
        """
        # Map well-known HTTP status codes to azure-core exception types;
        # callers may extend/override this via the ``error_map`` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PrivateEndpointConnection]
        content_type = content_type or "application/json"
        # Choose the body form: raw streams/bytes pass through unchanged,
        # model objects are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "TagsObject")
        request = build_update_tags_request(
            resource_group_name=resource_group_name,
            server_name=server_name,
            private_endpoint_connection_name=private_endpoint_connection_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._update_tags_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)  # type: ignore
        # Send synchronously through the client pipeline (policies + transport).
        pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
        # Hand the raw response to the caller-supplied hook, if any.
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _update_tags_initial.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
    @overload
    def begin_update_tags(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: _models.TagsObject,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.PrivateEndpointConnection]:
        """Updates tags on private endpoint connection.
        Updates private endpoint connection with the specified tags.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        Required.
        :type private_endpoint_connection_name: str
        :param parameters: Parameters supplied to the Update private endpoint connection Tags
        operation. Required.
        :type parameters: ~azure.mgmt.rdbms.mysql.models.TagsObject
        :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
        Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
        operation to not poll, or pass in your own initialized polling object for a personal polling
        strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
        of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload: the @distributed_trace implementation below
        # handles both the model and IO body forms.
    @overload
    def begin_update_tags(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: IO,
        *,
        content_type: str = "application/json",
        **kwargs: Any
    ) -> LROPoller[_models.PrivateEndpointConnection]:
        """Updates tags on private endpoint connection.
        Updates private endpoint connection with the specified tags.
        :param resource_group_name: The name of the resource group. The name is case insensitive.
        Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: The name of the private endpoint connection.
        Required.
        :type private_endpoint_connection_name: str
        :param parameters: Parameters supplied to the Update private endpoint connection Tags
        operation. Required.
        :type parameters: IO
        :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
        Default value is "application/json".
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
        operation to not poll, or pass in your own initialized polling object for a personal polling
        strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
        Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
        of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Typing-only overload: the @distributed_trace implementation below
        # handles both the model and IO body forms.
    @distributed_trace
    def begin_update_tags(
        self,
        resource_group_name: str,
        server_name: str,
        private_endpoint_connection_name: str,
        parameters: Union[_models.TagsObject, IO],
        **kwargs: Any
    ) -> LROPoller[_models.PrivateEndpointConnection]:
        """Updates tags on private endpoint connection.

        Updates private endpoint connection with the specified tags.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :param private_endpoint_connection_name: Required.
        :type private_endpoint_connection_name: str
        :param parameters: Parameters supplied to the Update private endpoint connection Tags
         operation. Is either a model type or a IO type. Required.
        :type parameters: ~azure.mgmt.rdbms.mysql.models.TagsObject or IO
        :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
         Default value is None.
        :paramtype content_type: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
         operation to not poll, or pass in your own initialized polling object for a personal polling
         strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
         Retry-After header is present.
        :return: An instance of LROPoller that returns either PrivateEndpointConnection or the result
         of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Pop per-call options out of kwargs before they are forwarded to
        # the pipeline / polling machinery.
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        content_type = kwargs.pop("content_type", _headers.pop("Content-Type", None))  # type: Optional[str]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PrivateEndpointConnection]
        polling = kwargs.pop("polling", True)  # type: Union[bool, PollingMethod]
        lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
        cont_token = kwargs.pop("continuation_token", None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved
        # continuation token; `cls=lambda ...` keeps the raw response so the
        # poller can deserialize the final result itself.
        if cont_token is None:
            raw_result = self._update_tags_initial(  # type: ignore
                resource_group_name=resource_group_name,
                server_name=server_name,
                private_endpoint_connection_name=private_endpoint_connection_name,
                parameters=parameters,
                api_version=api_version,
                content_type=content_type,
                cls=lambda x, y, z: x,
                headers=_headers,
                params=_params,
                **kwargs
            )
        kwargs.pop("error_map", None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final LRO response; honour a custom `cls` hook.
            deserialized = self._deserialize("PrivateEndpointConnection", pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # polling=True -> default ARM polling; False -> no polling;
        # anything else is treated as a user-supplied polling method.
        if polling is True:
            polling_method = cast(PollingMethod, ARMPolling(lro_delay, **kwargs))  # type: PollingMethod
        elif polling is False:
            polling_method = cast(PollingMethod, NoPolling())
        else:
            polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output,
            )
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_update_tags.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections/{privateEndpointConnectionName}"}  # type: ignore
    @distributed_trace
    def list_by_server(
        self, resource_group_name: str, server_name: str, **kwargs: Any
    ) -> Iterable["_models.PrivateEndpointConnection"]:
        """Gets all private endpoint connections on a server.

        :param resource_group_name: The name of the resource group. The name is case insensitive.
         Required.
        :type resource_group_name: str
        :param server_name: The name of the server. Required.
        :type server_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointConnection or the result of
         cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.rdbms.mysql.models.PrivateEndpointConnection]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version = kwargs.pop("api_version", _params.pop("api-version", "2018-06-01"))  # type: Literal["2018-06-01"]
        cls = kwargs.pop("cls", None)  # type: ClsType[_models.PrivateEndpointConnectionListResult]
        # Map HTTP status codes to the azure-core exceptions raised for them;
        # callers may extend/override via the `error_map` kwarg.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        def prepare_request(next_link=None):
            # First page uses the operation's URL template; subsequent pages
            # follow the service-provided `next_link` verbatim.
            if not next_link:
                request = build_list_by_server_request(
                    resource_group_name=resource_group_name,
                    server_name=server_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_server.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)  # type: ignore
                request.method = "GET"
            return request

        def extract_data(pipeline_response):
            # Returns (next_link_or_None, iterator_over_page_items).
            deserialized = self._deserialize("PrivateEndpointConnectionListResult", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = self._client._pipeline.run(  # type: ignore # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response

        return ItemPaged(get_next, extract_data)

    list_by_server.metadata = {"url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DBforMySQL/servers/{serverName}/privateEndpointConnections"}  # type: ignore
| {
"content_hash": "e3e7d83cbe72b4879260eb9d03a97e72",
"timestamp": "",
"source": "github",
"line_count": 972,
"max_line_length": 245,
"avg_line_length": 47.23868312757202,
"alnum_prop": 0.6551746667828208,
"repo_name": "Azure/azure-sdk-for-python",
"id": "0a39d4f32ec63451479c9d3861ed884937abe9e7",
"size": "46416",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/rdbms/azure-mgmt-rdbms/azure/mgmt/rdbms/mysql/operations/_private_endpoint_connections_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import itertools
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from sentry import status_checks
from sentry.status_checks import sort_by_severity
from sentry.api.base import Endpoint
from sentry.auth.superuser import is_active_superuser
from sentry.utils.hashlib import md5_text
class SystemHealthEndpoint(Endpoint):
    """Expose the results of Sentry's internal status checks.

    Superusers receive the full problem list plus a per-check health map;
    everyone else gets an empty (but successful) response.
    """

    permission_classes = (IsAuthenticated, )

    def get(self, request):
        # Hide diagnostic details from non-superusers.
        if not is_active_superuser(request):
            return Response()

        results = status_checks.check_all()

        # Flatten every check's problem list and order by severity.
        ordered_problems = sort_by_severity(
            itertools.chain.from_iterable(results.values()))
        problem_payload = []
        for problem in ordered_problems:
            problem_payload.append({
                'id': md5_text(problem.message).hexdigest(),
                'message': problem.message,
                'severity': problem.severity,
                'url': problem.url,
            })

        # A check is healthy iff it reported no problems.
        healthy = {}
        for check, problems in results.items():
            healthy[type(check).__name__] = not problems

        return Response({
            'problems': problem_payload,
            'healthy': healthy,
        })
| {
"content_hash": "690705ab571a79f81521db4407aa79a7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 92,
"avg_line_length": 32.567567567567565,
"alnum_prop": 0.5842323651452282,
"repo_name": "ifduyue/sentry",
"id": "50c59a0d813a2d88093c3d8e9cc78648fbb44c0d",
"size": "1205",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/sentry/api/endpoints/system_health.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "301292"
},
{
"name": "HTML",
"bytes": "241298"
},
{
"name": "JavaScript",
"bytes": "3295572"
},
{
"name": "Lua",
"bytes": "65795"
},
{
"name": "Makefile",
"bytes": "6892"
},
{
"name": "Python",
"bytes": "36910084"
},
{
"name": "Ruby",
"bytes": "217"
},
{
"name": "Shell",
"bytes": "5701"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class LineValidator(_plotly_utils.basevalidators.CompoundValidator):
    """Validator for the ``scatter3d.marker.line`` compound property.

    Auto-generated: binds the ``Line`` data class to this property and
    supplies the per-attribute help text shown in generated docs.
    """

    def __init__(self, plotly_name="line", parent_name="scatter3d.marker", **kwargs):
        # NOTE: `data_docs` below is a runtime string literal consumed by the
        # docs machinery; its content/indentation is generator-defined.
        super(LineValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            data_class_str=kwargs.pop("data_class_str", "Line"),
            data_docs=kwargs.pop(
                "data_docs",
                """
            autocolorscale
                Determines whether the colorscale is a default
                palette (`autocolorscale: true`) or the palette
                determined by `marker.line.colorscale`. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. In case `colorscale` is
                unspecified or `autocolorscale` is true, the
                default palette will be chosen according to
                whether numbers in the `color` array are all
                positive, all negative or mixed.
            cauto
                Determines whether or not the color domain is
                computed with respect to the input data (here
                in `marker.line.color`) or the bounds set in
                `marker.line.cmin` and `marker.line.cmax` Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Defaults to `false` when
                `marker.line.cmin` and `marker.line.cmax` are
                set by the user.
            cmax
                Sets the upper bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmin` must be set as well.
            cmid
                Sets the mid-point of the color domain by
                scaling `marker.line.cmin` and/or
                `marker.line.cmax` to be equidistant to this
                point. Has an effect only if in
                `marker.line.color`is set to a numerical array.
                Value should have the same units as in
                `marker.line.color`. Has no effect when
                `marker.line.cauto` is `false`.
            cmin
                Sets the lower bound of the color domain. Has
                an effect only if in `marker.line.color`is set
                to a numerical array. Value should have the
                same units as in `marker.line.color` and if
                set, `marker.line.cmax` must be set as well.
            color
                Sets themarker.linecolor. It accepts either a
                specific color or an array of numbers that are
                mapped to the colorscale relative to the max
                and min values of the array or relative to
                `marker.line.cmin` and `marker.line.cmax` if
                set.
            coloraxis
                Sets a reference to a shared color axis.
                References to these shared color axes are
                "coloraxis", "coloraxis2", "coloraxis3", etc.
                Settings for these shared color axes are set in
                the layout, under `layout.coloraxis`,
                `layout.coloraxis2`, etc. Note that multiple
                color scales can be linked to the same color
                axis.
            colorscale
                Sets the colorscale. Has an effect only if in
                `marker.line.color`is set to a numerical array.
                The colorscale must be an array containing
                arrays mapping a normalized value to an rgb,
                rgba, hex, hsl, hsv, or named color string. At
                minimum, a mapping for the lowest (0) and
                highest (1) values are required. For example,
                `[[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']]`.
                To control the bounds of the colorscale in
                color space, use`marker.line.cmin` and
                `marker.line.cmax`. Alternatively, `colorscale`
                may be a palette name string of the following
                list: Greys,YlGnBu,Greens,YlOrRd,Bluered,RdBu,R
                eds,Blues,Picnic,Rainbow,Portland,Jet,Hot,Black
                body,Earth,Electric,Viridis,Cividis.
            colorsrc
                Sets the source reference on Chart Studio Cloud
                for color .
            reversescale
                Reverses the color mapping if true. Has an
                effect only if in `marker.line.color`is set to
                a numerical array. If true, `marker.line.cmin`
                will correspond to the last color in the array
                and `marker.line.cmax` will correspond to the
                first color.
            width
                Sets the width (in px) of the lines bounding
                the marker points.
""",
            ),
            **kwargs
        )
| {
"content_hash": "3587d05f3201000374b34452f9bb6451",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 85,
"avg_line_length": 49.554455445544555,
"alnum_prop": 0.5478521478521479,
"repo_name": "plotly/python-api",
"id": "2534fa8746374986a1acc6f71eeea5cd8298bc6e",
"size": "5005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/scatter3d/marker/_line.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import numpy as np
import pints
class ForwardModel(object):
    """
    Defines an interface for user-supplied forward models.

    Classes extending ``ForwardModel`` can implement the required methods
    directly in Python or interface with other languages (for example via
    Python wrappers around C code).
    """

    def __init__(self):
        super(ForwardModel, self).__init__()

    def n_outputs(self):
        """
        Returns the number of outputs this model has. The default is 1.
        """
        return 1

    def n_parameters(self):
        """
        Returns the dimension of the parameter space.

        Subclasses must override this method.
        """
        raise NotImplementedError

    def simulate(self, parameters, times):
        """
        Runs a forward simulation with the given ``parameters`` and returns a
        time-series with data points corresponding to the given ``times``.

        Returns a sequence of length ``n_times`` (for single output problems)
        or a NumPy array of shape ``(n_times, n_outputs)`` (for multi-output
        problems), representing the values of the model at the given
        ``times``.

        Parameters
        ----------
        parameters
            An ordered sequence of parameter values.
        times
            The times at which to evaluate. Must be an ordered sequence,
            without duplicates, and without negative values.
            All simulations are started at time 0, regardless of whether this
            value appears in ``times``.
        """
        raise NotImplementedError
class ForwardModelS1(ForwardModel):
    """
    Defines an interface for user-supplied forward models which can
    calculate the first-order derivative of the simulated values with
    respect to the parameters.

    Extends :class:`pints.ForwardModel`.
    """

    def __init__(self):
        super(ForwardModelS1, self).__init__()

    def simulateS1(self, parameters, times):
        """
        Runs a forward simulation with the given ``parameters`` and returns
        a time-series with data points corresponding to the given ``times``,
        along with the sensitivities of the forward simulation with respect
        to the parameters.

        Parameters
        ----------
        parameters
            An ordered list of parameter values.
        times
            The times at which to evaluate. Must be an ordered sequence,
            without duplicates, and without negative values.
            All simulations are started at time 0, regardless of whether
            this value appears in ``times``.

        Returns
        -------
        y
            The simulated values, as a sequence of ``n_times`` values, or
            a NumPy array of shape ``(n_times, n_outputs)``.
        y'
            The corresponding derivatives, as a NumPy array of shape
            ``(n_times, n_parameters)`` or an array of shape
            ``(n_times, n_outputs, n_parameters)``.

        Subclasses must override this method.
        """
        raise NotImplementedError
class SingleOutputProblem(object):
    """
    Represents an inference problem where a model is fit to a single time
    series, such as measured from a system with a single output.

    Parameters
    ----------
    model
        A model or model wrapper extending :class:`ForwardModel`.
    times
        A sequence of points in time. Must be non-negative and increasing.
    values
        A sequence of scalar output values, measured at the times in
        ``times``.
    """

    def __init__(self, model, times, values):
        # Store the model; only single-output models are allowed here.
        self._model = model
        if model.n_outputs() != 1:
            raise ValueError(
                'Only single-output models can be used for a'
                ' SingleOutputProblem.')

        # Copy times into a read-only vector and validate the ordering.
        self._times = pints.vector(times)
        if np.any(self._times < 0):
            raise ValueError('Times can not be negative.')
        if np.any(self._times[:-1] >= self._times[1:]):
            raise ValueError('Times must be increasing.')

        # Copy values so that they can no longer be changed.
        self._values = pints.vector(values)

        # Cache the problem dimensions.
        self._n_parameters = int(model.n_parameters())
        self._n_times = len(self._times)

        # Times and values must have matching lengths.
        if len(self._values) != self._n_times:
            raise ValueError(
                'Times and values arrays must have same length.')

    def evaluate(self, parameters):
        """
        Runs a simulation using the given parameters, returning the
        simulated values as a NumPy array of shape ``(n_times,)``.
        """
        raw = self._model.simulate(parameters, self._times)
        return np.asarray(raw).reshape((self._n_times,))

    def evaluateS1(self, parameters):
        """
        Runs a simulation with first-order sensitivity calculation,
        returning the simulated values and derivatives.

        The returned data is a tuple of NumPy arrays ``(y, y')``, where
        ``y`` has shape ``(self._n_times,)`` while ``y'`` has shape
        ``(n_times, n_parameters)``.

        *This method only works for problems with a model that implements
        the :class:`ForwardModelS1` interface.*
        """
        y, dy = self._model.simulateS1(parameters, self._times)
        y = np.asarray(y).reshape((self._n_times,))
        dy = np.asarray(dy).reshape((self._n_times, self._n_parameters))
        return (y, dy)

    def n_outputs(self):
        """
        Returns the number of outputs for this problem (always 1).
        """
        return 1

    def n_parameters(self):
        """
        Returns the dimension (the number of parameters) of this problem.
        """
        return self._n_parameters

    def n_times(self):
        """
        Returns the number of sampling points, i.e. the length of the
        vectors returned by :meth:`times()` and :meth:`values()`.
        """
        return self._n_times

    def times(self):
        """
        Returns this problem's times.

        The returned value is a read-only NumPy array of shape
        ``(n_times, )``, where ``n_times`` is the number of time points.
        """
        return self._times

    def values(self):
        """
        Returns this problem's values.

        The returned value is a read-only NumPy array of shape
        ``(n_times, )``, where ``n_times`` is the number of time points.
        """
        return self._values
class MultiOutputProblem(object):
    """
    Represents an inference problem where a model is fit to a multi-valued
    time series, such as measured from a system with multiple outputs.

    Parameters
    ----------
    model
        A model or model wrapper extending :class:`ForwardModel`.
    times
        A sequence of points in time. Must be non-negative and non-decreasing.
    values
        A sequence of multi-valued measurements. Must have shape
        ``(n_times, n_outputs)``, where ``n_times`` is the number of points in
        ``times`` and ``n_outputs`` is the number of outputs in the model.
    """

    def __init__(self, model, times, values):
        # Check model
        self._model = model

        # Check times, copy so that they can no longer be changed and set
        # them to read-only. Unlike SingleOutputProblem, duplicate
        # (non-decreasing) times are allowed here.
        self._times = pints.vector(times)
        if np.any(self._times < 0):
            raise ValueError('Times cannot be negative.')
        if np.any(self._times[:-1] > self._times[1:]):
            raise ValueError('Times must be non-decreasing.')

        # Check values, copy so that they can no longer be changed
        self._values = pints.matrix2d(values)

        # Check dimensions
        self._n_parameters = int(model.n_parameters())
        self._n_outputs = int(model.n_outputs())
        self._n_times = len(self._times)

        # Check for correct shape
        if self._values.shape != (self._n_times, self._n_outputs):
            raise ValueError(
                'Values array must have shape `(n_times, n_outputs)`.')

    def evaluate(self, parameters):
        """
        Runs a simulation using the given parameters, returning the simulated
        values.

        The returned data is a NumPy array with shape
        ``(n_times, n_outputs)``.
        """
        y = np.asarray(self._model.simulate(parameters, self._times))
        return y.reshape(self._n_times, self._n_outputs)

    def evaluateS1(self, parameters):
        """
        Runs a simulation with first-order sensitivity calculation, returning
        the simulated values and derivatives.

        The returned data is a tuple of NumPy arrays ``(y, y')``, where ``y``
        has shape ``(n_times, n_outputs)``, while ``y'`` has shape
        ``(n_times, n_outputs, n_parameters)``.

        *This method only works for problems whose model implements the
        :class:`ForwardModelS1` interface.*
        """
        y, dy = self._model.simulateS1(parameters, self._times)
        return (
            np.asarray(y).reshape(self._n_times, self._n_outputs),
            np.asarray(dy).reshape(
                self._n_times, self._n_outputs, self._n_parameters)
        )

    def n_outputs(self):
        """
        Returns the number of outputs for this problem.
        """
        return self._n_outputs

    def n_parameters(self):
        """
        Returns the dimension (the number of parameters) of this problem.
        """
        return self._n_parameters

    def n_times(self):
        """
        Returns the number of sampling points, i.e. the length of the vectors
        returned by :meth:`times()` and :meth:`values()`.
        """
        return self._n_times

    def times(self):
        """
        Returns this problem's times.

        The returned value is a read-only NumPy array of shape
        ``(n_times, )``, where ``n_times`` is the number of time points.
        (The previous docstring incorrectly claimed a 2-d shape; times is a
        1-d vector.)
        """
        return self._times

    def values(self):
        """
        Returns this problem's values.

        The returned value is a read-only NumPy array of shape
        ``(n_times, n_outputs)``, where ``n_times`` is the number of time
        points and ``n_outputs`` is the number of outputs.
        """
        return self._values
class TunableMethod(object):
    """
    Defines an interface for a numerical method with a given number of
    hyper-parameters.

    Each optimiser or sampler method implemented in pints has a number of
    parameters which alters its behaviour, which can be called
    "hyper-parameters". The optimiser/sampler method will provide member
    functions to set each of these hyper-parameters individually. In
    contrast, this interface provides a generic way to set the
    hyper-parameters, which allows the user to, for example, use an
    optimiser to tune the hyper-parameters of the method.

    Note that :meth:`set_hyper_parameters` takes an array of parameters,
    which might be of the same type (e.g. a NumPy array). So derived classes
    should not raise any errors if individual hyper parameters are set using
    the wrong type (e.g. float rather than int), but should instead
    implicitly convert the argument to the correct type.
    """

    def n_hyper_parameters(self):
        """
        Returns the number of hyper-parameters for this method (see
        :class:`TunableMethod`). The base implementation reports none.
        """
        return 0

    def set_hyper_parameters(self, x):
        """
        Sets the hyper-parameters for the method with the given vector of
        values (see :class:`TunableMethod`).

        Parameters
        ----------
        x
            An array of length ``n_hyper_parameters`` used to set the
            hyper-parameters.
        """
        # Base class has no hyper-parameters, so there is nothing to set.
        pass
| {
"content_hash": "9038ab63bfbc942e5fbb1f4ca4fd52bb",
"timestamp": "",
"source": "github",
"line_count": 356,
"max_line_length": 79,
"avg_line_length": 33.18258426966292,
"alnum_prop": 0.6023025480402946,
"repo_name": "martinjrobins/hobo",
"id": "adba9b54ab1e03573707b0e1aade9087dc65c767",
"size": "12043",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pints/_core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "278656"
},
{
"name": "C++",
"bytes": "86361"
},
{
"name": "CMake",
"bytes": "1710"
},
{
"name": "Cuda",
"bytes": "7890"
},
{
"name": "M",
"bytes": "2347"
},
{
"name": "Matlab",
"bytes": "437018"
},
{
"name": "Python",
"bytes": "1841329"
},
{
"name": "Stan",
"bytes": "8353"
},
{
"name": "TeX",
"bytes": "88007"
},
{
"name": "mupad",
"bytes": "73951"
}
],
"symlink_target": ""
} |
"""
BMH Search
----------
Search that attempts to find a substring in a string. Uses a bad-character
shift of the rightmost character of the window to compute shifts.
Time Complexity: O(m + n), where m is the length of the substring to be found.
Space Complexity: O(m), where m is the length of the substring to be found.
Pseudo Code: https://github.com/FooBarWidget/boyer-moore-horspool
"""
def search(text, pattern):
    """
    Takes a string and searches if the `pattern` is substring within `text`.

    Uses the Boyer-Moore-Horspool bad-character shift to skip ahead after
    mismatches.

    :param text: A string that will be searched.
    :param pattern: A string that will be searched as a substring within
    `text`.
    :rtype: The indices of all occurrences of where the substring `pattern`
    was found in `text`.
    """
    pattern_length = len(pattern)
    text_length = len(text)
    offsets = []
    # Guard against an empty pattern: the previous implementation shifted by
    # zero each iteration and looped forever. Define it as "no occurrences".
    if pattern_length == 0 or pattern_length > text_length:
        return offsets
    # Bad-character shift table. A dict (rather than a fixed 256-entry list)
    # supports the full Unicode range; characters not in the pattern shift
    # the window by the whole pattern length.
    bmbc = {}
    for index, char in enumerate(pattern[:-1]):
        bmbc[char] = pattern_length - index - 1
    search_index = pattern_length - 1
    while search_index < text_length:
        # Compare the current window right-to-left. Guarding on
        # pattern_index (not text_index) stops the scan exactly when the
        # whole pattern has matched; the old text_index guard let
        # pattern_index undershoot -1 via negative indexing, which silently
        # dropped matches (e.g. search('aaaa', 'aa') returned only [0]).
        pattern_index = pattern_length - 1
        text_index = search_index
        while pattern_index >= 0 and \
                text[text_index] == pattern[pattern_index]:
            pattern_index -= 1
            text_index -= 1
        if pattern_index == -1:
            offsets.append(text_index + 1)
        # Shift by the bad-character rule using the window's last character.
        search_index += bmbc.get(text[search_index], pattern_length)
    return offsets
| {
"content_hash": "e39057eb946d14b463e6e3d296182480",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 78,
"avg_line_length": 31.958333333333332,
"alnum_prop": 0.6134289439374185,
"repo_name": "stphivos/algorithms",
"id": "ca4371eb4f83b1ef2a63933ef1786a4f2a4527da",
"size": "1534",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "algorithms/searching/bmh_search.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "110413"
}
],
"symlink_target": ""
} |
import os
from eventlet import timeout as etimeout
import mock
from nova.compute import vm_states
from nova import exception
from nova.objects import fields
from nova.objects import flavor as flavor_obj
from nova.tests.unit.objects import test_flavor
from nova.tests.unit.objects import test_virtual_interface
from nova.virt import hardware
from os_win import constants as os_win_const
from os_win import exceptions as os_win_exc
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import fileutils
from oslo_utils import units
from hyperv.nova import block_device_manager
from hyperv.nova import constants
from hyperv.nova import pdk
from hyperv.nova import vmops
from hyperv.nova import volumeops
from hyperv.tests import fake_instance
from hyperv.tests.unit import test_base
CONF = cfg.CONF
class VMOpsTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V VMOps class."""
_FAKE_TIMEOUT = 2
FAKE_SIZE = 10
FAKE_DIR = 'fake_dir'
FAKE_ROOT_PATH = 'C:\\path\\to\\fake.%s'
FAKE_CONFIG_DRIVE_ISO = 'configdrive.iso'
FAKE_CONFIG_DRIVE_VHD = 'configdrive.vhd'
FAKE_UUID = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
FAKE_LOG = 'fake_log'
_FAKE_PDK_FILE_PATH = 'C:\\path\\to\\fakepdk.pdk'
_FAKE_FSK_FILE_PATH = 'C:\\path\\to\\fakefsk.fsk'
_WIN_VERSION_6_3 = '6.3.0'
_WIN_VERSION_10 = '10.0'
ISO9660 = 'iso9660'
_FAKE_CONFIGDRIVE_PATH = 'C:/fake_instance_dir/configdrive.vhd'
    def setUp(self):
        """Create a VMOps instance with all collaborator utils mocked out."""
        super(VMOpsTestCase, self).setUp()
        self.context = 'fake-context'
        self._vmops = vmops.VMOps(virtapi=mock.MagicMock())
        # Replace every helper object with a MagicMock so each test exercises
        # only VMOps logic, never the underlying os-win / filesystem utils.
        self._vmops._vmutils = mock.MagicMock()
        self._vmops._metricsutils = mock.MagicMock()
        self._vmops._vhdutils = mock.MagicMock()
        self._vmops._pathutils = mock.MagicMock()
        self._vmops._hostutils = mock.MagicMock()
        self._vmops._pdk = mock.MagicMock()
        self._vmops._serial_console_ops = mock.MagicMock()
        self._vmops._block_dev_man = mock.MagicMock()
    def test_get_vif_driver_cached(self):
        """A cached VIF driver is served from the cache, not re-created."""
        self._vmops._vif_driver_cache = mock.MagicMock()
        self._vmops._vif_driver_cache.get.return_value = mock.sentinel.VIF_DRV
        self._vmops._get_vif_driver(mock.sentinel.VIF_TYPE)
        self._vmops._vif_driver_cache.get.assert_called_with(
            mock.sentinel.VIF_TYPE)

    @mock.patch('hyperv.nova.vif.get_vif_driver')
    def test_get_vif_driver_not_cached(self, mock_get_vif_driver):
        """A cache miss creates the driver and stores it in the cache."""
        mock_get_vif_driver.return_value = mock.sentinel.VIF_DRV
        self._vmops._get_vif_driver(mock.sentinel.VIF_TYPE)
        mock_get_vif_driver.assert_called_once_with(mock.sentinel.VIF_TYPE)
        self.assertEqual(mock.sentinel.VIF_DRV,
                         self._vmops._vif_driver_cache[mock.sentinel.VIF_TYPE])
    def test_list_instances(self):
        """list_instances() proxies straight through to vmutils."""
        mock_instance = mock.MagicMock()
        self._vmops._vmutils.list_instances.return_value = [mock_instance]
        response = self._vmops.list_instances()
        self._vmops._vmutils.list_instances.assert_called_once_with()
        self.assertEqual(response, [mock_instance])

    def test_estimate_instance_overhead(self):
        """Overhead reports no extra memory; disk overhead depends on RAM.

        512 MB yields 1 GB of disk overhead, while 500 MB yields none —
        presumably tied to a memory threshold in the implementation
        (TODO confirm against VMOps.estimate_instance_overhead).
        """
        instance_info = {'memory_mb': 512}
        overhead = self._vmops.estimate_instance_overhead(instance_info)
        self.assertEqual(0, overhead['memory_mb'])
        self.assertEqual(1, overhead['disk_gb'])
        instance_info = {'memory_mb': 500}
        overhead = self._vmops.estimate_instance_overhead(instance_info)
        self.assertEqual(0, overhead['disk_gb'])
    def _test_get_info(self, vm_exists):
        """Common body for get_info tests.

        :param vm_exists: whether vmutils should report the VM as present.
            When False, get_info must raise InstanceNotFound; when True, it
            must translate the vmutils summary info into an InstanceInfo.
        """
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_info = mock.MagicMock(spec_set=dict)
        fake_info = {'EnabledState': 2,
                     'MemoryUsage': mock.sentinel.FAKE_MEM_KB,
                     'NumberOfProcessors': mock.sentinel.FAKE_NUM_CPU,
                     'UpTime': mock.sentinel.FAKE_CPU_NS}

        # Make the MagicMock behave like the summary-info dict above.
        def getitem(key):
            return fake_info[key]
        mock_info.__getitem__.side_effect = getitem

        expected = hardware.InstanceInfo(state=constants.HYPERV_POWER_STATE[2],
                                         max_mem_kb=mock.sentinel.FAKE_MEM_KB,
                                         mem_kb=mock.sentinel.FAKE_MEM_KB,
                                         num_cpu=mock.sentinel.FAKE_NUM_CPU,
                                         cpu_time_ns=mock.sentinel.FAKE_CPU_NS)

        self._vmops._vmutils.vm_exists.return_value = vm_exists
        self._vmops._vmutils.get_vm_summary_info.return_value = mock_info

        if not vm_exists:
            self.assertRaises(exception.InstanceNotFound,
                              self._vmops.get_info, mock_instance)
        else:
            response = self._vmops.get_info(mock_instance)
            self._vmops._vmutils.vm_exists.assert_called_once_with(
                mock_instance.name)
            self._vmops._vmutils.get_vm_summary_info.assert_called_once_with(
                mock_instance.name)
            self.assertEqual(response, expected)

    def test_get_info(self):
        """get_info returns translated summary info for an existing VM."""
        self._test_get_info(vm_exists=True)

    def test_get_info_exception(self):
        """get_info raises InstanceNotFound for a missing VM."""
        self._test_get_info(vm_exists=False)
    @mock.patch.object(vmops.VMOps, 'check_vm_image_type')
    @mock.patch.object(vmops.VMOps, '_create_root_vhd')
    def test_create_root_device_type_disk(self, mock_create_root_device,
                                          mock_check_vm_image_type):
        """A DISK-type root device is created as a VHD and type-checked."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_root_disk_info = {'type': constants.DISK}
        self._vmops._create_root_device(self.context, mock_instance,
                                        mock_root_disk_info,
                                        mock.sentinel.VM_GEN_1)
        mock_create_root_device.assert_called_once_with(
            self.context, mock_instance)
        # The created VHD must be validated against the VM generation.
        mock_check_vm_image_type.assert_called_once_with(
            mock_instance.uuid, mock.sentinel.VM_GEN_1,
            mock_create_root_device.return_value)
    @mock.patch.object(vmops.VMOps, '_create_root_iso')
    def test_create_root_device_type_iso(self, mock_create_root_iso):
        """A DVD-type root device is created via _create_root_iso."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_root_disk_info = {'type': constants.DVD}
        self._vmops._create_root_device(self.context, mock_instance,
                                        mock_root_disk_info,
                                        mock.sentinel.VM_GEN_1)
        mock_create_root_iso.assert_called_once_with(self.context,
                                                     mock_instance)
    @mock.patch.object(vmops.imagecache.ImageCache, 'get_cached_image')
    def test_create_root_iso(self, mock_get_cached_image):
        """The cached image ISO is copied to the instance's root ISO path."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_get_root_vhd_path = self._vmops._pathutils.get_root_vhd_path
        mock_get_root_vhd_path.return_value = mock.sentinel.ROOT_ISO_PATH
        mock_get_cached_image.return_value = mock.sentinel.CACHED_ISO_PATH
        self._vmops._create_root_iso(self.context, mock_instance)
        mock_get_cached_image.assert_called_once_with(self.context,
                                                      mock_instance)
        mock_get_root_vhd_path.assert_called_once_with(mock_instance.name,
                                                       'iso')
        self._vmops._pathutils.copyfile.assert_called_once_with(
            mock.sentinel.CACHED_ISO_PATH, mock.sentinel.ROOT_ISO_PATH)
def _prepare_create_root_device_mocks(self, use_cow_images, vhd_format,
vhd_size):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_instance.flavor.root_gb = self.FAKE_SIZE
self.flags(use_cow_images=use_cow_images)
self._vmops._vhdutils.get_vhd_info.return_value = {'VirtualSize':
vhd_size * units.Gi}
self._vmops._vhdutils.get_vhd_format.return_value = vhd_format
root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
get_size.return_value = root_vhd_internal_size
self._vmops._pathutils.exists.return_value = True
return mock_instance
@mock.patch('hyperv.nova.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_exception(self, mock_get_cached_image,
vhd_format):
mock_instance = self._prepare_create_root_device_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE + 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
self.assertRaises(exception.FlavorDiskSmallerThanImage,
self._vmops._create_root_vhd, self.context,
mock_instance)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
self._vmops._pathutils.exists.assert_called_once_with(
fake_root_path)
self._vmops._pathutils.remove.assert_called_once_with(
fake_root_path)
@mock.patch('hyperv.nova.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd_qcow(self, mock_get_cached_image, vhd_format):
mock_instance = self._prepare_create_root_device_mocks(
use_cow_images=True, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(context=self.context,
instance=mock_instance)
self.assertEqual(fake_root_path, response)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format, False)
differencing_vhd = self._vmops._vhdutils.create_differencing_vhd
differencing_vhd.assert_called_with(fake_root_path, fake_vhd_path)
self._vmops._vhdutils.get_vhd_info.assert_called_once_with(
fake_vhd_path)
if vhd_format is constants.DISK_FORMAT_VHD:
self.assertFalse(get_size.called)
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
else:
get_size.assert_called_once_with(fake_vhd_path,
root_vhd_internal_size)
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size, is_file_max_size=False)
@mock.patch('hyperv.nova.imagecache.ImageCache.get_cached_image')
def _test_create_root_vhd(self, mock_get_cached_image, vhd_format,
is_rescue_vhd=False):
mock_instance = self._prepare_create_root_device_mocks(
use_cow_images=False, vhd_format=vhd_format,
vhd_size=(self.FAKE_SIZE - 1))
fake_vhd_path = self.FAKE_ROOT_PATH % vhd_format
mock_get_cached_image.return_value = fake_vhd_path
rescue_image_id = (
mock.sentinel.rescue_image_id if is_rescue_vhd else None)
fake_root_path = self._vmops._pathutils.get_root_vhd_path.return_value
root_vhd_internal_size = mock_instance.flavor.root_gb * units.Gi
get_size = self._vmops._vhdutils.get_internal_vhd_size_by_file_size
response = self._vmops._create_root_vhd(
context=self.context,
instance=mock_instance,
rescue_image_id=rescue_image_id)
self.assertEqual(fake_root_path, response)
mock_get_cached_image.assert_called_once_with(self.context,
mock_instance,
rescue_image_id)
self._vmops._pathutils.get_root_vhd_path.assert_called_with(
mock_instance.name, vhd_format, is_rescue_vhd)
self._vmops._pathutils.copyfile.assert_called_once_with(
fake_vhd_path, fake_root_path)
get_size.assert_called_once_with(fake_vhd_path, root_vhd_internal_size)
if is_rescue_vhd:
self.assertFalse(self._vmops._vhdutils.resize_vhd.called)
else:
self._vmops._vhdutils.resize_vhd.assert_called_once_with(
fake_root_path, root_vhd_internal_size,
is_file_max_size=False)
def test_create_root_vhd(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_root_vhd_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHD)
def test_create_root_vhdx_use_cow_images_true(self):
self._test_create_root_vhd_qcow(vhd_format=constants.DISK_FORMAT_VHDX)
def test_create_rescue_vhd(self):
self._test_create_root_vhd(vhd_format=constants.DISK_FORMAT_VHD,
is_rescue_vhd=True)
def test_create_root_vhdx_size_less_than_internal(self):
self._test_create_root_vhd_exception(
vhd_format=constants.DISK_FORMAT_VHD)
def test_is_resize_needed_exception(self):
inst = mock.MagicMock()
self.assertRaises(
exception.FlavorDiskSmallerThanImage,
self._vmops._is_resize_needed,
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE - 1, inst)
def test_is_resize_needed_true(self):
inst = mock.MagicMock()
self.assertTrue(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE + 1, inst))
def test_is_resize_needed_false(self):
inst = mock.MagicMock()
self.assertFalse(self._vmops._is_resize_needed(
mock.sentinel.FAKE_PATH, self.FAKE_SIZE, self.FAKE_SIZE, inst))
@mock.patch.object(vmops.VMOps, 'create_ephemeral_disk')
def test_create_ephemerals(self, mock_create_ephemeral_disk):
mock_instance = fake_instance.fake_instance_obj(self.context)
fake_ephemerals = [dict(), dict()]
self._vmops._vhdutils.get_best_supported_vhd_format.return_value = (
mock.sentinel.format)
self._vmops._pathutils.get_ephemeral_vhd_path.side_effect = [
mock.sentinel.FAKE_PATH0, mock.sentinel.FAKE_PATH1]
self._vmops._create_ephemerals(mock_instance, fake_ephemerals)
self._vmops._pathutils.get_ephemeral_vhd_path.assert_has_calls(
[mock.call(mock_instance.name, mock.sentinel.format, 'eph0'),
mock.call(mock_instance.name, mock.sentinel.format, 'eph1')])
mock_create_ephemeral_disk.assert_has_calls(
[mock.call(mock_instance.name, fake_ephemerals[0]),
mock.call(mock_instance.name, fake_ephemerals[1])])
def test_create_ephemeral_disk(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_ephemeral_info = {'path': 'fake_eph_path',
'size': 10}
self._vmops.create_ephemeral_disk(mock_instance.name,
mock_ephemeral_info)
mock_create_dynamic_vhd = self._vmops._vhdutils.create_dynamic_vhd
mock_create_dynamic_vhd.assert_called_once_with('fake_eph_path',
10 * units.Gi)
def test_set_boot_order(self):
mock_get_boot_order = self._vmops._block_dev_man.get_boot_order
self._vmops.set_boot_order(mock.sentinel.FAKE_VM_GEN,
mock.sentinel.FAKE_BDI,
mock.sentinel.FAKE_INSTANCE_NAME)
mock_get_boot_order.assert_called_once_with(
mock.sentinel.FAKE_VM_GEN, mock.sentinel.FAKE_BDI)
self._vmops._vmutils.set_boot_order.assert_called_once_with(
mock.sentinel.FAKE_INSTANCE_NAME, mock_get_boot_order.return_value)
@mock.patch.object(vmops.objects, 'PCIDeviceBus')
@mock.patch.object(vmops.objects, 'NetworkInterfaceMetadata')
@mock.patch.object(vmops.objects.VirtualInterfaceList,
'get_by_instance_uuid')
def test_get_vif_metadata(self, mock_get_by_inst_uuid,
mock_NetworkInterfaceMetadata, mock_PCIDevBus):
mock_vif = mock.MagicMock(tag='taggy')
mock_vif.__contains__.side_effect = (
lambda attr: getattr(mock_vif, attr, None) is not None)
mock_get_by_inst_uuid.return_value = [mock_vif,
mock.MagicMock(tag=None)]
vif_metadata = self._vmops._get_vif_metadata(self.context,
mock.sentinel.instance_id)
mock_get_by_inst_uuid.assert_called_once_with(
self.context, mock.sentinel.instance_id)
mock_NetworkInterfaceMetadata.assert_called_once_with(
mac=mock_vif.address,
bus=mock_PCIDevBus.return_value,
tags=[mock_vif.tag])
self.assertEqual([mock_NetworkInterfaceMetadata.return_value],
vif_metadata)
@mock.patch.object(vmops.objects, 'InstanceDeviceMetadata')
@mock.patch.object(vmops.VMOps, '_get_vif_metadata')
def test_save_device_metadata(self, mock_get_vif_metadata,
mock_InstanceDeviceMetadata):
mock_instance = mock.MagicMock()
mock_get_vif_metadata.return_value = [mock.sentinel.vif_metadata]
self._vmops._block_dev_man.get_bdm_metadata.return_value = [
mock.sentinel.bdm_metadata]
self._vmops._save_device_metadata(self.context, mock_instance,
mock.sentinel.block_device_info)
mock_get_vif_metadata.assert_called_once_with(self.context,
mock_instance.uuid)
self._vmops._block_dev_man.get_bdm_metadata.assert_called_once_with(
self.context, mock_instance, mock.sentinel.block_device_info)
expected_metadata = [mock.sentinel.vif_metadata,
mock.sentinel.bdm_metadata]
mock_InstanceDeviceMetadata.assert_called_once_with(
devices=expected_metadata)
self.assertEqual(mock_InstanceDeviceMetadata.return_value,
mock_instance.device_metadata)
    @mock.patch('hyperv.nova.vmops.VMOps.destroy')
    @mock.patch('hyperv.nova.vmops.VMOps.power_on')
    @mock.patch('hyperv.nova.vmops.VMOps.attach_config_drive')
    @mock.patch('hyperv.nova.vmops.VMOps._create_config_drive')
    @mock.patch('nova.virt.configdrive.required_by')
    @mock.patch('hyperv.nova.vmops.VMOps._save_device_metadata')
    @mock.patch('hyperv.nova.vmops.VMOps.create_instance')
    @mock.patch('hyperv.nova.vmops.VMOps.get_image_vm_generation')
    @mock.patch('hyperv.nova.vmops.VMOps._create_ephemerals')
    @mock.patch('hyperv.nova.vmops.VMOps._create_root_device')
    @mock.patch('hyperv.nova.vmops.VMOps._delete_disk_files')
    @mock.patch('hyperv.nova.vif.get_vif_driver')
    @mock.patch.object(block_device_manager.BlockDeviceInfoManager,
                       'validate_and_update_bdi')
    @mock.patch.object(vmops.VMOps, 'set_boot_order')
    def _test_spawn(self, mock_set_boot_order, mock_validate_and_update_bdi,
                    mock_get_vif_driver, mock_delete_disk_files,
                    mock_create_root_device,
                    mock_create_ephemerals, mock_get_image_vm_gen,
                    mock_create_instance, mock_save_device_metadata,
                    mock_configdrive_required,
                    mock_create_config_drive, mock_attach_config_drive,
                    mock_power_on, mock_destroy, exists,
                    configdrive_required, fail,
                    fake_vm_gen=constants.VM_GEN_2):
        """Drive VMOps.spawn through its three outcomes.

        :param exists: the VM already exists -> InstanceExists is expected.
        :param configdrive_required: whether a config drive should be
            created and attached on the success path.
        :param fail: side effect for create_instance; HyperVException
            exercises the cleanup (destroy) path.
        NOTE(review): set_boot_order is patched but never asserted here —
        presumably only to neutralize it; confirm against spawn's behavior.
        """
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_image_meta = mock.MagicMock()
        root_device_info = mock.sentinel.ROOT_DEV_INFO
        mock_get_image_vm_gen.return_value = fake_vm_gen
        fake_config_drive_path = mock_create_config_drive.return_value
        block_device_info = {'ephemerals': [], 'root_disk': root_device_info}
        self._vmops._vmutils.vm_exists.return_value = exists
        mock_configdrive_required.return_value = configdrive_required
        mock_create_instance.side_effect = fail
        if exists:
            # A pre-existing VM aborts the spawn immediately.
            self.assertRaises(exception.InstanceExists, self._vmops.spawn,
                              self.context, mock_instance, mock_image_meta,
                              [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                              mock.sentinel.INFO, block_device_info)
        elif fail is os_win_exc.HyperVException:
            # A failing create_instance must propagate and trigger cleanup.
            self.assertRaises(os_win_exc.HyperVException, self._vmops.spawn,
                              self.context, mock_instance, mock_image_meta,
                              [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                              mock.sentinel.INFO, block_device_info)
            mock_destroy.assert_called_once_with(mock_instance)
        else:
            self._vmops.spawn(self.context, mock_instance, mock_image_meta,
                              [mock.sentinel.FILE], mock.sentinel.PASSWORD,
                              mock.sentinel.INFO, block_device_info)
            self._vmops._vmutils.vm_exists.assert_called_once_with(
                mock_instance.name)
            mock_delete_disk_files.assert_called_once_with(
                mock_instance.name)
            # NOTE(review): this deliberately rebinds (shadows) the patched
            # decorator argument with the mock actually used by the code
            # under test (_block_dev_man is itself a mock fixture).
            mock_validate_and_update_bdi = (
                self._vmops._block_dev_man.validate_and_update_bdi)
            mock_validate_and_update_bdi.assert_called_once_with(
                mock_instance, mock_image_meta, fake_vm_gen, block_device_info)
            mock_create_root_device.assert_called_once_with(self.context,
                                                            mock_instance,
                                                            root_device_info,
                                                            fake_vm_gen)
            mock_create_ephemerals.assert_called_once_with(
                mock_instance, block_device_info['ephemerals'])
            mock_get_image_vm_gen.assert_called_once_with(mock_instance.uuid,
                                                          mock_image_meta)
            mock_create_instance.assert_called_once_with(
                self.context, mock_instance, mock.sentinel.INFO,
                root_device_info, block_device_info, fake_vm_gen,
                mock_image_meta)
            mock_save_device_metadata.assert_called_once_with(
                self.context, mock_instance, block_device_info)
            mock_configdrive_required.assert_called_once_with(mock_instance)
            if configdrive_required:
                mock_create_config_drive.assert_called_once_with(
                    self.context, mock_instance, [mock.sentinel.FILE],
                    mock.sentinel.PASSWORD,
                    mock.sentinel.INFO)
                mock_attach_config_drive.assert_called_once_with(
                    mock_instance, fake_config_drive_path, fake_vm_gen)
            mock_power_on.assert_called_once_with(
                mock_instance, network_info=mock.sentinel.INFO)
def test_spawn(self):
self._test_spawn(exists=False, configdrive_required=True, fail=None)
def test_spawn_instance_exists(self):
self._test_spawn(exists=True, configdrive_required=True, fail=None)
def test_spawn_create_instance_exception(self):
self._test_spawn(exists=False, configdrive_required=True,
fail=os_win_exc.HyperVException)
def test_spawn_not_required(self):
self._test_spawn(exists=False, configdrive_required=False, fail=None)
def test_spawn_no_admin_permissions(self):
self._vmops._vmutils.check_admin_permissions.side_effect = (
os_win_exc.HyperVException)
self.assertRaises(os_win_exc.HyperVException,
self._vmops.spawn,
self.context, mock.DEFAULT, mock.DEFAULT,
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
@mock.patch.object(vmops.VMOps, '_get_neutron_events')
@mock.patch.object(vmops.utils, 'is_neutron')
def test_wait_vif_plug_events(self, mock_is_neutron, mock_get_events):
self._vmops._virtapi.wait_for_instance_event.side_effect = (
etimeout.Timeout)
self.flags(vif_plugging_timeout=1)
self.flags(vif_plugging_is_fatal=True)
def _context_user():
with self._vmops.wait_vif_plug_events(mock.sentinel.instance,
mock.sentinel.network_info):
pass
self.assertRaises(exception.VirtualInterfaceCreateException,
_context_user)
mock_is_neutron.assert_called_once_with()
mock_get_events.assert_called_once_with(mock.sentinel.network_info)
self._vmops._virtapi.wait_for_instance_event.assert_called_once_with(
mock.sentinel.instance, mock_get_events.return_value,
deadline=CONF.vif_plugging_timeout,
error_callback=self._vmops._neutron_failed_callback)
def test_neutron_failed_callback(self):
self.flags(vif_plugging_is_fatal=True)
self.assertRaises(exception.VirtualInterfaceCreateException,
self._vmops._neutron_failed_callback,
mock.sentinel.event_name, mock.sentinel.instance)
def test_get_neutron_events(self):
network_info = [{'id': mock.sentinel.vif_id1, 'active': True},
{'id': mock.sentinel.vif_id2, 'active': False},
{'id': mock.sentinel.vif_id3}]
events = self._vmops._get_neutron_events(network_info)
self.assertEqual([('network-vif-plugged', mock.sentinel.vif_id2)],
events)
@mock.patch.object(vmops.VMOps, '_configure_secure_vm')
@mock.patch('hyperv.nova.vif.get_vif_driver')
@mock.patch.object(vmops.VMOps, '_requires_secure_boot')
@mock.patch.object(vmops.VMOps, '_requires_certificate')
@mock.patch.object(vmops.volumeops.VolumeOps, 'attach_volumes')
@mock.patch.object(vmops.VMOps, '_set_instance_disk_qos_specs')
@mock.patch.object(vmops.VMOps, '_attach_root_device')
@mock.patch.object(vmops.VMOps, '_get_image_serial_port_settings')
@mock.patch.object(vmops.VMOps, '_create_vm_com_port_pipes')
@mock.patch.object(vmops.VMOps, '_attach_ephemerals')
@mock.patch.object(vmops.VMOps, '_configure_remotefx')
@mock.patch.object(vmops.VMOps, '_get_instance_vnuma_config')
def _test_create_instance(self, mock_get_instance_vnuma_config,
mock_configure_remotefx,
mock_attach_ephemerals,
mock_create_pipes,
mock_get_port_settings,
mock_attach_root_device,
mock_set_qos_specs,
mock_attach_volumes,
mock_requires_certificate,
mock_requires_secure_boot,
mock_get_vif_driver,
mock_configure_secure_vm,
enable_instance_metrics,
vm_gen=constants.VM_GEN_1, vnuma_enabled=False,
requires_sec_boot=True):
mock_vif_driver = mock_get_vif_driver()
self.flags(dynamic_memory_ratio=2.0, group='hyperv')
self.flags(enable_instance_metrics_collection=enable_instance_metrics,
group='hyperv')
root_device_info = mock.sentinel.ROOT_DEV_INFO
block_device_info = {'ephemerals': [], 'block_device_mapping': []}
fake_network_info = {'id': mock.sentinel.ID,
'address': mock.sentinel.ADDRESS}
mock_instance = fake_instance.fake_instance_obj(self.context)
instance_path = os.path.join(CONF.instances_path, mock_instance.name)
mock_requires_secure_boot.return_value = True
if vnuma_enabled:
mock_get_instance_vnuma_config.return_value = (
mock.sentinel.mem_per_numa, mock.sentinel.cpus_per_numa)
cpus_per_numa = mock.sentinel.numa_cpus
mem_per_numa = mock.sentinel.mem_per_numa
dynamic_memory_ratio = 1.0
else:
mock_get_instance_vnuma_config.return_value = (None, None)
mem_per_numa, cpus_per_numa = (None, None)
dynamic_memory_ratio = CONF.hyperv.dynamic_memory_ratio
flavor = flavor_obj.Flavor(**test_flavor.fake_flavor)
mock_instance.flavor = flavor
self._vmops.create_instance(
context=self.context,
instance=mock_instance,
network_info=[fake_network_info],
block_device_info=block_device_info,
root_device=root_device_info,
vm_gen=vm_gen,
image_meta=mock.sentinel.image_meta)
self._vmops._vmutils.create_vm.assert_called_once_with(
mock_instance.name, vnuma_enabled, vm_gen,
instance_path, [mock_instance.uuid])
self._vmops._vmutils.update_vm.assert_called_once_with(
mock_instance.name, mock_instance.flavor.memory_mb, mem_per_numa,
mock_instance.flavor.vcpus, cpus_per_numa,
CONF.hyperv.limit_cpu_features, dynamic_memory_ratio)
mock_configure_remotefx.assert_called_once_with(mock_instance, vm_gen)
mock_create_scsi_ctrl = self._vmops._vmutils.create_scsi_controller
mock_create_scsi_ctrl.assert_called_once_with(mock_instance.name)
mock_attach_root_device.assert_called_once_with(mock_instance.name,
root_device_info)
mock_attach_ephemerals.assert_called_once_with(mock_instance.name,
block_device_info['ephemerals'])
mock_attach_volumes.assert_called_once_with(
block_device_info['block_device_mapping'], mock_instance.name)
mock_get_port_settings.assert_called_with(mock.sentinel.image_meta)
mock_create_pipes.assert_called_once_with(
mock_instance, mock_get_port_settings.return_value)
self._vmops._vmutils.create_nic.assert_called_once_with(
mock_instance.name, mock.sentinel.ID, mock.sentinel.ADDRESS)
mock_vif_driver.plug.assert_called_once_with(mock_instance,
fake_network_info)
mock_enable = self._vmops._metricsutils.enable_vm_metrics_collection
if enable_instance_metrics:
mock_enable.assert_called_once_with(mock_instance.name)
mock_set_qos_specs.assert_called_once_with(mock_instance)
mock_requires_secure_boot.assert_called_once_with(
mock_instance, mock.sentinel.image_meta, vm_gen)
mock_requires_certificate.assert_called_once_with(
mock.sentinel.image_meta)
enable_secure_boot = self._vmops._vmutils.enable_secure_boot
enable_secure_boot.assert_called_once_with(
mock_instance.name,
msft_ca_required=mock_requires_certificate.return_value)
mock_configure_secure_vm.assert_called_once_with(self.context,
mock_instance, mock.sentinel.image_meta, requires_sec_boot)
def test_create_instance(self):
self._test_create_instance(enable_instance_metrics=True)
def test_create_instance_enable_instance_metrics_false(self):
self._test_create_instance(enable_instance_metrics=False)
def test_create_instance_gen2(self):
self._test_create_instance(enable_instance_metrics=False,
vm_gen=constants.VM_GEN_2)
@mock.patch.object(vmops.volumeops.VolumeOps, 'attach_volume')
def test_attach_root_device_volume(self, mock_attach_volume):
mock_instance = fake_instance.fake_instance_obj(self.context)
root_device_info = {'type': constants.VOLUME,
'connection_info': mock.sentinel.CONN_INFO,
'disk_bus': constants.CTRL_TYPE_IDE}
self._vmops._attach_root_device(mock_instance.name, root_device_info)
mock_attach_volume.assert_called_once_with(
root_device_info['connection_info'], mock_instance.name,
disk_bus=root_device_info['disk_bus'])
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_root_device_disk(self, mock_attach_drive):
mock_instance = fake_instance.fake_instance_obj(self.context)
root_device_info = {'type': constants.DISK,
'boot_index': 0,
'disk_bus': constants.CTRL_TYPE_IDE,
'path': 'fake_path',
'drive_addr': 0,
'ctrl_disk_addr': 1}
self._vmops._attach_root_device(mock_instance.name, root_device_info)
mock_attach_drive.assert_called_once_with(
mock_instance.name, root_device_info['path'],
root_device_info['drive_addr'], root_device_info['ctrl_disk_addr'],
root_device_info['disk_bus'], root_device_info['type'])
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_ephemerals(self, mock_attach_drive):
mock_instance = fake_instance.fake_instance_obj(self.context)
ephemerals = [{'path': mock.sentinel.PATH1,
'boot_index': 1,
'disk_bus': constants.CTRL_TYPE_IDE,
'device_type': 'disk',
'drive_addr': 0,
'ctrl_disk_addr': 1},
{'path': mock.sentinel.PATH2,
'boot_index': 2,
'disk_bus': constants.CTRL_TYPE_SCSI,
'device_type': 'disk',
'drive_addr': 0,
'ctrl_disk_addr': 0},
{'path': None}]
self._vmops._attach_ephemerals(mock_instance.name, ephemerals)
mock_attach_drive.assert_has_calls(
[mock.call(mock_instance.name, mock.sentinel.PATH1, 0,
1, constants.CTRL_TYPE_IDE, constants.DISK),
mock.call(mock_instance.name, mock.sentinel.PATH2, 0,
0, constants.CTRL_TYPE_SCSI, constants.DISK)
])
def test_attach_drive_vm_to_scsi(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_SCSI)
self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
constants.DISK)
def test_attach_drive_vm_to_ide(self):
self._vmops._attach_drive(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.CTRL_TYPE_IDE)
self._vmops._vmutils.attach_ide_drive.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME, mock.sentinel.FAKE_PATH,
mock.sentinel.FAKE_DRIVE_ADDR, mock.sentinel.FAKE_CTRL_DISK_ADDR,
constants.DISK)
def _check_get_image_vm_gen_except(self, image_prop):
image_meta = {"properties": {constants.IMAGE_PROP_VM_GEN: image_prop}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
self.assertRaises(exception.InstanceUnacceptable,
self._vmops.get_image_vm_generation,
mock.sentinel.instance_id,
image_meta)
def test_get_image_vm_generation_default(self):
image_meta = {"properties": {}}
self._vmops._hostutils.get_default_vm_generation.return_value = (
constants.IMAGE_PROP_VM_GEN_1)
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
response = self._vmops.get_image_vm_generation(
mock.sentinel.instance_id, image_meta)
self.assertEqual(constants.VM_GEN_1, response)
def test_get_image_vm_generation_gen2(self):
image_meta = {"properties": {
constants.IMAGE_PROP_VM_GEN: constants.IMAGE_PROP_VM_GEN_2}}
self._vmops._hostutils.get_supported_vm_types.return_value = [
constants.IMAGE_PROP_VM_GEN_1, constants.IMAGE_PROP_VM_GEN_2]
response = self._vmops.get_image_vm_generation(
mock.sentinel.instance_id, image_meta)
self.assertEqual(constants.VM_GEN_2, response)
def test_get_image_vm_generation_bad_prop(self):
self._check_get_image_vm_gen_except(mock.sentinel.FAKE_IMAGE_PROP)
def test_check_vm_image_type_exception(self):
self._vmops._vhdutils.get_vhd_format.return_value = (
constants.DISK_FORMAT_VHD)
self.assertRaises(exception.InstanceUnacceptable,
self._vmops.check_vm_image_type,
mock.sentinel.instance_id, constants.VM_GEN_2,
mock.sentinel.FAKE_PATH)
def _check_requires_certificate(self, os_type):
mock_image_meta = {'properties': {'os_type': os_type}}
expected_result = os_type == fields.OSType.LINUX
result = self._vmops._requires_certificate(mock_image_meta)
self.assertEqual(expected_result, result)
def test_requires_certificate_windows(self):
self._check_requires_certificate(os_type=fields.OSType.WINDOWS)
def test_requires_certificate_linux(self):
self._check_requires_certificate(os_type=fields.OSType.LINUX)
def _check_requires_secure_boot(
self, image_prop_os_type=fields.OSType.LINUX,
image_prop_secure_boot=fields.SecureBoot.REQUIRED,
flavor_secure_boot=fields.SecureBoot.REQUIRED,
vm_gen=constants.VM_GEN_2, expected_exception=True):
mock_instance = fake_instance.fake_instance_obj(self.context)
if flavor_secure_boot:
mock_instance.flavor.extra_specs = {
constants.FLAVOR_SPEC_SECURE_BOOT: flavor_secure_boot}
mock_image_meta = {'properties': {'os_type': image_prop_os_type}}
if image_prop_secure_boot:
mock_image_meta['properties']['os_secure_boot'] = (
image_prop_secure_boot)
if expected_exception:
self.assertRaises(exception.InstanceUnacceptable,
self._vmops._requires_secure_boot,
mock_instance, mock_image_meta, vm_gen)
else:
result = self._vmops._requires_secure_boot(mock_instance,
mock_image_meta,
vm_gen)
requires_sb = fields.SecureBoot.REQUIRED in [
flavor_secure_boot, image_prop_secure_boot]
self.assertEqual(requires_sb, result)
def test_requires_secure_boot_ok(self):
self._check_requires_secure_boot(
expected_exception=False)
def test_requires_secure_boot_image_img_prop_none(self):
self._check_requires_secure_boot(
image_prop_secure_boot=None,
expected_exception=False)
def test_requires_secure_boot_image_extra_spec_none(self):
self._check_requires_secure_boot(
flavor_secure_boot=None,
expected_exception=False)
def test_requires_secure_boot_flavor_no_os_type(self):
self._check_requires_secure_boot(
image_prop_os_type=None)
def test_requires_secure_boot_flavor_no_os_type_no_exc(self):
self._check_requires_secure_boot(
image_prop_os_type=None,
image_prop_secure_boot=fields.SecureBoot.DISABLED,
flavor_secure_boot=fields.SecureBoot.DISABLED,
expected_exception=False)
def test_requires_secure_boot_flavor_disabled(self):
self._check_requires_secure_boot(
flavor_secure_boot=fields.SecureBoot.DISABLED)
def test_requires_secure_boot_image_disabled(self):
self._check_requires_secure_boot(
image_prop_secure_boot=fields.SecureBoot.DISABLED)
def test_requires_secure_boot_generation_1(self):
self._check_requires_secure_boot(vm_gen=constants.VM_GEN_1)
    @mock.patch('nova.api.metadata.base.InstanceMetadata')
    @mock.patch('nova.virt.configdrive.ConfigDriveBuilder')
    @mock.patch('nova.utils.execute')
    def _test_create_config_drive(self, mock_execute, mock_ConfigDriveBuilder,
                                  mock_InstanceMetadata, config_drive_format,
                                  config_drive_cdrom, side_effect,
                                  rescue=False):
        """Exercise _create_config_drive's three outcomes.

        :param config_drive_format: anything other than self.ISO9660 is
            expected to raise ConfigDriveUnsupportedFormat.
        :param config_drive_cdrom: when False, the ISO is converted to a
            VHD via qemu-img and the ISO is removed afterwards.
        :param side_effect: injected into make_drive;
            ProcessExecutionError is expected to propagate.
        """
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self.flags(config_drive_format=config_drive_format)
        self.flags(config_drive_cdrom=config_drive_cdrom, group='hyperv')
        self.flags(config_drive_inject_password=True, group='hyperv')
        mock_ConfigDriveBuilder().__enter__().make_drive.side_effect = [
            side_effect]
        path_iso = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO)
        path_vhd = os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_VHD)
        # Fake path resolution: DVDs resolve to the ISO path, anything
        # else to the VHD path.
        def fake_get_configdrive_path(instance_name, disk_format,
                                      rescue=False):
            return (path_iso
                    if disk_format == constants.DVD_FORMAT else path_vhd)
        mock_get_configdrive_path = self._vmops._pathutils.get_configdrive_path
        mock_get_configdrive_path.side_effect = fake_get_configdrive_path
        expected_get_configdrive_path_calls = [mock.call(mock_instance.name,
                                                         constants.DVD_FORMAT,
                                                         rescue=rescue)]
        if not config_drive_cdrom:
            # The VHD path is looked up only when converting from ISO.
            expected_call = mock.call(mock_instance.name,
                                      constants.DISK_FORMAT_VHD,
                                      rescue=rescue)
            expected_get_configdrive_path_calls.append(expected_call)
        if config_drive_format != self.ISO9660:
            # Unsupported formats are rejected up front.
            self.assertRaises(exception.ConfigDriveUnsupportedFormat,
                              self._vmops._create_config_drive,
                              self.context,
                              mock_instance,
                              [mock.sentinel.FILE],
                              mock.sentinel.PASSWORD,
                              mock.sentinel.NET_INFO,
                              rescue)
        elif side_effect is processutils.ProcessExecutionError:
            # make_drive failures propagate unchanged.
            self.assertRaises(processutils.ProcessExecutionError,
                              self._vmops._create_config_drive,
                              self.context,
                              mock_instance,
                              [mock.sentinel.FILE],
                              mock.sentinel.PASSWORD,
                              mock.sentinel.NET_INFO,
                              rescue)
        else:
            path = self._vmops._create_config_drive(self.context,
                                                    mock_instance,
                                                    [mock.sentinel.FILE],
                                                    mock.sentinel.PASSWORD,
                                                    mock.sentinel.NET_INFO,
                                                    rescue)
            mock_InstanceMetadata.assert_called_once_with(
                mock_instance, content=[mock.sentinel.FILE],
                extra_md={'admin_pass': mock.sentinel.PASSWORD},
                network_info=mock.sentinel.NET_INFO,
                request_context=self.context)
            mock_get_configdrive_path.assert_has_calls(
                expected_get_configdrive_path_calls)
            mock_ConfigDriveBuilder.assert_called_with(
                instance_md=mock_InstanceMetadata())
            mock_make_drive = mock_ConfigDriveBuilder().__enter__().make_drive
            # The drive is always built as an ISO first.
            mock_make_drive.assert_called_once_with(path_iso)
            if not CONF.hyperv.config_drive_cdrom:
                expected = path_vhd
                # ISO -> VHD conversion via qemu-img, then the ISO is
                # removed.
                mock_execute.assert_called_once_with(
                    CONF.hyperv.qemu_img_cmd,
                    'convert', '-f', 'raw', '-O', 'vpc',
                    path_iso, path_vhd, attempts=1)
                self._vmops._pathutils.remove.assert_called_once_with(
                    os.path.join(self.FAKE_DIR, self.FAKE_CONFIG_DRIVE_ISO))
            else:
                expected = path_iso
            self.assertEqual(expected, path)
def test_create_config_drive_cdrom(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=True,
side_effect=None)
def test_create_config_drive_vhd(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=None)
def test_create_rescue_config_drive_vhd(self):
self._test_create_config_drive(config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=None,
rescue=True)
def test_create_config_drive_other_drive_format(self):
self._test_create_config_drive(config_drive_format=mock.sentinel.OTHER,
config_drive_cdrom=False,
side_effect=None)
def test_create_config_drive_execution_error(self):
self._test_create_config_drive(
config_drive_format=self.ISO9660,
config_drive_cdrom=False,
side_effect=processutils.ProcessExecutionError)
def test_attach_config_drive_exception(self):
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(exception.InvalidDiskFormat,
self._vmops.attach_config_drive,
instance, 'C:/fake_instance_dir/configdrive.xxx',
constants.VM_GEN_1)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_1)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_IDE, constants.DISK)
@mock.patch.object(vmops.VMOps, '_attach_drive')
def test_attach_config_drive_gen2(self, mock_attach_drive):
instance = fake_instance.fake_instance_obj(self.context)
self._vmops.attach_config_drive(instance,
self._FAKE_CONFIGDRIVE_PATH,
constants.VM_GEN_2)
mock_attach_drive.assert_called_once_with(
instance.name, self._FAKE_CONFIGDRIVE_PATH,
1, 0, constants.CTRL_TYPE_SCSI, constants.DISK)
def test_detach_config_drive(self):
is_rescue_configdrive = True
mock_lookup_configdrive = (
self._vmops._pathutils.lookup_configdrive_path)
mock_lookup_configdrive.return_value = mock.sentinel.configdrive_path
self._vmops._detach_config_drive(mock.sentinel.instance_name,
rescue=is_rescue_configdrive,
delete=True)
mock_lookup_configdrive.assert_called_once_with(
mock.sentinel.instance_name,
rescue=is_rescue_configdrive)
self._vmops._vmutils.detach_vm_disk.assert_called_once_with(
mock.sentinel.instance_name, mock.sentinel.configdrive_path,
is_physical=False)
self._vmops._pathutils.remove.assert_called_once_with(
mock.sentinel.configdrive_path)
def test_delete_disk_files(self):
mock_instance = fake_instance.fake_instance_obj(self.context)
self._vmops._delete_disk_files(mock_instance.name)
stop_console_handler = (
self._vmops._serial_console_ops.stop_console_handler_unsync)
stop_console_handler.assert_called_once_with(mock_instance.name)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock_instance.name, create_dir=False, remove_dir=True)
    @mock.patch('hyperv.nova.volumeops.VolumeOps.disconnect_volumes')
    @mock.patch('hyperv.nova.vmops.VMOps._delete_disk_files')
    @mock.patch('hyperv.nova.vmops.VMOps.power_off')
    @mock.patch('hyperv.nova.vmops.VMOps.unplug_vifs')
    def test_destroy(self, mock_unplug_vifs, mock_power_off,
                     mock_delete_disk_files, mock_disconnect_volumes):
        """destroy() powers off, destroys the VM and cleans up its resources."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._vmutils.vm_exists.return_value = True

        self._vmops.destroy(instance=mock_instance,
                            network_info=mock.sentinel.fake_network_info,
                            block_device_info=mock.sentinel.FAKE_BD_INFO)

        self._vmops._vmutils.vm_exists.assert_called_with(
            mock_instance.name)
        mock_power_off.assert_called_once_with(mock_instance)
        self._vmops._vmutils.destroy_vm.assert_called_once_with(
            mock_instance.name)
        mock_disconnect_volumes.assert_called_once_with(
            mock.sentinel.FAKE_BD_INFO)
        mock_delete_disk_files.assert_called_once_with(
            mock_instance.name)
        mock_unplug_vifs.assert_called_once_with(
            mock_instance, mock.sentinel.fake_network_info)
    def test_destroy_inexistent_instance(self):
        """Destroying a VM that does not exist is a no-op."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._vmutils.vm_exists.return_value = False
        self._vmops.destroy(instance=mock_instance)
        self.assertFalse(self._vmops._vmutils.destroy_vm.called)
    @mock.patch('hyperv.nova.vmops.VMOps.power_off')
    def test_destroy_exception(self, mock_power_off):
        """A destroy_vm failure propagates as HyperVException."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._vmutils.destroy_vm.side_effect = (
            os_win_exc.HyperVException)
        self._vmops._vmutils.vm_exists.return_value = True
        self.assertRaises(os_win_exc.HyperVException,
                          self._vmops.destroy, mock_instance)
    def test_reboot_hard(self):
        """A hard reboot sets the VM state to REBOOT."""
        self._test_reboot(vmops.REBOOT_TYPE_HARD,
                          os_win_const.HYPERV_VM_STATE_REBOOT)
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_reboot_soft(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = True
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
os_win_const.HYPERV_VM_STATE_ENABLED)
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_reboot_soft_failed(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_reboot(vmops.REBOOT_TYPE_SOFT,
os_win_const.HYPERV_VM_STATE_REBOOT)
@mock.patch("hyperv.nova.vmops.VMOps.power_on")
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_reboot_soft_exception(self, mock_soft_shutdown, mock_power_on):
mock_soft_shutdown.return_value = True
mock_power_on.side_effect = os_win_exc.HyperVException(
"Expected failure")
instance = fake_instance.fake_instance_obj(self.context)
self.assertRaises(os_win_exc.HyperVException, self._vmops.reboot,
instance, {}, vmops.REBOOT_TYPE_SOFT)
mock_soft_shutdown.assert_called_once_with(instance)
mock_power_on.assert_called_once_with(instance, network_info={})
    def _test_reboot(self, reboot_type, vm_state):
        """Helper: reboot() with the given type must set the given VM state."""
        instance = fake_instance.fake_instance_obj(self.context)
        with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
            self._vmops.reboot(instance, {}, reboot_type)
            mock_set_state.assert_called_once_with(instance, vm_state)
@mock.patch("hyperv.nova.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = True
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_once_with(instance.name)
mock_wait_for_power_off.assert_called_once_with(
instance.name, self._FAKE_TIMEOUT)
self.assertTrue(result)
@mock.patch("time.sleep")
def test_soft_shutdown_failed(self, mock_sleep):
instance = fake_instance.fake_instance_obj(self.context)
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.side_effect = os_win_exc.HyperVException(
"Expected failure.")
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT)
mock_shutdown_vm.assert_called_once_with(instance.name)
self.assertFalse(result)
@mock.patch("hyperv.nova.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.side_effect = [False, True]
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1)
calls = [mock.call(instance.name, 1),
mock.call(instance.name, self._FAKE_TIMEOUT - 1)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertTrue(result)
@mock.patch("hyperv.nova.vmops.VMOps._wait_for_power_off")
def test_soft_shutdown_wait_timeout(self, mock_wait_for_power_off):
instance = fake_instance.fake_instance_obj(self.context)
mock_wait_for_power_off.return_value = False
result = self._vmops._soft_shutdown(instance, self._FAKE_TIMEOUT, 1.5)
calls = [mock.call(instance.name, 1.5),
mock.call(instance.name, self._FAKE_TIMEOUT - 1.5)]
mock_shutdown_vm = self._vmops._vmutils.soft_shutdown_vm
mock_shutdown_vm.assert_called_with(instance.name)
mock_wait_for_power_off.assert_has_calls(calls)
self.assertFalse(result)
    @mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
    def test_pause(self, mock_set_vm_state):
        """pause() transitions the VM to the PAUSED state."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.pause(instance=mock_instance)
        mock_set_vm_state.assert_called_once_with(
            mock_instance, os_win_const.HYPERV_VM_STATE_PAUSED)
    @mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
    def test_unpause(self, mock_set_vm_state):
        """unpause() transitions the VM back to the ENABLED state."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.unpause(instance=mock_instance)
        mock_set_vm_state.assert_called_once_with(
            mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    @mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
    def test_suspend(self, mock_set_vm_state):
        """suspend() transitions the VM to the SUSPENDED state."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.suspend(instance=mock_instance)
        mock_set_vm_state.assert_called_once_with(
            mock_instance, os_win_const.HYPERV_VM_STATE_SUSPENDED)
    @mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
    def test_resume(self, mock_set_vm_state):
        """resume() transitions the VM back to the ENABLED state."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops.resume(instance=mock_instance)
        mock_set_vm_state.assert_called_once_with(
            mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    def _test_power_off(self, timeout, set_state_expected=True):
        """Helper: power_off() stops the console and optionally sets DISABLED.

        :param timeout: soft shutdown timeout passed to power_off.
        :param set_state_expected: whether _set_vm_state is expected to run.
        """
        instance = fake_instance.fake_instance_obj(self.context)
        with mock.patch.object(self._vmops, '_set_vm_state') as mock_set_state:
            self._vmops.power_off(instance, timeout)

            serialops = self._vmops._serial_console_ops
            serialops.stop_console_handler.assert_called_once_with(
                instance.name)
            if set_state_expected:
                mock_set_state.assert_called_once_with(
                    instance, os_win_const.HYPERV_VM_STATE_DISABLED)
    def test_power_off_hard(self):
        """A zero timeout means an immediate hard power off."""
        self._test_power_off(timeout=0)
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_power_off_exception(self, mock_soft_shutdown):
mock_soft_shutdown.return_value = False
self._test_power_off(timeout=1)
@mock.patch("hyperv.nova.vmops.VMOps._set_vm_state")
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_power_off_soft(self, mock_soft_shutdown, mock_set_state):
instance = fake_instance.fake_instance_obj(self.context)
mock_soft_shutdown.return_value = True
self._vmops.power_off(instance, 1, 0)
serialops = self._vmops._serial_console_ops
serialops.stop_console_handler.assert_called_once_with(
instance.name)
mock_soft_shutdown.assert_called_once_with(
instance, 1, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(mock_set_state.called)
@mock.patch("hyperv.nova.vmops.VMOps._soft_shutdown")
def test_power_off_unexisting_instance(self, mock_soft_shutdown):
mock_soft_shutdown.side_effect = os_win_exc.HyperVVMNotFoundException(
vm_name=mock.sentinel.vm_name)
self._test_power_off(timeout=1, set_state_expected=False)
    @mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
    def test_power_on(self, mock_set_vm_state):
        """power_on() transitions the VM to the ENABLED state."""
        mock_instance = fake_instance.fake_instance_obj(self.context)

        self._vmops.power_on(mock_instance)

        mock_set_vm_state.assert_called_once_with(
            mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    @mock.patch('hyperv.nova.volumeops.VolumeOps'
                '.fix_instance_volume_disk_paths')
    @mock.patch('hyperv.nova.vmops.VMOps._set_vm_state')
    def test_power_on_having_block_devices(self, mock_set_vm_state,
                                           mock_fix_instance_vol_paths):
        """power_on() repairs volume disk paths before enabling the VM."""
        mock_instance = fake_instance.fake_instance_obj(self.context)

        self._vmops.power_on(mock_instance, mock.sentinel.block_device_info)

        mock_fix_instance_vol_paths.assert_called_once_with(
            mock_instance.name, mock.sentinel.block_device_info)
        mock_set_vm_state.assert_called_once_with(
            mock_instance, os_win_const.HYPERV_VM_STATE_ENABLED)
    @mock.patch.object(vmops.VMOps, 'post_start_vifs')
    def test_power_on_with_network_info(self, mock_post_start_vifs):
        """power_on() runs the VIF post-start hooks when network info is given."""
        mock_instance = fake_instance.fake_instance_obj(self.context)

        self._vmops.power_on(mock_instance,
                             network_info=mock.sentinel.fake_network_info)

        mock_post_start_vifs.assert_called_once_with(
            mock_instance, mock.sentinel.fake_network_info)
    def _test_set_vm_state(self, state):
        """Helper: _set_vm_state delegates to vmutils with the given state."""
        mock_instance = fake_instance.fake_instance_obj(self.context)

        self._vmops._set_vm_state(mock_instance, state)
        self._vmops._vmutils.set_vm_state.assert_called_once_with(
            mock_instance.name, state)
    def test_set_vm_state_disabled(self):
        """_set_vm_state passes DISABLED through to vmutils."""
        self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_DISABLED)
    def test_set_vm_state_enabled(self):
        """_set_vm_state passes ENABLED through to vmutils."""
        self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_ENABLED)
    def test_set_vm_state_reboot(self):
        """_set_vm_state passes REBOOT through to vmutils."""
        self._test_set_vm_state(state=os_win_const.HYPERV_VM_STATE_REBOOT)
    def test_set_vm_state_exception(self):
        """A vmutils failure while setting state is propagated."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._vmutils.set_vm_state.side_effect = (
            os_win_exc.HyperVException)
        self.assertRaises(os_win_exc.HyperVException,
                          self._vmops._set_vm_state,
                          mock_instance, mock.sentinel.STATE)
    def test_get_vm_state(self):
        """_get_vm_state extracts EnabledState from the VM summary info."""
        summary_info = {'EnabledState': os_win_const.HYPERV_VM_STATE_DISABLED}

        with mock.patch.object(self._vmops._vmutils,
                               'get_vm_summary_info') as mock_get_summary_info:
            mock_get_summary_info.return_value = summary_info

            response = self._vmops._get_vm_state(mock.sentinel.FAKE_VM_NAME)
            self.assertEqual(response, os_win_const.HYPERV_VM_STATE_DISABLED)
    @mock.patch.object(vmops.VMOps, '_get_vm_state')
    def test_wait_for_power_off_true(self, mock_get_state):
        """Waiting succeeds once the VM reports the DISABLED state."""
        mock_get_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED
        result = self._vmops._wait_for_power_off(
            mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
        mock_get_state.assert_called_with(mock.sentinel.FAKE_VM_NAME)
        self.assertTrue(result)
@mock.patch.object(vmops.etimeout, "with_timeout")
def test_wait_for_power_off_false(self, mock_with_timeout):
mock_with_timeout.side_effect = etimeout.Timeout()
result = self._vmops._wait_for_power_off(
mock.sentinel.FAKE_VM_NAME, vmops.SHUTDOWN_TIME_INCREMENT)
self.assertFalse(result)
    def test_create_vm_com_port_pipes(self):
        """Each serial port is wired to a named pipe based on the VM uuid."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_serial_ports = {
            1: constants.SERIAL_PORT_TYPE_RO,
            2: constants.SERIAL_PORT_TYPE_RW
        }

        self._vmops._create_vm_com_port_pipes(mock_instance,
                                              mock_serial_ports)
        expected_calls = []
        for port_number, port_type in mock_serial_ports.items():
            expected_pipe = r'\\.\pipe\%s_%s' % (mock_instance.uuid,
                                                 port_type)
            expected_calls.append(mock.call(mock_instance.name,
                                            port_number,
                                            expected_pipe))

        mock_set_conn = self._vmops._vmutils.set_vm_serial_port_connection
        mock_set_conn.assert_has_calls(expected_calls)
    def test_list_instance_uuids(self):
        """Instance uuids are collected from the VM notes."""
        fake_uuid = '4f54fb69-d3a2-45b7-bb9b-b6e6b3d893b3'
        with mock.patch.object(self._vmops._vmutils,
                               'list_instance_notes') as mock_list_notes:
            mock_list_notes.return_value = [('fake_name', [fake_uuid])]

            response = self._vmops.list_instance_uuids()
            mock_list_notes.assert_called_once_with()

        self.assertEqual(response, [fake_uuid])
def test_copy_vm_dvd_disks(self):
fake_paths = [mock.sentinel.FAKE_DVD_PATH1,
mock.sentinel.FAKE_DVD_PATH2]
mock_copy = self._vmops._pathutils.copyfile
mock_get_dvd_disk_paths = self._vmops._vmutils.get_vm_dvd_disk_paths
mock_get_dvd_disk_paths.return_value = fake_paths
self._vmops._pathutils.get_instance_dir.return_value = (
mock.sentinel.FAKE_DEST_PATH)
self._vmops.copy_vm_dvd_disks(mock.sentinel.FAKE_VM_NAME,
mock.sentinel.FAKE_DEST_HOST)
mock_get_dvd_disk_paths.assert_called_with(mock.sentinel.FAKE_VM_NAME)
self._vmops._pathutils.get_instance_dir.assert_called_once_with(
mock.sentinel.FAKE_VM_NAME,
remote_server=mock.sentinel.FAKE_DEST_HOST)
mock_copy.has_calls(mock.call(mock.sentinel.FAKE_DVD_PATH1,
mock.sentinel.FAKE_DEST_PATH),
mock.call(mock.sentinel.FAKE_DVD_PATH2,
mock.sentinel.FAKE_DEST_PATH))
    def _setup_remotefx_mocks(self):
        """Return a fake instance whose flavor requests RemoteFX resources."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_instance.flavor.extra_specs = {
            'os:resolution': os_win_const.REMOTEFX_MAX_RES_1920x1200,
            'os:monitors': '2',
            'os:vram': '256'}

        return mock_instance
    def test_configure_remotefx_not_required(self):
        """With RemoteFX disabled and not requested, configuration is a no-op."""
        self.flags(enable_remotefx=False, group='hyperv')
        mock_instance = fake_instance.fake_instance_obj(self.context)
        self._vmops._configure_remotefx(mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx_exception_enable_config(self):
        """Requesting RemoteFX while it is disabled in config raises."""
        self.flags(enable_remotefx=False, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self.assertRaises(exception.InstanceUnacceptable,
                          self._vmops._configure_remotefx,
                          mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx_exception_server_feature(self):
        """RemoteFX raises when the host lacks the required server feature."""
        self.flags(enable_remotefx=True, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self._vmops._hostutils.check_server_feature.return_value = False
        self.assertRaises(exception.InstanceUnacceptable,
                          self._vmops._configure_remotefx,
                          mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx_exception_vm_gen(self):
        """RemoteFX raises when the VM generation does not support it."""
        self.flags(enable_remotefx=True, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self._vmops._hostutils.check_server_feature.return_value = True
        self._vmops._vmutils.vm_gen_supports_remotefx.return_value = False
        self.assertRaises(exception.InstanceUnacceptable,
                          self._vmops._configure_remotefx,
                          mock_instance, mock.sentinel.VM_GEN)
    def test_configure_remotefx(self):
        """RemoteFX is enabled with the monitors/resolution/vram extra specs."""
        self.flags(enable_remotefx=True, group='hyperv')
        mock_instance = self._setup_remotefx_mocks()
        self._vmops._hostutils.check_server_feature.return_value = True
        self._vmops._vmutils.vm_gen_supports_remotefx.return_value = True
        extra_specs = mock_instance.flavor.extra_specs

        self._vmops._configure_remotefx(mock_instance,
                                        constants.VM_GEN_1)
        mock_enable_remotefx = (
            self._vmops._vmutils.enable_remotefx_video_adapter)
        # vram is specified in MB in the flavor and converted to bytes here.
        mock_enable_remotefx.assert_called_once_with(
            mock_instance.name, int(extra_specs['os:monitors']),
            extra_specs['os:resolution'],
            int(extra_specs['os:vram']) * units.Mi)
    @mock.patch.object(vmops.VMOps, '_get_vm_state')
    def test_check_hotplug_available_vm_disabled(self, mock_get_vm_state):
        """Hotplug is always possible on a powered-off VM (no further checks)."""
        fake_vm = fake_instance.fake_instance_obj(self.context)
        mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_DISABLED

        result = self._vmops._check_hotplug_available(fake_vm)

        self.assertTrue(result)
        mock_get_vm_state.assert_called_once_with(fake_vm.name)
        self.assertFalse(
            self._vmops._hostutils.check_min_windows_version.called)
        self.assertFalse(self._vmops._vmutils.get_vm_generation.called)
    @mock.patch.object(vmops.VMOps, '_get_vm_state')
    def _test_check_hotplug_available(
            self, mock_get_vm_state, expected_result=False,
            vm_gen=constants.VM_GEN_2, windows_version=_WIN_VERSION_10):
        """Helper: hotplug on a running VM needs gen-2 and Windows >= 10."""
        fake_vm = fake_instance.fake_instance_obj(self.context)
        mock_get_vm_state.return_value = os_win_const.HYPERV_VM_STATE_ENABLED
        self._vmops._vmutils.get_vm_generation.return_value = vm_gen
        fake_check_win_vers = self._vmops._hostutils.check_min_windows_version
        fake_check_win_vers.return_value = (
            windows_version == self._WIN_VERSION_10)

        result = self._vmops._check_hotplug_available(fake_vm)

        self.assertEqual(expected_result, result)
        mock_get_vm_state.assert_called_once_with(fake_vm.name)
        fake_check_win_vers.assert_called_once_with(10, 0)
    def test_check_if_hotplug_available(self):
        """Hotplug is available on a gen-2 VM on Windows 10."""
        self._test_check_hotplug_available(expected_result=True)
    def test_check_if_hotplug_available_gen1(self):
        """Hotplug is not available on a running gen-1 VM."""
        self._test_check_hotplug_available(
            expected_result=False, vm_gen=constants.VM_GEN_1)
    def test_check_if_hotplug_available_win_6_3(self):
        """Hotplug is not available on Windows versions older than 10."""
        self._test_check_hotplug_available(
            expected_result=False, windows_version=self._WIN_VERSION_6_3)
    @mock.patch.object(vmops.VMOps, '_get_vif_driver')
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_attach_interface(self, mock_check_hotplug_available,
                              mock_get_vif_driver):
        """Attaching a VIF creates the NIC and runs the vif driver hooks."""
        mock_check_hotplug_available.return_value = True
        fake_vm = fake_instance.fake_instance_obj(self.context)
        fake_vif = test_virtual_interface.fake_vif

        self._vmops.attach_interface(fake_vm, fake_vif)

        mock_check_hotplug_available.assert_called_once_with(fake_vm)
        mock_get_vif_driver.return_value.plug.assert_called_once_with(
            fake_vm, fake_vif)
        mock_get_vif_driver.return_value.post_start.assert_called_once_with(
            fake_vm, fake_vif)
        self._vmops._vmutils.create_nic.assert_called_once_with(
            fake_vm.name, fake_vif['id'], fake_vif['address'])
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_attach_interface_failed(self, mock_check_hotplug_available):
        """Attaching raises InterfaceAttachFailed when hotplug is unavailable."""
        mock_check_hotplug_available.return_value = False
        self.assertRaises(exception.InterfaceAttachFailed,
                          self._vmops.attach_interface,
                          mock.MagicMock(), mock.sentinel.fake_vif)
    @mock.patch.object(vmops.VMOps, '_get_vif_driver')
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_detach_interface(self, mock_check_hotplug_available,
                              mock_get_vif_driver):
        """Detaching a VIF unplugs it and destroys the NIC."""
        mock_check_hotplug_available.return_value = True
        fake_vm = fake_instance.fake_instance_obj(self.context)
        fake_vif = test_virtual_interface.fake_vif

        self._vmops.detach_interface(fake_vm, fake_vif)

        mock_check_hotplug_available.assert_called_once_with(fake_vm)
        mock_get_vif_driver.return_value.unplug.assert_called_once_with(
            fake_vm, fake_vif)
        self._vmops._vmutils.destroy_nic.assert_called_once_with(
            fake_vm.name, fake_vif['id'])
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_detach_interface_failed(self, mock_check_hotplug_available):
        """Detaching raises InterfaceDetachFailed when hotplug is unavailable."""
        mock_check_hotplug_available.return_value = False
        self.assertRaises(exception.InterfaceDetachFailed,
                          self._vmops.detach_interface,
                          mock.MagicMock(), mock.sentinel.fake_vif)
    @mock.patch.object(vmops.VMOps, '_check_hotplug_available')
    def test_detach_interface_missing_instance(self, mock_check_hotplug):
        """Detaching from a missing VM surfaces as InterfaceDetachFailed."""
        mock_check_hotplug.side_effect = os_win_exc.HyperVVMNotFoundException(
            vm_name='fake_vm')
        self.assertRaises(exception.InterfaceDetachFailed,
                          self._vmops.detach_interface,
                          mock.MagicMock(), mock.sentinel.fake_vif)
    @mock.patch('nova.virt.configdrive.required_by')
    @mock.patch.object(vmops.VMOps, '_create_root_vhd')
    @mock.patch.object(vmops.VMOps, 'get_image_vm_generation')
    @mock.patch.object(vmops.VMOps, '_attach_drive')
    @mock.patch.object(vmops.VMOps, '_create_config_drive')
    @mock.patch.object(vmops.VMOps, 'attach_config_drive')
    @mock.patch.object(vmops.VMOps, '_detach_config_drive')
    @mock.patch.object(vmops.VMOps, 'power_on')
    def test_rescue_instance(self, mock_power_on,
                             mock_detach_config_drive,
                             mock_attach_config_drive,
                             mock_create_config_drive,
                             mock_attach_drive,
                             mock_get_image_vm_gen,
                             mock_create_root_vhd,
                             mock_configdrive_required):
        """Rescue swaps the root disk for the rescue image and rebuilds the
        config drive, re-attaching the original root disk on the SCSI bus.
        """
        mock_image_meta = mock.MagicMock()
        mock_vm_gen = constants.VM_GEN_2
        mock_instance = fake_instance.fake_instance_obj(self.context)

        mock_configdrive_required.return_value = True
        mock_create_root_vhd.return_value = mock.sentinel.rescue_vhd_path
        mock_get_image_vm_gen.return_value = mock_vm_gen
        self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen
        self._vmops._pathutils.lookup_root_vhd_path.return_value = (
            mock.sentinel.root_vhd_path)
        mock_create_config_drive.return_value = (
            mock.sentinel.rescue_configdrive_path)

        self._vmops.rescue_instance(self.context,
                                    mock_instance,
                                    mock.sentinel.network_info,
                                    mock_image_meta,
                                    mock.sentinel.rescue_password)

        mock_get_image_vm_gen.assert_called_once_with(
            mock_instance.uuid, mock_image_meta)
        self._vmops._vmutils.detach_vm_disk.assert_called_once_with(
            mock_instance.name, mock.sentinel.root_vhd_path,
            is_physical=False)
        # The rescue image becomes the new boot disk...
        mock_attach_drive.assert_called_once_with(
            mock_instance.name, mock.sentinel.rescue_vhd_path, 0,
            self._vmops._ROOT_DISK_CTRL_ADDR,
            vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen])
        # ...while the original root disk is kept accessible over SCSI.
        self._vmops._vmutils.attach_scsi_drive.assert_called_once_with(
            mock_instance.name, mock.sentinel.root_vhd_path,
            drive_type=constants.DISK)
        mock_detach_config_drive.assert_called_once_with(mock_instance.name)
        mock_create_config_drive.assert_called_once_with(
            self.context, mock_instance,
            injected_files=None,
            admin_password=mock.sentinel.rescue_password,
            network_info=mock.sentinel.network_info,
            rescue=True)
        mock_attach_config_drive.assert_called_once_with(
            mock_instance, mock.sentinel.rescue_configdrive_path,
            mock_vm_gen)
    @mock.patch.object(vmops.VMOps, '_create_root_vhd')
    @mock.patch.object(vmops.VMOps, 'get_image_vm_generation')
    @mock.patch.object(vmops.VMOps, 'unrescue_instance')
    def _test_rescue_instance_exception(self, mock_unrescue,
                                        mock_get_image_vm_gen,
                                        mock_create_root_vhd,
                                        wrong_vm_gen=False,
                                        boot_from_volume=False,
                                        expected_exc=None):
        """Helper: rescue failure paths raise and roll back via unrescue."""
        mock_vm_gen = constants.VM_GEN_1
        image_vm_gen = (mock_vm_gen
                        if not wrong_vm_gen else constants.VM_GEN_2)
        mock_image_meta = mock.MagicMock()

        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_get_image_vm_gen.return_value = image_vm_gen
        self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen
        self._vmops._pathutils.lookup_root_vhd_path.return_value = (
            mock.sentinel.root_vhd_path if not boot_from_volume else None)

        self.assertRaises(expected_exc,
                          self._vmops.rescue_instance,
                          self.context, mock_instance,
                          mock.sentinel.network_info,
                          mock_image_meta,
                          mock.sentinel.rescue_password)
        mock_unrescue.assert_called_once_with(mock_instance)
    def test_rescue_instance_wrong_vm_gen(self):
        # Test the case when the rescue image requires a different
        # vm generation than the actual rescued instance.
        self._test_rescue_instance_exception(
            wrong_vm_gen=True,
            expected_exc=exception.ImageUnacceptable)
    def test_rescue_instance_boot_from_volume(self):
        # Rescuing instances booted from volume is not supported.
        self._test_rescue_instance_exception(
            boot_from_volume=True,
            expected_exc=exception.InstanceNotRescuable)
@mock.patch.object(fileutils, 'delete_if_exists')
@mock.patch.object(vmops.VMOps, '_attach_drive')
@mock.patch.object(vmops.VMOps, 'attach_config_drive')
@mock.patch.object(vmops.VMOps, '_detach_config_drive')
@mock.patch.object(vmops.VMOps, 'power_on')
@mock.patch.object(vmops.VMOps, 'power_off')
def test_unrescue_instance(self, mock_power_on, mock_power_off,
mock_detach_config_drive,
mock_attach_configdrive,
mock_attach_drive,
mock_delete_if_exists):
mock_instance = fake_instance.fake_instance_obj(self.context)
mock_vm_gen = constants.VM_GEN_2
self._vmops._vmutils.get_vm_generation.return_value = mock_vm_gen
self._vmops._vmutils.is_disk_attached.return_value = False
self._vmops._pathutils.lookup_root_vhd_path.side_effect = (
mock.sentinel.root_vhd_path, mock.sentinel.rescue_vhd_path)
self._vmops._pathutils.lookup_configdrive_path.return_value = (
mock.sentinel.configdrive_path)
self._vmops.unrescue_instance(mock_instance)
self._vmops._pathutils.lookup_root_vhd_path.assert_has_calls(
[mock.call(mock_instance.name),
mock.call(mock_instance.name, rescue=True)])
self._vmops._vmutils.detach_vm_disk.assert_has_calls(
[mock.call(mock_instance.name,
mock.sentinel.root_vhd_path,
is_physical=False),
mock.call(mock_instance.name,
mock.sentinel.rescue_vhd_path,
is_physical=False)])
mock_attach_drive.assert_called_once_with(
mock_instance.name, mock.sentinel.root_vhd_path, 0,
self._vmops._ROOT_DISK_CTRL_ADDR,
vmops.VM_GENERATIONS_CONTROLLER_TYPES[mock_vm_gen])
mock_detach_config_drive.assert_called_once_with(mock_instance.name,
rescue=True,
delete=True)
mock_delete_if_exists.assert_called_once_with(
mock.sentinel.rescue_vhd_path)
self._vmops._vmutils.is_disk_attached.assert_called_once_with(
mock.sentinel.configdrive_path,
is_physical=False)
mock_attach_configdrive.assert_called_once_with(
mock_instance, mock.sentinel.configdrive_path, mock_vm_gen)
mock_power_on.assert_called_once_with(mock_instance)
    @mock.patch.object(vmops.VMOps, 'power_off')
    def test_unrescue_instance_missing_root_image(self, mock_power_off):
        """Unrescue raises when the original root disk cannot be found."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_instance.vm_state = vm_states.RESCUED
        self._vmops._pathutils.lookup_root_vhd_path.return_value = None
        self.assertRaises(exception.InstanceNotRescuable,
                          self._vmops.unrescue_instance,
                          mock_instance)
    @mock.patch.object(volumeops.VolumeOps, 'bytes_per_sec_to_iops')
    @mock.patch.object(vmops.VMOps, '_get_scoped_flavor_extra_specs')
    @mock.patch.object(vmops.VMOps, '_get_instance_local_disks')
    def test_set_instance_disk_qos_specs(self, mock_get_local_disks,
                                         mock_get_scoped_specs,
                                         mock_bytes_per_sec_to_iops):
        """The flavor's byte/s QoS spec is converted to IOPS and applied to
        every local disk of the instance.
        """
        fake_total_bytes_sec = 8
        fake_total_iops_sec = 1

        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_local_disks = [mock.sentinel.root_vhd_path,
                            mock.sentinel.eph_vhd_path]

        mock_get_local_disks.return_value = mock_local_disks
        mock_set_qos_specs = self._vmops._vmutils.set_disk_qos_specs
        mock_get_scoped_specs.return_value = dict(
            disk_total_bytes_sec=fake_total_bytes_sec)
        mock_bytes_per_sec_to_iops.return_value = fake_total_iops_sec

        self._vmops._set_instance_disk_qos_specs(mock_instance)

        mock_bytes_per_sec_to_iops.assert_called_once_with(
            fake_total_bytes_sec)

        mock_get_local_disks.assert_called_once_with(mock_instance.name)
        expected_calls = [mock.call(disk_path, fake_total_iops_sec)
                          for disk_path in mock_local_disks]
        mock_set_qos_specs.assert_has_calls(expected_calls)
    def test_get_instance_local_disks(self):
        """Only disks under the instance dir count as local; remote ones don't."""
        fake_instance_dir = 'fake_instance_dir'
        fake_local_disks = [os.path.join(fake_instance_dir, disk_name)
                            for disk_name in ['root.vhd', 'configdrive.iso']]
        fake_instance_disks = ['fake_remote_disk'] + fake_local_disks

        mock_get_storage_paths = self._vmops._vmutils.get_vm_storage_paths
        mock_get_storage_paths.return_value = [fake_instance_disks, []]
        mock_get_instance_dir = self._vmops._pathutils.get_instance_dir
        mock_get_instance_dir.return_value = fake_instance_dir

        ret_val = self._vmops._get_instance_local_disks(
            mock.sentinel.instance_name)

        self.assertEqual(fake_local_disks, ret_val)
    def test_get_scoped_flavor_extra_specs(self):
        # The flavor extra spect dict contains only string values.
        fake_total_bytes_sec = '8'

        mock_instance = fake_instance.fake_instance_obj(self.context)
        mock_instance.flavor.extra_specs = {
            'spec_key': 'spec_value',
            'quota:total_bytes_sec': fake_total_bytes_sec}

        ret_val = self._vmops._get_scoped_flavor_extra_specs(
            mock_instance, scope='quota')

        # Only specs in the requested scope are returned, scope prefix dropped.
        expected_specs = {
            'total_bytes_sec': fake_total_bytes_sec
        }
        self.assertEqual(expected_specs, ret_val)
    @mock.patch.object(vmops.hardware, 'numa_get_constraints')
    @mock.patch.object(vmops.objects.ImageMeta, 'from_dict')
    def _check_get_instance_vnuma_config_exception(self, mock_from_dict,
                                                   mock_get_numa, numa_cells):
        """Helper: the given NUMA cell layout must be rejected as unacceptable."""
        flavor = {'extra_specs': {}}
        mock_instance = mock.MagicMock(flavor=flavor)
        image_meta = mock.MagicMock(properties={})
        mock_get_numa.return_value.cells = numa_cells

        self.assertRaises(exception.InstanceUnacceptable,
                          self._vmops._get_instance_vnuma_config,
                          mock_instance, image_meta)
    def test_get_instance_vnuma_config_bad_cpuset(self):
        """Cells with differing cpuset sizes are rejected."""
        cell1 = mock.MagicMock(cpuset=set([0]), memory=1024, cpu_pinning=None)
        cell2 = mock.MagicMock(cpuset=set([1, 2]), memory=1024,
                               cpu_pinning=None)
        self._check_get_instance_vnuma_config_exception(
            numa_cells=[cell1, cell2])
    def test_get_instance_vnuma_config_bad_memory(self):
        """Cells with differing memory sizes are rejected."""
        cell1 = mock.MagicMock(cpuset=set([0]), memory=1024, cpu_pinning=None)
        cell2 = mock.MagicMock(cpuset=set([1]), memory=2048, cpu_pinning=None)
        self._check_get_instance_vnuma_config_exception(
            numa_cells=[cell1, cell2])
    def test_get_instance_vnuma_config_cpu_pinning_requested(self):
        """Requesting CPU pinning is rejected."""
        cell = mock.MagicMock(cpu_pinning={})
        self._check_get_instance_vnuma_config_exception(numa_cells=[cell])
    @mock.patch.object(vmops.hardware, 'numa_get_constraints')
    @mock.patch.object(vmops.objects.ImageMeta, 'from_dict')
    def _check_get_instance_vnuma_config(
            self, mock_from_dict, mock_get_numa, numa_topology=None,
            expected_mem_per_numa=None, expected_cpus_per_numa=None):
        """Helper: check the (memory, cpus) per-NUMA-node values returned."""
        mock_instance = mock.MagicMock()
        image_meta = mock.MagicMock()
        mock_get_numa.return_value = numa_topology

        result_memory_per_numa, result_cpus_per_numa = (
            self._vmops._get_instance_vnuma_config(mock_instance, image_meta))

        self.assertEqual(expected_cpus_per_numa, result_cpus_per_numa)
        self.assertEqual(expected_mem_per_numa, result_memory_per_numa)
    def test_get_instance_vnuma_config(self):
        """A uniform two-cell topology yields per-node cpu/memory values."""
        cell1 = mock.MagicMock(cpuset=set([0]), memory=2048, cpu_pinning=None)
        cell2 = mock.MagicMock(cpuset=set([1]), memory=2048, cpu_pinning=None)
        mock_topology = mock.MagicMock(cells=[cell1, cell2])
        self._check_get_instance_vnuma_config(numa_topology=mock_topology,
                                              expected_cpus_per_numa=1,
                                              expected_mem_per_numa=2048)
    def test_get_instance_vnuma_config_no_topology(self):
        """No NUMA topology yields (None, None)."""
        self._check_get_instance_vnuma_config()
    @mock.patch.object(vmops.VMOps, '_get_vif_driver')
    def test_unplug_vifs(self, mock_get_vif_driver):
        """unplug_vifs() calls the vif driver's unplug for every VIF."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        fake_vif1 = {'id': mock.sentinel.ID1,
                     'type': mock.sentinel.vif_type1}
        fake_vif2 = {'id': mock.sentinel.ID2,
                     'type': mock.sentinel.vif_type2}
        mock_network_info = [fake_vif1, fake_vif2]
        fake_vif_driver = mock.MagicMock()
        mock_get_vif_driver.return_value = fake_vif_driver
        calls = [mock.call(mock_instance, fake_vif1),
                 mock.call(mock_instance, fake_vif2)]

        self._vmops.unplug_vifs(mock_instance,
                                network_info=mock_network_info)
        fake_vif_driver.unplug.assert_has_calls(calls)
    @mock.patch.object(vmops.VMOps, '_get_vif_driver')
    def test_post_start_vifs(self, mock_get_vif_driver):
        """post_start_vifs() calls the vif driver's post_start for every VIF."""
        mock_instance = fake_instance.fake_instance_obj(self.context)
        fake_vif1 = {'id': mock.sentinel.ID1,
                     'type': mock.sentinel.vif_type1}
        fake_vif2 = {'id': mock.sentinel.ID2,
                     'type': mock.sentinel.vif_type2}
        mock_network_info = [fake_vif1, fake_vif2]
        fake_vif_driver = mock.MagicMock()
        mock_get_vif_driver.return_value = fake_vif_driver
        calls = [mock.call(mock_instance, fake_vif1),
                 mock.call(mock_instance, fake_vif2)]

        self._vmops.post_start_vifs(mock_instance,
                                    network_info=mock_network_info)
        fake_vif_driver.post_start.assert_has_calls(calls)
    def _mock_get_port_settings(self, logging_port, interactive_port):
        """Helper: resolve serial port settings from image properties.

        Ports outside [1, 2] are expected to raise
        ImageSerialPortNumberInvalid; valid ones return the settings dict.
        """
        mock_image_port_settings = {
            constants.IMAGE_PROP_LOGGING_SERIAL_PORT: logging_port,
            constants.IMAGE_PROP_INTERACTIVE_SERIAL_PORT: interactive_port
        }
        mock_image_meta = {'properties': mock_image_port_settings}

        acceptable_ports = [1, 2]
        expected_exception = not (logging_port in acceptable_ports and
                                  interactive_port in acceptable_ports)
        if expected_exception:
            self.assertRaises(exception.ImageSerialPortNumberInvalid,
                              self._vmops._get_image_serial_port_settings,
                              mock_image_meta)
        else:
            return self._vmops._get_image_serial_port_settings(
                mock_image_meta)
    def test_get_image_serial_port_settings(self):
        """Distinct logging/interactive ports map to RO/RW port types."""
        logging_port = 1
        interactive_port = 2

        ret_val = self._mock_get_port_settings(logging_port, interactive_port)

        expected_serial_ports = {
            logging_port: constants.SERIAL_PORT_TYPE_RO,
            interactive_port: constants.SERIAL_PORT_TYPE_RW,
        }

        self.assertEqual(expected_serial_ports, ret_val)
    def test_get_image_serial_port_settings_exception(self):
        """An out-of-range serial port number raises."""
        self._mock_get_port_settings(1, 3)
    def test_get_image_serial_port_settings_single_port(self):
        """The same port used for both roles ends up read-write."""
        interactive_port = 1

        ret_val = self._mock_get_port_settings(interactive_port,
                                               interactive_port)

        expected_serial_ports = {
            interactive_port: constants.SERIAL_PORT_TYPE_RW
        }

        self.assertEqual(expected_serial_ports, ret_val)
@mock.patch.object(vmops.VMOps, '_check_vtpm_requirements')
@mock.patch.object(vmops.VMOps, '_feature_requested')
@mock.patch.object(vmops.VMOps, '_create_fsk')
@mock.patch.object(pdk.PDK, 'create_pdk')
def _test_configure_secure_vm(self, mock_create_pdk, mock_create_fsk,
mock_feature_requested,
mock_check_vtpm_requirements,
requires_shielded, requires_encryption):
instance = mock.MagicMock()
mock_tmp_file = self._vmops._pathutils.temporary_file
mock_tmp_file.return_value.__enter__.side_effect = [
self._FAKE_FSK_FILE_PATH, self._FAKE_PDK_FILE_PATH]
mock_feature_requested.side_effect = [requires_shielded,
requires_encryption]
self._vmops._configure_secure_vm(mock.sentinel.context, instance,
mock.sentinel.image_meta,
mock.sentinel.secure_boot_enabled)
expected_calls = [mock.call(instance,
mock.sentinel.image_meta,
constants.IMAGE_PROP_VTPM_SHIELDED)]
if not requires_shielded:
expected_calls.append(mock.call(instance,
mock.sentinel.image_meta,
constants.IMAGE_PROP_VTPM))
mock_feature_requested.has_calls(expected_calls)
mock_check_vtpm_requirements.assert_called_with(instance,
mock.sentinel.image_meta, mock.sentinel.secure_boot_enabled)
self._vmops._vmutils.add_vtpm.assert_called_once_with(
instance.name, self._FAKE_PDK_FILE_PATH,
shielded=requires_shielded)
self._vmops._vmutils.provision_vm.assert_called_once_with(
instance.name, self._FAKE_FSK_FILE_PATH, self._FAKE_PDK_FILE_PATH)
    def test_configure_secure_vm_shielded(self):
        # Shielded scenario: only the shielded feature check should run;
        # the helper asserts the exact expected call sequence.
        self._test_configure_secure_vm(requires_shielded=True,
                                       requires_encryption=True)
    def test_configure_secure_vm_encryption(self):
        # Encryption without shielding: both feature checks should run.
        self._test_configure_secure_vm(requires_shielded=False,
                                       requires_encryption=True)
@mock.patch.object(vmops.VMOps, '_check_vtpm_requirements')
@mock.patch.object(vmops.VMOps, '_feature_requested')
def test_configure_regular_vm(self, mock_feature_requested,
mock_check_vtpm_requirements):
mock_feature_requested.side_effect = [False, False]
self._vmops._configure_secure_vm(mock.sentinel.context,
mock.MagicMock(),
mock.sentinel.image_meta,
mock.sentinel.secure_boot_enabled)
self.assertFalse(mock_check_vtpm_requirements.called)
def _test_feature_requested(self, image_prop, image_prop_required):
mock_instance = mock.MagicMock()
mock_image_meta = {'properties': {image_prop: image_prop_required}}
feature_requested = image_prop_required == constants.REQUIRED
result = self._vmops._feature_requested(mock_instance,
mock_image_meta,
image_prop)
self.assertEqual(feature_requested, result)
    def test_vtpm_image_required(self):
        # An image property set to REQUIRED must report the feature as
        # requested.
        self._test_feature_requested(
            image_prop=constants.IMAGE_PROP_VTPM_SHIELDED,
            image_prop_required=constants.REQUIRED)
    def test_vtpm_image_disabled(self):
        # An image property set to DISABLED must not report the feature
        # as requested.
        self._test_feature_requested(
            image_prop=constants.IMAGE_PROP_VTPM_SHIELDED,
            image_prop_required=constants.DISABLED)
def _test_check_vtpm_requirements(self, os_type='windows',
secure_boot_enabled=True,
guarded_host=True):
mock_instance = mock.MagicMock()
mock_image_meta = {'properties': {'os_type': os_type}}
guarded_host = self._vmops._hostutils.is_host_guarded.return_value
if (not secure_boot_enabled or not guarded_host or
os_type not in os_win_const.VTPM_SUPPORTED_OS):
self.assertRaises(exception.InstanceUnacceptable,
self._vmops._check_vtpm_requirements,
mock_instance,
mock_image_meta,
secure_boot_enabled)
else:
self._vmops._check_vtpm_requirements(mock_instance,
mock_image_meta,
secure_boot_enabled)
    def test_vtpm_requirements_all_satisfied(self):
        # Defaults (supported OS, secure boot on, guarded host): no error.
        self._test_check_vtpm_requirements()
    def test_vtpm_requirement_no_secureboot(self):
        # vTPM requires secure boot; expect InstanceUnacceptable without it.
        self._test_check_vtpm_requirements(secure_boot_enabled=False)
    def test_vtpm_requirement_not_supported_os(self):
        # An os_type outside VTPM_SUPPORTED_OS must be rejected.
        self._test_check_vtpm_requirements(
            os_type=mock.sentinel.unsupported_os)
    def test_vtpm_requirement_host_not_guarded(self):
        # A host that is not guarded cannot run vTPM-enabled instances.
        self._test_check_vtpm_requirements(guarded_host=False)
@mock.patch.object(vmops.VMOps, '_get_fsk_data')
def test_create_fsk(self, mock_get_fsk_data):
mock_instance = mock.MagicMock()
fsk_pairs = mock_get_fsk_data.return_value
self._vmops._create_fsk(mock_instance, mock.sentinel.fsk_filename)
mock_get_fsk_data.assert_called_once_with(mock_instance)
self._vmops._vmutils.populate_fsk.assert_called_once_with(
mock.sentinel.fsk_filename, fsk_pairs)
def _test_get_fsk_data(self, metadata, instance_name,
expected_fsk_pairs=None):
mock_instance = mock.MagicMock()
mock_instance.metadata = metadata
mock_instance.hostname = instance_name
result = self._vmops._get_fsk_data(mock_instance)
self.assertEqual(expected_fsk_pairs, result)
    def test_get_fsk_data_no_computername(self):
        # Without an 'fsk:'-prefixed ComputerName entry the instance
        # hostname is used; non-prefixed keys ('TimeZone') are ignored.
        metadata = {'TimeZone': mock.sentinel.timezone}
        expected_fsk_pairs = {'@@ComputerName@@': mock.sentinel.instance_name}
        self._test_get_fsk_data(metadata,
                                mock.sentinel.instance_name,
                                expected_fsk_pairs)
    def test_get_fsk_data_with_computername(self):
        # 'fsk:'-prefixed metadata keys are mapped to @@Key@@ FSK
        # placeholders; ComputerName matches the hostname, so no error.
        metadata = {'fsk:ComputerName': mock.sentinel.instance_name,
                    'fsk:TimeZone': mock.sentinel.timezone}
        expected_fsk_pairs = {'@@ComputerName@@': mock.sentinel.instance_name,
                              '@@TimeZone@@': mock.sentinel.timezone}
        self._test_get_fsk_data(metadata,
                                mock.sentinel.instance_name,
                                expected_fsk_pairs)
def test_get_fsk_data_computername_exception(self):
mock_instance = mock.MagicMock()
mock_instance.metadata = {
'fsk:ComputerName': mock.sentinel.computer_name,
'fsk:TimeZone': mock.sentinel.timezone}
mock_instance.hostname = mock.sentinel.instance_name
self.assertRaises(exception.InstanceUnacceptable,
self._vmops._get_fsk_data,
mock_instance)
| {
"content_hash": "ca73d843616bb047268c6539af3a99e9",
"timestamp": "",
"source": "github",
"line_count": 2036,
"max_line_length": 79,
"avg_line_length": 47.65864440078585,
"alnum_prop": 0.6059484917502294,
"repo_name": "cloudbase/compute-hyperv",
"id": "755ace88fe9691615636603e39b4feacf95dcf09",
"size": "97636",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hyperv/tests/unit/test_vmops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "584437"
}
],
"symlink_target": ""
} |
from entities import entity
class ProgressBar(entity.Entity):
    """A horizontal bar entity rendering current/max as a colored strip,
    optionally overlaying a ' current/max' text label.
    """
    def __init__(self, x, y, width, max, color):
        super().__init__(' ', position=(x, y))
        self.max_value = max
        self.current_value = max
        self.width = width
        self.color = color
        self.x = x
        self.y = y
        self.show_text = True

    def draw(self, console):
        """Render the bar onto the console, one cell at a time."""
        filled = int(self.current_value / self.max_value * self.width)
        label = ' {}/{}'.format(int(self.current_value), self.max_value)
        for col in range(self.width):
            ch = label[col] if (self.show_text and col < len(label)) else ' '
            background = self.color if col <= filled else (0, 0, 0)
            console.draw_char(self.x + col, self.y, ch, bg=background)

    def handle_events(self, event):
        pass

    def update(self, time):
        pass
| {
"content_hash": "c132deaa8a5b26e3e9294850edff6549",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 73,
"avg_line_length": 25.714285714285715,
"alnum_prop": 0.49333333333333335,
"repo_name": "JoshuaSkelly/lunch-break-rl",
"id": "4385cc322f35ee1a312daef89978ae2b4b267b03",
"size": "900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/progressbar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "69311"
}
],
"symlink_target": ""
} |
'''The app module, containing the app factory function.'''
from flask import Flask, render_template
from NewsReader.settings import ProdConfig
from NewsReader.assets import assets
from NewsReader.extensions import (
bcrypt,
cache,
db,
login_manager,
migrate,
debug_toolbar,
)
from NewsReader import public, user
def create_app(config_object=ProdConfig):
    '''An application factory, as explained here:
    http://flask.pocoo.org/docs/patterns/appfactories/
    :param config_object: The configuration object to use.
    :returns: The configured Flask application instance.
    '''
    app = Flask(__name__)
    app.config.from_object(config_object)
    register_extensions(app)
    register_blueprints(app)
    register_errorhandlers(app)
    return app
def register_extensions(app):
    """Bind every Flask extension instance to the application."""
    for extension in (assets, bcrypt, cache, db, login_manager,
                      debug_toolbar):
        extension.init_app(app)
    # migrate additionally needs the SQLAlchemy instance.
    migrate.init_app(app, db)
    return None
def register_blueprints(app):
    """Attach the public and user blueprints to the application."""
    for blueprint in (public.views.blueprint, user.views.blueprint):
        app.register_blueprint(blueprint)
    return None
def register_errorhandlers(app):
    """Install handlers rendering the matching <code>.html templates."""
    def render_error(error):
        # HTTPExceptions carry a `code` attribute; anything else is a 500.
        code = getattr(error, 'code', 500)
        return render_template("{0}.html".format(code)), code
    for error_code in (401, 404, 500):
        app.errorhandler(error_code)(render_error)
    return None
| {
"content_hash": "4dd6c0a2d611532b9896e94c367560eb",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 73,
"avg_line_length": 26.818181818181817,
"alnum_prop": 0.6955932203389831,
"repo_name": "jrotich/NewsReader",
"id": "9f4a651d12dc0c709ded91776a6c362cee76eee1",
"size": "1499",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "NewsReader/app.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "8685"
},
{
"name": "JavaScript",
"bytes": "240856"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "25810"
}
],
"symlink_target": ""
} |
import sphinx_rtd_theme
# Sphinx configuration for the Armada documentation build.
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
# templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Armada'
copyright = u'2017, Armada Team'
author = u'Armada Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# NOTE(review): version/release are hard-coded here; presumably they should
# track the package version -- confirm when bumping releases.
# The short X.Y version.
version = u'0.2.0'
# The full version, including alpha/beta/rc tags.
release = u'0.2.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.  Requires the sphinx_rtd_theme package imported
# at the top of this file.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'Armadadoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Armada.tex', u'Armada Documentation',
     u'Armada Team', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'armada', u'Armada Documentation',
     [author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Armada', u'Armada Documentation',
     author, 'Armada', 'One line description of project.',
     'Miscellaneous'),
]
| {
"content_hash": "58ea83ea73804c16891a66553ce51bad",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 78,
"avg_line_length": 30.276119402985074,
"alnum_prop": 0.6600936652699039,
"repo_name": "theyer/armada",
"id": "e8d0e72ff0213a1a7e85a6cd93a543fd54f7cefe",
"size": "4784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "133909"
},
{
"name": "Shell",
"bytes": "222"
}
],
"symlink_target": ""
} |
from django.test import TestCase, Client
from django.shortcuts import resolve_url
from imager_profile.tests.test_model import UserFactory
from .test_photos import PhotoFactory, AlbumFactory
from imager_images.models import PRIVATE, PUBLIC
class LibraryImageTestCase(TestCase):
    """Library view: a logged-in owner sees both public and private photos."""

    def setUp(self):
        self.client = Client()
        self.user = UserFactory.create(
            username="user",
            password="password",
        )
        self.photo = PhotoFactory.create(
            owner=self.user.profile
        )
        self.private_photo = PhotoFactory.create(
            owner=self.user.profile,
            published=PRIVATE
        )
        self.client.post(resolve_url('auth_login'), {
            'username': self.user.username,
            'password': "password",
        })
        self.response = self.client.get(resolve_url('library'))

    def test_view_exists(self):
        """The library page is served successfully."""
        self.assertEqual(self.response.status_code, 200)

    def test_view_is_correct(self):
        """The rendered page is the library template."""
        self.assertContains(self.response, "<!-- LIBRARY -->")

    def test_view_contains_images(self):
        """Both the public and the private photo appear for their owner.

        Bug fix: this method was named ``view_contains_images`` (no
        ``test_`` prefix), so unittest never collected or ran it.
        """
        self.assertContains(self.response, self.photo.photo.url)
        self.assertContains(self.response, self.private_photo.photo.url)
class AlbumViewTestCase(TestCase):
    """Tests for the album detail view and its listing in the library."""
    def setUp(self):
        self.client = Client()
        self.user = UserFactory.create(
            username="user",
            password="password",
        )
        self.photo = PhotoFactory.create(
            owner=self.user.profile
        )
        self.photo.save()
        self.album = AlbumFactory.create(
            owner=self.user.profile
        )
        self.album.save()
        self.album.photos.add(self.photo)
        self.response = self.client.get(resolve_url('albums', album_id=self.album.id))
    def test_view_album_exists(self):
        # The album detail page is served successfully.
        self.assertEqual(self.response.status_code, 200)
    def test_view_album_model_is_correct(self):
        # Fixture sanity check: the photo really belongs to the album.
        self.assertTrue(self.photo in self.album.photos.all())
    def test_view_album_view_is_correct(self):
        # The album page lists the photo's title.
        self.assertContains(self.response, self.photo.title)
    def test_album_shows_in_library(self):
        # After logging in, the owner's library links to the album page.
        _ = self.client.post(resolve_url('auth_login'), {
            'username': self.user.username,
            'password': "password",
        })
        library_response = self.client.get(resolve_url('library'))
        self.assertContains(library_response, resolve_url('albums', album_id=self.album.id))
class PrivatePhotoViewTestCase(TestCase):
    """A private photo must stay hidden from anonymous visitors."""

    def setUp(self):
        self.client = Client()
        self.user = UserFactory.create()
        self.photo = PhotoFactory.create(
            owner=self.user.profile,
            published=PRIVATE,
        )

    def test_photo_page_404s(self):
        """The detail page of a private photo is not served anonymously."""
        response = self.client.get(
            resolve_url('photos_view', photo_id=self.photo.id))
        self.assertEqual(response.status_code, 404)

    def test_private_photo_does_not_shuffle_into_main_page(self):
        """The private photo never appears on the public homepage."""
        response = self.client.get(resolve_url('homepage'))
        self.assertNotContains(response, self.photo.photo.url)
class OwnPrivatePhotoViewTestcase(TestCase):
    """A private photo remains visible to the user who owns it."""

    def setUp(self):
        self.client = Client()
        self.user = UserFactory.create(
            username="skdfjlaksjd",
            password="password",
        )
        self.user.save()
        self.photo = PhotoFactory.create(
            owner=self.user.profile,
            published=PRIVATE,
        )
        self.photo.save()
        self.client.post(resolve_url('auth_login'), {
            'username': self.user.username,
            'password': "password"
        })

    def test_own_private_photo_appears(self):
        """The owner can open the detail page and see the image URL."""
        response = self.client.get(
            resolve_url('photos_view', photo_id=self.photo.id))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, self.photo.photo.url)
class PublicPhotoViewTestCase(TestCase):
    """A public photo is visible on its detail page and the homepage."""
    def setUp(self):
        self.client = Client()
        self.user = UserFactory.create(
            username="bob"
        )
        self.user.set_password("password")
        self.user.save()
        self.photo = PhotoFactory.create(
            owner=self.user.profile,
            published=PUBLIC,
        )
        self.photo.save()
        self.client.post(resolve_url('auth_login'), {
            'username': self.user.username,
            'password': "password"
        })
    def test_photo_page_exists(self):
        # The detail page is served and embeds the image URL.
        detail_response = self.client.get(resolve_url('photos_view', photo_id=self.photo.id))
        self.assertEqual(detail_response.status_code, 200)
        self.assertContains(detail_response, self.photo.photo.url)
    # def test_photo_exists(self):
    #     photo_response = self.client.get(self.photo.photo.url)
    #     self.assertEqual(photo_response.status_code, 200)
    def test_public_photo_shuffles_into_main_page(self):
        # Public photos are eligible for display on the homepage.
        front_page_response = self.client.get(resolve_url('homepage'))
        self.assertContains(front_page_response, self.photo.photo.url)
| {
"content_hash": "fdfebe4c8325be271505b8103d785178",
"timestamp": "",
"source": "github",
"line_count": 160,
"max_line_length": 93,
"avg_line_length": 33.85,
"alnum_prop": 0.6286927621861153,
"repo_name": "qwergram/imgur_clone",
"id": "e8e6c2b6a53f2bab0329f4377ac1a4f182222f79",
"size": "5431",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_images/tests/test_views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "251765"
},
{
"name": "HTML",
"bytes": "12022"
},
{
"name": "JavaScript",
"bytes": "43682"
},
{
"name": "Python",
"bytes": "51108"
},
{
"name": "Shell",
"bytes": "53"
}
],
"symlink_target": ""
} |
from symfit.api import *
import pkg_resources
# Resolve the installed distribution's version string; fall back to an
# empty string when symfit is not installed as a package (e.g. when run
# straight from a source checkout).
try:
    __version__ = pkg_resources.get_distribution('symfit').version
except pkg_resources.DistributionNotFound:
    __version__ = ''
| {
"content_hash": "6bb23cb28184589541cb9bb0168dad6a",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 66,
"avg_line_length": 26.142857142857142,
"alnum_prop": 0.7213114754098361,
"repo_name": "tBuLi/symfit",
"id": "35f2d005682a5bcadaee815b9031e4489ada0934",
"size": "267",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "symfit/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "392718"
}
],
"symlink_target": ""
} |
from m5.params import *
from m5.proxy import *
from Device import PioDevice
from Platform import Platform
class BaseGic(PioDevice):
    """Abstract base class for ARM Generic Interrupt Controller models."""
    type = 'BaseGic'
    abstract = True
    cxx_header = "dev/arm/base_gic.hh"
    platform = Param.Platform(Parent.any, "Platform this device is part of.")
class Pl390(BaseGic):
    """Model of the ARM PL390 GIC (distributor plus CPU interface)."""
    type = 'Pl390'
    cxx_header = "dev/arm/gic_pl390.hh"
    # Memory-mapped base addresses of the two register banks.
    dist_addr = Param.Addr(0x1f001000, "Address for distributor")
    cpu_addr = Param.Addr(0x1f000100, "Address for cpu")
    dist_pio_delay = Param.Latency('10ns', "Delay for PIO r/w to distributor")
    cpu_pio_delay = Param.Latency('10ns', "Delay for PIO r/w to cpu interface")
    int_latency = Param.Latency('10ns', "Delay for interrupt to get to CPU")
    it_lines = Param.UInt32(128, "Number of interrupt lines supported (max = 1020)")
| {
"content_hash": "a28695341be7a8222ba74faf82a20df2",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 84,
"avg_line_length": 34.208333333333336,
"alnum_prop": 0.6918392204628502,
"repo_name": "prodromou87/gem5",
"id": "0461758ed7183d48487f7cdfe54692e1707110e8",
"size": "2921",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "src/dev/arm/Gic.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "230559"
},
{
"name": "C",
"bytes": "919900"
},
{
"name": "C++",
"bytes": "10698271"
},
{
"name": "Emacs Lisp",
"bytes": "1969"
},
{
"name": "HTML",
"bytes": "136898"
},
{
"name": "Hack",
"bytes": "2489"
},
{
"name": "Java",
"bytes": "3096"
},
{
"name": "Makefile",
"bytes": "15428"
},
{
"name": "Perl",
"bytes": "33602"
},
{
"name": "Protocol Buffer",
"bytes": "3237"
},
{
"name": "Python",
"bytes": "3455795"
},
{
"name": "Shell",
"bytes": "51835"
},
{
"name": "TeX",
"bytes": "21806"
},
{
"name": "Visual Basic",
"bytes": "2884"
}
],
"symlink_target": ""
} |
import falcon
class HelloResource:
    """Falcon resource replying to GET with a plain-text greeting."""

    def on_get(self, req, resp):
        """Handles GET requests"""
        # 200 is already Falcon's default status; set it explicitly anyway.
        resp.status = falcon.HTTP_200
        resp.body = '\nHello Falcon!!\n'
# falcon.API instances are callable WSGI apps; `app` is what the WSGI
# server runs.
app = falcon.API()
# Resources are represented by long-lived class instances.
hello = HelloResource()
# The same resource instance handles both the '/' and '/hello' URL paths.
app.add_route('/', hello)
app.add_route('/hello', hello)
| {
"content_hash": "c1c10a96f96e830a05616dbd2aee50f5",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 67,
"avg_line_length": 28.647058823529413,
"alnum_prop": 0.6776180698151951,
"repo_name": "yosukesuzuki/falcon-gae-template",
"id": "290426a1343f47e5ebce331df77e21d5c0f521fe",
"size": "512",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2384"
}
],
"symlink_target": ""
} |
from fhirtordf.rdfsupport.namespaces import namespace_for
from i2b2model.metadata.i2b2conceptdimension import ConceptDimension
from rdflib import Graph, URIRef
from rdflib.namespace import split_uri
from i2fhirb2.fhir.fhirspecific import concept_path, concept_code
class FHIRConceptDimension(ConceptDimension):
    # Class-level RDF graph slot; defaults to None until assigned externally.
    graph: Graph = None
    def __init__(self, subject: URIRef, subject_name: str, base_path: str = '\\') -> None:
        """ FHIR wrapper for CommonDimension

        :param subject: URI of the subject
        :param subject_name: name of subject
        :param base_path: base path of items
        """
        # Derive the namespace prefix (upper-cased) and local code from
        # the subject URI, then build the concept path for the base class.
        ns, code = split_uri(subject)
        ns_prefix = namespace_for(ns).upper()
        super().__init__(ns_prefix, code, subject_name, concept_path(subject)[:-1].split('\\'), base_path)
    def path(self) -> str:
        # Full concept path: configured base path + subject-derived path.
        return self._base_path + self._subject_path
    def name_char_(self) -> str:
        # Display name as stored by the base class.
        return self._name
| {
"content_hash": "2a25f44c029f65a6af1ee0bb723100aa",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 106,
"avg_line_length": 35.25925925925926,
"alnum_prop": 0.6743697478991597,
"repo_name": "BD2KOnFHIR/i2FHIRb2",
"id": "a277aad991de077f45c60fc6a032fbfe73a6656f",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "i2fhirb2/fhir/fhirconceptdimension.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "263110"
}
],
"symlink_target": ""
} |
"""Script for listing top buildbot crashes."""
from __future__ import print_function
import collections
import contextlib
import datetime
import multiprocessing
import logging
import optparse
import os
import re
import sys
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import constants
from chromite.cbuildbot import manifest_version
from chromite.lib import cros_build_lib
from chromite.lib import parallel
def ConvertGoogleStorageURLToHttpURL(url):
  """Rewrite a gs:// URL into its browsable storage-sandbox equivalent."""
  http_prefix = 'http://sandbox.google.com/storage/'
  return url.replace('gs://', http_prefix)
class CrashTriager(object):
  """Helper class to manage crash triaging."""
  # Matches '.../<program>.<YYYYMMDD>....dmp.txt' and captures the program
  # name and the date portion of the crash dump filename.
  CRASH_PATTERN = re.compile(r'/([^/.]*)\.(\d+)[^/]*\.dmp\.txt$')
  # Captures the frame lines of the "Thread 0" (crashing thread) section.
  STACK_TRACE_PATTERN = re.compile(r'Thread 0 ((?:[^\n]+\n)*)')
  # Matches 'module!function' frames within a stack trace.
  FUNCTION_PATTERN = re.compile(r'\S+!\S+')
  def __init__(self, start_date, chrome_branch, all_programs, list_all, jobs):
    """Initialize the triager.

    Args:
      start_date: Ignore crashes that occurred before this datetime.
      chrome_branch: Chrome branch number to gather crashes for.
      all_programs: Include crashes from programs other than chrome.
      list_all: Print every crash rather than a first/last summary.
      jobs: Number of worker processes per pipeline stage.
    """
    self.start_date = start_date
    self.chrome_branch = chrome_branch
    self.crash_triage_queue = multiprocessing.Queue()
    self.stack_trace_queue = multiprocessing.Queue()
    # Maps (program, signature) -> list of (date, stack_len, url) tuples.
    self.stack_traces = collections.defaultdict(list)
    self.all_programs = all_programs
    self.list_all = list_all
    self.jobs = jobs
  def Run(self):
    """Run the crash triager, printing the most common stack traces."""
    # Three pipeline stages connected via queues: list crashes per bot,
    # download each crash report, then aggregate and print the traces
    # when the workers exit.
    with self._PrintStackTracesInBackground():
      with self._DownloadCrashesInBackground():
        with self._ProcessCrashListInBackground():
          pass
  def _GetGSPath(self, bot_id, build_config):
    """Get the Google Storage path where crashes are stored for a given bot.

    Args:
      bot_id: Gather crashes from this bot id.
      build_config: Configuration options for this bot.
    """
    if build_config['gs_path'] == cbuildbot_config.GS_PATH_DEFAULT:
      gsutil_archive = 'gs://chromeos-image-archive/' + bot_id
    else:
      gsutil_archive = build_config['gs_path']
    return gsutil_archive
  def _ListCrashesForBot(self, bot_id, build_config):
    """List all crashes for the specified bot.

    Example output line: [
      'gs://chromeos-image-archive/amd64-generic-full/R18-1414.0.0-a1-b537/' +
      'chrome.20111207.181520.2533.dmp.txt'
    ]

    Args:
      bot_id: Gather crashes from this bot id.
      build_config: Configuration options for this bot.
    """
    chrome_branch = self.chrome_branch
    gsutil_archive = self._GetGSPath(bot_id, build_config)
    pattern = '%s/R%s-**.dmp.txt' % (gsutil_archive, chrome_branch)
    # error_code_ok: gsutil exits non-zero when a bot has no crashes.
    out = cros_build_lib.RunCommand(['gsutil', 'ls', pattern],
                                    error_code_ok=True,
                                    redirect_stdout=True,
                                    redirect_stderr=True,
                                    print_cmd=False)
    if out.returncode == 0:
      return out.output.split('\n')
    return []
  def _ProcessCrashListForBot(self, bot_id, build_config):
    """Process crashes for a given bot.

    Args:
      bot_id: Gather crashes from this bot id.
      build_config: Configuration options for this bot.
    """
    for line in self._ListCrashesForBot(bot_id, build_config):
      m = self.CRASH_PATTERN.search(line)
      if m is None:
        continue
      program, crash_date = m.groups()
      if self.all_programs or program == 'chrome':
        crash_date_obj = datetime.datetime.strptime(crash_date, '%Y%m%d')
        if self.start_date <= crash_date_obj:
          self.crash_triage_queue.put((program, crash_date, line))
  @contextlib.contextmanager
  def _ProcessCrashListInBackground(self):
    """Create a worker process for processing crash lists."""
    with parallel.BackgroundTaskRunner(self._ProcessCrashListForBot,
                                       processes=self.jobs) as queue:
      # Only bots that run VM tests produce the crash dumps we triage.
      for bot_id, build_config in cbuildbot_config.config.iteritems():
        if build_config['vm_tests']:
          queue.put((bot_id, build_config))
      yield
  def _GetStackTrace(self, crash_report_url):
    """Retrieve a stack trace using gsutil cat.

    Args:
      crash_report_url: The URL where the crash is stored.
    """
    out = cros_build_lib.RunCommand(['gsutil', 'cat', crash_report_url],
                                    error_code_ok=True,
                                    redirect_stdout=True,
                                    redirect_stderr=True,
                                    print_cmd=False)
    return out
  def _DownloadStackTrace(self, program, crash_date, url):
    """Download a crash report, queuing up the stack trace info.

    Args:
      program: The program that crashed.
      crash_date: The date of the crash.
      url: The URL where the crash is stored.
    """
    out = self._GetStackTrace(url)
    # Only queue reports that downloaded successfully.
    if out.returncode == 0:
      self.stack_trace_queue.put((program, crash_date, url, out.output))
  @contextlib.contextmanager
  def _DownloadCrashesInBackground(self):
    """Create a worker process for downloading stack traces."""
    with parallel.BackgroundTaskRunner(self._DownloadStackTrace,
                                       queue=self.crash_triage_queue,
                                       processes=self.jobs):
      yield
  def _ProcessStackTrace(self, program, date, url, output):
    """Process a stack trace that has been downloaded.

    Args:
      program: The program that crashed.
      date: The date of the crash.
      url: The URL where the crash is stored.
      output: The content of the stack trace.
    """
    signature = 'uncategorized'
    m = self.STACK_TRACE_PATTERN.search(output)
    functions = []
    if m:
      trace = m.group(1)
      functions = self.FUNCTION_PATTERN.findall(trace)
    last_function = None
    # Use the first non-libc frame as the crash signature, annotated with
    # the libc function that led into it (when there is one).
    for f in functions:
      if not f.startswith('libc-'):
        signature = f
        if last_function:
          signature += '[%s]' % last_function
        break
      last_function = f.partition('!')[2]
    else:
      # Every frame was in libc; fall back to the topmost frame.
      if functions:
        signature = functions[0]
    stack_len = len(functions)
    self.stack_traces[(program, signature)].append((date, stack_len, url))
  def _PrintStackTraces(self):
    """Print all stack traces."""
    # Print header.
    if self.list_all:
      print('Crash count, program, function, date, URL')
    else:
      print('Crash count, program, function, first crash, last crash, URL')
    # Print details about stack traces, most frequent signature first.
    stack_traces = sorted(self.stack_traces.iteritems(),
                          key=lambda x: len(x[1]), reverse=True)
    for (program, signature), crashes in stack_traces:
      if self.list_all:
        for crash in sorted(crashes, reverse=True):
          crash_url = ConvertGoogleStorageURLToHttpURL(crash[2])
          output = (str(len(crashes)), program, signature, crash[0], crash_url)
          print(*output, sep=', ')
      else:
        first_date = min(x[0] for x in crashes)
        last_date = max(x[0] for x in crashes)
        crash_url = ConvertGoogleStorageURLToHttpURL(max(crashes)[2])
        output = (str(len(crashes)), program, signature, first_date, last_date,
                  crash_url)
        print(*output, sep=', ')
  @contextlib.contextmanager
  def _PrintStackTracesInBackground(self):
    # Single worker aggregates traces; the summary is printed on exit.
    with parallel.BackgroundTaskRunner(self._ProcessStackTrace,
                                       queue=self.stack_trace_queue,
                                       processes=1,
                                       onexit=self._PrintStackTraces):
      yield
def _GetChromeBranch():
  """Get the current Chrome branch."""
  # Read the branch number out of the checked-in ChromeOS version file.
  version_file = os.path.join(constants.SOURCE_ROOT, constants.VERSION_FILE)
  version_info = manifest_version.VersionInfo(version_file=version_file)
  return version_info.chrome_branch
def _CreateParser():
  """Generate and return the parser with all the options."""
  # Parse options
  usage = 'usage: %prog [options]'
  parser = optparse.OptionParser(usage=usage)
  # Main options
  parser.add_option('', '--days', dest='days', default=7, type='int',
                    help=('Number of days to look at for crash info.'))
  # NOTE: the --chrome_branch default is computed from the local source
  # checkout when the parser is created.
  parser.add_option('', '--chrome_branch', dest='chrome_branch',
                    default=_GetChromeBranch(),
                    help=('Chrome branch to look at for crash info.'))
  parser.add_option('', '--all_programs', action='store_true',
                    dest='all_programs', default=False,
                    help=('Show crashes in programs other than Chrome.'))
  parser.add_option('', '--list', action='store_true', dest='list_all',
                    default=False,
                    help=('List all stack traces found (not just one).'))
  parser.add_option('', '--jobs', dest='jobs', default=32, type='int',
                    help=('Number of processes to run in parallel.'))
  return parser
def main(argv):
  """Entry point: configure gsutil credentials, parse args, run triager."""
  # Setup boto config for gsutil.
  boto_config = os.path.abspath(os.path.join(constants.SOURCE_ROOT,
      'src/private-overlays/chromeos-overlay/googlestorage_account.boto'))
  if os.path.isfile(boto_config):
    os.environ['BOTO_CONFIG'] = boto_config
  else:
    # Accessing the crash buckets requires the private overlay checkout.
    print('Cannot find %s' % boto_config, file=sys.stderr)
    print('This function requires a private checkout.', file=sys.stderr)
    print('See http://goto/chromeos-building', file=sys.stderr)
    sys.exit(1)
  # Silence informational logging from the libraries used below.
  logging.disable(level=logging.INFO)
  parser = _CreateParser()
  (options, _) = parser.parse_args(argv)
  since = datetime.datetime.today() - datetime.timedelta(days=options.days)
  triager = CrashTriager(since, options.chrome_branch, options.all_programs,
                         options.list_all, options.jobs)
  triager.Run()
| {
"content_hash": "81bd47b8aa888245ffdb2aefacac3cff",
"timestamp": "",
"source": "github",
"line_count": 258,
"max_line_length": 79,
"avg_line_length": 37.06201550387597,
"alnum_prop": 0.6260196611587534,
"repo_name": "chadversary/chromiumos.chromite",
"id": "d1658e15580d3af2c68375415e8ad9219ee2c8d4",
"size": "9751",
"binary": false,
"copies": "2",
"ref": "refs/heads/fix-repo-mirror",
"path": "scripts/cros_list_buildbot_crashes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "85"
},
{
"name": "Python",
"bytes": "3652882"
},
{
"name": "Shell",
"bytes": "24031"
}
],
"symlink_target": ""
} |
"""
AVIPES={}
AVSERVERS=['jsmart.web.id', 'jabber.ru', 'xmpp.ru', 'jabbers.ru', 'xmpps.ru', 'qip.ru', 'talkonaut.com', 'jabbus.org', 'jabber.org','gtalk.com','jabber.cz','jabberon.ru','server.dom','linuxoids.net','jabber.kiev.ua','jabber.ufanet.ru','jabber.corbina.ru']
def order_unban_v(groupchat, jid):
iq = xmpp.Iq('set')
iq.setTo(groupchat)
iq.setID('ban'+str(random.randrange(1000, 9999)))
query = xmpp.Node('query')
query.setNamespace('http://jabber.org/protocol/muc#admin')
query.addChild('item', {'jid':jid, 'affiliation':'none'})
iq.addChild(node=query)
JCON.send(iq)
def order_ban_v(groupchat, jid):
iq = xmpp.Iq('set')
iq.setTo(groupchat)
iq.setID('ban'+str(random.randrange(1000, 9999)))
query = xmpp.Node('query')
query.setNamespace('http://jabber.org/protocol/muc#admin')
ban=query.addChild('item', {'jid':jid, 'affiliation':'outcast'})
ban.setTagData('reason', u'dicurigai menyerang dengan dengan metode wipe!')
iq.addChild(node=query)
JCON.send(iq)
def get_serv(serv):
if serv.count('@'):
serv=serv.split('@')[1]
if serv.count('/'):
serv=serv.split('/')[0]
return serv
def findPresenceItemV(node):
for p in [x.getTag('item') for x in node.getTags('x',namespace='http://jabber.org/protocol/muc#user')]:
if p != None:
return p
return None
def avipe_prs(prs):
ptype = prs.getType()
if ptype == 'unavailable' and prs.getStatusCode() == '303':
nick = prs.getNick()
fromjid = prs.getFrom()
groupchat = fromjid.getStripped()
afl=prs.getAffiliation()
role=prs.getRole()
avipe_join(groupchat, nick, afl, role)
def avipe_join(groupchat, nick, afl, role):
global AVIPES
if not AVIPES.has_key(groupchat):
return
if afl != 'none':
return
jid = get_true_jid(groupchat+'/'+nick)
if not jid or jid.count('@localhost'):
return
global INFO
ttime=int(time.time())
if ttime - INFO['start'] < 60:
return
if (ttime - AVIPES[groupchat]['ltime']) > 20:
AVIPES[groupchat]['ltime']=ttime
AVIPES[groupchat]['num']=0
AVIPES[groupchat]['jids']=[jid]
return
AVIPES[groupchat]['num']+=1
AVIPES[groupchat]['jids'].append(jid)
joined=AVIPES[groupchat]['jids']
global GROUPCHATS
if len(joined) > 2:
AVIPES[groupchat]['ltime']=ttime
x=len(joined)
if (get_serv(joined[x-2]) == get_serv(joined[x-1])) and (get_serv(joined[x-3]) == get_serv(joined[x-1])): #and joined[x-2] != joined[x-1]:
serv=get_serv(joined[x-2])
if not serv in AVSERVERS:
node='<item affiliation="outcast" jid="'+serv+u'"><reason>dicurigai menyerang dengan dengan metode wipe.</reason></item>'
node=xmpp.simplexml.XML2Node(unicode('<iq from="'+JID+'/'+RESOURCE+'" id="ban1" to="'+groupchat+'" type="set"><query xmlns="http://jabber.org/protocol/muc#admin">'+node+'</query></iq>').encode('utf8'))
JCON.send(node)
node=''
for nick in GROUPCHATS[groupchat].keys():
if get_serv(get_true_jid(groupchat+'/'+nick)) == serv and GROUPCHATS[groupchat][nick]['ishere']:
node+='<item role="none" nick="'+nick+u'"><reason>dicurigai menyerang dengan dengan metode wipe.</reason></item>'
if node:
node=xmpp.simplexml.XML2Node(unicode('<iq from="'+JID+'/'+RESOURCE+'" id="kick1" to="'+groupchat+'" type="set"><query xmlns="http://jabber.org/protocol/muc#admin">'+node+'</query></iq>').encode('utf8'))
JCON.send(node)
if not serv in AVSERVERS:
for nick in GROUPCHATS[groupchat].keys():
if user_level(groupchat+'/'+nick, groupchat) > 19:
#if GROUPCHATS[groupchat][nick]['status'] in [u'online',u'chat',u'away']:
msg(groupchat+'/'+nick, u'Peringatan! Server '+serv+u' masuk antrian daftar ban antivipe!')
if AVIPES[groupchat]['num'] > 4:
order_ban_v(groupchat, jid)
threading.Timer(60, order_unban_v,(groupchat, jid, )).start()
def avipe_call(type, source, parameters):
global AVIPES
PATH='dynamic/'+source[1]+'/antivipe.txt'
parameters=parameters.strip().lower()
if parameters:
if check_file(source[1],'antivipe.txt'):
if parameters=='off' or parameters=='0':# or parameters==u'вкл':
write_file(PATH, 'off')
AVIPES[source[1]]={'ltime':0, 'num':0, 'jids': []}
reply(type, source, u'fungsi antivipe dinon-aktifkan!')
elif parameters=='on' or parameters=='1':# or parameters==u'выкл':
write_file(PATH, 'on')
if AVIPES.has_key(source[1]):
del AVIPES[source[1]]
reply(type, source, u'fungsi antivipe diaktifkan!')
else:
reply(type, source, u'baca "help antivipe"!')
else:
if not AVIPES.has_key(source[1]):
reply(type, source, u'anda telah menon-aktifkan fungsi antivipe!')
else:
reply(type, source, u'anda telah mengaktifkan fungsi antivipe!')
def avipe_init(groupchat):
if check_file(groupchat,'antivipe.txt'):
if not read_file('dynamic/'+groupchat+'/antivipe.txt')=='off':
AVIPES[groupchat]={'ltime':0, 'num':0, 'jids': []}
#register_presence_handler(avipe_prs)
#register_join_handler(avipe_join)
#register_command_handler(avipe_call, 'antivipe', ['all', 'admin'], 20, 'aktif/non-aktif fungsi perlindungan terhadap vipe attacks.\nSetting default adalah OFF.', 'antivipe [<1/on/||0/off/]', ['antivipe on','antivipe off'])
#register_stage1_init(avipe_init)
"""
"""
global GROUPCHATS
if len(joined) > 2:
x=len(joined)
if (get_serv(joined[x-2]) == get_serv(joined[x-1])) and (get_serv(joined[x-3]) == get_serv(joined[x-1])): #and joined[x-2] != joined[x-1]:
serv=get_serv(joined[x-2])
if not serv in AVSERVERS:
node='<item affiliation="outcast" jid="'+serv+u'"><reason>Подозрение на вайп атаку.</reason></item>'
for nick in GROUPCHATS[groupchat].keys():
if get_serv(get_true_jid(groupchat+'/'+nick)) == serv and GROUPCHATS[groupchat][nick]['ishere']:
print nick,
node+='<item role="none" nick="'+nick+u'"><reason>Подозрение на вайп атаку.</reason></item>'
node=xmpp.simplexml.XML2Node(unicode('<iq from="'+USERNAME+'@'+SERVER+'/'+RESOURCE+'" id="ban1" to="'+groupchat+'" type="set"><query xmlns="http://jabber.org/protocol/muc#admin">'+node+'</query></iq>').encode('utf8'))
JCON.send(node)
for nick in GROUPCHATS[groupchat].keys():
if user_level(groupchat+'/'+nick, groupchat) > 19:
if GROUPCHATS[groupchat][nick]['status'] in [u'online',u'chat',u'away']:
msg(groupchat+'/'+nick, u'Внимание! Сервер '+serv+u' занесен в бан лист!')
"""
| {
"content_hash": "0400952c06a56540aa7990f737580eda",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 255,
"avg_line_length": 38.926829268292686,
"alnum_prop": 0.6527255639097744,
"repo_name": "alosh55/STORM-BOT",
"id": "025d38e17d09baa12e5f872dabd3d0cb4ad5fa60",
"size": "6531",
"binary": false,
"copies": "1",
"ref": "refs/heads/ver-5.2",
"path": "plugins/antivipe_plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "669053"
}
],
"symlink_target": ""
} |
"""Provide some default values/objects"""
from pathlib import Path
from typing import ClassVar
from diot import Diot
from simpleconf import Config
import uvloop
from xqute import logger as xqute_logger, JobErrorStrategy
# turn xqute's logger off
xqute_logger.setLevel(100)
xqute_logger.removeHandler(xqute_logger.handlers[0])
uvloop.install()
LOGGER_NAME = 'main'
DEFAULT_CONFIG_FILES = (Path('~/.pipen.toml'), './.pipen.toml', 'PIPEN.osenv')
DEFAULT_CONFIG = Diot(
    # Default logging level for the pipeline
    loglevel='debug',
    # The cache option, True/False/export
    cache=True,
    # Whether expand directory to check signature
    dirsig=1,
    # How to deal with the errors
    # retry, ignore, halt
    # halt to halt the whole pipeline, no submitting new jobs
    # terminate to just terminate the job itself
    error_strategy=JobErrorStrategy.IGNORE,
    # How many times to retry to jobs once error occurs
    num_retries=3,
    # How many jobs of a process to run concurrently
    # NOTE(review): the previous comment here ("The directory to export the
    # output files") did not match this key.
    forks=1,
    # Default shell/language
    lang='bash',
    # How many jobs to be submitted in a batch
    submission_batch=8,
    # The working directory for the pipeline
    workdir='./.pipen',
    # template engine
    template='liquid',
    # template envs
    envs={},
    # scheduler
    scheduler='local',
    # scheduler options
    scheduler_opts={},
    # plugins
    plugins=None,
    # plugin opts
    plugin_opts={}
)
DEFAULT_CONSOLE_WIDTH: int = 80
DEFAULT_CONSOLE_WIDTH_SHIFT: int = 26
SCHEDULER_ENTRY_GROUP = 'pipen-sched'
TEMPLATE_ENTRY_GROUP = 'pipen-tpl'
class ProcInputType:
    """Types for process inputs"""
    VAR: ClassVar[str] = 'var'      # plain value input
    FILE: ClassVar[str] = 'file'    # single file-path input
    FILES: ClassVar[str] = 'files'  # list-of-file-paths input
class ProcOutputType:
    """Types for process outputs"""
    VAR: ClassVar[str] = 'var'    # plain value output
    FILE: ClassVar[str] = 'file'  # file-path output
config = Config() # pylint: disable=invalid-name
| {
"content_hash": "eafee9f14bad83fc4ad94822fc04bded",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 78,
"avg_line_length": 27.308823529411764,
"alnum_prop": 0.6855142703284868,
"repo_name": "pwwang/pyppl",
"id": "6d7a5d1c7c4aa859c4e12f51b3d5f0cc7e82ba18",
"size": "1857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pipen/defaults.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "234192"
}
],
"symlink_target": ""
} |
"""SCons.Tool.pdf
Common PDF Builder definition for various other Tool modules that use it.
Add an explicit action to run epstopdf to convert .eps files to .pdf
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/pdf.py issue-2856:2676:d23b7a2f45e8 2012/08/05 15:38:28 garyo"
import SCons.Builder
import SCons.Tool
# Module-level cache: the single shared PDF Builder instance, created
# lazily by generate() so all construction environments reuse it.
PDFBuilder = None

# Action that runs epstopdf to turn a .eps source into a .pdf target.
# NOTE(review): SCons.Action is not imported explicitly here — presumably it
# is made available by the SCons.Builder import; confirm.
EpsPdfAction = SCons.Action.Action('$EPSTOPDFCOM', '$EPSTOPDFCOMSTR')
def generate(env):
    """Install a shared, skeleton PDF builder into the environment.

    The builder is created at most once per process and stored in the
    module-global PDFBuilder; concrete tools (pdftex, dvipdf, ...) attach
    their suffix-specific actions to it later.

    Args:
        env: SCons construction environment to populate.
    """
    try:
        # If a PDF builder is already registered, leave it untouched.
        env['BUILDERS']['PDF']
    except KeyError:
        global PDFBuilder
        if PDFBuilder is None:
            # Empty action/emitter dicts: tools register their own
            # per-suffix actions via add_action()/add_emitter().
            PDFBuilder = SCons.Builder.Builder(action = {},
                                               source_scanner = SCons.Tool.PDFLaTeXScanner,
                                               prefix = '$PDFPREFIX',
                                               suffix = '$PDFSUFFIX',
                                               emitter = {},
                                               source_ext_match = None,
                                               single_source=True)
        env['BUILDERS']['PDF'] = PDFBuilder

    env['PDFPREFIX'] = ''
    env['PDFSUFFIX'] = '.pdf'
# put the epstopdf builder in this routine so we can add it after
# the pdftex builder so that one is the default for no source suffix
def generate2(env):
    """Attach the epstopdf action for .eps sources to the shared PDF builder.

    Must run after generate() (and after the pdftex tool) so that pdftex
    remains the default action for sources with no registered suffix.

    Args:
        env: SCons construction environment already holding the PDF builder.
    """
    bld = env['BUILDERS']['PDF']
    #bld.add_action('.ps', EpsPdfAction) # this is covered by direct Ghostcript action in gs.py
    bld.add_action('.eps', EpsPdfAction)

    # NOTE(review): SCons.Util is not imported explicitly in this module —
    # presumably pulled in via the SCons.Builder import; confirm.
    env['EPSTOPDF'] = 'epstopdf'
    env['EPSTOPDFFLAGS'] = SCons.Util.CLVar('')
    env['EPSTOPDFCOM'] = '$EPSTOPDF $EPSTOPDFFLAGS ${SOURCE} --outfile=${TARGET}'
def exists(env):
    """Report availability of this tool.

    Only a skeleton Builder is installed here, so whenever this Tool is
    referenced directly it counts as available, regardless of the system.

    Args:
        env: SCons construction environment (unused).

    Returns:
        int: always 1 (truthy).
    """
    return 1
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "266e4193a897d71902fad6ca03207b82",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 107,
"avg_line_length": 39.32051282051282,
"alnum_prop": 0.6521030322791,
"repo_name": "angad/libjingle-mac",
"id": "beae6dd2a39186bcc6e5a48d375b9b6cdc352767",
"size": "3067",
"binary": false,
"copies": "14",
"ref": "refs/heads/master",
"path": "scons-2.2.0/build/lib/SCons/Tool/pdf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "2015946"
},
{
"name": "C++",
"bytes": "9306077"
},
{
"name": "Objective-C",
"bytes": "28091"
},
{
"name": "Perl",
"bytes": "50523"
},
{
"name": "Python",
"bytes": "4283804"
},
{
"name": "Shell",
"bytes": "1445083"
}
],
"symlink_target": ""
} |
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module
class KeyHitTestCasesPage(page_module.Page):
  """Base page for key hit-test benchmarks: navigate, settle, then tap.

  Subclasses must provide TapButton(action_runner); RunPageInteractions
  invokes it 100 times to exercise element hit testing.
  """

  def __init__(self, url, page_set):
    super(KeyHitTestCasesPage, self).__init__(
      url=url, page_set=page_set, credentials_path = 'data/credentials.json')
    # These cases run with the mobile user agent.
    self.user_agent_type = 'mobile'

  def RunNavigateSteps(self, action_runner):
    super(KeyHitTestCasesPage, self).RunNavigateSteps(action_runner)
    # Give the page a moment to settle after navigation.
    action_runner.Wait(2)

  def RunPageInteractions(self, action_runner):
    action_runner.Wait(2)
    # Tap repeatedly to collect enough hit-test samples.
    for _ in xrange(100):
      self.TapButton(action_runner)
class PaperCalculatorHitTest(KeyHitTestCasesPage):
  """Taps the "5" key of a vulcanized paper-calculator demo page."""

  def __init__(self, page_set):
    super(PaperCalculatorHitTest, self).__init__(
      # Generated from https://github.com/zqureshi/paper-calculator
      # vulcanize --inline --strip paper-calculator/demo.html
      url='file://key_hit_test_cases/paper-calculator-no-rendering.html',
      page_set=page_set)

  def TapButton(self, action_runner):
    # Record the tap as a smooth interaction so telemetry attributes the
    # work to this action.
    interaction = action_runner.BeginInteraction(
        'Action_TapAction', is_smooth=True)
    # The key lives inside nested shadow roots, so it is located via /deep/
    # plus an explicit shadowRoot traversal.
    action_runner.TapElement(element_function='''
        document.querySelector(
          'body /deep/ #outerPanels'
        ).querySelector(
          '#standard'
        ).shadowRoot.querySelector(
          'paper-calculator-key[label="5"]'
        )''')
    interaction.End()
class KeyHitTestCasesPageSet(page_set_module.PageSet):
  """Page set grouping the key hit-test cases (mobile user agent)."""

  def __init__(self):
    super(KeyHitTestCasesPageSet, self).__init__(
      user_agent_type='mobile')
    self.AddUserStory(PaperCalculatorHitTest(self))
| {
"content_hash": "14b76ce50647a7b306f1f6b4097bb654",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 31.980392156862745,
"alnum_prop": 0.6860821581851625,
"repo_name": "sgraham/nope",
"id": "6f39d948e182a646149cca3a7b50d6fd381c453c",
"size": "1794",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/perf/page_sets/key_hit_test_cases.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "39967"
},
{
"name": "C",
"bytes": "4061434"
},
{
"name": "C++",
"bytes": "279546186"
},
{
"name": "CMake",
"bytes": "27212"
},
{
"name": "CSS",
"bytes": "919339"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "15989749"
},
{
"name": "Java",
"bytes": "7541683"
},
{
"name": "JavaScript",
"bytes": "32372588"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "40513"
},
{
"name": "Objective-C",
"bytes": "1584184"
},
{
"name": "Objective-C++",
"bytes": "8249988"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "169060"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427339"
},
{
"name": "Python",
"bytes": "8346306"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "844553"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
} |
from unittest import mock
from airflow.callbacks.callback_requests import CallbackRequest
from airflow.configuration import conf
from airflow.executors.local_executor import LocalExecutor
from airflow.executors.local_kubernetes_executor import LocalKubernetesExecutor
class TestLocalKubernetesExecutor:
    """Unit tests for LocalKubernetesExecutor's delegation to the wrapped
    LocalExecutor and KubernetesExecutor."""

    def test_queued_tasks(self):
        """queued_tasks is the merged view of both executors' queues."""
        local_executor_mock = mock.MagicMock()
        k8s_executor_mock = mock.MagicMock()
        local_kubernetes_executor = LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)

        local_queued_tasks = {('dag_id', 'task_id', '2020-08-30', 1): 'queued_command'}
        k8s_queued_tasks = {('dag_id_2', 'task_id_2', '2020-08-30', 2): 'queued_command'}
        local_executor_mock.queued_tasks = local_queued_tasks
        k8s_executor_mock.queued_tasks = k8s_queued_tasks

        expected_queued_tasks = {**local_queued_tasks, **k8s_queued_tasks}

        assert local_kubernetes_executor.queued_tasks == expected_queued_tasks
        assert len(local_kubernetes_executor.queued_tasks) == 2

    def test_running(self):
        """running is the union of both executors' running sets."""
        local_executor_mock = mock.MagicMock()
        k8s_executor_mock = mock.MagicMock()
        local_kubernetes_executor = LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)

        local_running_tasks = {('dag_id', 'task_id', '2020-08-30', 1)}
        k8s_running_tasks = {}
        local_executor_mock.running = local_running_tasks
        k8s_executor_mock.running = k8s_running_tasks

        assert local_kubernetes_executor.running == local_running_tasks.union(k8s_running_tasks)
        assert len(local_kubernetes_executor.running) == 1

    def test_slots_available(self):
        """slots_available reflects the local executor's parallelism."""
        local_executor = LocalExecutor()
        k8s_executor_mock = mock.MagicMock()
        local_kubernetes_executor = LocalKubernetesExecutor(local_executor, k8s_executor_mock)

        # Should be equal to Local Executor default parallelism.
        assert local_kubernetes_executor.slots_available == conf.getint('core', 'PARALLELISM')

    def test_kubernetes_executor_knows_its_queue(self):
        """Constructing the wrapper assigns the configured k8s queue name."""
        local_executor_mock = mock.MagicMock()
        k8s_executor_mock = mock.MagicMock()
        LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)

        assert k8s_executor_mock.kubernetes_queue == conf.get('local_kubernetes_executor', 'kubernetes_queue')

    def test_send_callback(self):
        """Callbacks are forwarded once to the configured callback sink."""
        local_executor_mock = mock.MagicMock()
        k8s_executor_mock = mock.MagicMock()
        local_k8s_exec = LocalKubernetesExecutor(local_executor_mock, k8s_executor_mock)
        local_k8s_exec.callback_sink = mock.MagicMock()

        callback = CallbackRequest(full_filepath="fake")
        local_k8s_exec.send_callback(callback)

        local_k8s_exec.callback_sink.send.assert_called_once_with(callback)
| {
"content_hash": "3b371d1156bc3739069204c6106457ed",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 110,
"avg_line_length": 43.921875,
"alnum_prop": 0.7018854500177872,
"repo_name": "danielvdende/incubator-airflow",
"id": "48d09ad99e5edbe054a978a7781f3a7a0d56dfbf",
"size": "3598",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/executors/test_local_kubernetes_executor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25785"
},
{
"name": "Dockerfile",
"bytes": "76693"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "164512"
},
{
"name": "JavaScript",
"bytes": "236992"
},
{
"name": "Jinja",
"bytes": "37155"
},
{
"name": "Jupyter Notebook",
"bytes": "2929"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "21824455"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "495567"
},
{
"name": "TypeScript",
"bytes": "326556"
}
],
"symlink_target": ""
} |
import datetime
import random
from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
# Canned replies; each may interpolate %(day_name)s with today's weekday name.
RESPONSES = [
    "Pretty good, all things considered. You?",
    "Doing alright. How are you?",
    "Pretty solid for a %(day_name)s, thanks. And you?",
    "Last night was crazy, but today is looking good. What about you?",
    "A little bored, if I'm honest. How're you?",
    "Up and down, but good overall. What about you?",
]
class HowAreYouPlugin(WillPlugin):
    """Reply to "how are you?" with a randomly chosen, day-aware pleasantry."""

    @hear("^how are you\?")
    def how_are_you(self, message):
        # Interpolate today's weekday name into a random canned reply.
        weekday = datetime.datetime.now().strftime("%A")
        chosen_reply = random.choice(RESPONSES) % {"day_name": weekday}
        # Mark the message so follow-up handlers can see we already answered.
        message.said_to_how_are_you = True
        self.say(chosen_reply, message=message)
# @hear("")
# def how_are_you_reply(self, message):
# print(message.analysis["history"][0].data)
# print(message.analysis["history"][1].data)
| {
"content_hash": "bbed2a0bd5ac4d5ddeee71b70b5036ba",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 108,
"avg_line_length": 33.096774193548384,
"alnum_prop": 0.6354775828460039,
"repo_name": "mike-love/will",
"id": "014685576ab9bd85f6f94e5c15056ff2979f5083",
"size": "1026",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "will/plugins/friendly/howareyou.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1409"
},
{
"name": "HTML",
"bytes": "2008"
},
{
"name": "Python",
"bytes": "316193"
},
{
"name": "Shell",
"bytes": "1940"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
def on_submit_email(job):
    """Send a confirmation email to the contact address of a submitted job.

    Args:
        job: job instance; its contact_email receives the rendered
            "email/jobsubmit.txt" template.
    """
    message = EmailMessage(
        "Thank you for posting your job!",
        render_to_string("email/jobsubmit.txt", {"job": job}),
        "[email protected]",
        [job.contact_email],
        headers={'Reply-To': '[email protected]'},
    )
    # Best effort: a mail failure should not break the submission flow.
    message.send(fail_silently=True)
def on_moderation_email(mo):
    """Notify the job contact about the outcome of a moderation decision.

    Args:
        mo: moderation object; ``mo.content_object`` is the job and
            ``mo.status`` encodes the decision (1 appears to mean approved).
    """
    # NOTE(review): the outer guard only admits status == 1, so the inner
    # `else` (the rejection email) is unreachable dead code. Confirm the
    # intended status values and widen/adjust the outer condition so
    # rejection notifications can actually be sent.
    if mo.status == 1:
        job = mo.content_object
        subject = "Your job listing has been %s" % mo.get_status_display().lower()
        if mo.status == 1:
            body = render_to_string("email/job_approved.txt", {"job": job})
        else:
            body = render_to_string("email/job_rejected.txt", {"job": job})
        from_email = "[email protected]"
        to = [job.contact_email]
        headers = {'Reply-To': '[email protected]'}
        email = EmailMessage(subject, body, from_email, to, headers=headers)
        email.send(fail_silently=True)
"content_hash": "01b4dae459e7254509c215e880427397",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 82,
"avg_line_length": 34.73529411764706,
"alnum_prop": 0.6215071972904318,
"repo_name": "sunlightlabs/transparencyjobs",
"id": "8eac0b1a815995c576c3bcf77a6446dd3be3e190",
"size": "1181",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "jobs/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12666"
}
],
"symlink_target": ""
} |
"""
Track class
"""
from copy import deepcopy
from os.path import basename
from datetime import timedelta
import gpxpy
import numpy as np
from rtree import index
from .segment import Segment
from .similarity import segment_similarity
DEFAULT_FILE_NAME_FORMAT = "%Y-%m-%d"
class Track(object):
"""Collection of segments
This is a higher level class, all methods of TrackToTrip library
can be called over this class
Attributes:
name: A string indicating the name of the track
segments: Array of TrackToTrip Segments
preprocessed: Boolean, true if it has been preprocessed
"""
def __init__(self, name, segments):
""" Constructor
When constructing a track it's not guaranteed that the segments
have their properties computed. Call preprocess method over this
class, or over each segment to guarantee it.
Args:
name (:obj:`str`)
segments(:obj:`list` of :obj:`Segment`)
"""
self.name = name
self.meta = []
self.segments = sorted(segments, key=lambda s: s.points[0].time)
def generate_name(self, name_format=DEFAULT_FILE_NAME_FORMAT):
""" Generates a name for the track
The name is generated based on the date of the first point of the
track, or in case it doesn't exist, "EmptyTrack"
Args:
name_format (str, optional): Name formar to give to the track, based on
its start time. Defaults to DEFAULT_FILE_NAME_FORMAT
Returns:
str
"""
if len(self.segments) > 0:
return self.segments[0].points[0].time.strftime(name_format) + ".gpx"
else:
return "EmptyTrack"
def remove_noise(self):
""" In-place removal of noise points
Returns:
:obj:`Track`: self
"""
for segment in self.segments:
segment.remove_noise()
return self
def smooth(self, strategy, noise):
""" In-place smoothing of segments
Returns:
:obj:`Track`: self
"""
print noise
for segment in self.segments:
segment.smooth(noise, strategy)
return self
def segment(self, eps, min_time):
"""In-place segmentation of segments
Spatio-temporal segmentation of each segment
The number of segments may increse after this step
Returns:
This track
"""
new_segments = []
for segment in self.segments:
segmented = segment.segment(eps, min_time)
for seg in segmented:
new_segments.append(Segment(seg))
self.segments = new_segments
return self
def simplify(self, eps, max_dist_error, max_speed_error, topology_only=False):
""" In-place simplification of segments
Args:
max_dist_error (float): Min distance error, in meters
max_speed_error (float): Min speed error, in km/h
topology_only: Boolean, optional. True to keep
the topology, neglecting velocity and time
accuracy (use common Douglas-Ramen-Peucker).
False (default) to simplify segments keeping
the velocity between points.
Returns:
This track
"""
for segment in self.segments:
segment.simplify(eps, max_dist_error, max_speed_error, topology_only)
return self
def infer_transportation_mode(self, clf, min_time):
"""In-place transportation mode inferring of segments
Returns:
This track
"""
for segment in self.segments:
segment.infer_transportation_mode(clf, min_time)
return self
    def copy(self):
        """Creates a deep copy of itself

        All segments and points are copied (via copy.deepcopy), so mutating
        the returned track never affects this instance.

        Returns:
            A Track object different from this instance
        """
        return deepcopy(self)
def to_trip(
self,
smooth,
smooth_strategy,
smooth_noise,
seg,
seg_eps,
seg_min_time,
simplify,
simplify_max_dist_error,
simplify_max_speed_error
):
"""In-place, transformation of a track into a trip
A trip is a more accurate depiction of reality than a
track.
For a track to become a trip it need to go through the
following steps:
+ noise removal
+ smoothing
+ spatio-temporal segmentation
+ simplification
At the end of these steps we have a less noisy, track
that has less points, but that holds the same information.
It's required that each segment has their metrics calculated
or has been preprocessed.
Args:
name: An optional string with the name of the trip. If
none is given, one will be generated by generateName
Returns:
This Track instance
"""
self.compute_metrics()
self.remove_noise()
print (smooth, seg, simplify)
if smooth:
self.compute_metrics()
self.smooth(smooth_strategy, smooth_noise)
if seg:
self.compute_metrics()
self.segment(seg_eps, seg_min_time)
if simplify:
self.compute_metrics()
self.simplify(0, simplify_max_dist_error, simplify_max_speed_error)
self.compute_metrics()
return self
def infer_transportation_modes(self, dt_threshold=10):
"""In-place transportation inferring of segments
Returns:
This track
"""
self.segments = [
segment.infer_transportation_mode(dt_threshold=dt_threshold)
for segment in self.segments
]
return self
# TODO
def infer_location(
self,
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
):
"""In-place location inferring of segments
Returns:
This track
"""
self.segments = [
segment.infer_location(
location_query,
max_distance,
google_key,
foursquare_client_id,
foursquare_client_secret,
limit
)
for segment in self.segments
]
return self
def to_json(self):
"""Converts track to a JSON serializable format
Returns:
Map with the name, and segments of the track.
"""
return {
'name': self.name,
'segments': [segment.to_json() for segment in self.segments],
'meta': self.meta
}
# TODO
    def merge_and_fit(self, track, pairings):
        """ Merges another track with this one, ordering the points based on a
        distance heuristic

        Args:
            track (:obj:`Track`): Track to merge with
            pairings: iterable of (self_segment_index, other_segment_index, _)
                tuples pairing segments of both tracks
        Returns:
            :obj:`Segment`: self
        """
        for (self_seg_index, track_seg_index, _) in pairings:
            self_s = self.segments[self_seg_index]
            ss_start = self_s.points[0]
            track_s = track.segments[track_seg_index]
            tt_start = track_s.points[0]
            tt_end = track_s.points[-1]
            # If the other segment's end is closer to our start than its own
            # start, it runs in the opposite direction: merge a reversed copy.
            d_start = ss_start.distance(tt_start)
            d_end = ss_start.distance(tt_end)
            if d_start > d_end:
                track_s = track_s.copy()
                track_s.points = list(reversed(track_s.points))
            self_s.merge_and_fit(track_s)
        return self
    def get_point_index(self, point):
        """ Gets the index of the first segment containing the point

        Args:
            point (:obj:`Point`)
        Returns:
            (int, int): Segment id and point index in that segment;
            (-1, -1) if the point belongs to no segment
        """
        for i, segment in enumerate(self.segments):
            # NOTE(review): this calls camelCase `getPointIndex`, unlike the
            # snake_case used elsewhere in this file — confirm against the
            # Segment class that this method name exists.
            idx = segment.getPointIndex(point)
            if idx != -1:
                return i, idx
        return -1, -1
def bounds(self, thr=0):
""" Gets the bounds of this segment
Returns:
(float, float, float, float): Bounds, with min latitude, min longitude,
max latitude and max longitude
"""
min_lat = float("inf")
min_lon = float("inf")
max_lat = -float("inf")
max_lon = -float("inf")
for segment in self.segments:
milat, milon, malat, malon = segment.bounds(thr=thr)
min_lat = min(milat, min_lat)
min_lon = min(milon, min_lon)
max_lat = max(malat, max_lat)
max_lon = max(malon, max_lon)
return min_lat, min_lon, max_lat, max_lon
def has_point(self, point):
""" Checks if a point exist in any of the segments
Args:
points (:obj:`Point`)
Returns:
bool
"""
s_ix, _ = self.get_point_index(point)
return s_ix != -1
    def similarity(self, track):
        """ Compares two tracks based on their topology

        This method compares the given track against this
        instance. It only verifies if given track is close
        to this one, not the other way arround

        Args:
            track (:obj:`Track`)
        Returns:
            Two-tuple with global similarity between tracks
            and an array the similarity between segments
        """
        # Spatial (rtree) index over this track's segments for fast
        # bounding-box intersection queries.
        idx = index.Index()
        i = 0
        for i, segment in enumerate(self.segments):
            idx.insert(i, segment.bounds(), obj=segment)
        final_siml = []
        final_diff = []
        for i, segment in enumerate(track.segments):
            # Candidate segments whose bounding boxes overlap this one.
            query = idx.intersection(segment.bounds(), objects=True)
            res_siml = []
            res_diff = []
            for result in query:
                siml, diff = segment_similarity(segment, result.object)
                res_siml.append(siml)
                res_diff.append((result.id, i, diff))
            if len(res_siml) > 0:
                # Keep only the best-matching candidate for this segment.
                final_siml.append(max(res_siml))
                final_diff.append(res_diff[np.argmax(res_siml)])
            else:
                # No overlapping segment at all: similarity zero.
                final_siml.append(0)
                final_diff.append([])
        return np.mean(final_siml), final_diff
def compute_metrics(self):
""" Computes metrics for every segment's point
See Segment.compute_metrics
Returns:
:obj:`Track`: Self
"""
for segment in self.segments:
segment.compute_metrics()
return self
    def to_gpx(self):
        """Converts track to a GPX format

        Builds the XML by hand (one <trkseg> per segment, one <trkpt> per
        point) rather than serializing through gpxpy.

        Returns:
            A string with the GPX/XML track
        """
        gpx_segments = []
        for segment in self.segments:
            gpx_points = []
            for point in segment.points:
                time = ''
                if point.time:
                    # Strip fractional seconds from the ISO timestamp.
                    iso_time = point.time.isoformat().split('.')[0]
                    time = '<time>%s</time>' % iso_time
                gpx_points.append(
                    u'<trkpt lat="%f" lon="%f">%s</trkpt>' % (point.lat, point.lon, time)
                )
            points = u'\n\t\t\t'.join(gpx_points)
            gpx_segments.append(u'\t\t<trkseg>\n\t\t\t%s\n\t\t</trkseg>' % points)
        segments = u'\t\n'.join(gpx_segments)
        content = [
            u'<?xml version="1.0" encoding="UTF-8"?>',
            u'<gpx xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://www.topografix.com/GPX/1/0" xsi:schemaLocation="http://www.topografix.com/GPX/1/0 http://www.topografix.com/GPX/1/0/gpx.xsd" version="1.0" creator="GatherMySteps">',
            u'\t<trk>',
            segments,
            u'\t</trk>',
            u'</gpx>'
        ]
        return u'\n'.join(content)
def timezone(self, timezone=0):
""" Sets the timezone of the entire track
Args:
timezone (int): Timezone hour delta
"""
tz_dt = timedelta(hours=timezone)
for segment in self.segments:
for point in segment.points:
point.time = point.time + tz_dt
return self
def to_life(self):
"""Converts track to LIFE format
"""
buff = "--%s\n" % self.segments[0].points[0].time.strftime("%Y_%m_%d")
# buff += "--" + day
# buff += "UTC+s" # if needed
def military_time(time):
""" Converts time to military time
Args:
time (:obj:`datetime.datetime`)
Returns:
str: Time in the format 1245 (12 hours and 45 minutes)
"""
return time.strftime("%H%M")
def stay(buff, start, end, place):
""" Creates a stay representation
Args:
start (:obj:`datetime.datetime` or str)
end (:obj:`datetime.datetime` or str)
place (:obj:`Location`)
Returns:
str
"""
if not isinstance(start, str):
start = military_time(start)
if not isinstance(end, str):
end = military_time(end)
return "%s\n%s-%s: %s" % (buff, start, end, place.label)
def trip(buff, segment):
""" Creates a trip representation
Args:
buff (str): buffer
segment (:obj:`Segment`)
Returns:
str: buffer and trip representation
"""
trip = "%s-%s: %s -> %s" % (
military_time(segment.points[0].time),
military_time(segment.points[-1].time),
segment.location_from.label,
segment.location_to.label
)
t_modes = segment.transportation_modes
if len(t_modes) == 1:
trip = "%s [%s]" % (trip, t_modes[0]['label'])
elif len(t_modes) > 1:
modes = []
for mode in t_modes:
trip_from = military_time(segment.points[mode['from']].time)
trip_to = military_time(segment.points[mode['to']].time)
modes.append(" %s-%s: [%s]" % (trip_from, trip_to, mode['label']))
trip = "%s\n%s" % (trip, "\n".join(modes))
return "%s\n%s" % (buff, trip)
last = len(self.segments) - 1
for i, segment in enumerate(self.segments):
if i == 0:
buff = stay(
buff,
"0000",
military_time(segment.points[0].time),
segment.location_from
)
buff = trip(buff, segment)
if i is last:
buff = stay(
buff,
military_time(segment.points[-1].time),
"2359",
segment.location_to
)
else:
next_seg = self.segments[i+1]
buff = stay(
buff,
military_time(segment.points[-1].time),
military_time(next_seg.points[0].time),
segment.location_to
)
return buff
@staticmethod
def from_gpx(file_path):
""" Creates a Track from a GPX file.
No preprocessing is done.
Arguments:
file_path (str): file path and name to the GPX file
Return:
:obj:`list` of :obj:`Track`
"""
gpx = gpxpy.parse(open(file_path, 'r'))
file_name = basename(file_path)
tracks = []
for i, track in enumerate(gpx.tracks):
segments = []
for segment in track.segments:
segments.append(Segment.from_gpx(segment))
if len(gpx.tracks) > 1:
name = file_name + "_" + str(i)
else:
name = file_name
tracks.append(Track(name, segments))
return tracks
@staticmethod
def from_json(json):
"""Creates a Track from a JSON file.
No preprocessing is done.
Arguments:
json: map with the keys: name (optional) and segments.
Return:
A track instance
"""
segments = [Segment.from_json(s) for s in json['segments']]
return Track(json['name'], segments).compute_metrics()
| {
"content_hash": "601b7bb9343b334d7eec0dec733d677b",
"timestamp": "",
"source": "github",
"line_count": 544,
"max_line_length": 252,
"avg_line_length": 30.683823529411764,
"alnum_prop": 0.5222861250898634,
"repo_name": "ruipgil/TrackToTrip",
"id": "b6e66b9159d867bbaa12aeaa9f0c948e227395b9",
"size": "16692",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tracktotrip/track.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "121973"
}
],
"symlink_target": ""
} |
from status import *
from views import *
from permissions import *
| {
"content_hash": "6c71c24fda32e4565a2315e8af10dde2",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 25,
"avg_line_length": 22.333333333333332,
"alnum_prop": 0.7761194029850746,
"repo_name": "rvanlaar/easy-transifex",
"id": "8e574ed268043a5d9c6b97c3c93f731b670a6a1e",
"size": "67",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/transifex/transifex/resources/tests/views/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "105585"
},
{
"name": "HTML",
"bytes": "365175"
},
{
"name": "JavaScript",
"bytes": "187021"
},
{
"name": "Python",
"bytes": "2303001"
},
{
"name": "Shell",
"bytes": "1358"
}
],
"symlink_target": ""
} |
from sphinx import version_info as sphinx_version_info
import os
import git

curr_path = os.path.abspath('.')

# The try/except is needed because of the subversion tool when it creates the master.
try:
    repo = git.Repo(curr_path)
    current_branch = repo.active_branch.name
except git.exc.InvalidGitRepositoryError:
    # Not inside a git checkout; fall back to an empty branch/version string.
    current_branch = ''

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.githubpages']

# BUG FIX: 'recommonmark' used to appear in the initial `extensions` list
# as well, so Sphinx >= 1.4 registered it twice. Register it only through
# the version check below.
if sphinx_version_info >= (1, 4):
    extensions += ['recommonmark']
else:
    # Older Sphinx cannot load recommonmark as an extension; hook the
    # Markdown parser in through source_parsers instead.
    from recommonmark.parser import CommonMarkParser
    source_parsers = {
        '.md': CommonMarkParser,
    }

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

source_suffix = ['.rst', '.md']

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'Industrial Training'
copyright = u'2020, ROS-Industrial'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = current_branch
# The full version, including alpha/beta/rc tags.
release = current_branch

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'README.md', 'exercise']

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

download_support = True

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = ['_themes',]

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
    #'canonical_url': '',
    'analytics_id': '',
    'logo_only': False,
    'display_version': True,
    'prev_next_buttons_location': 'bottom',
    'style_external_links': False,
    #'vcs_pageview_mode': '',
    # Toc options
    'collapse_navigation': True,
    'sticky_navigation': False,
    'navigation_depth': 4,
    'includehidden': True,
    'titles_only': False,
}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_context = {
    #"wiki_title": "ROS-Industrial Training Material",
    "display_github": True,
    "github_user": "ros-industrial",
    "github_repo": "industrial_training",
    "github_version": current_branch,
    "conf_py_path": "gh_pages/",
    "source_suffix": source_suffix,
    "css_files": ['_static/override.css'],
}

# Output file base name for HTML help builder.
htmlhelp_basename = 'IndustrialTrainingDocumentation'
| {
"content_hash": "1be72dfed7a6eaeab77e64b8460cc601",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 82,
"avg_line_length": 31.458333333333332,
"alnum_prop": 0.6988079470198676,
"repo_name": "ros-industrial/industrial_training",
"id": "c6b2afe1bb8784c9e1281ea92a6ddeb651c52e20",
"size": "4438",
"binary": false,
"copies": "1",
"ref": "refs/heads/foxy",
"path": "gh_pages/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1138828"
},
{
"name": "CMake",
"bytes": "340191"
},
{
"name": "CSS",
"bytes": "804"
},
{
"name": "HTML",
"bytes": "20499"
},
{
"name": "Makefile",
"bytes": "205"
},
{
"name": "Python",
"bytes": "120511"
},
{
"name": "Shell",
"bytes": "16663"
}
],
"symlink_target": ""
} |
from ambry.orm.database import BaseMigration
class Migration(BaseMigration):
    """Adds the ``c_valuetype`` column to the ``columns`` table."""

    def _migrate_sqlite(self, connection):
        # SQLite: the table lives in the default namespace.
        connection.execute('ALTER table columns ADD COLUMN c_valuetype VARCHAR')

    def _migrate_postgresql(self, connection):
        # PostgreSQL: tables are namespaced under the ambrylib schema.
        connection.execute('ALTER table ambrylib.columns ADD COLUMN c_valuetype VARCHAR')
| {
"content_hash": "6bdb73060dce622f7572c341659e6646",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 89,
"avg_line_length": 35.45454545454545,
"alnum_prop": 0.7307692307692307,
"repo_name": "CivicKnowledge/ambry",
"id": "12eceae304a5d357c9074fc3e9e622ec847d60a3",
"size": "417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ambry/orm/migrations/106_esb1143.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "GCC Machine Description",
"bytes": "1"
},
{
"name": "HTML",
"bytes": "94"
},
{
"name": "Jupyter Notebook",
"bytes": "43362"
},
{
"name": "Python",
"bytes": "1416810"
},
{
"name": "Ruby",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "13977"
}
],
"symlink_target": ""
} |
""" Deletes the loader's database. """
import sys
from memsql_loader.util import cli_utils, servers
from memsql_loader.util.command import Command
from memsql_loader.loader_db.storage import LoaderStorage
class ClearLoaderDb(Command):
@staticmethod
def configure(parser, subparsers):
subparser = subparsers.add_parser('clear-loader-db', help="Deletes the database containing MemSQL Loader's queued, running, and finished jobs.")
subparser.set_defaults(command=ClearLoaderDb)
subparser.add_argument('-f', '--force', help='Clear the loader database even if the MemSQL Loader server is running', action='store_true')
def run(self):
if not self.options.force:
if servers.is_server_running():
print 'Please stop any currently-running servers with stop-server before deleting the MemSQL Loader database.'
sys.exit(1)
prompt = 'Are you sure you want to delete the MemSQL Loader database?\nThe database contains queued, running, and finished jobs.'
if not cli_utils.confirm(prompt, default=False):
print 'Exiting.'
sys.exit(1)
LoaderStorage.drop_database()
print 'MemSQL Loader database deleted.'
| {
"content_hash": "eb0fed7244cccb67b56e522914338725",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 152,
"avg_line_length": 42.758620689655174,
"alnum_prop": 0.692741935483871,
"repo_name": "memsql/memsql-loader",
"id": "72b2a8e84c18d3a1c28b83e99a61cb1be1d4725d",
"size": "1240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memsql_loader/cli/clear_loader_db.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "3401"
},
{
"name": "Python",
"bytes": "208962"
},
{
"name": "Shell",
"bytes": "1822"
}
],
"symlink_target": ""
} |
import ray
import pytest
import sys
from ray.experimental import shuffle
def test_shuffle():
    """Single-node smoke test of the experimental shuffle."""
    try:
        shuffle.run()
    finally:
        # Always tear Ray down so later tests start from a clean state.
        ray.shutdown()
# https://github.com/ray-project/ray/pull/16408
@pytest.mark.skipif(sys.platform == "win32", reason="Failing on Windows.")
def test_shuffle_hang():
    """Shuffle under a constrained object store (regression for the PR above)."""
    try:
        shuffle.run(object_store_memory=1e9, num_partitions=200, partition_size=10e6)
    finally:
        # Always tear Ray down so later tests start from a clean state.
        ray.shutdown()
def test_shuffle_no_streaming():
    """Smoke test of the non-streaming shuffle code path."""
    try:
        shuffle.run(no_streaming=True)
    finally:
        # Always tear Ray down so later tests start from a clean state.
        ray.shutdown()
@pytest.mark.skip(reason="SIGBUS on CI.")
def test_shuffle_multi_node(ray_start_cluster):
    """Shuffle across a 4-node cluster (currently skipped: SIGBUS on CI)."""
    cluster = ray_start_cluster
    for _ in range(4):
        cluster.add_node(num_cpus=2, object_store_memory=1e9)
    shuffle.run(ray_address="auto", num_partitions=200, partition_size=10e6)
@pytest.mark.skip(reason="SIGBUS on CI.")
def test_shuffle_multi_node_no_streaming(ray_start_cluster):
    """Non-streaming shuffle across a 4-node cluster (skipped: SIGBUS on CI)."""
    cluster = ray_start_cluster
    for _ in range(4):
        cluster.add_node(num_cpus=2, object_store_memory=1e9)
    shuffle.run(
        ray_address="auto", num_partitions=200, partition_size=10e6, no_streaming=True
    )
if __name__ == "__main__":
    import os

    # PARALLEL_CI runs the file under pytest-xdist with process isolation
    # (--boxed); otherwise the tests run serially with verbose output.
    if os.environ.get("PARALLEL_CI"):
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
| {
"content_hash": "3e42c4bbcb4315e9eb866e5b7b9b50d3",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 86,
"avg_line_length": 24.526315789473685,
"alnum_prop": 0.642346208869814,
"repo_name": "ray-project/ray",
"id": "51178ef37aca76b36f37d54e41d8bc984e63b4c2",
"size": "1398",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/ray/tests/test_shuffle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds attendee tracking and an event-type discriminator to ChapterEvent.

    dependencies = [
        ('dashboard', '0011_auto_20160619_1714'),
    ]

    operations = [
        migrations.AddField(
            model_name='chapterevent',
            name='attendees',
            field=models.ManyToManyField(to='dashboard.Brother'),
        ),
        migrations.AddField(
            model_name='chapterevent',
            name='event_type',
            # Stored as a single character; choice b'0' is 'Chapter'.
            field=models.CharField(choices=[(b'0', b'Chapter'), (b'1', b'Service'), (b'2', b'Philanthropy')], default=0, max_length=1),
        ),
    ]
| {
"content_hash": "f9beec66aed73c4d3878eef1f7207b6e",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 135,
"avg_line_length": 28.17391304347826,
"alnum_prop": 0.5817901234567902,
"repo_name": "DLance96/ox-dashboard",
"id": "52432c41b694db9e24b0cd38403dda69ce97d54e",
"size": "721",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dashboard/migrations/0012_auto_20160621_0515.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "874"
},
{
"name": "HTML",
"bytes": "106067"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "169586"
},
{
"name": "Shell",
"bytes": "75"
}
],
"symlink_target": ""
} |
class InputHandler(object):
    '''
    Interface for handling all user input.

    lnedentry: Columns from dataframe
    checks: Indicates presence absence of data values
    rbtns: Indicates presence absence of data values
    verify: Indicates whether input should be verified
    session: Indicates session creation
    filename: Indicates file to load into program

    Note: the combo-box argument is named ``cbox`` but is stored on the
    instance as ``cboxs`` (pre-existing naming kept for compatibility).
    '''
    def __init__(
            self, name=None, tablename=None, lnedentry=None,
            checks=None, rbtns=None, cbox=None, verify=False, session=False,
            filename=None, timedata=None, covdata=None,
            foreignmergeddata=None):
        # BUG FIX: the dict parameters previously used mutable default
        # arguments ({}), which are shared across all instances constructed
        # without explicit values; build a fresh dict per instance instead.
        self.name = name
        self.tablename = tablename
        self.lnedentry = {} if lnedentry is None else lnedentry
        self.checks = {} if checks is None else checks
        self.cboxs = {} if cbox is None else cbox
        self.rbtns = {} if rbtns is None else rbtns
        self.verify = verify
        self.session = session
        self.filename = filename
        self.timedata = timedata
        self.covdata = covdata
        self.foreignmergeddata = foreignmergeddata
| {
"content_hash": "005d116a53b07e8f1ff26eb3b80adbb4",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 70,
"avg_line_length": 35.857142857142854,
"alnum_prop": 0.6404382470119522,
"repo_name": "bibsian/database-development",
"id": "e156bd20ef1737810d89b4bfff40e3e4561b8172",
"size": "1005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "poplerGUI/class_inputhandler.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1241917"
}
],
"symlink_target": ""
} |
"""Tests for http/websocket.py"""
import base64
import hashlib
import os
import pytest
from aiohttp import websocket, multidict, protocol, errors
from unittest import mock
@pytest.fixture()
def transport():
    """Opaque transport stand-in; the handshake only needs a mock object."""
    return mock.Mock()
@pytest.fixture()
def message():
    """Bare GET request message with empty headers for handshake tests."""
    headers = multidict.MultiDict()
    return protocol.RawRequestMessage(
        'GET', '/path', (1, 0), headers, [], True, None)
def gen_ws_headers(protocols=''):
    """Build client-side websocket upgrade headers with a random key.

    Args:
        protocols: optional protocol list string; when non-empty a
            SEC-WEBSOCKET-PROTOCOL header is appended.
    Returns:
        tuple: (list of (name, value) header pairs, generated key string)
    """
    nonce = base64.b64encode(os.urandom(16)).decode()
    headers = [
        ('UPGRADE', 'websocket'),
        ('CONNECTION', 'upgrade'),
        ('SEC-WEBSOCKET-VERSION', '13'),
        ('SEC-WEBSOCKET-KEY', nonce),
    ]
    if protocols:
        headers.append(('SEC-WEBSOCKET-PROTOCOL', protocols))
    return headers, nonce
def test_not_get(message, transport):
    """Handshake must reject non-GET request methods."""
    with pytest.raises(errors.HttpProcessingError):
        websocket.do_handshake('POST', message.headers, transport)
def test_no_upgrade(message, transport):
    """Handshake must reject requests with no UPGRADE header at all."""
    with pytest.raises(errors.HttpBadRequest):
        websocket.do_handshake(message.method, message.headers, transport)
def test_no_connection(message, transport):
    """CONNECTION header must be 'upgrade', not keep-alive."""
    message.headers.extend([('UPGRADE', 'websocket'),
                            ('CONNECTION', 'keep-alive')])
    with pytest.raises(errors.HttpBadRequest):
        websocket.do_handshake(message.method, message.headers, transport)
def test_protocol_version(message, transport):
    """SEC-WEBSOCKET-VERSION must be present and supported (13)."""
    # Missing version header entirely.
    message.headers.extend([('UPGRADE', 'websocket'),
                            ('CONNECTION', 'upgrade')])
    with pytest.raises(errors.HttpBadRequest):
        websocket.do_handshake(message.method, message.headers, transport)

    # Unsupported version value (headers accumulate in the MultiDict).
    message.headers.extend([('UPGRADE', 'websocket'),
                            ('CONNECTION', 'upgrade'),
                            ('SEC-WEBSOCKET-VERSION', '1')])
    with pytest.raises(errors.HttpBadRequest):
        websocket.do_handshake(message.method, message.headers, transport)
def test_protocol_key(message, transport):
    """SEC-WEBSOCKET-KEY must be present, valid base64, and 16 bytes long."""
    # Missing key entirely.
    message.headers.extend([('UPGRADE', 'websocket'),
                            ('CONNECTION', 'upgrade'),
                            ('SEC-WEBSOCKET-VERSION', '13')])
    with pytest.raises(errors.HttpBadRequest):
        websocket.do_handshake(message.method, message.headers, transport)

    # Key that is not valid base64.
    message.headers.extend([('UPGRADE', 'websocket'),
                            ('CONNECTION', 'upgrade'),
                            ('SEC-WEBSOCKET-VERSION', '13'),
                            ('SEC-WEBSOCKET-KEY', '123')])
    with pytest.raises(errors.HttpBadRequest):
        websocket.do_handshake(message.method, message.headers, transport)

    # Valid base64 but decodes to the wrong length (must be 16 bytes).
    sec_key = base64.b64encode(os.urandom(2))
    message.headers.extend([('UPGRADE', 'websocket'),
                            ('CONNECTION', 'upgrade'),
                            ('SEC-WEBSOCKET-VERSION', '13'),
                            ('SEC-WEBSOCKET-KEY', sec_key.decode())])
    with pytest.raises(errors.HttpBadRequest):
        websocket.do_handshake(message.method, message.headers, transport)
def test_handshake(message, transport):
    """A well-formed upgrade yields status 101 and the derived accept key."""
    hdrs, sec_key = gen_ws_headers()

    message.headers.extend(hdrs)
    status, headers, parser, writer, protocol = websocket.do_handshake(
        message.method, message.headers, transport)
    assert status == 101
    assert protocol is None

    # SEC-WEBSOCKET-ACCEPT is base64(sha1(key + magic GUID)) per RFC 6455.
    key = base64.b64encode(
        hashlib.sha1(sec_key.encode() + websocket.WS_KEY).digest())
    headers = dict(headers)
    assert headers['SEC-WEBSOCKET-ACCEPT'] == key.decode()
def test_handshake_protocol(message, transport):
    '''Tests if one protocol is returned by do_handshake'''
    proto = 'chat'

    # Client offers the same single protocol the server supports.
    message.headers.extend(gen_ws_headers(proto)[0])
    _, resp_headers, _, _, protocol = websocket.do_handshake(
        message.method, message.headers, transport,
        protocols=[proto])

    assert protocol == proto

    # also test if we reply with the protocol
    resp_headers = dict(resp_headers)
    assert resp_headers['SEC-WEBSOCKET-PROTOCOL'] == proto
def test_handshake_protocol_agreement(message, transport):
    '''Tests if the right protocol is selected given multiple'''
    # NOTE(review): the expected winner is 'worse_proto' despite the
    # variable name — selection appears to follow the client/server offered
    # order rather than a preference ranking; confirm against do_handshake.
    best_proto = 'worse_proto'
    wanted_protos = ['best', 'chat', 'worse_proto']
    server_protos = 'worse_proto,chat'
    message.headers.extend(gen_ws_headers(server_protos)[0])
    _, resp_headers, _, _, protocol = websocket.do_handshake(
        message.method, message.headers, transport,
        protocols=wanted_protos)

    assert protocol == best_proto
def test_handshake_protocol_unsupported(log, message, transport):
    '''Tests if a protocol mismatch handshake warns and returns None'''
    proto = 'chat'
    message.headers.extend(gen_ws_headers('test')[0])

    # The handshake still succeeds, but no protocol is agreed and a
    # warning about the mismatch is logged.
    with log('aiohttp.websocket') as ctx:
        _, _, _, _, protocol = websocket.do_handshake(
            message.method, message.headers, transport,
            protocols=[proto])

        assert protocol is None
    assert (ctx.records[-1].msg ==
            'Client protocols %r don’t overlap server-known ones %r')
| {
"content_hash": "39a38dc81e06dab5c1dd1b3825d41705",
"timestamp": "",
"source": "github",
"line_count": 146,
"max_line_length": 74,
"avg_line_length": 34.42465753424658,
"alnum_prop": 0.6335057699960207,
"repo_name": "elastic-coders/aiohttp",
"id": "6a59767fd68c46b8f835e6ffb9e8925c08e89bec",
"size": "5028",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/test_websocket_handshake.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4885"
},
{
"name": "Makefile",
"bytes": "2376"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "981759"
},
{
"name": "Shell",
"bytes": "550"
}
],
"symlink_target": ""
} |
import time
import numpy
from nearpy import Engine
from nearpy.hashes import RandomDiscretizedProjections, UniBucket
from nearpy.filters import NearestFilter, UniqueFilter
from nearpy.distances import EuclideanDistance
from nearpy.experiments import DistanceRatioExperiment, RecallPrecisionExperiment
# NOTE(review): time, UniBucket, UniqueFilter, EuclideanDistance and
# RecallPrecisionExperiment are imported but unused below.

# Set dimension and vector count for this experiment
dimension = 100
vector_count = 100000

# Create data set from two clusters: each cluster is a random centre plus
# small gaussian noise around it.
vectors = []
center = numpy.random.randn(dimension)
for index in range(vector_count/2):
    vector = center + 0.01 * numpy.random.randn(dimension)
    vectors.append(vector)
center = numpy.random.randn(dimension)
for index in range(vector_count/2):
    vector = center + 0.01 * numpy.random.randn(dimension)
    vectors.append(vector)

# Keep only the 20 nearest candidates per query (NearestFilter caps
# the result size).
nearest = NearestFilter(20)

# We will fill this array with all the engines we want to test
engines = []
print 'Creating engines...'

# We are going to test these bin widths
bin_widths = [ 0.01 * x for x in range(1,5)]
# Create engines for all configurations
for bin_width in bin_widths:
    # Use four random 1-dim discretized projections
    rdp1 = RandomDiscretizedProjections('rdp1', 4, bin_width)
    rdp2 = RandomDiscretizedProjections('rdp2', 4, bin_width)
    rdp3 = RandomDiscretizedProjections('rdp3', 4, bin_width)
    rdp4 = RandomDiscretizedProjections('rdp4', 4, bin_width)
    #ub1 = UniBucket('uni')
    # Create engine with this configuration
    #engine = Engine(dimension, lshashes=[rdp1, rdp2, rdp3, rdp4],
    #                vector_filters=[unique, nearest])
    engine = Engine(dimension, lshashes=[rdp1, rdp2, rdp3, rdp4],
                    vector_filters=[nearest])
    # Add engine to list of engines to evaluate
    engines.append(engine)

print 'Creating experiment and performing exact search...'

# Create experiment (looking for the 20 closest neighbours).
# The constructor performs exact search for evaluation.
# So the data set should not be too large for experiments.
exp = DistanceRatioExperiment(20, vectors, coverage_ratio=0.01)

print 'Performing experiment for all engines...'

# Perform experiment for all engines
result = exp.perform_experiment(engines)
| {
"content_hash": "b4b13ebfddc0e9491c2986f5dcd17179",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 81,
"avg_line_length": 33.59090909090909,
"alnum_prop": 0.7424447451511051,
"repo_name": "imsparsh/NearPy",
"id": "95458351d2cd53e2b8c951e89a4693363a316919",
"size": "2217",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "run_experiment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "183109"
}
],
"symlink_target": ""
} |
"""
WSGI config for myshop project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "myshop.settings")
application = get_wsgi_application()
| {
"content_hash": "fdd6f8217aa51f9eed705189c26a5f97",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 78,
"avg_line_length": 24.375,
"alnum_prop": 0.7692307692307693,
"repo_name": "EssaAlshammri/django-by-example",
"id": "05ef28bdb3e251d8f7f022dad3c16c5a9a22e4e5",
"size": "390",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "online-shop/myshop/myshop/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15532"
},
{
"name": "HTML",
"bytes": "48415"
},
{
"name": "JavaScript",
"bytes": "2952"
},
{
"name": "Python",
"bytes": "113892"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import urllib.parse
class AbandonConflictTest(BitcoinTestFramework):
    """Exercises wallet accounting around abandoned and conflicted
    transactions (abandontransaction, re-broadcast, double spends).

    NOTE(review): run_test returns immediately (see the TODO below), so the
    body is currently disabled for this codebase.
    """
    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False

    def setup_network(self):
        # Node 0 starts with a very low min relay fee so cheap txs relay.
        self.nodes = []
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"]))
        self.nodes.append(start_node(1, self.options.tmpdir, ["-debug","-logtimemicros"]))
        connect_nodes(self.nodes[0], 1)

    def run_test(self):
        return #TODO Relies on rawtxn values, fee sizes
        # Fund node 0 with three 10-coin outputs, confirmed by node 1.
        self.nodes[1].generate(100)
        sync_blocks(self.nodes)
        balance = self.nodes[0].getbalance()
        txA = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txB = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        txC = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
        sync_mempools(self.nodes)
        self.nodes[1].generate(1)
        sync_blocks(self.nodes)
        newbalance = self.nodes[0].getbalance()
        assert(balance - newbalance < Decimal("0.01")) #no more than fees lost. CT larger!
        balance = newbalance
        # Disconnect so node 0's subsequent txs stay local.
        url = urllib.parse.urlparse(self.nodes[1].url)
        self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1)))
        # Identify the 10btc outputs
        nA = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txA, 1)["vout"]) if vout["value"] == Decimal("10"))
        nB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txB, 1)["vout"]) if vout["value"] == Decimal("10"))
        nC = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txC, 1)["vout"]) if vout["value"] == Decimal("10"))
        inputs =[]
        # spend 10btc outputs from txA and txB
        inputs.append({"txid":txA, "vout":nA})
        inputs.append({"txid":txB, "vout":nB})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("14.99998")
        outputs[self.nodes[1].getnewaddress()] = Decimal("5")
        signed = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
        txAB1 = self.nodes[0].sendrawtransaction(signed["hex"])
        # Identify the 14.99998btc output
        nAB = next(i for i, vout in enumerate(self.nodes[0].getrawtransaction(txAB1, 1)["vout"]) if vout["value"] == Decimal("14.99998"))
        #Create a child tx spending AB1 and C
        inputs = []
        inputs.append({"txid":txAB1, "vout":nAB})
        inputs.append({"txid":txC, "vout":nC})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = Decimal("24.9996")
        signed2 = self.nodes[0].signrawtransaction(self.nodes[0].createrawtransaction(inputs, outputs))
        txABC2 = self.nodes[0].sendrawtransaction(signed2["hex"])
        # In mempool txs from self should increase balance from change
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("30") + Decimal("24.9996"))
        balance = newbalance
        # Restart the node with a higher min relay fee so the parent tx is no longer in mempool
        # TODO: redo with eviction
        # Note had to make sure tx did not have AllowFree priority
        stop_node(self.nodes[0],0)
        self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
        # Verify txs no longer in mempool
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        # Not in mempool txs from self should only reduce balance
        # inputs are still spent, but change not received
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        # Unconfirmed received funds that are not in mempool, also shouldn't show
        # up in unconfirmed balance
        unconfbalance = self.nodes[0].getunconfirmedbalance() + self.nodes[0].getbalance()
        assert_equal(unconfbalance, newbalance)
        # Also shouldn't show up in listunspent
        assert(not txABC2 in [utxo["txid"] for utxo in self.nodes[0].listunspent(0)])
        balance = newbalance
        # Abandon original transaction and verify inputs are available again
        # including that the child tx was also abandoned
        self.nodes[0].abandontransaction(txAB1)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("30"))
        balance = newbalance
        # Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
        stop_node(self.nodes[0],0)
        self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.00001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(self.nodes[0].getbalance(), balance)
        # But if its received again then it is unabandoned
        # And since now in mempool, the change is available
        # But its child tx remains abandoned
        self.nodes[0].sendrawtransaction(signed["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("20") + Decimal("14.99998"))
        balance = newbalance
        # Send child tx again so its unabandoned
        self.nodes[0].sendrawtransaction(signed2["hex"])
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("10") - Decimal("14.99998") + Decimal("24.9996"))
        balance = newbalance
        # Remove using high relay fee again
        stop_node(self.nodes[0],0)
        self.nodes[0]=start_node(0, self.options.tmpdir, ["-debug","-logtimemicros","-minrelaytxfee=0.0001"])
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance - Decimal("24.9996"))
        balance = newbalance
        # Create a double spend of AB1 by spending again from only A's 10 output
        # Mine double spend from node 1
        inputs =[]
        inputs.append({"txid":txA, "vout":nA})
        outputs = {}
        outputs[self.nodes[1].getnewaddress()] = Decimal("9.9999")
        tx = self.nodes[0].createrawtransaction(inputs, outputs)
        signed = self.nodes[0].signrawtransaction(tx)
        self.nodes[1].sendrawtransaction(signed["hex"])
        self.nodes[1].generate(1)
        connect_nodes(self.nodes[0], 1)
        sync_blocks(self.nodes)
        # Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
        newbalance = self.nodes[0].getbalance()
        assert_equal(newbalance, balance + Decimal("20"))
        balance = newbalance
        # There is currently a minor bug around this and so this test doesn't work. See Issue #7315
        # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
        # Don't think C's should either
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        newbalance = self.nodes[0].getbalance()
        #assert_equal(newbalance, balance - Decimal("10"))
        print("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
        print("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
        print(str(balance) + " -> " + str(newbalance) + " ?")
| {
"content_hash": "5dcec83c952653f78ec74ee95390c0dc",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 137,
"avg_line_length": 49.05161290322581,
"alnum_prop": 0.6435617519400236,
"repo_name": "tdudz/elements",
"id": "da0704d9cb6eee2b30b94bd0968e331587b0fc0a",
"size": "7819",
"binary": false,
"copies": "3",
"ref": "refs/heads/elements-0.14.1",
"path": "qa/rpc-tests/abandonconflict.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28453"
},
{
"name": "C",
"bytes": "967748"
},
{
"name": "C++",
"bytes": "5431887"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50622"
},
{
"name": "Java",
"bytes": "30291"
},
{
"name": "M4",
"bytes": "192882"
},
{
"name": "Makefile",
"bytes": "111336"
},
{
"name": "Objective-C",
"bytes": "2162"
},
{
"name": "Objective-C++",
"bytes": "7239"
},
{
"name": "Python",
"bytes": "1281205"
},
{
"name": "QMake",
"bytes": "756"
},
{
"name": "Shell",
"bytes": "60357"
}
],
"symlink_target": ""
} |
import os
import re
import numpy as np
from kitti.data import data_dir, get_drive_dir, get_inds
def get_video_dir(drive, color=False, right=False, **kwargs):
    """Return the path to a drive's image-frame directory.

    The KITTI raw layout numbers camera folders image_00..image_03:
    00/01 are the grayscale left/right cameras, 02/03 the color pair.
    """
    drive_dir = get_drive_dir(drive, **kwargs)
    camera_index = (2 if color else 0) + (1 if right else 0)
    return os.path.join(drive_dir, 'image_%02d' % camera_index, 'data')
def get_disp_dir(drive, color=False, **kwargs):
    """Return the path to a drive's precomputed disparity directory."""
    pair = '23' if color else '01'
    return os.path.join(get_drive_dir(drive, **kwargs), 'disp_' + pair, 'data')
def get_drive_inds(date='2011_09_26'):
    """Return the sorted drive numbers recorded on the given date."""
    date_dir = os.path.join(data_dir, date)
    pattern = re.compile("%s_drive_([0-9]{4})_sync" % date)
    matches = (pattern.match(name) for name in os.listdir(date_dir))
    return sorted(int(m.group(1)) for m in matches if m is not None)
def get_frame_inds(drive, **kwargs):
    """Return the frame indices available for a drive's video."""
    video_dir = get_video_dir(drive, **kwargs)
    return get_inds(video_dir)
def get_video_images(path, indices, ext='.png'):
    """Load the image files for the given frame indices from `path`."""
    # NOTE(review): scipy.ndimage.imread was deprecated and later removed
    # from SciPy; modern installs need imageio/PIL instead — confirm target
    # SciPy version before upgrading.
    import scipy.ndimage
    return [scipy.ndimage.imread(os.path.join(path, '%010d%s' % (i, ext)))
            for i in indices]
def get_video_odometry(oxts_path, indices, ext='.txt'):
    """Read OXTS odometry for the given frame indices.

    Args:
        oxts_path: path to a drive's `oxts` directory (containing a
            `data/` subdirectory of per-frame files and `timestamps.txt`).
        indices: iterable of integer frame indices to read.
        ext: extension of the per-frame odometry files.

    Returns:
        Array with one row per frame: the raw OXTS fields followed by the
        frame time in seconds relative to the first timestamp.
    """
    data_path = os.path.join(oxts_path, 'data')

    odometry = []
    for index in indices:
        filename = os.path.join(data_path, '%010d%s' % (index, ext))
        with open(filename, 'r') as f:
            line = f.readline().strip('\n')
            # BUG FIX: `map` returns a lazy iterator on Python 3, which made
            # `np.array(odometry)` an object array of map objects; build a
            # concrete list of floats instead.
            odometry.append([float(field) for field in line.split(' ')])

    # Timestamps carry nanosecond precision; drop the last three digits so
    # strptime's %f (microseconds) can parse them.
    import datetime
    with open(os.path.join(oxts_path, 'timestamps.txt'), 'r') as f:
        parse = lambda s: datetime.datetime.strptime(s[:-3], "%Y-%m-%d %H:%M:%S.%f")
        timestamps = [parse(line.strip('\n')) for line in f.readlines()]

    times = [(t - timestamps[0]).total_seconds() for t in timestamps]

    # NOTE(review): `times` has one entry per line of timestamps.txt while
    # `odometry` has one row per requested index — this assumes `indices`
    # covers every frame; confirm with callers.
    odometry = np.array(odometry)
    times = np.array(times).reshape(-1, 1)
    return np.concatenate([odometry, times], axis=1)
def odometry_to_positions(odometry):
    """Convert raw OXTS odometry rows to metric poses.

    Maps (lat, lon) in degrees onto a local metric tangent plane using a
    spherical-Earth approximation; altitude, orientation and time pass
    through unchanged.

    Args:
        odometry: array of shape (n, k) whose first six columns are
            lat, lon, alt, roll, pitch, yaw and whose last column is time.

    Returns:
        Array of shape (n, 7): rows of [x, y, alt, roll, pitch, yaw, time].
    """
    lat, lon, alt, roll, pitch, yaw = odometry.T[:6]

    R = 6378137  # Earth's radius in metres

    # Convert degrees to metres on a sphere of radius R. (An unreachable
    # `if 0:` branch holding an untested Mercator conversion was removed.)
    lat, lon = np.deg2rad(lat), np.deg2rad(lon)
    mx = R * lon * np.cos(lat)
    my = R * lat

    times = odometry.T[-1]
    return np.vstack([mx, my, alt, roll, pitch, yaw, times]).T
def get_position_transform(pos0, pos1, invert=False):
    """Return the 4x4 homogeneous transform relating two poses.

    Gives (T1 * T0^-1).T by default, or (T1^-1 * T0).T when `invert`.
    """
    def rot3d(axis, angle):
        # Rotation about `axis`: fill the 2x2 cosine/sine sub-block
        # spanned by the other two axes.
        keep = np.ones(3, dtype='bool')
        keep[axis] = 0
        i = np.nonzero(keep)[0]
        m = np.eye(3)
        c, s = np.cos(angle), np.sin(angle)
        m[i[0], i[0]] = c
        m[i[0], i[1]] = -s
        m[i[1], i[0]] = s
        m[i[1], i[1]] = c
        return m

    def pos_transform(pos):
        # Build the homogeneous [R|t] matrix for one pose.
        x, y, z, rx, ry, rz, _ = pos
        RT = np.eye(4)
        RT[:3, :3] = np.dot(np.dot(rot3d(0, rx), rot3d(1, ry)), rot3d(2, rz))
        RT[:3, 3] = [x, y, z]
        return RT

    T0 = pos_transform(pos0)
    T1 = pos_transform(pos1)
    if invert:
        return np.dot(np.linalg.inv(T1), T0).T
    return np.dot(T1, np.linalg.inv(T0)).T
def load_video(drive, **kwargs):
    """Load every frame of a drive's video as one array."""
    video_dir = get_video_dir(drive, **kwargs)
    frames = get_video_images(video_dir, get_inds(video_dir))
    return np.array(frames)
def load_stereo_frame(drive, ind, **kwargs):
    """Load the left/right image pair for a single frame."""
    pair = []
    for right in (False, True):
        path = get_video_dir(drive, right=right, **kwargs)
        pair.append(np.array(get_video_images(path, [ind])[0]))
    return pair[0], pair[1]
def load_stereo_video(drive, **kwargs):
    """Load a drive's full stereo video as an (n, 2, ...) array."""
    left_path = get_video_dir(drive, right=False, **kwargs)
    right_path = get_video_dir(drive, right=True, **kwargs)
    left_inds = get_inds(left_path)
    right_inds = get_inds(right_path)
    # Both cameras must provide exactly the same frame indices.
    assert (np.unique(left_inds) == np.unique(right_inds)).all()
    left_images = get_video_images(left_path, left_inds)
    right_images = get_video_images(right_path, right_inds)
    # BUG FIX: on Python 3, `zip` returns a lazy iterator and
    # `np.array(zip(...))` produces a useless 0-d object array;
    # materialize the (left, right) pairs first.
    return np.array(list(zip(left_images, right_images)))
def load_disp_frame(drive, ind, **kwargs):
    """Load a single precomputed disparity image."""
    disp_dir = get_disp_dir(drive, **kwargs)
    return np.asarray(get_video_images(disp_dir, [ind])[0])
def load_disp_video(drive, **kwargs):
    """Load a drive's full disparity video as one array."""
    disp_dir = get_disp_dir(drive, **kwargs)
    return np.asarray(get_video_images(disp_dir, get_inds(disp_dir)))
def load_video_odometry(drive, raw=False, **kwargs):
    """Load a drive's OXTS odometry.

    Args:
        drive: drive number, forwarded to get_drive_dir.
        raw: if True return the raw OXTS rows; otherwise convert them to
            metric poses via odometry_to_positions.

    Returns:
        Array of odometry rows (raw or converted).
    """
    drive_dir = get_drive_dir(drive, **kwargs)
    oxts_dir = os.path.join(drive_dir, 'oxts')
    # FIX: renamed from `data_dir`, which shadowed the module-level
    # `data_dir` imported from kitti.data.
    oxts_data_dir = os.path.join(oxts_dir, 'data')
    inds = get_inds(oxts_data_dir, ext='.txt')
    odometry = get_video_odometry(oxts_dir, inds)
    return odometry if raw else odometry_to_positions(odometry)
def animate_video(video, fig=None, ax=None):
    """Display `video` as a matplotlib animation.

    NOTE: keep a reference to the returned `ani` object at the top level
    of your script, otherwise Python will garbage-collect it and the
    animation will stop.
    """
    import matplotlib.pyplot as plt
    import matplotlib.animation as animation

    if fig is None:
        fig = plt.gcf()
    if ax is None:
        ax = fig.add_subplot(111)

    image = ax.imshow(video[0], cmap='gray')

    def _show_frame(index, image):
        # Swap in frame `index` without recreating the artist.
        image.set_data(video[index])
        return image

    ani = animation.FuncAnimation(
        fig, _show_frame, len(video), fargs=(image,), interval=100)
    return ani
| {
"content_hash": "88599cd7fabf38036bb496bdb8d47fd0",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 84,
"avg_line_length": 31.126373626373628,
"alnum_prop": 0.606531332744925,
"repo_name": "hunse/kitti",
"id": "7b73bb207cdbfddcb9004bc38a24ee0981f04751",
"size": "5665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kitti/raw.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "531"
},
{
"name": "C++",
"bytes": "16806"
},
{
"name": "Python",
"bytes": "29349"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.views.generic import TemplateView
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
# Discover admin.py modules in installed apps so the admin site works.
admin.autodiscover()
# Root URL configuration (legacy Django `patterns('', ...)` style).
urlpatterns = patterns('',
    # Landing page: render the static base template directly.
    url(r'^$', TemplateView.as_view(template_name='base.html')),
    # Patient-management app routes.
    url(
        r'^pacientes/',
        include('pacientes_app.urls')
    ),
    # Medical-history app routes.
    url(
        r'^historias/',
        include('historias_app.urls')
    ),
    # Examples:
    # url(r'^$', 'consultas_proyecto.views.home', name='home'),
    # url(r'^consultas_proyecto/', include('consultas_proyecto.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "2d3417a07d5f8210a9f5da3245a94400",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 75,
"avg_line_length": 30.5,
"alnum_prop": 0.6592505854800936,
"repo_name": "gustavoatt/consultas",
"id": "26f489aed7a45115fe3331692adddef1164dc15d",
"size": "854",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "consultas_proyecto/consultas_proyecto/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1221"
},
{
"name": "JavaScript",
"bytes": "535"
},
{
"name": "Puppet",
"bytes": "1966"
},
{
"name": "Python",
"bytes": "92920"
},
{
"name": "Shell",
"bytes": "5120"
}
],
"symlink_target": ""
} |
"""Support for a ScreenLogic 'circuit' switch."""
import logging
from screenlogicpy.const import DATA as SL_DATA, GENERIC_CIRCUIT_NAMES, ON_OFF
from homeassistant.components.switch import SwitchEntity
from . import ScreenlogicEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up entry."""
    coordinator = hass.data[DOMAIN][config_entry.entry_id]["coordinator"]
    # One switch per circuit; circuits with generic names (e.g. unused
    # aux buttons) are created disabled by default.
    entities = [
        ScreenLogicSwitch(
            coordinator, circuit_num, circuit["name"] not in GENERIC_CIRCUIT_NAMES
        )
        for circuit_num, circuit in coordinator.data[SL_DATA.KEY_CIRCUITS].items()
    ]
    async_add_entities(entities)
class ScreenLogicSwitch(ScreenlogicEntity, SwitchEntity):
    """ScreenLogic switch entity."""

    @property
    def name(self):
        """Get the name of the switch."""
        return f"{self.gateway_name} {self.circuit['name']}"

    @property
    def is_on(self) -> bool:
        """Get whether the switch is in on state."""
        return self.circuit["value"] == 1

    async def async_turn_on(self, **kwargs) -> None:
        """Send the ON command."""
        return await self._async_set_circuit(ON_OFF.ON)

    async def async_turn_off(self, **kwargs) -> None:
        """Send the OFF command."""
        return await self._async_set_circuit(ON_OFF.OFF)

    async def _async_set_circuit(self, circuit_value) -> None:
        """Set this circuit's state on the gateway and refresh on success."""
        async with self.coordinator.api_lock:
            # The gateway call is blocking; run it off the event loop.
            success = await self.hass.async_add_executor_job(
                self.gateway.set_circuit, self._data_key, circuit_value
            )
        if not success:
            _LOGGER.warning(
                "Failed to set_circuit %s %s", self._data_key, circuit_value
            )
            return
        _LOGGER.debug("Turn %s %s", self._data_key, circuit_value)
        await self.coordinator.async_request_refresh()

    @property
    def circuit(self):
        """Shortcut to access the circuit."""
        return self.coordinator.data[SL_DATA.KEY_CIRCUITS][self._data_key]
| {
"content_hash": "2bcb45a8f5e619e6255cbe25b2b23b7b",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 79,
"avg_line_length": 33.015625,
"alnum_prop": 0.6412683388547089,
"repo_name": "sander76/home-assistant",
"id": "ff73afebb575d9b9ac7e0f930ebbacec86c2d18a",
"size": "2113",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/screenlogic/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1795"
},
{
"name": "Python",
"bytes": "36548768"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
"""Support for Ubiquiti mFi switches."""
from __future__ import annotations
import logging
from mficlient.client import FailedToLogin, MFiClient
import requests
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = True
# mFi port models that expose a switchable output.
SWITCH_MODELS = ["Outlet", "Output 5v", "Output 12v", "Output 24v", "Dimmer Switch"]
# YAML platform configuration schema for this integration.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
    {
        vol.Required(CONF_HOST): cv.string,
        vol.Required(CONF_USERNAME): cv.string,
        vol.Required(CONF_PASSWORD): cv.string,
        vol.Optional(CONF_PORT): cv.port,
        vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
        vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
    }
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up mFi sensors."""
    host = config.get(CONF_HOST)
    username = config.get(CONF_USERNAME)
    password = config.get(CONF_PASSWORD)
    use_tls = config[CONF_SSL]
    verify_tls = config.get(CONF_VERIFY_SSL)
    # The controller listens on a different default port for TLS.
    port = int(config.get(CONF_PORT, 6443 if use_tls else 6080))

    try:
        client = MFiClient(
            host, username, password, port=port, use_tls=use_tls, verify=verify_tls
        )
    except (FailedToLogin, requests.exceptions.ConnectionError) as ex:
        _LOGGER.error("Unable to connect to mFi: %s", str(ex))
        return

    # One entity per switch-capable port across all devices.
    add_entities(
        MfiSwitch(port)
        for device in client.get_devices()
        for port in device.ports.values()
        if port.model in SWITCH_MODELS
    )
class MfiSwitch(SwitchEntity):
    """Representation of an mFi switch-able device."""

    def __init__(self, port):
        """Initialize the mFi device."""
        self._port = port
        self._target_state = None

    @property
    def unique_id(self):
        """Return the unique ID of the device."""
        return self._port.ident

    @property
    def name(self):
        """Return the name of the device."""
        return self._port.label

    @property
    def is_on(self):
        """Return true if the device is on."""
        return self._port.output

    def update(self):
        """Refresh the port state from the controller."""
        self._port.refresh()
        # A pending local change overrides the (possibly stale) refresh
        # until the controller reports the new state.
        if self._target_state is None:
            return
        self._port.data["output"] = float(self._target_state)
        self._target_state = None

    def turn_on(self, **kwargs):
        """Turn the switch on."""
        self._set_output(True)

    def turn_off(self, **kwargs):
        """Turn the switch off."""
        self._set_output(False)

    def _set_output(self, state):
        """Send a control command and remember the requested state."""
        self._port.control(state)
        self._target_state = state

    @property
    def current_power_w(self):
        """Return the current power usage in W."""
        return int(self._port.data.get("active_pwr", 0))

    @property
    def extra_state_attributes(self):
        """Return the state attributes for the device."""
        data = self._port.data
        return {
            "volts": round(data.get("v_rms", 0), 1),
            "amps": round(data.get("i_rms", 0), 1),
        }
| {
"content_hash": "2ac0234322d53e677ade29687e147771",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 84,
"avg_line_length": 29.28,
"alnum_prop": 0.6377049180327868,
"repo_name": "rohitranjan1991/home-assistant",
"id": "7564229d526f7f5f3b5e1f07f1e1be31ff149304",
"size": "3660",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/mfi/switch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1017265"
},
{
"name": "Python",
"bytes": "1051086"
},
{
"name": "Shell",
"bytes": "3946"
}
],
"symlink_target": ""
} |
"""Example Module for testing utils.monkey_patch()."""
# Records the names of decorated functions each time they are called.
CALLED_FUNCTION = []


def example_decorator(name, function):
    """Decorator used to exercise utils.monkey_patch().

    :param name: name of the function
    :param function: object of the function
    :returns: function -- wrapper that appends `name` to CALLED_FUNCTION
        before delegating to the original function.
    """
    def _recording_wrapper(*args, **kwargs):
        CALLED_FUNCTION.append(name)
        return function(*args, **kwargs)
    return _recording_wrapper
| {
"content_hash": "375ef2fb68b3e21f99d9794e00d5bada",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 68,
"avg_line_length": 28.058823529411764,
"alnum_prop": 0.649895178197065,
"repo_name": "tomasdubec/openstack-cinder",
"id": "95f58e751179dbdc3392626ef736821187b13cda",
"size": "1151",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "cinder/tests/monkey_patch_example/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
#
# Filename : make_run_val_simfit.py
# Description : Generating scripts for command RunValGenFit
# Author : Yi-Mu "Enoch" Chen [ [email protected] ]
#
#*******************************************************************************
import os,sys
# Analysis channels to generate scripts for.
channellist=["MuonSignal","ElectronSignal"]
# Signal mass points (T* mass hypotheses).
masspointlist=[
    "TstarM700",
    "TstarM800",
    "TstarM900",
    "TstarM1000",
    "TstarM1100",
    "TstarM1200",
    "TstarM1300",
    "TstarM1400",
    "TstarM1500",
    "TstarM1600",
]
# Background fit-function models to validate.
functionlist=["Lognorm","Exo","LogExt3", "LogExt4"]
# Number of toy experiments per job.
runnum = 2000
# Signal strengths (r) to inject.
injlist = [0,1]
# Shell-script preamble: set up the CMSSW environment before running.
header="""
#!/bin/bash
source /cvmfs/cms.cern.ch/cmsset_default.sh
cd {}/src/TstarAnalysis/LimitCalc/
eval `scramv1 runtime -sh`
""".format( os.environ['CMSSW_BASE'] )
# Command template for one validation-fit job.
cmd = """
RunValGenFit --channel {0} --fitmethod SimFit --fitfunc {1} --num {2} --masspoint {3} -e Rereco {4} {5}
"""
plotcmd = "./PlotValGenFit --channel {0} --fitmethod SimFit --fitfunc {1} --relmag {2} -e Rereco"
# Emit one executable script per (channel, function, injection, mass point),
# then print the matching plot command for each (channel, function, injection).
# NOTE: this file uses Python 2 print statements throughout.
for channel in channellist:
    for func in functionlist:
        for inj in injlist :
            for masspoint in masspointlist:
                filename = "run/runsimfitval_{}_{}_{}_r{}.sh".format(
                    channel, func, masspoint, inj )
                script = open( filename, 'w')
                script.write( header )
                script.write(cmd.format( channel, func, runnum , masspoint , "--relmag" , inj ))
                script.close()
                os.system("chmod +x "+filename)
            print plotcmd.format( channel, func, inj )
### other alternative
# Reduced mass-point set for the systematic-variation checks.
masspointlist=[
    "TstarM800",
    "TstarM1200",
    "TstarM1600",
]
# Background shape variations (initial/final-state radiation up/down).
bkgtypelist = [
    "ISRup",
    "ISRdown",
    "FSRup",
    "FSRdown",
]
functionlist = ["Lognorm"]
# Extended command template with two extra option/value slots.
cmd = """
RunValGenFit --channel {0} --fitmethod SimFit --fitfunc {1} --num {2} --masspoint {3} -e Rereco {4} {5} {6} {7}
"""
plotcmd = "./PlotValGenFit --channel {0} --fitmethod SimFit --fitfunc {1} --relmag {2} -e Rereco {3} {4}"
# Scripts for the background-type systematic variations.
for channel in channellist:
    for func in functionlist:
        for inj in injlist :
            for bkgtype in bkgtypelist:
                for masspoint in masspointlist:
                    filename = "run/runsimfitval_{}_{}_{}_r{}_{}.sh".format( channel, func, masspoint, inj, bkgtype )
                    script = open( filename, 'w')
                    script.write( header )
                    script.write( cmd.format( channel, func, runnum , masspoint , "--relmag" , inj, "--bkgtype", bkgtype ) )
                    script.close()
                    os.system("chmod +x "+filename)
                print plotcmd.format( channel, func, inj, "--bkgtype", bkgtype )
# Scripts forcing specific rho values instead of a background type.
bkgtypelist=[1.20,1.55]
for channel in channellist:
    for func in functionlist:
        for inj in injlist :
            for bkgtype in bkgtypelist:
                for masspoint in masspointlist:
                    filename = "run/runsimfitval_{}_{}_{}_r{}_{}.sh".format( channel, func, masspoint, inj, bkgtype )
                    script = open( filename, 'w')
                    script.write( header )
                    script.write( cmd.format( channel, func, runnum , masspoint , "--relmag" , inj, "--forcerho", bkgtype ) )
                    script.close()
                    os.system("chmod +x "+filename)
                print plotcmd.format( channel, func, inj, "--forcerho", bkgtype )
| {
"content_hash": "6c3aacdb6d71a416c7c5a7de5e266049",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 125,
"avg_line_length": 33.43564356435643,
"alnum_prop": 0.5493041160793604,
"repo_name": "NTUHEP-Tstar/TstarAnalysis",
"id": "a25767b3e86895b43d1320ac054b13c17c1a05c3",
"size": "3480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "LimitCalc/genValSimScripts.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "755251"
},
{
"name": "Objective-C",
"bytes": "38376"
},
{
"name": "Python",
"bytes": "105691"
},
{
"name": "Shell",
"bytes": "15347"
}
],
"symlink_target": ""
} |
'''
Created on 29 sept. 2013
@author: Aristote Diasonama
'''
"""
Python Module handling daily notification sent to user about event taking place daily
"""
import logging
from vendors.sendgrid import sender
import memcache
from market.handlers.base import BaseHandler, jinja_environment
from market.handlers.main import MainHandler
from google.appengine.ext import ndb
from google.appengine.api import taskqueue
from market.lib.event import EventManager
from market.lib.user import UserManager
from market.lib.attendance import AttendanceManager
from shop.models.user import EmailNotificationsSettings as NotifSettings
class StartDailyNotificationHandler(BaseHandler):
    """
    Cron entry point for the daily notification pipeline.

    If any events take place today, enqueues the task that builds the
    per-user daily-notification tasks and renders a digest preview.
    (The original docstring said "Weekly"; the task URL and queue are
    the daily ones.)
    """
    def get(self):
        today_events = EventManager.get_event_list(filtered_by = 'today')
        if today_events:
            # FIX: the return value of taskqueue.add was bound to an
            # unused local variable; drop the binding.
            taskqueue.add(url='/tasks/notification/daily/start')
            self.render_template('ebdigest.html', {'event_list': today_events})
class CreateDailyNotificationHandler(BaseHandler):
    """
    This class create a list of task that the daily notifier queue must execute
    """
    def post(self):
        self.add_task()
        events = EventManager.get_event_list(filtered_by='today')
        self.render_template('ebdigest.html', {'event_list': events})
    def add_task(self):
        """
        Enqueue one 'daily-notifier' task per user attending an event today.
        Each task carries the user's id, email, and today's event keys.
        """
        user_event_dict = self.get_user_event_dict()
        if user_event_dict:
            for user_id in user_event_dict.keys():
                queue = taskqueue.Queue('daily-notifier')
                queue.add(taskqueue.Task(url='/tasks/notification/daily',
                                         params={'user_id':user_id,
                                                 'email':user_event_dict[user_id]['email'],
                                                 'events':user_event_dict[user_id]['events']
                                                 }))
    def get_user_event_dict(self):
        """
        Get a dict of user_id as key and dict of list of events they are attending today and their email as value
        """
        events = EventManager.get_event_list(filtered_by='today')
        user_event_dict = {}
        already_in_dict = []  # user ids already present in user_event_dict
        no_send = []  # user ids who have daily alerts disabled
        for event in events:
            event_key = ndb.Key(urlsafe=event['key'])
            user_ids = AttendanceManager.get_users_id_attending(event_key)
            if user_ids:
                for an_id in user_ids:
                    if an_id in already_in_dict:
                        # Known user: just append this event to their list.
                        user_event_dict[an_id]['events'].append(event['key'])
                    else:
                        if not an_id in no_send:
                            # First sighting: fetch settings once and honor
                            # the user's daily-alert opt-in/out.
                            user_notif = NotifSettings.get_settings_for(user_id=an_id)
                            if user_notif.daily_alert:
                                user_event_dict[an_id] = {}
                                user_event_dict[an_id]['email'] = user_notif.email_to_use
                                user_event_dict[an_id]['events'] = [event['key']]
                                already_in_dict.append(an_id)
                            else:
                                no_send.append(an_id)
        return user_event_dict
class SendDailyNotificationHandler(MainHandler):
    """
    This class will send daily notification to a user about the event he has to attend
    """
    def post(self):
        # Task payload: target user id, email, and today's event keys.
        user_id = int(self.request.get('user_id'))
        event_keys = self.request.get('events', allow_multiple=True)
        email = self.request.get('email')
        already_sent = memcache.get_daily_sent_ids()
        if not user_id or not email:
            logging.debug('NO POST DATAS PROVIDED')
            return
        # Deduplicate: skip users who already received today's digest.
        if user_id in already_sent:
            return
        # Keep only today's events that were assigned to this user.
        events = filter(lambda event: event['key'] in event_keys, EventManager.get_event_list(filtered_by='today'))
        # NOTE(review): stray debug print left in production code.
        print(events)
        user = UserManager.get_user_friendly(id =user_id)
        # NOTE(review): duplicate of the guard above — dead code.
        if not user_id or not email:
            logging.debug('NO POST DATAS PROVIDED')
            return
        html_email = self.get_html_email(user, events)
        subject = self.get_subject(user)
        non_html_email = self.get_non_html_email(user)
        # Only send real email outside of debug/development mode.
        from main import debug
        if not debug:
            sender.send('EventBuck Rappel','[email protected]', subject, text=non_html_email, html=html_email,
                    receiver_email=email, receiver_name=user['fullname'])
        # Record the send so this user is not notified twice today.
        memcache.add_daily_sent(user_id)
    def get_html_email(self, user, events):
        # Render the HTML digest template for this user's events.
        return jinja_environment.get_template('ebdigest.html').render({'event_list': events,
                                                                       'user':user,
                                                                       'subject':self.get_subject(user)
                                                                       })
    def get_non_html_email(self, user):
        # Plain-text fallback body (French).
        email = u"""
        {}, Visites ce lien pour voir les events qui auront lieu cette semaine sur EventBuck!
        www.eventbuck.com
        """.format(user['firstname'] if user['type']=='student' else user['fullname'])
        return email
    def get_subject(self, user):
        return u"{}, Rappel des events à participer!".format(user['firstname'] if user['type']=='student' else user['fullname']) | {
"content_hash": "e496ccd06a3960790bde5cd46db9dfa3",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 128,
"avg_line_length": 39.21678321678322,
"alnum_prop": 0.557774607703281,
"repo_name": "EventBuck/EventBuck",
"id": "38bdf39c9ae3fa7d9e4470fa57dbfeeec944cbe3",
"size": "5656",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "market/handlers/notification/daily_notification.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "72386"
},
{
"name": "JavaScript",
"bytes": "178307"
},
{
"name": "Python",
"bytes": "302393"
}
],
"symlink_target": ""
} |
"""Tests for metadata.py."""
__author__ = '[email protected] (Cihat Imamoglu)'
import json
import metadata
import test_utils
# Fixture MapRoot: two layers with external sources (KML, GEORSS) and one
# built-in layer (GOOGLE_TRAFFIC) that has no source URL.
MAPROOT = {
    'title': 't1',
    'layers': [{'id': 12,
                'type': 'KML',
                'source': {'kml': {'url': 'http://x.com/a'}}},
               {'id': 15,
                'type': 'GEORSS',
                'source': {'georss': {'url': 'http://y.com/b'}}},
               {'id': 16,
                'type': 'GOOGLE_TRAFFIC'}]
}
class MetadataTest(test_utils.BaseTest):
  def testGetSourceAddresses(self):
    # Only layers with an external URL yield addresses; GOOGLE_TRAFFIC
    # has no source and is skipped.
    self.assertEquals(
        {'KML:http://x.com/a', 'GEORSS:http://y.com/b'},
        set(metadata.GetSourceAddresses(MAPROOT)))
  def testCacheSourceAddresses(self):
    cache_key1, sources = metadata.CacheSourceAddresses('abc', MAPROOT)
    self.assertEquals(
        {'KML:http://x.com/a', 'GEORSS:http://y.com/b'},
        set(metadata.SOURCE_ADDRESS_CACHE.Get(cache_key1)))
    self.assertEquals(
        {'KML:http://x.com/a', 'GEORSS:http://y.com/b'},
        set(sources))
    # Same map_version_key should yield the same cache key.
    cache_key2, sources = metadata.CacheSourceAddresses('abc', MAPROOT)
    self.assertEquals(cache_key1, cache_key2)
  def testActivateSources(self):
    sources = ['KML:http://x.com/a', 'GEORSS:http://y.com/b']
    metadata.ActivateSources(sources)
    # Both sources should now be queued for metadata fetches.
    urls = sorted(task['url'] for task in self.PopTasks('metadata'))
    self.assertEquals(2, len(urls))
    self.AssertEqualsUrlWithUnorderedParams(
        '/root/.metadata_fetch?source=GEORSS:http://y.com/b', urls[0])
    self.AssertEqualsUrlWithUnorderedParams(
        '/root/.metadata_fetch?source=KML:http://x.com/a', urls[1])
    # Activating multiple times should not add redundant tasks.
    metadata.ActivateSources(sources)
    metadata.ActivateSources(sources)
    self.assertEquals(0, len(self.PopTasks('metadata')))
  def testGet(self):
    cache_key, _ = metadata.CacheSourceAddresses('abc', MAPROOT)
    metadata.METADATA_CACHE.Set('KML:http://x.com/a', {'length': 123})
    metadata.METADATA_CACHE.Set('KML:http://p.com/q', {'length': 456})
    # Map cache key, an address with metadata, and an address without metadata.
    response = self.DoGet('/.metadata?ck=' + cache_key +
                          '&source=KML:http://p.com/q' +
                          '&source=KML:http://z.com/z')
    self.assertEquals({
        'KML:http://x.com/a': {'length': 123},  # in map, has metadata
        'GEORSS:http://y.com/b': None,  # in map, no metadata
        'KML:http://p.com/q': {'length': 456},  # source param, has metadata
        'KML:http://z.com/z': None  # source param, no metadata
    }, json.loads(response.body))
  def testGetAndActivate(self):
    self.DoGet('/.metadata?source=KML:http://u.com/v')
    # Requesting metadata should activate the source and queue a task.
    self.assertEquals(1, metadata.ACTIVE_CACHE.Get('KML:http://u.com/v'))
    urls = sorted(task['url'] for task in self.PopTasks('metadata'))
    self.assertEquals(1, len(urls))
    self.AssertEqualsUrlWithUnorderedParams(
        '/root/.metadata_fetch?source=KML:http://u.com/v', urls[0])
    # Requesting multiple times should not add redundant tasks.
    self.DoGet('/.metadata?source=KML:http://u.com/v')
    self.DoGet('/.metadata?source=KML:http://u.com/v')
    self.assertEquals(0, len(self.PopTasks('metadata')))
if __name__ == '__main__':
  test_utils.main()
| {
"content_hash": "8a3777003a61495b25ae02eda989fe3a",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 79,
"avg_line_length": 37.60215053763441,
"alnum_prop": 0.6225336002287675,
"repo_name": "klokan/googlecrisismap",
"id": "26e272f29941627b061a89a03b9423375c37fa99",
"size": "4104",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "metadata_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "64220"
},
{
"name": "HTML",
"bytes": "166696"
},
{
"name": "JavaScript",
"bytes": "1690524"
},
{
"name": "Makefile",
"bytes": "8362"
},
{
"name": "Python",
"bytes": "707243"
}
],
"symlink_target": ""
} |
"""A tf.learn implementation of tensor_forest (extremely random forests)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.learn.python.learn import monitors as mon
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.tensor_forest.client import eval_metrics
from tensorflow.contrib.tensor_forest.data import data_ops
from tensorflow.contrib.tensor_forest.python import tensor_forest
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
def _assert_float32(tensors):
  """Assert all tensors are float32.

  Args:
    tensors: `Tensor` or `dict` of `Tensor` objects.

  Raises:
    TypeError: if any tensor is not float32.
  """
  values = tensors.values() if isinstance(tensors, dict) else [tensors]
  for tensor in values:
    if tensor.dtype.base_dtype != dtypes.float32:
      raise TypeError('Expected dtype=float32, %s.' % tensor)
class TensorForestLossMonitor(mon.EveryN):
  """Terminates training when training loss stops decreasing."""

  def __init__(self, early_stopping_rounds, every_n_steps):
    super(TensorForestLossMonitor, self).__init__(every_n_steps=every_n_steps)
    self.early_stopping_rounds = early_stopping_rounds
    self.min_loss = None  # lowest training loss observed so far
    self.min_loss_step = 0  # step at which min_loss was observed

  def step_begin(self, step):
    super(TensorForestLossMonitor, self).step_begin(step)
    # Request the training-loss tensor so its value is reported this step.
    return [self._loss_op_name]

  def set_estimator(self, est):
    """This function gets called in the same graph as _get_train_ops."""
    super(TensorForestLossMonitor, self).set_estimator(est)
    self._loss_op_name = est.training_loss.name

  def every_n_step_end(self, step, outputs):
    super(TensorForestLossMonitor, self).every_n_step_end(step, outputs)
    current_loss = outputs[self._loss_op_name]
    # Track the best loss seen; request a stop once it has not improved
    # for `early_stopping_rounds` steps.
    if self.min_loss is None or current_loss < self.min_loss:
      self.min_loss = current_loss
      self.min_loss_step = step
    return step - self.min_loss_step >= self.early_stopping_rounds
class TensorForestEstimator(estimator.BaseEstimator):
"""An estimator that can train and evaluate a random forest."""
  def __init__(self, params, device_assigner=None, model_dir=None,
               graph_builder_class=tensor_forest.RandomForestGraphs,
               master='', accuracy_metric=None,
               tf_random_seed=None, config=None,
               feature_engineering_fn=None):
    """Initializes the estimator.

    Args:
      params: ForestHParams-like object; its `fill()` result is stored.
      device_assigner: Optional device assigner; defaults to a new
        RandomForestDeviceAssigner.
      model_dir: Directory for checkpoints, passed to BaseEstimator.
      graph_builder_class: Class used to build the forest graphs.
      master: Accepted but not used in this constructor body.
      accuracy_metric: Metric name; defaults to 'r2' for regression and
        'accuracy' otherwise.
      tf_random_seed: Accepted but not used in this constructor body.
      config: RunConfig passed to BaseEstimator.
      feature_engineering_fn: Optional (features, targets) ->
        (features, targets) transform applied before training/inference;
        defaults to the identity.
    """
    self.params = params.fill()
    self.accuracy_metric = (accuracy_metric or
                            ('r2' if self.params.regression else 'accuracy'))
    self.data_feeder = None
    self.device_assigner = (
        device_assigner or tensor_forest.RandomForestDeviceAssigner())
    self.graph_builder_class = graph_builder_class
    self.training_args = {}
    self.construction_args = {}
    self._feature_engineering_fn = (
        feature_engineering_fn or
        (lambda features, targets: (features, targets)))
    super(TensorForestEstimator, self).__init__(model_dir=model_dir,
                                                config=config)
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(
      self, x=None, input_fn=None, batch_size=None, outputs=None,
      as_iterable=True):
    """Returns prediction probabilities for given features (classification).

    Args:
      x: features.
      input_fn: Input function. If set, x and y must be None.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted probabilities (or an iterable of predicted
      probabilities if as_iterable is True).

    Raises:
      ValueError: If both or neither of x and input_fn were given.
    """
    # Delegate to BaseEstimator.predict and unwrap the 'probabilities'
    # output produced by _get_predict_ops.
    results = super(TensorForestEstimator, self).predict(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)
    if as_iterable:
      return (r['probabilities'] for r in results)
    else:
      return results['probabilities']
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(
      self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
      as_iterable=True):
    """Returns predictions for given features.

    Args:
      x: features.
      input_fn: Input function. If set, x must be None.
      axis: Axis on which to argmax (for classification).
        Last axis is used by default.
      batch_size: Override default batch size.
      outputs: list of `str`, name of the output to predict.
        If `None`, returns all.
      as_iterable: If True, return an iterable which keeps yielding predictions
        for each example until inputs are exhausted. Note: The inputs must
        terminate if you want the iterable to terminate (e.g. be sure to pass
        num_epochs=1 if you are using something like read_batch_features).

    Returns:
      Numpy array of predicted classes or regression values (or an iterable of
      predictions if as_iterable is True).
    """
    # NOTE(review): the `axis` argument is accepted but not used below.
    probabilities = self.predict_proba(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)
    if self.params.regression:
      # Regression: the forest output is already the predicted value.
      return probabilities
    else:
      # Classification: reduce class probabilities to a class index.
      # Per-example vectors in the iterable case are 1-D, hence axis=0.
      if as_iterable:
        return (np.argmax(p, axis=0) for p in probabilities)
      else:
        return np.argmax(probabilities, axis=1)
  def predict_with_keys(
      self, x=None, input_fn=None, axis=None, batch_size=None, outputs=None,
      as_iterable=False):
    """Same as predict but also returns the example keys.

    Returns (prediction, key) pairs; the key is None when the input did
    not carry example keys.
    """
    # NOTE(review): `axis` is accepted but unused, mirroring predict().
    results = super(TensorForestEstimator, self).predict(
        x=x, input_fn=input_fn, batch_size=batch_size, outputs=outputs,
        as_iterable=as_iterable)
    if self.params.regression:
      if as_iterable:
        return ((r['probabilities'], r.get('keys', None)) for r in results)
      else:
        return results['probabilities'], results.get('keys', None)
    else:
      if as_iterable:
        return ((np.argmax(r['probabilities'], axis=0),
                 r.get('keys', None)) for r in results)
      else:
        return np.argmax(results['probabilities'], axis=1), results.get('keys',
                                                                        None)
  def _get_train_ops(self, features, targets):
    """Method that builds model graph and returns trainer ops.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.

    Returns:
      Tuple of train `Operation` and loss `Tensor`.
    """
    # Parse raw inputs into dense tensors (plus optional weights and spec).
    features, _, weights, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    features, labels = self._feature_engineering_fn(features, labels)
    _assert_float32(features)
    _assert_float32(labels)
    if weights is not None:
      if 'input_weights' in self.training_args:
        logging.warning('Replacing input_weights in training_args.')
      self.training_args['input_weights'] = weights
    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner,
        **self.construction_args)
    epoch = None
    if self.data_feeder:
      epoch = self.data_feeder.make_epoch_variable()
    # Group the forest-training op with a global-step increment so one
    # session.run advances both.
    train = control_flow_ops.group(
        graph_builder.training_graph(
            features, labels, data_spec=spec, epoch=epoch,
            **self.training_args),
        state_ops.assign_add(contrib_framework.get_global_step(), 1))
    # NOTE(review): training_loss receives the raw `targets`, not the parsed/
    # engineered `labels` used by training_graph above -- confirm intentional.
    self.training_loss = graph_builder.training_loss(features, targets)
    return train, self.training_loss
def _get_predict_ops(self, features):
graph_builder = self.graph_builder_class(
self.params, device_assigner=self.device_assigner, training=False,
**self.construction_args)
features, keys, _, spec = data_ops.ParseDataTensorOrDict(features)
features, _ = self._feature_engineering_fn(features, None)
_assert_float32(features)
output_dict = {
'probabilities': graph_builder.inference_graph(features,
data_spec=spec)}
if keys is not None:
output_dict['keys'] = keys
return output_dict
  def _get_eval_ops(self, features, targets, metrics):
    """Builds eval ops: parses inputs, runs inference, applies metrics.

    Args:
      features: `Tensor` or `dict` of `Tensor` objects.
      targets: `Tensor` or `dict` of `Tensor` objects.
      metrics: dict mapping metric name to a metric fn taking
        (probabilities, labels), or None for the default accuracy metric.

    Returns:
      dict of metric name to the metric op's result.
    """
    features, _, _, spec = data_ops.ParseDataTensorOrDict(features)
    labels = data_ops.ParseLabelTensorOrDict(targets)
    features, labels = self._feature_engineering_fn(features, labels)
    _assert_float32(features)
    _assert_float32(labels)
    # Inference-mode (training=False) copy of the forest graph.
    graph_builder = self.graph_builder_class(
        self.params, device_assigner=self.device_assigner, training=False,
        **self.construction_args)
    probabilities = graph_builder.inference_graph(features, data_spec=spec)
    # One-hot the labels.
    if not self.params.regression:
      labels = math_ops.to_int64(array_ops.one_hot(math_ops.to_int64(
          array_ops.squeeze(labels)), self.params.num_classes, 1, 0))
    if metrics is None:
      metrics = {self.accuracy_metric:
                 eval_metrics.get_metric(self.accuracy_metric)}
    result = {}
    for name, metric in six.iteritems(metrics):
      result[name] = metric(probabilities, labels)
    return result
| {
"content_hash": "9991b3604e329b20fe9c74c8c996f01b",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 79,
"avg_line_length": 37.809701492537314,
"alnum_prop": 0.672357643343531,
"repo_name": "juharris/tensorflow",
"id": "738103523947d20dbcffb558ddb27b349a9a33bc",
"size": "10822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/learn/python/learn/estimators/random_forest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "156005"
},
{
"name": "C++",
"bytes": "9229239"
},
{
"name": "CMake",
"bytes": "29372"
},
{
"name": "CSS",
"bytes": "1297"
},
{
"name": "HTML",
"bytes": "783708"
},
{
"name": "Java",
"bytes": "39181"
},
{
"name": "JavaScript",
"bytes": "10779"
},
{
"name": "Jupyter Notebook",
"bytes": "1773496"
},
{
"name": "Protocol Buffer",
"bytes": "112087"
},
{
"name": "Python",
"bytes": "6699482"
},
{
"name": "Shell",
"bytes": "185658"
},
{
"name": "TypeScript",
"bytes": "410434"
}
],
"symlink_target": ""
} |
import json
import os
import subprocess
import sys
from contextlib import ExitStack, suppress
from datetime import datetime, timedelta
import freezegun
import pytest
# We should set these before loading _any_ of the rest of airflow so that the
# unit test mode config is set as early as possible.
assert "airflow" not in sys.modules, "No airflow module can be imported before these lines"
# Point Airflow at the test DAGs folder and force unit-test configuration
# before any airflow import can read the environment.
tests_directory = os.path.dirname(os.path.realpath(__file__))
os.environ["AIRFLOW__CORE__DAGS_FOLDER"] = os.path.join(tests_directory, "dags")
os.environ["AIRFLOW__CORE__UNIT_TEST_MODE"] = "True"
# Defaults some tests rely on; pre-set values in the environment win.
os.environ["AWS_DEFAULT_REGION"] = os.environ.get("AWS_DEFAULT_REGION") or "us-east-1"
os.environ["CREDENTIALS_DIR"] = os.environ.get('CREDENTIALS_DIR') or "/files/airflow-breeze-config/keys"
from tests.test_utils.perf.perf_kit.sqlalchemy import ( # noqa isort:skip
    count_queries,
    trace_queries,
)
@pytest.fixture()
def reset_environment():
    """
    Restore ``os.environ`` to its pre-test state after the test finishes.

    Variables added by the test are removed, changed ones are reset, and
    variables the test deleted are re-created.
    """
    init_env = os.environ.copy()
    yield
    # Iterate over a snapshot of the keys: deleting from os.environ while
    # iterating the live mapping raises "dictionary changed size during
    # iteration" (the original code also aliased os.environ instead of
    # snapshotting it).
    for key in list(os.environ):
        if key not in init_env:
            del os.environ[key]
        else:
            os.environ[key] = init_env[key]
    # Re-create variables the test removed entirely; the original
    # implementation silently lost these.
    for key, value in init_env.items():
        if key not in os.environ:
            os.environ[key] = value
@pytest.fixture()
def reset_db():
    """Reset the Airflow metadata database before the test runs."""
    from airflow.utils import db

    db.resetdb()
    yield
# Column names accepted by the --trace-sql option (see the trace_sql fixture
# and pytest_addoption below).
ALLOWED_TRACE_SQL_COLUMNS = ['num', 'time', 'trace', 'sql', 'parameters', 'count']
@pytest.fixture(autouse=True)
def trace_sql(request):
    """
    Displays queries from the tests to console.

    Enabled with the ``--trace-sql`` option, whose value is a comma-separated
    list of columns to display (see ALLOWED_TRACE_SQL_COLUMNS).
    """
    trace_sql_option = request.config.getoption("trace_sql")
    if not trace_sql_option:
        yield
        return

    terminal_reporter = request.config.pluginmanager.getplugin("terminalreporter")
    # if no terminal reporter plugin is present, nothing we can do here;
    # this can happen when this function executes in a slave node
    # when using pytest-xdist, for example
    if terminal_reporter is None:
        yield
        return

    columns = [col.strip() for col in trace_sql_option.split(",")]

    def pytest_print(text):
        return terminal_reporter.write_line(text)

    with ExitStack() as exit_stack:
        if columns == ['num']:
            # It is very unlikely that the user wants to display only numbers, but probably
            # the user just wants to count the queries.
            exit_stack.enter_context(count_queries(print_fn=pytest_print))
        elif any(c in columns for c in ('time', 'trace', 'sql', 'parameters')):
            # BUG FIX: the original generator tested the literal strings for
            # truthiness (always True) instead of membership in `columns`.
            exit_stack.enter_context(
                trace_queries(
                    display_num='num' in columns,
                    display_time='time' in columns,
                    display_trace='trace' in columns,
                    display_sql='sql' in columns,
                    display_parameters='parameters' in columns,
                    print_fn=pytest_print,
                )
            )
        yield
def pytest_addoption(parser):
    """
    Add options parser for custom plugins
    """
    group = parser.getgroup("airflow")
    group.addoption(
        "--with-db-init",
        action="store_true",
        dest="db_init",
        help="Forces database initialization before tests",
    )
    group.addoption(
        "--integration",
        action="append",
        metavar="INTEGRATIONS",
        help="only run tests matching integration specified: "
        "[cassandra,kerberos,mongo,openldap,rabbitmq,redis,statsd,trino]. ",
    )
    group.addoption(
        "--backend",
        action="store",
        metavar="BACKEND",
        help="only run tests matching the backend: [sqlite,postgres,mysql].",
    )
    group.addoption(
        "--system",
        action="append",
        metavar="SYSTEMS",
        help="only run tests matching the system specified [google.cloud, google.marketing_platform]",
    )
    group.addoption(
        "--include-long-running",
        action="store_true",
        help="Includes long running tests (marked with long_running marker). They are skipped by default.",
    )
    group.addoption(
        "--include-quarantined",
        action="store_true",
        help="Includes quarantined tests (marked with quarantined marker). They are skipped by default.",
    )
    allowed_trace_sql_columns_list = ",".join(ALLOWED_TRACE_SQL_COLUMNS)
    group.addoption(
        "--trace-sql",
        action="store",
        help=(
            "Trace SQL statements. As an argument, you must specify the columns to be "
            # BUG FIX: a stray "f" was previously rendered into the help text
            # ("Supported values: [f{...}]").
            f"displayed as a comma-separated list. Supported values: [{allowed_trace_sql_columns_list}]"
        ),
        metavar="COLUMNS",
    )
def initial_db_init():
    """Reset the Airflow database, dispatching on the running Airflow version."""
    if os.environ.get("RUN_AIRFLOW_1_10") != "true":
        from airflow.utils import db

        db.resetdb()
    else:
        # Airflow 1.10 has no db.resetdb() API usable here; shell out instead.
        print("Attempting to reset the db using airflow command")
        os.system("airflow resetdb -y")
@pytest.fixture(autouse=True, scope="session")
def breeze_test_helper(request):
    """
    Helper that setups Airflow testing environment. It does the same thing
    as the old 'run-tests' script.

    Initializes the Airflow DB (once per container, or when forced with
    --with-db-init) and optionally obtains a Kerberos ticket.
    """
    # fixme: this should use some other env variable ex. RUNNING_ON_K8S
    if os.environ.get("SKIP_INIT_DB"):
        print("Skipping db initialization. Tests do not require database")
        return
    from airflow import __version__
    if __version__.startswith("1.10"):
        # Flag consumed by initial_db_init() and skip_if_airflow_2_test().
        os.environ['RUN_AIRFLOW_1_10'] = "true"
    print(" AIRFLOW ".center(60, "="))
    # Setup test environment for breeze
    home = os.path.expanduser("~")
    airflow_home = os.environ.get("AIRFLOW_HOME") or os.path.join(home, "airflow")
    print(f"Home of the user: {home}\nAirflow home {airflow_home}")
    # Initialize Airflow db if required
    lock_file = os.path.join(airflow_home, ".airflow_db_initialised")
    if request.config.option.db_init:
        print("Initializing the DB - forced with --with-db-init switch.")
        initial_db_init()
    elif not os.path.exists(lock_file):
        print(
            "Initializing the DB - first time after entering the container.\n"
            "You can force re-initialization the database by adding --with-db-init switch to run-tests."
        )
        initial_db_init()
        # Create the (empty) lock file so later sessions skip initialization.
        with open(lock_file, "w+"):
            pass
    else:
        print(
            "Skipping initializing of the DB as it was initialized already.\n"
            "You can re-initialize the database by adding --with-db-init flag when running tests."
        )
    integration_kerberos = os.environ.get("INTEGRATION_KERBEROS")
    if integration_kerberos == "true":
        # Initialize kerberos: obtain a ticket from the keytab pointed at by
        # KRB5_KTNAME; abort the session if the variable is missing.
        kerberos = os.environ.get("KRB5_KTNAME")
        if kerberos:
            subprocess.check_call(["kinit", "-kt", kerberos, '[email protected]'])
        else:
            print("Kerberos enabled! Please setup KRB5_KTNAME environment variable")
            sys.exit(1)
def pytest_configure(config):
    """Register the custom markers used by the Airflow test suite."""
    marker_docs = [
        "integration(name): mark test to run with named integration",
        "backend(name): mark test to run with named backend",
        "system(name): mark test to run with named system",
        "long_running: mark test that run for a long time (many minutes)",
        "quarantined: mark test that are in quarantine (i.e. flaky, need to be isolated and fixed)",
        "credential_file(name): mark tests that require credential file in CREDENTIALS_DIR",
        "airflow_2: mark tests that works only on Airflow 2.0 / master",
    ]
    for doc in marker_docs:
        config.addinivalue_line("markers", doc)
def skip_if_not_marked_with_integration(selected_integrations, item):
    """Skip `item` unless one of its ``integration`` markers is selected."""
    selected = set(selected_integrations)
    for marker in item.iter_markers(name="integration"):
        if marker.args[0] in selected or "all" in selected:
            return
    pytest.skip(
        f"The test is skipped because it does not have the right integration marker. "
        f"Only tests marked with pytest.mark.integration(INTEGRATION) are run with INTEGRATION "
        f"being one of {selected_integrations}. {item}"
    )
def skip_if_not_marked_with_backend(selected_backend, item):
    """Skip `item` unless one of its ``backend`` markers names `selected_backend`."""
    if any(selected_backend in marker.args
           for marker in item.iter_markers(name="backend")):
        return
    pytest.skip(
        f"The test is skipped because it does not have the right backend marker "
        f"Only tests marked with pytest.mark.backend('{selected_backend}') are run: {item}"
    )
def skip_if_not_marked_with_system(selected_systems, item):
    """Skip `item` unless one of its ``system`` markers is selected."""
    wanted = set(selected_systems)
    for marker in item.iter_markers(name="system"):
        if marker.args[0] in wanted or "all" in wanted:
            return
    pytest.skip(
        f"The test is skipped because it does not have the right system marker. "
        f"Only tests marked with pytest.mark.system(SYSTEM) are run with SYSTEM "
        f"being one of {selected_systems}. {item}"
    )
def skip_system_test(item):
    """Skip `item` if it carries a ``system`` marker (no --system flag given)."""
    markers = list(item.iter_markers(name="system"))
    if not markers:
        return
    marker = markers[0]
    pytest.skip(
        f"The test is skipped because it has system marker. System tests are only run when "
        f"--system flag with the right system ({marker.args[0]}) is passed to pytest. {item}"
    )
def skip_long_running_test(item):
    """Skip `item` marked ``long_running`` (--include-long-running not given)."""
    if any(True for _ in item.iter_markers(name="long_running")):
        pytest.skip(
            f"The test is skipped because it has long_running marker. "
            f"And --include-long-running flag is not passed to pytest. {item}"
        )
def skip_quarantined_test(item):
    """Skip `item` marked ``quarantined`` (--include-quarantined not given).

    Only invoked by pytest_runtest_setup when the --include-quarantined flag
    is absent, hence the message wording.
    """
    for _ in item.iter_markers(name="quarantined"):
        pytest.skip(
            f"The test is skipped because it has quarantined marker. "
            # BUG FIX: message previously claimed the flag "is passed";
            # the helper only runs when the flag is NOT passed (compare
            # skip_long_running_test).
            f"And --include-quarantined flag is not passed to pytest. {item}"
        )
def skip_if_integration_disabled(marker, item):
    """Skip `item` if the integration required by `marker` is not enabled.

    An integration named ``foo`` is enabled when the INTEGRATION_FOO
    environment variable equals "true".
    """
    integration_name = marker.args[0]
    environment_variable_name = "INTEGRATION_" + integration_name.upper()
    environment_variable_value = os.environ.get(environment_variable_name)
    # Simplified from `not value or value != "true"`: any value other than
    # "true" (including None) skips.
    if environment_variable_value != "true":
        # Converted from str.format to f-strings for consistency with the
        # rest of this module.
        pytest.skip(
            f"The test requires {integration_name} integration started and "
            f"{environment_variable_name} environment variable to be set to true "
            f"(it is '{environment_variable_value}')."
            f" It can be set by specifying '--integration {integration_name}' at breeze startup"
            f": {item}"
        )
def skip_if_wrong_backend(marker, item):
    """Skip `item` unless the BACKEND env variable matches the marker's backends."""
    valid_backend_names = marker.args
    environment_variable_name = "BACKEND"
    environment_variable_value = os.environ.get(environment_variable_name)
    if environment_variable_value and environment_variable_value in valid_backend_names:
        return
    pytest.skip(
        f"The test requires one of {valid_backend_names} backend started and "
        f"{environment_variable_name} environment variable to be set to 'true' (it is "
        f"'{environment_variable_value}'). It can be set by specifying backend at breeze startup: {item}"
    )
def skip_if_credential_file_missing(item):
    """Skip `item` if any ``credential_file`` marker points at a missing file."""
    for marker in item.iter_markers(name="credential_file"):
        credential_path = os.path.join(
            os.environ.get('CREDENTIALS_DIR'), marker.args[0]
        )
        if not os.path.exists(credential_path):
            pytest.skip(f"The test requires credential file {credential_path}: {item}")
def skip_if_airflow_2_test(item):
    """Skip Airflow-2-only tests when running against Airflow 1.10."""
    if os.environ.get("RUN_AIRFLOW_1_10") != "true":
        return
    for _ in item.iter_markers(name="airflow_2"):
        pytest.skip("The test works only with Airflow 2.0 / main branch")
def pytest_runtest_setup(item):
    """Apply the suite's skipping rules (integration/system/backend/long
    running/quarantined/credentials/airflow-2) to every collected test."""
    selected_integrations_list = item.config.getoption("--integration")
    selected_systems_list = item.config.getoption("--system")
    include_long_running = item.config.getoption("--include-long-running")
    include_quarantined = item.config.getoption("--include-quarantined")
    # Integration-marked tests: skip when the integration is disabled, and
    # (if a selection was passed) when the test is outside the selection.
    for marker in item.iter_markers(name="integration"):
        skip_if_integration_disabled(marker, item)
    if selected_integrations_list:
        skip_if_not_marked_with_integration(selected_integrations_list, item)
    # System tests run only when explicitly selected with --system.
    if selected_systems_list:
        skip_if_not_marked_with_system(selected_systems_list, item)
    else:
        skip_system_test(item)
    for marker in item.iter_markers(name="backend"):
        skip_if_wrong_backend(marker, item)
    selected_backend = item.config.getoption("--backend")
    if selected_backend:
        skip_if_not_marked_with_backend(selected_backend, item)
    # Long-running and quarantined tests are opt-in via their flags.
    if not include_long_running:
        skip_long_running_test(item)
    if not include_quarantined:
        skip_quarantined_test(item)
    skip_if_credential_file_missing(item)
    skip_if_airflow_2_test(item)
@pytest.fixture
def frozen_sleep(monkeypatch):
    """
    Use freezegun to "stub" sleep, so that it takes no time, but that
    ``datetime.now()`` appears to move forwards

    If your module under test does ``import time`` and then ``time.sleep``::

        def test_something(frozen_sleep):
            my_mod.fn_under_test()

    If your module under test does ``from time import sleep`` then you will
    have to mock that sleep function directly::

        def test_something(frozen_sleep, monkeypatch):
            monkeypatch.setattr('my_mod.sleep', frozen_sleep)
            my_mod.fn_under_test()
    """
    freezegun_control = None

    def fake_sleep(seconds):
        nonlocal freezegun_control
        utcnow = datetime.utcnow()
        if freezegun_control is not None:
            # Stop the previous freeze before installing a new one, so the
            # frozen clock advances by `seconds` on each call.
            freezegun_control.stop()
        freezegun_control = freezegun.freeze_time(utcnow + timedelta(seconds=seconds))
        freezegun_control.start()

    monkeypatch.setattr("time.sleep", fake_sleep)
    yield fake_sleep
    # Undo the last freeze so the wall clock is live again after the test.
    if freezegun_control is not None:
        freezegun_control.stop()
@pytest.fixture(scope="session")
def app():
    """Session-scoped Airflow webserver application in testing mode."""
    from airflow.www import app as app_module

    return app_module.create_app(testing=True)
@pytest.fixture
def dag_maker(request):
    """
    The dag_maker helps us to create DAG, DagModel, and SerializedDAG automatically.

    You have to use the dag_maker as a context manager and it takes
    the same argument as DAG::

        with dag_maker(dag_id="mydag") as dag:
            task1 = DummyOperator(task_id='mytask')
            task2 = DummyOperator(task_id='mytask2')

    If the DagModel you want to use needs different parameters than the one
    automatically created by the dag_maker, you have to update the DagModel as below::

        dag_maker.dag_model.is_active = False
        session.merge(dag_maker.dag_model)
        session.commit()

    For any test you use the dag_maker, make sure to create a DagRun::

        dag_maker.create_dagrun()

    The dag_maker.create_dagrun takes the same arguments as dag.create_dagrun

    If you want to operate on serialized DAGs, then either pass ``serialized=True`` to the ``dag_maker()``
    call, or you can mark your test/class/file with ``@pytest.mark.need_serialized_dag(True)``. In both of
    these cases the ``dag`` returned by the context manager will be a lazily-evaluated proxy object to the
    SerializedDAG.
    """
    import lazy_object_proxy
    # IMPORTANT: Delay _all_ imports from `airflow.*` to _inside a method_.
    # This fixture is "called" early on in the pytest collection process, and
    # if we import airflow.* here the wrong (non-test) config will be loaded
    # and "baked" in to various constants
    want_serialized = False
    # Allow changing default serialized behaviour with `@pytest.mark.need_serialized_dag` or
    # `@pytest.mark.need_serialized_dag(False)`
    serialized_marker = request.node.get_closest_marker("need_serialized_dag")
    if serialized_marker:
        (want_serialized,) = serialized_marker.args or (True,)
    from airflow.utils.log.logging_mixin import LoggingMixin
    class DagFactory(LoggingMixin):
        # True when this factory created its own Session (and must clean it up).
        _own_session = False
        def __init__(self):
            from airflow.models import DagBag
            # Keep all the serialized dags we've created in this test
            self.dagbag = DagBag(os.devnull, include_examples=False, read_dags_from_db=False)
        def __enter__(self):
            self.dag.__enter__()
            if self.want_serialized:
                # Defer serialization until the DAG is actually used.
                return lazy_object_proxy.Proxy(self._serialized_dag)
            return self.dag
        def _serialized_dag(self):
            # Callable target for the lazy proxy returned from __enter__.
            return self.serialized_model.dag
        def get_serialized_data(self):
            try:
                data = self.serialized_model.data
            except AttributeError:
                raise RuntimeError("DAG serialization not requested")
            if isinstance(data, str):
                return json.loads(data)
            return data
        def __exit__(self, type, value, traceback):
            # Persist the DAG (and optionally its serialized form) to the DB
            # on successful context exit; do nothing if the body raised.
            from airflow.models import DagModel
            from airflow.models.serialized_dag import SerializedDagModel
            dag = self.dag
            dag.__exit__(type, value, traceback)
            if type is not None:
                return
            dag.clear(session=self.session)
            dag.sync_to_db(self.session)
            self.dag_model = self.session.query(DagModel).get(dag.dag_id)
            if self.want_serialized:
                self.serialized_model = SerializedDagModel(dag)
                self.session.merge(self.serialized_model)
                serialized_dag = self._serialized_dag()
                self.dagbag.bag_dag(serialized_dag, root_dag=serialized_dag)
                self.session.flush()
            else:
                # NOTE(review): positional second argument is root_dag here,
                # unlike the keyword form above -- confirm intentional.
                self.dagbag.bag_dag(self.dag, self.dag)
        def create_dagrun(self, **kwargs):
            from airflow.utils import timezone
            from airflow.utils.state import State
            from airflow.utils.types import DagRunType
            dag = self.dag
            # Caller-supplied kwargs win over these defaults.
            kwargs = {
                "state": State.RUNNING,
                "start_date": self.start_date,
                "session": self.session,
                **kwargs,
            }
            # Need to provide run_id if the user does not either provide one
            # explicitly, or pass run_type for inference in dag.create_dagrun().
            if "run_id" not in kwargs and "run_type" not in kwargs:
                kwargs["run_id"] = "test"
            if "run_type" not in kwargs:
                kwargs["run_type"] = DagRunType.from_run_id(kwargs["run_id"])
            if "execution_date" not in kwargs:
                if kwargs["run_type"] == DagRunType.MANUAL:
                    kwargs["execution_date"] = self.start_date
                else:
                    kwargs["execution_date"] = dag.next_dagrun_info(None).logical_date
            if "data_interval" not in kwargs:
                logical_date = timezone.coerce_datetime(kwargs["execution_date"])
                if kwargs["run_type"] == DagRunType.MANUAL:
                    data_interval = dag.timetable.infer_manual_data_interval(run_after=logical_date)
                else:
                    data_interval = dag.infer_automated_data_interval(logical_date)
                kwargs["data_interval"] = data_interval
            self.dag_run = dag.create_dagrun(**kwargs)
            # Bind each TI to its (possibly serialized) task object.
            for ti in self.dag_run.task_instances:
                ti.refresh_from_task(dag.get_task(ti.task_id))
            return self.dag_run
        def create_dagrun_after(self, dagrun, **kwargs):
            # Schedule the run immediately following `dagrun` on the timetable.
            next_info = self.dag.next_dagrun_info(self.dag.get_run_data_interval(dagrun))
            if next_info is None:
                raise ValueError(f"cannot create run after {dagrun}")
            return self.create_dagrun(
                execution_date=next_info.logical_date,
                data_interval=next_info.data_interval,
                **kwargs,
            )
        def __call__(
            self, dag_id='test_dag', serialized=want_serialized, fileloc=None, session=None, **kwargs
        ):
            from airflow import settings
            from airflow.models import DAG
            from airflow.utils import timezone
            if session is None:
                self._own_session = True
                session = settings.Session()
            self.kwargs = kwargs
            self.session = session
            # start_date resolution order: explicit kwarg, default_args,
            # the test module's DEFAULT_DATE, then a fixed fallback.
            self.start_date = self.kwargs.get('start_date', None)
            default_args = kwargs.get('default_args', None)
            if default_args and not self.start_date:
                if 'start_date' in default_args:
                    self.start_date = default_args.get('start_date')
            if not self.start_date:
                if hasattr(request.module, 'DEFAULT_DATE'):
                    self.start_date = getattr(request.module, 'DEFAULT_DATE')
                else:
                    DEFAULT_DATE = timezone.datetime(2016, 1, 1)
                    self.start_date = DEFAULT_DATE
            self.kwargs['start_date'] = self.start_date
            self.dag = DAG(dag_id, **self.kwargs)
            self.dag.fileloc = fileloc or request.module.__file__
            self.want_serialized = serialized
            return self
        def cleanup(self):
            # Delete every DB row created for the DAGs this factory made.
            from airflow.models import DagModel, DagRun, TaskInstance, XCom
            from airflow.models.serialized_dag import SerializedDagModel
            from airflow.utils.retries import run_with_db_retries
            for attempt in run_with_db_retries(logger=self.log):
                with attempt:
                    dag_ids = list(self.dagbag.dag_ids)
                    if not dag_ids:
                        return
                    # To isolate problems here with problems from elsewhere on the session object
                    self.session.flush()
                    self.session.query(SerializedDagModel).filter(
                        SerializedDagModel.dag_id.in_(dag_ids)
                    ).delete(synchronize_session=False)
                    self.session.query(DagRun).filter(DagRun.dag_id.in_(dag_ids)).delete(
                        synchronize_session=False
                    )
                    self.session.query(TaskInstance).filter(TaskInstance.dag_id.in_(dag_ids)).delete(
                        synchronize_session=False
                    )
                    self.session.query(XCom).filter(XCom.dag_id.in_(dag_ids)).delete(
                        synchronize_session=False
                    )
                    self.session.query(DagModel).filter(DagModel.dag_id.in_(dag_ids)).delete(
                        synchronize_session=False
                    )
            self.session.commit()
            if self._own_session:
                self.session.expunge_all()
    factory = DagFactory()
    try:
        yield factory
    finally:
        factory.cleanup()
        with suppress(AttributeError):
            del factory.session
@pytest.fixture
def create_dummy_dag(dag_maker):
    """
    This fixture creates a `DAG` with a single `DummyOperator` task.
    DagRun and DagModel is also created.

    Apart from the already existing arguments, any other argument in kwargs
    is passed to the DAG and not to the DummyOperator task.

    If you have an argument that you want to pass to the DummyOperator that
    is not here, please use `default_args` so that the DAG will pass it to the
    Task::

        dag, task = create_dummy_dag(default_args={'start_date':timezone.datetime(2016, 1, 1)})

    You cannot alter the created DagRun or DagModel; use the `dag_maker` fixture instead.
    """
    from airflow.operators.dummy import DummyOperator
    from airflow.utils.types import DagRunType

    def create_dag(
        dag_id='dag',
        task_id='op1',
        max_active_tis_per_dag=16,
        pool='default_pool',
        executor_config=None,
        trigger_rule='all_done',
        on_success_callback=None,
        on_execute_callback=None,
        on_failure_callback=None,
        on_retry_callback=None,
        email=None,
        with_dagrun_type=DagRunType.SCHEDULED,
        **kwargs,
    ):
        # BUG FIX: the previous `executor_config={}` default was a mutable
        # dict shared across every call of this factory.
        executor_config = executor_config if executor_config is not None else {}
        with dag_maker(dag_id, **kwargs) as dag:
            op = DummyOperator(
                task_id=task_id,
                max_active_tis_per_dag=max_active_tis_per_dag,
                executor_config=executor_config,
                on_success_callback=on_success_callback,
                on_execute_callback=on_execute_callback,
                on_failure_callback=on_failure_callback,
                on_retry_callback=on_retry_callback,
                email=email,
                pool=pool,
                trigger_rule=trigger_rule,
            )
        if with_dagrun_type is not None:
            dag_maker.create_dagrun(run_type=with_dagrun_type)
        return dag, op

    return create_dag
@pytest.fixture
def create_task_instance(dag_maker, create_dummy_dag):
    """
    Create a TaskInstance, and associated DB rows (DagRun, DagModel, etc)

    Uses ``create_dummy_dag`` to create the dag structure.
    """

    def maker(execution_date=None, dagrun_state=None, state=None, run_id=None, run_type=None, **kwargs):
        if execution_date is None:
            from airflow.utils import timezone

            execution_date = timezone.utcnow()
        # Build the single-task DAG without its default dagrun; we create
        # the run ourselves below with the requested parameters.
        create_dummy_dag(with_dagrun_type=None, **kwargs)

        dagrun_kwargs = {"execution_date": execution_date, "state": dagrun_state}
        if run_id is not None:
            dagrun_kwargs["run_id"] = run_id
        if run_type is not None:
            dagrun_kwargs["run_type"] = run_type
        dagrun = dag_maker.create_dagrun(**dagrun_kwargs)

        (ti,) = dagrun.task_instances
        ti.state = state
        return ti

    return maker
@pytest.fixture()
def create_task_instance_of_operator(dag_maker):
    """Create a TaskInstance whose single task is built from `operator_class`."""

    def _create_task_instance(
        operator_class,
        *,
        dag_id,
        execution_date=None,
        session=None,
        **operator_kwargs,
    ):
        with dag_maker(dag_id=dag_id, session=session):
            operator_class(**operator_kwargs)
        dagrun_kwargs = {} if execution_date is None else {"execution_date": execution_date}
        (ti,) = dag_maker.create_dagrun(**dagrun_kwargs).task_instances
        return ti

    return _create_task_instance
@pytest.fixture()
def create_task_of_operator(dag_maker):
    """Create (but do not run) a task from `operator_class` inside a fresh DAG."""

    def _create_task_of_operator(operator_class, *, dag_id, session=None, **operator_kwargs):
        with dag_maker(dag_id=dag_id, session=session):
            return operator_class(**operator_kwargs)

    return _create_task_of_operator
@pytest.fixture
def session():
    """Yield a SQLAlchemy session, rolling back leftover state afterwards."""
    from airflow.utils.session import create_session

    with create_session() as db_session:
        yield db_session
        db_session.rollback()
| {
"content_hash": "2c80c583f21b9433e46e76cd086edf60",
"timestamp": "",
"source": "github",
"line_count": 744,
"max_line_length": 110,
"avg_line_length": 36.579301075268816,
"alnum_prop": 0.6183722212015432,
"repo_name": "apache/incubator-airflow",
"id": "d3578997ad246eab1e53b2666e759b403c012c4a",
"size": "28000",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
import time

try:
    import thread  # Python 2
except ImportError:
    import _thread as thread  # Python 3 compatibility


class Timer:
    """Repeatedly invoke a function on a background thread.

    Calls `function` every `interval` seconds, either forever
    (``length is None``) or at most `length` times.
    """

    def __init__(self, interval, function, length=None):
        self._ticks = self._remaining = length
        self._lock = thread.allocate_lock()
        self._interval = interval
        self._function = function
        self._running = False

    def start(self):
        """Reset the tick budget and begin ticking on a new thread."""
        self._lock.acquire()
        self._remaining = self._ticks
        self._running = True
        thread.start_new_thread(self._run, ())
        self._lock.release()

    def stop(self):
        """Ask the worker thread to stop after its current sleep."""
        self._lock.acquire()
        self._running = False
        self._lock.release()

    def is_running(self):
        # Unsynchronized read is acceptable for a boolean status flag.
        return self._running

    def _run(self):
        # Worker loop: sleep, fire the callback, count down when bounded.
        # (Fixed: the original used the Python-2-only `<>` operator and
        # `== None` equality instead of identity tests.)
        while self._running and (self._remaining is None or self._remaining > 0):
            time.sleep(self._interval)
            self._function()
            if self._remaining is not None:
                self._remaining -= 1
| {
"content_hash": "23a48fc18f44604d86dfbc4ef07326e3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 81,
"avg_line_length": 26.885714285714286,
"alnum_prop": 0.5483528161530287,
"repo_name": "localstatic/steep",
"id": "4c4fedc00c832135d146633c868cae5cccfe045f",
"size": "987",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "timer.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8619"
},
{
"name": "Shell",
"bytes": "118"
}
],
"symlink_target": ""
} |
import io
import logging
import sys
import traceback
def getExceptionTrackeback(ex: Exception) -> str:
    """Return the formatted traceback of `ex` as a string ('' if ex is None).

    Never raises: formatting failures are printed and the (possibly empty)
    string accumulated so far is returned.
    """
    traceback_str = ""
    if ex is not None:
        try:
            with io.StringIO() as f:
                # traceback.print_tb(sys.exc_info()[2], file=f)
                # BUG FIX: positional arguments work on every Python 3
                # version; the `etype=` keyword was removed in Python 3.10
                # and raised TypeError there.
                traceback.print_exception(type(ex), ex, ex.__traceback__, file=f)
                traceback_str = f.getvalue()
        except Exception as exx:
            print(exx)
    return traceback_str
def loggerAddFileHandler(logger: logging.Logger, log_file: str,
                         level: int = logging.WARN,
                         format: str = '"%(asctime)s - %(message)s"'):
    """Attach a FileHandler writing to `log_file` to `logger`.

    The handler filters at `level` and formats records with `format`.
    """
    file_handler = logging.FileHandler(log_file)
    file_handler.setLevel(level)
    file_handler.setFormatter(logging.Formatter(format))
    logger.addHandler(file_handler)
def initLogging(log_level=logging.INFO, format=None):
    """Initialize the logging framework with a default format and level."""
    # Format fields: name = logger name, thread = thread id.
    DEFAULT_LOG_FORMAT = (
        '%(asctime)s, %(name)s, %(thread)d'
        ', %(funcName)s, %(levelname)s: %(message)s'
    )
    # basicConfig takes effect only once - on the first configuration of
    # the root logger.
    logging.basicConfig(format=format or DEFAULT_LOG_FORMAT, level=log_level)
    # Announce via a logger named after this module.
    logging.getLogger(__name__).log(logging.INFO, "Initialized logging Framework.")
| {
"content_hash": "5518a2f34a863ccb39408a818b28e227",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 70,
"avg_line_length": 29.557692307692307,
"alnum_prop": 0.6193884189980482,
"repo_name": "abrichards5/ABRUnixScripts",
"id": "b1351c7551d229bc7e5ed04909988c906798af60",
"size": "1537",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bin/pylib/common/utils/logging_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Shell",
"bytes": "163122"
}
],
"symlink_target": ""
} |
from shared.card import CARD_PLACEHOLDER_LENGTH
import pygame
import pygame.locals as pl
from twisted.logger import Logger
class View(object):
    """Base class for screens/views.

    Maintains a tab order of focusable widgets and implements keyboard
    navigation with optional speech output (accessibility mode).
    """

    log = Logger()

    def __init__(self, display):
        self.display = display
        # Becomes True after update() has run once (firstUpdate() fired).
        self.first_call = False
        # Widgets reachable via the tab key, in cycling order.
        self.tab_order = []
        self.tab_position = 0
        if self.display.accessibility:
            from .speech import Speaker
            self.speaker = Speaker()

    def update(self):
        if not self.first_call:
            self.firstUpdate()
            self.first_call = True

    def render(self):
        pass

    def handleEvent(self, event):
        """Keyboard handling: tab/shift-tab cycles focus, ctrl re-announces
        the focused widget, return activates it."""
        if len(self.tab_order) == 0:
            return
        if event.type == pygame.KEYDOWN:
            if event.key == pl.K_TAB:
                try:
                    self.tab_order[self.tab_position].setFocus(False)
                except AttributeError:
                    pass
                # some weird problem here
                # after restoring the focus of the window by tabbing back into
                # it, the mod attribute won't be set correctly
                # that's why we will try to guess it here in a different way
                if pygame.key.get_mods() & pl.KMOD_LSHIFT == pl.KMOD_LSHIFT or \
                   pygame.key.get_mods() & pl.KMOD_RSHIFT == \
                   pl.KMOD_RSHIFT:
                    self.tab_position -= 1
                    if self.tab_position < 0:
                        self.tab_position = len(self.tab_order) - 1
                else:
                    self.tab_position += 1
                    if self.tab_position >= len(self.tab_order):
                        self.tab_position = 0
                self.speak(self.tab_order[self.tab_position].getLabel(), True)
                try:
                    self.tab_order[self.tab_position].setFocus(True)
                except AttributeError:
                    pass
            elif event.key == pl.K_LCTRL or event.key == pl.K_RCTRL:
                # BUG FIX: the right-ctrl branch previously compared the
                # pygame.key *module* against the key constant
                # (`pygame.key == pl.K_RCTRL`, always False), so right ctrl
                # never re-announced the focused widget.
                self.speak(self.tab_order[self.tab_position].getLabel(), True)
            elif event.key == pl.K_RETURN:
                try:
                    if self.tab_order[self.tab_position].getEnable():
                        self.tab_order[self.tab_position].getCallback()()
                        sound = self.tab_order[self.tab_position].getClickSound()
                        sound.stop()
                        sound.play()
                except (AttributeError, TypeError):
                    pass

    def speak(self, text, interrupt=False):
        """Speak `text` via the screen reader, replacing the card placeholder
        run with a translated "(free space)" marker."""
        if not self.display.accessibility:
            return
        self.speaker.output(text.replace('_'*CARD_PLACEHOLDER_LENGTH, "("+self.display.translator.translate("free space")+")"), interrupt)

    # will only be called once the view receives it's first update
    def firstUpdate(self):
        if len(self.tab_order) == 0:
            return
        self.speak(self.tab_order[0].getLabel(), False)
        try:
            self.tab_order[0].setFocus(True)
        except AttributeError:
            pass

    # can be overwritten
    # will be called each time the view will be removed
    def leave(self):
        pass

    def setNewTabPosition(self, position):
        self.tab_position = position

    def getTabOrder(self):
        return self.tab_order
"content_hash": "ed039fa49211968ca6c367f8efbb558a",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 134,
"avg_line_length": 27.88785046728972,
"alnum_prop": 0.5958445040214477,
"repo_name": "Timtam/cards-against-humanity",
"id": "9c39c328ab46f4d443595b9086db0e4a2dc885d7",
"size": "2984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "client/view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "296335"
}
],
"symlink_target": ""
} |
from django import template
from django.template.loader import render_to_string
from django.contrib.contenttypes.models import ContentType as CT
# NOTE(review): django.core.urlresolvers was removed in Django 2.0 in
# favour of django.urls — confirm the targeted Django version.
from django.core.urlresolvers import reverse
from valuate.models import Valuation, ValuationType as VT
from valuate.forms import ValuationForm
# Tag library that get_valuate / render_valuate register themselves on.
register = template.Library()
# Shorthand for the Valuation manager used by the node methods below.
VOs=Valuation.objects
'''
Defines the templatetags for easy integration in templates directly.
'''
class BaseValuateNode(template.Node):
    '''
    This is the base node for valuate app. Inherited by get and render
    template tags. The tags can use the default valuation type (pk=1 or
    DEFAULT_VALUATION_TYPE_PK setting). The valuation type if required can
    be specified by `for valuation_type` as arguments in the tag.

    Recognized layouts (`shift` accounts for extra tokens a subclass
    method injects, e.g. choice_count's "for_choice <choice>" pair):
        {% tag method of obj %}
        {% tag method of obj for vtype %}
        {% tag method of obj as varname %}
        {% tag method of obj for vtype as varname %}
    '''
    # Subclasses fill this with name -> unbound handler mappings.
    methods = {}

    def __init__(self, parser, token, shift=0):
        '''
        Parses tag arguments and provides attributes for future methods.
        '''
        tokens = token.contents.split()
        self.vtype = VT.objects.get_type()
        self.as_varname = False
        method = self.get_method(tokens[1])
        if not method:
            raise template.TemplateSyntaxError("%r is not a valid method in %r tag" %(tokens[1], tokens[0]))
        self.method = method
        if tokens[1]=='choice_count':
            # BUG FIX: the original length guard (< 5) admitted a 5-token
            # tag and then raised IndexError on tokens[5]; the choice value
            # requires at least 6 tokens.
            if len(tokens) < 6 or not tokens[4]=='for_choice':
                raise template.TemplateSyntaxError("Fourth argument in %r tag must be 'for_choice'" % tokens[0])
            self.choice = tokens[5]
            # The "for_choice <choice>" pair shifts all later positions.
            shift += 2
        if not tokens[2]=='of':
            raise template.TemplateSyntaxError("Second argument in %r tag must be 'of'" % tokens[0])
        self.obj = parser.compile_filter(tokens[3])
        if len(tokens)==4+shift:
            # Bare form: defaults already set above.
            pass
        elif len(tokens)==6+shift:
            if tokens[4+shift]=='for':
                self.vtype = VT.objects.get_type(tokens[5+shift])
            elif tokens[4+shift]=='as':
                self.as_varname = tokens[5+shift]
            else:
                # BUG FIX: the original indexed tokens[0+shift] for the tag
                # name; the tag name is always tokens[0].
                raise template.TemplateSyntaxError("Argument #%d in %r tag must be 'for' (valuation type) or 'as' (variable name)" % (4+shift, tokens[0]))
        elif len(tokens)==8+shift:
            # BUG FIX: the original condition `not tokens[4]=='for' and
            # tokens[7]=='as'` parsed as `(not a) and b`, so malformed tags
            # slipped through; it also tested token 7 for 'as' (token 6 is
            # 'as', token 7 is the variable name) and ignored `shift`.
            if tokens[4+shift]=='for' and tokens[6+shift]=='as':
                self.for_vtype = tokens[5+shift]
                self.vtype = VT.objects.get_type(tokens[5+shift])
                self.as_varname = tokens[7+shift]
            else:
                raise template.TemplateSyntaxError("Argument #%d in %r tag must be 'for' (valuation type) and argument #%d must be 'as' (variable name)" %(4+shift, tokens[0], 6+shift))
        else:
            raise template.TemplateSyntaxError("Number of arguments in %r tag can be %d, %d or %d and not %d" %(tokens[0], 3+shift, 5+shift, 7+shift, len(tokens)-1))

    def get_method(self, method):
        '''Return the handler registered under `method`, or None.'''
        return self.methods.get(method, None)
class ValuateGetNode(BaseValuateNode):
    '''
    This node provides various statistics or properties about valuation of
    an object. The properties can be directly rendered or added to context
    by passing `as varname` in template tag.
    '''
    methods = {}

    def score(self, context):
        '''
        Returns the average score of the object according to the
        valuations, rounded to two decimals; 0.0 when there are no
        valuations yet.
        '''
        avg_score = VOs.get_average_score(self.obj.resolve(context),
                                          vtype=self.vtype)
        try:
            return round(avg_score, 2)
        except TypeError:
            # get_average_score returned a non-numeric value (presumably
            # None when no valuations exist) — report a zero score.
            return 0.0
    methods['score'] = score

    def form(self, context):
        '''
        Gets the valuation form in the context or directly.
        Use `form_name.target` to access the target for the post request.
        '''
        request = context['request']
        form = ValuationForm(request, obj=self.obj.resolve(context),
                             vtype=self.vtype)
        # Limit selectable choices to this valuation type and label the
        # field after the type's title.
        form.fields['choice'].queryset = self.vtype.choice_queryset()
        form.fields['choice'].label = self.vtype.title
        return form
    methods['form'] = form

    def ajax_fields(self, context):
        '''
        Get the fields as dictionary required for an ajax post request in the
        context or directly.
        Variables available:
        For post request: 'content_type', 'object_pk', 'choice' (to be selected
        by user, can have an initial value if user has already submitted once)
        'choices': dictionary of choices for user to provide the 'value' data.
        'target': target for the request.
        'vtype': the valuation type.
        On a successful request, true will be returned.
        If you are not using a popular javascript library, pass on a POST
        variable with name `ajax` and a `true` value.
        '''
        request = context['request']
        vtype = self.vtype
        obj = self.obj.resolve(context)
        choices = vtype.choice_queryset()
        target = reverse('valuate-submit')
        # BUG FIX: the key was misspelled 'chocies', contradicting the
        # documented contract above and hiding the choices from any
        # template/javascript that reads 'choices'.
        fields = {'choices': choices, 'target': target,
                  'vtype': vtype}
        # Pre-fill from the client's earlier submission when there is one.
        initial_instance = VOs.get_by_obj_client(request, obj=obj,
                                                 vtype=vtype)
        if initial_instance:
            fields['content_type'] = initial_instance.content_type.id
            fields['object_pk'] = initial_instance.object_pk
            fields['choice'] = initial_instance.choice
        else:
            fields['content_type'] = CT.objects.get_for_model(obj).id
            fields['object_pk'] = obj.pk
        return fields
    methods['ajax_fields'] = ajax_fields

    def choice_count(self, context):
        '''
        Returns the score count for a particular choice of an object. Choice
        should be provided with quotes (as string).
        '''
        return VOs.get_count_for_choice(self.obj.resolve(context),
                                        choice=self.choice,
                                        vtype=self.vtype)
    methods['choice_count'] = choice_count

    def render(self, context):
        '''Run the parsed method; bind the result into the context when the
        tag used `as varname`, otherwise render it inline.'''
        result = self.method(self, context)
        if self.as_varname:
            context[self.as_varname] = result
            return ''
        return result
class ValuateRenderNode(BaseValuateNode):
    '''
    Nodes of this class render directly through an html template.
    Templates can be overridden at templates/valuate/*.html
    '''
    # Own registry so handlers do not leak into sibling node classes.
    methods = {}

    def form(self, context):
        '''
        Renders the valuation form for the object.
        Override template: 'valuate/form.html' for modifying the look.
        '''
        form = ValuationForm(context['request'],
                             obj=self.obj.resolve(context),
                             vtype=self.vtype)
        choice_field = form.fields['choice']
        choice_field.queryset = self.vtype.choice_queryset()
        choice_field.label = self.vtype.title
        context['valuate_form'] = form
        return render_to_string('valuate/form.html', context)
    methods['form'] = form

    def status(self, context):
        '''
        Renders the status according to the score of various choices.
        Override template: 'valuate/status.html' for modifying the look.
        '''
        resolved = self.obj.resolve(context)
        context['valuate_status'] = VOs.get_full_status(resolved,
                                                        vtype=self.vtype)
        return render_to_string('valuate/status.html', context)
    methods['status'] = status

    def render(self, context):
        '''Dispatch to the parsed method and return its rendered html.'''
        return self.method(self, context)
def do_get_valuate(parser, token):
    """Compilation function for the `get_valuate` template tag."""
    return ValuateGetNode(parser, token)
def do_render_valuate(parser, token):
    """Compilation function for the `render_valuate` template tag."""
    return ValuateRenderNode(parser, token)
# Expose both tags on this module's template library.
register.tag('get_valuate', do_get_valuate)
register.tag('render_valuate', do_render_valuate)
| {
"content_hash": "8cdefefd80e820d6137735cfac108065",
"timestamp": "",
"source": "github",
"line_count": 205,
"max_line_length": 170,
"avg_line_length": 39.04390243902439,
"alnum_prop": 0.584207896051974,
"repo_name": "crodjer/django-valuate",
"id": "585f6274b4ad9823a622818e5d1363bc318aa617",
"size": "8004",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "valuate/templatetags/valuation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "1768"
},
{
"name": "Python",
"bytes": "33034"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from event.models import Event, Product, Guest, Attending
# Register your models here.
# Expose the event app's models in the Django admin with the default
# ModelAdmin options (no custom list display or filters).
admin.site.register(Event)
admin.site.register(Product)
admin.site.register(Guest)
admin.site.register(Attending)
| {
"content_hash": "1e93163cac52e6a803e1078b4a21bdd6",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 57,
"avg_line_length": 29.375,
"alnum_prop": 0.8127659574468085,
"repo_name": "ayeletdn/SocialParty",
"id": "adf5e2580e5f57852896604dccfe61d116f99f8b",
"size": "235",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "event/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1077"
},
{
"name": "JavaScript",
"bytes": "14163"
},
{
"name": "Python",
"bytes": "16118"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.