Dataset schema (each row below is a pipe-delimited record in this column order):

- hexsha: string (length 40)
- size: int64 (1 to 1.03M)
- ext: string (10 classes)
- lang: string (1 class)
- max_stars_repo_path: string (length 3 to 239)
- max_stars_repo_name: string (length 5 to 130)
- max_stars_repo_head_hexsha: string (length 40 to 78)
- max_stars_repo_licenses: sequence (length 1 to 10)
- max_stars_count: int64 (1 to 191k), nullable
- max_stars_repo_stars_event_min_datetime: string (length 24), nullable
- max_stars_repo_stars_event_max_datetime: string (length 24), nullable
- max_issues_repo_path: string (length 3 to 239)
- max_issues_repo_name: string (length 5 to 130)
- max_issues_repo_head_hexsha: string (length 40 to 78)
- max_issues_repo_licenses: sequence (length 1 to 10)
- max_issues_count: int64 (1 to 67k), nullable
- max_issues_repo_issues_event_min_datetime: string (length 24), nullable
- max_issues_repo_issues_event_max_datetime: string (length 24), nullable
- max_forks_repo_path: string (length 3 to 239)
- max_forks_repo_name: string (length 5 to 130)
- max_forks_repo_head_hexsha: string (length 40 to 78)
- max_forks_repo_licenses: sequence (length 1 to 10)
- max_forks_count: int64 (1 to 105k), nullable
- max_forks_repo_forks_event_min_datetime: string (length 24), nullable
- max_forks_repo_forks_event_max_datetime: string (length 24), nullable
- content: string (length 1 to 1.03M)
- avg_line_length: float64 (1 to 958k)
- max_line_length: int64 (1 to 1.03M)
- alphanum_fraction: float64 (0 to 1)
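The rows below are pipe-delimited records in the column order listed above, with the full source file carried in `content`. As a rough sketch of how records shaped like this can be consumed (the underlying dataset is not named in this dump, so the `data_files` path below is a hypothetical placeholder), one might use the `datasets` library:

```python
# Minimal sketch for iterating records with the schema above.
# Assumption: the rows have been exported to a JSON-lines file named
# "code_dump.jsonl" -- substitute the real source of this dump.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_dump.jsonl", split="train")

for row in ds.select(range(3)):
    # Repo metadata columns sit alongside the raw file text in `content`.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])  # first 200 characters of the source file
```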
4a2458ec68258755a956a3e91c4387e3734d51c1 | 7,198 | py | Python | heat/tests/test_stack_lock.py | NeCTAR-RC/heat | b152817f192a7b46514793633ddc968c1fe1ebf8 | [
"Apache-2.0"
] | 1 | 2015-02-26T03:23:23.000Z | 2015-02-26T03:23:23.000Z | heat/tests/test_stack_lock.py | NeCTAR-RC/heat | b152817f192a7b46514793633ddc968c1fe1ebf8 | [
"Apache-2.0"
] | null | null | null | heat/tests/test_stack_lock.py | NeCTAR-RC/heat | b152817f192a7b46514793633ddc968c1fe1ebf8 | [
"Apache-2.0"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from heat.common import exception
from heat.db import api as db_api
from heat.engine import stack_lock
from heat.openstack.common.rpc import common as rpc_common
from heat.openstack.common.rpc import proxy
from heat.tests.common import HeatTestCase
from heat.tests import utils


class StackLockTest(HeatTestCase):
    def setUp(self):
        super(StackLockTest, self).setUp()
        utils.setup_dummy_db()
        self.context = utils.dummy_context()
        self.stack = self.m.CreateMockAnything()
        self.stack.id = "aae01f2d-52ae-47ac-8a0d-3fde3d220fea"
        self.stack.name = "test_stack"
        self.stack.action = "CREATE"
        self.engine_id = stack_lock.StackLock.generate_engine_id()

    def test_successful_acquire_new_lock(self):
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn(None)
        self.m.ReplayAll()

        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        slock.acquire()
        self.m.VerifyAll()

    def test_failed_acquire_existing_lock_current_engine(self):
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn(self.engine_id)
        self.m.ReplayAll()

        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()

    def test_successful_acquire_existing_lock_engine_dead(self):
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn("fake-engine-id")

        topic = self.stack.id
        self.m.StubOutWithMock(proxy.RpcProxy, "call")
        rpc = proxy.RpcProxy(topic, "1.0")
        rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
                 topic="fake-engine-id").AndRaise(rpc_common.Timeout)

        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
                                self.engine_id).AndReturn(None)
        self.m.ReplayAll()

        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        slock.acquire()
        self.m.VerifyAll()

    def test_failed_acquire_existing_lock_engine_alive(self):
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn("fake-engine-id")

        topic = self.stack.id
        self.m.StubOutWithMock(proxy.RpcProxy, "call")
        rpc = proxy.RpcProxy(topic, "1.0")
        rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
                 topic="fake-engine-id").AndReturn(True)
        self.m.ReplayAll()

        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()

    def test_failed_acquire_existing_lock_engine_dead(self):
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn("fake-engine-id")

        topic = self.stack.id
        self.m.StubOutWithMock(proxy.RpcProxy, "call")
        rpc = proxy.RpcProxy(topic, "1.0")
        rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
                 topic="fake-engine-id").AndRaise(rpc_common.Timeout)

        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
                                self.engine_id).\
            AndReturn("fake-engine-id2")
        self.m.ReplayAll()

        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()

    def test_successful_acquire_with_retry(self):
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn("fake-engine-id")

        topic = self.stack.id
        self.m.StubOutWithMock(proxy.RpcProxy, "call")
        rpc = proxy.RpcProxy(topic, "1.0")
        rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
                 topic="fake-engine-id").AndRaise(rpc_common.Timeout)

        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
                                self.engine_id).\
            AndReturn(True)

        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn("fake-engine-id")

        topic = self.stack.id
        rpc = proxy.RpcProxy(topic, "1.0")
        rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
                 topic="fake-engine-id").AndRaise(rpc_common.Timeout)

        db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
                                self.engine_id).\
            AndReturn(None)
        self.m.ReplayAll()

        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        slock.acquire()
        self.m.VerifyAll()

    def test_failed_acquire_one_retry_only(self):
        self.m.StubOutWithMock(db_api, "stack_lock_create")
        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn("fake-engine-id")

        topic = self.stack.id
        self.m.StubOutWithMock(proxy.RpcProxy, "call")
        rpc = proxy.RpcProxy(topic, "1.0")
        rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
                 topic="fake-engine-id").AndRaise(rpc_common.Timeout)

        self.m.StubOutWithMock(db_api, "stack_lock_steal")
        db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
                                self.engine_id).\
            AndReturn(True)

        db_api.stack_lock_create(self.stack.id, self.engine_id).\
            AndReturn("fake-engine-id")

        topic = self.stack.id
        rpc = proxy.RpcProxy(topic, "1.0")
        rpc.call(self.context, rpc.make_msg("listening"), timeout=2,
                 topic="fake-engine-id").AndRaise(rpc_common.Timeout)

        db_api.stack_lock_steal(self.stack.id, "fake-engine-id",
                                self.engine_id).\
            AndReturn(True)
        self.m.ReplayAll()

        slock = stack_lock.StackLock(self.context, self.stack, self.engine_id)
        self.assertRaises(exception.ActionInProgress, slock.acquire)
        self.m.VerifyAll()
| 39.333333 | 78 | 0.651153 |
4a245a64632cc880351e8f4cc1b5c88e99d742b6 | 830 | py | Python | analyse.py | Maz101/Bert | b3eb7a964205f3b2a9f718c1019e99ee9ee5794d | [
"Apache-2.0"
] | null | null | null | analyse.py | Maz101/Bert | b3eb7a964205f3b2a9f718c1019e99ee9ee5794d | [
"Apache-2.0"
] | null | null | null | analyse.py | Maz101/Bert | b3eb7a964205f3b2a9f718c1019e99ee9ee5794d | [
"Apache-2.0"
] | null | null | null | from sklearn.metrics import precision_score, accuracy_score, recall_score
import tensorflow as tf

flags = tf.flags
FLAGS = flags.FLAGS
flags.DEFINE_string("testpath", "None", "testpath dir")
flags.DEFINE_string("testpred", "None", "testpred dir")

# Gold labels: the third tab-separated column of the test file.
testd = []
with open(FLAGS.testpath, 'r') as testfile:
    for n in testfile:
        n = n.split('\t')
        testd.append(n[2])
print(len(testd))

# Predictions: 1-based index of the highest-scoring column on each line.
testpred = []
with open(FLAGS.testpred, 'r') as testfile:
    for n in testfile:
        n = n.split('\t')
        maxvalue = max(n)
        indexmx = n.index(maxvalue) + 1
        testpred.append(indexmx)
print(len(testpred))

ac = accuracy_score(testd, testpred)
pc = precision_score(testd, testpred)
rc = recall_score(testd, testpred)
print("** Accuracy", ac)
print("** Precision", pc)
print("** Recall", rc)
| 25.151515 | 73 | 0.653012 |
4a245ac6516e207c81c722673457b00ad12a4fc7 | 5,723 | py | Python | nhentai/logger.py | claron-akatsuki/nhentai | b850a23d5a521de35567075f7497fc8caabf4d3d | [
"MIT"
] | null | null | null | nhentai/logger.py | claron-akatsuki/nhentai | b850a23d5a521de35567075f7497fc8caabf4d3d | [
"MIT"
] | null | null | null | nhentai/logger.py | claron-akatsuki/nhentai | b850a23d5a521de35567075f7497fc8caabf4d3d | [
"MIT"
] | null | null | null | #
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
#
from __future__ import print_function, unicode_literals
import logging
import re
import platform
import sys

if platform.system() == 'Windows':
    import ctypes
    import ctypes.wintypes

    # Reference: https://gist.github.com/vsajip/758430
    #            https://github.com/ipython/ipython/issues/4252
    #            https://msdn.microsoft.com/en-us/library/windows/desktop/ms686047%28v=vs.85%29.aspx
    ctypes.windll.kernel32.SetConsoleTextAttribute.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD]
    ctypes.windll.kernel32.SetConsoleTextAttribute.restype = ctypes.wintypes.BOOL


class ColorizingStreamHandler(logging.StreamHandler):
    # color names to indices
    color_map = {
        'black': 0,
        'red': 1,
        'green': 2,
        'yellow': 3,
        'blue': 4,
        'magenta': 5,
        'cyan': 6,
        'white': 7,
    }

    # levels to (background, foreground, bold/intense)
    level_map = {
        logging.DEBUG: (None, 'blue', False),
        logging.INFO: (None, 'green', False),
        logging.WARNING: (None, 'yellow', False),
        logging.ERROR: (None, 'red', False),
        logging.CRITICAL: ('red', 'white', False)
    }
    csi = '\x1b['
    reset = '\x1b[0m'
    disable_coloring = False

    @property
    def is_tty(self):
        isatty = getattr(self.stream, 'isatty', None)
        return isatty and isatty() and not self.disable_coloring

    def emit(self, record):
        try:
            message = self.format(record)
            stream = self.stream
            if not self.is_tty:
                if message and message[0] == "\r":
                    message = message[1:]
                stream.write(message)
            else:
                self.output_colorized(message)
            stream.write(getattr(self, 'terminator', '\n'))
            self.flush()
        except (KeyboardInterrupt, SystemExit):
            raise
        except IOError:
            pass
        except:
            self.handleError(record)

    if not platform.system() == 'Windows':
        def output_colorized(self, message):
            self.stream.write(message)
    else:
        ansi_esc = re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m')

        nt_color_map = {
            0: 0x00,    # black
            1: 0x04,    # red
            2: 0x02,    # green
            3: 0x06,    # yellow
            4: 0x01,    # blue
            5: 0x05,    # magenta
            6: 0x03,    # cyan
            7: 0x07,    # white
        }

        def output_colorized(self, message):
            parts = self.ansi_esc.split(message)
            write = self.stream.write
            h = None
            fd = getattr(self.stream, 'fileno', None)

            if fd is not None:
                fd = fd()
                if fd in (1, 2):  # stdout or stderr
                    h = ctypes.windll.kernel32.GetStdHandle(-10 - fd)

            while parts:
                text = parts.pop(0)
                if text:
                    if sys.version_info < (3, 0, 0):
                        write(text.encode('utf-8'))
                    else:
                        write(text)

                if parts:
                    params = parts.pop(0)
                    if h is not None:
                        params = [int(p) for p in params.split(';')]
                        color = 0
                        for p in params:
                            if 40 <= p <= 47:
                                color |= self.nt_color_map[p - 40] << 4
                            elif 30 <= p <= 37:
                                color |= self.nt_color_map[p - 30]
                            elif p == 1:
                                color |= 0x08  # foreground intensity on
                            elif p == 0:  # reset to default color
                                color = 0x07
                            else:
                                pass  # error condition ignored
                        ctypes.windll.kernel32.SetConsoleTextAttribute(h, color)

    def colorize(self, message, record):
        if record.levelno in self.level_map and self.is_tty:
            bg, fg, bold = self.level_map[record.levelno]
            params = []

            if bg in self.color_map:
                params.append(str(self.color_map[bg] + 40))
            if fg in self.color_map:
                params.append(str(self.color_map[fg] + 30))
            if bold:
                params.append('1')

            if params and message:
                if message.lstrip() != message:
                    prefix = re.search(r"\s+", message).group(0)
                    message = message[len(prefix):]
                else:
                    prefix = ""
                message = "%s%s" % (prefix, ''.join((self.csi, ';'.join(params),
                                                     'm', message, self.reset)))
        return message

    def format(self, record):
        message = logging.StreamHandler.format(self, record)
        return self.colorize(message, record)


logging.addLevelName(15, "INFO")
logger = logging.getLogger('nhentai')
LOGGER_HANDLER = ColorizingStreamHandler(sys.stdout)
FORMATTER = logging.Formatter("\r[%(asctime)s] [%(levelname)s] %(message)s", "%H:%M:%S")
LOGGER_HANDLER.setFormatter(FORMATTER)
LOGGER_HANDLER.level_map[logging.getLevelName("INFO")] = (None, "cyan", False)

logger.addHandler(LOGGER_HANDLER)
logger.setLevel(logging.DEBUG)

if __name__ == '__main__':
    logger.log(15, 'nhentai')
    logger.info('info')
    logger.warn('warn')
    logger.debug('debug')
    logger.error('error')
    logger.critical('critical')
| 31.618785 | 108 | 0.510571 |
4a245b9872c93b0604570db213e9b631553a0299 | 582 | py | Python | retail-smoke/go_open_form.py | nemo0101/1C-Retail-smoke-tests | dddf3d5cd56b087c8c5cae2c7bf776407a39cb4d | [
"MIT"
] | null | null | null | retail-smoke/go_open_form.py | nemo0101/1C-Retail-smoke-tests | dddf3d5cd56b087c8c5cae2c7bf776407a39cb4d | [
"MIT"
] | 5 | 2021-03-18T22:23:05.000Z | 2022-03-11T23:40:48.000Z | retail-smoke/go_open_form.py | nemo0101/1C-Retail-smoke-tests | dddf3d5cd56b087c8c5cae2c7bf776407a39cb4d | [
"MIT"
] | 1 | 2020-03-11T20:58:19.000Z | 2020-03-11T20:58:19.000Z | # -*- coding: utf8 -*
from full_cycle import click_or_enter_text, push

'''
Main workflow logic for the open_form mode
'''


def click_main_and_section(main_elem, section_elem, main_data):
    click_or_enter_text(main_elem, main_data)
    click_or_enter_text(section_elem, main_data)


def open_forms(main_data: object):
    table = main_data.table_elements()['table']
    for x in table:
        for y in x['table']['table']:
            click_main_and_section(x, y, main_data)
            push('insert', 1, main_data)
            push('esc', 4, main_data)
| 25.304348 | 64 | 0.656357 |
4a245bba1f877df956cda97ac3dc2b64646373b7 | 4,994 | py | Python | okr/admin/youtube.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | 2 | 2021-07-28T08:46:13.000Z | 2022-01-19T17:05:48.000Z | okr/admin/youtube.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | 3 | 2020-11-10T23:34:17.000Z | 2021-03-31T16:19:21.000Z | okr/admin/youtube.py | wdr-data/wdr-okr | 71c9e6e8d3521b1bb67d30310a93584389de2127 | [
"MIT"
] | null | null | null | """Forms for managing YouTube data."""
from django.contrib import admin

from ..models import (
    YouTube,
    YouTubeAnalytics,
    YouTubeDemographics,
    YouTubeTrafficSource,
    YouTubeVideo,
    YouTubeVideoAnalytics,
    YouTubeVideoDemographics,
    YouTubeVideoTrafficSource,
    YouTubeVideoSearchTerm,
    YouTubeVideoExternalTraffic,
)
from .base import QuintlyAdmin


class YouTubeAnalyticsAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube analytics data to edit."""

    list_display = [
        "date",
        "youtube",
        "subscribers",
        "quintly_last_updated",
        "last_updated",
    ]
    list_display_links = ["date"]
    list_filter = ["youtube"]
    date_hierarchy = "date"


class YouTubeDemographicsAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube demographics data to edit."""

    list_display = [
        "date",
        "youtube",
        "age_range",
        "gender",
        "quintly_last_updated",
        "last_updated",
    ]
    list_display_links = ["date"]
    list_filter = ["youtube", "age_range", "gender"]
    date_hierarchy = "date"


class YouTubeTrafficSourceAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube traffic source data to edit."""

    list_display = [
        "date",
        "youtube",
        "source_type",
        "views",
        "watch_time",
        "quintly_last_updated",
        "last_updated",
    ]
    list_display_links = ["date"]
    list_filter = ["youtube", "source_type"]
    date_hierarchy = "date"


class YouTubeVideoAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube video base data to edit."""

    list_display = [
        "published_at",
        "title",
        "duration",
        "is_livestream",
        "external_id",
        "quintly_last_updated",
        "last_updated",
    ]
    list_display_links = ["published_at", "title"]
    list_filter = ["youtube", "is_livestream"]
    search_fields = ["title", "external_id"]
    date_hierarchy = "published_at"


class YouTubeVideoAnalyticsAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube video analytics data to edit."""

    list_display = [
        "date",
        "youtube_video",
        "views",
        "watch_time",
        "last_updated",
    ]
    list_display_links = ["date", "youtube_video"]
    list_filter = ["youtube_video__youtube", "live_or_on_demand"]
    search_fields = ["youtube_video__title", "youtube_video__external_id"]
    date_hierarchy = "date"


class YouTubeVideoDemographicsAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube video demographics data to edit."""

    list_display = [
        "youtube_video",
        "age_range",
        "gender",
        "views_percentage",
        "last_updated",
    ]
    list_display_links = ["youtube_video"]
    list_filter = ["youtube_video__youtube", "age_range", "gender"]
    search_fields = ["youtube_video__title", "youtube_video__external_id"]


class YouTubeVideoTrafficSourceAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube video traffic source data to edit."""

    list_display = [
        "youtube_video",
        "source_type",
        "views",
        "watch_time",
        "last_updated",
    ]
    list_display_links = ["youtube_video", "source_type"]
    search_fields = ["youtube_video__title", "youtube_video__external_id"]
    list_filter = ["source_type", "youtube_video__youtube"]


class YouTubeVideoSearchTermAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube video search term data to edit."""

    list_display = [
        "youtube_video",
        "search_term",
        "views",
        "watch_time",
        "last_updated",
    ]
    list_display_links = ["youtube_video"]
    search_fields = ["youtube_video__title", "youtube_video__external_id"]
    list_filter = ["youtube_video__youtube"]


class YouTubeVideoExternalTrafficAdmin(admin.ModelAdmin):
    """List for choosing existing YouTube video search term data to edit."""

    list_display = [
        "youtube_video",
        "name",
        "views",
        "watch_time",
        "last_updated",
    ]
    list_display_links = ["youtube_video"]
    search_fields = ["youtube_video__title", "youtube_video__external_id"]
    list_filter = ["youtube_video__youtube"]


admin.site.register(YouTube, QuintlyAdmin)
admin.site.register(YouTubeAnalytics, YouTubeAnalyticsAdmin)
admin.site.register(YouTubeDemographics, YouTubeDemographicsAdmin)
admin.site.register(YouTubeTrafficSource, YouTubeTrafficSourceAdmin)
admin.site.register(YouTubeVideo, YouTubeVideoAdmin)
admin.site.register(YouTubeVideoAnalytics, YouTubeVideoAnalyticsAdmin)
admin.site.register(YouTubeVideoDemographics, YouTubeVideoDemographicsAdmin)
admin.site.register(YouTubeVideoTrafficSource, YouTubeVideoTrafficSourceAdmin)
admin.site.register(YouTubeVideoSearchTerm, YouTubeVideoSearchTermAdmin)
admin.site.register(YouTubeVideoExternalTraffic, YouTubeVideoExternalTrafficAdmin)
| 29.034884 | 82 | 0.684221 |
4a245bd27b3b61ab45eca2779ee1685d114feafd | 7,549 | py | Python | scripts/utilities.py | Angelina15638749981/HDI-Project | 9dfe5d9c883efc37d285c852fcc92a02c6cdd945 | [
"MIT"
] | 1 | 2018-06-09T13:38:08.000Z | 2018-06-09T13:38:08.000Z | scripts/utilities.py | HangJie720/ATM | 9dfe5d9c883efc37d285c852fcc92a02c6cdd945 | [
"MIT"
] | null | null | null | scripts/utilities.py | HangJie720/ATM | 9dfe5d9c883efc37d285c852fcc92a02c6cdd945 | [
"MIT"
] | 1 | 2021-01-07T13:24:15.000Z | 2021-01-07T13:24:15.000Z | from __future__ import print_function
import argparse
import numpy as np
import os
from collections import defaultdict
from multiprocessing import Process
from sklearn.metrics import auc
from atm.config import *
from atm.worker import work
from atm.database import db_session
from atm.utilities import download_file_http
try:
import matplotlib.pyplot as plt
except ImportError:
plt = None
BASELINE_PATH = os.path.join(PROJECT_ROOT, 'test/baselines/best_so_far_multi_trial/')
DATA_URL = 'https://s3.amazonaws.com/mit-dai-delphi-datastore/downloaded/'
BASELINE_URL = 'https://s3.amazonaws.com/mit-dai-delphi-datastore/best_so_far_multi_trial/'
def get_best_so_far(db, datarun_id):
"""
Get a series representing best-so-far performance for datarun_id.
"""
# generate a list of the "best so far" score after each classifier was
# computed (in chronological order)
classifiers = db.get_classifiers(datarun_id=datarun_id)
y = []
for l in classifiers:
best_so_far = max(y + [l.cv_judgment_metric])
y.append(best_so_far)
return y
def graph_series(length, title, **series):
"""
Graph series of performance metrics against one another.
length: all series will be truncated to this length
title: what to title the graph
**series: mapping of labels to series of performance data
"""
if plt is None:
raise ImportError("Unable to import matplotlib")
lines = []
for label, data in series.items():
# copy up to `length` of the values in `series` into y.
y = data[:length]
x = range(len(y))
# plot y against x
line, = plt.plot(x, y, '-', label=label)
lines.append(line)
plt.xlabel('classifiers')
plt.ylabel('performance')
plt.title(title)
plt.legend(handles=lines)
plt.show()
def report_auc_vs_baseline(db, rids, graph=False):
if len(rids) == 0:
return
rid = rids[0]
with db_session(db):
run = db.get_datarun(rid)
ds = run.dataset
test = np.array([[float(y) for y in get_best_so_far(db, rid)] for rid in rids])
test = test.T
mean_test = np.mean(test, axis =1).tolist()
ds_file = os.path.basename(ds.train_path)
bl_path = download_file_http(BASELINE_URL + ds_file,
local_folder=BASELINE_PATH)
with open(bl_path) as f:
baseline = np.array([[float(each) for each in l.strip().split('\t')] for l in f])
mean_baseline = np.mean(baseline, axis =1).tolist()
min_len = min(baseline.shape[0], test.shape[0])
x = range(min_len)
test_aucs = np.array([auc(x, test[:min_len,row]) for row in range(test.shape[1])])
bl_aucs = np.array([auc(x, baseline[:min_len,row]) for row in range(baseline.shape[1])])
# get avg, std, min of AUC over trials
mean_auc_test = np.mean(test_aucs)
mean_auc_bl = np.mean(bl_aucs)
std_auc_test = np.std(test_aucs)
std_auc_bl = np.std(bl_aucs)
min_auc_test = np.min(test_aucs)
min_auc_bl = np.min(bl_aucs)
mean_auc_diff = mean_auc_test - mean_auc_bl
print('Dataset %s (dataruns %s)' % (ds_file, rids))
print ('Comparing %d trials to baseline generated by %d trials'%(len(rids), baseline.shape[1]))
print('MEAN AUC: test = %.3f, baseline = %.3f (%.3f)' % (mean_auc_test, mean_auc_bl, mean_auc_diff))
print('STD AUC: test = %.3f, baseline = %.3f' % (std_auc_test, std_auc_bl))
print('MIN AUC: test = %.3f, baseline = %.3f' % (min_auc_test, min_auc_bl))
if graph:
graph_series(100, ds_file, baseline=mean_baseline, test=mean_test)
return mean_auc_test, mean_auc_bl
def print_summary(db, rid):
run = db.get_datarun(rid)
ds = db.get_dataset(run.dataset_id)
print()
print('Dataset %s' % ds)
print('Datarun %s' % run)
classifiers = db.get_classifiers(datarun_id=rid)
errs = db.get_classifiers(datarun_id=rid, status=ClassifierStatus.ERRORED)
complete = db.get_classifiers(datarun_id=rid,
status=ClassifierStatus.COMPLETE)
print('Classifiers: %d total; %d errors, %d complete' %
(len(classifiers), len(errs), len(complete)))
best = db.get_best_classifier(score_target=run.score_target,
datarun_id=run.id)
if best is not None:
score = best.cv_judgment_metric
err = 2 * best.cv_judgment_metric_stdev
print('Best result overall: classifier %d, %s = %.3f +- %.3f' %\
(best.id, run.metric, score, err))
def print_method_summary(db, rid):
# maps methods to sets of hyperpartitions, and hyperpartitions to lists of
# classifiers
alg_map = {a: defaultdict(list) for a in db.get_methods(datarun_id=rid)}
run = db.get_datarun(rid)
classifiers = db.get_classifiers(datarun_id=rid)
for l in classifiers:
hp = db.get_hyperpartition(l.hyperpartition_id)
alg_map[hp.method][hp.id].append(l)
for alg, hp_map in alg_map.items():
print()
print('method %s:' % alg)
classifiers = sum(hp_map.values(), [])
errored = len([l for l in classifiers if l.status ==
ClassifierStatus.ERRORED])
complete = len([l for l in classifiers if l.status ==
ClassifierStatus.COMPLETE])
print('\t%d errored, %d complete' % (errored, complete))
best = db.get_best_classifier(score_target=run.score_target,
datarun_id=rid, method=alg)
if best is not None:
score = best.cv_judgment_metric
err = 2 * best.cv_judgment_metric_stdev
print('\tBest: classifier %s, %s = %.3f +- %.3f' % (best, run.metric,
score, err))
def print_hp_summary(db, rid):
run = db.get_datarun(rid)
classifiers = db.get_classifiers(datarun_id=rid)
part_map = defaultdict(list)
for c in classifiers:
hp = c.hyperpartition_id
part_map[hp].append(c)
for hp, classifiers in part_map.items():
print()
print('hyperpartition', hp)
print(db.get_hyperpartition(hp))
errored = len([c for c in classifiers if c.status ==
ClassifierStatus.ERRORED])
complete = len([c for c in classifiers if c.status ==
ClassifierStatus.COMPLETE])
print('\t%d errored, %d complete' % (errored, complete))
best = db.get_best_classifier(score_target=run.score_target,
datarun_id=rid, hyperpartition_id=hp)
if best is not None:
score = best.cv_judgment_metric
err = 2 * best.cv_judgment_metric_stdev
print('\tBest: classifier %s, %s = %.3f +- %.3f' % (best, run.metric,
score, err))
def work_parallel(db, datarun_ids=None, aws_config=None, n_procs=4):
print('starting workers...')
kwargs = dict(db=db, datarun_ids=datarun_ids, save_files=False,
choose_randomly=True, cloud_mode=False,
aws_config=aws_config, wait=False)
if n_procs > 1:
# spawn a set of worker processes to work on the dataruns
procs = []
for i in range(n_procs):
p = Process(target=work, kwargs=kwargs)
p.start()
procs.append(p)
# wait for them to finish
for p in procs:
p.join()
else:
work(**kwargs)
| 35.947619 | 104 | 0.621539 |
4a245cc90a968a3512613c30eae839f6d8932ce0 | 928 | py | Python | Solutions/Longest Substring Without Repeating Characters/longestSubstring.py | Crayzero/LeetCodeProgramming | b10ebe22c0de1501722f0f5c934c0c1902a26789 | [
"MIT"
] | 1 | 2015-04-13T10:58:30.000Z | 2015-04-13T10:58:30.000Z | Solutions/Longest Substring Without Repeating Characters/longestSubstring.py | Crayzero/LeetCodeProgramming | b10ebe22c0de1501722f0f5c934c0c1902a26789 | [
"MIT"
] | null | null | null | Solutions/Longest Substring Without Repeating Characters/longestSubstring.py | Crayzero/LeetCodeProgramming | b10ebe22c0de1501722f0f5c934c0c1902a26789 | [
"MIT"
] | null | null | null | class Solution:
    # @return an integer
    def lengthOfLongestSubstring(self, s):
        # Sliding-window scan: `a` holds the current substring without repeats.
        s = s.strip()
        if not s:
            return 0
        else:
            a = []
            max = 0
            for i in s:
                if i not in a:
                    a.append(i)
                else:
                    if len(a) > max:
                        max = len(a)
                    a = a[a.index(i)+1:]
                    a.append(i)
            if len(a) > max:
                max = len(a)
            return max


if __name__ == "__main__":
    s = Solution()
    print s.lengthOfLongestSubstring("")
    print s.lengthOfLongestSubstring("p")
    print s.lengthOfLongestSubstring("abcabcbb")
    print s.lengthOfLongestSubstring("abc")
    print s.lengthOfLongestSubstring("bbbb")
    print s.lengthOfLongestSubstring("wlrbbmqbhcdarzowkkyhiddqscdxrjmowfrxsjybldbefsarcbynecdyggxxpklorellnmpapqfwkhopkmco") | 30.933333 | 124 | 0.520474 |
4a245dbbd72950bce1bbff5748286188e1028456 | 533 | py | Python | hipster/min_heap.py | soumasish/hipster | 1fe3f0526839a2a418a7e9e6a0557bfc4a5ef061 | [
"MIT"
] | null | null | null | hipster/min_heap.py | soumasish/hipster | 1fe3f0526839a2a418a7e9e6a0557bfc4a5ef061 | [
"MIT"
] | null | null | null | hipster/min_heap.py | soumasish/hipster | 1fe3f0526839a2a418a7e9e6a0557bfc4a5ef061 | [
"MIT"
] | null | null | null | import heapq
from hipster.error import HeapError
from hipster.heap import Heap


class MinHeap(Heap):
    def __init__(self):
        super().__init__()

    def peek(self):
        if len(self.heap) == 0:
            raise HeapError("Peeking into an empty heap")
        with self.read_lock:
            return heapq.nsmallest(1, self.heap)[0]

    def pop(self):
        if len(self.heap) == 0:
            raise HeapError("Popping off an empty heap")
        with self.write_lock:
            return heapq.heappop(self.heap)
| 20.5 | 57 | 0.604128 |
4a245e97efc9d2d92105987a2cf06d46fc0b3250 | 31,701 | py | Python | brook-web.py | Ccapton/brook-web | 67f145406bd3faf6b14642af3f05a4d87d669ba5 | [
"MIT"
] | 253 | 2018-10-30T01:57:30.000Z | 2022-01-13T01:47:33.000Z | brook-web.py | Ccapton/brook-web | 67f145406bd3faf6b14642af3f05a4d87d669ba5 | [
"MIT"
] | 16 | 2018-11-05T06:53:20.000Z | 2022-02-14T20:07:22.000Z | brook-web.py | Ccapton/brook-web | 67f145406bd3faf6b14642af3f05a4d87d669ba5 | [
"MIT"
] | 86 | 2018-10-31T06:06:04.000Z | 2022-02-25T12:51:49.000Z | # coding=utf-8
# 。——————————————————————————————————————————
# 。
# 。 brook-web.py
# 。
# 。 @Time : 18-10-27 下午4:09
# 。 @Author : capton
# 。 @Software: PyCharm
# 。 @Blog : http://ccapton.cn
# 。 @Github : https://github.com/ccapton
# 。 @Email : [email protected]
# 。__________________________________________
from __future__ import print_function # 同时兼容python2、Python3
from __future__ import division # 同时兼容python2、Python3
from flask import Flask, render_template, send_from_directory, request
from flask_apscheduler import APScheduler
from flask_restful import Api
from flask_restful import Resource, reqparse
import json, os, re, sys
from qr import *
from iptables import release_port, refuse_port
# 判断当前Python执行大版本
python_version = sys.version
if python_version.startswith('2.'):
python_version = '2'
elif python_version.startswith('3.'):
python_version = '3'
# 服务进程号
brook_pid = ''
ss_pid = ''
socks5_pid = ''
# 主机ip
host_ip = None
# 模拟同步的标志 当进行配置信息的保存操作(文件写入)时必须对busy进行赋值为True,保存后赋值为False
busy = False
# 服务类型
SERVICE_TYPE_BROOK = 0
SERVICE_TYPE_SS = 1
SERVICE_TYPE_SOCKS5 = 2
salt = 'brook-web'
split_word = '---YnJvb2std2Vi---'
# Resource封装类,简化数据参数的配置
class BaseResource(Resource):
def __init__(self):
Resource.__init__(self)
self.parser = reqparse.RequestParser()
self.add_args()
self.create_args()
# 等待子类重写
def add_args(self):
pass
def add_argument(self, *args, **kwargs):
self.parser.add_argument(*args, **kwargs)
def create_args(self):
self.args = self.parser.parse_args()
def get_arg(self, key):
return self.args[key]
app = Flask(__name__)
api = Api(app)
default_port = 5000
from conf import config
app.config.from_object(config)
from models import db
db.init_app(app)
from models.system_user import SystemUser
# 默认服务信息(随机端口)
def default_config_json():
import random
random_port = random.randint(10000, 30000)
random_port2 = random.randint(10000, 30000)
random_port3 = random.randint(10000, 30000)
while random_port == random_port2:
random_port2 = random.randint(10000, 30000)
while random_port3 == random_port2 or random_port == random_port2:
random_port3 = random.randint(10000, 30000)
init_config_json = {
'brook': [{'port': 6666, 'psw': '6666', 'state': 0, 'info': '若无法开启,删除后再添加'}],
'shadowsocks': [],
'socks5': [],
}
return init_config_json
# 默认用户信息
def default_user(username="admin", password="admin", email=''):
return {"username": username, "password": password, 'email': email}
# 当前服务实时状态对象
current_brook_state = {}
import sys
from flask import jsonify
# # 用户信息保存路径
# default_userjson_path = os.path.join(sys.path[0], "static/json/user.json")
# 服务信息配置保存路径
config_json_path = os.path.join(sys.path[0], "static/json/brook_state.json")
# 基类json对象格式输出函数
def base_result(msg="", data=None, code=0):
return jsonify({"msg": msg, "data": data, "code": code})
# 读取json文件,若没有对应文件则创建一个json文件、写入myjson的内容
def load_json(path, myjson):
if not os.path.exists(path):
os.system("touch %s" % path)
f = open(path, 'r')
json_str = f.read()
f.close()
if json_str == '':
with open(path, 'w') as f2:
f2.write(json.dumps(myjson, ensure_ascii=False))
f = open(path, 'r')
json_str = f.read()
config_json = json.loads(json_str)
f.close()
return config_json
# 读取服务配置信息
def load_config_json():
return load_json(config_json_path, default_config_json())
# 读取用户信息
def load_default_userjson():
user = None
try:
user = SystemUser.query.first()
except Exception as e:
print(e)
if user:
return {'username': user.username, 'password': user.password, 'email': user.email}
else:
return {'username': 'admin', 'password': 'admin', 'email': ''}
# 保存当前用户信息
def save_userjson(userjson):
try:
user = SystemUser.query.first()
if user:
user.username = userjson.get('username')
user.password = userjson.get('password')
user.email = userjson.get('email')
else:
user = SystemUser(username=userjson.get('username'), password=userjson.get('password'), email=userjson.get('email'))
db.session.add(user)
db.session.commit()
except Exception as e:
print(e)
db.session.rollback()
# 保存当前服务配置
def save_config_json(config_json):
with open(config_json_path, 'w') as f:
f.write(json.dumps(config_json, ensure_ascii=False))
# 输出ico文件
class Favicon(BaseResource):
def get(self):
return send_from_directory(os.path.join(app.root_path, 'static'), 'favicon.ico',
mimetype='image/vnd.microsoft.icon')
def check_base64_data(is_get_service_state=False, is_post=True):
client_data = request.json.get('data') if request.json and request.json.get('data') else (request.form.get('data') if is_post else request.args.get('data'))
data = base64decode(client_data, python_version)
client_salt, raw_data = str(data).split(split_word)
return (base64decode(client_salt, python_version) == salt and base64decode(raw_data, python_version) == salt)\
if is_get_service_state else base64decode(client_salt, python_version) == salt
def get_base64_data(key, is_post=True):
client_data = request.json.get(key) if request.json and request.json.get(key) else (
request.form.get(key) if is_post else request.args.get(key))
data = base64decode(client_data, python_version)
client_salt, raw_data = str(data).split(split_word)
if base64decode(client_salt, python_version) == salt:
return base64decode(raw_data, python_version) if raw_data and len(raw_data) > 0 else ''
# 登录api
class Login(BaseResource):
def add_args(self):
self.add_argument('username', type=str, help='Username')
self.add_argument('password', type=str, help='Password')
def login(self, is_post):
username = get_base64_data('username', is_post)
password = get_base64_data('password', is_post)
user = load_default_userjson()
name = user['username']
psw = user['password']
if name == username and psw == password:
return base_result(msg="Login Successful!", code=0)
return base_result(msg="Login failed!", code=-1)
def post(self):
return self.login(True)
def get(self):
return self.login(False)
# 重置用户信息api
class ResetPsw(BaseResource):
def add_args(self):
self.add_argument('old_username', type=str, help='Old Username')
self.add_argument('old_password', type=str, help='Old Password')
self.add_argument('username', type=str, help='New Username')
self.add_argument('password', type=str, help='New Password')
#
# code : 1 新用户名为空
# code : 2 新密码名为空
# code : -1 旧用户信息不正确
#
def reset_psw(self, is_post):
username = get_base64_data('username', is_post)
password = get_base64_data('password', is_post)
old_username = base64decode(base64decode(get_base64_data('old_username', is_post), python_version).split(split_word)[1], python_version)
old_password = base64decode(base64decode(get_base64_data('old_password', is_post), python_version).split(split_word)[1], python_version)
code = 0
user = load_default_userjson()
if old_username == user['username'] and old_password == user['password']:
if len(username) <= 0:
code = 1
return base_result(msg='Reset User Failed!', code=code)
if len(password) <= 0:
code = 2
return base_result(msg='Reset User Failed!', code=code)
save_userjson(default_user(username=username, password=password))
return base_result(msg='Reset User Successful!', code=code)
return base_result(msg='Reset User Failed!')
def post(self):
return self.reset_psw(True)
def get(self):
return self.reset_psw(False)
# 开启服务api
# code : 3 服务开启失败
# code : 4 操作繁忙
# code : -1 用户信息不正确
#
class StartService(BaseResource):
def add_args(self):
pass
def start_service(self, is_post):
username = get_base64_data('username', is_post)
password = get_base64_data('password', is_post)
user = load_default_userjson()
if username != user['username'] or password != user['password']:
return base_result(msg='Loin Failed')
type = int(get_base64_data('type', is_post))
port = int(get_base64_data('port', is_post))
if busy:
return base_result(msg='Server Busy!,Try Again Later.', code=4)
if type == -1:
stop_service(SERVICE_TYPE_BROOK)
result1 = start_service(SERVICE_TYPE_BROOK)
stop_service(SERVICE_TYPE_SS)
result2 = start_service(SERVICE_TYPE_SS)
stop_service(SERVICE_TYPE_SOCKS5)
result3 = start_service(SERVICE_TYPE_SOCKS5)
if result1 * result2 * result3 == 0:
return base_result(msg='Start All Services Successful!', code=0)
else:
if port == -1:
if type == SERVICE_TYPE_BROOK:
stop_service(SERVICE_TYPE_BROOK)
result = start_service(SERVICE_TYPE_BROOK)
elif type == SERVICE_TYPE_SS:
stop_service(SERVICE_TYPE_SS)
result = start_service(SERVICE_TYPE_SS)
elif type == SERVICE_TYPE_SOCKS5:
stop_service(SERVICE_TYPE_SOCKS5)
result = start_service(SERVICE_TYPE_SOCKS5)
else:
result = -1
if result == 0:
return base_result(msg='Start Service Successful!', code=0)
else:
if type == SERVICE_TYPE_BROOK:
stop_service(SERVICE_TYPE_BROOK, port)
result = start_service(SERVICE_TYPE_BROOK, port)
elif type == SERVICE_TYPE_SS:
stop_service(SERVICE_TYPE_SS, port)
result = start_service(SERVICE_TYPE_SS, port)
elif type == SERVICE_TYPE_SOCKS5:
stop_service(SERVICE_TYPE_SOCKS5, port)
result = start_service(SERVICE_TYPE_SOCKS5, port)
else:
result = -1
if result == 0:
return base_result(msg='Start Service Successful!', code=0)
return base_result(msg='Failed to Start Service', code=3)
def get(self):
return self.start_service(False)
def post(self):
return self.start_service(True)
# 停止服务api
class StopService(BaseResource):
def add_args(self):
pass
def stop_service(self, is_post):
username = get_base64_data('username', is_post)
password = get_base64_data('password', is_post)
if username != load_default_userjson()['username'] or password != load_default_userjson()['password']:
return base_result(msg='Loin Failed')
type = int(get_base64_data('type', is_post))
port = int(get_base64_data('port', is_post))
if type == -1:
stop_service(SERVICE_TYPE_BROOK)
stop_service(SERVICE_TYPE_SS)
stop_service(SERVICE_TYPE_SOCKS5)
else:
if port == -1:
if type == SERVICE_TYPE_BROOK:
stop_service(SERVICE_TYPE_BROOK, force=True)
elif type == SERVICE_TYPE_SS:
stop_service(SERVICE_TYPE_SS, force=True)
elif type == SERVICE_TYPE_SOCKS5:
stop_service(SERVICE_TYPE_SOCKS5, force=True)
else:
if type == SERVICE_TYPE_BROOK:
stop_service(SERVICE_TYPE_BROOK, port)
start_service(SERVICE_TYPE_BROOK, port=-1)
elif type == SERVICE_TYPE_SS:
stop_service(SERVICE_TYPE_SS, port)
start_service(SERVICE_TYPE_SS, port=-1)
elif type == SERVICE_TYPE_SOCKS5:
stop_service(SERVICE_TYPE_SOCKS5, port)
start_service(SERVICE_TYPE_SOCKS5, port=-1)
return base_result(msg='Stop Service Successful!', code=0)
def get(self):
return self.stop_service(False)
def post(self):
return self.stop_service(True)
# 获取服务状态api
class ServiceState(BaseResource):
def add_args(self):
pass
def service_state(self):
return current_brook_state
def get(self):
if check_base64_data(is_get_service_state=False):
return base_result(msg='', code=0, data=base64encode(json.dumps(self.service_state()), python_version))
else:
return base_result(msg='解密失败', code=1)
def post(self):
if check_base64_data(is_get_service_state=True):
return base_result(msg='', code=0, data=base64encode(json.dumps(self.service_state()), python_version))
else:
return base_result(msg='解密失败', code=1)
# 增加端口api
class AddPort(BaseResource):
def add_args(self):
pass
def add(self, is_post):
type = int(get_base64_data('type', is_post))
port = int(get_base64_data('port', is_post))
password = get_base64_data('password', is_post)
username = get_base64_data('username', is_post)
info = get_base64_data('info', is_post)
if busy:
return base_result(msg='Server Busy!,Try Again Later.', code=4)
if is_port_used(port, current_brook_state):
return base_result(msg='Port has been used!', code=-2)
if add_port(service_type=type, port=port, psw=password, username=username, info=info):
return base_result(msg='Add Port Successful!', code=0)
return base_result(msg='Add Port Failed!', code=-1)
def get(self):
return self.add(False)
def post(self):
return self.add(True)
# 删除端口api
class DelPort(BaseResource):
def add_args(self):
pass
def del_port(self, is_post):
type = int(get_base64_data('type', is_post))
port = int(get_base64_data('port', is_post))
if busy:
return base_result(msg='Server Busy!,Try Again Later.', code=4)
if del_port(service_type=type, port=port):
return base_result(msg='Delete Port Successful!', code=0)
return base_result(msg='Delete Port Failed!', code=-1)
def get(self):
return self.del_port(False)
def post(self):
return self.del_port(True)
# 生成二维码api
class GenerateQrImg(BaseResource):
def add_args(self):
pass
def generate_qr_image(self, is_post):
type = int(get_base64_data('type', is_post))
port = int(get_base64_data('port', is_post))
password = get_base64_data('password', is_post)
ip = get_base64_data('ip', is_post)
if type == SERVICE_TYPE_SS:
if port <= 0:
return base_result(msg='Port must > 0', code=-2)
if generate_qr_image(format_ss_link(ip, password, port, python_version), port):
return base_result('GenerateQrImg successful!', code=0)
return base_result('GenerateQrImg failed')
def get(self):
return self.generate_qr_image(False)
def post(self):
return self.generate_qr_image(True)
# 检查目标端口是否被占用、根据配置信息判断端口是否已被记录
def is_port_used(port, config_json):
if port > 0:
brook_list = config_json['brook']
ss_list = config_json['shadowsocks']
socks5_list = config_json['socks5']
for brook in brook_list:
if port == brook['port']:
return True
for ss in ss_list:
if port == ss['port']:
return True
for socks5 in socks5_list:
if port == socks5['port']:
return True
pi = os.popen('lsof -i:' + str(port))
res = pi.read()
pi.close()
if res != '':
return True
return False
# 增加端口
def add_port(username, service_type=SERVICE_TYPE_BROOK, port=-1, psw='', info=''):
print(service_type, port, psw, username)
if port == -1:
return False
if username != '' and username != None:
if psw == '' or psw == None:
return False
config_json = load_config_json()
new_config_json = config_json
if service_type == SERVICE_TYPE_BROOK:
config_json['brook'].append({'port': port, 'psw': str(psw), 'info': info})
elif service_type == SERVICE_TYPE_SS:
config_json['shadowsocks'].append({'port': port, 'psw': str(psw), 'info': info})
elif service_type == SERVICE_TYPE_SOCKS5:
config_json['socks5'].append({'port': port, 'psw': str(psw), 'username': str(username), 'info': info})
global busy
busy = True
save_config_json(new_config_json)
busy = False
if is_linux():
refuse_port([port])
release_port([port])
stop_service(service_type=service_type)
start_service(service_type=service_type, port=port)
return True
# 删除端口
def del_port(service_type=SERVICE_TYPE_BROOK, port=-1):
if port == -1:
return False
config_json = load_config_json()
service_list = [config_json['brook'], config_json['shadowsocks'], config_json['socks5']]
def get_index(service):
index = -1
for i in range(len(service)):
if service[i]['port'] == port:
index = i
break
return index
try:
if service_type == SERVICE_TYPE_BROOK:
index = get_index(service_list[0])
if index == -1: return False
config_json['brook'].remove(config_json['brook'][index])
global busy
busy = True
save_config_json(config_json)
busy = False
elif service_type == SERVICE_TYPE_SS:
index = get_index(service_list[1])
if index == -1: return False
config_json['shadowsocks'].remove(config_json['shadowsocks'][index])
busy = True
save_config_json(config_json)
busy = False
elif service_type == SERVICE_TYPE_SOCKS5:
index = get_index(service_list[2])
if index == -1: return False
config_json['socks5'].remove(config_json['socks5'][index])
busy = True
save_config_json(config_json)
busy = False
stop_service(service_type=service_type, port=port)
start_service(service_type=service_type)
return True
except IndexError:
pass
return False
# 获取本机实际对外通信的地址
def get_host_ip():
import socket
s = None
ip = '0.0.0.0'
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('8.8.8.8', 80))
ip = s.getsockname()[0]
except:
pass
finally:
if s: s.close()
return ip
# 记录所有服务的状态
def record_all_state():
record_state(SERVICE_TYPE_BROOK)
record_state(SERVICE_TYPE_SS)
record_state(SERVICE_TYPE_SOCKS5)
# 记录服务状态
def record_state(service_type=-1):
if service_type == SERVICE_TYPE_BROOK:
service_name = 'brook'
service_cmomand_name = 'servers'
elif service_type == SERVICE_TYPE_SS:
service_name = 'shadowsocks'
service_cmomand_name = 'ssservers'
elif service_type == SERVICE_TYPE_SOCKS5:
service_name = 'socks5'
service_cmomand_name = 'socks5'
else:
return
pi = os.popen('ps aux|grep brook\ %s' % service_cmomand_name)
result = pi.read()
pi.close()
# 正则匹配查找出当前服务的所有端口
all_results = re.findall("-l :\d+", result)
final_results = []
for node in all_results:
final_results.append(int(node[4:]))
# print(final_results)
config_json = load_config_json()
global current_brook_state
current_brook_state[service_name] = []
# 判断当前服务所有端口的状态,并保存到全局变量current_brook_state中去
for server in config_json[service_name]:
current_server = {}
if service_type == SERVICE_TYPE_BROOK:
current_server['link'] = format_brook_link(host_ip, server['psw'], server['port'])
current_server['qr_img_path'] = os.path.join('static/img/qr', str(server['port']) + '.png')
elif service_type == SERVICE_TYPE_SS:
current_server['link'] = format_ss_link(host_ip, server['psw'], server['port'], pv=python_version)
current_server['qr_img_path'] = os.path.join('static/img/qr', str(server['port']) + '.png')
if is_linux():
current_server['linked_num'] = port_linked_num(server['port'])
else:
current_server['linked_num'] = 0
current_server['port'] = server['port']
current_server['psw'] = server['psw']
if server['port'] in final_results:
current_server['state'] = 1
else:
current_server['state'] = 0
if service_type == SERVICE_TYPE_SOCKS5:
current_server['username'] = server['username']
elif service_type == SERVICE_TYPE_SS:
current_server['encode_method'] = 'aes-256-cfb'
current_server['ip'] = host_ip
if server.get('info'):
current_server['info'] = server['info']
else:
current_server['info'] = ''
current_brook_state[service_name].append(current_server)
# 开启服务
def start_service(service_type, port=-1, force=False):
service_name = 'brook'
if service_type == SERVICE_TYPE_BROOK:
service_name = 'brook'
elif service_type == SERVICE_TYPE_SS:
service_name = 'shadowsocks'
elif service_type == SERVICE_TYPE_SOCKS5:
service_name = 'socks5'
config_json = load_config_json()
server_list = config_json[service_name]
server_list_str = ''
for server in server_list:
if service_type != SERVICE_TYPE_SOCKS5:
server_str = '-l ":%d %s" ' % (server['port'], server['psw'])
else:
server_str = '-l :%d ' % (server['port'])
if port != -1:
if port == server['port']:
server['state'] = 1
else:
if force:
server['state'] = 1
if server['state'] != 0:
server_list_str += server_str
if has_service_start(service_type):
print(' %s服务已经开启,不要重复操作' % service_name)
global busy
busy = True
save_config_json(config_json)
busy = False
return 0
else:
code1 = -2
if len(server_list_str) != 0:
# 采用brook程序一次开启多个服务端口的命令
if service_type == SERVICE_TYPE_BROOK:
code1 = os.system('nohup ./brook servers ' + server_list_str + '>/dev/null 2>log &')
elif service_type == SERVICE_TYPE_SS:
code1 = os.system('nohup ./brook ssservers ' + server_list_str + '>/dev/null 2>log &')
elif service_type == SERVICE_TYPE_SOCKS5:
if server_list[0]['username'] != '':
user_mode = ' --username ' + server_list[0]['username'] + ' --password ' + server_list[0]['psw']
else:
# 当socks5服务没有设置用户名时,认为这个socks5服务是无账号、无密码服务
user_mode = ''
code1 = os.system(
'nohup ./brook socks5 ' + server_list_str + '-i ' + host_ip + user_mode + ' >/dev/null 2>log &')
if code1 == 0:
# 这时 brook_pid,ss_pid,socks5_pid未被记录
has_service_start(service_type) # 为了记录brook_pid,ss_pid,socks5_pid
print('%s Service Start Successful' % service_name)
busy = True
save_config_json(config_json)
busy = False
return 0
else:
has_service_start(service_type)
if code1 == -2:
pass
else:
print(' %s Service Start Failed' % service_name)
# 停止服务
def stop_service(service_type=SERVICE_TYPE_BROOK, port=-1, force=False):
has_service_start(service_type)
service_name = 'brook'
if service_type == SERVICE_TYPE_BROOK:
service_name = 'brook'
elif service_type == SERVICE_TYPE_SS:
service_name = 'shadowsocks'
elif service_type == SERVICE_TYPE_SOCKS5:
service_name = 'socks5'
config_json = load_config_json()
server_list = config_json[service_name]
for server in server_list:
if port != -1:
if port == server['port']:
server['state'] = 0
else:
if force:
server['state'] = 0
global busy
busy = True
save_config_json(config_json)
busy = False
try:
global brook_pid, ss_pid
if service_type == SERVICE_TYPE_BROOK:
if brook_pid != '':
os.system('kill ' + brook_pid)
elif service_type == SERVICE_TYPE_SS:
if ss_pid != '':
os.system('kill ' + ss_pid)
elif service_type == SERVICE_TYPE_SOCKS5:
if socks5_pid != '':
os.system('kill ' + socks5_pid)
finally:
pass
# 获取端口已连接的ip数量
def port_linked_num(port):
num = 0
c = "ss state connected sport = :%d -tn|sed '1d'|awk '{print $NF}'|awk -F ':' '{print $(NF-1)}'|sort -u|wc -l" % port
try:
pi = os.popen(c)
num = int(pi.read())
pi.close()
except:
pass
return num
# 检查服务是否开启(记录对应的服务进程号)
def has_service_start(service_type=SERVICE_TYPE_BROOK):
pi = os.popen('ps aux | grep brook')
result = pi.read()
pi.close()
try:
global brook_pid, ss_pid, socks5_pid
if service_type == SERVICE_TYPE_BROOK:
brook_pid = match_pid(result, service_type)
elif service_type == SERVICE_TYPE_SS:
ss_pid = match_pid(result, service_type)
elif service_type == SERVICE_TYPE_SOCKS5:
socks5_pid = match_pid(result, service_type)
except Exception:
if service_type == SERVICE_TYPE_BROOK:
brook_pid = ''
elif service_type == SERVICE_TYPE_SS:
ss_pid = ''
elif service_type == SERVICE_TYPE_SOCKS5:
socks5_pid = ''
started = False
if service_type == SERVICE_TYPE_BROOK:
if str(result).find(' servers -l') != -1:
started = True
elif service_type == SERVICE_TYPE_SS:
if str(result).find(' ssservers -l') != -1:
started = True
elif service_type == SERVICE_TYPE_SOCKS5:
if str(result).find(' socks5 -l') != -1:
started = True
return started
# 正则匹配查找对应服务的进程号
def match_pid(text, service_type=SERVICE_TYPE_BROOK):
import re
if service_type == SERVICE_TYPE_BROOK:
re_result = re.search('.+\s{1}servers -l.+', str(text))
elif service_type == SERVICE_TYPE_SS:
re_result = re.search('.+\s{1}ssservers -l.+', str(text))
else:
re_result = re.search('.+\s{1}socks5 -l.+', str(text))
target_line = re_result.group()
re_result2 = re.search("\S+\s+[\d]+[\s]{0,1}[\d]+\s+\d\.\d", target_line)
target_line2 = re_result2.group()
final_result = re.search("[\d]+[\s]{0,1}[\d]+", target_line2)
return final_result.group()
# 清理后台模式的日志
def clear_log():
if os.path.exists('nohup.out'):
with open('nohup.out', 'w') as f:
f.write('')
print('Clear Log')
class Config(object):
JOBS = [
{
'id': 'job1',
'func': record_all_state,
# 'args': (1, 2),
'trigger': 'interval',
'seconds': 2
},
{
'id': 'job2',
'func': clear_log,
# 'args': (1, 2),
'trigger': 'interval',
'seconds': 300
}
]
SCHEDULER_API_ENABLED = True
#
# flask-restful的api对象添加路由信息
#
api.add_resource(Favicon, '/favicon.ico')
api.add_resource(Login, '/api/login')
api.add_resource(ResetPsw, '/api/resetpsw')
api.add_resource(StartService, '/api/startservice')
api.add_resource(StopService, '/api/stopservice')
api.add_resource(ServiceState, '/api/servicestate')
api.add_resource(AddPort, '/api/addport')
api.add_resource(DelPort, '/api/delport')
api.add_resource(GenerateQrImg, '/api/generateqrimg')
@app.route("/")
def brook_web():
title = 'Brook后台管理'
return render_template('index.html', title=title)
@app.route("/login")
def user_login():
title = 'Brook管理登录'
return render_template('login.html', title=title)
@app.route("/user")
def user_edit():
title = 'Brook后台管理'
return render_template('user.html', title=title)
@app.route("/test")
def test_html():
return render_template('test.html')
# @app.route('/create_models', methods=['PUT'])
# def create_models():
# try:
# db.create_all()
# from models.system_user import SystemUser
# if SystemUser.query.count() == 0:
# user = SystemUser(username='admin', password='admin')
# db.session.add(user)
# db.session.commit()
# return base_result(msg='创建Models成功!')
# except Exception as e:
# print(e)
# db.session.rollback()
# return base_result(msg='创建Models失败' + str(e), code=-1)
#
#
# @app.route('/get_user')
# def get_user_default():
# try:
# db.create_all()
# from models.system_user import SystemUser
# users = SystemUser.query.all()
# user_list = []
# for user in users:
# user_list.append({'username': user.username, 'password': user.password})
# return base_result(msg='获取用户成功', data={'users': user_list})
# except Exception as e:
# print(e)
# return base_result(msg='获取用户失败 ' + str(e), code=-1)
# 修改默认web端口是否错误的标志
port_error = False
def config_param(port=5000, email='', domain=''):
global default_port, port_error
if isinstance(port, int):
if port > 0:
default_port = port
else:
port_error = True
print('端口必须大于0')
else:
port_error = True
print('端口号必须为正整数')
if email == '':
return
if domain == '':
return
# 定时器服务,用于心跳记录当前服务信息
app.config.from_object(Config())
scheduler = APScheduler()
# it is also possible to enable the API directly
# scheduler.api_enabled = True
scheduler.init_app(app)
scheduler.start()
def is_linux():
import platform
sys_name = platform.system()
# machine_name = platform.machine().lower()
if 'Darwin' == sys_name:
return False
elif 'Linux' == sys_name:
return True
return False
if __name__ == '__main__':
if python_version == '2':
reload(sys) # python3解释器下可能会提示错误,没关系,因为只有python2运行本程序才会走到这步
sys.setdefaultencoding("utf-8") # 同上
try:
larger_ram = 'ulimit -n 51200'
os.popen(larger_ram).close()
except:
pass
host_ip = get_host_ip()
import fire
fire.Fire(config_param)
if not port_error:
# 记录当前运行中的服务,并停止该服务
if has_service_start(SERVICE_TYPE_BROOK): stop_service(SERVICE_TYPE_BROOK, port=-1)
if has_service_start(SERVICE_TYPE_SS): stop_service(SERVICE_TYPE_SS, port=-1)
if has_service_start(SERVICE_TYPE_SOCKS5): stop_service(SERVICE_TYPE_SOCKS5, port=-1)
if not os.path.exists('brook'):
print('当前目录下不存在brook程序!请执行 python install-brook.py 后重试')
else:
start_service(SERVICE_TYPE_BROOK)
start_service(SERVICE_TYPE_SS)
start_service(SERVICE_TYPE_SOCKS5)
app.run(host=host_ip, port=default_port)
| 31.9889 | 160 | 0.612378 |
4a245ece0b0cc50ab2bdcd2f0b48a6c23639e56f | 3,591 | py | Python | huaweicloud-sdk-dms/huaweicloudsdkdms/v2/model/consume_message.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-dms/huaweicloudsdkdms/v2/model/consume_message.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-dms/huaweicloudsdkdms/v2/model/consume_message.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six

from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization


class ConsumeMessage:

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """

    sensitive_list = []

    openapi_types = {
        'message': 'ConsumeMessageMessage',
        'handler': 'str'
    }

    attribute_map = {
        'message': 'message',
        'handler': 'handler'
    }

    def __init__(self, message=None, handler=None):
        """ConsumeMessage - a model defined in huaweicloud sdk"""

        self._message = None
        self._handler = None
        self.discriminator = None

        if message is not None:
            self.message = message
        if handler is not None:
            self.handler = handler

    @property
    def message(self):
        """Gets the message of this ConsumeMessage.

        :return: The message of this ConsumeMessage.
        :rtype: ConsumeMessageMessage
        """
        return self._message

    @message.setter
    def message(self, message):
        """Sets the message of this ConsumeMessage.

        :param message: The message of this ConsumeMessage.
        :type: ConsumeMessageMessage
        """
        self._message = message

    @property
    def handler(self):
        """Gets the handler of this ConsumeMessage.

        Message handler.

        :return: The handler of this ConsumeMessage.
        :rtype: str
        """
        return self._handler

    @handler.setter
    def handler(self, handler):
        """Sets the handler of this ConsumeMessage.

        Message handler.

        :param handler: The handler of this ConsumeMessage.
        :type: str
        """
        self._handler = handler

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ConsumeMessage):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 25.65 | 79 | 0.548315 |
4a245f06c8ea3054095eb0f4ce335dcea114a565 | 4,727 | py | Python | workflow/migrations/0002_auto_20190519_0023.py | tanmayagarwal/Activity-CE | a49c47053b191ffa5aee9a06e66a7c9644804434 | [
"Apache-2.0"
] | 1 | 2021-07-07T14:39:23.000Z | 2021-07-07T14:39:23.000Z | workflow/migrations/0002_auto_20190519_0023.py | michaelbukachi/Activity | f3d4f4da88ae9539c341ca73cc559b850693d669 | [
"Apache-2.0"
] | null | null | null | workflow/migrations/0002_auto_20190519_0023.py | michaelbukachi/Activity | f3d4f4da88ae9539c341ca73cc559b850693d669 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.1 on 2019-05-19 07:23
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('workflow', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Currency',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255, verbose_name='Currency Name')),
('symbol', models.CharField(blank=True, max_length=10, verbose_name='Currency Symbol')),
('code', models.CharField(blank=True, max_length=20, verbose_name='Currency Code')),
('create_date', models.DateTimeField(blank=True, null=True)),
('edit_date', models.DateTimeField(blank=True, null=True)),
],
options={
'verbose_name_plural': 'Currencies',
'ordering': ('name',),
},
),
migrations.AddField(
model_name='organization',
name='default_language',
field=models.CharField(default='English-US', max_length=50, verbose_name='Organization language'),
),
migrations.AddField(
model_name='organization',
name='form_label',
field=models.CharField(default='Form', max_length=255, verbose_name='Form Organization label'),
),
migrations.AddField(
model_name='organization',
name='indicator_label',
field=models.CharField(default='Indicator', max_length=255, verbose_name='Indicator Organization label'),
),
migrations.AddField(
model_name='organization',
name='site_label',
field=models.CharField(default='Site', max_length=255, verbose_name='Site Organization label'),
),
migrations.AddField(
model_name='organization',
name='stakeholder_label',
field=models.CharField(default='Stakeholder', max_length=255, verbose_name='Stakeholder Organization label'),
),
migrations.AddField(
model_name='organization',
name='theme_color',
field=models.CharField(default='#25ced1', max_length=50, verbose_name='Organization theme color'),
),
migrations.AddField(
model_name='program',
name='end_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AddField(
model_name='program',
name='organization',
field=models.ForeignKey(blank=True, help_text='Program Organization', null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Organization'),
),
migrations.AddField(
model_name='program',
name='start_date',
field=models.DateTimeField(blank=True, null=True),
),
migrations.AlterField(
model_name='activityuser',
name='organization',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Organization'),
),
migrations.AlterField(
model_name='historicalprojectcomplete',
name='actual_budget',
field=models.DecimalField(blank=True, decimal_places=2, default=Decimal('0.00'), help_text='What was the actual final cost? This should match any financial documentation you have in the file. It should be completely documented and verifiable by finance and any potential audit', max_digits=20, verbose_name='Actual Cost'),
),
migrations.AlterField(
model_name='organization',
name='logo',
field=models.FileField(blank=True, null=True, upload_to='media/img/', verbose_name='Your Organization Logo'),
),
migrations.AlterField(
model_name='projectcomplete',
name='actual_budget',
field=models.DecimalField(blank=True, decimal_places=2, default=Decimal('0.00'), help_text='What was the actual final cost? This should match any financial documentation you have in the file. It should be completely documented and verifiable by finance and any potential audit', max_digits=20, verbose_name='Actual Cost'),
),
migrations.AddField(
model_name='organization',
name='default_currency',
field=models.ForeignKey(blank=True, help_text='Organization currency', null=True, on_delete=django.db.models.deletion.SET_NULL, to='workflow.Currency'),
),
]
| 46.80198 | 335 | 0.625978 |
4a245f2e6a0f1995fed56ad68574da2d3a6a8f86 | 3,483 | py | Python | staffsplit/staff_split.py | hotkeysoft/staffsplit_py | 2ad528d2db706242c01f478cb47e68d71ba5711a | [
"MIT"
] | null | null | null | staffsplit/staff_split.py | hotkeysoft/staffsplit_py | 2ad528d2db706242c01f478cb47e68d71ba5711a | [
"MIT"
] | null | null | null | staffsplit/staff_split.py | hotkeysoft/staffsplit_py | 2ad528d2db706242c01f478cb47e68d71ba5711a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import logging
import os
import sys
from PIL import Image
from PIL import ImageOps
from PIL import UnidentifiedImageError
from .args import parse_args
from ._version import print_version_info
log = logging.getLogger()
class LogFormatter(logging.Formatter):
def format(self, record):
if record.levelno == logging.INFO:
self._style._fmt = "%(message)s"
else:
self._style._fmt = "%(levelname)s: %(message)s"
return super().format(record)
def process_sheet_file(file, args):
config = vars(args)
log.debug('config: %s', config)
try:
image = Image.open(file).convert('RGB')
        logging.debug('image mode: %s, info: %s', image.mode, image.info)
inverted_image = ImageOps.invert(image)
bounding_box = inverted_image.getbbox()
        logging.debug('bounding box: %s', bounding_box)
margin = 4
left_column = (bounding_box[0]+margin, 0, bounding_box[0]+margin+1, image.height)
cropped = image.crop(left_column)
if cropped.height < 100:
logging.error(f"Can't process image {file}")
return
staves = [y for y in range(cropped.height) if cropped.getpixel((0, y)) < (128, 128, 128)]
gaps = [[s, e] for s, e in zip(staves, staves[1:]) if s+1 < e]
edges = iter(staves[:1] + sum(gaps, []) + staves[-1:])
staves = list(zip(edges, edges))
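        # Worked example of the run-grouping above (illustrative comment, not part of the original):
        # if the dark rows found in the sampled 1-px column are [10, 11, 12, 40, 41], then
        #   gaps   -> [[12, 40]]            (consecutive pairs with a hole between them)
        #   edges  -> 10, 12, 40, 41        (first row, gap boundaries, last row)
        #   staves -> [(10, 12), (40, 41)]  (top/bottom row of each contiguous dark run)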
logging.info(f'found staves: {staves}')
# TODO: more sanity checks
if len(staves) > 10:
            logging.error(f'Too many staves found ({len(staves)}), skipping')
            return
for (index, staff) in enumerate(staves, start=1):
staff_height = 1200
h_padding = 500
mid = (staff[0] + staff[1])//2
logging.info(f'Processing staff {index}, height={staff_height}, mid={mid}')
x1 = max(0, bounding_box[0]-(h_padding//2))
x2 = min(image.width, bounding_box[2]+(h_padding//2))
y1 = max(0, (mid-(staff_height)//2))
y2 = min(mid+(staff_height//2), image.height)
region = (x1, y1, x2, y2)
logging.info(f'cropping region: {region}')
cropped = image.crop(region)
(name, ext) = os.path.splitext(file)
outFile = f'{name}-{index}{ext}'
logging.info(f'saving to : {outFile}')
cropped.save(outFile)
except FileNotFoundError:
logging.error(f'Unable to read file {file}')
except UnidentifiedImageError:
logging.error(f"File {file} doesn't seem to be an image")
    except Exception:
        logging.error("Unexpected error: %s", sys.exc_info()[0])
def main(argv):
args = parse_args(argv[1:])
if args.version:
print_version_info()
return
# Configure logging
old_loglevel = log.level
handler = logging.StreamHandler()
handler.setFormatter(LogFormatter())
log.setLevel(args.loglevel)
log.addHandler(handler)
try:
if args.infile:
for file in args.infile:
log.info(f'processing {file}')
process_sheet_file(file, args)
else:
log.warning('Nothing to do')
finally:
# Reset log in case we're not running as a standalong app
log.removeHandler(handler)
log.setLevel(old_loglevel)
if __name__ == '__main__':
MIN_PYTHON = (3, 6)
if sys.version_info < MIN_PYTHON:
sys.exit("Python %s.%s or later is required.\n" % MIN_PYTHON)
main(sys.argv)
| 31.098214 | 97 | 0.596612 |
4a245f78b603d76b70c358fa3235d4b3d426e67d | 6,168 | py | Python | autofit/database/migration/migration.py | rhayes777/PyAutoF | 87f56419348833b285b00da1a524e329588e0b01 | [
"MIT"
] | 39 | 2019-01-24T10:45:23.000Z | 2022-03-18T09:37:59.000Z | autofit/database/migration/migration.py | rhayes777/PyAutoF | 87f56419348833b285b00da1a524e329588e0b01 | [
"MIT"
] | 260 | 2018-11-27T12:56:33.000Z | 2022-03-31T16:08:59.000Z | autofit/database/migration/migration.py | rhayes777/PyAutoF | 87f56419348833b285b00da1a524e329588e0b01 | [
"MIT"
] | 13 | 2018-11-30T16:49:05.000Z | 2022-01-21T17:39:29.000Z | import logging
from abc import ABC, abstractmethod
from hashlib import md5
from typing import Union, Generator, Iterable, Optional
from sqlalchemy.exc import OperationalError
from sqlalchemy.orm import Session
from .session_wrapper import SessionWrapper
logger = logging.getLogger(
__name__
)
class Identifiable(ABC):
@property
@abstractmethod
def id(self) -> str:
"""
A unique identifier generated by hashing a string
"""
def __eq__(
self,
other: Union["Identifiable", str]
) -> bool:
"""
Compares ids
"""
if isinstance(
other,
Identifiable
):
return self.id == other.id
if isinstance(
other,
str
):
return self.id == other
return False
class Step(Identifiable):
def __init__(self, *strings: str):
"""
A collection of SQL statements executed as one step
in a database migration.
Parameters
----------
strings
SQL statements
"""
self.strings = strings
@property
def id(self) -> str:
"""
Hash generated from underlying SQL statements
"""
return md5(
":".join(
self.strings
).encode(
"utf-8"
)
).hexdigest()
def __str__(self):
return "\n".join(self.strings)
__repr__ = __str__
class Revision(Identifiable):
def __init__(
self,
steps: Iterable[Step]
):
"""
A specific revision of the database. This comprises
a set of sequential steps and is uniquely identified
by a hash on the hash of those steps.
Parameters
----------
steps
Collections of SQL statements describing the changes
between different versions of the database
"""
self.steps = steps
@property
def id(self) -> str:
"""
A unique identifier created by joining and hashing the
identifiers of comprised steps.
"""
return md5(
":".join(
step.id for step
in self.steps
).encode("utf-8")
).hexdigest()
def __sub__(self, other: "Revision") -> "Revision":
"""
Create a revision with steps that describe the difference
between two revisions.
For example, if the data base were at revision 2 and the
code at revision 5, a 'revision' would be returned containing
the steps required to migrate the database to revision 5.
Parameters
----------
other
A previous revision
Returns
-------
An object comprising steps required to move from the other
revision to this revision.
"""
return Revision(tuple(
step for step in self.steps
if step not in other.steps
))
class Migrator:
def __init__(
self,
*steps: Step
):
"""
Manages migration of an old database.
The revision table is checked to see what version a database is on.
This is compared to the identifier of the current revision to determine
the set of Steps that must be executed to migrate the database.
Parameters
----------
steps
All steps recorded for every migration
"""
self._steps = steps
@property
def revisions(self) -> Generator[Revision, None, None]:
"""
One revision exists for each sequential set of steps
starting on the first step and terminating on any step
"""
for i in range(1, len(self._steps) + 1):
yield Revision(
self._steps[:i]
)
def get_steps(
self,
revision_id: Optional[str] = None
) -> Iterable[Step]:
"""
Retrieve steps required to go from the specified
revision to the latest revision.
Parameters
----------
revision_id
The identifier for a revision.
If None or unrecognised then all steps are returned.
Returns
-------
Steps required to get to the latest revision.
"""
for revision in self.revisions:
if revision_id == revision.id:
return (self.latest_revision - revision).steps
return self._steps
@property
def latest_revision(self) -> Revision:
"""
The latest revision according to the steps passed to the
Migrator
"""
return Revision(
self._steps
)
def migrate(self, session: Session):
"""
Migrate the database that session points to to the current
revision.
Applies each required step and updates the revision identifier
in the database.
If no revision table is found then one is created.
Parameters
----------
session
A session pointing at some database.
"""
wrapper = SessionWrapper(
session
)
revision_id = wrapper.revision_id
steps = list(
self.get_steps(
revision_id
)
)
if len(steps) == 0:
logger.info(
"Database already at latest revision"
)
return
latest_revision_id = self.latest_revision.id
logger.info(
f"Performing migration from {revision_id} to {latest_revision_id} in {len(steps)} steps"
)
for step in steps:
for string in step.strings:
try:
session.execute(
string
)
except OperationalError as e:
logger.debug(e)
wrapper.revision_id = self.latest_revision.id
logger.info(
f"revision_id updated to {wrapper.revision_id}"
)
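# --- Usage sketch (illustrative addition, not part of the original module) ---
# A Migrator is constructed once with every Step ever shipped; `migrate` then replays only
# the steps the target database has not yet seen and stores the new revision id.
# The SQL statements and the SQLite path below are assumptions made purely for demonstration.
if __name__ == "__main__":
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    migrator = Migrator(
        Step("ALTER TABLE fit ADD column_a VARCHAR"),  # hypothetical migration steps
        Step("ALTER TABLE fit ADD column_b VARCHAR"),
    )
    engine = create_engine("sqlite:///database.sqlite")  # hypothetical database file
    session = sessionmaker(bind=engine)()
    migrator.migrate(session)  # applies only the missing steps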
| 25.073171 | 100 | 0.535019 |
4a245fc18ecb73448302edd12c141e74ef81d004 | 1,851 | py | Python | aliyun-python-sdk-aegis/aliyunsdkaegis/request/v20161111/DescribeSasAssetStatisticsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-aegis/aliyunsdkaegis/request/v20161111/DescribeSasAssetStatisticsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-aegis/aliyunsdkaegis/request/v20161111/DescribeSasAssetStatisticsRequest.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeSasAssetStatisticsRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'aegis', '2016-11-11', 'DescribeSasAssetStatistics','vipaegis')
def get_SourceIp(self):
return self.get_query_params().get('SourceIp')
def set_SourceIp(self,SourceIp):
self.add_query_param('SourceIp',SourceIp)
def get_StatisticsColumn(self):
return self.get_query_params().get('StatisticsColumn')
def set_StatisticsColumn(self,StatisticsColumn):
self.add_query_param('StatisticsColumn',StatisticsColumn)
def get_PageSize(self):
return self.get_query_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_query_param('PageSize',PageSize)
def get_CurrentPage(self):
return self.get_query_params().get('CurrentPage')
def set_CurrentPage(self,CurrentPage):
self.add_query_param('CurrentPage',CurrentPage)
def get_Uuids(self):
return self.get_query_params().get('Uuids')
def set_Uuids(self,Uuids):
self.add_query_param('Uuids',Uuids) | 34.277778 | 92 | 0.765532 |
4a24605171044a364966bf0de9ced9179c2dc519 | 24 | py | Python | profiling/startup.py | oaxiom/glbase3 | 9d3fc1efaad58ffb97e5b8126c2a96802daf9bac | [
"MIT"
] | 8 | 2019-06-11T02:13:20.000Z | 2022-02-22T09:27:23.000Z | profiling/startup.py | oaxiom/glbase3 | 9d3fc1efaad58ffb97e5b8126c2a96802daf9bac | [
"MIT"
] | 6 | 2020-12-18T15:08:14.000Z | 2021-05-22T00:31:57.000Z | profiling/startup.py | oaxiom/glbase3 | 9d3fc1efaad58ffb97e5b8126c2a96802daf9bac | [
"MIT"
] | 2 | 2020-05-06T04:27:03.000Z | 2022-02-22T09:28:25.000Z |
from glbase3 import *
| 6 | 21 | 0.708333 |
4a246152ef337f7f307cb48d83f8b1f50977e3f5 | 1,015 | py | Python | setup.py | rekcahkumar/mediafire-python-open-sdk | 8f1f23db1b16f16e026f5c6777aec32d00baa05f | [
"BSD-2-Clause"
] | 35 | 2015-01-18T22:38:35.000Z | 2021-10-10T04:07:32.000Z | setup.py | rekcahkumar/mediafire-python-open-sdk | 8f1f23db1b16f16e026f5c6777aec32d00baa05f | [
"BSD-2-Clause"
] | 32 | 2015-01-10T08:22:30.000Z | 2022-02-28T23:20:22.000Z | setup.py | rekcahkumar/mediafire-python-open-sdk | 8f1f23db1b16f16e026f5c6777aec32d00baa05f | [
"BSD-2-Clause"
] | 20 | 2015-01-10T08:34:03.000Z | 2022-03-30T22:50:13.000Z | from setuptools import setup
from pip.req import parse_requirements
import uuid
requirements = parse_requirements('requirements.txt', session=uuid.uuid1())
install_requires = [str(r.req) for r in requirements]
setup(
name='mediafire',
version='0.6.0',
author='Roman Yepishev',
author_email='[email protected]',
packages=['mediafire', 'mediafire.media'],
url='https://github.com/MediaFire/mediafire-python-open-sdk',
license='BSD',
description='Python MediaFire client library',
long_description=open('README.rst').read(),
install_requires=install_requires,
keywords="mediafire cloud files sdk storage api upload",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'License :: OSI Approved :: BSD License'
]
)
| 33.833333 | 75 | 0.669951 |
4a24620000e14037b9b25cec3a6362e36074e33d | 19,323 | py | Python | google/ads/google_ads/v4/proto/services/campaign_service_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/services/campaign_service_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/services/campaign_service_pb2.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v4/proto/services/campaign_service.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v4.proto.resources import campaign_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
from google.api import client_pb2 as google_dot_api_dot_client__pb2
from google.api import field_behavior_pb2 as google_dot_api_dot_field__behavior__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import field_mask_pb2 as google_dot_protobuf_dot_field__mask__pb2
from google.rpc import status_pb2 as google_dot_rpc_dot_status__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v4/proto/services/campaign_service.proto',
package='google.ads.googleads.v4.services',
syntax='proto3',
serialized_options=_b('\n$com.google.ads.googleads.v4.servicesB\024CampaignServiceProtoP\001ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v4/services;services\242\002\003GAA\252\002 Google.Ads.GoogleAds.V4.Services\312\002 Google\\Ads\\GoogleAds\\V4\\Services\352\002$Google::Ads::GoogleAds::V4::Services'),
serialized_pb=_b('\n=google/ads/googleads_v4/proto/services/campaign_service.proto\x12 google.ads.googleads.v4.services\x1a\x36google/ads/googleads_v4/proto/resources/campaign.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x17google/api/client.proto\x1a\x1fgoogle/api/field_behavior.proto\x1a\x19google/api/resource.proto\x1a google/protobuf/field_mask.proto\x1a\x17google/rpc/status.proto\"V\n\x12GetCampaignRequest\x12@\n\rresource_name\x18\x01 \x01(\tB)\xe0\x41\x02\xfa\x41#\n!googleads.googleapis.com/Campaign\"\xb0\x01\n\x16MutateCampaignsRequest\x12\x18\n\x0b\x63ustomer_id\x18\x01 \x01(\tB\x03\xe0\x41\x02\x12L\n\noperations\x18\x02 \x03(\x0b\x32\x33.google.ads.googleads.v4.services.CampaignOperationB\x03\xe0\x41\x02\x12\x17\n\x0fpartial_failure\x18\x03 \x01(\x08\x12\x15\n\rvalidate_only\x18\x04 \x01(\x08\"\xe1\x01\n\x11\x43\x61mpaignOperation\x12/\n\x0bupdate_mask\x18\x04 \x01(\x0b\x32\x1a.google.protobuf.FieldMask\x12=\n\x06\x63reate\x18\x01 \x01(\x0b\x32+.google.ads.googleads.v4.resources.CampaignH\x00\x12=\n\x06update\x18\x02 \x01(\x0b\x32+.google.ads.googleads.v4.resources.CampaignH\x00\x12\x10\n\x06remove\x18\x03 \x01(\tH\x00\x42\x0b\n\toperation\"\x95\x01\n\x17MutateCampaignsResponse\x12\x31\n\x15partial_failure_error\x18\x03 \x01(\x0b\x32\x12.google.rpc.Status\x12G\n\x07results\x18\x02 \x03(\x0b\x32\x36.google.ads.googleads.v4.services.MutateCampaignResult\"-\n\x14MutateCampaignResult\x12\x15\n\rresource_name\x18\x01 \x01(\t2\xc3\x03\n\x0f\x43\x61mpaignService\x12\xb5\x01\n\x0bGetCampaign\x12\x34.google.ads.googleads.v4.services.GetCampaignRequest\x1a+.google.ads.googleads.v4.resources.Campaign\"C\x82\xd3\xe4\x93\x02-\x12+/v4/{resource_name=customers/*/campaigns/*}\xda\x41\rresource_name\x12\xda\x01\n\x0fMutateCampaigns\x12\x38.google.ads.googleads.v4.services.MutateCampaignsRequest\x1a\x39.google.ads.googleads.v4.services.MutateCampaignsResponse\"R\x82\xd3\xe4\x93\x02\x33\"./v4/customers/{customer_id=*}/campaigns:mutate:\x01*\xda\x41\x16\x63ustomer_id,operations\x1a\x1b\xca\x41\x18googleads.googleapis.comB\xfb\x01\n$com.google.ads.googleads.v4.servicesB\x14\x43\x61mpaignServiceProtoP\x01ZHgoogle.golang.org/genproto/googleapis/ads/googleads/v4/services;services\xa2\x02\x03GAA\xaa\x02 Google.Ads.GoogleAds.V4.Services\xca\x02 Google\\Ads\\GoogleAds\\V4\\Services\xea\x02$Google::Ads::GoogleAds::V4::Servicesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,google_dot_api_dot_client__pb2.DESCRIPTOR,google_dot_api_dot_field__behavior__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_protobuf_dot_field__mask__pb2.DESCRIPTOR,google_dot_rpc_dot_status__pb2.DESCRIPTOR,])
_GETCAMPAIGNREQUEST = _descriptor.Descriptor(
name='GetCampaignRequest',
full_name='google.ads.googleads.v4.services.GetCampaignRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v4.services.GetCampaignRequest.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002\372A#\n!googleads.googleapis.com/Campaign'), file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=329,
serialized_end=415,
)
_MUTATECAMPAIGNSREQUEST = _descriptor.Descriptor(
name='MutateCampaignsRequest',
full_name='google.ads.googleads.v4.services.MutateCampaignsRequest',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='customer_id', full_name='google.ads.googleads.v4.services.MutateCampaignsRequest.customer_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='operations', full_name='google.ads.googleads.v4.services.MutateCampaignsRequest.operations', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=_b('\340A\002'), file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='partial_failure', full_name='google.ads.googleads.v4.services.MutateCampaignsRequest.partial_failure', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='validate_only', full_name='google.ads.googleads.v4.services.MutateCampaignsRequest.validate_only', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=418,
serialized_end=594,
)
_CAMPAIGNOPERATION = _descriptor.Descriptor(
name='CampaignOperation',
full_name='google.ads.googleads.v4.services.CampaignOperation',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='update_mask', full_name='google.ads.googleads.v4.services.CampaignOperation.update_mask', index=0,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='create', full_name='google.ads.googleads.v4.services.CampaignOperation.create', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='update', full_name='google.ads.googleads.v4.services.CampaignOperation.update', index=2,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='remove', full_name='google.ads.googleads.v4.services.CampaignOperation.remove', index=3,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='operation', full_name='google.ads.googleads.v4.services.CampaignOperation.operation',
index=0, containing_type=None, fields=[]),
],
serialized_start=597,
serialized_end=822,
)
_MUTATECAMPAIGNSRESPONSE = _descriptor.Descriptor(
name='MutateCampaignsResponse',
full_name='google.ads.googleads.v4.services.MutateCampaignsResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='partial_failure_error', full_name='google.ads.googleads.v4.services.MutateCampaignsResponse.partial_failure_error', index=0,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='results', full_name='google.ads.googleads.v4.services.MutateCampaignsResponse.results', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=825,
serialized_end=974,
)
_MUTATECAMPAIGNRESULT = _descriptor.Descriptor(
name='MutateCampaignResult',
full_name='google.ads.googleads.v4.services.MutateCampaignResult',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v4.services.MutateCampaignResult.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=976,
serialized_end=1021,
)
_MUTATECAMPAIGNSREQUEST.fields_by_name['operations'].message_type = _CAMPAIGNOPERATION
_CAMPAIGNOPERATION.fields_by_name['update_mask'].message_type = google_dot_protobuf_dot_field__mask__pb2._FIELDMASK
_CAMPAIGNOPERATION.fields_by_name['create'].message_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__pb2._CAMPAIGN
_CAMPAIGNOPERATION.fields_by_name['update'].message_type = google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__pb2._CAMPAIGN
_CAMPAIGNOPERATION.oneofs_by_name['operation'].fields.append(
_CAMPAIGNOPERATION.fields_by_name['create'])
_CAMPAIGNOPERATION.fields_by_name['create'].containing_oneof = _CAMPAIGNOPERATION.oneofs_by_name['operation']
_CAMPAIGNOPERATION.oneofs_by_name['operation'].fields.append(
_CAMPAIGNOPERATION.fields_by_name['update'])
_CAMPAIGNOPERATION.fields_by_name['update'].containing_oneof = _CAMPAIGNOPERATION.oneofs_by_name['operation']
_CAMPAIGNOPERATION.oneofs_by_name['operation'].fields.append(
_CAMPAIGNOPERATION.fields_by_name['remove'])
_CAMPAIGNOPERATION.fields_by_name['remove'].containing_oneof = _CAMPAIGNOPERATION.oneofs_by_name['operation']
_MUTATECAMPAIGNSRESPONSE.fields_by_name['partial_failure_error'].message_type = google_dot_rpc_dot_status__pb2._STATUS
_MUTATECAMPAIGNSRESPONSE.fields_by_name['results'].message_type = _MUTATECAMPAIGNRESULT
DESCRIPTOR.message_types_by_name['GetCampaignRequest'] = _GETCAMPAIGNREQUEST
DESCRIPTOR.message_types_by_name['MutateCampaignsRequest'] = _MUTATECAMPAIGNSREQUEST
DESCRIPTOR.message_types_by_name['CampaignOperation'] = _CAMPAIGNOPERATION
DESCRIPTOR.message_types_by_name['MutateCampaignsResponse'] = _MUTATECAMPAIGNSRESPONSE
DESCRIPTOR.message_types_by_name['MutateCampaignResult'] = _MUTATECAMPAIGNRESULT
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
GetCampaignRequest = _reflection.GeneratedProtocolMessageType('GetCampaignRequest', (_message.Message,), dict(
DESCRIPTOR = _GETCAMPAIGNREQUEST,
__module__ = 'google.ads.googleads_v4.proto.services.campaign_service_pb2'
,
__doc__ = """Request message for
[CampaignService.GetCampaign][google.ads.googleads.v4.services.CampaignService.GetCampaign].
Attributes:
resource_name:
Required. The resource name of the campaign to fetch.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.services.GetCampaignRequest)
))
_sym_db.RegisterMessage(GetCampaignRequest)
MutateCampaignsRequest = _reflection.GeneratedProtocolMessageType('MutateCampaignsRequest', (_message.Message,), dict(
DESCRIPTOR = _MUTATECAMPAIGNSREQUEST,
__module__ = 'google.ads.googleads_v4.proto.services.campaign_service_pb2'
,
__doc__ = """Request message for
[CampaignService.MutateCampaigns][google.ads.googleads.v4.services.CampaignService.MutateCampaigns].
Attributes:
customer_id:
Required. The ID of the customer whose campaigns are being
modified.
operations:
Required. The list of operations to perform on individual
campaigns.
partial_failure:
If true, successful operations will be carried out and invalid
operations will return errors. If false, all operations will
be carried out in one transaction if and only if they are all
valid. Default is false.
validate_only:
If true, the request is validated but not executed. Only
errors are returned, not results.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.services.MutateCampaignsRequest)
))
_sym_db.RegisterMessage(MutateCampaignsRequest)
CampaignOperation = _reflection.GeneratedProtocolMessageType('CampaignOperation', (_message.Message,), dict(
DESCRIPTOR = _CAMPAIGNOPERATION,
__module__ = 'google.ads.googleads_v4.proto.services.campaign_service_pb2'
,
__doc__ = """A single operation (create, update, remove) on a campaign.
Attributes:
update_mask:
FieldMask that determines which resource fields are modified
in an update.
operation:
The mutate operation.
create:
Create operation: No resource name is expected for the new
campaign.
update:
Update operation: The campaign is expected to have a valid
resource name.
remove:
Remove operation: A resource name for the removed campaign is
expected, in this format:
``customers/{customer_id}/campaigns/{campaign_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.services.CampaignOperation)
))
_sym_db.RegisterMessage(CampaignOperation)
MutateCampaignsResponse = _reflection.GeneratedProtocolMessageType('MutateCampaignsResponse', (_message.Message,), dict(
DESCRIPTOR = _MUTATECAMPAIGNSRESPONSE,
__module__ = 'google.ads.googleads_v4.proto.services.campaign_service_pb2'
,
__doc__ = """Response message for campaign mutate.
Attributes:
partial_failure_error:
Errors that pertain to operation failures in the partial
failure mode. Returned only when partial\_failure = true and
all errors occur inside the operations. If any errors occur
outside the operations (e.g. auth errors), we return an RPC
level error.
results:
All results for the mutate.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.services.MutateCampaignsResponse)
))
_sym_db.RegisterMessage(MutateCampaignsResponse)
MutateCampaignResult = _reflection.GeneratedProtocolMessageType('MutateCampaignResult', (_message.Message,), dict(
DESCRIPTOR = _MUTATECAMPAIGNRESULT,
__module__ = 'google.ads.googleads_v4.proto.services.campaign_service_pb2'
,
__doc__ = """The result for the campaign mutate.
Attributes:
resource_name:
Returned for successful operations.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v4.services.MutateCampaignResult)
))
_sym_db.RegisterMessage(MutateCampaignResult)
DESCRIPTOR._options = None
_GETCAMPAIGNREQUEST.fields_by_name['resource_name']._options = None
_MUTATECAMPAIGNSREQUEST.fields_by_name['customer_id']._options = None
_MUTATECAMPAIGNSREQUEST.fields_by_name['operations']._options = None
_CAMPAIGNSERVICE = _descriptor.ServiceDescriptor(
name='CampaignService',
full_name='google.ads.googleads.v4.services.CampaignService',
file=DESCRIPTOR,
index=0,
serialized_options=_b('\312A\030googleads.googleapis.com'),
serialized_start=1024,
serialized_end=1475,
methods=[
_descriptor.MethodDescriptor(
name='GetCampaign',
full_name='google.ads.googleads.v4.services.CampaignService.GetCampaign',
index=0,
containing_service=None,
input_type=_GETCAMPAIGNREQUEST,
output_type=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_campaign__pb2._CAMPAIGN,
serialized_options=_b('\202\323\344\223\002-\022+/v4/{resource_name=customers/*/campaigns/*}\332A\rresource_name'),
),
_descriptor.MethodDescriptor(
name='MutateCampaigns',
full_name='google.ads.googleads.v4.services.CampaignService.MutateCampaigns',
index=1,
containing_service=None,
input_type=_MUTATECAMPAIGNSREQUEST,
output_type=_MUTATECAMPAIGNSRESPONSE,
serialized_options=_b('\202\323\344\223\0023\"./v4/customers/{customer_id=*}/campaigns:mutate:\001*\332A\026customer_id,operations'),
),
])
_sym_db.RegisterServiceDescriptor(_CAMPAIGNSERVICE)
DESCRIPTOR.services_by_name['CampaignService'] = _CAMPAIGNSERVICE
# @@protoc_insertion_point(module_scope)
| 46.900485 | 2,366 | 0.774776 |
4a24629988cc6570a7c45d5fec03ee8898181a60 | 2,303 | py | Python | ooobuild/lo/text/x_paragraph_cursor.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/text/x_paragraph_cursor.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | ooobuild/lo/text/x_paragraph_cursor.py | Amourspirit/ooo_uno_tmpl | 64e0c86fd68f24794acc22d63d8d32ae05dd12b8 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Interface Class
# this is a auto generated file generated by Cheetah
# Libre Office Version: 7.3
# Namespace: com.sun.star.text
from abc import abstractmethod
from .x_text_cursor import XTextCursor as XTextCursor_a60c0b48
class XParagraphCursor(XTextCursor_a60c0b48):
"""
makes it possible to move paragraph by paragraph.
See Also:
`API XParagraphCursor <https://api.libreoffice.org/docs/idl/ref/interfacecom_1_1sun_1_1star_1_1text_1_1XParagraphCursor.html>`_
"""
__ooo_ns__: str = 'com.sun.star.text'
__ooo_full_ns__: str = 'com.sun.star.text.XParagraphCursor'
__ooo_type_name__: str = 'interface'
__pyunointerface__: str = 'com.sun.star.text.XParagraphCursor'
@abstractmethod
def gotoEndOfParagraph(self, bExpand: bool) -> bool:
"""
moves the cursor to the end of the current paragraph.
"""
@abstractmethod
def gotoNextParagraph(self, bExpand: bool) -> bool:
"""
moves the cursor to the next paragraph.
"""
@abstractmethod
def gotoPreviousParagraph(self, bExpand: bool) -> bool:
"""
moves the cursor to the previous paragraph.
"""
@abstractmethod
def gotoStartOfParagraph(self, bExpand: bool) -> bool:
"""
moves the cursor to the start of the current paragraph.
"""
@abstractmethod
def isEndOfParagraph(self) -> bool:
"""
determines if the cursor is positioned at the end of a paragraph.
"""
@abstractmethod
def isStartOfParagraph(self) -> bool:
"""
determines if the cursor is positioned at the start of a paragraph.
"""
__all__ = ['XParagraphCursor']
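# --- Usage sketch (illustrative addition, not part of the generated interface file) ---
# Any UNO object implementing com.sun.star.text.XParagraphCursor can be walked paragraph by
# paragraph. The `cursor` argument is assumed to come from a Writer text object (for example
# XText.createTextCursor()) whose cursor supports this interface.
def _example_count_paragraphs(cursor: XParagraphCursor) -> int:
    count = 1
    cursor.gotoStartOfParagraph(False)
    while cursor.gotoNextParagraph(False):  # returns False once the last paragraph is reached
        count += 1
    return count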
| 33.376812 | 135 | 0.688667 |
4a24639d8c9c3eda34b894341a020e7a8d302953 | 304 | py | Python | Program's_Contributed_By_Contributors/Python_Programs/dice roller.py | alok2947/Hacktoberfest2021 | d7479b32312533e084745b4da1a96f1cf109bd4d | [
"MIT"
] | null | null | null | Program's_Contributed_By_Contributors/Python_Programs/dice roller.py | alok2947/Hacktoberfest2021 | d7479b32312533e084745b4da1a96f1cf109bd4d | [
"MIT"
] | null | null | null | Program's_Contributed_By_Contributors/Python_Programs/dice roller.py | alok2947/Hacktoberfest2021 | d7479b32312533e084745b4da1a96f1cf109bd4d | [
"MIT"
] | null | null | null | import random
while True:
    start = input('Type ROLL to start rolling the dice: ')
    if start == 'ROLL':
        print('Rolling dice..')
        print(f"The value is {random.randint(1, 6)}")
        again = input("To close the program type CLOSE; press anything else to continue: ")
        if again == 'CLOSE':
break
| 27.636364 | 91 | 0.680921 |
4a2466fae8e7953c93bb79274e60a8f72f31cfaf | 760 | py | Python | tests/enclosed_leds.py | Vykstorm/mowaysim | c16bd0d0453e744a8920ef979f65600fd5775542 | [
"MIT"
] | null | null | null | tests/enclosed_leds.py | Vykstorm/mowaysim | c16bd0d0453e744a8920ef979f65600fd5775542 | [
"MIT"
] | null | null | null | tests/enclosed_leds.py | Vykstorm/mowaysim | c16bd0d0453e744a8920ef979f65600fd5775542 | [
"MIT"
] | null | null | null | import sys, atexit
from time import sleep
from mowaysim import *
print 'Executing ' + __name__ + ' test...'
if __name__ == '__main__':
atexit.register(exit_mow)
channel = 7
moway.usbinit_moway()
ret = moway.init_moway(channel)
if ret == 0:
print 'Moway RFUSB Connected'
else:
print 'Moway RFUSB not connected. Exit'
exit(-1)
moway.command_moway(CMD_GO_SIMPLE,0)
moway.command_moway(CMD_FRONTLEDON,0)
while True:
line = moway.get_line_left() + moway.get_line_right()
if line > 50:
moway.command_moway(CMD_LEDSOFF,0)
moway.command_moway(CMD_BRAKELEDON,0)
moway.command_moway(CMD_TURN_AROUND,0)
moway.wait_mot_end(2)
moway.command_moway(CMD_GO_SIMPLE,0)
moway.command_moway(CMD_LEDSOFF,0)
moway.command_moway(CMD_FRONTLEDON,0)
| 25.333333 | 54 | 0.752632 |
4a24691326aad8079c39c42a97f1d650ef49bc3f | 350 | py | Python | server/setup.py | DmitryTakmakov/Takmachat | b145fb0a95f86613da5a168031263a2227065736 | [
"MIT"
] | null | null | null | server/setup.py | DmitryTakmakov/Takmachat | b145fb0a95f86613da5a168031263a2227065736 | [
"MIT"
] | null | null | null | server/setup.py | DmitryTakmakov/Takmachat | b145fb0a95f86613da5a168031263a2227065736 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
setup(name="takmachat_server",
version="0.0.1",
description="server for small messaging application",
author="Dmitry Takmakov",
author_email="[email protected]",
packages=find_packages(),
install_requires=['PyQt5', 'sqlalchemy', 'pycryptodome', 'pycryptodomex']
)
| 31.818182 | 79 | 0.685714 |
4a24697d9982782378e087f73adc491dcfcaa94d | 3,013 | py | Python | pyaedt/generic/filesystem.py | myoung301/pyaedt | e848dabbdd65aada3af1c5389947bb8a1bfe4a6f | [
"MIT"
] | 38 | 2021-10-01T23:15:26.000Z | 2022-03-30T18:14:41.000Z | pyaedt/generic/filesystem.py | myoung301/pyaedt | e848dabbdd65aada3af1c5389947bb8a1bfe4a6f | [
"MIT"
] | 362 | 2021-09-30T17:11:55.000Z | 2022-03-31T13:36:20.000Z | pyaedt/generic/filesystem.py | pyansys/pyaedt | c7b045fede6bc707fb20a8db7d5680c66d8263f6 | [
"MIT"
] | 15 | 2021-09-30T20:21:02.000Z | 2022-02-21T20:22:03.000Z | import os
import random
import shutil
import string
from glob import glob
from distutils.dir_util import copy_tree
def my_location():
    """Return the normalized path of the directory that contains this module."""
return os.path.normpath(os.path.dirname(__file__))
def files_in_directory(path=".", ext=""):
    """Return the files in ``path`` whose names end with ``.ext``.
    Parameters
    ----------
    path : str, optional
        Directory to scan. The default is ``'.'``.
    ext : str, optional
        File extension to match, without the leading dot. The default is ``''``.
    Returns
    -------
    list
        Paths of the matching files (``path`` joined with the file name).
    """
result = []
if os.path.exists(path):
for dir in os.listdir(path):
bd = os.path.join(path, dir)
if os.path.isfile(bd) and dir.endswith("." + ext):
result.append(bd)
return result
class Scratch:
    """Scratch folder helper: creates a uniquely named directory and can remove it on exit (context manager)."""
@property
def path(self):
""" """
return self._scratch_path
@property
def is_empty(self):
""" """
return self._cleaned
def __init__(self, local_path, permission=0o777, volatile=False):
self._volatile = volatile
self._cleaned = True
char_set = string.ascii_uppercase + string.digits
self._scratch_path = os.path.normpath(os.path.join(local_path, "scratch" + "".join(random.sample(char_set, 6))))
if os.path.exists(self._scratch_path):
try:
self.remove()
except:
self._cleaned = False
if self._cleaned:
try:
os.mkdir(self.path)
os.chmod(self.path, permission)
except:
pass
def remove(self):
""" """
try:
# TODO check why on Anaconda 3.7 get errors with os.path.exists
shutil.rmtree(self._scratch_path, ignore_errors=True)
except:
pass
def copyfile(self, src_file, dst_filename=None):
"""
Parameters
----------
        src_file : str
            Source file with full path.
        dst_filename : str, optional
            Optional destination file name, including the extension.
        Returns
        -------
        str
            Full path of the copied file inside the scratch folder.
        """
if dst_filename:
dst_file = os.path.join(self.path, dst_filename)
else:
dst_file = os.path.join(self.path, os.path.basename(src_file))
shutil.copy2(src_file, dst_file)
return dst_file
    def copyfolder(self, src_folder, destfolder):
        """Recursively copy ``src_folder`` into ``destfolder``.
        Parameters
        ----------
        src_folder : str
            Source folder to copy.
        destfolder : str
            Destination folder.
        Returns
        -------
        bool
            ``True`` when the copy completes.
        """
copy_tree(src_folder, destfolder)
return True
def __enter__(self):
return self
def __exit__(self, ex_type, ex_value, ex_traceback):
if ex_type or self._volatile:
self.remove()
def get_json_files(start_folder):
"""
    Get the paths of all ``*.json`` files under ``start_folder``, including subfolders.
    Parameters
    ----------
    start_folder : str
        Path to the folder where the json files are located.
    Returns
    -------
    list
        Paths of the ``*.json`` files that were found.
    """
return [y for x in os.walk(start_folder) for y in glob(os.path.join(x[0], "*.json"))]
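# --- Usage sketch (illustrative addition, not part of the original module) ---
# Scratch is meant to be used as a context manager: it creates a uniquely named scratch
# folder and removes it on exit when `volatile=True` (or when an exception is raised in
# the block). The demo file created below is purely hypothetical.
if __name__ == "__main__":
    import tempfile
    demo_src = os.path.join(tempfile.gettempdir(), "scratch_demo.txt")
    with open(demo_src, "w") as handle:
        handle.write("demo")
    with Scratch(tempfile.gettempdir(), volatile=True) as scratch:
        copied = scratch.copyfile(demo_src)
        print(scratch.path, copied)
    # the scratch folder has been removed here because volatile=True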
| 22.154412 | 120 | 0.542649 |
4a246a5a806ca9def1d604e24029f76e6ff0f849 | 2,141 | py | Python | main.py | BadrBelkadi/Ultimate-Calculator | a5e28d3abdc77461a86c79e8f10a752869e40b6d | [
"MIT"
] | null | null | null | main.py | BadrBelkadi/Ultimate-Calculator | a5e28d3abdc77461a86c79e8f10a752869e40b6d | [
"MIT"
] | null | null | null | main.py | BadrBelkadi/Ultimate-Calculator | a5e28d3abdc77461a86c79e8f10a752869e40b6d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Made by Jordan Leich on 6/1/2021, all contributors via GitHub can be found at
# https://github.com/JordanLeich/Ultimate-Calculator/graphs/contributors
# Imports
from calculators import algebra, conversions, stocks, financial, calculator, randomization
import restart
import colors
import contribution
import end
import time
from gui import *
def wrong_option():
print(colors.red + 'User input error found... Restarting user input choice...', colors.reset)
time.sleep(2)
start()
INPUT_CHOICES = {
1: calculator.start,
2: algebra.start,
3: conversions.start,
4: stocks.start,
5: financial.start,
6: randomization.start,
7: contribution.start,
8: restart.restart,
9: end.end
}
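# Dispatch table (comment added for clarity): maps the menu number the user types to the
# callable that handles it, so start() can run a selection via INPUT_CHOICES.get(choice2, wrong_option)().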
def start():
choice1 = str(
input('Want to run the GUI version of this project (yes / no): '))
print()
if choice1.lower() in ['y', 'yes']:
print(colors.green, 'GUI Application is now running!\n', colors.reset)
start_gui()
elif choice1.lower() in ['n', 'no']:
print('Proceeding to normal calculator...\n')
else:
print(colors.red + 'User input error found... Restarting input choice...\n', colors.reset)
time.sleep(2)
start()
print(colors.green + 'All Calculators and Converters!', colors.reset)
choice2 = int(input('''(1) Basic Arithmetic Math (Add, Subtract, Multiply, Divide, & More)
(2) Algebra (Find Slope, Pythagorean Theorem)
(3) All Converters (Temperature, Mass, Length, Volume, Digital Storage, Speed, & More)
(4) Stock Market Shares Calculator (Gain/Loss of a stock)
(5) Financial Calculator (Payroll, Tipping Amount, Compound Interest)
(6) Randomization (Random Number Generator, Heads or Tails)
(7) All Project Contributors
(8) Restart Program
(9) Exit Program
Which option would you like to pick: '''))
print()
if choice2 not in [1, 2, 3, 4, 5, 6, 7, 8, 9]:
wrong_option()
else:
# get function from dict and execute
# if option not found then execute wrong option function
        INPUT_CHOICES.get(choice2, wrong_option)()
if __name__ == '__main__':
start()
| 28.932432 | 98 | 0.671649 |
4a246b4cfa4fb982befcf8b06472ff64b43fd521 | 14,854 | py | Python | list3/task2/scrabble_genetic.py | KamilKrol5/metaheuristics-algorithms | 19270489fe54c7288429c6edbd7940bcb73bb481 | [
"MIT"
] | null | null | null | list3/task2/scrabble_genetic.py | KamilKrol5/metaheuristics-algorithms | 19270489fe54c7288429c6edbd7940bcb73bb481 | [
"MIT"
] | null | null | null | list3/task2/scrabble_genetic.py | KamilKrol5/metaheuristics-algorithms | 19270489fe54c7288429c6edbd7940bcb73bb481 | [
"MIT"
] | null | null | null | import fileinput
import random
import sys
import time
from collections import Counter
from itertools import chain, takewhile, count
from typing import Dict, Collection, List, Tuple, Generator, Optional, Union
import numpy as np
from scrabble import Scrabble, Word, Letter, WordUtils
from utils.utils import flatten, grouped_by_2, zip_longest
class ScrabbleGenetic(Scrabble):
MINIMUM_SELECTED = 2
class CannotMakeValidChildren(Exception):
pass
class IndividualsRandomZipper:
def __init__(self, sg: 'ScrabbleGenetic', mother: Word, father: Word):
self.sg = sg
self.father = father
self.mother = mother
# self.fill_values = np.random.permutation(list(chain(sg.unused_letters(mother),
# sg.unused_letters(father))))
self.fill_values = np.random.permutation(list(sg.available_letters))
if len(self.fill_values) == 0:
self.fill_values = sg.available_letters
def fill_generator(self):
return np.random.choice(self.fill_values)
def zip_randomly(self, reverse_mother=False, reverse_father=False) -> str:
mother, father = self.mother, self.father
if reverse_mother:
mother = reversed(mother)
if reverse_father:
father = reversed(father)
fetus = []
for mother_char, father_char \
in zip_longest(mother, father, fill_values_generator=self.fill_generator):
if np.random.rand() < 0.5:
fetus.append(mother_char)
else:
fetus.append(father_char)
return ''.join(fetus)
def __init__(self,
max_time: int,
available_letters: Dict[str, Letter],
max_population_size: int,
dictionary: Collection[str],
mutation_probability: float,
initial_population: List[Word]
):
super().__init__(max_time, available_letters, dictionary)
self.mutation_probability = mutation_probability
self.improvement_rate = 1.0
self.mutation_probability_multiplier = 1.0
self.initial_population = initial_population
self.max_population_size = max_population_size
self.word_utils = WordUtils(dictionary, available_letters)
self.population = self.initial_population
self._initialize_population()
self.start_population = self.population.copy()
self.population.sort(key=lambda i: self.word_utils.points(i), reverse=True)
print(f'STARTING POPULATION: {self.population}', file=sys.stderr)
@property
def solution(self) -> Tuple[Word, int]:
solution = sorted(self.population, key=lambda i: self.word_utils.points(i), reverse=True)[0]
return solution, self.word_utils.points(solution)
def unused_letters(self, word: Word) -> Generator[str, None, None]:
letters_in_word = Counter(word)
letters_in_word.setdefault(0)
letters_left = [list((av.count - letters_in_word[av.letter]) * av.letter)
for av in self.available_letters.values()
if av.count > letters_in_word[av.letter]]
return flatten(letters_left)
def _initialize_population(self) -> None:
for i in range(self.max_population_size - len(self.initial_population)):
new_individual = self._generate_random_word()
if new_individual is not None and new_individual not in self.population:
self.population.append(new_individual)
print(f'population size: {len(self.population)}. Maximum is: {self.max_population_size}', file=sys.stderr)
# def get_letters_with_repetitions(self) -> Generator[str, None, None]:
# return flatten(list(l.count * l.letter) for l in self.available_letters.values())
def _generate_random_word(self, attempts_count=2) -> Optional[Word]:
random_word_from_population = np.random.choice(self.population)
for _ in range(attempts_count):
mutated = self._mutate(random_word_from_population)
mutated_valid_prefix = self.word_utils.longest_valid_prefix(mutated)
if mutated_valid_prefix is not None:
return mutated_valid_prefix
return None
@classmethod
def from_stdin(cls,
max_population_size: int,
dictionary: Collection[str],
mutation_probability: float,
) -> 'ScrabbleGenetic':
available_letters, initial_solutions, max_time = ScrabbleGenetic.read_input()
return cls(max_time=max_time,
available_letters=available_letters,
max_population_size=max_population_size,
dictionary=dictionary,
mutation_probability=mutation_probability,
initial_population=initial_solutions)
@staticmethod
def read_input(filename=None) -> Tuple[Dict[str, Letter], List[Word], int]:
with open(filename, 'r') if filename is not None else fileinput.input() as file:
first_line = file.readline()
[max_time, letters_count, initial_solution_count] = [int(x) for x in first_line.split()]
available_letters = {}
initial_solutions: List[Word] = []
for i, line in enumerate(file, 0):
if i < letters_count:
letter, points = str.rstrip(line).split()
points = int(points)
if letter in available_letters:
available_letters[letter].count += 1
else:
available_letters[letter] = Letter(letter, points, 1)
else:
initial_solutions.append(Word(str.lower(line.rstrip())))
if len(initial_solutions) != initial_solution_count:
raise ValueError('Number of provided solutions different from declared.')
print(f'Provided initial solutions: {initial_solutions}', file=sys.stderr)
print(f'Provided available letters: {available_letters}', file=sys.stderr)
return available_letters, initial_solutions, max_time
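        # Example of the input format parsed above (illustrative comment, not part of the original):
        #   5 3 1     <- max_time, letters_count, initial_solution_count
        #   a 1       <- the next `letters_count` lines hold a letter and its points;
        #   b 3          a repeated letter increases Letter.count instead of adding a new entry
        #   a 1
        #   ab        <- the next `initial_solution_count` lines hold the initial solution words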
def _chance_for_reproduction(self, word: Word, divider=None) -> float:
if divider is None:
divider = np.max([self.word_utils.points(word) for word in self.population])
return (self.word_utils.points(word) / divider) ** 2
@staticmethod
def _random_swap(individual: Word, swap_count=1) -> Word:
length = len(individual)
if len(individual) < 2:
return individual
individual = list(individual)
for _ in range(swap_count):
i = np.random.randint(0, length)
j = np.random.randint(0, length)
individual[i], individual[j] = individual[j], individual[i]
return Word(''.join(individual))
def _random_letter_change(self, individual: Word, random_changes=1) -> Tuple[Word, List[int]]:
length = len(individual)
mutated_indexes = []
for _ in range(random_changes):
mutated_index = np.random.randint(0, length)
if mutated_index == length - 1:
end = ''
else:
end = individual[mutated_index + 1:]
free_letters = list(chain(self.unused_letters(individual), [individual[mutated_index]]))
individual = individual[:mutated_index] + np.random.choice(free_letters) + end
mutated_indexes.append(mutated_index)
return individual, mutated_indexes
def _mutate(self, individual: Word) -> Word:
"""
        May return a word which is not valid.
        The given individual is not changed.
"""
changed = individual
for _ in range(1, int((self.mutation_probability_multiplier - 1) * 10) + 1):
rand = np.random.rand()
if rand < 0.78:
changed, _ = self._random_letter_change(changed)
elif rand < 0.86:
changed = self._random_swap(individual)
elif rand < 0.96:
changed = self._random_swap(individual)
changed, _ = self._random_letter_change(changed)
letters_for_random_tail = list(self.unused_letters(changed))
# letters_for_random_tail = list(self.available_letters.keys())
if len(letters_for_random_tail) > 0:
np.random.shuffle(letters_for_random_tail)
random_tail = ''.join(letters_for_random_tail)
if np.random.rand() < 0.90:
result = changed + random_tail
else:
random_prefix = ''.join(
np.random.choice(letters_for_random_tail, np.random.randint(0, len(individual))))
result = random_prefix + changed + random_tail
else:
random_tail = 'NONE'
result = changed
# print(f'MUTATION: {individual} -> {result}; mutated_indexes = ...; random_tail = {random_tail};',
# file=sys.stderr)
return result
def _random_mutate(self, individual: Union[Word, str]) -> Word:
"""
        May return a word which is not valid.
        The given individual is not changed.
"""
if self.mutation_probability_multiplier * np.random.rand() < self.mutation_probability:
return self._mutate(individual)
return individual
def _evolve(self) -> None:
mean_of_last_gen_elite = \
np.mean([self.word_utils.points(i) for i in self.population[:min(2, self.max_population_size // 10)]])
to_reproduce = self._selection()
failed = self._reproduce(to_reproduce)
if failed == len(to_reproduce):
print(f'It was not possible to make a child for any of selected individuals.', file=sys.stderr)
old_rate = self.improvement_rate
self.improvement_rate = abs(
np.mean([self.word_utils.points(i) for i in self.population[:min(2, self.max_population_size // 10)]]) -
mean_of_last_gen_elite)
if self.improvement_rate > 0:
self.mutation_probability_multiplier = 1.0
elif old_rate == self.improvement_rate == 0:
self.mutation_probability_multiplier += 0.0001 / self.mutation_probability_multiplier
self.mutation_probability_multiplier = min(self.mutation_probability_multiplier, 1.2)
print(f'Improvement rate: {self.improvement_rate}; '
f'Mutation chance multiplier: {self.mutation_probability_multiplier}', file=sys.stderr)
def _selection(self) -> List[Word]:
selected: List[Word] = []
# self.population.sort(key=lambda h: self.word_utils.points(h), reverse=True)
# max_points = np.max([self.word_utils.points(word) for word in self.population[:len(self.population) // 5]])
max_points = np.max([self.word_utils.points(word) for word in self.population[:5]])
for i in takewhile(lambda _: len(selected) < self.MINIMUM_SELECTED, count(0.0, 0.1)):
selected.clear()
for individual in self.population:
if i + np.random.rand() < self._chance_for_reproduction(individual, max_points):
selected.append(individual)
print(f'Selected for reproduction: {len(selected)} of {len(self.population)}', file=sys.stderr)
np.random.shuffle(selected)
return selected
@staticmethod
def _connect_randomly(mother: Word, father: Word):
split_index_mother = random.randrange(len(mother))
split_index_father = random.randrange(len(father))
        return mother[:split_index_mother] + father[split_index_father:]
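    # Illustrative sketch (editorial note): _connect_randomly is a simple one-point
    # crossover, e.g. with mother='HOUSE', father='CHAIR' and split indices 3 and 2
    # it returns 'HOU' + 'AIR' = 'HOUAIR'; the result is not guaranteed to be a
    # valid word and is only validated later (e.g. via longest_valid_prefix).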
def _cross(self, mother: Word, father: Word, attempts_count: int) -> List[Word]:
zipper = self.IndividualsRandomZipper(self, mother, father)
parents_points_min = min(self.word_utils.points(mother), self.word_utils.points(father))
children = []
# fetuses = [
# zipper.zip_randomly(reverse_mother=True),
# zipper.zip_randomly(reverse_father=True),
# zipper.zip_randomly(True, True),
# ]
# fetuses = filter(lambda x: x is not None and
# self.word_utils.points(x) > parents_points_min and
# x not in self.population,
# [self.word_utils.longest_valid_prefix(self._random_mutate(f)) for f in fetuses]
# )
# children.extend(set(fetuses))
# print(mother, father, children)
for _ in range(max(0, attempts_count - len(children))):
fetus = zipper.zip_randomly()
# fetus = self._connect_randomly(mother, father)
fetus = self._random_mutate(Word(fetus))
fetus = self.word_utils.longest_valid_prefix(fetus)
if fetus is not None and self.word_utils.points(fetus) > parents_points_min and \
fetus != mother and fetus != father and fetus not in children:
print(f'SUCCESSFUL MUTATION: {mother} x {father} -> {fetus};', file=sys.stderr)
children.append(fetus)
if len(children) == 0:
raise self.CannotMakeValidChildren('Failed to make children in given number of attempts')
return children
def _reproduce(self, selected_for_reproduction: List[Word]) -> int:
failed = 0
if len(selected_for_reproduction) <= 1:
raise ValueError('Cannot reproduce one individual')
if (len(selected_for_reproduction) - 1) % 2 == 0:
selected_for_reproduction = selected_for_reproduction[:-1]
for mother, father in grouped_by_2(selected_for_reproduction):
try:
children = self._cross(mother, father, attempts_count=8)
self.population.extend(children)
except self.CannotMakeValidChildren:
failed += 1
self.population = list(set(self.population))
self.population.sort(key=lambda i: self.word_utils.points(i), reverse=True)
np.random.shuffle(self.population[int(self.max_population_size / 3):])
self.population = self.population[:self.max_population_size]
return failed
def run_algorithm(self):
end_time = time.time() + self.max_time
i = 0
while time.time() < end_time:
self._evolve()
i += 1
print(f'Initial population: {self.start_population}', file=sys.stderr)
print(f'Initial population costs: {[self.word_utils.points(i) for i in self.start_population]}', file=sys.stderr)
print(f'iterations = {i}', file=sys.stderr)
return self.solution
| 45.704615 | 121 | 0.624209 |
4a246c380ba3e805fa0cc968ef5e9dc281d23404 | 17,626 | py | Python | detector/yolo/darknet_trt.py | oreo-lp/AlphaPose_TRT | ffb04455b9ff2a1da3c8e43c2b6acce7b4a9d082 | [
"Apache-2.0"
] | 36 | 2021-05-06T14:10:04.000Z | 2022-03-23T02:02:19.000Z | detector/yolo/darknet_trt.py | oreo-lp/AlphaPose_TRT | ffb04455b9ff2a1da3c8e43c2b6acce7b4a9d082 | [
"Apache-2.0"
] | 10 | 2021-05-11T11:36:10.000Z | 2022-03-28T13:41:59.000Z | detector/yolo/darknet_trt.py | oreo-lp/AlphaPose_TRT | ffb04455b9ff2a1da3c8e43c2b6acce7b4a9d082 | [
"Apache-2.0"
] | 11 | 2021-05-17T07:18:19.000Z | 2022-03-30T07:19:00.000Z | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
import cv2
import matplotlib.pyplot as plt
try:
from util import count_parameters as count
from util import convert2cpu as cpu
from util import predict_transform
except ImportError:
from detector.yolo.util import count_parameters as count
from detector.yolo.util import convert2cpu as cpu
from detector.yolo.util import predict_transform
class test_net(nn.Module):
def __init__(self, num_layers, input_size):
super(test_net, self).__init__()
self.num_layers = num_layers
self.linear_1 = nn.Linear(input_size, 5)
self.middle = nn.ModuleList([nn.Linear(5, 5) for x in range(num_layers)])
self.output = nn.Linear(5, 2)
def forward(self, x):
x = x.view(-1)
fwd = nn.Sequential(self.linear_1, *self.middle, self.output)
return fwd(x)
def get_test_input():
img = cv2.imread("dog-cycle-car.png")
img = cv2.resize(img, (416, 416))
img_ = img[:, :, ::-1].transpose((2, 0, 1))
img_ = img_[np.newaxis, :, :, :] / 255.0
img_ = torch.from_numpy(img_).float()
img_ = Variable(img_)
return img_
def parse_cfg(cfgfile):
"""
Takes a configuration file
Returns a list of blocks. Each blocks describes a block in the neural
network to be built. Block is represented as a dictionary in the list
"""
file = open(cfgfile, 'r')
lines = file.read().split('\n') # store the lines in a list
    lines = [x for x in lines if len(x) > 0]  # get rid of the empty lines
lines = [x for x in lines if x[0] != '#']
lines = [x.rstrip().lstrip() for x in lines]
block = {}
blocks = []
for line in lines:
if line[0] == "[": # This marks the start of a new block
if len(block) != 0:
blocks.append(block)
block = {}
block["type"] = line[1:-1].rstrip()
else:
key, value = line.split("=")
block[key.rstrip()] = value.lstrip()
blocks.append(block)
return blocks
# print('\n\n'.join([repr(x) for x in blocks]))
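# Illustrative example (editorial addition, not part of the original file): for a
# minimal cfg containing
#     [net]
#     height=416
#     [convolutional]
#     filters=32
# parse_cfg returns [{'type': 'net', 'height': '416'},
#                    {'type': 'convolutional', 'filters': '32'}];
# every value is kept as a string and cast to int/float by create_modules later.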
import pickle as pkl
class MaxPoolStride1(nn.Module):
def __init__(self, kernel_size):
super(MaxPoolStride1, self).__init__()
self.kernel_size = kernel_size
self.pad = kernel_size - 1
def forward(self, x):
padding = int(self.pad / 2)
# padded_x = F.pad(x, (0,self.pad,0,self.pad), mode="replicate")
# pooled_x = nn.MaxPool2d(self.kernel_size, self.pad)(padded_x)
# padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode="replicate")
padded_x = F.pad(x, (padding, padding, padding, padding), mode="constant", value=0)
pooled_x = nn.MaxPool2d(self.kernel_size, 1)(padded_x)
return pooled_x
class EmptyLayer(nn.Module):
def __init__(self):
super(EmptyLayer, self).__init__()
class DetectionLayer(nn.Module):
def __init__(self, anchors):
super(DetectionLayer, self).__init__()
self.anchors = anchors
def forward(self, x, inp_dim, num_classes, confidence):
x = x.data
global args
prediction = x.to(args.device)
prediction = predict_transform(prediction, inp_dim, self.anchors, num_classes, confidence, args)
return prediction
class Upsample(nn.Module):
def __init__(self, stride=2):
super(Upsample, self).__init__()
self.stride = stride
def forward(self, x):
stride = self.stride
assert (x.data.dim() == 4)
B = x.data.size(0)
C = x.data.size(1)
H = x.data.size(2)
W = x.data.size(3)
ws = stride
hs = stride
x = x.view(B, C, H, 1, W, 1).expand(B, C, H, stride, W, stride).contiguous().view(B, C, H * stride, W * stride)
return x
#
class ReOrgLayer(nn.Module):
def __init__(self, stride=2):
super(ReOrgLayer, self).__init__()
self.stride = stride
def forward(self, x):
assert (x.data.dim() == 4)
B, C, H, W = x.data.shape
hs = self.stride
ws = self.stride
assert (H % hs == 0), "The stride " + str(self.stride) + " is not a proper divisor of height " + str(H)
        assert (W % ws == 0), "The stride " + str(self.stride) + " is not a proper divisor of width " + str(W)
x = x.view(B, C, H // hs, hs, W // ws, ws).transpose(-2, -3).contiguous()
x = x.view(B, C, H // hs * W // ws, hs, ws)
x = x.view(B, C, H // hs * W // ws, hs * ws).transpose(-1, -2).contiguous()
x = x.view(B, C, ws * hs, H // ws, W // ws).transpose(1, 2).contiguous()
x = x.view(B, C * ws * hs, H // ws, W // ws)
return x
def create_modules(blocks):
net_info = blocks[0] # Captures the information about the input and pre-processing
module_list = nn.ModuleList()
index = 0 # indexing blocks helps with implementing route layers (skip connections)
prev_filters = 3
output_filters = []
for x in blocks:
module = nn.Sequential()
if x["type"] == "net":
continue
# If it's a convolutional layer
if x["type"] == "convolutional":
# Get the info about the layer
activation = x["activation"]
try:
batch_normalize = int(x["batch_normalize"])
bias = False
except:
batch_normalize = 0
bias = True
filters = int(x["filters"])
padding = int(x["pad"])
kernel_size = int(x["size"])
stride = int(x["stride"])
if padding:
pad = (kernel_size - 1) // 2
else:
pad = 0
# Add the convolutional layer
conv = nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=bias)
module.add_module("conv_{0}".format(index), conv)
# Add the Batch Norm Layer
if batch_normalize:
bn = nn.BatchNorm2d(filters)
module.add_module("batch_norm_{0}".format(index), bn)
# Check the activation.
# It is either Linear or a Leaky ReLU for YOLO
if activation == "leaky":
activn = nn.LeakyReLU(0.1, inplace=True)
module.add_module("leaky_{0}".format(index), activn)
# If it's an upsampling layer
# We use Bilinear2dUpsampling
elif x["type"] == "upsample":
stride = int(x["stride"])
# upsample = Upsample(stride)
upsample = nn.Upsample(scale_factor=2, mode="nearest")
module.add_module("upsample_{}".format(index), upsample)
# If it is a route layer
elif x["type"] == "route":
x["layers"] = x["layers"].split(',')
# Start of a route
start = int(x["layers"][0])
if len(x["layers"]) <= 2:
# end, if there exists one.
try:
end = int(x["layers"][1])
except:
end = 0
                # Positive annotation
if start > 0:
start = start - index
if end > 0:
end = end - index
route = EmptyLayer()
module.add_module("route_{0}".format(index), route)
if end < 0:
filters = output_filters[index + start] + output_filters[index + end]
else:
filters = output_filters[index + start]
else: # SPP-route
assert len(x["layers"]) == 4
                route = EmptyLayer()
module.add_module("route_{0}".format(index), route)
filters = output_filters[index + start] + output_filters[index + int(x["layers"][1])] \
+ output_filters[index + int(x["layers"][2])] + output_filters[index + int(x["layers"][3])]
# shortcut corresponds to skip connection
elif x["type"] == "shortcut":
from_ = int(x["from"])
shortcut = EmptyLayer()
module.add_module("shortcut_{}".format(index), shortcut)
elif x["type"] == "maxpool":
stride = int(x["stride"])
size = int(x["size"])
if stride != 1:
maxpool = nn.MaxPool2d(size, stride)
else:
maxpool = MaxPoolStride1(size)
# maxpool = nn.MaxPool2d(size, stride=1, padding=size-1)
module.add_module("maxpool_{}".format(index), maxpool)
# Yolo is the detection layer
elif x["type"] == "yolo":
mask = x["mask"].split(",")
mask = [int(x) for x in mask]
anchors = x["anchors"].split(",")
anchors = [int(a) for a in anchors]
anchors = [(anchors[i], anchors[i + 1]) for i in range(0, len(anchors), 2)]
anchors = [anchors[i] for i in mask]
detection = DetectionLayer(anchors)
module.add_module("Detection_{}".format(index), detection)
else:
print("Something I dunno")
assert False
module_list.append(module)
prev_filters = filters
output_filters.append(filters)
index += 1
return (net_info, module_list)
class Darknet(nn.Module):
def __init__(self, cfgfile, args):
super(Darknet, self).__init__()
self.args = args
self.blocks = parse_cfg(cfgfile)
self.net_info, self.module_list = create_modules(self.blocks)
self.header = torch.IntTensor([0, 0, 0, 0])
self.seen = 0
def get_blocks(self):
return self.blocks
def get_module_list(self):
return self.module_list
def forward(self, x):
detections = []
modules = self.blocks[1:]
outputs = {} # We cache the outputs for the route layer
write = 0
for i in range(len(modules)):
module_type = (modules[i]["type"])
if module_type == "convolutional" or module_type == "upsample" or module_type == "maxpool":
x = self.module_list[i](x)
outputs[i] = x
elif module_type == "route":
layers = modules[i]["layers"]
layers = [int(a) for a in layers]
if (layers[0]) > 0:
layers[0] = layers[0] - i
if len(layers) == 1:
x = outputs[i + (layers[0])]
elif len(layers) == 2:
if (layers[1]) > 0:
layers[1] = layers[1] - i
map1 = outputs[i + layers[0]]
map2 = outputs[i + layers[1]]
x = torch.cat((map1, map2), 1)
elif len(layers) == 4: # SPP
map1 = outputs[i + layers[0]]
map2 = outputs[i + layers[1]]
map3 = outputs[i + layers[2]]
map4 = outputs[i + layers[3]]
x = torch.cat((map1, map2, map3, map4), 1)
outputs[i] = x
elif module_type == "shortcut":
from_ = int(modules[i]["from"])
x = outputs[i - 1] + outputs[i + from_]
outputs[i] = x
elif module_type == 'yolo':
anchors = self.module_list[i][0].anchors
# Get the input dimensions
inp_dim = int(self.net_info["height"])
# Get the number of classes
num_classes = int(modules[i]["classes"])
# Output the result
# x = x.data.to(self.args.device) # three dimensions
                # Do not fetch the tensor via x.data; that access pattern is deprecated
x = x.to(self.args.device)
# torch.Size([1, 255, 19, 19]), torch.Size([1, 255, 38, 38]), torch.Size([1, 255, 76, 76])
x = predict_transform(x, inp_dim, anchors, num_classes, self.args)
# x = torch.randn(1, 255, 19, 19, dtype=torch.float32, device='cuda')
if type(x) == int:
continue
if not write:
detections = x
write = 1
else:
detections = torch.cat((detections, x), 1)
outputs[i] = outputs[i - 1]
try:
return detections
except:
return 0
def load_weights(self, weightfile):
# Open the weights file
fp = open(weightfile, "rb")
        # The first 5 values are header information
        # 1. Major version number
        # 2. Minor version number
        # 3. Subversion number
        # 4, 5. Images seen by the network (during training)
header = np.fromfile(fp, dtype=np.int32, count=5)
self.header = torch.from_numpy(header)
self.seen = self.header[3]
# The rest of the values are the weights
# Let's load them up
weights = np.fromfile(fp, dtype=np.float32)
ptr = 0
for i in range(len(self.module_list)):
module_type = self.blocks[i + 1]["type"]
if module_type == "convolutional":
model = self.module_list[i]
try:
batch_normalize = int(self.blocks[i + 1]["batch_normalize"])
except:
batch_normalize = 0
conv = model[0]
if (batch_normalize):
bn = model[1]
# Get the number of weights of Batch Norm Layer
num_bn_biases = bn.bias.numel()
# Load the weights
bn_biases = torch.from_numpy(weights[ptr:ptr + num_bn_biases])
ptr += num_bn_biases
bn_weights = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
bn_running_mean = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
bn_running_var = torch.from_numpy(weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
# Cast the loaded weights into dims of model weights.
bn_biases = bn_biases.view_as(bn.bias.data)
bn_weights = bn_weights.view_as(bn.weight.data)
bn_running_mean = bn_running_mean.view_as(bn.running_mean)
bn_running_var = bn_running_var.view_as(bn.running_var)
# Copy the data to model
bn.bias.data.copy_(bn_biases)
bn.weight.data.copy_(bn_weights)
bn.running_mean.copy_(bn_running_mean)
bn.running_var.copy_(bn_running_var)
else:
# Number of biases
num_biases = conv.bias.numel()
# Load the weights
conv_biases = torch.from_numpy(weights[ptr: ptr + num_biases])
ptr = ptr + num_biases
# reshape the loaded weights according to the dims of the model weights
conv_biases = conv_biases.view_as(conv.bias.data)
# Finally copy the data
conv.bias.data.copy_(conv_biases)
# Let us load the weights for the Convolutional layers
num_weights = conv.weight.numel()
# Do the same as above for weights
conv_weights = torch.from_numpy(weights[ptr:ptr + num_weights])
ptr = ptr + num_weights
conv_weights = conv_weights.view_as(conv.weight.data)
conv.weight.data.copy_(conv_weights)
def save_weights(self, savedfile, cutoff=0):
if cutoff <= 0:
cutoff = len(self.blocks) - 1
fp = open(savedfile, 'wb')
# Attach the header at the top of the file
self.header[3] = self.seen
header = self.header
header = header.numpy()
header.tofile(fp)
# Now, let us save the weights
for i in range(len(self.module_list)):
module_type = self.blocks[i + 1]["type"]
if (module_type) == "convolutional":
model = self.module_list[i]
try:
batch_normalize = int(self.blocks[i + 1]["batch_normalize"])
except:
batch_normalize = 0
conv = model[0]
if (batch_normalize):
bn = model[1]
# If the parameters are on GPU, convert them back to CPU
# We don't convert the parameter to GPU
# Instead. we copy the parameter and then convert it to CPU
# This is done as weight are need to be saved during training
cpu(bn.bias.data).numpy().tofile(fp)
cpu(bn.weight.data).numpy().tofile(fp)
cpu(bn.running_mean).numpy().tofile(fp)
cpu(bn.running_var).numpy().tofile(fp)
else:
cpu(conv.bias.data).numpy().tofile(fp)
# Let us save the weights for the Convolutional layers
cpu(conv.weight.data).numpy().tofile(fp)
#
# dn = Darknet('cfg/yolov3.cfg')
# dn.load_weights("yolov3.weights")
# inp = get_test_input()
# a, interms = dn(inp)
# dn.eval()
# a_i, interms_i = dn(inp)
| 33.896154 | 119 | 0.525304 |
4a246c4af13b4746a6d0c0eb2bc8f39d277e7f9e | 5,300 | py | Python | pyFM/mesh/laplacian.py | Yang-L1/pyFM | bfc9cf58da81441c13dbfe0645872e82b6038521 | [
"MIT"
] | null | null | null | pyFM/mesh/laplacian.py | Yang-L1/pyFM | bfc9cf58da81441c13dbfe0645872e82b6038521 | [
"MIT"
] | null | null | null | pyFM/mesh/laplacian.py | Yang-L1/pyFM | bfc9cf58da81441c13dbfe0645872e82b6038521 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.sparse as sparse
def dia_area_mat(vertices, faces, faces_areas=None):
"""
Compute the diagonal matrix of lumped vertex area for mesh laplacian.
    Entry i on the diagonal is the area of vertex i, approximated as one third
    of the total area of its adjacent triangles
Parameters
-----------------------------
vertices : (n,3) array of vertices coordinates
faces : (m,3) array of vertex indices defining faces
faces_area : (m,) - Optional, array of per-face area
Output
-----------------------------
A : (n,n) sparse diagonal matrix of vertex areas
"""
N = vertices.shape[0]
# Compute face area
if faces_areas is None:
v1 = vertices[faces[:,0]] # (m,3)
v2 = vertices[faces[:,1]] # (m,3)
v3 = vertices[faces[:,2]] # (m,3)
faces_areas = 0.5 * np.linalg.norm(np.cross(v2-v1,v3-v1),axis=1) # (m,)
I = np.concatenate([faces[:,0], faces[:,1], faces[:,2]])
J = np.zeros_like(I)
V = np.concatenate([faces_areas, faces_areas, faces_areas])/3
# Get array of vertex areas
vertex_areas = np.array(sparse.coo_matrix((V, (I, J)), shape=(N, 1)).todense()).flatten()
A = sparse.dia_matrix((vertex_areas,0), shape=(N, N))
return A
def fem_area_mat(vertices, faces, faces_areas=None):
"""
Compute the area matrix for mesh laplacian using finite elements method.
Entry (i,i) is 1/6 of the sum of the area of surrounding triangles
Entry (i,j) is 1/12 of the sum of the area of triangles using edge (i,j)
Parameters
-----------------------------
vertices : (n,3) array of vertices coordinates
faces : (m,3) array of vertex indices defining faces
faces_area : (m,) - Optional, array of per-face area
Output
-----------------------------
A : (n,n) sparse area matrix
"""
N = vertices.shape[0]
# Compute face area
if faces_areas is None:
v1 = vertices[faces[:,0]] # (m,3)
v2 = vertices[faces[:,1]] # (m,3)
v3 = vertices[faces[:,2]] # (m,3)
faces_areas = 0.5 * np.linalg.norm(np.cross(v2-v1, v3-v1), axis=1) # (m,)
# Use similar construction as cotangent weights
I = np.concatenate([faces[:,0], faces[:,1], faces[:,2]]) # (3m,)
J = np.concatenate([faces[:,1], faces[:,2], faces[:,0]]) # (3m,)
S = np.concatenate([faces_areas,faces_areas,faces_areas]) # (3m,)
In = np.concatenate([I, J, I]) # (9m,)
Jn = np.concatenate([J, I, I]) # (9m,)
Sn = 1/12 * np.concatenate([S, S, 2*S]) # (9m,)
A = sparse.coo_matrix((Sn, (In, Jn)), shape=(N, N)).tocsc()
return A
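# Editorial note: for a single triangle of area T, the assembly above reproduces the
# standard linear-FEM local mass matrix (T/12) * [[2, 1, 1], [1, 2, 1], [1, 1, 2]]:
# each diagonal entry receives 2*T/12 = T/6 and each off-diagonal entry T/12,
# matching the docstring.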
def cotangent_weights(vertices, faces):
"""
    Compute the cotangent weights matrix for mesh laplacian.
    Off-diagonal entry (i,j) is -1/2 * (cot(alpha_ij) + cot(beta_ij)), where
    alpha_ij and beta_ij are the angles opposite to edge (i,j) in its adjacent
    triangles. Diagonal entry (i,i) is minus the sum of the off-diagonal
    entries of row i, so every row sums to zero.
    Parameters
    -----------------------------
    vertices : (n,3) array of vertices coordinates
    faces : (m,3) array of vertex indices defining faces
    Output
    -----------------------------
    W : (n,n) sparse matrix of cotangent weights (stiffness matrix)
"""
N = vertices.shape[0]
v1 = vertices[faces[:,0]] # (m,3)
v2 = vertices[faces[:,1]] # (m,3)
v3 = vertices[faces[:,2]] # (m,3)
# Edge lengths indexed by opposite vertex
u1 = v3 - v2
u2 = v1 - v3
u3 = v2 - v1
L1 = np.linalg.norm(u1,axis=1) # (m,)
L2 = np.linalg.norm(u2,axis=1) # (m,)
L3 = np.linalg.norm(u3,axis=1) # (m,)
# Compute cosine of angles
A1 = np.einsum('ij,ij->i', -u2, u3) / (L2*L3) # (m,)
A2 = np.einsum('ij,ij->i', u1, -u3) / (L1*L3) # (m,)
A3 = np.einsum('ij,ij->i', -u1, u2) / (L1*L2) # (m,)
# Use cot(arccos(x)) = x/sqrt(1-x^2)
I = np.concatenate([faces[:,0], faces[:,1], faces[:,2]])
J = np.concatenate([faces[:,1], faces[:,2], faces[:,0]])
S = np.concatenate([A3,A1,A2])
S = 0.5 * S / np.sqrt(1-S**2)
In = np.concatenate([I, J, I, J])
Jn = np.concatenate([J, I, I, J])
Sn = np.concatenate([-S, -S, S, S])
W = sparse.coo_matrix((Sn, (In, Jn)), shape=(N, N)).tocsc()
return W
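# Worked example (editorial note): for an equilateral triangle every angle is 60
# degrees and cot(60 deg) = 1/sqrt(3) ~= 0.577, so each edge of that triangle adds
# -0.5/sqrt(3) to the two off-diagonal entries it touches and +0.5/sqrt(3) to the
# two matching diagonal entries, keeping every row sum at zero.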
def laplacian_spectrum(W, A, spectrum_size=200):
"""
Solves the generalized eigenvalue problem.
Change solver if necessary
Parameters
-----------------------------
W : (n,n) - sparse matrix of cotangent weights
A : (n,n) - sparse matrix of area weights
spectrum_size : int - number of eigenvalues to compute
"""
try:
eigenvalues, eigenvectors = sparse.linalg.eigsh(W, k=spectrum_size, M=A,
sigma=-0.01)
except RuntimeError:
# Initial eigenvector values:
print('PB WITH LBO')
init_eigenvecs = np.random.random((A.shape[0], spectrum_size))
eigenvalues, eigenvectors = sparse.linalg.lobpcg(W, init_eigenvecs,
B=A, largest=False, maxiter=40)
eigenvalues = np.real(eigenvalues)
sorting_arr = np.argsort(eigenvalues)
eigenvalues = eigenvalues[sorting_arr]
eigenvectors = eigenvectors[:,sorting_arr]
return eigenvalues, eigenvectors
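# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# The helpers above combine into the generalized eigenproblem W phi = lambda A phi.
# The unit-square mesh below is a made-up toy example, just large enough to
# exercise the assembly code.
if __name__ == '__main__':
    toy_vertices = np.array([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
    toy_faces = np.array([[0, 1, 2], [0, 2, 3]])
    W = cotangent_weights(toy_vertices, toy_faces)   # (4,4) stiffness matrix
    A = dia_area_mat(toy_vertices, toy_faces)        # (4,4) lumped mass matrix
    print(np.abs(W.toarray().sum(axis=1)).max())     # rows of W sum to ~0
    print(A.diagonal().sum())                        # total lumped area ~= 1.0
    # eigenvalues, eigenvectors = laplacian_spectrum(W, A, spectrum_size=k)  # k < n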
| 33.333333 | 93 | 0.559623 |
4a246dd38c527d8e599365bd17bd49722c3cf89a | 1,165 | py | Python | src/MisoDispenser.py | alim4/MisoDispenser | 137a5cb1b4a351ca6a13c312c9e0643b9d420f92 | [
"MIT"
] | null | null | null | src/MisoDispenser.py | alim4/MisoDispenser | 137a5cb1b4a351ca6a13c312c9e0643b9d420f92 | [
"MIT"
] | null | null | null | src/MisoDispenser.py | alim4/MisoDispenser | 137a5cb1b4a351ca6a13c312c9e0643b9d420f92 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import RPi.GPIO as GPIO
import time
import schedule
GPIO_PIN =17
FREQUENCY = 50
MIN_DUTY = 2.2
MAX_DUTY = 7.5
SLEEP_DURATION = 0.8
def feed(t):
print(t)
GPIO.setmode(GPIO.BCM)
GPIO.setup(GPIO_PIN, GPIO.OUT)
try:
servo = GPIO.PWM(GPIO_PIN, FREQUENCY)
servo.start(MIN_DUTY)
for index in range(0, 3):
dutyCycle = MIN_DUTY if (index % 2 == 0) else MAX_DUTY
servo.ChangeDutyCycle(dutyCycle)
time.sleep(SLEEP_DURATION)
finally:
# Pulses to reset servo to prevent buzzing
servo.ChangeDutyCycle(MAX_DUTY + 0.1)
time.sleep(SLEEP_DURATION)
servo.ChangeDutyCycle(MAX_DUTY - 0.1)
time.sleep(SLEEP_DURATION)
servo.stop()
GPIO.cleanup()
if __name__ == '__main__':
print("Starting program")
feeding_schedule = ["06:00", "17:00", "21:00"]
for t in feeding_schedule:
print("Scheduling a feeding at %s." % t)
schedule.every().day.at(t).do(feed, "Feeding Miso at %s" % t)
while True:
schedule.run_pending()
time.sleep(60)
| 23.3 | 69 | 0.591416 |
4a246e054eaa2490307d2d9db60e4e19a8aeb6ea | 154 | py | Python | ghdl_old/ghdl/__init__.py | dtanay2004/ansible-role-helloworld2 | db257404708a3cd3ec595d66a78331fc0afb7d88 | [
"BSD-3-Clause"
] | null | null | null | ghdl_old/ghdl/__init__.py | dtanay2004/ansible-role-helloworld2 | db257404708a3cd3ec595d66a78331fc0afb7d88 | [
"BSD-3-Clause"
] | null | null | null | ghdl_old/ghdl/__init__.py | dtanay2004/ansible-role-helloworld2 | db257404708a3cd3ec595d66a78331fc0afb7d88 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Top-level package for GitHub Downloader."""
__author__ = """Tanay"""
__email__ = '[email protected]'
__version__ = '0.1.0'
| 19.25 | 46 | 0.642857 |
4a246e0593fc0648829ef704d0d1bbab0a1dc0e3 | 211 | py | Python | setup.py | krouth17/mlops_main | ed21946d0696036006ccbf188c35846c9a0016e1 | [
"MIT"
] | null | null | null | setup.py | krouth17/mlops_main | ed21946d0696036006ccbf188c35846c9a0016e1 | [
"MIT"
] | null | null | null | setup.py | krouth17/mlops_main | ed21946d0696036006ccbf188c35846c9a0016e1 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
setup(
name='src',
packages=find_packages(),
version='0.1.0',
description='its a demo prj using mlops',
author='mlops_main',
license='MIT',
)
| 19.181818 | 45 | 0.658768 |
4a246e997abafb4016011f313979847b9142face | 66 | py | Python | podostemaceous/__init__.py | cfe316/podostemaceous | de464059c68ad3fda62a28ec1f6610382bc25b67 | [
"MIT"
] | 1 | 2020-01-10T13:23:31.000Z | 2020-01-10T13:23:31.000Z | podostemaceous/__init__.py | cfe316/podostemaceous | de464059c68ad3fda62a28ec1f6610382bc25b67 | [
"MIT"
] | null | null | null | podostemaceous/__init__.py | cfe316/podostemaceous | de464059c68ad3fda62a28ec1f6610382bc25b67 | [
"MIT"
] | null | null | null | # A really great package for calculations ancillary to DSMC codes
| 33 | 65 | 0.818182 |
4a246ec951205055d226b0a02e2045e2a168ff73 | 2,558 | py | Python | Lib/site-packages/django_notifications/management/commands/notifications.py | jiangyifan123/EngLearner | a54c205a6e6bf3b2af366b56a7b7f97344fa1466 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/django_notifications/management/commands/notifications.py | jiangyifan123/EngLearner | a54c205a6e6bf3b2af366b56a7b7f97344fa1466 | [
"bzip2-1.0.6"
] | null | null | null | Lib/site-packages/django_notifications/management/commands/notifications.py | jiangyifan123/EngLearner | a54c205a6e6bf3b2af366b56a7b7f97344fa1466 | [
"bzip2-1.0.6"
] | null | null | null | """
Django notifications management commands.
Usage:
python manage.py notifications backends - displays all the available notification
backends
python manage.py notifications configured_backends - displays all the configured
notification backends
python manage.py notifications settings [backend_key] - displays the settings for
the specified backend
"""
from django.core.management.base import BaseCommand, CommandError
from django_notifications import backends
from django_notifications.management.commands.color import style
VALID_COMMANDS = {
'backends': (0, None),
'configured_backends': (0, None),
'settings': (1, '[backend name]')
}
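# Editorial note: each entry maps a subcommand to (required argument count, argument
# help text), so "python manage.py notifications settings smtp" passes validation
# while "python manage.py notifications settings" fails with a usage error. The
# backend key "smtp" is only an illustration.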
class Command(BaseCommand):
help = 'Manage notifications'
args = 'subcommand'
requires_model_validation = False
def handle(self, *args, **kwargs):
if len(args) == 0:
raise CommandError('You need to specify a subcommand: %s' % \
(', ' . join(VALID_COMMANDS)))
else:
sub_command = args[0]
if not sub_command in VALID_COMMANDS.keys():
raise CommandError('Invalid subcommand specified. Valid choices are: %s' % \
(', ' . join(VALID_COMMANDS.keys())))
argument_count = VALID_COMMANDS[sub_command][0]
argument_help = VALID_COMMANDS[sub_command][1]
if argument_count != len(args) - 1:
raise CommandError('Invalid number of arguments. Usage: %s %s' % \
(sub_command, argument_help if argument_help else ''))
command = getattr(self, 'CMD_%s' % (sub_command))
command(*args[1:])
def CMD_backends(self):
try:
available_backends = backends.get_available_backends()
print style.GREEN('Available backends:')
for (key, value, description) in available_backends:
print '- ', style.BOLD('%s' % (key)), '- %s (%s)' % (value, description)
except KeyboardInterrupt:
pass
def CMD_configured_backends(self):
try:
configured_backends = backends.get_available_backends(configured_only = True)
print style.GREEN('Available and configured backends:')
for (key, value, description) in configured_backends:
print '- ', style.BOLD('%s' % (key)), '- %s (%s)' % (value, description)
except KeyboardInterrupt:
pass
def CMD_settings(self, backend_key):
try:
backend_settings = backends.get_settings(backend_key)
print style.GREEN('Settings for "%s" backend:' % (backend_key))
for (key, value) in backend_settings.iteritems():
print style.BOLD('%s:' % (key)), '\t\t %s' % (value)
except EnvironmentError:
print 'Invalid backend key'
except KeyboardInterrupt:
pass | 30.452381 | 82 | 0.704848 |
4a246edfc78f197d3205a158ee19893beb490230 | 2,134 | py | Python | dfirtrack_artifacts/migrations/0002_default_values.py | thomas-kropeit/dfirtrack | b1e0e659af7bc8085cfe2d269ddc651f9f4ba585 | [
"Apache-2.0"
] | 273 | 2018-04-18T22:09:15.000Z | 2021-06-04T09:15:48.000Z | dfirtrack_artifacts/migrations/0002_default_values.py | stuhli/dfirtrack | 9260c91e4367b36d4cb1ae7efe4e2d2452f58e6e | [
"Apache-2.0"
] | 75 | 2018-08-31T11:05:37.000Z | 2021-06-08T14:15:07.000Z | dfirtrack_artifacts/migrations/0002_default_values.py | thomas-kropeit/dfirtrack | b1e0e659af7bc8085cfe2d269ddc651f9f4ba585 | [
"Apache-2.0"
] | 61 | 2018-11-12T22:55:48.000Z | 2021-06-06T15:16:16.000Z | from django.db import migrations
from django.utils.text import slugify
def insert_artifactstatus(apps, schema_editor):
# We can't import the migrated model directly as it may be a newer
# version than this migration expects. We use the historical version.
Artifactstatus = apps.get_model('dfirtrack_artifacts', 'Artifactstatus')
initial_values = [
'10_needs_analysis',
'20_requested',
'21_requested_again',
'25_collection_ongoing',
'30_processing_ongoing',
'40_import_ongoing',
'50_ready_for_analysis',
'60_analysis_ongoing',
'70_analysis_finished',
'90_not_analyzed',
'95_not_available',
]
# We need to call slugify() here, because our own save() is not called by migrations!
# We also do not make use of .objects.bulk_create() due to its known caveats, see:
# https://docs.djangoproject.com/en/3.2/ref/models/querysets/#bulk-create
for name in initial_values:
Artifactstatus.objects.create(
artifactstatus_name=name, artifactstatus_slug=slugify(name)
)
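# Editorial note: slugify() lower-cases and drops unsafe characters, e.g.
# slugify('File') == 'file' and slugify('10_needs_analysis') == '10_needs_analysis',
# mirroring what the model's own save() would have produced outside a migration.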
def insert_artifacttypes(apps, schema_editor):
# We can't import the migrated model directly as it may be a newer
# version than this migration expects. We use the historical version.
Artifacttype = apps.get_model('dfirtrack_artifacts', 'Artifacttype')
initial_values = [
'File',
'Image',
'Information',
'Triage',
]
# We need to call slugify() here, because our own save() is not called by migrations!
# We also do not make use of .objects.bulk_create() due to its known caveats, see:
# https://docs.djangoproject.com/en/3.2/ref/models/querysets/#bulk-create
for name in initial_values:
Artifacttype.objects.create(
artifacttype_name=name, artifacttype_slug=slugify(name)
)
class Migration(migrations.Migration):
dependencies = [
('dfirtrack_artifacts', '0001_initial'),
]
operations = [
migrations.RunPython(insert_artifactstatus),
migrations.RunPython(insert_artifacttypes),
]
| 33.34375 | 89 | 0.679007 |
4a246f259ca3f2a936a0a5ab1b22060aedd2dab4 | 2,733 | py | Python | ashierlib/test/utils_test.py | google/ashier | 76710289b2c459ed1ef8ca822cfa1d5fbad05b89 | [
"Apache-2.0"
] | 26 | 2015-01-20T06:22:45.000Z | 2021-04-20T20:38:09.000Z | ashierlib/test/utils_test.py | google/ashier | 76710289b2c459ed1ef8ca822cfa1d5fbad05b89 | [
"Apache-2.0"
] | null | null | null | ashierlib/test/utils_test.py | google/ashier | 76710289b2c459ed1ef8ca822cfa1d5fbad05b89 | [
"Apache-2.0"
] | 11 | 2015-01-25T18:35:01.000Z | 2021-10-12T00:49:54.000Z | #!/usr/bin/python
#
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ashier: Template-based scripting for terminal interactions.
Ashier is a program that serves the same purpose as expect(1): it helps
users script terminal interactions. However, unlike expect, Ashier is
programming language agnostic and provides a readable template language
for terminal output matching. These features make scripted terminal
interactions simpler to create and easier to maintain.
This module contains unit tests for the utils module.
"""
__author__ = '[email protected] (Chuan-kai Lin)'
import unittest
from .. import utils
class TestSplitNone(unittest.TestCase):
"""Unit tests for utils.SplitNone()."""
def DoTest(self, arg, expected):
self.assertEqual(
utils.SplitNone(arg), expected)
def testEmpty(self):
self.DoTest([], [])
def testOnlyNone(self):
self.DoTest([None], [])
def testOnlyNones(self):
self.DoTest([None, None, None], [])
def testStartNone(self):
self.DoTest([None, 3, 5], [[3, 5]])
def testEndNone(self):
self.DoTest([4, 2, None, None], [[4, 2]])
def testStartEndNone(self):
self.DoTest([None, 5, 0, None, None], [[5, 0]])
def testSplitInTwo(self):
self.DoTest([7, None, None, 6, 2], [[7], [6, 2]])
def testSplitInThree(self):
self.DoTest([2, None, 5, 3, None, 4], [[2], [5, 3], [4]])
class TestRemoveRegexBindingGroups(unittest.TestCase):
"""Unit tests for utils.RemoveRegexBindingGroups()."""
def DoTest(self, arg, expected):
self.assertEqual(
utils.RemoveRegexBindingGroups(arg), expected)
def testNoBindingGroup(self):
self.DoTest(r'abc', r'abc')
def testBindingGroup(self):
self.DoTest(r'a(bc)', r'a(?:bc)')
def testBindingGroups(self):
self.DoTest(r'a(bc)(def)', r'a(?:bc)(?:def)')
def testNestedBindingGroups(self):
self.DoTest(r'a((bc))', r'a(?:(?:bc))')
def testEscapedParens(self):
self.DoTest(r'a\(b\)', r'a\(b\)')
def testEscapedBackSlashes(self):
self.DoTest(r'a\\(b\\)', r'a\\(?:b\\)')
self.DoTest(r'a\\\(b\\)', r'a\\\(b\\)')
self.DoTest(r'a\\\\(b\\)', r'a\\\\(?:b\\)')
if __name__ == '__main__':
unittest.main()
| 27.887755 | 74 | 0.676546 |
4a246fbf82a2b998b9f9206d7fcf574f4be06136 | 4,162 | py | Python | src/mpy_device/serial_repl.py | stefanhoelzl/amp-fuse | 1ba23dc75499d1b6b8cb66401948a98dd3b35763 | [
"MIT"
] | 2 | 2017-12-06T14:14:06.000Z | 2022-02-16T14:03:31.000Z | src/mpy_device/serial_repl.py | stefanhoelzl/mpy-fuse | 1ba23dc75499d1b6b8cb66401948a98dd3b35763 | [
"MIT"
] | null | null | null | src/mpy_device/serial_repl.py | stefanhoelzl/mpy-fuse | 1ba23dc75499d1b6b8cb66401948a98dd3b35763 | [
"MIT"
] | 1 | 2018-04-08T11:57:39.000Z | 2018-04-08T11:57:39.000Z | import re
import serial
from .base_device import BaseDevice, MpyDeviceError
class SerialRepl(BaseDevice):
"""
micropython board interface
This module provides a MpyDevice class to communicate with a micropython
device. It allows to execute python code remote on the device.
.. code-block:: python
with MpyDevice('dev/tty.SLAB_USBtoUART') as dev:
dev.exec('import machine')
freq = dev.eval('machine.freq()')
dev.execfile('main.py')
"""
CTRL_A = b'\x01'
CTRL_B = b'\x02'
CTRL_C = b'\x03'
CTRL_D = b'\x04'
ENTER_REPL = CTRL_B
ENTER_RAW_REPL = CTRL_A
SOFT_REBOOT = CTRL_D
COMMAND_TERMINATION = CTRL_D
FLUSH_SIZE = 1024
DEFAULT_BAUDRATE = 115200
def __init__(self, dev):
self.dev = dev
self.serial = None
self.mpy_version = None
self.git_hash = None
self.build_date = None
self.board_type = None
self.connect()
def connect(self):
self.serial = serial.Serial(self.dev,
baudrate=SerialRepl.DEFAULT_BAUDRATE,
timeout=0)
self.serial.write(SerialRepl.CTRL_C+SerialRepl.CTRL_C)
self.flush()
def flush(self):
while self.serial.read(SerialRepl.FLUSH_SIZE) != b'':
pass
def read_until(self, until, output=None):
buf = b''
while buf[-len(until):] != until.encode():
c = self.serial.read(1)
if output:
print(c.decode('utf-8'), file=output, end='')
output.flush()
buf += c
return buf[:-len(until)].decode('utf8')
def readline(self):
return self.read_until('\r\n')
def readlines(self, num):
for _ in range(num):
yield self.readline()
def set_info_from_string(self, s):
pattern = r'MicroPython v(?P<version>\d+\.\d+\.\d+-\d+)' \
r'-(?P<git_hash>.{9}) ' \
r'on (?P<date>\d{4}-\d{2}-\d{2}); ' \
r'(?P<board>.+)'
match = re.match(pattern, s)
if match:
self.mpy_version = match.group('version')
self.git_hash = match.group('git_hash')
self.build_date = match.group('date')
self.board_type = match.group('board')
return True
return False
def enter_repl(self):
self.serial.write(SerialRepl.ENTER_REPL)
lines = self.readlines(4)
while not self.set_info_from_string(next(lines)):
pass
if next(lines) != 'Type "help()" for more information.':
            raise MpyDeviceError('Enter REPL response mismatch')
if next(lines) != '>>> ':
            raise MpyDeviceError('Error starting REPL')
def enter_raw_repl(self):
self.serial.write(SerialRepl.ENTER_RAW_REPL)
self.read_until('raw REPL; CTRL-B to exit\r\n>')
def close(self):
self.serial.close()
def exec(self, command, output=None):
"""
Executes a python expression or statement on the device
:param command: Python command (expression or statement) to execute
:param output: File-object to redirect the output of stdout
:raises: MpyDeviceError: if the command raises an Exception on the board
:return: output on stdout as string
"""
self.serial.write(command.encode() + SerialRepl.COMMAND_TERMINATION)
self.read_until('OK', output=None)
ret = self.read_until('\x04', output=output)
err = self.read_until('\x04', output=None)
if err:
raise MpyDeviceError(err)
return ret
def eval(self, expression, output=None):
"""
Evaluates an python expression on the device and returns the
return-value as string.
:param expression: Python expression to evaluate
:param output: File-object to redirect the output of stdout
:return: Return value of the expression as string
"""
ret = self.exec('print({})'.format(expression), output=output)
ret = ret.strip()
return ret
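# --- Illustrative usage sketch (editorial addition, not part of the original file) ---
# Mirrors the MpyDevice example from the module docstring; the serial device path
# below is hypothetical and depends on your USB-UART bridge.
if __name__ == '__main__':
    repl = SerialRepl('/dev/tty.SLAB_USBtoUART')  # hypothetical device path
    try:
        repl.enter_raw_repl()
        repl.exec('import machine')
        print(repl.eval('machine.freq()'))
    finally:
        repl.close()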
| 30.379562 | 80 | 0.58025 |
4a24709a20a373ae5dd4a1d8c288e84f4087fc35 | 157 | py | Python | streamad/process/__init__.py | Fengrui-Liu/streamingTS | 1c5fcb9751c44a5fc69dcb237b48d93204b721e9 | [
"Apache-2.0"
] | 4 | 2020-12-06T03:05:25.000Z | 2020-12-09T12:39:51.000Z | streamad/process/__init__.py | Fengrui-Liu/streamingTS | 1c5fcb9751c44a5fc69dcb237b48d93204b721e9 | [
"Apache-2.0"
] | null | null | null | streamad/process/__init__.py | Fengrui-Liu/streamingTS | 1c5fcb9751c44a5fc69dcb237b48d93204b721e9 | [
"Apache-2.0"
] | null | null | null | from .zscore_Thresholder import ZScoreThresholder
from .tdigest_Thresholder import TDigestThresholder
__all__ = ["ZScoreThresholder", "TDigestThresholder"]
| 31.4 | 53 | 0.853503 |
4a247140162a37bb23c71366dc819ca6caa60df2 | 29,972 | py | Python | openstack/baremetal/v1/node.py | morganseznec/openstacksdk | 7b245c16556a04497ce701d959a889eca6f26a83 | [
"Apache-2.0"
] | null | null | null | openstack/baremetal/v1/node.py | morganseznec/openstacksdk | 7b245c16556a04497ce701d959a889eca6f26a83 | [
"Apache-2.0"
] | null | null | null | openstack/baremetal/v1/node.py | morganseznec/openstacksdk | 7b245c16556a04497ce701d959a889eca6f26a83 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import _log
from openstack.baremetal.v1 import _common
from openstack import exceptions
from openstack import resource
from openstack import utils
_logger = _log.setup_logging('openstack')
class ValidationResult(object):
"""Result of a single interface validation.
:ivar result: Result of a validation, ``True`` for success, ``False`` for
failure, ``None`` for unsupported interface.
:ivar reason: If ``result`` is ``False`` or ``None``, explanation of
the result.
"""
def __init__(self, result, reason):
self.result = result
self.reason = reason
class Node(_common.ListMixin, resource.Resource):
resources_key = 'nodes'
base_path = '/nodes'
# capabilities
allow_create = True
allow_fetch = True
allow_commit = True
allow_delete = True
allow_list = True
commit_method = 'PATCH'
commit_jsonpatch = True
_query_mapping = resource.QueryParameters(
'associated', 'conductor_group', 'driver', 'fault',
'provision_state', 'resource_class',
fields={'name': 'fields', 'type': _common.comma_separated_list},
instance_id='instance_uuid',
is_maintenance='maintenance',
)
# The allocation_uuid field introduced in 1.52 (Stein).
_max_microversion = '1.52'
# Properties
#: The UUID of the allocation associated with this node. Added in API
#: microversion 1.52.
allocation_id = resource.Body("allocation_uuid")
    #: The UUID of the chassis associated with this node. Can be empty or None.
chassis_id = resource.Body("chassis_uuid")
#: The current clean step.
clean_step = resource.Body("clean_step")
    #: Hostname of the conductor currently handling this node. Added in API
    #: microversion 1.49.
conductor = resource.Body("conductor")
#: Conductor group this node is managed by. Added in API microversion 1.46.
conductor_group = resource.Body("conductor_group")
#: Timestamp at which the node was last updated.
created_at = resource.Body("created_at")
#: The current deploy step. Added in API microversion 1.44.
deploy_step = resource.Body("deploy_step")
#: The name of the driver.
driver = resource.Body("driver")
#: All the metadata required by the driver to manage this node. List of
#: fields varies between drivers, and can be retrieved from the
#: :class:`openstack.baremetal.v1.driver.Driver` resource.
driver_info = resource.Body("driver_info", type=dict)
#: Internal metadata set and stored by node's driver. This is read-only.
driver_internal_info = resource.Body("driver_internal_info", type=dict)
#: A set of one or more arbitrary metadata key and value pairs.
extra = resource.Body("extra")
#: Fault type that caused the node to enter maintenance mode.
#: Introduced in API microversion 1.42.
fault = resource.Body("fault")
#: The UUID of the node resource.
id = resource.Body("uuid", alternate_id=True)
#: Information used to customize the deployed image, e.g. size of root
#: partition, config drive in the form of base64 encoded string and other
#: metadata.
instance_info = resource.Body("instance_info")
#: UUID of the nova instance associated with this node.
instance_id = resource.Body("instance_uuid")
#: Override enabling of automated cleaning. Added in API microversion 1.47.
is_automated_clean_enabled = resource.Body("automated_clean", type=bool)
#: Whether console access is enabled on this node.
is_console_enabled = resource.Body("console_enabled", type=bool)
#: Whether node is currently in "maintenance mode". Nodes put into
#: maintenance mode are removed from the available resource pool.
is_maintenance = resource.Body("maintenance", type=bool)
    #: Whether the node is protected from undeploying. Added in API microversion
    #: 1.48.
is_protected = resource.Body("protected", type=bool)
#: Any error from the most recent transaction that started but failed to
#: finish.
last_error = resource.Body("last_error")
#: A list of relative links, including self and bookmark links.
links = resource.Body("links", type=list)
#: user settable description of the reason why the node was placed into
#: maintenance mode.
maintenance_reason = resource.Body("maintenance_reason")
#: Human readable identifier for the node. May be undefined. Certain words
#: are reserved. Added in API microversion 1.5
name = resource.Body("name")
#: Links to the collection of ports on this node.
ports = resource.Body("ports", type=list)
#: Links to the collection of portgroups on this node. Available since
#: API microversion 1.24.
port_groups = resource.Body("portgroups", type=list)
#: The current power state. Usually "power on" or "power off", but may be
#: "None" if service is unable to determine the power state.
power_state = resource.Body("power_state")
#: Physical characteristics of the node. Content populated by the service
#: during inspection.
properties = resource.Body("properties", type=dict)
    #: The reason why this node is protected. Added in API microversion 1.48.
protected_reason = resource.Body("protected_reason")
#: The current provisioning state of the node.
provision_state = resource.Body("provision_state")
#: The current RAID configuration of the node.
raid_config = resource.Body("raid_config")
    #: The name of a service conductor host which is holding a lock on this
#: node, if a lock is held.
reservation = resource.Body("reservation")
#: A string to be used by external schedulers to identify this node as a
#: unit of a specific type of resource. Added in API microversion 1.21.
resource_class = resource.Body("resource_class")
#: Links to the collection of states.
states = resource.Body("states", type=list)
#: The requested state if a provisioning action has been requested. For
#: example, ``AVAILABLE``, ``DEPLOYING``, ``DEPLOYWAIT``, ``DEPLOYING``,
#: ``ACTIVE`` etc.
target_provision_state = resource.Body("target_provision_state")
#: The requested state during a state transition.
target_power_state = resource.Body("target_power_state")
#: The requested RAID configuration of the node which will be applied when
#: the node next transitions through the CLEANING state.
target_raid_config = resource.Body("target_raid_config")
#: Traits of the node. Introduced in API microversion 1.37.
traits = resource.Body("traits", type=list)
#: Timestamp at which the node was last updated.
updated_at = resource.Body("updated_at")
# Hardware interfaces grouped together for convenience.
#: BIOS interface to use when setting BIOS properties of the node.
#: Introduced in API microversion 1.40.
bios_interface = resource.Body("bios_interface")
#: Boot interface to use when configuring boot of the node.
#: Introduced in API microversion 1.31.
boot_interface = resource.Body("boot_interface")
#: Console interface to use when working with serial console.
#: Introduced in API microversion 1.31.
console_interface = resource.Body("console_interface")
#: Deploy interface to use when deploying the node.
#: Introduced in API microversion 1.31.
deploy_interface = resource.Body("deploy_interface")
#: Inspect interface to use when inspecting the node.
#: Introduced in API microversion 1.31.
inspect_interface = resource.Body("inspect_interface")
#: Management interface to use for management actions on the node.
#: Introduced in API microversion 1.31.
management_interface = resource.Body("management_interface")
#: Network interface provider to use when plumbing the network connections
#: for this node. Introduced in API microversion 1.20.
network_interface = resource.Body("network_interface")
#: Power interface to use for power actions on the node.
#: Introduced in API microversion 1.31.
power_interface = resource.Body("power_interface")
#: RAID interface to use for configuring RAID on the node.
#: Introduced in API microversion 1.31.
raid_interface = resource.Body("raid_interface")
#: Rescue interface to use for rescuing of the node.
#: Introduced in API microversion 1.38.
rescue_interface = resource.Body("rescue_interface")
#: Storage interface to use when attaching remote storage.
#: Introduced in API microversion 1.33.
storage_interface = resource.Body("storage_interface")
#: Vendor interface to use for vendor-specific actions on the node.
#: Introduced in API microversion 1.31.
vendor_interface = resource.Body("vendor_interface")
def _consume_body_attrs(self, attrs):
if 'provision_state' in attrs and attrs['provision_state'] is None:
# API version 1.1 uses None instead of "available". Make it
# consistent.
attrs['provision_state'] = 'available'
return super(Node, self)._consume_body_attrs(attrs)
def create(self, session, *args, **kwargs):
"""Create a remote resource based on this instance.
The overridden version is capable of handling the populated
``provision_state`` field of one of three values: ``enroll``,
``manageable`` or ``available``. The default is currently
``available``, since it's the only state supported by all API versions.
Note that Bare Metal API 1.4 is required for ``manageable`` and
1.11 is required for ``enroll``.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:return: This :class:`Resource` instance.
:raises: ValueError if the Node's ``provision_state`` is not one of
``None``, ``enroll``, ``manageable`` or ``available``.
:raises: :exc:`~openstack.exceptions.NotSupported` if
the ``provision_state`` cannot be reached with any API version
supported by the server.
"""
expected_provision_state = self.provision_state
if expected_provision_state is None:
expected_provision_state = 'available'
if expected_provision_state not in ('enroll',
'manageable',
'available'):
raise ValueError(
"Node's provision_state must be one of 'enroll', "
"'manageable' or 'available' for creation, got %s" %
expected_provision_state)
session = self._get_session(session)
# Verify that the requested provision state is reachable with the API
# version we are going to use.
try:
expected_version = _common.STATE_VERSIONS[expected_provision_state]
except KeyError:
pass
else:
self._assert_microversion_for(
session, 'create', expected_version,
error_message="Cannot create a node with initial provision "
"state %s" % expected_provision_state)
# Ironic cannot set provision_state itself, so marking it as unchanged
self._body.clean(only={'provision_state'})
super(Node, self).create(session, *args, **kwargs)
if (self.provision_state == 'enroll'
and expected_provision_state != 'enroll'):
self.set_provision_state(session, 'manage', wait=True)
if (self.provision_state == 'manageable'
and expected_provision_state == 'available'):
self.set_provision_state(session, 'provide', wait=True)
if (self.provision_state == 'available'
and expected_provision_state == 'manageable'):
self.set_provision_state(session, 'manage', wait=True)
return self
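    # Illustrative sketch (editorial note): with an openstacksdk Connection object
    # "conn" (hypothetical here), something along the lines of
    #     conn.baremetal.create_node(driver='ipmi', provision_state='manageable')
    # relies on the logic above: the node is created (in "enroll" on recent API
    # versions) and then walked through "manage" until it reaches "manageable".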
def set_provision_state(self, session, target, config_drive=None,
clean_steps=None, rescue_password=None,
wait=False, timeout=None):
"""Run an action modifying this node's provision state.
This call is asynchronous, it will return success as soon as the Bare
Metal service acknowledges the request.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param target: Provisioning action, e.g. ``active``, ``provide``.
See the Bare Metal service documentation for available actions.
:param config_drive: Config drive to pass to the node, only valid
for ``active` and ``rebuild`` targets. You can use functions from
:mod:`openstack.baremetal.configdrive` to build it.
:param clean_steps: Clean steps to execute, only valid for ``clean``
target.
:param rescue_password: Password for the rescue operation, only valid
for ``rescue`` target.
:param wait: Whether to wait for the target state to be reached.
:param timeout: Timeout (in seconds) to wait for the target state to be
reached. If ``None``, wait without timeout.
:return: This :class:`Node` instance.
:raises: ValueError if ``config_drive``, ``clean_steps`` or
``rescue_password`` are provided with an invalid ``target``.
"""
session = self._get_session(session)
version = None
if target in _common.PROVISIONING_VERSIONS:
version = '1.%d' % _common.PROVISIONING_VERSIONS[target]
if config_drive:
# Some config drive actions require a higher version.
if isinstance(config_drive, dict):
version = '1.56'
elif target == 'rebuild':
version = '1.35'
version = utils.pick_microversion(session, version)
body = {'target': target}
if config_drive:
if target not in ('active', 'rebuild'):
raise ValueError('Config drive can only be provided with '
'"active" and "rebuild" targets')
# Not a typo - ironic accepts "configdrive" (without underscore)
body['configdrive'] = config_drive
if clean_steps is not None:
if target != 'clean':
raise ValueError('Clean steps can only be provided with '
'"clean" target')
body['clean_steps'] = clean_steps
if rescue_password is not None:
if target != 'rescue':
raise ValueError('Rescue password can only be provided with '
'"rescue" target')
body['rescue_password'] = rescue_password
if wait:
try:
expected_state = _common.EXPECTED_STATES[target]
except KeyError:
raise ValueError('For target %s the expected state is not '
'known, cannot wait for it' % target)
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'states', 'provision')
response = session.put(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to set provision state for bare metal node {node} "
"to {target}".format(node=self.id, target=target))
exceptions.raise_from_response(response, error_message=msg)
if wait:
return self.wait_for_provision_state(session,
expected_state,
timeout=timeout)
else:
return self.fetch(session)
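    # Editorial note on argument formats (per the Bare Metal API, summarized here):
    # clean_steps is a list of dicts such as
    #     [{'interface': 'deploy', 'step': 'erase_devices_metadata', 'args': {}}]
    # and config_drive is either a prebuilt gzipped+base64 string or, on API 1.56
    # and later (as checked above), a dict with 'meta_data'/'user_data'/
    # 'network_data' keys that the service renders into a config drive.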
def wait_for_provision_state(self, session, expected_state, timeout=None,
abort_on_failed_state=True):
"""Wait for the node to reach the expected state.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param expected_state: The expected provisioning state to reach.
:param timeout: If ``wait`` is set to ``True``, specifies how much (in
seconds) to wait for the expected state to be reached. The value of
``None`` (the default) means no client-side timeout.
:param abort_on_failed_state: If ``True`` (the default), abort waiting
if the node reaches a failure state which does not match the
expected one. Note that the failure state for ``enroll`` ->
``manageable`` transition is ``enroll`` again.
:return: This :class:`Node` instance.
"""
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for node %(node)s to reach "
"target state '%(state)s'" % {'node': self.id,
'state': expected_state}):
self.fetch(session)
if self._check_state_reached(session, expected_state,
abort_on_failed_state):
return self
_logger.debug('Still waiting for node %(node)s to reach state '
'"%(target)s", the current state is "%(state)s"',
{'node': self.id, 'target': expected_state,
'state': self.provision_state})
def wait_for_reservation(self, session, timeout=None):
"""Wait for a lock on the node to be released.
Bare metal nodes in ironic have a reservation lock that
is used to represent that a conductor has locked the node
while performing some sort of action, such as changing
configuration as a result of a machine state change.
        This lock can occur during power synchronization, and prevents
updates to objects attached to the node, such as ports.
Note that nothing prevents a conductor from acquiring the lock again
after this call returns, so it should be treated as best effort.
Returns immediately if there is no reservation on the node.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param timeout: How much (in seconds) to wait for the lock to be
released. The value of ``None`` (the default) means no timeout.
:return: This :class:`Node` instance.
"""
if self.reservation is None:
return self
for count in utils.iterate_timeout(
timeout,
"Timeout waiting for the lock to be released on node %s" %
self.id):
self.fetch(session)
if self.reservation is None:
return self
_logger.debug('Still waiting for the lock to be released on node '
'%(node)s, currently locked by conductor %(host)s',
{'node': self.id, 'host': self.reservation})
def _check_state_reached(self, session, expected_state,
abort_on_failed_state=True):
"""Wait for the node to reach the expected state.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param expected_state: The expected provisioning state to reach.
:param abort_on_failed_state: If ``True`` (the default), abort waiting
if the node reaches a failure state which does not match the
expected one. Note that the failure state for ``enroll`` ->
``manageable`` transition is ``enroll`` again.
:return: ``True`` if the target state is reached
:raises: SDKException if ``abort_on_failed_state`` is ``True`` and
a failure state is reached.
"""
# NOTE(dtantsur): microversion 1.2 changed None to available
if (self.provision_state == expected_state
or (expected_state == 'available'
and self.provision_state is None)):
return True
elif not abort_on_failed_state:
return False
if self.provision_state.endswith(' failed'):
raise exceptions.SDKException(
"Node %(node)s reached failure state \"%(state)s\"; "
"the last error is %(error)s" %
{'node': self.id, 'state': self.provision_state,
'error': self.last_error})
# Special case: a failure state for "manage" transition can be
# "enroll"
elif (expected_state == 'manageable'
and self.provision_state == 'enroll' and self.last_error):
raise exceptions.SDKException(
"Node %(node)s could not reach state manageable: "
"failed to verify management credentials; "
"the last error is %(error)s" %
{'node': self.id, 'error': self.last_error})
# TODO(dtantsur): waiting for power state
def set_power_state(self, session, target):
"""Run an action modifying this node's power state.
This call is asynchronous; it will return success as soon as the Bare
Metal service acknowledges the request.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param target: Target power state, e.g. "rebooting", "power on".
See the Bare Metal service documentation for available actions.
"""
session = self._get_session(session)
if target.startswith("soft "):
version = '1.27'
else:
version = None
version = utils.pick_microversion(session, version)
# TODO(dtantsur): server timeout support
body = {'target': target}
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'states', 'power')
response = session.put(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
msg = ("Failed to set power state for bare metal node {node} "
"to {target}".format(node=self.id, target=target))
exceptions.raise_from_response(response, error_message=msg)
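# Added example (hedged sketch, not from the original source): with `session`
# being the adapter/proxy described in the docstring above, a power-off
# request could be issued as:
#
#     node.set_power_state(session, 'power off')         # immediate power-off
#     node.set_power_state(session, 'soft power off')    # negotiates microversion 1.27
#
# The call only acknowledges the request; polling `power_state` afterwards is
# still up to the caller (see the TODOs above).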
def attach_vif(self, session, vif_id, retry_on_conflict=True):
"""Attach a VIF to the node.
The exact form of the VIF ID depends on the network interface used by
the node. In the most common case it is a Network service port
(NOT a Bare Metal port) ID. A VIF can only be attached to one node
at a time.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param string vif_id: Backend-specific VIF ID.
:param retry_on_conflict: Whether to retry HTTP CONFLICT errors.
This can happen when either the VIF is already used on a node or
the node is locked. Since the latter happens more often, the
default value is True.
:return: ``None``
:raises: :exc:`~openstack.exceptions.NotSupported` if the server
does not support the VIF API.
"""
session = self._get_session(session)
version = self._assert_microversion_for(
session, 'commit', _common.VIF_VERSION,
error_message=("Cannot use VIF attachment API"))
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'vifs')
body = {'id': vif_id}
retriable_status_codes = _common.RETRIABLE_STATUS_CODES
if not retry_on_conflict:
retriable_status_codes = set(retriable_status_codes) - {409}
response = session.post(
request.url, json=body,
headers=request.headers, microversion=version,
retriable_status_codes=retriable_status_codes)
msg = ("Failed to attach VIF {vif} to bare metal node {node}"
.format(node=self.id, vif=vif_id))
exceptions.raise_from_response(response, error_message=msg)
def detach_vif(self, session, vif_id, ignore_missing=True):
"""Detach a VIF from the node.
The exact form of the VIF ID depends on the network interface used by
the node. In the most common case it is a Network service port
(NOT a Bare Metal port) ID.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param string vif_id: Backend-specific VIF ID.
:param bool ignore_missing: When set to ``False``
:class:`~openstack.exceptions.ResourceNotFound` will be
raised when the VIF does not exist. Otherwise, ``False``
is returned.
:return: ``True`` if the VIF was detached, otherwise ``False``.
:raises: :exc:`~openstack.exceptions.NotSupported` if the server
does not support the VIF API.
"""
session = self._get_session(session)
version = self._assert_microversion_for(
session, 'commit', _common.VIF_VERSION,
error_message=("Cannot use VIF attachment API"))
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'vifs', vif_id)
response = session.delete(
request.url, headers=request.headers, microversion=version,
retriable_status_codes=_common.RETRIABLE_STATUS_CODES)
if ignore_missing and response.status_code == 400:
_logger.debug('VIF %(vif)s was already removed from node %(node)s',
{'vif': vif_id, 'node': self.id})
return False
msg = ("Failed to detach VIF {vif} from bare metal node {node}"
.format(node=self.id, vif=vif_id))
exceptions.raise_from_response(response, error_message=msg)
return True
def list_vifs(self, session):
"""List IDs of VIFs attached to the node.
The exact form of the VIF ID depends on the network interface used by
the node. In the most common case it is a Network service port
(NOT a Bare Metal port) ID.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:return: List of VIF IDs as strings.
:raises: :exc:`~openstack.exceptions.NotSupported` if the server
does not support the VIF API.
"""
session = self._get_session(session)
version = self._assert_microversion_for(
session, 'fetch', _common.VIF_VERSION,
error_message=("Cannot use VIF attachment API"))
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'vifs')
response = session.get(
request.url, headers=request.headers, microversion=version)
msg = ("Failed to list VIFs attached to bare metal node {node}"
.format(node=self.id))
exceptions.raise_from_response(response, error_message=msg)
return [vif['id'] for vif in response.json()['vifs']]
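# Rough VIF lifecycle sketch (added for illustration; `network_port_id` is a
# placeholder for a Network service port ID):
#
#     node.attach_vif(session, network_port_id)
#     assert network_port_id in node.list_vifs(session)
#     node.detach_vif(session, network_port_id, ignore_missing=True)
#
# All three calls rely on the VIF API microversion negotiated above
# (_common.VIF_VERSION).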
def validate(self, session, required=('boot', 'deploy', 'power')):
"""Validate required information on a node.
:param session: The session to use for making this request.
:type session: :class:`~keystoneauth1.adapter.Adapter`
:param required: List of interfaces that are required to pass
validation. The default value is the list of minimum required
interfaces for provisioning.
:return: dict mapping interface names to :class:`ValidationResult`
objects.
:raises: :exc:`~openstack.exceptions.ValidationException` if validation
fails for a required interface.
"""
session = self._get_session(session)
version = self._get_microversion_for(session, 'fetch')
request = self._prepare_request(requires_id=True)
request.url = utils.urljoin(request.url, 'validate')
response = session.get(request.url, headers=request.headers,
microversion=version)
msg = ("Failed to validate node {node}".format(node=self.id))
exceptions.raise_from_response(response, error_message=msg)
result = response.json()
if required:
failed = [
'%s (%s)' % (key, value.get('reason', 'no reason'))
for key, value in result.items()
if key in required and not value.get('result')
]
if failed:
raise exceptions.ValidationException(
'Validation failed for required interfaces of node {node}:'
' {failures}'.format(node=self.id,
failures=', '.join(failed)))
return {key: ValidationResult(value.get('result'), value.get('reason'))
for key, value in result.items()}
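# Added usage note (a sketch, assuming ValidationResult exposes `result` and
# `reason` as constructed above): a non-raising, power-only check could read
#
#     outcome = node.validate(session, required=None)
#     if not outcome['power'].result:
#         print('power interface failed:', outcome['power'].reason)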
NodeDetail = Node
| 45.969325 | 79 | 0.640131 |
4a24739e446ea1c195153d63b6f3e364168025da | 3,335 | py | Python | monasca-log-api-2.9.0/monasca_log_api/app/base/model.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | null | null | null | monasca-log-api-2.9.0/monasca_log_api/app/base/model.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 5 | 2019-08-14T06:46:03.000Z | 2021-12-13T20:01:25.000Z | monasca-log-api-2.9.0/monasca_log_api/app/base/model.py | scottwedge/OpenStack-Stein | 7077d1f602031dace92916f14e36b124f474de15 | [
"Apache-2.0"
] | 2 | 2020-03-15T01:24:15.000Z | 2020-07-22T20:34:26.000Z | # Copyright 2016 FUJITSU LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
import six
from monasca_common.rest import utils as rest_utils
def serialize_envelope(envelope):
"""Returns json representation of an envelope.
:return: json object of envelope
:rtype: six.text_type
"""
json = rest_utils.as_json(envelope, ensure_ascii=False)
if six.PY2:
raw = six.text_type(json.replace(r'\\', r'\\\\'), encoding='utf-8',
errors='replace')
else:
raw = json
return raw
class LogEnvelopeException(Exception):
pass
class Envelope(dict):
def __init__(self, log, meta):
if not log:
error_msg = 'Envelope cannot be created without log'
raise LogEnvelopeException(error_msg)
if 'tenantId' not in meta or not meta.get('tenantId'):
error_msg = 'Envelope cannot be created without tenant'
raise LogEnvelopeException(error_msg)
creation_time = self._get_creation_time()
super(Envelope, self).__init__(
log=log,
creation_time=creation_time,
meta=meta
)
@staticmethod
def _get_creation_time():
return timeutils.utcnow_ts()
@classmethod
def new_envelope(cls, log, tenant_id, region, dimensions=None):
"""Creates new log envelope
Log envelope is combined out of the following properties:
* log - dict
* creation_time - timestamp
* meta - meta block
Example output json would look like this:
.. code-block:: json
{
"log": {
"message": "Some message",
"dimensions": {
"hostname": "devstack"
}
},
"creation_time": 1447834886,
"meta": {
"tenantId": "e4bd29509eda473092d32aadfee3e7b1",
"region": "pl"
}
}
:param dict log: original log element (containing message and other
params)
:param str tenant_id: tenant id to be put in meta field
:param str region: region to be put in meta field
:param dict dimensions: additional dimensions to be appended to log
object dimensions
"""
if dimensions:
log['dimensions'].update(dimensions)
log_meta = {
'region': region,
'tenantId': tenant_id
}
return cls(log, log_meta)
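# Illustrative call (added; field names follow the docstring example above):
#
#     envelope = Envelope.new_envelope(
#         log={'message': 'Some message', 'dimensions': {'hostname': 'devstack'}},
#         tenant_id='e4bd29509eda473092d32aadfee3e7b1',
#         region='pl')
#     payload = serialize_envelope(envelope)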
@property
def log(self):
return self.get('log', None)
@property
def creation_time(self):
return self.get('creation_time', None)
@property
def meta(self):
return self.get('meta', None)
| 27.791667 | 75 | 0.593703 |
4a2473a244fb625c2d1ab3afbc3eae31a419c9c2 | 2,640 | py | Python | utility/application/handlers/upload_handlers.py | vinthedark/snet-marketplace-service | 66ed9d093b00f09d3e28ef4d86c4e4c125037d06 | [
"MIT"
] | null | null | null | utility/application/handlers/upload_handlers.py | vinthedark/snet-marketplace-service | 66ed9d093b00f09d3e28ef4d86c4e4c125037d06 | [
"MIT"
] | null | null | null | utility/application/handlers/upload_handlers.py | vinthedark/snet-marketplace-service | 66ed9d093b00f09d3e28ef4d86c4e4c125037d06 | [
"MIT"
] | null | null | null | import base64
from uuid import uuid4
from common.constant import StatusCode
from common.exception_handler import exception_handler
from common.logger import get_logger
from common.utils import generate_lambda_response, validate_dict
from utility.application.upload_service import UploadService
from utility.config import SLACK_HOOK, NETWORK_ID, ALLOWED_CONTENT_TYPE, FILE_EXTENSION
from utility.constants import UPLOAD_TYPE_DETAILS, TEMP_FILE_DIR
from utility.exceptions import EXCEPTIONS, InvalidContentType, BadRequestException
logger = get_logger(__name__)
@exception_handler(SLACK_HOOK=SLACK_HOOK, NETWORK_ID=NETWORK_ID, logger=logger, EXCEPTIONS=EXCEPTIONS)
def upload_file(event, context):
headers = event["headers"]
if "content-type" not in headers:
if "Content-Type" not in headers:
logger.error(f"Content type not found content type")
raise InvalidContentType()
else:
content_type = headers["Content-Type"]
else:
content_type = headers["content-type"]
username = event["requestContext"]["authorizer"]["claims"]["email"]
query_string_parameter = event["queryStringParameters"]
if content_type not in ALLOWED_CONTENT_TYPE:
logger.error(f"Invalid Content type {content_type}")
raise InvalidContentType()
if not ("type" in query_string_parameter and
query_string_parameter["type"] in UPLOAD_TYPE_DETAILS) or len(event["body"]) == 0:
logger.error(f"Invalid request for content_type: {content_type} query_params: {query_string_parameter}")
raise BadRequestException()
upload_request_type = query_string_parameter["type"]
query_string_parameter.pop("type")
if not validate_dict(query_string_parameter, UPLOAD_TYPE_DETAILS[upload_request_type]["required_query_params"]):
logger.error(f"Failed to get required query params content_type: {content_type} "
f"upload_type: {upload_request_type} params: {query_string_parameter}")
raise BadRequestException()
file_extension = FILE_EXTENSION[content_type]
temp_file_path = f"{TEMP_FILE_DIR}/{uuid4().hex}_upload.{file_extension}"
raw_file_data = base64.b64decode(event["body"])
with open(temp_file_path, 'wb') as file:
file.write(raw_file_data)
response = UploadService().store_file(
upload_request_type, {"file_path": temp_file_path, "file_extension": file_extension},
query_string_parameter, username)
return generate_lambda_response(
StatusCode.OK,
{"status": "success", "data": {"url": response}, "error": {}}, cors_enabled=True
)
| 42.580645 | 116 | 0.738258 |
4a2473b36c5bbacec64020f9b4bd5b7900239161 | 1,471 | py | Python | ingestion/src/metadata/ingestion/models/user.py | tomtor/OpenMetadata | c925168f6503c6db74742b87caf1a4011b89beea | [
"Apache-2.0"
] | 864 | 2021-08-13T23:48:45.000Z | 2022-03-31T18:36:30.000Z | ingestion/src/metadata/ingestion/models/user.py | tomtor/OpenMetadata | c925168f6503c6db74742b87caf1a4011b89beea | [
"Apache-2.0"
] | 2,701 | 2021-08-14T06:05:12.000Z | 2022-03-31T23:48:32.000Z | ingestion/src/metadata/ingestion/models/user.py | tomtor/OpenMetadata | c925168f6503c6db74742b87caf1a4011b89beea | [
"Apache-2.0"
] | 144 | 2021-08-16T20:44:08.000Z | 2022-03-29T14:12:30.000Z | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
from typing import Any, Dict, List, Optional
from metadata.ingestion.models.json_serializable import JsonSerializable
UNQUOTED_SUFFIX = ":UNQUOTED"
class MetadataOrg(JsonSerializable):
"""
Catalog Org Model
"""
def __init__(self, name: str, documentation: str = "") -> None:
""" """
self.name = name
self.documentation = documentation
class MetadataTeam(JsonSerializable):
"""
Catalog Team Model
"""
def __init__(self, name: str, description: str = "") -> None:
""" """
self.name = name.replace(" ", "_")
self.display_name = name
self.description = description
class MetadataRole(JsonSerializable):
"""
Catalog Role
"""
def __init__(self, name: str, documentation: str = ""):
""" """
self.name = name
self.documentation = documentation
| 27.754717 | 75 | 0.673012 |
4a2473ca54e31d9713e25beed218eb8641602cbd | 1,711 | py | Python | housemonitor/lib/pubsubaid.py | gary-pickens/HouseMonitor | 4b169bdbeed9013e1824d4bb929970ae0c27a6c9 | [
"MIT"
] | 1 | 2021-06-28T06:52:03.000Z | 2021-06-28T06:52:03.000Z | housemonitor/lib/pubsubaid.py | gary-pickens/HouseMonitor | 4b169bdbeed9013e1824d4bb929970ae0c27a6c9 | [
"MIT"
] | null | null | null | housemonitor/lib/pubsubaid.py | gary-pickens/HouseMonitor | 4b169bdbeed9013e1824d4bb929970ae0c27a6c9 | [
"MIT"
] | null | null | null | '''
Created on Nov 6, 2012
@author: Gary
'''
from pubsub import pub
from pubsub import utils
from base import Base
from pprint import pprint, pformat
from housemonitor.lib.constants import Constants
from pubsub.utils import printTreeDocs
class PubSubAid( Base ):
'''
PubSubAid provides subscriptions for some lingering topics and sets setTopicUnspecifiedFatal
to true. This will prevent using topics that do not have someplace to go.
'''
@property
def logger_name( self ):
""" Set the logger name. This needs to be added to house_monitoring_logging.conf"""
return 'PubSubAid'
def __init__( self ):
'''
Constructor
'''
super( PubSubAid, self ).__init__()
try:
self.logger.debug( "PubSubAid starting" )
pub.subscribe( self.step, Constants.TopicNames.Step )
pub.subscribe( self.outputs, Constants.TopicNames.Outputs )
pub.subscribe( self.all_topics, Constants.TopicNames.ALL_TOPICS )
# printTreeDocs( extra='LaDA' )
# print()
pub.setTopicUnspecifiedFatal( True )
self.logger.debug( "PubSubAid ending" )
except Exception as ex:
self.logger.exception( 'exception in PubSubAid {}'.format( ex ) )
def step( self ):
'''
A catch all for topic step.
'''
self.logger.debug( "process topic step" )
def outputs( self ):
'''
A catch all for topic outputs.
'''
self.logger.debug( "'outputs' topic received" )
def all_topics( self ):
'''
A catch all for all topics.
'''
self.logger.debug( 'all topics received' )
| 29 | 96 | 0.613092 |
4a2473cae505121968cdb6a73a2e80430063d779 | 42,489 | py | Python | tensorflow/python/ops/math_ops.py | Mr-Kumar-Abhishek/tensordrift | a9ca5173b2252b0de5dd754147b275e85298e522 | [
"Apache-2.0"
] | 2 | 2019-04-21T12:10:20.000Z | 2020-12-27T19:06:31.000Z | tensorflow/python/ops/math_ops.py | cleargraphinc/tensorflow | 21fac39c471dede0e4ae62dd60e2b0b85db48415 | [
"Apache-2.0"
] | null | null | null | tensorflow/python/ops/math_ops.py | cleargraphinc/tensorflow | 21fac39c471dede0e4ae62dd60e2b0b85db48415 | [
"Apache-2.0"
] | null | null | null | """## Arithmetic Operators
TensorFlow provides several operations that you can use to add basic arithmetic
operators to your graph.
@@add
@@sub
@@mul
@@div
@@mod
## Basic Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions to your graph.
@@add_n
@@abs
@@neg
@@sign
@@inv
@@square
@@round
@@sqrt
@@rsqrt
@@pow
@@exp
@@log
@@ceil
@@floor
@@maximum
@@minimum
@@cos
@@sin
## Matrix Math Functions
TensorFlow provides several operations that you can use to add basic
mathematical functions for matrices to your graph.
@@diag
@@transpose
@@matmul
@@batch_matmul
@@matrix_determinant
@@batch_matrix_determinant
@@matrix_inverse
@@batch_matrix_inverse
@@cholesky
@@batch_cholesky
## Complex Number Functions
TensorFlow provides several operations that you can use to add complex number
functions to your graph.
@@complex
@@complex_abs
@@conj
@@imag
@@real
## Reduction
TensorFlow provides several operations that you can use to perform
common math computations that reduce various dimensions of a tensor.
@@reduce_sum
@@reduce_prod
@@reduce_min
@@reduce_max
@@reduce_mean
@@reduce_all
@@reduce_any
@@accumulate_n
## Segmentation
TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.
For example:
```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.segment_sum(c, tf.constant([0, 0, 1]))
==> [[0 0 0 0]
[5 6 7 8]]
```
@@segment_sum
@@segment_prod
@@segment_min
@@segment_max
@@segment_mean
@@unsorted_segment_sum
@@sparse_segment_sum
@@sparse_segment_mean
## Sequence Comparison and Indexing
TensorFlow provides several operations that you can use to add sequence
comparison and index extraction to your graph. You can use these operations to
determine sequence differences and determine the indexes of specific values in
a tensor.
@@argmin
@@argmax
@@listdiff
@@where
@@unique
@@edit_distance
@@invert_permutation
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import six.moves
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import types
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import gen_state_ops
# pylint: disable=wildcard-import,undefined-variable
from tensorflow.python.ops.gen_math_ops import *
# Aliases for some automatically-generated names.
argmax = gen_math_ops.arg_max
argmin = gen_math_ops.arg_min
linspace = gen_math_ops.lin_space
# pylint: disable=anomalous-backslash-in-string,protected-access
def abs(x, name=None):
"""Computes the absolute value of a tensor.
Given a tensor of real numbers `x`, this operation returns a tensor
containing the absolute value of each element in `x`. For example, if x is
an input element and y is an output element, this operation computes
\\\\(y = |x|\\\\).
See [`tf.complex_abs()`](#tf_complex_abs) to compute the absolute value of a complex
number.
Args:
x: A `Tensor` of type `float`, `double`, `int32`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor` the same size and type as `x` with absolute values.
"""
with ops.op_scope([x], name, "Abs") as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype == types.complex64:
return gen_math_ops.complex_abs(x, name=name)
return gen_math_ops._abs(x, name=name)
def pow(x, y, name=None):
"""Computes the power of one value to another.
Given a tensor `x` and a tensor `y`, this operation computes \\\\(x^y\\\\) for
corresponding elements in `x` and `y`. For example:
```
# tensor 'x' is [[2, 2]], [3, 3]]
# tensor 'y' is [[8, 16], [2, 3]]
tf.pow(x, y) ==> [[256, 65536], [9, 27]]
```
Args:
x: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
y: A `Tensor` of type `float`, `double`, `int32`, `complex64`, or `int64`.
name: A name for the operation (optional).
Returns:
A `Tensor`.
"""
with ops.op_scope([x], name, "Pow") as name:
return gen_math_ops._pow(x, y, name=name)
def complex(real, imag, name=None):
"""Converts two real numbers to a complex number.
Given a tensor `real` representing the real part of a complex number, and a
tensor `imag` representing the imaginary part of a complex number, this
operation computes complex numbers elementwise of the form \\\\(a + bj\\\\),
where *a* represents the `real` part and *b* represents the `imag` part.
The input tensors `real` and `imag` must be the same shape.
For example:
```
# tensor 'real' is [2.25, 3.25]
# tensor `imag` is [4.75, 5.75]
tf.complex(real, imag) ==> [[2.25 + 4.74j], [3.25 + 5.75j]]
```
Args:
real: A `Tensor` of type `float`.
imag: A `Tensor` of type `float`.
name: A name for the operation (optional).
Returns:
A `Tensor` of type `complex64`.
"""
with ops.op_scope([real, imag], name, "Complex") as name:
return gen_math_ops._complex(real, imag, name=name)
def round(x, name=None):
"""Rounds the values of a tensor to the nearest integer, element-wise.
For example:
```python
# 'a' is [0.9, 2.5, 2.3, -4.4]
tf.round(a) ==> [ 1.0, 3.0, 2.0, -4.0 ]
```
Args:
x: A `Tensor` of type `float` or `double`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as `x`.
"""
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_integer:
return x
else:
return floor(x + 0.5, name=name)
def cast(x, dtype, name=None):
"""Casts a tensor to a new type.
The operation casts `x` (in case of `Tensor`) or `x.values`
(in case of `SparseTensor`) to `dtype`.
For example:
```python
# tensor `a` is [1.8, 2.2], dtype=tf.float
tf.cast(a, tf.int32) ==> [1, 2] # dtype=tf.int32
```
Args:
x: A `Tensor` or `SparseTensor`.
dtype: The destination type.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x`.
Raises:
TypeError: If `x` cannot be cast to the `dtype`.
"""
with ops.op_scope([x], name, "Cast") as name:
if isinstance(x, ops.SparseTensor):
values_cast = cast(x.values, dtype, name=name)
return ops.SparseTensor(x.indices, values_cast, x.shape)
else:
# TODO(touts): Handle what Josh said.
#
# Could return ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
# allows some conversions that cast() can't do, e.g. casting numbers to
# strings.
x = ops.convert_to_tensor(x, name="x")
if x.dtype.base_dtype == dtype:
return x
return gen_math_ops.cast(x, dtype, name=name)
def to_float(x, name="ToFloat"):
"""Casts a tensor to type `float32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float32`.
Raises:
TypeError: If `x` cannot be cast to the `float32`.
"""
return cast(x, types.float32, name=name)
def to_double(x, name="ToDouble"):
"""Casts a tensor to type `float64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `float64`.
Raises:
TypeError: If `x` cannot be cast to the `float64`.
"""
return cast(x, types.float64, name=name)
def to_int32(x, name="ToInt32"):
"""Casts a tensor to type `int32`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int32`.
Raises:
TypeError: If `x` cannot be cast to the `int32`.
"""
return cast(x, types.int32, name=name)
def to_int64(x, name="ToInt64"):
"""Casts a tensor to type `int64`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `int64`.
Raises:
TypeError: If `x` cannot be cast to the `int64`.
"""
return cast(x, types.int64, name=name)
def to_bfloat16(x, name="ToBFloat16"):
"""Casts a tensor to type `bfloat16`.
Args:
x: A `Tensor` or `SparseTensor`.
name: A name for the operation (optional).
Returns:
A `Tensor` or `SparseTensor` with same shape as `x` with type `bfloat16`.
Raises:
TypeError: If `x` cannot be cast to the `bfloat16`.
"""
return cast(x, types.bfloat16, name=name)
ops.Tensor._override_operator("__neg__", neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator. Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse. This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", logical_not)
def _OverrideBinaryOperatorHelper(func, op_name):
"""Register operators with different tensor and scalar versions.
Args:
func: the operator
op_name: name of the operator being overridden
"""
def binary_op_wrapper(x, y):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(x, ops.Tensor)
y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
return func(x, y, name=name)
ops.Tensor._override_operator("__%s__" % op_name, binary_op_wrapper)
del binary_op_wrapper
def r_binary_op_wrapper(y, x):
with ops.op_scope([x, y], None, op_name) as name:
assert isinstance(y, ops.Tensor)
x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
return func(x, y, name=name)
ops.Tensor._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
del r_binary_op_wrapper
# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
types.uint8: types.float32,
types.int8: types.float32,
types.int16: types.float32,
types.int32: types.float64,
types.int64: types.float64,
types.float32: None,
types.float64: None,
types.complex64: None,
}
def truediv(x, y, name=None):
"""Divides x / y elementwise, always producing floating point results.
The same as `tf.div` for floating point arguments, but casts integer arguments
to floating point before dividing so that the result is always floating point.
This op is generated by normal `x / y` division in Python 3 and in Python 2.7
with `from __future__ import division`. If you want integer division that
rounds down, use `x // y` or `tf.floordiv`.
`x` and `y` must have the same numeric type. If the inputs are floating
point, the output will have the same type. If the inputs are integral, the
inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
and `int64` (matching the behavior of Numpy).
Args:
x: `Tensor` numerator of numeric type.
y: `Tensor` denominator of numeric type.
name: A name for the operation (optional).
Returns:
`x / y` evaluated in floating point.
Raises:
TypeError: If `x` and `y` have different dtypes.
"""
with ops.op_scope([x, y], name, "truediv") as name:
x = ops.convert_to_tensor(x, name="x")
y = ops.convert_to_tensor(y, name="y")
x_dtype = x.dtype.base_dtype
y_dtype = y.dtype.base_dtype
if x_dtype != y_dtype:
raise TypeError("x and y must have the same dtype, got %r != %r" %
(x_dtype, y_dtype))
try:
dtype = _TRUEDIV_TABLE[x_dtype]
except KeyError:
raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
if dtype is not None:
x = cast(x, dtype)
y = cast(y, dtype)
return div(x, y, name=name)
def floordiv(x, y, name=None):
"""Divides `x / y` elementwise, rounding down for floating point.
The same as `tf.div(x,y)`, but uses `tf.floor(tf.div(x,y))` for floating
point arguments so that the result is always an integer (though possibly an
integer represented as floating point). This op is generated by `x // y`
floor division in Python 3 and in Python 2.7 with
`from __future__ import division`.
Note that for efficiency, __floordiv__ uses C semantics for negative numbers
(unlike Python and Numpy).
`x` and `y` must have the same type, and the result will have the same type
as well.
Args:
x: `Tensor` numerator of real numeric type.
y: `Tensor` numerator of real numeric type.
name: A name for the operation (optional).
Returns:
`x / y` rounded down (except possibly for integers in C).
Raises:
TypeError: If the inputs are complex.
"""
with ops.op_scope([x, y], name, "floordiv") as name:
x = ops.convert_to_tensor(x, name="x")
dtype = x.dtype
if dtype.is_floating:
return floor(div(x, y), name=name)
else:
if not dtype.is_integer:
raise TypeError("Expected floating point or integer, got %r" % dtype)
return div(x, y, name=name)
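# Worked example (added for illustration, not in the original docstrings): for
# int32 inputs x = [7, -7] and y = 2,
#   tf.truediv(x, y)  ==> [3.5, -3.5]   # inputs are cast to float64 first
#   tf.floordiv(x, y) ==> [3, -3]       # C semantics truncate toward zero,
#                                       # unlike Python's floor for negatives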
_OverrideBinaryOperatorHelper(add, "add")
_OverrideBinaryOperatorHelper(sub, "sub")
_OverrideBinaryOperatorHelper(mul, "mul")
_OverrideBinaryOperatorHelper(div, "div")
_OverrideBinaryOperatorHelper(truediv, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(mod, "mod")
def logical_xor(x, y, name="LogicalXor"):
"""x ^ y = (x | y) & ~(x & y)."""
# TODO(alemi) Make this a cwise op if people end up relying on it.
return logical_and(logical_or(x, y), logical_not(logical_and(x, y)),
name=name)
_OverrideBinaryOperatorHelper(logical_and, "and")
_OverrideBinaryOperatorHelper(logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")
ops.Tensor._override_operator("__lt__", less)
ops.Tensor._override_operator("__le__", less_equal)
ops.Tensor._override_operator("__gt__", greater)
ops.Tensor._override_operator("__ge__", greater_equal)
def range(start, limit=None, delta=1, name="range"):
"""Creates a sequence of integers.
Creates a sequence of integers that begins at `start` and extends by
increments of `delta` up to but not including `limit`.
Like the Python builtin `range`, `start` defaults to 0, so that
`range(n) = range(0, n)`.
For example:
```
# 'start' is 3
# 'limit' is 18
# 'delta' is 3
tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15]
# 'limit' is 5
tf.range(limit) ==> [0, 1, 2, 3, 4]
```
Args:
start: A 0-D (scalar) of type `int32`. First entry in sequence.
Defaults to 0.
limit: A 0-D (scalar) of type `int32`. Upper limit of sequence,
exclusive.
delta: A 0-D `Tensor` (scalar) of type `int32`. Optional. Default is 1.
Number that increments `start`.
name: A name for the operation (optional).
Returns:
An 1-D `int32` `Tensor`.
"""
if limit is None:
start, limit = 0, start
return gen_math_ops._range(start, limit, delta, name=name)
@ops.RegisterShape("Range")
def _RangeShape(op):
start_value = tensor_util.ConstantValue(op.inputs[0])
limit_value = tensor_util.ConstantValue(op.inputs[1])
delta_value = tensor_util.ConstantValue(op.inputs[2])
if start_value is None or limit_value is None or delta_value is None:
return [tensor_shape.vector(None)]
else:
return [tensor_shape.vector((limit_value - start_value + delta_value - 1) //
delta_value)]
# Reduction operations
def _ReductionDims(x, reduction_indices):
"""Returns range(0, rank(x)) if reduction_indices is None."""
if reduction_indices is not None:
return reduction_indices
else:
return range(0, array_ops.rank(x))
def reduce_sum(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the sum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1, 1, 1]]
# [1, 1, 1]]
tf.reduce_sum(x) ==> 6
tf.reduce_sum(x, 0) ==> [2, 2, 2]
tf.reduce_sum(x, 1) ==> [3, 3]
tf.reduce_sum(x, 1, keep_dims=True) ==> [[3], [3]]
tf.reduce_sum(x, [0, 1]) ==> 6
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._sum(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_mean(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the mean of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[1., 1. ]]
# [2., 2.]]
tf.reduce_mean(x) ==> 1.5
tf.reduce_mean(x, 0) ==> [1.5, 1.5]
tf.reduce_mean(x, 1) ==> [1., 2.]
```
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._mean(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_prod(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the product of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._prod(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
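# Added example for reduce_prod (mirrors the style of the other reductions):
#
#   # 'x' is [[1, 2, 3],
#   #         [4, 5, 6]]
#   tf.reduce_prod(x)    ==> 720
#   tf.reduce_prod(x, 0) ==> [4, 10, 18]
#   tf.reduce_prod(x, 1) ==> [6, 120]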
def reduce_min(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the minimum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._min(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_max(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the maximum of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
Args:
input_tensor: The tensor to reduce. Should have numeric type.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._max(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_all(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]]
# [False, False]]
tf.reduce_all(x) ==> False
tf.reduce_all(x, 0) ==> [False, False]
tf.reduce_all(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._all(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def reduce_any(input_tensor, reduction_indices=None, keep_dims=False,
name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.
Reduces `input_tensor` along the dimensions given in `reduction_indices`.
Unless `keep_dims` is true, the rank of the tensor is reduced by 1 for each
entry in `reduction_indices`. If `keep_dims` is true, the reduced dimensions
are retained with length 1.
If `reduction_indices` has no entries, all dimensions are reduced, and a
tensor with a single element is returned.
For example:
```python
# 'x' is [[True, True]]
# [False, False]]
tf.reduce_any(x) ==> True
tf.reduce_any(x, 0) ==> [True, True]
tf.reduce_any(x, 1) ==> [True, False]
```
Args:
input_tensor: The boolean tensor to reduce.
reduction_indices: The dimensions to reduce. If `None` (the default),
reduces all dimensions.
keep_dims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
Returns:
The reduced tensor.
"""
return gen_math_ops._any(input_tensor, _ReductionDims(input_tensor,
reduction_indices),
keep_dims, name=name)
def matmul(a, b,
transpose_a=False, transpose_b=False,
a_is_sparse=False, b_is_sparse=False,
name=None):
"""Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
The inputs must be two-dimensional matrices, with matching inner dimensions,
possibly after transposition.
Both matrices must be of the same type. The supported types are:
`float`, `double`, `int32`, `complex64`.
Either matrix can be transposed on the fly by setting the corresponding flag
to `True`. This is `False` by default.
If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
`a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
For example:
```python
# 2-D tensor `a`
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3]) => [[1. 2. 3.]
[4. 5. 6.]]
# 2-D tensor `b`
b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2]) => [[7. 8.]
[9. 10.]
[11. 12.]]
c = tf.matmul(a, b) => [[58 64]
[139 154]]
```
Args:
a: `Tensor` of type `float`, `double`, `int32` or `complex64`.
b: `Tensor` with same type as `a`.
transpose_a: If `True`, `a` is transposed before multiplication.
transpose_b: If `True`, `b` is transposed before multiplication.
a_is_sparse: If `True`, `a` is treated as a sparse matrix.
b_is_sparse: If `True`, `b` is treated as a sparse matrix.
name: Name for the operation (optional).
Returns:
A `Tensor` of the same type as `a`.
"""
with ops.op_scope([a, b], name, "MatMul") as name:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")
if a.dtype == types.float32 and (a_is_sparse or b_is_sparse):
return sparse_matmul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
else:
return gen_math_ops._mat_mul(a, b,
transpose_a=transpose_a,
transpose_b=transpose_b,
name=name)
sparse_matmul = gen_math_ops._sparse_mat_mul
batch_matmul = gen_math_ops._batch_mat_mul
ops.RegisterShape("MatMul")(common_shapes.matmul_shape)
ops.RegisterShape("SparseMatMul")(common_shapes.matmul_shape)
def _as_indexed_slices(x):
"""Convert 'x' to IndexedSlices.
Convert a dense Tensor to a block-sparse IndexedSlices.
Args:
x: Either a Tensor object, or an IndexedSlices object.
Returns:
An IndexedSlices object.
Raises:
TypeError: If 'x' is not a Tensor or an IndexedSlices object.
"""
# TODO(touts): op_scope
if not isinstance(x, (ops.Tensor, ops.IndexedSlices)):
raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
if isinstance(x, ops.IndexedSlices):
return x
x_shape = array_ops.shape(x)
return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)
def _as_indexed_slices_list(inputs):
"""Convert all elements of 'inputs' to IndexedSlices.
Additionally, homogenize the types of all the indices to
either int32 or int64.
Args:
inputs: List containing either Tensor or IndexedSlices objects.
Returns:
A list of IndexedSlices objects.
Raises:
TypeError: If 'inputs' is not a list or a tuple.
"""
if not isinstance(inputs, (list, tuple)):
raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
outputs = [_as_indexed_slices(i) for i in inputs]
with_int32_index = [o.indices for o in outputs
if o.indices.dtype == types.int32]
if not with_int32_index or len(with_int32_index) == len(outputs):
return outputs
casted_outputs = []
for o in outputs:
if o.indices.dtype == types.int32:
casted_outputs.append(
ops.IndexedSlices(o.values, cast(o.indices, types.int64),
o.dense_shape))
else:
casted_outputs.append(o)
return casted_outputs
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.
Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
otherwise, these are inferred.
For example:
```python
# tensor 'a' is [[1, 2], [3, 4]
# tensor `b` is [[5, 0], [0, 6]]
tf.accumulate_n([a, b, a]) ==> [[7, 4], [6, 14]]
# Explicitly pass shape and type
tf.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
==> [[7, 4], [6, 14]]
```
Args:
inputs: A list of `Tensor` objects, each with same shape and type.
shape: Shape of elements of `inputs`.
tensor_dtype: The type of `inputs`.
name: A name for the operation (optional).
Returns:
A `Tensor` of same shape and type as the elements of `inputs`.
Raises:
ValueError: If `inputs` don't all have same shape and dtype or the shape
cannot be inferred.
"""
if tensor_dtype is None:
if not inputs or not isinstance(inputs, (list, tuple)):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")
tensor_dtype = inputs[0].dtype
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())
if not shape.is_fully_defined():
# TODO(pbar): Make a version of assign_add that accepts an uninitialized
# lvalue, and takes its shape from that? This would allow accumulate_n to
# work in all situations that add_n currently works.
raise ValueError("Cannot infer the shape of the accumulator for "
"accumulate_n. Pass the shape argument, or set the shape "
"of at least one of the inputs.")
with ops.op_scope(inputs, name, "AccumulateN") as name:
var = gen_state_ops._temporary_variable(shape=shape, dtype=tensor_dtype)
var_name = var.op.name
var = state_ops.assign(var, array_ops.zeros_like(inputs[0]))
update_ops = []
for input_tensor in inputs:
op = state_ops.assign_add(var, input_tensor, use_locking=True)
update_ops.append(op)
with ops.control_dependencies(update_ops):
return gen_state_ops._destroy_temporary_variable(var,
var_name=var_name,
name=name)
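# Design note (added): unlike add_n, accumulate_n funnels each input through
# assign_add on a temporary variable, so all inputs never need to be alive at
# once; the trade-off is that the accumulator shape must be known up front,
# which is why an uninferable shape raises the ValueError above.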
@ops.RegisterShape("BatchMatMul")
def _BatchMatMulShape(op):
"""Shape function for BatchMatMul op."""
a_shape = op.inputs[0].get_shape()
adj_a = op.get_attr("adj_x")
b_shape = op.inputs[1].get_shape()
adj_b = op.get_attr("adj_y")
if not a_shape.is_fully_defined() or not b_shape.is_fully_defined():
return [tensor_shape.unknown_shape()]
batch_dims = a_shape[:-2].merge_with(b_shape[:-2])
output_rows = a_shape[-1] if adj_a else a_shape[-2]
output_cols = b_shape[-2] if adj_b else b_shape[-1]
inner_a = a_shape[-2] if adj_a else a_shape[-1]
inner_b = b_shape[-1] if adj_b else b_shape[-2]
inner_a.assert_is_compatible_with(inner_b)
return [batch_dims.concatenate([output_rows, output_cols])]
def sigmoid(x, name=None):
"""Computes sigmoid of `x` element-wise.
Specifically, `y = 1 / (1 + exp(-x))`.
Args:
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32`
otherwise the return type is `quint8`.
"""
with ops.op_scope([x], name, "Sigmoid") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._sigmoid(x, name=name)
def tanh(x, name=None):
"""Computes hyperbolic tangent of `x` element-wise.
Args:
x: A Tensor with type `float`, `double`, `int32`, `complex64`, `int64`,
or `qint32`.
name: A name for the operation (optional).
Returns:
A Tensor with the same type as `x` if `x.dtype != qint32` otherwise
the return type is `quint8`.
"""
with ops.op_scope([x], name, "Tanh") as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops._tanh(x, name=name)
ops.RegisterShape("Abs")(common_shapes.unchanged_shape)
ops.RegisterShape("Ceil")(common_shapes.unchanged_shape)
ops.RegisterShape("Conj")(common_shapes.unchanged_shape)
ops.RegisterShape("Cos")(common_shapes.unchanged_shape)
ops.RegisterShape("Exp")(common_shapes.unchanged_shape)
ops.RegisterShape("Floor")(common_shapes.unchanged_shape)
ops.RegisterShape("Imag")(common_shapes.unchanged_shape)
ops.RegisterShape("Inv")(common_shapes.unchanged_shape)
ops.RegisterShape("IsFinite")(common_shapes.unchanged_shape)
ops.RegisterShape("IsInf")(common_shapes.unchanged_shape)
ops.RegisterShape("IsNan")(common_shapes.unchanged_shape)
ops.RegisterShape("Log")(common_shapes.unchanged_shape)
ops.RegisterShape("LogicalNot")(common_shapes.unchanged_shape)
ops.RegisterShape("Neg")(common_shapes.unchanged_shape)
ops.RegisterShape("Real")(common_shapes.unchanged_shape)
ops.RegisterShape("Rsqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Sign")(common_shapes.unchanged_shape)
ops.RegisterShape("Sin")(common_shapes.unchanged_shape)
ops.RegisterShape("Sqrt")(common_shapes.unchanged_shape)
ops.RegisterShape("Square")(common_shapes.unchanged_shape)
ops.RegisterShape("Sigmoid")(common_shapes.unchanged_shape)
ops.RegisterShape("Tanh")(common_shapes.unchanged_shape)
ops.RegisterShape("Cast")(common_shapes.unchanged_shape)
ops.RegisterShape("ComplexAbs")(common_shapes.unchanged_shape)
@ops.RegisterShape("Add")
@ops.RegisterShape("Complex")
@ops.RegisterShape("Div")
@ops.RegisterShape("Equal")
@ops.RegisterShape("Greater")
@ops.RegisterShape("GreaterEqual")
@ops.RegisterShape("Less")
@ops.RegisterShape("LessEqual")
@ops.RegisterShape("LogicalAnd")
@ops.RegisterShape("LogicalOr")
@ops.RegisterShape("Maximum")
@ops.RegisterShape("Minimum")
@ops.RegisterShape("Mod")
@ops.RegisterShape("Mul")
@ops.RegisterShape("NotEqual")
@ops.RegisterShape("Pow")
@ops.RegisterShape("Sub")
def _BroadcastShape(op):
"""Common shape function for binary operators that broadcast their inputs."""
shape_x = op.inputs[0].get_shape()
shape_y = op.inputs[1].get_shape()
if shape_x.ndims is None or shape_y.ndims is None:
return [tensor_shape.unknown_shape()]
# To compute the broadcasted dimensions, we zip together shape_x and shape_y,
# and pad with 1 to make them the same length.
broadcasted_dims = reversed(list(six.moves.zip_longest(
reversed(shape_x.dims),
reversed(shape_y.dims),
fillvalue=tensor_shape.Dimension(1))))
# Next we combine the dimensions according to the numpy broadcasting rules.
# http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html
return_dims = []
for (dim_x, dim_y) in broadcasted_dims:
if dim_x.value is None or dim_y.value is None:
# One or both dimensions is unknown. If either dimension is greater than
# 1, we assume that the program is correct, and the other dimension will
# be broadcast to match it.
# TODO(mrry): If we eliminate the shape checks in C++, we must still
# assert that the unknown dim is either 1 or the same as the known dim.
if dim_x.value is not None and dim_x.value > 1:
return_dims.append(dim_x)
elif dim_y.value is not None and dim_y.value > 1:
return_dims.append(dim_y)
else:
return_dims.append(None)
elif dim_x.value == 1:
# We will broadcast dim_x to dim_y.
return_dims.append(dim_y)
elif dim_y.value == 1:
# We will broadcast dim_y to dim_x.
return_dims.append(dim_x)
elif dim_x.value == dim_y.value:
# The dimensions are compatible, so output is the same size in that
# dimension.
return_dims.append(dim_x.merge_with(dim_y))
else:
raise ValueError("Incompatible shapes for broadcasting: %s and %s"
% (shape_x, shape_y))
return [tensor_shape.TensorShape(return_dims)]
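# Worked example of the rule above (added): right-aligning shapes (2, 1, 5) and
# (3, 1) and padding with 1 gives the pairs (2, 1), (1, 3), (5, 1), which
# broadcast to the output shape (2, 3, 5). Pairs such as (3,) vs (4,) fall
# through to the ValueError above.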
@ops.RegisterShape("AddN")
def _AddNShape(op):
merged_shape = tensor_shape.unknown_shape()
for input_ in op.inputs:
merged_shape = merged_shape.merge_with(input_.get_shape())
return [merged_shape]
@ops.RegisterShape("Select")
def _SelectShape(op):
# All three inputs must have the same shape.
return [op.inputs[0].get_shape()
.merge_with(op.inputs[1].get_shape())
.merge_with(op.inputs[2].get_shape())]
@ops.RegisterShape("ArgMax")
@ops.RegisterShape("ArgMin")
def _ArgOpShape(op):
"""Common shape function for arg-reduction ops."""
dimension_shape = op.inputs[1].get_shape()
dimension_shape.assert_is_compatible_with(tensor_shape.scalar())
input_shape = op.inputs[0].get_shape()
if input_shape.ndims is None:
return [tensor_shape.unknown_shape()]
elif input_shape.ndims <= 1:
return [tensor_shape.scalar()]
dimension = tensor_util.ConstantValue(op.inputs[1])
if dimension is None:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims - 1)]
elif 0 <= dimension and dimension < input_shape.ndims:
returned_shape = []
for i, dim in enumerate(input_shape.dims):
if i != dimension:
returned_shape.append(dim)
return [tensor_shape.TensorShape(returned_shape)]
else:
raise ValueError(
"dimension (%d) must be in the range [0, %d), where %d is the number "
"of dimensions in the input"
% (dimension, input_shape.ndims, input_shape.ndims))
@ops.RegisterShape("All")
@ops.RegisterShape("Any")
@ops.RegisterShape("Max")
@ops.RegisterShape("Mean")
@ops.RegisterShape("Min")
@ops.RegisterShape("Prod")
@ops.RegisterShape("Sum")
def _ReductionShape(op):
"""Common shape function for reduction ops."""
input_shape = op.inputs[0].get_shape()
reduction_indices = tensor_util.ConstantValue(op.inputs[1])
keep_dims = op.get_attr("keep_dims")
if reduction_indices is None or input_shape.ndims is None:
if keep_dims:
return [tensor_shape.unknown_shape(ndims=input_shape.ndims)]
else:
return [tensor_shape.unknown_shape()]
# Turn reduction_indices from scalar to vector if necessary
reduction_indices = np.ravel(reduction_indices)
for reduction_index in reduction_indices:
if reduction_index < 0 or reduction_index >= input_shape.ndims:
raise ValueError("Invalid reduction dimension %d for input with %d "
"dimensions" % (reduction_index, input_shape.ndims))
returned_dims = []
if keep_dims:
for i, dim in enumerate(input_shape.dims):
if i in reduction_indices:
returned_dims.append(1)
else:
returned_dims.append(dim)
else:
for i, dim in enumerate(input_shape.dims):
if i not in reduction_indices:
returned_dims.append(dim)
return [tensor_shape.TensorShape(returned_dims)]
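# Shape-inference example (added): for an input of shape (2, 3, 5) and
# reduction_indices = [0, 2], the loops above produce
#   keep_dims=False ==> TensorShape([3])
#   keep_dims=True  ==> TensorShape([1, 3, 1])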
@ops.RegisterShape("SegmentMax")
@ops.RegisterShape("SegmentMean")
@ops.RegisterShape("SegmentMin")
@ops.RegisterShape("SegmentProd")
@ops.RegisterShape("SegmentSum")
def _SegmentReductionShape(op):
"""Common shape function for segment reduction ops."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
segment_ids_shape.assert_has_rank(1)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMean")
@ops.RegisterShape("SparseSegmentSum")
def _SparseSegmentReductionShape(op):
"""Common shape function for sparse segment reduction ops."""
data_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape()
indices_shape.assert_has_rank(1)
segment_ids_shape = op.inputs[2].get_shape()
segment_ids_shape.assert_has_rank(1)
indices_shape.assert_is_compatible_with(segment_ids_shape)
return [tensor_shape.TensorShape([None]).concatenate(data_shape[1:])]
@ops.RegisterShape("SparseSegmentMeanGrad")
def _SparseSegmentMeanGradShape(op):
"""Shape function for the SparseSegmentMeanGrad op."""
input_shape = op.inputs[0].get_shape()
indices_shape = op.inputs[1].get_shape().with_rank(1)
unused_segment_ids_shape = op.inputs[2].get_shape().merge_with(indices_shape)
unused_output_dim0_shape = op.inputs[3].get_shape().merge_with(
tensor_shape.scalar())
output_dim0 = tensor_util.ConstantValue(op.inputs[3])
if output_dim0 is not None:
dim0 = output_dim0[0]
else:
dim0 = None
return [tensor_shape.TensorShape([dim0]).concatenate(input_shape[1:])]
@ops.RegisterShape("UnsortedSegmentSum")
def _UnsortedSegmentSumShape(op):
"""Shape function for UnsortedSegmentSum."""
data_shape = op.inputs[0].get_shape()
segment_ids_shape = op.inputs[1].get_shape()
mid = segment_ids_shape.ndims
if mid is None:
return [tensor_shape.unknown_shape()]
else:
num_segments = tensor_util.ConstantValue(op.inputs[2])
return [tensor_shape.TensorShape([num_segments]).concatenate(
data_shape[mid:])]
@ops.RegisterShape("LinSpace")
def _LinspaceShape(op):
num = tensor_util.ConstantValue(op.inputs[2])
return [tensor_shape.vector(num)]
| 32.434351 | 86 | 0.681776 |
4a24742792501df22eaa8fdd49779ad01d064afb | 2,640 | py | Python | manifest/generator.py | ebi-ait/ingest-exporter | 9fc9e1b92caa7816f4d5d312ac92e50907f30aed | [
"Apache-2.0"
] | null | null | null | manifest/generator.py | ebi-ait/ingest-exporter | 9fc9e1b92caa7816f4d5d312ac92e50907f30aed | [
"Apache-2.0"
] | 13 | 2020-06-08T08:18:02.000Z | 2022-02-15T00:13:11.000Z | manifest/generator.py | ebi-ait/ingest-exporter | 9fc9e1b92caa7816f4d5d312ac92e50907f30aed | [
"Apache-2.0"
] | null | null | null | from ingest.api.ingestapi import IngestApi
from manifest.manifests import AssayManifest
from exporter.graph.experiment_graph import ExperimentGraph
from exporter.graph.graph_crawler import GraphCrawler
from exporter.metadata import MetadataResource, DataFile
from typing import Dict, List
class ManifestGenerator:
def __init__(self, ingest_client: IngestApi, graph_crawler: GraphCrawler):
self.ingest_client = ingest_client
self.graph_crawler = graph_crawler
def generate_manifest(self, process_uuid: str, submission_uuid: str) -> AssayManifest:
process = self.get_process(process_uuid)
project = self.project_for_process(process)
experiment_graph = self.graph_crawler.generate_complete_experiment_graph(process, project)
assay_manifest = ManifestGenerator.assay_manifest_from_experiment_graph(experiment_graph, submission_uuid)
return assay_manifest
def get_process(self, process_uuid: str) -> MetadataResource:
return MetadataResource.from_dict(self.ingest_client.get_entity_by_uuid('processes', process_uuid))
def project_for_process(self, process: MetadataResource) -> MetadataResource:
return MetadataResource.from_dict(list(self.ingest_client.get_related_entities("projects", process.full_resource, "projects"))[0])
@staticmethod
def assay_manifest_from_experiment_graph(experiment_graph: ExperimentGraph, submission_uuid: str) -> AssayManifest:
assay_manifest = AssayManifest()
assay_manifest.envelopeUuid = submission_uuid
assay_manifest.fileProjectMap.update(ManifestGenerator.metadata_uuid_map_from_graph(experiment_graph, "project"))
assay_manifest.fileBiomaterialMap.update(ManifestGenerator.metadata_uuid_map_from_graph(experiment_graph, "biomaterial"))
assay_manifest.fileProcessMap.update(ManifestGenerator.metadata_uuid_map_from_graph(experiment_graph, "process"))
assay_manifest.fileProtocolMap.update(ManifestGenerator.metadata_uuid_map_from_graph(experiment_graph, "protocol"))
assay_manifest.fileFilesMap.update(ManifestGenerator.metadata_uuid_map_from_graph(experiment_graph, "file"))
assay_manifest.dataFiles = [DataFile.from_file_metadata(m).uuid for m in experiment_graph.nodes.get_nodes()
if m.metadata_type == "file"]
return assay_manifest
@staticmethod
def metadata_uuid_map_from_graph(experiment_graph: ExperimentGraph, metadata_type: str) -> Dict[str, List[str]]:
return dict([(m.uuid, [m.uuid]) for m in experiment_graph.nodes.get_nodes() if m.metadata_type == metadata_type])
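# Illustrative usage sketch (not part of the module): how a wired-up ingest
# client and graph crawler would be passed in; the UUID values are placeholders.
def _example_generate_manifest(ingest_client: IngestApi, graph_crawler: GraphCrawler) -> AssayManifest:
    generator = ManifestGenerator(ingest_client, graph_crawler)
    return generator.generate_manifest(
        process_uuid="00000000-0000-0000-0000-000000000000",
        submission_uuid="11111111-1111-1111-1111-111111111111")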
| 53.877551 | 138 | 0.781061 |
4a2474694210b8edf3d98daf28f58387042830de | 1,005 | py | Python | deepsoccer_jetson/scripts/deepsoccer_infrared.py | kimbring2/jetbot_gazebo | 385348a6bd451fb9d752bf0417138b2eacf91e48 | ["Apache-1.1"] | 19 | 2020-04-07T07:07:39.000Z | 2022-02-12T22:24:06.000Z | deepsoccer_jetson/scripts/deepsoccer_infrared.py | kimbring2/jetbot_gazebo | 385348a6bd451fb9d752bf0417138b2eacf91e48 | ["Apache-1.1"] | 5 | 2020-03-07T07:30:47.000Z | 2021-02-04T13:11:03.000Z | deepsoccer_jetson/scripts/deepsoccer_infrared.py | kimbring2/jetbot_gazebo | 385348a6bd451fb9d752bf0417138b2eacf91e48 | ["Apache-1.1"] | 16 | 2020-03-17T00:17:54.000Z | 2022-03-21T18:27:13.000Z |
#!/usr/bin/env python
import rospy
import time
import RPi.GPIO as GPIO
from std_msgs.msg import String
# Pin Definitions
input_pin = 27 # BOARD pin 12, BCM pin 18
def getInfraredData():
value = GPIO.input(input_pin)
if value == GPIO.HIGH:
value_str = "False"
else:
value_str = "True"
    # log the reading before returning so this print is actually reachable
    print("Value read from pin {} : {}".format(input_pin, value_str))
    return value_str
# initialization
if __name__ == '__main__':
# Pin Setup:
# Board pin-numbering scheme
GPIO.setmode(GPIO.BCM) # BCM pin-numbering scheme from Raspberry Pi
GPIO.setup(input_pin, GPIO.IN) # set pin as an input pin
# setup ros node
pub = rospy.Publisher('deepsoccer_infrared', String, queue_size=10)
rospy.init_node('deepsoccer_infrared')
    r = rospy.Rate(200) # 200 Hz polling rate
while not rospy.is_shutdown():
        infrared_value = getInfraredData()
        if infrared_value is not None:
            pub.publish(str(infrared_value))
r.sleep()
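# Illustrative sketch (not called above): the same polling loop wrapped so the
# GPIO pin is always released on shutdown. The helper name is an assumption.
def publish_with_cleanup(publisher, rate):
    try:
        while not rospy.is_shutdown():
            reading = getInfraredData()
            if reading is not None:
                publisher.publish(str(reading))
            rate.sleep()
    finally:
        GPIO.cleanup()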
| 25.125 | 72 | 0.640796 |
4a2474ac99c728184abd5bd300435df3e4b5ec54 | 1,002 | py | Python | server/valid.py | jonadar/Final-project | 3712a78cf19cb6019093901b5e7c8fb9171da0ba | ["MIT"] | null | null | null | server/valid.py | jonadar/Final-project | 3712a78cf19cb6019093901b5e7c8fb9171da0ba | ["MIT"] | null | null | null | server/valid.py | jonadar/Final-project | 3712a78cf19cb6019093901b5e7c8fb9171da0ba | ["MIT"] | null | null | null |
def CPass(pss):
err = ''
capital = 0
numbers = 0
	if len(pss) <= 6:
		err += "password must be longer than 6 characters\n"
if pss.find(' ')!=-1:
err += 'must not contain spaces\n'
for i in pss:
if i.isupper():
capital+=1
if capital==0:
err+='You must use at least one capital letter\n'
for i in pss:
if i.isdigit():
numbers+=1
if numbers ==0:
err+="you must use at least one number\n"
if err!='':
return err
return True
def CUser(user):
err=''
	if len(user) <= 6:
		err += "user name must be longer than 6 characters\n"
if user.find(' ')!=-1:
err += 'must not contain spaces\n'
if err!='':
return err
return True
if __name__ == '__main__':
	print(CPass('matan kriel # %'))
	print(CPass('Aan1'))
	print(CPass('matan 2'))
	print(CUser("jnkjnjndf"))
	print(CUser("sdf"))
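# Minimal unittest sketch for the validators above (illustrative only; the
# asserted substrings follow the messages returned by CPass/CUser).
import unittest
class ValidTest(unittest.TestCase):
	def test_good_password(self):
		self.assertIs(CPass('Abcdef12'), True)
	def test_short_user(self):
		self.assertIn('longer', CUser('abc'))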
| 20.44898 | 58 | 0.492016 |
4a24755210a95166714fd196e142f305b505eba1 | 701 | py | Python | apps/solicitudes/urls.py | eduardoagreda/requisiciones | 34579f813db3f4f9631cdc14a40a9d3ffed35723 | ["MIT"] | null | null | null | apps/solicitudes/urls.py | eduardoagreda/requisiciones | 34579f813db3f4f9631cdc14a40a9d3ffed35723 | ["MIT"] | 5 | 2020-06-06T01:24:17.000Z | 2021-06-09T18:54:24.000Z | apps/solicitudes/urls.py | eduardoagreda/requisiciones | 34579f813db3f4f9631cdc14a40a9d3ffed35723 | ["MIT"] | null | null | null |
from django.urls import path
from apps.solicitudes.views import add_solicitudes, delete_solicitudes, edit_solicitudes, lista_solicitudes, SolicitudesList, DetalleSolicitudes, DeleteSolicitudes
urlpatterns = [
path('solicitudes/crear/', add_solicitudes, name='add_solicitudes'),
path('solicitudes/<int:pk>/editar/', edit_solicitudes, name='edit_solicitudes'),
path('solicitudes/<int:pk>/eliminar/', DeleteSolicitudes.as_view(), name='delete_solicitudes'),
path('solicitudes/<int:pk>/detalle/', DetalleSolicitudes.as_view(), name='read_solicitudes'),
path('solicitudes/listar/', lista_solicitudes, name='lista_solicitudes'),
path('api/solicitudes/listar/', SolicitudesList),
]
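# Illustrative reverse() lookups for the named routes above (a sketch; the
# pk value 1 is an arbitrary example).
def example_reverse_urls():
    from django.urls import reverse
    return [
        reverse('add_solicitudes'),
        reverse('edit_solicitudes', args=[1]),
        reverse('lista_solicitudes'),
    ]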
| 53.923077 | 163 | 0.771755 |
4a24790d3a0494fc90c680c53aa6d4927a67fdf7 | 3,010 | py | Python | src/spagetti.py | takumihonda/BAIU2018_5.3.6 | 7d0768406aa44a0aeb03eacd5ab0a1141f5158e4 | ["MIT"] | null | null | null | src/spagetti.py | takumihonda/BAIU2018_5.3.6 | 7d0768406aa44a0aeb03eacd5ab0a1141f5158e4 | ["MIT"] | null | null | null | src/spagetti.py | takumihonda/BAIU2018_5.3.6 | 7d0768406aa44a0aeb03eacd5ab0a1141f5158e4 | ["MIT"] | null | null | null |
import numpy as np
from netCDF4 import Dataset
from datetime import datetime
from datetime import timedelta
import os
import sys
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import matplotlib.dates as mdates
from tools_BAIU import get_lonlat, get_var, prep_proj_multi, get_grads_JMA, def_cmap, draw_rec
quick = True
#quick = False
if quick:
res="c"
else:
res="l"
def main(
stime=datetime( 2018, 7, 5, 0 ), dth=24,
vtime=datetime( 2018, 7, 5, 0 ),
etime=datetime( 2018, 7, 5, 0 ), nvar="PW", hpa=500 ):
bbox = {'facecolor':'w', 'alpha':0.95, 'pad':2}
fig, ( ax1 ) = plt.subplots( 1, 1, figsize=( 8, 6.5 ) )
fig.subplots_adjust( left=0.06, bottom=0.07, right=0.97, top=0.95,
wspace=0.15, hspace=0.2)
fac = 1.0
TOP = "/data_ballantine02/miyoshi-t/honda/SCALE-LETKF/BAIU2018_5.3.6"
INFO = {"TOP": TOP, }
lon2d, lat2d = get_lonlat( INFO, stime=datetime( 2018, 7, 1, 0 ) )
cmap_, levs_, unit_, extend_, nvar_, fac_ = def_cmap( nvar=nvar, hpa=hpa )
mmax = 50 # debug
evar = np.zeros( ( mmax, lon2d.shape[0], lon2d.shape[1] ) )
t = 0
for m in range( mmax ):
evar[m,:,:] = get_var( INFO, nvar=nvar, stime=stime, vtime=vtime, m=m+1, adt=timedelta( hours=dth ), hpa=hpa )
lons = 111
lone = 159
late = 46
lats = 19
m_l = prep_proj_multi('merc', [ ax1 ], res=res, ll_lon=lons, ur_lon=lone,
ll_lat=lats, ur_lat=late, fs=8 )
x2d_, y2d_ = m_l[0](lon2d, lat2d)
levs = [ 980.0 ]
for m in range( mmax ) :
# ax1.contour( x2d_, y2d_, evar[m,:,:], levels=levs )
mslp_ = evar[m,:,:]
cy, cx = np.unravel_index( np.argmin(mslp_), mslp_.shape )
x_, y_ = m_l[0]( lon2d[cy,cx], lat2d[cy,cx] )
m_l[0].plot( x_, y_, marker='o', color='b',
markersize=10.0, )
#print( lon2d[cy,cx], lat2d[cy,cx] )
tit = "Initial: {0:} valid: {1:}".format( stime.strftime('%m/%d'), vtime.strftime('%m/%d') )
fig.suptitle( tit, fontsize=14 )
opath = "png/mslp_mem"
ofig = "1p_MSLP_v{0:}_s{1:}".format( vtime.strftime('%m%d'), stime.strftime('%m%d') )
if not quick:
os.makedirs(opath, exist_ok=True)
ofig = os.path.join(opath, ofig + ".png")
plt.savefig(ofig,bbox_inches="tight", pad_inches = 0.1)
print(ofig)
plt.clf()
else:
print(ofig)
plt.show()
etime = datetime( 2018, 7, 7, 0 )
#nvar = "RAIN"
adt = timedelta( hours=24 )
vtime = datetime( 2018, 6, 30, 0 )
#vtime = datetime( 2018, 7, 2, 0 )
#vtime = datetime( 2018, 7, 4, 0 )
#vtime = datetime( 2018, 7, 5, 0 )
stime = datetime( 2018, 6, 27, 0 )
etime = datetime( 2018, 7, 4, 0 )
etime = datetime( 2018, 7, 2, 0 )
etime = vtime
nvar = "MSLP"
hpa = [ 500 ]
time = stime
while time <= etime:
main( vtime=vtime,
stime=time,
nvar=nvar, hpa=hpa, etime=etime, )
time += timedelta( days=1 )
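# Illustrative helper (a sketch, not used above): list the initialization
# dates the loop visits, which is handy when checking the date range.
def _init_dates( stime_, etime_ ):
    dates = []
    t_ = stime_
    while t_ <= etime_:
        dates.append( t_ )
        t_ += timedelta( days=1 )
    return dates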
| 23.700787 | 117 | 0.573754 |
4a24793f3947c3bfcda1817b3b5813e2bca10bb6 | 139 | py | Python | tests/regression/boston/ws_boston_DecisionTreeRegressor_postgresql_code_gen.py | antoinecarme/sklearn2sql_heroku | d680db10683daa419324461eeea851dd8b103ad5 | ["BSD-3-Clause"] | 1 | 2019-07-09T14:45:18.000Z | 2019-07-09T14:45:18.000Z | tests/regression/boston/ws_boston_DecisionTreeRegressor_postgresql_code_gen.py | antoinecarme/sklearn2sql_heroku | d680db10683daa419324461eeea851dd8b103ad5 | ["BSD-3-Clause"] | 5 | 2017-11-13T13:35:37.000Z | 2021-11-11T12:57:20.000Z | tests/regression/boston/ws_boston_DecisionTreeRegressor_postgresql_code_gen.py | antoinecarme/sklearn2sql_heroku | d680db10683daa419324461eeea851dd8b103ad5 | ["BSD-3-Clause"] | 1 | 2021-09-19T15:05:33.000Z | 2021-09-19T15:05:33.000Z |
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("DecisionTreeRegressor" , "boston" , "postgresql")
| 27.8 | 69 | 0.81295 |
4a247b18b3558d6061f2d9f3bbdf55bd4af9c808 | 5,249 | py | Python | resume.py | mianyuwang/resume | dc702f4714cbaebb0935460122509196cfd893c5 | ["MIT"] | null | null | null | resume.py | mianyuwang/resume | dc702f4714cbaebb0935460122509196cfd893c5 | ["MIT"] | 2 | 2020-06-23T05:41:09.000Z | 2020-07-29T19:17:33.000Z | resume.py | mianyuwang/resume | dc702f4714cbaebb0935460122509196cfd893c5 | ["MIT"] | null | null | null |
# encoding: utf-8
"""
resume.py is a pre-processor for Markdown resumes, targeting the pandoc
document processor.
Pandoc extended Markdown supports embedded HTML (like all compliant Markdown
parsers) and a subset of LaTeX, but when outputting LaTeX any unrecognized
LaTeX commands will simply be passed through.
This means you can keep your resume in pure markdown and define pre-processing
functions that do different things with different parts of the input depending
on the target output format.
Currently, the main feature is extraction of contact details. They are
expected to begin on the fourth line, following the header and a blank line,
and extend until the next blank line. Lines with bullets (•) will be split
into separate lines.
Michael White
=============
72 Bower St. #1 • Medford, MA, 02155
617-899-1621
You can then define a function for an output format like this:
def tex(lines, contact_lines, *args):
'''
Returns the pre-processed Markdown output suitable for tex processing,
as a string.
lines -- a list of lines, without the contact lines
contact_lines -- the extracted contact lines
args -- any extra command-line arguments
'''
And finally run it like this:
python resume.py tex < resume.md
"""
import hashlib
import sys
import re
GRAVATAR = "http://www.gravatar.com/avatar/{hash}?s=200"
class Processor(object):
handlers = {}
def register(self, fn):
self.handlers[fn.__name__] = fn
return fn
def process(self, format, lines, contact_lines, *args):
try:
handler = self.handlers[format]
except KeyError:
raise Exception("Unknown format: %s" % format)
return handler(lines, contact_lines, *args)
processor = Processor()
@processor.register
def tex(lines, contact_lines, *args):
def sub(pattern, repl, string, **kwargs):
"""Replacement for re.sub that doesn't replace pattern it's inside the
first latex command argument brackets. Kind of a hack."""
flags = kwargs.pop('flags', 0) | re.X | re.M
num_groups = re.compile(pattern, flags).groups
pattern = r"""
(^|}{) # beginning of line or second argument
([^{}\n\r]*) # disallow { and }
%s
([^{}\n\r]*)
""" % pattern
repl = re.sub(r"\\(\d)",
lambda m: r"\%d" % (int(m.group(1)) + 2), repl)
return re.sub(pattern, r"\1\2%s\%d" % (repl, num_groups + 3), string,
flags=flags, **kwargs)
# pandoc doesn't seem to support markdown inside latex blocks, so we're
# just going to hardcode the two most common link formats for now so people
# can put links in their contact info
def replace_links(line):
line = re.sub(r"<([^:]+@[^:]+?)>", r"\href{mailto:\1}{\1}", line)
line = re.sub(r"<(http.+?)>", r"\url{\1}", line)
line = re.sub(r"<(https.+?)>", r"\url{\1}", line)
return re.sub(r"\[([^\]]+)\]\(([^\)]+)\)", r"\href{\2}{\1}", line)
contact_lines = "\n\n".join(map(replace_links, contact_lines))
# replacements to apply to the text in contact_lines, because it won't be
# processed by pandoc
replace = {
'~': r"\\textasciitilde{}"
}
escape = ['#']
for search in replace:
contact_lines = sub(search, replace[search], contact_lines)
for c in escape:
contact_lines = sub(r'([^\\])\%s' % c, r'\1\%s' % c, contact_lines)
lines.insert(0, "\\begin{nospace}\\begin{flushright}\n\\vspace{-2em}" +
contact_lines +
"\n\\end{flushright}\\end{nospace}\n")
return "".join(lines)
@processor.register
def html(lines, contact_lines, *args):
untex = ['LaTeX']
for word in untex:
# yuck
replace = lambda l: l.replace(r"\%s" % word, word)
lines = list(map(replace, lines))
contact_lines = list(map(replace, contact_lines))
gravatar = None
for line in contact_lines:
if '@' in line and '--no-gravatar' not in args:
gravatar = GRAVATAR.format(
hash=hashlib.md5(line.lower().strip('<>').encode('utf-8')).hexdigest())
break
if gravatar is not None:
contact_lines.insert(0, "<img src='{}' />".format(gravatar))
lines.insert(0, "<div id='container'><div id='contact'>%s</div>\n" %
("<p>" + "</p><p>".join(contact_lines) + "</p>"))
lines.insert(1, "<div>")
lines.append("</div>")
return "".join(lines)
def main():
try:
format = sys.argv[1]
except IndexError:
raise Exception("No format specified")
if '-h' in sys.argv or '--help' in sys.argv:
sys.stderr.write(
"Usage: python resume.py tex|html [--no-gravatar] < INPUT.md\n")
raise SystemExit
lines = sys.stdin.readlines()
contact_lines = []
for line in lines[3:]:
lines.remove(line)
parts = [x.strip() for x in line.split("•")]
if parts == ['']:
break
contact_lines.extend(parts)
print(processor.process(format, lines, contact_lines, *sys.argv[1:]))
if __name__ == '__main__':
main()
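# Illustrative extra handler (a sketch, shown after the entry point purely for
# illustration): a plain-text format that prepends the contact details.
# The "txt" name and the separator are assumptions, not part of the original tool.
@processor.register
def txt(lines, contact_lines, *args):
    header = " | ".join(contact_lines) + "\n\n"
    return header + "".join(lines)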
| 30.166667 | 87 | 0.597638 |
4a247b523210677eb71da8f3bcd80a48ce021989 | 1,887 | bzl | Python | test/standard_cxx_flags_test/tests.bzl | dirac/rules_foreign_cc | 0423ac9810f0f992da6b47f3c6b6b9d925ffc6ca | ["Apache-2.0"] | null | null | null | test/standard_cxx_flags_test/tests.bzl | dirac/rules_foreign_cc | 0423ac9810f0f992da6b47f3c6b6b9d925ffc6ca | ["Apache-2.0"] | null | null | null | test/standard_cxx_flags_test/tests.bzl | dirac/rules_foreign_cc | 0423ac9810f0f992da6b47f3c6b6b9d925ffc6ca | ["Apache-2.0"] | 1 | 2019-12-18T07:27:00.000Z | 2019-12-18T07:27:00.000Z |
""" TODO """
load("@rules_foreign_cc//tools/build_defs:cc_toolchain_util.bzl", "CxxFlagsInfo", "get_flags_info")
def _impl(ctx):
flags = get_flags_info(ctx)
assert_contains_once(flags.assemble, "-fblah0")
assert_contains_once(flags.assemble, "-fblah2")
assert_contains_once(flags.cc, "-fblah0")
assert_contains_once(flags.cc, "-fblah2")
assert_contains_once(flags.cxx, "-fblah0")
assert_contains_once(flags.cxx, "-fblah1")
assert_contains_once(flags.cxx_linker_executable, "-fblah3")
assert_contains_once(flags.cxx_linker_shared, "-fblah3")
if "-fblah3" in flags.cxx_linker_static:
fail("Static linker flags should not contain '-fblah3'")
exe = ctx.outputs.out
ctx.actions.write(
output = exe,
is_executable = True,
# The file must not be empty because running an empty .bat file as a
# subprocess fails on Windows, so we write one space to it.
content = " ",
)
return [DefaultInfo(files = depset([exe]), executable = exe)]
def assert_contains_once(arr, value):
cnt = 0
for elem in arr:
if elem == value:
cnt = cnt + 1
if cnt == 0:
fail("Did not find " + value)
if cnt > 1:
fail("Value is included multiple times: " + value)
_flags_test = rule(
implementation = _impl,
attrs = {
"_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
"out": attr.output(),
},
toolchains = ["@bazel_tools//tools/cpp:toolchain_type"],
fragments = ["cpp"],
test = True,
)
def flags_test(name, **kwargs):
_flags_test(
name = name,
# On Windows we need the ".bat" extension.
# On other platforms the extension doesn't matter.
# Therefore we can use ".bat" on every platform.
out = name + ".bat",
**kwargs
)
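# Illustrative BUILD usage of the macro above (a sketch; the target name is an
# assumption):
#
#   load("//test/standard_cxx_flags_test:tests.bzl", "flags_test")
#
#   flags_test(
#       name = "standard_cxx_flags_test",
#   )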
| 29.952381 | 101 | 0.63275 |
4a247cc9752e2066a943b74fa3ae68dbb2c185bb | 121 | py | Python | refitt/core/__init__.py | refitt/ref | 3ccc398e7b95f77549ab77884b87f40abdd3effb | ["Apache-2.0"] | 4 | 2020-09-11T01:15:11.000Z | 2021-05-12T16:46:48.000Z | refitt/core/__init__.py | refitt/ref | 3ccc398e7b95f77549ab77884b87f40abdd3effb | ["Apache-2.0"] | 12 | 2021-03-20T03:24:53.000Z | 2022-02-19T03:20:43.000Z | refitt/core/__init__.py | refitt/ref | 3ccc398e7b95f77549ab77884b87f40abdd3effb | ["Apache-2.0"] | 2 | 2021-02-01T23:49:39.000Z | 2021-12-11T19:01:23.000Z |
# SPDX-FileCopyrightText: 2019-2021 REFITT Team
# SPDX-License-Identifier: Apache-2.0
"""Core systems within REFITT."""
| 24.2 | 47 | 0.752066 |
4a247cdd4c252f2cd21cac01147082fabdab175d | 464 | py | Python | add-audio-to-video.py | suomiy/timelapse-to-video | 3bd369553afa1d323c3a974bd6554217bcb4d8d4 | ["MIT"] | null | null | null | add-audio-to-video.py | suomiy/timelapse-to-video | 3bd369553afa1d323c3a974bd6554217bcb4d8d4 | ["MIT"] | null | null | null | add-audio-to-video.py | suomiy/timelapse-to-video | 3bd369553afa1d323c3a974bd6554217bcb4d8d4 | ["MIT"] | 1 | 2021-05-04T00:06:23.000Z | 2021-05-04T00:06:23.000Z |
#!/usr/bin/env python3
from argparse import ArgumentTypeError
from util.utils import eprint
from arg_parses.audio_parser import ArgParser
from operation.audio import add_audio
def run(settings):
add_audio(settings)
if __name__ == "__main__":
settings = None
try:
settings = ArgParser.require_args()
run(settings)
except ArgumentTypeError as e:
eprint(e)
finally:
if settings:
settings.destroy()
| 20.173913 | 45 | 0.6875 |
4a247d3bc1d8ea55e73bd304fe982dd31c408d4e | 7,201 | py | Python | opencood/models/voxel_net.py | CARLAlover/OpenCOOD | dd42cc7a31bc261ea2461b3068ed6111f13ff437 | ["Apache-2.0"] | null | null | null | opencood/models/voxel_net.py | CARLAlover/OpenCOOD | dd42cc7a31bc261ea2461b3068ed6111f13ff437 | ["Apache-2.0"] | null | null | null | opencood/models/voxel_net.py | CARLAlover/OpenCOOD | dd42cc7a31bc261ea2461b3068ed6111f13ff437 | ["Apache-2.0"] | null | null | null |
import torch.nn as nn
import torch.nn.functional as F
import torch
from torch.autograd import Variable
from opencood.models.sub_modules.pillar_vfe import PillarVFE
from opencood.utils.common_utils import torch_tensor_to_numpy
# conv2d + bn + relu
class Conv2d(nn.Module):
def __init__(self, in_channels, out_channels, k, s, p, activation=True,
batch_norm=True):
super(Conv2d, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=k,
stride=s, padding=p)
if batch_norm:
self.bn = nn.BatchNorm2d(out_channels)
else:
self.bn = None
self.activation = activation
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
if self.activation:
return F.relu(x, inplace=True)
else:
return x
# conv3d + bn + relu
class Conv3d(nn.Module):
def __init__(self, in_channels, out_channels, k, s, p, batch_norm=True):
super(Conv3d, self).__init__()
self.conv = nn.Conv3d(in_channels, out_channels, kernel_size=k,
stride=s, padding=p)
if batch_norm:
self.bn = nn.BatchNorm3d(out_channels)
else:
self.bn = None
def forward(self, x):
x = self.conv(x)
if self.bn is not None:
x = self.bn(x)
return F.relu(x, inplace=True)
# Fully Connected Network
class FCN(nn.Module):
def __init__(self, cin, cout):
super(FCN, self).__init__()
self.cout = cout
self.linear = nn.Linear(cin, cout)
self.bn = nn.BatchNorm1d(cout)
def forward(self, x):
# KK is the stacked k across batch
kk, t, _ = x.shape
x = self.linear(x.view(kk * t, -1))
x = F.relu(self.bn(x))
return x.view(kk, t, -1)
# Voxel Feature Encoding layer
class VFE(nn.Module):
def __init__(self, cin, cout, T):
super(VFE, self).__init__()
assert cout % 2 == 0
self.units = cout // 2
self.fcn = FCN(cin, self.units)
self.T = T
def forward(self, x, mask):
# point-wise feature
pwf = self.fcn(x)
# locally aggregated feature
laf = torch.max(pwf, 1)[0].unsqueeze(1).repeat(1, self.T, 1)
# point-wise concat feature
pwcf = torch.cat((pwf, laf), dim=2)
# apply mask
mask = mask.unsqueeze(2).repeat(1, 1, self.units * 2)
pwcf = pwcf * mask.float()
return pwcf
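# Illustrative shape check for VFE (a sketch; the 7 input point features and
# T=35 points per voxel are assumptions, not values fixed by this file).
def _vfe_shape_sketch():
    vfe = VFE(7, 32, 35)
    x = torch.randn(10, 35, 7)              # (num_voxels, T, point_features)
    mask = torch.ne(torch.max(x, 2)[0], 0)  # mask of non-empty points
    return vfe(x, mask).shape               # -> torch.Size([10, 35, 32])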
# Stacked Voxel Feature Encoding
class SVFE(nn.Module):
def __init__(self, T):
super(SVFE, self).__init__()
self.vfe_1 = VFE(7, 32, T)
self.vfe_2 = VFE(32, 128, T)
self.fcn = FCN(128, 128)
def forward(self, x):
mask = torch.ne(torch.max(x, 2)[0], 0)
x = self.vfe_1(x, mask)
x = self.vfe_2(x, mask)
x = self.fcn(x)
# element-wise max pooling
x = torch.max(x, 1)[0]
return x
# Convolutional Middle Layer
class CML(nn.Module):
def __init__(self):
super(CML, self).__init__()
self.conv3d_1 = Conv3d(64, 64, 3, s=(2, 1, 1), p=(1, 1, 1))
self.conv3d_2 = Conv3d(64, 64, 3, s=(1, 1, 1), p=(0, 1, 1))
self.conv3d_3 = Conv3d(64, 64, 3, s=(2, 1, 1), p=(1, 1, 1))
def forward(self, x):
x = self.conv3d_1(x)
x = self.conv3d_2(x)
x = self.conv3d_3(x)
return x
# Region Proposal Network
class RPN(nn.Module):
def __init__(self, anchor_num=2):
super(RPN, self).__init__()
self.anchor_num = anchor_num
self.block_1 = [Conv2d(128, 128, 3, 2, 1)]
self.block_1 += [Conv2d(128, 128, 3, 1, 1) for _ in range(3)]
self.block_1 = nn.Sequential(*self.block_1)
self.block_2 = [Conv2d(128, 128, 3, 2, 1)]
self.block_2 += [Conv2d(128, 128, 3, 1, 1) for _ in range(5)]
self.block_2 = nn.Sequential(*self.block_2)
self.block_3 = [Conv2d(128, 256, 3, 2, 1)]
self.block_3 += [nn.Conv2d(256, 256, 3, 1, 1) for _ in range(5)]
self.block_3 = nn.Sequential(*self.block_3)
self.deconv_1 = nn.Sequential(nn.ConvTranspose2d(256, 256, 4, 4, 0),
nn.BatchNorm2d(256))
self.deconv_2 = nn.Sequential(nn.ConvTranspose2d(128, 256, 2, 2, 0),
nn.BatchNorm2d(256))
self.deconv_3 = nn.Sequential(nn.ConvTranspose2d(128, 256, 1, 1, 0),
nn.BatchNorm2d(256))
self.score_head = Conv2d(768, self.anchor_num, 1, 1, 0,
activation=False, batch_norm=False)
self.reg_head = Conv2d(768, 7 * self.anchor_num, 1, 1, 0,
activation=False, batch_norm=False)
def forward(self, x):
x = self.block_1(x)
x_skip_1 = x
x = self.block_2(x)
x_skip_2 = x
x = self.block_3(x)
x_0 = self.deconv_1(x)
x_1 = self.deconv_2(x_skip_2)
x_2 = self.deconv_3(x_skip_1)
x = torch.cat((x_0, x_1, x_2), 1)
return self.score_head(x), self.reg_head(x)
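# Illustrative RPN shape sketch (the 200x176 input grid and batch of 2 are
# assumptions):
def _rpn_shape_sketch():
    rpn = RPN(anchor_num=2)
    x = torch.randn(2, 128, 200, 176)
    psm, rm = rpn(x)
    # psm: (2, 2, 100, 88) anchor scores; rm: (2, 14, 100, 88) box regressions
    return psm.shape, rm.shape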
class VoxelNet(nn.Module):
def __init__(self, args):
super(VoxelNet, self).__init__()
self.svfe = PillarVFE(args['pillar_vfe'],
num_point_features=4,
voxel_size=args['voxel_size'],
point_cloud_range=args['lidar_range'])
# self.svfe = SVFE(args['T'])
self.cml = CML()
self.rpn = RPN(args['anchor_num'])
self.N = args['N']
self.D = args['D']
self.H = args['H']
self.W = args['W']
self.T = args['T']
self.anchor_num = args['anchor_num']
def voxel_indexing(self, sparse_features, coords):
dim = sparse_features.shape[-1]
dense_feature = Variable(
torch.zeros(dim, self.N, self.D, self.H, self.W).cuda())
dense_feature[:, coords[:, 0], coords[:, 1], coords[:, 2],
coords[:, 3]] = sparse_features.transpose(0, 1)
return dense_feature.transpose(0, 1)
def forward(self, data_dict):
voxel_features = data_dict['processed_lidar']['voxel_features']
voxel_coords = data_dict['processed_lidar']['voxel_coords']
voxel_num_points = data_dict['processed_lidar']['voxel_num_points']
batch_dict = {'voxel_features': voxel_features,
'voxel_coords': voxel_coords,
'voxel_num_points': voxel_num_points}
# feature learning network
vwfs = self.svfe(batch_dict)['pillar_features']
voxel_coords = torch_tensor_to_numpy(voxel_coords)
vwfs = self.voxel_indexing(vwfs, voxel_coords)
# convolutional middle network
vwfs = self.cml(vwfs)
# region proposal network
# merge the depth and feature dim into one, output probability score
# map and regression map
psm, rm = self.rpn(vwfs.view(self.N, -1, self.H, self.W))
output_dict = {'psm': psm,
'rm': rm}
return output_dict
| 31.308696 | 76 | 0.558811 |
4a247d82e37d04e9bb548c13084e08c6db20f710 | 6,182 | py | Python | pyds8k/utils.py | 27149chen/pyds8k | e6e52af236330e4da4b08d749d835b7809ae2308 | [
"Apache-2.0"
] | 7 | 2020-04-16T11:20:02.000Z | 2021-04-21T13:39:14.000Z | pyds8k/utils.py | 27149chen/pyds8k | e6e52af236330e4da4b08d749d835b7809ae2308 | [
"Apache-2.0"
] | 1 | 2020-07-15T02:56:56.000Z | 2020-08-03T17:16:53.000Z | pyds8k/utils.py | 27149chen/pyds8k | e6e52af236330e4da4b08d749d835b7809ae2308 | [
"Apache-2.0"
] | 6 | 2020-01-03T05:54:26.000Z | 2022-03-31T09:42:27.000Z | ##############################################################################
# Copyright 2019 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import os
import time
import configparser
from importlib import import_module
from pyds8k.messages import GET_CONFIG_SETTINGS_IOERROR, \
GET_CONFIG_SETTINGS_ERROR
_PATH = os.path.abspath(os.path.dirname(__file__))
CONFIG_FILE_NAME = 'config.ini'
CONFIG_FILE_PATH = os.path.join(_PATH, CONFIG_FILE_NAME)
logger = None
# HTTP STATUS CODES
HTTP200 = 200
HTTP204 = 204
HTTP404 = 404
HTTP500 = 500
# HTTP METHODS
POSTA = 'POST-to-Append'
POST = 'POST'
GET = 'GET'
PUT = 'PUT'
PATCH = 'PATCH'
DELETE = 'DELETE'
def _get_logger():
from logging import getLogger
from pyds8k import PYDS8K_DEFAULT_LOGGER
global logger
if not logger:
logger = getLogger(PYDS8K_DEFAULT_LOGGER)
return logger
def get_subclasses(cls):
subclasses = cls.__subclasses__()
for sub in list(subclasses):
subclasses.extend(get_subclasses(sub))
return subclasses
def get_config_settings(category="settings"):
result_dict = dict()
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE_PATH)
for setting, value in config.items(category):
result_dict[setting] = value
except IOError as e:
_get_logger().debug(GET_CONFIG_SETTINGS_IOERROR.format(
CONFIG_FILE_PATH,
str(e)
)
)
except Exception as e:
_get_logger().error(GET_CONFIG_SETTINGS_ERROR.format(str(e)))
return result_dict
def get_config_all():
result_dict = dict()
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE_PATH)
for section in config.sections():
result_dict[section] = dict()
for setting, value in config.items(section):
result_dict[section][setting] = value
except IOError as e:
_get_logger().debug(GET_CONFIG_SETTINGS_IOERROR.format(
CONFIG_FILE_PATH,
str(e)
)
)
except Exception as e:
_get_logger().error(GET_CONFIG_SETTINGS_ERROR.format(str(e)))
return result_dict
def get_config_all_items():
result_dict = dict()
try:
config = configparser.ConfigParser()
config.read(CONFIG_FILE_PATH)
for section in config.sections():
for setting, value in config.items(section):
result_dict[setting] = value
except IOError as e:
_get_logger().debug(GET_CONFIG_SETTINGS_IOERROR.format(
CONFIG_FILE_PATH,
str(e)
)
)
except Exception as e:
_get_logger().error(GET_CONFIG_SETTINGS_ERROR.format(str(e)))
return result_dict
def get_config_by_name(name):
config = configparser.ConfigParser()
config.read(CONFIG_FILE_PATH)
return config.get('settings', name)
def set_config_by_name(name, value):
config = configparser.ConfigParser()
config.read(CONFIG_FILE_PATH)
config.set('settings', name, value)
with open(CONFIG_FILE_PATH, 'wb') as config_file:
config.write(config_file)
"""
def get_default_service_type():
return get_config_by_name('default_service_type')
def get_default_service_version():
return get_config_by_name('default_service_version')
def get_runtime_service_type():
return get_config_by_name('runtime_service_type')
def get_service_type():
return get_runtime_service_type() or get_default_service_type()
def set_runtime_service_type(service_type):
set_config_by_name('runtime_service_type', service_type)
"""
def get_request_parser_class(service_type):
prefix = service_type
Parser = import_module('{0}.dataParser.{1}'.format(__package__, prefix)
)
return Parser.RequestParser
def get_response_parser_class(service_type):
prefix = service_type
Parser = import_module('{0}.dataParser.{1}'.format(__package__, prefix)
)
return Parser.ResponseParser
def timer(func):
def inner(self, *args, **kwargs):
start = time.time()
result = func(self, *args, **kwargs)
end = time.time()
_get_logger().info(
"Successfully called method '{}' in {} seconds".format(
func.__name__,
round(end - start, 2)
)
)
return result
return inner
def res_timer_recorder(func):
def inner(self, *args, **kwargs):
start = time.time()
res = func(self, *args, **kwargs)
end = time.time()
sec = round(end - start, 2)
if not res:
_get_logger().info(
"Successfully got 0 resources in {} seconds".format(sec)
)
return []
_get_logger().info(
"Successfully got {} resources in {} seconds, \
{} seconds per 100 instances.".format(
len(res),
sec,
round(sec / len(res) * 100, 2)
)
)
return res
return inner
def dictionarize(func):
def inner(self, *args, **kwargs):
res_obj = func(self, *args, **kwargs)
if not isinstance(res_obj, list):
res_obj = [res_obj, ]
coverted = []
for res in res_obj:
coverted.append(res.representation)
return coverted
return inner
def is_absolute_url(url):
if url.startswith('/'):
return False
elif '//' in url:
return True
# Don't verify the URI's validation here.
else:
return True
| 27.353982 | 78 | 0.628276 |
4a24811b1ecd59a058668e18100b5c3fa3276460 | 69 | py | Python | src/gh_template_py/_constants.py | nuuuwan/gh_template_py | 5a3c4296eb41f7b68dc6bf6922167509b8e8f8ee | [
"MIT"
] | null | null | null | src/gh_template_py/_constants.py | nuuuwan/gh_template_py | 5a3c4296eb41f7b68dc6bf6922167509b8e8f8ee | [
"MIT"
] | null | null | null | src/gh_template_py/_constants.py | nuuuwan/gh_template_py | 5a3c4296eb41f7b68dc6bf6922167509b8e8f8ee | [
"MIT"
] | null | null | null | """Constants."""
CACHE_NAME = 'gh_template_py'
CACHE_TIMEOUT = 3600
| 13.8 | 29 | 0.724638 |
4a248197b49409f5cf40f1a84af7a9436f7fee76 | 705 | py | Python | softwarecollections/pages/views.py | pmkovar/softwarecollections | 11d7056ce8fd9b07c2a916c42f6eef7925249e38 | [
"BSD-3-Clause"
] | null | null | null | softwarecollections/pages/views.py | pmkovar/softwarecollections | 11d7056ce8fd9b07c2a916c42f6eef7925249e38 | [
"BSD-3-Clause"
] | null | null | null | softwarecollections/pages/views.py | pmkovar/softwarecollections | 11d7056ce8fd9b07c2a916c42f6eef7925249e38 | [
"BSD-3-Clause"
] | null | null | null | from django.conf import settings
from django.http.response import Http404
from django.shortcuts import render
from django.template.base import TemplateDoesNotExist
def page(request, path, template_dir=None):
if template_dir:
parts = [template_dir, request.LANGUAGE_CODE]
else:
parts = [request.LANGUAGE_CODE]
if path:
        # keep only the meaningful path segments (drop empty, '.' and '..')
parts.extend([part for part in path.strip().split('/') if part not in ('','.','..')])
template_name = '/'.join(parts) + '.html'
try:
return render(request, template_name)
except TemplateDoesNotExist as e:
if settings.DEBUG:
raise
else:
raise Http404
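# Illustrative URLconf wiring for the view above (a sketch; the pattern and
# name are assumptions and would live in urls.py, not in this module):
#
#   from django.urls import re_path
#   from softwarecollections.pages.views import page
#
#   urlpatterns = [
#       re_path(r"^(?P<path>.*)$", page, name="page"),
#   ]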
| 30.652174 | 93 | 0.652482 |
4a2481d3e6315b3e3e624b26cf16d3379b053fe7 | 1,104 | py | Python | test/test_resources.py | berkayyibis/Corridor-Detection | c08179518cce1fa9c31b1b324b95ce29a71e7b4f | [
"Apache-2.0"
] | 2 | 2021-01-07T06:50:18.000Z | 2021-01-07T07:51:10.000Z | test/test_resources.py | berkayyibis/Corridor-Detection | c08179518cce1fa9c31b1b324b95ce29a71e7b4f | [
"Apache-2.0"
] | 4 | 2020-12-10T20:31:54.000Z | 2022-03-18T15:07:19.000Z | test/test_resources.py | meteergen/Corridor-Detection | cf446a38c0d71dd85e93842606115567a4216b2f | [
"Apache-2.0"
] | 1 | 2021-01-08T06:46:44.000Z | 2021-01-08T06:46:44.000Z | # coding=utf-8
"""Resources test.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = '[email protected]'
__date__ = '2020-10-09'
__copyright__ = 'Copyright 2020, Metehan Ergen / Hacettepe University - Politecnico di Milano'
import unittest
from qgis.PyQt.QtGui import QIcon
class CorridorDetectionDialogTest(unittest.TestCase):
    """Test resources work."""
def setUp(self):
"""Runs before each test."""
pass
def tearDown(self):
"""Runs after each test."""
pass
def test_icon_png(self):
"""Test we can click OK."""
path = ':/plugins/CorridorDetection/icon.png'
icon = QIcon(path)
self.assertFalse(icon.isNull())
if __name__ == "__main__":
    suite = unittest.makeSuite(CorridorDetectionDialogTest)
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite)
| 24.533333 | 94 | 0.679348 |
4a2481e29e4dcf82a17d9e5e29764897d6c9746b | 1,023 | py | Python | cogs/help.py | CopyandPaste-png/stewie-discord-bot | f4cd4e6c0ce5ab3da2f59b2f94c2fc491eb68032 | [
"MIT"
] | null | null | null | cogs/help.py | CopyandPaste-png/stewie-discord-bot | f4cd4e6c0ce5ab3da2f59b2f94c2fc491eb68032 | [
"MIT"
] | null | null | null | cogs/help.py | CopyandPaste-png/stewie-discord-bot | f4cd4e6c0ce5ab3da2f59b2f94c2fc491eb68032 | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
class HelpCommand(commands.Cog):
def __init__(self, bot):
self.bot = bot
@commands.group(invoke_without_command=True)
async def help(self, ctx:commands.Context):
"""A custom help command
Parameters
_________
None
NB:
a @commands.group() decorator is required to make this custom help command work.
Basically it allows for subcommands(@helpfor.command()) to be called and invoked through
$helpfor <command_name>, similar to how the default help command works.
"""
# guild_prefix queries the database (main.GUILDS) to get the prefix for the guild
pf = "."
em = discord.Embed(title="COMMANDS")
em.add_field(name=f"{pf}search <term>", value="Finds a pack", inline=False)
em.add_field(name=f"{pf}add <name> <code>", value="Adds pack", inline=False)
await ctx.send(embed=em)
def setup(bot):
bot.add_cog(HelpCommand(bot))
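# Illustrative subcommand sketch (not part of the cog above; the subcommand
# name and reply text are assumptions). Inside HelpCommand one could add:
#
#     @help.command(name="search")
#     async def help_search(self, ctx: commands.Context):
#         await ctx.send("Usage: .search <term> - finds a pack by name.")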
| 31 | 96 | 0.645161 |
4a24820596eb641ed4a726e6ffe71bfaa1910460 | 1,249 | py | Python | autobahn/wamp/gen/wamp/proto/Abort.py | rapyuta-robotics/autobahn-python | c08e9e352d526a7fd0885bb94706366a432ada1a | [
"MIT"
] | 1,670 | 2015-10-12T15:46:22.000Z | 2022-03-30T22:12:53.000Z | autobahn/wamp/gen/wamp/proto/Abort.py | rapyuta-robotics/autobahn-python | c08e9e352d526a7fd0885bb94706366a432ada1a | [
"MIT"
] | 852 | 2015-10-16T22:11:03.000Z | 2022-03-27T07:57:01.000Z | autobahn/wamp/gen/wamp/proto/Abort.py | rapyuta-robotics/autobahn-python | c08e9e352d526a7fd0885bb94706366a432ada1a | [
"MIT"
] | 790 | 2015-10-15T08:46:12.000Z | 2022-03-30T12:22:13.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
class Abort(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsAbort(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Abort()
x.Init(buf, n + offset)
return x
# Abort
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Abort
def Reason(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# Abort
def Message(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
def AbortStart(builder): builder.StartObject(2)
def AbortAddReason(builder, reason): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(reason), 0)
def AbortAddMessage(builder, message): builder.PrependUOffsetTRelativeSlot(1, flatbuffers.number_types.UOffsetTFlags.py_type(message), 0)
def AbortEnd(builder): return builder.EndObject()
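# Illustrative serialization sketch using the generated helpers above
# (the string values are arbitrary examples):
def _build_abort_example():
    builder = flatbuffers.Builder(0)
    reason = builder.CreateString("wamp.error.system_shutdown")
    message = builder.CreateString("Router is shutting down.")
    AbortStart(builder)
    AbortAddReason(builder, reason)
    AbortAddMessage(builder, message)
    builder.Finish(AbortEnd(builder))
    return bytes(builder.Output())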
| 32.025641 | 137 | 0.684548 |
4a2482134c900939eb6275f9a28c1305ed6d6428 | 13,343 | py | Python | dsub/commands/dstat.py | wnojopra/dsub | 8278b37848123be759f1c9f66788ad32457fead5 | [
"Apache-2.0"
] | 146 | 2018-05-22T17:31:31.000Z | 2022-03-31T18:24:55.000Z | dsub/commands/dstat.py | wnojopra/dsub | 8278b37848123be759f1c9f66788ad32457fead5 | [
"Apache-2.0"
] | 117 | 2018-06-04T22:38:05.000Z | 2022-02-23T17:49:19.000Z | dsub/commands/dstat.py | wnojopra/dsub | 8278b37848123be759f1c9f66788ad32457fead5 | [
"Apache-2.0"
] | 27 | 2018-06-06T16:21:52.000Z | 2022-01-03T18:29:17.000Z | # Lint as: python3
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""View dsub job and task status.
Follows the model of bjobs, sinfo, qstat, etc.
"""
# Try to keep the default behavior rational based on real usage patterns.
# Most common usage:
# * User kicked off one or more single-operation jobs, or
# * User kicked off a single "array job".
# * User just wants to check on the status of their own running jobs.
#
# qstat and hence dstat.py defaults to listing jobs for the current user, so
# there is no need to include user information in the default output.
import sys
import time
from ..lib import dsub_util
from ..lib import job_model
from ..lib import output_formatter
from ..lib import param_util
from ..lib import resources
from ..providers import provider_base
def get_credentials(args):
"""Returns credentials for API requests."""
# Across dsub, dstat, ddel, defer to the provider for credentials handling
return provider_base.credentials_from_args(args)
def _parse_arguments():
"""Parses command line arguments.
Returns:
A Namespace of parsed arguments.
"""
# Handle version flag and exit if it was passed.
param_util.handle_version_flag()
parser = provider_base.create_parser(sys.argv[0])
parser.add_argument(
'--version', '-v', default=False, help='Print the dsub version and exit.')
parser.add_argument(
'--jobs',
'-j',
nargs='*',
help='A list of jobs IDs on which to check status')
parser.add_argument(
'--names',
'-n',
nargs='*',
help='A list of job names on which to check status')
parser.add_argument(
'--tasks',
'-t',
nargs='*',
help='A list of task IDs on which to check status')
parser.add_argument(
'--attempts',
nargs='*',
help='A list of task attempts on which to check status')
parser.add_argument(
'--users',
'-u',
nargs='*',
default=[],
help="""Lists only those jobs which were submitted by the list of users.
Use "*" to list jobs of any user.""")
parser.add_argument(
'--status',
'-s',
nargs='*',
default=['RUNNING'],
choices=['RUNNING', 'SUCCESS', 'FAILURE', 'CANCELED', '*'],
help="""Lists only those jobs which match the specified status(es).
Choose from {'RUNNING', 'SUCCESS', 'FAILURE', 'CANCELED'}.
Use "*" to list jobs of any status.""",
metavar='STATUS')
parser.add_argument(
'--age',
help="""List only those jobs newer than the specified age. Ages can be
listed using a number followed by a unit. Supported units are
s (seconds), m (minutes), h (hours), d (days), w (weeks).
For example: '7d' (7 days). Bare numbers are treated as UTC.""")
parser.add_argument(
'--label',
nargs='*',
action=param_util.ListParamAction,
default=[],
help='User labels to match. Tasks returned must match all labels.',
metavar='KEY=VALUE')
parser.add_argument(
'--poll-interval',
default=10,
type=int,
help='Polling interval (in seconds) for checking job status '
'when --wait is set.')
parser.add_argument(
'--wait', action='store_true', help='Wait until jobs have all completed.')
parser.add_argument(
'--limit',
default=0,
type=int,
help='The maximum number of tasks to list. The default is unlimited.')
parser.add_argument(
'--format',
choices=['text', 'json', 'yaml', 'provider-json'],
help='Set the output format.')
output_style = parser.add_mutually_exclusive_group()
output_style.add_argument(
'--full',
'-f',
action='store_true',
help='Display output with full task information'
' and input parameters.')
output_style.add_argument(
'--summary',
action='store_true',
help='Display a summary of the results, grouped by (job, status).')
# Shared between the "google-cls-v2" and "google-v2" providers
google_common = parser.add_argument_group(
title='google-common',
description="""Options common to the "google", "google-cls-v2", and
"google-v2" providers""")
google_common.add_argument(
'--project', help='Cloud project ID in which to find and the job(s)')
google_cls_v2 = parser.add_argument_group(
title='"google-cls-v2" provider options',
description='See also the "google-common" options listed')
google_cls_v2.add_argument(
'--location',
default=job_model.DEFAULT_LOCATION,
help="""Specifies the Google Cloud region to which the dsub job was
submitted. (default: {})""".format(job_model.DEFAULT_LOCATION))
return provider_base.parse_args(
parser, {
'google-cls-v2': ['project'],
'google-v2': ['project'],
'test-fails': [],
'local': [],
}, sys.argv[1:])
def main():
# Parse args and validate
args = _parse_arguments()
# Compute the age filter (if any)
create_time_min = param_util.age_to_create_time(args.age)
# Set up the output formatter
if args.format == 'json':
formatter = output_formatter.JsonOutput(args.full)
elif args.format == 'text':
formatter = output_formatter.TextOutput(args.full)
elif args.format == 'yaml':
formatter = output_formatter.YamlOutput(args.full)
elif args.format == 'provider-json':
formatter = output_formatter.JsonOutput(args.full)
else:
# If --full is passed, then format defaults to yaml.
# Else format defaults to text
if args.full:
formatter = output_formatter.YamlOutput(args.full)
else:
formatter = output_formatter.TextOutput(args.full)
# Set up the Genomics Pipelines service interface
provider = provider_base.get_provider(
args, resources, credentials_fn=get_credentials)
with dsub_util.replace_print():
provider_base.emit_provider_message(provider)
# Set poll interval to zero if --wait is not set.
poll_interval = args.poll_interval if args.wait else 0
# Make sure users were provided, or try to fill from OS user. This cannot
# be made into a default argument since some environments lack the ability
# to provide a username automatically.
user_ids = set(args.users) if args.users else {dsub_util.get_os_user()}
labels = param_util.parse_pair_args(args.label, job_model.LabelParam)
job_producer = dstat_job_producer(
provider=provider,
statuses=set(args.status) if args.status else None,
user_ids=user_ids,
job_ids=set(args.jobs) if args.jobs else None,
job_names=set(args.names) if args.names else None,
task_ids=set(args.tasks) if args.tasks else None,
task_attempts=set(args.attempts) if args.attempts else None,
labels=labels if labels else None,
create_time_min=create_time_min,
max_tasks=args.limit,
full_output=args.full,
summary_output=args.summary,
poll_interval=poll_interval,
raw_format=bool(args.format == 'provider-json'))
# Track if any jobs are running in the event --wait was requested.
for poll_event_tasks in job_producer:
rows = poll_event_tasks
formatter.prepare_and_print_table(rows, args.summary)
def dstat_job_producer(provider,
statuses,
user_ids=None,
job_ids=None,
job_names=None,
task_ids=None,
task_attempts=None,
labels=None,
create_time_min=None,
create_time_max=None,
max_tasks=0,
full_output=False,
summary_output=False,
poll_interval=0,
raw_format=False):
"""Generate jobs as lists of task dicts ready for formatting/output.
Args:
provider: an instantiated dsub provider.
statuses: a set of status strings that eligible jobs may match.
user_ids: a set of user strings that eligible jobs may match.
job_ids: a set of job-id strings eligible jobs may match.
job_names: a set of job-name strings eligible jobs may match.
task_ids: a set of task-id strings eligible tasks may match.
task_attempts: a set of task-attempt strings eligible tasks may match.
labels: set of LabelParam that all tasks must match.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent create
time of a task, inclusive.
max_tasks: (int) maximum number of tasks to return per dstat job lookup.
full_output: (bool) return all dsub fields.
summary_output: (bool) return a summary of the job list.
poll_interval: (int) wait time between poll events, dstat will poll jobs
until all jobs succeed or fail. Set to zero to disable
polling and return after the first lookup.
raw_format: (bool) set True to prevent dsub from normalizing the task dict,
this defaults to False and should only be set True if a
provider-specific view of tasks is absolutely required.
(NB: provider interfaces change over time, no transition path
will be provided for users depending on this flag).
Yields:
lists of task dictionaries - each list representing a dstat poll event.
"""
some_job_running = True
while some_job_running:
# Get a batch of jobs.
tasks = provider.lookup_job_tasks(
statuses,
user_ids=user_ids,
job_ids=job_ids,
job_names=job_names,
task_ids=task_ids,
task_attempts=task_attempts,
labels=labels,
create_time_min=create_time_min,
create_time_max=create_time_max,
max_tasks=max_tasks,
page_size=max_tasks)
some_job_running = False
formatted_tasks = []
for task in tasks:
if 0 < max_tasks <= len(formatted_tasks):
break
# Format tasks as specified.
if raw_format:
formatted_tasks.append(task.raw_task_data())
else:
formatted_tasks.append(
output_formatter.prepare_row(task, full_output, summary_output))
# Determine if any of the jobs are running.
if task.get_field('task-status') == 'RUNNING':
some_job_running = True
# Yield the tasks and determine if the loop should continue.
yield formatted_tasks
if poll_interval and some_job_running:
time.sleep(poll_interval)
else:
break
def lookup_job_tasks(provider,
statuses,
user_ids=None,
job_ids=None,
job_names=None,
task_ids=None,
task_attempts=None,
labels=None,
create_time_min=None,
create_time_max=None,
max_tasks=0,
page_size=0,
summary_output=False):
"""Generate formatted jobs individually, in order of create-time.
Args:
provider: an instantiated dsub provider.
statuses: a set of status strings that eligible jobs may match.
user_ids: a set of user strings that eligible jobs may match.
job_ids: a set of job-id strings eligible jobs may match.
job_names: a set of job-name strings eligible jobs may match.
task_ids: a set of task-id strings eligible tasks may match.
task_attempts: a set of task-attempt strings eligible tasks may match.
labels: set of LabelParam that all tasks must match.
create_time_min: a timezone-aware datetime value for the earliest create
time of a task, inclusive.
create_time_max: a timezone-aware datetime value for the most recent create
time of a task, inclusive.
max_tasks: (int) maximum number of tasks to return per dstat job lookup.
page_size: the page size to use for each query to the backend. May be
ignored by some provider implementations.
summary_output: (bool) summarize the job list.
Yields:
Individual task dictionaries with associated metadata
"""
tasks_generator = provider.lookup_job_tasks(
statuses,
user_ids=user_ids,
job_ids=job_ids,
job_names=job_names,
task_ids=task_ids,
task_attempts=task_attempts,
labels=labels,
create_time_min=create_time_min,
create_time_max=create_time_max,
max_tasks=max_tasks,
page_size=page_size)
# Yield formatted tasks.
for task in tasks_generator:
yield output_formatter.prepare_row(task, True, summary_output)
if __name__ == '__main__':
main()
| 36.062162 | 80 | 0.657348 |
4a24836fc4537e4edfa4c0415c1ea79ce7aed75f | 4,738 | py | Python | DozukiPDFBackup.py | VIPQualityPost/DozukiPDFBackup | b7b5d69e5906ad17ca20d4902c810d59f369f39f | [
"Apache-2.0"
] | null | null | null | DozukiPDFBackup.py | VIPQualityPost/DozukiPDFBackup | b7b5d69e5906ad17ca20d4902c810d59f369f39f | [
"Apache-2.0"
] | null | null | null | DozukiPDFBackup.py | VIPQualityPost/DozukiPDFBackup | b7b5d69e5906ad17ca20d4902c810d59f369f39f | [
"Apache-2.0"
] | null | null | null | # Program for crawling Dozuki sites and exporting guides in PDF
# format while retaining online file-structure.
# Released 03/22/2020 Matei Jordache
#
from selenium import webdriver
from selenium.common.exceptions import TimeoutException
import time, bs4, requests, os
#
windows = True
browser = webdriver.Firefox() #choose chrome if you want, use webdriver.Chrome()
dozukiurl = "https://org.dozuki.com" #Domain to start collecting guides at. Use root directory to get all.
dozukiemail = "[email protected]" # Email for login
dozukipwd = "dozukipwd" # Password for login
dirpath = [r"C:\Users\Dozuki\BackupGuides"] # List of strings to tell where we are in the file tree
# Waiting routine, still needs work (document.readyState return "complete" before finishing)
def waitload():
time.sleep(3)
#wait = WebDriverWait(browser, 10)
#try:
# wait.until(EC.visibility_of_all_elements_located((By.TAG_NAME, 'html')))
# time.sleep(2)
#except TimeoutException:
# time.sleep(0.5)
# Reouting to fix filepath
def appendpath(cattag):
global dirpath, soup
soup = bs4.BeautifulSoup(browser.page_source, features="html.parser") #Get HTML from page
try:
categorytitle = soup.find(cattag).text #Find the title of category or guide
except AttributeError:
waitload()
appendpath(cattag) # Retry to make the soup
categorytitle = categorytitle.strip().replace('/','').replace(' ', '_') #Remove whitespace and format
dirpath.append(os.path.join(dirpath[-1], categorytitle))
# Category routine
def categoryscrape():
global dirpath, soup
appendpath('h1')
subcategory = browser.find_elements_by_class_name('categoryAnchor') #see if there are subcategories
waitload()
for i in range(len(subcategory)): #run through all subcategories
try:
browser.find_elements_by_class_name('categoryAnchor')[i].click() #choose a category
waitload()
categoryscrape() #Repeat to check for more subcategories
except IndexError: #Catch if try to click before page loaded fully
waitload() # Wait and then try again
i = i-1 #So we don't jump to the next element after catch error
waitload()
#Sifting guides from other content
guide = browser.find_elements_by_class_name('cell') #discover how many guides
waitload()
for j in range(len(guide)): # Run through all the perceived guides
try:
wikitext = soup.find_all(class_="pageTitle wikiTitle originalText bordered") # Wikis look like guides
if wikitext != []: # If wiki, go back and try next
browser.execute_script("window.history.go(-1)")
else:
browser.find_elements_by_class_name('cell')[j].click() # Embedded documents look more like guides than wikis
waitload()
pdffile = browser.find_elements_by_tag_name('iframe')
if pdffile == []: # Capture guide
waitload()
guidescrape()
else: # Not a guide, some other embedded content
browser.execute_script("window.history.go(-1)")
waitload()
except IndexError: #Catch if try to choose element before page fully loaded
waitload()
j = j-1 # So we don't skip any elements after catching error
continue
browser.execute_script("window.history.go(-1)") # Go up a directory in the filepath on Dozuki
dirpath.pop(-1) #Go up a directory in the filepath for storage
waitload()
#Guide routine
def guidescrape():
global dirpath, soup
appendpath('h1') #Get unique guide title add to path
browser.find_element_by_link_text('Options').click()
browser.find_element_by_link_text('Download PDF').click()
dlurl = browser.current_url #pass url to requests to download outside browser
response = requests.get(dlurl)
guidepath = dirpath[-1] # Chop off / to add file extension
os.makedirs(dirpath[-2], exist_ok=True) # Check if directory exists and create if not there
with open(guidepath + ".pdf", 'wb') as f:
f.write(response.content) # Write .pdf to file
print(guidepath + ".pdf") #So we can see what guides got processed
dirpath.pop(-1) # Stop specifying this guide as path
browser.execute_script("window.history.go(-2)") # Go back to parent directory from PDF page
waitload()
# Login and initialization
browser.get(dozukiurl)
loginElem = browser.find_element_by_id('email')
loginElem.send_keys(dozukiemail)
pwdElem = browser.find_element_by_id('password')
pwdElem.send_keys(dozukipwd)
pwdElem.submit()
waitload()
categoryscrape()
browser.quit() # All done, close everything up
| 46.910891 | 124 | 0.686154 |
4a2484c7abd4e247734f871030260a70fe89e43a | 3,239 | py | Python | profiles_project/settings.py | irot94/profiles-rest-api | 907bd4ffdb63e3d03899cbbfd69bd0373a331232 | [
"MIT"
] | null | null | null | profiles_project/settings.py | irot94/profiles-rest-api | 907bd4ffdb63e3d03899cbbfd69bd0373a331232 | [
"MIT"
] | null | null | null | profiles_project/settings.py | irot94/profiles-rest-api | 907bd4ffdb63e3d03899cbbfd69bd0373a331232 | [
"MIT"
] | null | null | null | """
Django settings for profiles_project project.
Generated by 'django-admin startproject' using Django 3.1.7.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '2(i_vk^eifalofp*l&rg1^m5eb!=n^-=s$$&kwfwn)ocn(cb1w'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['192.168.0.171', '127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'profiles_api'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'profiles_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'profiles_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'profiles_api.UserProfile'
| 25.706349 | 91 | 0.698364 |
4a248506c844140e02d31cf566212f387ba99f0f | 14,150 | py | Python | watcher/decision_engine/model/notification/cinder.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | 64 | 2015-10-18T02:57:24.000Z | 2022-01-13T11:27:51.000Z | watcher/decision_engine/model/notification/cinder.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | null | null | null | watcher/decision_engine/model/notification/cinder.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | 35 | 2015-12-25T13:53:21.000Z | 2021-07-19T15:50:16.000Z | # -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log
from watcher.common import cinder_helper
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.model.notification import base
from watcher.decision_engine.model.notification import filtering
LOG = log.getLogger(__name__)
class CinderNotification(base.NotificationEndpoint):
def __init__(self, collector):
super(CinderNotification, self).__init__(collector)
self._cinder = None
@property
def cinder(self):
if self._cinder is None:
self._cinder = cinder_helper.CinderHelper()
return self._cinder
def update_pool(self, pool, data):
"""Update the storage pool using the notification data."""
pool.update({
"total_capacity_gb": data['total'],
"free_capacity_gb": data['free'],
"provisioned_capacity_gb": data['provisioned'],
"allocated_capacity_gb": data['allocated'],
"virtual_free": data['virtual_free']
})
node_name = pool.name.split("#")[0]
node = self.get_or_create_node(node_name)
self.cluster_data_model.map_pool(pool, node)
LOG.debug("Mapped pool %s to %s", pool.name, node.host)
def update_pool_by_api(self, pool):
"""Update the storage pool using the API data."""
if not pool:
return
_pool = self.cinder.get_storage_pool_by_name(pool.name)
pool.update({
"total_volumes": _pool.total_volumes,
"total_capacity_gb": _pool.total_capacity_gb,
"free_capacity_gb": _pool.free_capacity_gb,
"provisioned_capacity_gb": _pool.provisioned_capacity_gb,
"allocated_capacity_gb": _pool.allocated_capacity_gb
})
node_name = pool.name.split("#")[0]
node = self.get_or_create_node(node_name)
self.cluster_data_model.map_pool(pool, node)
LOG.debug("Mapped pool %s to %s", pool.name, node.host)
def create_storage_node(self, name):
"""Create the storage node by querying the Cinder API."""
try:
_node = self.cinder.get_storage_node_by_name(name)
_volume_type = self.cinder.get_volume_type_by_backendname(
# name is formatted as host@backendname
name.split('@')[1])
storage_node = element.StorageNode(
host=_node.host,
zone=_node.zone,
state=_node.state,
status=_node.status,
volume_type=_volume_type)
return storage_node
except Exception as exc:
LOG.exception(exc)
LOG.debug("Could not create storage node %s.", name)
raise exception.StorageNodeNotFound(name=name)
def get_or_create_node(self, name):
"""Get storage node by name, otherwise create storage node"""
if name is None:
LOG.debug("Storage node name not provided: skipping")
return
try:
return self.cluster_data_model.get_node_by_name(name)
except exception.StorageNodeNotFound:
# The node didn't exist yet so we create a new node object
node = self.create_storage_node(name)
LOG.debug("New storage node created: %s", name)
self.cluster_data_model.add_node(node)
LOG.debug("New storage node added: %s", name)
return node
def create_pool(self, pool_name):
"""Create the storage pool by querying the Cinder API."""
try:
_pool = self.cinder.get_storage_pool_by_name(pool_name)
pool = element.Pool(
name=_pool.name,
total_volumes=_pool.total_volumes,
total_capacity_gb=_pool.total_capacity_gb,
free_capacity_gb=_pool.free_capacity_gb,
provisioned_capacity_gb=_pool.provisioned_capacity_gb,
allocated_capacity_gb=_pool.allocated_capacity_gb)
return pool
except Exception as exc:
LOG.exception(exc)
LOG.debug("Could not refresh the pool %s.", pool_name)
raise exception.PoolNotFound(name=pool_name)
def get_or_create_pool(self, name):
if not name:
LOG.debug("Pool name not provided: skipping")
return
try:
return self.cluster_data_model.get_pool_by_pool_name(name)
except exception.PoolNotFound:
# The pool didn't exist yet so we create a new pool object
pool = self.create_pool(name)
LOG.debug("New storage pool created: %s", name)
self.cluster_data_model.add_pool(pool)
LOG.debug("New storage pool added: %s", name)
return pool
def get_or_create_volume(self, volume_id, pool_name=None):
try:
if pool_name:
self.get_or_create_pool(pool_name)
except exception.PoolNotFound:
LOG.warning("Could not find storage pool %(pool)s for "
"volume %(volume)s",
dict(pool=pool_name, volume=volume_id))
try:
return self.cluster_data_model.get_volume_by_uuid(volume_id)
except exception.VolumeNotFound:
# The volume didn't exist yet so we create a new volume object
volume = element.Volume(uuid=volume_id)
self.cluster_data_model.add_volume(volume)
return volume
def update_volume(self, volume, data):
"""Update the volume using the notification data."""
def _keyReplace(key):
if key == 'instance_uuid':
return 'server_id'
if key == 'id':
return 'attachment_id'
attachments = [
{_keyReplace(k): v for k, v in iter(d.items())
if k in ('instance_uuid', 'id')}
for d in data['volume_attachment']
]
# glance_metadata is provided if volume is bootable
bootable = False
if 'glance_metadata' in data:
bootable = True
volume.update({
"name": data['display_name'] or "",
"size": data['size'],
"status": data['status'],
"attachments": attachments,
"snapshot_id": data['snapshot_id'] or "",
"project_id": data['tenant_id'],
"metadata": data['metadata'],
"bootable": bootable
})
try:
# if volume is under pool, let's update pool element.
# get existing pool or create pool by cinder api
pool = self.get_or_create_pool(data['host'])
self.update_pool_by_api(pool)
except exception.PoolNotFound as exc:
LOG.exception(exc)
pool = None
self.update_volume_mapping(volume, pool)
def update_volume_mapping(self, volume, pool):
if pool is None:
self.cluster_data_model.add_volume(volume)
LOG.debug("Volume %s not yet attached to any pool: skipping",
volume.uuid)
return
try:
try:
current_pool = (
self.cluster_data_model.get_pool_by_volume(
volume) or self.get_or_create_pool(pool.name))
except exception.PoolNotFound as exc:
LOG.exception(exc)
# If we can't create the pool,
# we consider the volume as unmapped
current_pool = None
LOG.debug("Mapped pool %s found", pool.name)
if current_pool and pool != current_pool:
LOG.debug("Unmapping volume %s from %s",
volume.uuid, pool.name)
self.cluster_data_model.unmap_volume(volume, current_pool)
except exception.VolumeNotFound:
# The instance didn't exist yet so we map it for the first time
LOG.debug("New volume: mapping it to %s", pool.name)
finally:
if pool:
self.cluster_data_model.map_volume(volume, pool)
LOG.debug("Mapped volume %s to %s", volume.uuid, pool.name)
def delete_volume(self, volume, pool):
try:
self.cluster_data_model.delete_volume(volume)
except Exception:
LOG.info("Volume %s already deleted", volume.uuid)
try:
if pool:
# if volume is under pool, let's update pool element.
# get existing pool or create pool by cinder api
pool = self.get_or_create_pool(pool.name)
self.update_pool_by_api(pool)
except exception.PoolNotFound as exc:
LOG.exception(exc)
pool = None
class CapacityNotificationEndpoint(CinderNotification):
@property
def filter_rule(self):
"""Cinder capacity notification filter"""
return filtering.NotificationFilter(
publisher_id=r'capacity.*',
event_type='capacity.pool',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
name = payload['name_to_id']
try:
pool = self.get_or_create_pool(name)
self.update_pool(pool, payload)
except exception.PoolNotFound as exc:
LOG.exception(exc)
class VolumeNotificationEndpoint(CinderNotification):
publisher_id_regex = r'^volume.*'
class VolumeCreateEnd(VolumeNotificationEndpoint):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.create.end',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
volume_id = payload['volume_id']
poolname = payload['host']
volume = self.get_or_create_volume(volume_id, poolname)
self.update_volume(volume, payload)
class VolumeUpdateEnd(VolumeNotificationEndpoint):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.update.end',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
volume_id = payload['volume_id']
poolname = payload['host']
volume = self.get_or_create_volume(volume_id, poolname)
self.update_volume(volume, payload)
class VolumeAttachEnd(VolumeUpdateEnd):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.attach.end',
)
class VolumeDetachEnd(VolumeUpdateEnd):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.detach.end',
)
class VolumeResizeEnd(VolumeUpdateEnd):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.resize.end',
)
class VolumeDeleteEnd(VolumeNotificationEndpoint):
@property
def filter_rule(self):
"""Cinder volume notification filter"""
return filtering.NotificationFilter(
publisher_id=self.publisher_id_regex,
event_type='volume.delete.end',
)
def info(self, ctxt, publisher_id, event_type, payload, metadata):
ctxt.request_id = metadata['message_id']
ctxt.project_domain = event_type
LOG.info("Event '%(event)s' received from %(publisher)s "
"with metadata %(metadata)s",
dict(event=event_type,
publisher=publisher_id,
metadata=metadata))
LOG.debug(payload)
volume_id = payload['volume_id']
poolname = payload['host']
volume = self.get_or_create_volume(volume_id, poolname)
try:
pool = self.get_or_create_pool(poolname)
except exception.PoolNotFound as exc:
LOG.exception(exc)
pool = None
self.delete_volume(volume, pool)
| 36.658031 | 75 | 0.609046 |
4a2486651c7eff97536490e216b258fef893cb6c | 804 | py | Python | 684.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
] | null | null | null | 684.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
] | null | null | null | 684.py | OmangRawat/Leetcode | 6fa696367ef9c5e6b08940b11e2202382d1afc07 | [
"MIT"
] | null | null | null | """
---> Redundant Connection
---> Medium
"""
class Solution:
def findRedundantConnection(self, edges):
n = len(edges)
root = {i: i for i in range(1, n + 1)}
def find_parent(comp):
if root[comp] == comp:
return comp
return find_parent(root[comp])
for i, j in edges:
u = find_parent(i)
v = find_parent(j)
if u == v:
return [i, j]
else:
root[v] = u
in_edges = [[1, 2], [2, 3], [3, 4], [1, 4], [1, 5]]
a = Solution()
print(a.findRedundantConnection(in_edges))
"""
Build components of connected nodes with union-find. While processing each edge, if both endpoints
already share the same root, that edge links nodes which earlier edges have already connected, so it is the redundant one and is returned.
"""
| 20.615385 | 121 | 0.539801 |
4a248767097ef0b771cce3446dca098525a9ddc2 | 222 | py | Python | tests/samples/all_columns.py | spamegg1/snoop | 2d169d003de4382717f45592f5799983c26a8573 | [
"MIT"
] | 751 | 2019-07-03T13:40:38.000Z | 2022-03-30T02:28:00.000Z | tests/samples/all_columns.py | spamegg1/snoop | 2d169d003de4382717f45592f5799983c26a8573 | [
"MIT"
] | 42 | 2019-07-04T19:30:36.000Z | 2022-03-26T09:19:19.000Z | tests/samples/all_columns.py | spamegg1/snoop | 2d169d003de4382717f45592f5799983c26a8573 | [
"MIT"
] | 30 | 2019-07-14T15:55:27.000Z | 2022-03-19T16:38:12.000Z | from snoop.configuration import Config
snoop = Config(columns='time thread thread_ident file full_file function function_qualname').snoop
def main():
@snoop
def foo():
x = 1
y = x + 2
foo()
| 17.076923 | 98 | 0.644144 |
4a2487f5c8ccbd4c1980363ee7fceef12212d5af | 1,424 | py | Python | DailyProgrammer/DP20120730C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | 2 | 2020-12-23T18:59:22.000Z | 2021-04-14T13:16:09.000Z | DailyProgrammer/DP20120730C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | DailyProgrammer/DP20120730C.py | DayGitH/Python-Challenges | bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | [
"MIT"
] | null | null | null | """
[7/30/2012] Challenge #83 [difficult] (Digits of the square-root of 2)
https://www.reddit.com/r/dailyprogrammer/comments/xdx8q/7302012_challenge_83_difficult_digits_of_the/
The square-root of 2 is, as [Hippasus of Metapontum](http://en.wikipedia.org/wiki/Hippasus) discovered to his sorrow,
irrational. Among other things, this means that its decimal expansion goes on forever and never repeats.
Here, for instance, [is the first 100000 digits](http://pastebin.com/tQ3NwP05) of the square-root of 2.
Except that it's not!
I, evil genius that I am, have changed exactly one of those 100000 digits to something else, so that it is slightly
wrong. Write a program that finds what digit I changed, what I changed it from and what I changed it to.
Now, there are a number of places online where you can get a gigantic decimal expansion of sqrt(2), and the easiest way
to solve this problem would be to simply load one of those files in as a string and compare it to this file, and the
number would pop right out. But the point of this challenge is to try and do it with math, not the internet, so that
solution is prohibited!
* Thanks to [MmmVomit](http://www.reddit.com/user/MmmVomit) for suggesting (a version of) this problem at
/r/dailyprogrammer_ideas! If you have a problem that you think would be good for us, head on over there and suggest it!
"""
def main():
pass
if __name__ == "__main__":
main()
| 52.740741 | 119 | 0.76264 |
4a2488d77a539a546584ec4625ff8e1d264a0d3c | 16,726 | py | Python | tools/reflection_generator/java_class.py | jondong/crosswalk | 2c25a329fe43480a3db2cfc4309279eda107d072 | [
"BSD-3-Clause"
] | null | null | null | tools/reflection_generator/java_class.py | jondong/crosswalk | 2c25a329fe43480a3db2cfc4309279eda107d072 | [
"BSD-3-Clause"
] | null | null | null | tools/reflection_generator/java_class.py | jondong/crosswalk | 2c25a329fe43480a3db2cfc4309279eda107d072 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# Copyright (c) 2014 Intel Corporation. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
from string import Template
from java_class_component import Enum, Field
from java_method import Method
class JavaClassLoader(object):
"""Manager class maintains all loaded java classes."""
def __init__(self, src_path, class_list):
self._src_path = src_path
self._class_list = class_list
self._java_data_map = {}
for clazz in self._class_list:
self.LoadJavaFile(clazz)
for key,java_data in self._java_data_map.items():
for method in java_data._methods:
method.PrepareStrings()
def IsInternalClass(self, clazz):
return clazz in self._class_list
def GetJavaData(self, clazz):
return self._java_data_map.get(clazz)
def LoadJavaFile(self, clazz):
if self._java_data_map.has_key(clazz):
return
file_name = os.path.join(self._src_path, '%s.java' % clazz)
try:
file_handle = open(file_name, 'r')
file_content = file_handle.read()
file_handle.close()
except Exception:
print 'Error reading input Java file, please check.'
return
java_data = InternalJavaFileData(self)
java_data.SetClassContent(file_content)
self._java_data_map[clazz] = java_data
def GenerateDoc(self, doc):
if not doc:
return ''
def ReplaceInternal(matchobj):
match = matchobj.group(0)
if self.IsInternalClass(match):
return self.GetJavaData(match).wrapper_name
else:
return match
return re.sub('XWalk[a-zA-Z_0-9]*Internal',
ReplaceInternal, doc).lstrip('\n')
class InternalJavaFileData(object):
"""Data class stores the generator information of internal class."""
ANNOTATION_CREATE_INTERNALLY = 'createInternally'
ANNOTATION_CREATE_EXTERNALLY = 'createExternally'
ANNOTATION_EXTEND_CLASS = 'extendClass'
ANNOTATION_NO_INSTANCE = 'noInstance'
ANNOTATION_INSTANCE = 'instance'
ANNOTATION_IMPL = 'impl'
def __init__(self, class_loader):
self._class_loader = class_loader
self._class_name = ''
self._bridge_name = ''
self._wrapper_name = ''
self._class_type = '' # class or interface
self._class_doc = ''
self._class_annotations = {}
self._methods = []
self._fields = []
self._imports = []
self._enums = {}
self._package_name = ''
self._need_default_constructor = True
@property
def class_name(self):
return self._class_name
@property
def bridge_name(self):
return self._bridge_name
@property
def wrapper_name(self):
return self._wrapper_name
@property
def class_type(self):
return self._class_type
@property
def class_doc(self):
return self._class_doc
@property
def class_annotations(self):
return self._class_annotations
@property
def methods(self):
return self._methods
@property
def fields(self):
return self._fields
@property
def imports(self):
return self._imports
@property
def enums(self):
return self._enums
@property
def package_name(self):
return self._package_name
@property
def need_default_constructor(self):
return self._need_default_constructor
def GetJavaData(self, clazz):
return self._class_loader.GetJavaData(clazz)
def IsInternalClass(self, clazz):
return self._class_loader.IsInternalClass(clazz)
def MangleInternalNameToBridgeName(self, internal_name):
if not self.IsInternalClass(internal_name):
return internal_name
else:
return internal_name.replace('Internal', 'Bridge')
def MangleInternalNameToWrapperName(self, internal_name):
if not self.IsInternalClass(internal_name):
return internal_name
else:
return internal_name.replace('Internal', '')
def SetClassContent(self, content):
self.ExtractPackageName(content)
self.ExtractImports(content)
self.ExtractClassProperties(content)
self.ExtractMethods(content)
self.ExtractFields(content)
self.ExtractEnums(content)
def ExtractPackageName(self, java_content):
package_re = re.compile('\s*package\s+(?P<package>[a-zA-Z0-9._]+)\s*;')
for match in re.finditer(package_re, java_content):
self._package_name = match.group('package')
def ExtractImports(self, java_content):
imports_re = re.compile('\s*import\s+(?P<imported>[a-zA-Z0-9._*]+)\s*;')
for match in re.finditer(imports_re, java_content):
imported = match.group('imported')
# Determine whether the import rule should be ignored for generated code.
# TODO: Currently we only use a blacklist to filter the import rule.
if imported.startswith('org.xwalk.core.internal') or \
imported.startswith('org.chromium'):
continue
self._imports.append(imported)
def ExtractClassProperties(self, java_content):
class_re = re.compile(
'(?P<class_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<annotation_content>[a-zA-Z0-9.,=\s]*)\)?'
'\s*public\s+([a-z]+\s+)*'
'(?P<type>(class|interface))\s+'
'(?P<class_name>[a-zA-Z0-9]*)')
for match in re.finditer(class_re, java_content):
annotation_content = match.group('annotation_content')
self._class_name = match.group('class_name')
self._bridge_name = \
self.MangleInternalNameToBridgeName(self._class_name)
self._wrapper_name = \
self.MangleInternalNameToWrapperName(self._class_name)
self._class_type = match.group('type')
self._class_doc = match.group('class_doc')
self.ParseClassAnnotations(annotation_content)
def ParseClassAnnotations(self, annotation):
"""Class annotation contains the following optional attributes:
        'extendClass' - The class it has to extend
        'createExternally' - boolean
        'createInternally' - boolean
        'noInstance' - boolean
        'isConst' - boolean
        'impl' - Class to impl
        'instance' - instance
       (A small demo at the end of this file shows an example input.)"""
extend_class_re = re.compile('extendClass\s*=\s*'
'(?P<extend_class>[a-zA-Z0-9.]+)')
for match in re.finditer(extend_class_re, annotation):
extend_class = match.group('extend_class')
self._class_annotations['extendClass'] = extend_class
create_internally_re = re.compile('createInternally\s*=\s*'
'(?P<create_internally>(true|false))')
for match in re.finditer(create_internally_re, annotation):
create_internally = match.group('create_internally')
if create_internally == 'true':
self._class_annotations['createInternally'] = True
self._need_default_constructor = False
elif create_internally == 'false':
self._class_annotations['createInternally'] = False
create_externally_re = re.compile('createExternally\s*=\s*'
'(?P<create_externally>(true|false))')
for match in re.finditer(create_externally_re, annotation):
create_externally = match.group('create_externally')
if create_externally == 'true':
self._class_annotations['createExternally'] = True
elif create_externally == 'false':
self._class_annotations['createExternally'] = False
no_instance_re = re.compile('noInstance\s*=\s*'
'(?P<no_instance>(true|false))')
for match in re.finditer(no_instance_re, annotation):
no_instance = match.group('no_instance')
if no_instance == 'true':
self._class_annotations['noInstance'] = True
self._need_default_constructor = False
elif no_instance == 'false':
self._class_annotations['noInstance'] = False
is_const_re = re.compile('isConst\s*=\s*'
'(?P<is_const>(true|false))')
for match in re.finditer(is_const_re, annotation):
is_const = match.group('is_const')
if is_const == 'true':
self._class_annotations['isConst'] = True
elif is_const == 'false':
self._class_annotations['isConst'] = False
impl_re = re.compile('impl\s*=\s*'
'(?P<impl>[a-zA-Z0-9.]+)')
for match in re.finditer(impl_re, annotation):
impl = match.group('impl')
self._class_annotations['impl'] = impl
instance_re = re.compile('instance\s*=\s*'
'(?P<instance>[a-zA-Z0-9.]+)')
for match in re.finditer(instance_re, annotation):
instance = match.group('instance')
self._class_annotations['instance'] = instance
def ExtractMethods(self, java_content):
constructor_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9\$%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\[\]\>\<]*)\)')
for match in re.finditer(constructor_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
True, # is_constructor
False, # is_static
False, # is_abstract
method_name, None,
method_params, method_annotation, method_doc)
self._methods.append(method)
self._need_default_constructor = False
method_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s+(?P<method_return>[a-zA-Z0-9]+)\s+'
'(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\]\[\<\>]*)\)')
for match in re.finditer(method_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_return = match.group('method_return')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
False, # is_constructor
False, # is_static
False, # is_abstract
method_name, method_return, method_params,
method_annotation, method_doc)
self._methods.append(method)
method_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s+static\s+(synchronized\s+)*'
'(?P<method_return>[a-zA-Z0-9]+)\s+'
'(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\[\]\<\>]*)\)')
for match in re.finditer(method_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_return = match.group('method_return')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
False, # is_constructor
True, # is_static
False, # is_abstract
method_name, method_return, method_params,
method_annotation, method_doc)
self._methods.append(method)
method_re = re.compile(
'(?P<method_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\(?'
'(?P<method_annotation>[a-zA-Z0-9%,\s\(\)\{\};._"=]*)\)?'
'\s*public\s+abstract\s+(synchronized\s+)*'
'(?P<method_return>[a-zA-Z0-9]+)\s+'
'(?P<method_name>[a-zA-Z0-9]+)\('
'(?P<method_params>[a-zA-Z0-9\s,\[\]\<\>]*)\)')
for match in re.finditer(method_re, java_content):
method_annotation = match.group('method_annotation')
method_name = match.group('method_name')
method_params = match.group('method_params')
method_return = match.group('method_return')
method_doc = match.group('method_doc')
method = Method(
self._class_name,
self._class_loader,
False, # is_constructor
False, # is_static
True, # is_abstract
method_name, method_return, method_params,
method_annotation, method_doc)
self._methods.append(method)
def ExtractFields(self, java_content):
field_re = re.compile(
'(?P<field_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\s*public\s+static\s+final\s+'
'(?P<field_type>[a-zA-Z0-9_]+)\s+'
'(?P<field_name>[a-zA-Z0-9_]+)\s*=\s*'
'(?P<field_value>[a-zA-Z0-9-_"]+)\s*;')
for match in re.finditer(field_re, java_content):
field_type = match.group('field_type')
field_name = match.group('field_name')
field_value = match.group('field_value')
field_doc = match.group('field_doc')
field_object = Field(field_type, field_name, field_value, field_doc)
self._fields.append(field_object)
def ExtractEnums(self, java_content):
enum_re = re.compile(
'(?P<enum_doc>(\n\s*/\*\*.*\n(\s+\*(.)*\n)+\s+\*/\s*)?)\n'
'\s*@XWalkAPI\s*public\s+enum\s+'
'(?P<enum_name>[a-zA-Z0-9_]+)\s+{'
'(?P<enum_content>(.|\n)*?)\s*}')
for match in re.finditer(enum_re, java_content):
enum_name = match.group('enum_name')
enum_content = match.group('enum_content')
enum_doc = match.group('enum_doc')
enum_object = Enum(enum_name, enum_content, enum_doc)
self._enums[enum_name] = enum_object
def HasNoInstanceAnnotation(self):
return self._class_annotations.get(
InternalJavaFileData.ANNOTATION_NO_INSTANCE, False)
def HasCreateInternallyAnnotation(self):
return self._class_annotations.get(
InternalJavaFileData.ANNOTATION_CREATE_INTERNALLY, False)
def HasInstanceCreateInternallyAnnotation(self):
instance = None
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, None)
if clazz:
instance = self.GetJavaData(clazz.replace('.class', ''))
if instance:
return instance.HasCreateInternallyAnnotation()
else:
return self.HasCreateInternallyAnnotation()
def UseAsInstanceInBridgeCall(self, var):
return '%s.getWrapper()' % self.UseAsReturnInBridgeSuperCall(var)
def UseAsInstanceInBridgeOverrideCall(self, var):
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, self._class_name)
clazz = clazz.replace('.class', '')
if self.GetJavaData(clazz).class_annotations.get(
InternalJavaFileData.ANNOTATION_CREATE_INTERNALLY, False):
return self.UseAsReturnInBridgeSuperCall(var)
return '(%s) %s' % (self.GetJavaData(clazz).bridge_name, var)
def UseAsReturnInBridgeSuperCall(self, var):
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, self._class_name)
clazz = clazz.replace('.class', '')
if self.GetJavaData(clazz).class_annotations.get(
InternalJavaFileData.ANNOTATION_CREATE_INTERNALLY, False):
typed_var_template = Template('(${VAR} instanceof ${BRIDGE_TYPE} ?'\
' ((${BRIDGE_TYPE}) ${VAR} ) : new ${BRIDGE_TYPE}(${INTERNAL_VAR}))')
value = {'VAR': var,
'BRIDGE_TYPE': self.GetJavaData(clazz).bridge_name,
'INTERNAL_VAR': var if clazz == self._class_name else\
'(%s) %s' % (clazz, var)}
var = typed_var_template.substitute(value)
return var
def UseAsInstanceInBridgeSuperCall(self, var):
# pylint: disable=R0201
return var
def UseAsInstanceInWrapperCall(self, var):
clazz = self._class_annotations.get('instance', self._class_name)
clazz = clazz.replace('.class', '')
if clazz != self._class_name:
var = '((%s) %s)' % (self.GetJavaData(clazz).wrapper_name, var)
return '%s.getBridge()' % var
def UseAsTypeInWrapperCall(self):
return self._wrapper_name
def GetBridgeName(self, subclass=None):
if not self.IsInternalClass(self._class_name):
return self._class_name
else:
clazz = self._class_annotations.get(
InternalJavaFileData.ANNOTATION_INSTANCE, self._class_name)
clazz = clazz.replace('.class', '')
if not subclass:
return self.GetJavaData(clazz).bridge_name
else:
return clazz + '$' + subclass
def GetWrapperName(self, subclass=None):
if not self.IsInternalClass(self._class_name):
return self._class_name
else:
if not subclass:
return self._wrapper_name
else:
return "%s$%s" % (self._wrapper_name, subclass.replace('Internal', ''))
| 35.587234 | 79 | 0.650424 |
4a2488da0c80bab005311df8ceb611fff9c76323 | 27,742 | py | Python | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py | conniey/azure-sdk-for-python | f779de8e53dbec033f98f976284e6d9491fd60b3 | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py | conniey/azure-sdk-for-python | f779de8e53dbec033f98f976284e6d9491fd60b3 | [
"MIT"
] | null | null | null | sdk/textanalytics/azure-ai-textanalytics/tests/test_recognize_linked_entities_async.py | conniey/azure-sdk-for-python | f779de8e53dbec033f98f976284e6d9491fd60b3 | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import os
import pytest
import platform
import functools
from azure.core.exceptions import HttpResponseError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from azure.core.pipeline.transport import AioHttpTransport
from multidict import CIMultiDict, CIMultiDictProxy
from azure.ai.textanalytics.aio import TextAnalyticsClient
from azure.ai.textanalytics import (
VERSION,
DetectLanguageInput,
TextDocumentInput,
TextAnalyticsApiVersion,
)
from testcase import GlobalTextAnalyticsAccountPreparer
from testcase import TextAnalyticsClientPreparer as _TextAnalyticsClientPreparer
from asynctestcase import AsyncTextAnalyticsTest
# pre-apply the client_cls positional argument so it needn't be explicitly passed below
TextAnalyticsClientPreparer = functools.partial(_TextAnalyticsClientPreparer, TextAnalyticsClient)
class AiohttpTestTransport(AioHttpTransport):
"""Workaround to vcrpy bug: https://github.com/kevin1024/vcrpy/pull/461
"""
async def send(self, request, **config):
response = await super(AiohttpTestTransport, self).send(request, **config)
if not isinstance(response.headers, CIMultiDictProxy):
response.headers = CIMultiDictProxy(CIMultiDict(response.internal_response.headers))
response.content_type = response.headers.get("content-type")
return response
class TestRecognizeLinkedEntities(AsyncTextAnalyticsTest):
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_no_single_input(self, client):
with self.assertRaises(TypeError):
response = await client.recognize_linked_entities("hello world")
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_all_successful_passing_dict(self, client):
docs = [{"id": "1", "language": "en", "text": "Microsoft was founded by Bill Gates and Paul Allen"},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs, show_stats=True)
for doc in response:
self.assertEqual(len(doc.entities), 3)
self.assertIsNotNone(doc.id)
self.assertIsNotNone(doc.statistics)
for entity in doc.entities:
self.assertIsNotNone(entity.name)
self.assertIsNotNone(entity.language)
self.assertIsNotNone(entity.data_source_entity_id)
self.assertIsNotNone(entity.url)
self.assertIsNotNone(entity.data_source)
self.assertIsNotNone(entity.matches)
for match in entity.matches:
self.assertIsNotNone(match.offset)
self.assertIsNotNone(match.length)
self.assertNotEqual(match.length, 0)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_all_successful_passing_text_document_input(self, client):
docs = [
TextDocumentInput(id="1", text="Microsoft was founded by Bill Gates and Paul Allen"),
TextDocumentInput(id="2", text="Microsoft fue fundado por Bill Gates y Paul Allen")
]
response = await client.recognize_linked_entities(docs)
for doc in response:
self.assertEqual(len(doc.entities), 3)
for entity in doc.entities:
self.assertIsNotNone(entity.name)
self.assertIsNotNone(entity.language)
self.assertIsNotNone(entity.data_source_entity_id)
self.assertIsNotNone(entity.url)
self.assertIsNotNone(entity.data_source)
self.assertIsNotNone(entity.matches)
for match in entity.matches:
self.assertIsNotNone(match.offset)
self.assertIsNotNone(match.length)
self.assertNotEqual(match.length, 0)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_passing_only_string(self, client):
docs = [
u"Microsoft was founded by Bill Gates and Paul Allen",
u"Microsoft fue fundado por Bill Gates y Paul Allen",
u""
]
response = await client.recognize_linked_entities(docs)
self.assertEqual(len(response[0].entities), 3)
self.assertEqual(len(response[1].entities), 3)
self.assertTrue(response[2].is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_input_with_some_errors(self, client):
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "es", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs)
self.assertTrue(response[0].is_error)
self.assertFalse(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_input_with_all_errors(self, client):
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "Spanish", "text": "Microsoft fue fundado por Bill Gates y Paul Allen"}]
response = await client.recognize_linked_entities(docs)
self.assertTrue(response[0].is_error)
self.assertTrue(response[1].is_error)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_too_many_documents(self, client):
docs = ["One", "Two", "Three", "Four", "Five", "Six"]
with pytest.raises(HttpResponseError) as excinfo:
await client.recognize_linked_entities(docs)
assert excinfo.value.status_code == 400
assert excinfo.value.error.code == "InvalidDocumentBatch"
assert "Batch request contains too many records" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_output_same_order_as_input(self, client):
docs = [
TextDocumentInput(id="1", text="one"),
TextDocumentInput(id="2", text="two"),
TextDocumentInput(id="3", text="three"),
TextDocumentInput(id="4", text="four"),
TextDocumentInput(id="5", text="five")
]
response = await client.recognize_linked_entities(docs)
for idx, doc in enumerate(response):
self.assertEqual(str(idx + 1), doc.id)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"text_analytics_account_key": ""})
async def test_empty_credential_class(self, client):
with self.assertRaises(ClientAuthenticationError):
response = await client.recognize_linked_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"text_analytics_account_key": "xxxxxxxxxxxx"})
async def test_bad_credentials(self, client):
with self.assertRaises(ClientAuthenticationError):
response = await client.recognize_linked_entities(
["This is written in English."]
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_bad_document_input(self, client):
docs = "This is the wrong type"
with self.assertRaises(TypeError):
response = await client.recognize_linked_entities(docs)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_mixing_inputs(self, client):
docs = [
{"id": "1", "text": "Microsoft was founded by Bill Gates and Paul Allen."},
TextDocumentInput(id="2", text="I did not like the hotel we stayed at. It was too expensive."),
u"You cannot mix string input with the above inputs"
]
with self.assertRaises(TypeError):
response = await client.recognize_linked_entities(docs)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_out_of_order_ids(self, client):
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.recognize_linked_entities(docs)
in_order = ["56", "0", "22", "19", "1"]
for idx, resp in enumerate(response):
self.assertEqual(resp.id, in_order[idx])
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_show_stats_and_model_version(self, client):
def callback(response):
self.assertIsNotNone(response)
self.assertIsNotNone(response.model_version, msg=response.raw_response)
self.assertIsNotNone(response.raw_response)
self.assertEqual(response.statistics.document_count, 5)
self.assertEqual(response.statistics.transaction_count, 4)
self.assertEqual(response.statistics.valid_document_count, 4)
self.assertEqual(response.statistics.erroneous_document_count, 1)
docs = [{"id": "56", "text": ":)"},
{"id": "0", "text": ":("},
{"id": "22", "text": ""},
{"id": "19", "text": ":P"},
{"id": "1", "text": ":D"}]
response = await client.recognize_linked_entities(
docs,
show_stats=True,
model_version="latest",
raw_response_hook=callback
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_batch_size_over_limit(self, client):
docs = [u"hello world"] * 1050
with self.assertRaises(HttpResponseError):
response = await client.recognize_linked_entities(docs)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"fr\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed at. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = await client.recognize_linked_entities(docs, language="fr", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [
u"This was the best day of my life.",
u"I did not like the hotel we stayed at. It was too expensive.",
u"The restaurant was not as good as I hoped."
]
response = await client.recognize_linked_entities(docs, language="", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_per_item_dont_use_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 2)
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 1)
docs = [{"id": "1", "language": "", "text": "I will go to the park."},
{"id": "2", "language": "", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint_and_obj_input(self, client):
def callback(resp):
language_str = "\"language\": \"de\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian."),
TextDocumentInput(id="4", text="Este es un document escrito en Español."),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = await client.recognize_linked_entities(docs, language="de", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint_and_obj_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 2)
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 1)
docs = [
TextDocumentInput(id="1", text="I should take my cat to the veterinarian.", language="es"),
TextDocumentInput(id="2", text="Este es un document escrito en Español.", language="es"),
TextDocumentInput(id="3", text="猫は幸せ"),
]
response = await client.recognize_linked_entities(docs, language="en", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_whole_batch_language_hint_and_dict_per_item_hints(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 2)
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 1)
docs = [{"id": "1", "language": "es", "text": "I will go to the park."},
{"id": "2", "language": "es", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, language="en", raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"default_language": "es"})
async def test_client_passed_default_language_hint(self, client):
def callback(resp):
language_str = "\"language\": \"es\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
def callback_2(resp):
language_str = "\"language\": \"en\""
language = resp.http_request.body.count(language_str)
self.assertEqual(language, 3)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
response = await client.recognize_linked_entities(docs, language="en", raw_response_hook=callback_2)
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_invalid_language_hint_method(self, client):
response = await client.recognize_linked_entities(
["This should fail because we're passing in an invalid language hint"], language="notalanguage"
)
self.assertEqual(response[0].error.code, 'UnsupportedLanguageCode')
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_invalid_language_hint_docs(self, client):
response = await client.recognize_linked_entities(
[{"id": "1", "language": "notalanguage", "text": "This should fail because we're passing in an invalid language hint"}]
)
self.assertEqual(response[0].error.code, 'UnsupportedLanguageCode')
@GlobalTextAnalyticsAccountPreparer()
async def test_rotate_subscription_key(self, resource_group, location, text_analytics_account, text_analytics_account_key):
credential = AzureKeyCredential(text_analytics_account_key)
client = TextAnalyticsClient(text_analytics_account, credential)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs)
self.assertIsNotNone(response)
credential.update("xxx") # Make authentication fail
with self.assertRaises(ClientAuthenticationError):
response = await client.recognize_linked_entities(docs)
credential.update(text_analytics_account_key) # Authenticate successfully again
response = await client.recognize_linked_entities(docs)
self.assertIsNotNone(response)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_user_agent(self, client):
def callback(resp):
self.assertIn("azsdk-python-ai-textanalytics/{} Python/{} ({})".format(
VERSION, platform.python_version(), platform.platform()),
resp.http_request.headers["User-Agent"]
)
docs = [{"id": "1", "text": "I will go to the park."},
{"id": "2", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": "The restaurant had really good food."}]
response = await client.recognize_linked_entities(docs, raw_response_hook=callback)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_attribute_error_no_result_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.recognize_linked_entities(docs)
# Attributes on DocumentError
self.assertTrue(response[0].is_error)
self.assertEqual(response[0].id, "1")
self.assertIsNotNone(response[0].error)
# Result attribute not on DocumentError, custom error message
try:
entities = response[0].entities
except AttributeError as custom_error:
self.assertEqual(
custom_error.args[0],
'\'DocumentError\' object has no attribute \'entities\'. '
'The service was unable to process this document:\nDocument Id: 1\nError: '
'InvalidDocument - Document text is empty.\n'
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_attribute_error_nonexistent_attribute(self, client):
docs = [{"id": "1", "text": ""}]
response = await client.recognize_linked_entities(docs)
# Attribute not found on DocumentError or result obj, default behavior/message
try:
entities = response[0].attribute_not_on_result_or_error
except AttributeError as default_behavior:
self.assertEqual(
default_behavior.args[0],
'\'DocumentError\' object has no attribute \'attribute_not_on_result_or_error\''
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_bad_model_version_error(self, client):
docs = [{"id": "1", "language": "english", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.recognize_linked_entities(docs, model_version="bad")
except HttpResponseError as err:
self.assertEqual(err.error.code, "ModelVersionIncorrect")
self.assertIsNotNone(err.error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_errors(self, client):
text = ""
for _ in range(5121):
text += "x"
docs = [{"id": "1", "text": ""},
{"id": "2", "language": "english", "text": "I did not like the hotel we stayed at."},
{"id": "3", "text": text}]
doc_errors = await client.recognize_linked_entities(docs)
self.assertEqual(doc_errors[0].error.code, "InvalidDocument")
self.assertIsNotNone(doc_errors[0].error.message)
self.assertEqual(doc_errors[1].error.code, "UnsupportedLanguageCode")
self.assertIsNotNone(doc_errors[1].error.message)
self.assertEqual(doc_errors[2].error.code, "InvalidDocument")
self.assertIsNotNone(doc_errors[2].error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_document_warnings(self, client):
        # No warnings are actually returned for recognize_linked_entities. Will update when the service adds them.
docs = [
{"id": "1", "text": "This won't actually create a warning :'("},
]
result = await client.recognize_linked_entities(docs)
for doc in result:
doc_warnings = doc.warnings
self.assertEqual(len(doc_warnings), 0)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_not_passing_list_for_docs(self, client):
docs = {"id": "1", "text": "hello world"}
with pytest.raises(TypeError) as excinfo:
await client.recognize_linked_entities(docs)
assert "Input documents cannot be a dict" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_missing_input_records_error(self, client):
docs = []
with pytest.raises(ValueError) as excinfo:
await client.recognize_linked_entities(docs)
assert "Input documents can not be empty or None" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_passing_none_docs(self, client):
with pytest.raises(ValueError) as excinfo:
await client.recognize_linked_entities(None)
assert "Input documents can not be empty or None" in str(excinfo.value)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_duplicate_ids_error(self, client):
# Duplicate Ids
docs = [{"id": "1", "text": "hello world"},
{"id": "1", "text": "I did not like the hotel we stayed at."}]
try:
result = await client.recognize_linked_entities(docs)
except HttpResponseError as err:
self.assertEqual(err.error.code, "InvalidDocument")
self.assertIsNotNone(err.error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_batch_size_over_limit_error(self, client):
# Batch size over limit
docs = [u"hello world"] * 1001
try:
response = await client.recognize_linked_entities(docs)
except HttpResponseError as err:
self.assertEqual(err.error.code, "InvalidDocumentBatch")
self.assertIsNotNone(err.error.message)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_language_kwarg_spanish(self, client):
def callback(response):
language_str = "\"language\": \"es\""
self.assertEqual(response.http_request.body.count(language_str), 1)
self.assertIsNotNone(response.model_version)
self.assertIsNotNone(response.statistics)
res = await client.recognize_linked_entities(
documents=["Bill Gates is the CEO of Microsoft."],
model_version="latest",
show_stats=True,
language="es",
raw_response_hook=callback
)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_pass_cls(self, client):
def callback(pipeline_response, deserialized, _):
return "cls result"
res = await client.recognize_linked_entities(
documents=["Test passing cls to endpoint"],
cls=callback
)
assert res == "cls result"
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer()
async def test_offset_length(self, client):
result = await client.recognize_linked_entities(["Microsoft was founded by Bill Gates and Paul Allen"])
entities = result[0].entities
# the entities are being returned in a non-sequential order by the service
microsoft_entity = [entity for entity in entities if entity.name == "Microsoft"][0]
bill_gates_entity = [entity for entity in entities if entity.name == "Bill Gates"][0]
paul_allen_entity = [entity for entity in entities if entity.name == "Paul Allen"][0]
self.assertEqual(microsoft_entity.matches[0].offset, 0)
self.assertEqual(microsoft_entity.matches[0].length, 9)
self.assertEqual(bill_gates_entity.matches[0].offset, 25)
self.assertEqual(bill_gates_entity.matches[0].length, 10)
self.assertEqual(paul_allen_entity.matches[0].offset, 40)
self.assertEqual(paul_allen_entity.matches[0].length, 10)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
async def test_no_offset_length_v3_linked_entity_match(self, client):
result = await client.recognize_linked_entities(["Microsoft was founded by Bill Gates and Paul Allen"])
entities = result[0].entities
self.assertIsNone(entities[0].matches[0].offset)
self.assertIsNone(entities[0].matches[0].length)
self.assertIsNone(entities[1].matches[0].offset)
self.assertIsNone(entities[1].matches[0].length)
self.assertIsNone(entities[2].matches[0].offset)
self.assertIsNone(entities[2].matches[0].length)
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={"api_version": TextAnalyticsApiVersion.V3_0})
async def test_string_index_type_not_fail_v3(self, client):
# make sure that the addition of the string_index_type kwarg for v3.1-preview.1 doesn't
# cause v3.0 calls to fail
await client.recognize_linked_entities(["please don't fail"])
# currently only have this as playback since the dev endpoint is unreliable
@pytest.mark.playback_test_only
@GlobalTextAnalyticsAccountPreparer()
@TextAnalyticsClientPreparer(client_kwargs={
"api_version": TextAnalyticsApiVersion.V3_1_PREVIEW_2,
"text_analytics_account_key": os.environ.get('AZURE_TEXT_ANALYTICS_KEY'),
"text_analytics_account": "https://cognitiveusw2dev.azure-api.net/"
})
async def test_bing_id(self, client):
result = await client.recognize_linked_entities(["Microsoft was founded by Bill Gates and Paul Allen"])
for doc in result:
for entity in doc.entities:
assert entity.bing_entity_search_api_id # this checks if it's None and if it's empty
| 43.41471 | 131 | 0.658676 |
4a24893fa9b700fcb076009b33e43592383c6ca8 | 76,132 | py | Python | edk2/IntelFsp2Pkg/Tools/GenCfgOpt.py | TheMindVirus/pftf-rpi4 | 6070b65a02e5ab3ad774d52620c1d136f17c5df3 | [
"BSD-2-Clause-Patent",
"MIT"
] | 1 | 2021-12-03T05:07:39.000Z | 2021-12-03T05:07:39.000Z | edk2/IntelFsp2Pkg/Tools/GenCfgOpt.py | TheMindVirus/pftf-rpi4 | 6070b65a02e5ab3ad774d52620c1d136f17c5df3 | [
"BSD-2-Clause-Patent",
"MIT"
] | null | null | null | edk2/IntelFsp2Pkg/Tools/GenCfgOpt.py | TheMindVirus/pftf-rpi4 | 6070b65a02e5ab3ad774d52620c1d136f17c5df3 | [
"BSD-2-Clause-Patent",
"MIT"
] | null | null | null | ## @ GenCfgOpt.py
#
# Copyright (c) 2014 - 2021, Intel Corporation. All rights reserved.<BR>
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
##
import os
import re
import sys
import struct
from datetime import date
from functools import reduce
# Generated file copyright header
__copyright_txt__ = """## @file
#
# THIS IS AUTO-GENERATED FILE BY BUILD TOOLS AND PLEASE DO NOT MAKE MODIFICATION.
#
# This file lists all VPD information for a platform, collected by build.exe.
#
# Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
"""
__copyright_bsf__ = """/** @file
Boot Setting File for Platform Configuration.
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
This program and the accompanying materials
are licensed and made available under the terms and conditions of the BSD License
which accompanies this distribution. The full text of the license may be found at
http://opensource.org/licenses/bsd-license.php
THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
This file is automatically generated. Please do NOT modify !!!
**/
"""
__copyright_h__ = """/** @file
Copyright (c) %4d, Intel Corporation. All rights reserved.<BR>
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
This file is automatically generated. Please do NOT modify !!!
**/
"""
BuildOptionPcd = []
class CLogicalExpression:
def __init__(self):
self.index = 0
self.string = ''
def errExit(self, err = ''):
print ("ERROR: Express parsing for:")
print (" %s" % self.string)
print (" %s^" % (' ' * self.index))
if err:
print ("INFO : %s" % err)
raise SystemExit
def getNonNumber (self, n1, n2):
if not n1.isdigit():
return n1
if not n2.isdigit():
return n2
return None
def getCurr(self, lens = 1):
try:
if lens == -1:
return self.string[self.index :]
else:
if self.index + lens > len(self.string):
lens = len(self.string) - self.index
return self.string[self.index : self.index + lens]
except Exception:
return ''
def isLast(self):
return self.index == len(self.string)
def moveNext(self, len = 1):
self.index += len
def skipSpace(self):
while not self.isLast():
if self.getCurr() in ' \t':
self.moveNext()
else:
return
def normNumber (self, val):
return True if val else False
def getNumber(self, var):
var = var.strip()
if re.match('^0x[a-fA-F0-9]+$', var):
value = int(var, 16)
elif re.match('^[+-]?\d+$', var):
value = int(var, 10)
else:
value = None
return value
def parseValue(self):
self.skipSpace()
var = ''
while not self.isLast():
char = self.getCurr()
if re.match('^[\w.]', char):
var += char
self.moveNext()
else:
break
val = self.getNumber(var)
if val is None:
value = var
else:
value = "%d" % val
return value
def parseSingleOp(self):
self.skipSpace()
if re.match('^NOT\W', self.getCurr(-1)):
self.moveNext(3)
op = self.parseBrace()
val = self.getNumber (op)
if val is None:
self.errExit ("'%s' is not a number" % op)
return "%d" % (not self.normNumber(int(op)))
else:
return self.parseValue()
def parseBrace(self):
self.skipSpace()
char = self.getCurr()
if char == '(':
self.moveNext()
value = self.parseExpr()
self.skipSpace()
if self.getCurr() != ')':
self.errExit ("Expecting closing brace or operator")
self.moveNext()
return value
else:
value = self.parseSingleOp()
return value
def parseCompare(self):
value = self.parseBrace()
while True:
self.skipSpace()
char = self.getCurr()
if char in ['<', '>']:
self.moveNext()
next = self.getCurr()
if next == '=':
op = char + next
self.moveNext()
else:
op = char
result = self.parseBrace()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber(eval (value + op + result))
else:
self.errExit ("'%s' is not a valid number for comparision" % test)
elif char in ['=', '!']:
op = self.getCurr(2)
if op in ['==', '!=']:
self.moveNext(2)
result = self.parseBrace()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber((eval (value + op + result)))
else:
value = "%d" % self.normNumber(eval ("'" + value + "'" + op + "'" + result + "'"))
else:
break
else:
break
return value
def parseAnd(self):
value = self.parseCompare()
while True:
self.skipSpace()
if re.match('^AND\W', self.getCurr(-1)):
self.moveNext(3)
result = self.parseCompare()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber(int(value) & int(result))
else:
self.errExit ("'%s' is not a valid op number for AND" % test)
else:
break
return value
def parseOrXor(self):
value = self.parseAnd()
op = None
while True:
self.skipSpace()
op = None
if re.match('^XOR\W', self.getCurr(-1)):
self.moveNext(3)
op = '^'
elif re.match('^OR\W', self.getCurr(-1)):
self.moveNext(2)
op = '|'
else:
break
if op:
result = self.parseAnd()
test = self.getNonNumber(result, value)
if test is None:
value = "%d" % self.normNumber(eval (value + op + result))
else:
self.errExit ("'%s' is not a valid op number for XOR/OR" % test)
return value
def parseExpr(self):
return self.parseOrXor()
def getResult(self):
value = self.parseExpr()
self.skipSpace()
if not self.isLast():
self.errExit ("Unexpected character found '%s'" % self.getCurr())
test = self.getNumber(value)
if test is None:
self.errExit ("Result '%s' is not a number" % value)
return int(value)
def evaluateExpress (self, Expr):
self.index = 0
self.string = Expr
if self.getResult():
Result = True
else:
Result = False
return Result
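# Illustrative usage sketch (added for clarity; not part of the original tool).
# CLogicalExpression is driven through evaluateExpress(), which parses a DSC-style
# expression (hex/decimal numbers, comparisons, NOT/AND/OR/XOR, parentheses) and
# returns True or False. The expression text below is a made-up example.
def _example_evaluate_expression():
    Expr = CLogicalExpression()
    return Expr.evaluateExpress("(0x10 >= 16) AND NOT (2 == 3)")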
class CGenCfgOpt:
def __init__(self, Mode = ''):
self.Debug = False
self.Error = ''
self.Mode = Mode
self._GlobalDataDef = """
GlobalDataDef
SKUID = 0, "DEFAULT"
EndGlobalData
"""
self._BuidinOptionTxt = """
List &EN_DIS
Selection 0x1 , "Enabled"
Selection 0x0 , "Disabled"
EndList
"""
self._BsfKeyList = ['FIND','NAME','HELP','TYPE','PAGE', 'PAGES', 'BLOCK', 'OPTION','CONDITION','ORDER', 'MARKER', 'SUBT']
self._HdrKeyList = ['HEADER','STRUCT', 'EMBED', 'COMMENT']
self._BuidinOption = {'$EN_DIS' : 'EN_DIS'}
self._MacroDict = {}
self._VarDict = {}
self._PcdsDict = {}
self._CfgBlkDict = {}
self._CfgPageDict = {}
self._BsfTempDict = {}
self._CfgItemList = []
self._DscLines = []
self._DscFile = ''
self._MapVer = 0
self._DscTime = 0
def ParseMacros (self, MacroDefStr):
# ['-DABC=1', '-D', 'CFG_DEBUG=1', '-D', 'CFG_OUTDIR=Build']
self._MacroDict = {}
IsExpression = False
for Macro in MacroDefStr:
if Macro.startswith('-D'):
IsExpression = True
if len(Macro) > 2:
Macro = Macro[2:]
else :
continue
if IsExpression:
IsExpression = False
Match = re.match("(\w+)=(.+)", Macro)
if Match:
self._MacroDict[Match.group(1)] = Match.group(2)
else:
Match = re.match("(\w+)", Macro)
if Match:
self._MacroDict[Match.group(1)] = ''
if len(self._MacroDict) == 0:
Error = 1
else:
Error = 0
if self.Debug:
print ("INFO : Macro dictionary:")
for Each in self._MacroDict:
print (" $(%s) = [ %s ]" % (Each , self._MacroDict[Each]))
return Error
def EvaulateIfdef (self, Macro):
Result = Macro in self._MacroDict
if self.Debug:
print ("INFO : Eval Ifdef [%s] : %s" % (Macro, Result))
return Result
def ExpandMacros (self, Input, Preserve = False):
Line = Input
Match = re.findall("\$\(\w+\)", Input)
if Match:
for Each in Match:
Variable = Each[2:-1]
if Variable in self._MacroDict:
Line = Line.replace(Each, self._MacroDict[Variable])
else:
if self.Debug:
print ("WARN : %s is not defined" % Each)
if not Preserve:
Line = Line.replace(Each, Each[2:-1])
return Line
def ExpandPcds (self, Input):
Line = Input
Match = re.findall("(\w+\.\w+)", Input)
if Match:
for PcdName in Match:
if PcdName in self._PcdsDict:
Line = Line.replace(PcdName, self._PcdsDict[PcdName])
else:
if self.Debug:
print ("WARN : %s is not defined" % PcdName)
return Line
def EvaluateExpress (self, Expr):
ExpExpr = self.ExpandPcds(Expr)
ExpExpr = self.ExpandMacros(ExpExpr)
LogExpr = CLogicalExpression()
Result = LogExpr.evaluateExpress (ExpExpr)
if self.Debug:
print ("INFO : Eval Express [%s] : %s" % (Expr, Result))
return Result
def ValueToByteArray (self, ValueStr, Length):
Match = re.match("\{\s*FILE:(.+)\}", ValueStr)
if Match:
FileList = Match.group(1).split(',')
Result = bytearray()
for File in FileList:
File = File.strip()
BinPath = os.path.join(os.path.dirname(self._DscFile), File)
Result.extend(bytearray(open(BinPath, 'rb').read()))
else:
try:
Result = bytearray(self.ValueToList(ValueStr, Length))
except ValueError as e:
raise Exception ("Bytes in '%s' must be in range 0~255 !" % ValueStr)
if len(Result) < Length:
Result.extend(b'\x00' * (Length - len(Result)))
elif len(Result) > Length:
raise Exception ("Value '%s' is too big to fit into %d bytes !" % (ValueStr, Length))
return Result[:Length]
def ValueToList (self, ValueStr, Length):
if ValueStr[0] == '{':
Result = []
BinList = ValueStr[1:-1].split(',')
InBitField = False
LastInBitField = False
Value = 0
BitLen = 0
for Element in BinList:
InBitField = False
Each = Element.strip()
if len(Each) == 0:
pass
else:
if Each[0] in ['"', "'"]:
Result.extend(list(bytearray(Each[1:-1], 'utf-8')))
elif ':' in Each:
Match = re.match("(.+):(\d+)b", Each)
if Match is None:
raise Exception("Invald value list format '%s' !" % Each)
InBitField = True
CurrentBitLen = int(Match.group(2))
CurrentValue = ((self.EvaluateExpress(Match.group(1)) & (1<<CurrentBitLen) - 1)) << BitLen
else:
Result.append(self.EvaluateExpress(Each.strip()))
if InBitField:
Value += CurrentValue
BitLen += CurrentBitLen
if LastInBitField and ((not InBitField) or (Element == BinList[-1])):
if BitLen % 8 != 0:
raise Exception("Invald bit field length!")
Result.extend(Val2Bytes(Value, BitLen // 8))
Value = 0
BitLen = 0
LastInBitField = InBitField
elif ValueStr.startswith("'") and ValueStr.endswith("'"):
Result = Str2Bytes (ValueStr, Length)
elif ValueStr.startswith('"') and ValueStr.endswith('"'):
Result = Str2Bytes (ValueStr, Length)
else:
Result = Val2Bytes (self.EvaluateExpress(ValueStr), Length)
return Result
def FormatListValue(self, ConfigDict):
Struct = ConfigDict['struct']
if Struct not in ['UINT8','UINT16','UINT32','UINT64']:
return
dataarray = []
binlist = ConfigDict['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
dataarray.append(value)
unit = int(Struct[4:]) / 8
if int(ConfigDict['length']) != unit * len(dataarray):
raise Exception("Array size is not proper for '%s' !" % ConfigDict['cname'])
bytearray = []
for each in dataarray:
value = each
for loop in range(int(unit)):
bytearray.append("0x%02X" % (value & 0xFF))
value = value >> 8
newvalue = '{' + ','.join(bytearray) + '}'
ConfigDict['value'] = newvalue
return ""
def ParseDscFile (self, DscFile, FvDir):
Hardcode = False
AutoAlign = False
self._CfgItemList = []
self._CfgPageDict = {}
self._CfgBlkDict = {}
self._DscFile = DscFile
self._FvDir = FvDir
self._DscLines = []
self._BsfTempDict = {}
# Initial DSC time is parent DSC time.
self._DscTime = os.path.getmtime(DscFile)
CfgDict = {}
IsDefSect = False
IsPcdSect = False
IsUpdSect = False
IsVpdSect = False
IsTmpSect = False
TemplateName = ''
IfStack = []
ElifStack = []
Error = 0
ConfigDict = {}
if type(DscFile) is list:
# it is DSC lines already
DscLines = DscFile
self._DscFile = '.'
else:
DscFd = open(DscFile, "r")
DscLines = DscFd.readlines()
DscFd.close()
self._DscFile = DscFile
SkipLines = 0
MaxAlign = 32 #Default align to 32, but if there are 64 bit unit, align to 64
SizeAlign = 0 #record the struct max align
Base = 0 #Starting offset of sub-structure.
while len(DscLines):
DscLine = DscLines.pop(0).strip()
if SkipLines == 0:
self._DscLines.append (DscLine)
else:
SkipLines = SkipLines - 1
if len(DscLine) == 0:
continue
Handle = False
Match = re.match("^\[(.+)\]", DscLine)
if Match is not None:
IsDefSect = False
IsPcdSect = False
IsVpdSect = False
IsUpdSect = False
IsTmpSect = False
SectionName = Match.group(1).lower()
if SectionName == "Defines".lower():
IsDefSect = True
if (SectionName == "PcdsFeatureFlag".lower() or SectionName == "PcdsFixedAtBuild".lower()):
IsPcdSect = True
elif SectionName == "PcdsDynamicVpd.Tmp".lower():
IsTmpSect = True
elif SectionName == "PcdsDynamicVpd.Upd".lower():
ConfigDict = {}
ConfigDict['header'] = 'ON'
ConfigDict['region'] = 'UPD'
ConfigDict['order'] = -1
ConfigDict['page'] = ''
ConfigDict['name'] = ''
ConfigDict['find'] = ''
ConfigDict['marker'] = ''
ConfigDict['struct'] = ''
ConfigDict['embed'] = ''
ConfigDict['comment'] = ''
ConfigDict['subreg'] = []
ConfigDict['condition'] = ''
ConfigDict['option'] = ''
IsUpdSect = True
Offset = 0
else:
if IsDefSect or IsPcdSect or IsUpdSect or IsVpdSect or IsTmpSect:
Match = False if DscLine[0] != '!' else True
if Match:
Match = re.match("^!(else|endif|ifdef|ifndef|if|elseif|include)\s*(.+)?$", DscLine.split("#")[0])
Keyword = Match.group(1) if Match else ''
Remaining = Match.group(2) if Match else ''
Remaining = '' if Remaining is None else Remaining.strip()
if Keyword in ['if', 'elseif', 'ifdef', 'ifndef', 'include'] and not Remaining:
raise Exception ("ERROR: Expression is expected after '!if' or !elseif' for line '%s'" % DscLine)
if Keyword == 'else':
if IfStack:
IfStack[-1] = not IfStack[-1]
else:
raise Exception ("ERROR: No paired '!if' found for '!else' for line '%s'" % DscLine)
elif Keyword == 'endif':
if IfStack:
IfStack.pop()
Level = ElifStack.pop()
if Level > 0:
del IfStack[-Level:]
else:
raise Exception ("ERROR: No paired '!if' found for '!endif' for line '%s'" % DscLine)
elif Keyword == 'ifdef' or Keyword == 'ifndef':
Result = self.EvaulateIfdef (Remaining)
if Keyword == 'ifndef':
Result = not Result
IfStack.append(Result)
ElifStack.append(0)
elif Keyword == 'if' or Keyword == 'elseif':
Result = self.EvaluateExpress(Remaining)
if Keyword == "if":
ElifStack.append(0)
IfStack.append(Result)
else: #elseif
if IfStack:
IfStack[-1] = not IfStack[-1]
IfStack.append(Result)
ElifStack[-1] = ElifStack[-1] + 1
else:
raise Exception ("ERROR: No paired '!if' found for '!elif' for line '%s'" % DscLine)
else:
if IfStack:
Handle = reduce(lambda x,y: x and y, IfStack)
else:
Handle = True
if Handle:
Match = re.match("!include\s+(.+)", DscLine)
if Match:
IncludeFilePath = Match.group(1)
IncludeFilePath = self.ExpandMacros(IncludeFilePath)
PackagesPath = os.getenv("PACKAGES_PATH")
if PackagesPath:
for PackagePath in PackagesPath.split(os.pathsep):
IncludeFilePathAbs = os.path.join(os.path.normpath(PackagePath), os.path.normpath(IncludeFilePath))
if os.path.exists(IncludeFilePathAbs):
IncludeDsc = open(IncludeFilePathAbs, "r")
break
else:
IncludeDsc = open(IncludeFilePath, "r")
if IncludeDsc == None:
print("ERROR: Cannot open file '%s'" % IncludeFilePath)
raise SystemExit
# Update DscTime when newer DSC time found.
CurrentDscTime = os.path.getmtime(os.path.realpath(IncludeDsc.name))
if CurrentDscTime > self._DscTime:
self._DscTime = CurrentDscTime
NewDscLines = IncludeDsc.readlines()
IncludeDsc.close()
DscLines = NewDscLines + DscLines
del self._DscLines[-1]
Offset = 0
else:
if DscLine.startswith('!'):
print("ERROR: Unrecognized directive for line '%s'" % DscLine)
raise SystemExit
if not Handle:
del self._DscLines[-1]
continue
if IsDefSect:
#DEFINE UPD_TOOL_GUID = 8C3D856A-9BE6-468E-850A-24F7A8D38E09
#DEFINE FSP_T_UPD_TOOL_GUID = 34686CA3-34F9-4901-B82A-BA630F0714C6
#DEFINE FSP_M_UPD_TOOL_GUID = 39A250DB-E465-4DD1-A2AC-E2BD3C0E2385
#DEFINE FSP_S_UPD_TOOL_GUID = CAE3605B-5B34-4C85-B3D7-27D54273C40F
Match = re.match("^\s*(?:DEFINE\s+)*(\w+)\s*=\s*(.+)", DscLine)
if Match:
self._MacroDict[Match.group(1)] = self.ExpandMacros(Match.group(2))
if self.Debug:
print ("INFO : DEFINE %s = [ %s ]" % (Match.group(1), self.ExpandMacros(Match.group(2))))
elif IsPcdSect:
#gSiPkgTokenSpaceGuid.PcdTxtEnable|FALSE
#gSiPkgTokenSpaceGuid.PcdOverclockEnable|TRUE
Match = re.match("^\s*([\w\.]+)\s*\|\s*(\w+)", DscLine)
if Match:
self._PcdsDict[Match.group(1)] = Match.group(2)
if self.Debug:
print ("INFO : PCD %s = [ %s ]" % (Match.group(1), Match.group(2)))
i = 0
while i < len(BuildOptionPcd):
Match = re.match("\s*([\w\.]+)\s*\=\s*(\w+)", BuildOptionPcd[i])
if Match:
self._PcdsDict[Match.group(1)] = Match.group(2)
i += 1
elif IsTmpSect:
# !BSF DEFT:{GPIO_TMPL:START}
Match = re.match("^\s*#\s+(!BSF)\s+DEFT:{(.+?):(START|END)}", DscLine)
if Match:
if Match.group(3) == 'START' and not TemplateName:
TemplateName = Match.group(2).strip()
self._BsfTempDict[TemplateName] = []
if Match.group(3) == 'END' and (TemplateName == Match.group(2).strip()) and TemplateName:
TemplateName = ''
else:
if TemplateName:
Match = re.match("^!include\s*(.+)?$", DscLine)
if Match:
continue
self._BsfTempDict[TemplateName].append(DscLine)
else:
Match = re.match("^\s*#\s+(!BSF|@Bsf|!HDR)\s+(.+)", DscLine)
if Match:
Remaining = Match.group(2)
if Match.group(1) == '!BSF' or Match.group(1) == '@Bsf':
Match = re.match("(?:^|.+\s+)PAGES:{(.+?)}", Remaining)
if Match:
# !BSF PAGES:{HSW:"Haswell System Agent", LPT:"Lynx Point PCH"}
PageList = Match.group(1).split(',')
for Page in PageList:
Page = Page.strip()
Match = re.match("(\w+):\"(.+)\"", Page)
self._CfgPageDict[Match.group(1)] = Match.group(2)
Match = re.match("(?:^|.+\s+)BLOCK:{NAME:\"(.+)\"\s*,\s*VER:\"(.+)\"\s*}", Remaining)
if Match:
self._CfgBlkDict['name'] = Match.group(1)
self._CfgBlkDict['ver'] = Match.group(2)
for Key in self._BsfKeyList:
Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
if Match:
if Key in ['NAME', 'HELP', 'OPTION'] and Match.group(1).startswith('+'):
ConfigDict[Key.lower()] += Match.group(1)[1:]
else:
ConfigDict[Key.lower()] = Match.group(1)
else:
for Key in self._HdrKeyList:
Match = re.match("(?:^|.+\s+)%s:{(.+?)}" % Key, Remaining)
if Match:
ConfigDict[Key.lower()] = Match.group(1)
Match = re.match("^\s*#\s+@Prompt\s+(.+)", DscLine)
if Match:
ConfigDict['name'] = Match.group(1)
Match = re.match("^\s*#\s*@ValidList\s*(.+)\s*\|\s*(.+)\s*\|\s*(.+)\s*", DscLine)
if Match:
if Match.group(2).strip() in self._BuidinOption:
ConfigDict['option'] = Match.group(2).strip()
else:
OptionValueList = Match.group(2).split(',')
OptionStringList = Match.group(3).split(',')
Index = 0
for Option in OptionValueList:
Option = Option.strip()
ConfigDict['option'] = ConfigDict['option'] + str(Option) + ':' + OptionStringList[Index].strip()
Index += 1
if Index in range(len(OptionValueList)):
ConfigDict['option'] += ', '
ConfigDict['type'] = "Combo"
Match = re.match("^\s*#\s*@ValidRange\s*(.+)\s*\|\s*(.+)\s*-\s*(.+)\s*", DscLine)
if Match:
if "0x" in Match.group(2) or "0x" in Match.group(3):
ConfigDict['type'] = "EditNum, HEX, (%s,%s)" % (Match.group(2), Match.group(3))
else:
ConfigDict['type'] = "EditNum, DEC, (%s,%s)" % (Match.group(2), Match.group(3))
Match = re.match("^\s*##\s+(.+)", DscLine)
if Match:
ConfigDict['help'] = Match.group(1)
# Check VPD/UPD
if IsUpdSect:
Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+|\*)\s*\|\s*(\d+|0x[0-9a-fA-F]+)\s*\|\s*(.+)",DscLine)
else:
Match = re.match("^([_a-zA-Z0-9]+).([_a-zA-Z0-9]+)\s*\|\s*(0x[0-9A-F]+)(?:\s*\|\s*(.+))?", DscLine)
if Match:
ConfigDict['space'] = Match.group(1)
ConfigDict['cname'] = Match.group(2)
if Match.group(3) != '*':
Hardcode = True
Offset = int (Match.group(3), 16)
else:
AutoAlign = True
if Hardcode and AutoAlign:
print("Hardcode and auto-align mixed mode is not supported by GenCfgOpt")
raise SystemExit
ConfigDict['offset'] = Offset
if ConfigDict['order'] == -1:
ConfigDict['order'] = ConfigDict['offset'] << 8
else:
(Major, Minor) = ConfigDict['order'].split('.')
ConfigDict['order'] = (int (Major, 16) << 8 ) + int (Minor, 16)
if IsUpdSect:
Value = Match.group(5).strip()
if Match.group(4).startswith("0x"):
Length = int (Match.group(4), 16)
else :
Length = int (Match.group(4))
Offset += Length
else:
Value = Match.group(4)
if Value is None:
Value = ''
Value = Value.strip()
if '|' in Value:
Match = re.match("^.+\s*\|\s*(.+)", Value)
if Match:
Value = Match.group(1)
Length = -1
ConfigDict['length'] = Length
Match = re.match("\$\((\w+)\)", Value)
if Match:
if Match.group(1) in self._MacroDict:
Value = self._MacroDict[Match.group(1)]
ConfigDict['value'] = Value
if (len(Value) > 0) and (Value[0] == '{'):
Value = self.FormatListValue(ConfigDict)
if ConfigDict['name'] == '':
# Clear BSF specific items
ConfigDict['bsfname'] = ''
ConfigDict['help'] = ''
ConfigDict['type'] = ''
ConfigDict['option'] = ''
if IsUpdSect and AutoAlign:
ItemLength = int(ConfigDict['length'])
ItemOffset = int(ConfigDict['offset'])
ItemStruct = ConfigDict['struct']
Unit = 1
if ItemLength in [1, 2, 4, 8] and not ConfigDict['value'].startswith('{'):
Unit = ItemLength
# If there are 64 bit unit, align to 64
if Unit == 8:
MaxAlign = 64
SizeAlign = 8
if ItemStruct != '':
UnitDict = {'UINT8':1, 'UINT16':2, 'UINT32':4, 'UINT64':8}
if ItemStruct in ['UINT8', 'UINT16', 'UINT32', 'UINT64']:
Unit = UnitDict[ItemStruct]
# If there are 64 bit unit, align to 64
if Unit == 8:
MaxAlign = 64
SizeAlign = max(SizeAlign, Unit)
if (ConfigDict['embed'].find(':START') != -1):
Base = ItemOffset
SubOffset = ItemOffset - Base
SubRemainder = SubOffset % Unit
if SubRemainder:
Diff = Unit - SubRemainder
Offset = Offset + Diff
ItemOffset = ItemOffset + Diff
if (ConfigDict['embed'].find(':END') != -1):
Remainder = Offset % (MaxAlign/8) # MaxAlign is either 32 or 64
if Remainder:
Diff = int((MaxAlign/8) - Remainder)
Offset = Offset + Diff
ItemOffset = ItemOffset + Diff
MaxAlign = 32 # Reset to default 32 align when struct end
if (ConfigDict['cname'] == 'UpdTerminator'):
# ItemLength is the size of UpdTerminator
# Itemlength might be 16, 32, or 64
# Struct align to 64 if UpdTerminator
# or struct size is 64 bit, else align to 32
Remainder = Offset % max(ItemLength/8, 4, SizeAlign)
Offset = Offset + ItemLength
if Remainder:
Diff = int(max(ItemLength/8, 4, SizeAlign) - Remainder)
ItemOffset = ItemOffset + Diff
ConfigDict['offset'] = ItemOffset
self._CfgItemList.append(ConfigDict.copy())
ConfigDict['name'] = ''
ConfigDict['find'] = ''
ConfigDict['struct'] = ''
ConfigDict['embed'] = ''
ConfigDict['comment'] = ''
ConfigDict['marker'] = ''
ConfigDict['order'] = -1
ConfigDict['subreg'] = []
ConfigDict['option'] = ''
else:
# It could be a virtual item as below
# !BSF FIELD:{SerialDebugPortAddress0:1}
# or
# @Bsf FIELD:{SerialDebugPortAddress0:1b}
Match = re.match("^\s*#\s+(!BSF|@Bsf)\s+FIELD:{(.+):(\d+)([Bb])?}", DscLine)
if Match:
SubCfgDict = ConfigDict.copy()
if (Match.group(4) == None) or (Match.group(4) == 'B'):
UnitBitLen = 8
elif Match.group(4) == 'b':
UnitBitLen = 1
else:
print("ERROR: Invalide BSF FIELD length for line '%s'" % DscLine)
raise SystemExit
SubCfgDict['cname'] = Match.group(2)
SubCfgDict['bitlength'] = int (Match.group(3)) * UnitBitLen
if SubCfgDict['bitlength'] > 0:
LastItem = self._CfgItemList[-1]
if len(LastItem['subreg']) == 0:
SubOffset = 0
else:
SubOffset = LastItem['subreg'][-1]['bitoffset'] + LastItem['subreg'][-1]['bitlength']
SubCfgDict['bitoffset'] = SubOffset
LastItem['subreg'].append (SubCfgDict.copy())
ConfigDict['name'] = ''
return Error
def GetBsfBitFields (self, subitem, bytes):
start = subitem['bitoffset']
end = start + subitem['bitlength']
bitsvalue = ''.join('{0:08b}'.format(i) for i in bytes[::-1])
bitsvalue = bitsvalue[::-1]
bitslen = len(bitsvalue)
if start > bitslen or end > bitslen:
raise Exception ("Invalid bits offset [%d,%d] %d for %s" % (start, end, bitslen, subitem['name']))
return '0x%X' % (int(bitsvalue[start:end][::-1], 2))
def UpdateSubRegionDefaultValue (self):
Error = 0
for Item in self._CfgItemList:
if len(Item['subreg']) == 0:
continue
bytearray = []
if Item['value'][0] == '{':
binlist = Item['value'][1:-1].split(',')
for each in binlist:
each = each.strip()
if each.startswith('0x'):
value = int(each, 16)
else:
value = int(each)
bytearray.append(value)
else:
if Item['value'].startswith('0x'):
value = int(Item['value'], 16)
else:
value = int(Item['value'])
idx = 0
while idx < Item['length']:
bytearray.append(value & 0xFF)
value = value >> 8
idx = idx + 1
for SubItem in Item['subreg']:
valuestr = self.GetBsfBitFields(SubItem, bytearray)
SubItem['value'] = valuestr
return Error
def NoDscFileChange (self, OutPutFile):
NoFileChange = True
if not os.path.exists(OutPutFile):
NoFileChange = False
else:
OutputTime = os.path.getmtime(OutPutFile)
if self._DscTime > OutputTime:
NoFileChange = False
return NoFileChange
def CreateSplitUpdTxt (self, UpdTxtFile):
GuidList = ['FSP_T_UPD_TOOL_GUID','FSP_M_UPD_TOOL_GUID','FSP_S_UPD_TOOL_GUID']
SignatureList = ['0x545F', '0x4D5F','0x535F'] # _T, _M, and _S signature for FSPT, FSPM, FSPS
for Index in range(len(GuidList)):
UpdTxtFile = ''
FvDir = self._FvDir
if GuidList[Index] not in self._MacroDict:
self.Error = "%s definition is missing in DSC file" % (GuidList[Index])
return 1
if UpdTxtFile == '':
UpdTxtFile = os.path.join(FvDir, self._MacroDict[GuidList[Index]] + '.txt')
if (self.NoDscFileChange (UpdTxtFile)):
# DSC has not been modified yet
# So don't have to re-generate other files
self.Error = 'No DSC file change, skip to create UPD TXT file'
return 256
TxtFd = open(UpdTxtFile, "w")
TxtFd.write("%s\n" % (__copyright_txt__ % date.today().year))
NextOffset = 0
SpaceIdx = 0
StartAddr = 0
EndAddr = 0
Default = 'DEFAULT|'
InRange = False
for Item in self._CfgItemList:
if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
StartAddr = Item['offset']
NextOffset = StartAddr
InRange = True
if Item['cname'] == 'UpdTerminator' and InRange == True:
EndAddr = Item['offset']
InRange = False
InRange = False
for Item in self._CfgItemList:
if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == SignatureList[Index]:
InRange = True
if InRange != True:
continue
if Item['cname'] == 'UpdTerminator':
InRange = False
if Item['region'] != 'UPD':
continue
Offset = Item['offset']
if StartAddr > Offset or EndAddr < Offset:
continue
if NextOffset < Offset:
# insert one line
TxtFd.write("%s.UnusedUpdSpace%d|%s0x%04X|0x%04X|{0}\n" % (Item['space'], SpaceIdx, Default, NextOffset - StartAddr, Offset - NextOffset))
SpaceIdx = SpaceIdx + 1
NextOffset = Offset + Item['length']
TxtFd.write("%s.%s|%s0x%04X|%s|%s\n" % (Item['space'],Item['cname'],Default,Item['offset'] - StartAddr,Item['length'],Item['value']))
TxtFd.close()
return 0
def CreateVarDict (self):
Error = 0
self._VarDict = {}
if len(self._CfgItemList) > 0:
Item = self._CfgItemList[-1]
self._VarDict['_LENGTH_'] = '%d' % (Item['offset'] + Item['length'])
for Item in self._CfgItemList:
Embed = Item['embed']
Match = re.match("^(\w+):(\w+):(START|END)", Embed)
if Match:
StructName = Match.group(1)
VarName = '_%s_%s_' % (Match.group(3), StructName)
if Match.group(3) == 'END':
self._VarDict[VarName] = Item['offset'] + Item['length']
self._VarDict['_LENGTH_%s_' % StructName] = \
self._VarDict['_END_%s_' % StructName] - self._VarDict['_START_%s_' % StructName]
if Match.group(2).startswith('TAG_'):
if (self.Mode != 'FSP') and (self._VarDict['_LENGTH_%s_' % StructName] % 4):
raise Exception("Size of structure '%s' is %d, not DWORD aligned !" % (StructName, self._VarDict['_LENGTH_%s_' % StructName]))
self._VarDict['_TAG_%s_' % StructName] = int (Match.group(2)[4:], 16) & 0xFFF
else:
self._VarDict[VarName] = Item['offset']
if Item['marker']:
self._VarDict['_OFFSET_%s_' % Item['marker'].strip()] = Item['offset']
return Error
def UpdateBsfBitUnit (self, Item):
BitTotal = 0
BitOffset = 0
StartIdx = 0
Unit = None
UnitDec = {1:'BYTE', 2:'WORD', 4:'DWORD', 8:'QWORD'}
for Idx, SubItem in enumerate(Item['subreg']):
if Unit is None:
Unit = SubItem['bitunit']
BitLength = SubItem['bitlength']
BitTotal += BitLength
BitOffset += BitLength
if BitOffset > 64 or BitOffset > Unit * 8:
break
if BitOffset == Unit * 8:
for SubIdx in range (StartIdx, Idx + 1):
Item['subreg'][SubIdx]['bitunit'] = Unit
BitOffset = 0
StartIdx = Idx + 1
Unit = None
if BitOffset > 0:
raise Exception ("Bit fields cannot fit into %s for '%s.%s' !" % (UnitDec[Unit], Item['cname'], SubItem['cname']))
ExpectedTotal = Item['length'] * 8
if Item['length'] * 8 != BitTotal:
raise Exception ("Bit fields total length (%d) does not match length (%d) of '%s' !" % (BitTotal, ExpectedTotal, Item['cname']))
def UpdateDefaultValue (self):
Error = 0
for Idx, Item in enumerate(self._CfgItemList):
if len(Item['subreg']) == 0:
Value = Item['value']
if (len(Value) > 0) and (Value[0] == '{' or Value[0] == "'" or Value[0] == '"'):
# {XXX} or 'XXX' strings
self.FormatListValue(self._CfgItemList[Idx])
else:
Match = re.match("(0x[0-9a-fA-F]+|[0-9]+)", Value)
if not Match:
NumValue = self.EvaluateExpress (Value)
Item['value'] = '0x%X' % NumValue
else:
ValArray = self.ValueToByteArray (Item['value'], Item['length'])
for SubItem in Item['subreg']:
SubItem['value'] = self.GetBsfBitFields(SubItem, ValArray)
self.UpdateBsfBitUnit (Item)
return Error
def ProcessMultilines (self, String, MaxCharLength):
Multilines = ''
StringLength = len(String)
CurrentStringStart = 0
StringOffset = 0
BreakLineDict = []
if len(String) <= MaxCharLength:
while (StringOffset < StringLength):
if StringOffset >= 1:
if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
BreakLineDict.append (StringOffset + 1)
StringOffset += 1
if BreakLineDict != []:
for Each in BreakLineDict:
Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip()
CurrentStringStart = Each
if StringLength - CurrentStringStart > 0:
Multilines += " %s\n" % String[CurrentStringStart:].lstrip()
else:
Multilines = " %s\n" % String
else:
NewLineStart = 0
NewLineCount = 0
FoundSpaceChar = False
while (StringOffset < StringLength):
if StringOffset >= 1:
if NewLineCount >= MaxCharLength - 1:
if String[StringOffset] == ' ' and StringLength - StringOffset > 10:
BreakLineDict.append (NewLineStart + NewLineCount)
NewLineStart = NewLineStart + NewLineCount
NewLineCount = 0
FoundSpaceChar = True
elif StringOffset == StringLength - 1 and FoundSpaceChar == False:
BreakLineDict.append (0)
if String[StringOffset - 1] == '\\' and String[StringOffset] == 'n':
BreakLineDict.append (StringOffset + 1)
NewLineStart = StringOffset + 1
NewLineCount = 0
StringOffset += 1
NewLineCount += 1
if BreakLineDict != []:
BreakLineDict.sort ()
for Each in BreakLineDict:
if Each > 0:
Multilines += " %s\n" % String[CurrentStringStart:Each].lstrip()
CurrentStringStart = Each
if StringLength - CurrentStringStart > 0:
Multilines += " %s\n" % String[CurrentStringStart:].lstrip()
return Multilines
def CreateField (self, Item, Name, Length, Offset, Struct, BsfName, Help, Option, BitsLength = None):
PosName = 28
PosComment = 30
NameLine=''
HelpLine=''
OptionLine=''
if Length == 0 and Name == 'Dummy':
return '\n'
IsArray = False
if Length in [1,2,4,8]:
Type = "UINT%d" % (Length * 8)
if Name.startswith("UnusedUpdSpace") and Length != 1:
IsArray = True
Type = "UINT8"
else:
IsArray = True
Type = "UINT8"
if Item and Item['value'].startswith('{'):
Type = "UINT8"
IsArray = True
if Struct != '':
Type = Struct
if Struct in ['UINT8','UINT16','UINT32','UINT64']:
IsArray = True
Unit = int(Type[4:]) / 8
Length = Length / Unit
else:
IsArray = False
if IsArray:
Name = Name + '[%d]' % Length
if len(Type) < PosName:
Space1 = PosName - len(Type)
else:
Space1 = 1
if BsfName != '':
NameLine=" - %s\n" % BsfName
else:
NameLine="\n"
if Help != '':
HelpLine = self.ProcessMultilines (Help, 80)
if Option != '':
OptionLine = self.ProcessMultilines (Option, 80)
if Offset is None:
OffsetStr = '????'
else:
OffsetStr = '0x%04X' % Offset
if BitsLength is None:
BitsLength = ''
else:
BitsLength = ' : %d' % BitsLength
return "\n/** Offset %s%s%s%s**/\n %s%s%s%s;\n" % (OffsetStr, NameLine, HelpLine, OptionLine, Type, ' ' * Space1, Name, BitsLength)
def PostProcessBody (self, TextBody):
NewTextBody = []
OldTextBody = []
IncludeLine = False
StructName = ''
VariableName = ''
IsUpdHdrDefined = False
IsUpdHeader = False
for Line in TextBody:
SplitToLines = Line.splitlines()
MatchComment = re.match("^/\*\sCOMMENT:(\w+):([\w|\W|\s]+)\s\*/\s([\s\S]*)", SplitToLines[0])
if MatchComment:
if MatchComment.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if IsUpdHdrDefined != True or IsUpdHeader != True:
CommentLine = " " + MatchComment.group(2) + "\n"
NewTextBody.append("/**" + CommentLine + "**/\n")
Line = Line[(len(SplitToLines[0]) + 1):]
Match = re.match("^/\*\sEMBED_STRUCT:(\w+):(\w+):(START|END)\s\*/\s([\s\S]*)", Line)
if Match:
Line = Match.group(4)
if Match.group(1) == 'FSP_UPD_HEADER':
IsUpdHeader = True
else:
IsUpdHeader = False
if Match and Match.group(3) == 'START':
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('typedef struct {\n')
StructName = Match.group(1)
VariableName = Match.group(2)
MatchOffset = re.search('/\*\*\sOffset\s0x([a-fA-F0-9]+)', Line)
if MatchOffset:
Offset = int(MatchOffset.group(1), 16)
else:
Offset = None
Line
IncludeLine = True
OldTextBody.append (self.CreateField (None, VariableName, 0, Offset, StructName, '', '', ''))
if IncludeLine:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append (Line)
else:
OldTextBody.append (Line)
if Match and Match.group(3) == 'END':
if (StructName != Match.group(1)) or (VariableName != Match.group(2)):
print ("Unmatched struct name '%s' and '%s' !" % (StructName, Match.group(1)))
else:
if IsUpdHdrDefined != True or IsUpdHeader != True:
NewTextBody.append ('} %s;\n\n' % StructName)
IsUpdHdrDefined = True
IncludeLine = False
NewTextBody.extend(OldTextBody)
return NewTextBody
def WriteLinesWithoutTailingSpace (self, HeaderFd, Line):
TxtBody2 = Line.splitlines(True)
for Line2 in TxtBody2:
Line2 = Line2.rstrip()
Line2 += '\n'
HeaderFd.write (Line2)
return 0
def CreateHeaderFile (self, InputHeaderFile):
FvDir = self._FvDir
HeaderFileName = 'FspUpd.h'
HeaderFile = os.path.join(FvDir, HeaderFileName)
# Check if header needs to be recreated
if (self.NoDscFileChange (HeaderFile)):
# DSC has not been modified yet
# So don't have to re-generate other files
self.Error = 'No DSC file change, skip to create UPD header file'
return 256
TxtBody = []
for Item in self._CfgItemList:
if str(Item['cname']) == 'Signature' and Item['length'] == 8:
Value = int(Item['value'], 16)
Chars = []
while Value != 0x0:
Chars.append(chr(Value & 0xFF))
Value = Value >> 8
SignatureStr = ''.join(Chars)
# Signature will be _T / _M / _S for FSPT / FSPM / FSPS accordingly
if '_T' in SignatureStr[6:6+2]:
TxtBody.append("#define FSPT_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
elif '_M' in SignatureStr[6:6+2]:
TxtBody.append("#define FSPM_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
elif '_S' in SignatureStr[6:6+2]:
TxtBody.append("#define FSPS_UPD_SIGNATURE %s /* '%s' */\n\n" % (Item['value'], SignatureStr))
TxtBody.append("\n")
for Region in ['UPD']:
UpdOffsetTable = []
UpdSignature = ['0x545F', '0x4D5F', '0x535F'] #['_T', '_M', '_S'] signature for FSPT, FSPM, FSPS
UpdStructure = ['FSPT_UPD', 'FSPM_UPD', 'FSPS_UPD']
for Item in self._CfgItemList:
if Item["cname"] == 'Signature' and Item["value"][0:6] in UpdSignature:
Item["offset"] = 0 # re-initialize offset to 0 when new UPD structure starting
UpdOffsetTable.append (Item["offset"])
for UpdIdx in range(len(UpdOffsetTable)):
CommentLine = ""
for Item in self._CfgItemList:
if Item["comment"] != '' and Item["offset"] >= UpdOffsetTable[UpdIdx]:
MatchComment = re.match("^(U|V)PD_DATA_REGION:([\w|\W|\s]+)", Item["comment"])
if MatchComment and MatchComment.group(1) == Region[0]:
CommentLine = " " + MatchComment.group(2) + "\n"
TxtBody.append("/**" + CommentLine + "**/\n")
elif Item["offset"] >= UpdOffsetTable[UpdIdx] and Item["comment"] == '':
Match = re.match("^FSP([\w|\W|\s])_UPD", UpdStructure[UpdIdx])
if Match:
TxtBody.append("/** Fsp " + Match.group(1) + " UPD Configuration\n**/\n")
TxtBody.append("typedef struct {\n")
NextOffset = 0
SpaceIdx = 0
Offset = 0
LastVisible = True
ResvOffset = 0
ResvIdx = 0
LineBuffer = []
InRange = False
for Item in self._CfgItemList:
if Item['cname'] == 'Signature' and str(Item['value'])[0:6] == UpdSignature[UpdIdx] or Region[0] == 'V':
InRange = True
if InRange != True:
continue
if Item['cname'] == 'UpdTerminator':
InRange = False
if Item['region'] != Region:
continue
if Item["offset"] < UpdOffsetTable[UpdIdx]:
continue
NextVisible = LastVisible
if LastVisible and (Item['header'] == 'OFF'):
NextVisible = False
ResvOffset = Item['offset']
elif (not LastVisible) and Item['header'] == 'ON':
NextVisible = True
Name = "Reserved" + Region[0] + "pdSpace%d" % ResvIdx
ResvIdx = ResvIdx + 1
TxtBody.append(self.CreateField (Item, Name, Item["offset"] - ResvOffset, ResvOffset, '', '', '', ''))
if Offset < Item["offset"]:
if LastVisible:
Name = "Unused" + Region[0] + "pdSpace%d" % SpaceIdx
LineBuffer.append(self.CreateField (Item, Name, Item["offset"] - Offset, Offset, '', '', '', ''))
SpaceIdx = SpaceIdx + 1
Offset = Item["offset"]
LastVisible = NextVisible
Offset = Offset + Item["length"]
if LastVisible:
for Each in LineBuffer:
TxtBody.append (Each)
LineBuffer = []
Comment = Item["comment"]
Embed = Item["embed"].upper()
if Embed.endswith(':START') or Embed.endswith(':END'):
if not Comment == '' and Embed.endswith(':START'):
Marker = '/* COMMENT:%s */ \n' % Item["comment"]
Marker = Marker + '/* EMBED_STRUCT:%s */ ' % Item["embed"]
else:
Marker = '/* EMBED_STRUCT:%s */ ' % Item["embed"]
else:
if Embed == '':
Marker = ''
else:
self.Error = "Invalid embedded structure format '%s'!\n" % Item["embed"]
return 4
Line = Marker + self.CreateField (Item, Item["cname"], Item["length"], Item["offset"], Item['struct'], Item['name'], Item['help'], Item['option'])
TxtBody.append(Line)
if Item['cname'] == 'UpdTerminator':
break
TxtBody.append("} " + UpdStructure[UpdIdx] + ";\n\n")
# Handle the embedded data structure
TxtBody = self.PostProcessBody (TxtBody)
HeaderTFileName = 'FsptUpd.h'
HeaderMFileName = 'FspmUpd.h'
HeaderSFileName = 'FspsUpd.h'
UpdRegionCheck = ['FSPT', 'FSPM', 'FSPS'] # FSPX_UPD_REGION
UpdConfigCheck = ['FSP_T', 'FSP_M', 'FSP_S'] # FSP_X_CONFIG, FSP_X_TEST_CONFIG, FSP_X_RESTRICTED_CONFIG
UpdSignatureCheck = ['FSPT_UPD_SIGNATURE', 'FSPM_UPD_SIGNATURE', 'FSPS_UPD_SIGNATURE']
ExcludedSpecificUpd = ['FSPT_ARCH_UPD', 'FSPM_ARCH_UPD', 'FSPS_ARCH_UPD']
IncLines = []
if InputHeaderFile != '':
if not os.path.exists(InputHeaderFile):
self.Error = "Input header file '%s' does not exist" % InputHeaderFile
return 6
InFd = open(InputHeaderFile, "r")
IncLines = InFd.readlines()
InFd.close()
for item in range(len(UpdRegionCheck)):
if UpdRegionCheck[item] == 'FSPT':
HeaderFd = open(os.path.join(FvDir, HeaderTFileName), "w")
FileBase = os.path.basename(os.path.join(FvDir, HeaderTFileName))
elif UpdRegionCheck[item] == 'FSPM':
HeaderFd = open(os.path.join(FvDir, HeaderMFileName), "w")
FileBase = os.path.basename(os.path.join(FvDir, HeaderMFileName))
elif UpdRegionCheck[item] == 'FSPS':
HeaderFd = open(os.path.join(FvDir, HeaderSFileName), "w")
FileBase = os.path.basename(os.path.join(FvDir, HeaderSFileName))
FileName = FileBase.replace(".", "_").upper()
HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year))
HeaderFd.write("#ifndef __%s__\n" % FileName)
HeaderFd.write("#define __%s__\n\n" % FileName)
HeaderFd.write("#include <%s>\n\n" % HeaderFileName)
HeaderFd.write("#pragma pack(1)\n\n")
Export = False
for Line in IncLines:
Match = re.search ("!EXPORT\s+([A-Z]+)\s+EXTERNAL_BOOTLOADER_STRUCT_(BEGIN|END)\s+", Line)
if Match:
if Match.group(2) == "BEGIN" and Match.group(1) == UpdRegionCheck[item]:
Export = True
continue
else:
Export = False
continue
if Export:
HeaderFd.write(Line)
HeaderFd.write("\n")
Index = 0
StartIndex = 0
EndIndex = 0
StructStart = []
StructStartWithComment = []
StructEnd = []
for Line in TxtBody:
Index += 1
Match = re.match("(typedef struct {)", Line)
if Match:
StartIndex = Index - 1
Match = re.match("}\s([_A-Z0-9]+);", Line)
if Match and (UpdRegionCheck[item] in Match.group(1) or UpdConfigCheck[item] in Match.group(1)) and (ExcludedSpecificUpd[item] not in Match.group(1)):
EndIndex = Index
StructStart.append(StartIndex)
StructEnd.append(EndIndex)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index == StructStart[Item]:
Match = re.match("^(/\*\*\s*)", Line)
if Match:
StructStartWithComment.append(StructStart[Item])
else:
StructStartWithComment.append(StructStart[Item] + 1)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
HeaderFd.write("#pragma pack()\n\n")
HeaderFd.write("#endif\n")
HeaderFd.close()
HeaderFd = open(HeaderFile, "w")
FileBase = os.path.basename(HeaderFile)
FileName = FileBase.replace(".", "_").upper()
HeaderFd.write("%s\n" % (__copyright_h__ % date.today().year))
HeaderFd.write("#ifndef __%s__\n" % FileName)
HeaderFd.write("#define __%s__\n\n" % FileName)
HeaderFd.write("#include <FspEas.h>\n\n")
HeaderFd.write("#pragma pack(1)\n\n")
for item in range(len(UpdRegionCheck)):
Index = 0
StartIndex = 0
EndIndex = 0
StructStart = []
StructStartWithComment = []
StructEnd = []
for Line in TxtBody:
Index += 1
Match = re.match("(typedef struct {)", Line)
if Match:
StartIndex = Index - 1
Match = re.match("#define\s([_A-Z0-9]+)\s*", Line)
if Match and (UpdSignatureCheck[item] in Match.group(1) or UpdSignatureCheck[item] in Match.group(1)):
StructStart.append(Index - 1)
StructEnd.append(Index)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index == StructStart[Item]:
Match = re.match("^(/\*\*\s*)", Line)
if Match:
StructStartWithComment.append(StructStart[Item])
else:
StructStartWithComment.append(StructStart[Item] + 1)
Index = 0
for Line in TxtBody:
Index += 1
for Item in range(len(StructStart)):
if Index >= StructStartWithComment[Item] and Index <= StructEnd[Item]:
self.WriteLinesWithoutTailingSpace(HeaderFd, Line)
HeaderFd.write("#pragma pack()\n\n")
HeaderFd.write("#endif\n")
HeaderFd.close()
return 0
def WriteBsfStruct (self, BsfFd, Item):
LogExpr = CLogicalExpression()
if Item['type'] == "None":
Space = "gPlatformFspPkgTokenSpaceGuid"
else:
Space = Item['space']
Line = " $%s_%s" % (Space, Item['cname'])
Match = re.match("\s*\{([x0-9a-fA-F,\s]+)\}\s*", Item['value'])
if Match:
DefaultValue = Match.group(1).strip()
else:
DefaultValue = Item['value'].strip()
if 'bitlength' in Item:
BsfFd.write(" %s%s%4d bits $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['bitlength'], DefaultValue))
else:
BsfFd.write(" %s%s%4d bytes $_DEFAULT_ = %s\n" % (Line, ' ' * (64 - len(Line)), Item['length'], DefaultValue))
TmpList = []
if Item['type'] == "Combo":
if not Item['option'] in self._BuidinOption:
OptList = Item['option'].split(',')
for Option in OptList:
Option = Option.strip()
(OpVal, OpStr) = Option.split(':')
test = LogExpr.getNumber (OpVal)
if test is None:
raise Exception("Selection Index '%s' is not a number" % OpVal)
TmpList.append((OpVal, OpStr))
return TmpList
def WriteBsfOption (self, BsfFd, Item):
PcdName = Item['space'] + '_' + Item['cname']
WriteHelp = 0
if Item['type'] == "Combo":
if Item['option'] in self._BuidinOption:
Options = self._BuidinOption[Item['option']]
else:
Options = PcdName
BsfFd.write(' %s $%s, "%s", &%s,\n' % (Item['type'], PcdName, Item['name'], Options))
WriteHelp = 1
elif Item['type'].startswith("EditNum"):
Match = re.match("EditNum\s*,\s*(HEX|DEC)\s*,\s*\((\d+|0x[0-9A-Fa-f]+)\s*,\s*(\d+|0x[0-9A-Fa-f]+)\)", Item['type'])
if Match:
BsfFd.write(' EditNum $%s, "%s", %s,\n' % (PcdName, Item['name'], Match.group(1)))
WriteHelp = 2
elif Item['type'].startswith("EditText"):
BsfFd.write(' %s $%s, "%s",\n' % (Item['type'], PcdName, Item['name']))
WriteHelp = 1
elif Item['type'] == "Table":
Columns = Item['option'].split(',')
if len(Columns) != 0:
BsfFd.write(' %s $%s "%s",' % (Item['type'], PcdName, Item['name']))
for Col in Columns:
Fmt = Col.split(':')
if len(Fmt) != 3:
raise Exception("Column format '%s' is invalid !" % Fmt)
try:
Dtype = int(Fmt[1].strip())
except:
raise Exception("Column size '%s' is invalid !" % Fmt[1])
BsfFd.write('\n Column "%s", %d bytes, %s' % (Fmt[0].strip(), Dtype, Fmt[2].strip()))
BsfFd.write(',\n')
WriteHelp = 1
if WriteHelp > 0:
HelpLines = Item['help'].split('\\n\\r')
FirstLine = True
for HelpLine in HelpLines:
if FirstLine:
FirstLine = False
BsfFd.write(' Help "%s"\n' % (HelpLine))
else:
BsfFd.write(' "%s"\n' % (HelpLine))
if WriteHelp == 2:
BsfFd.write(' "Valid range: %s ~ %s"\n' % (Match.group(2), Match.group(3)))
def GenerateBsfFile (self, BsfFile):
if BsfFile == '':
self.Error = "BSF output file '%s' is invalid" % BsfFile
return 1
if (self.NoDscFileChange (BsfFile)):
# DSC has not been modified yet
# So don't have to re-generate other files
self.Error = 'No DSC file change, skip to create UPD BSF file'
return 256
Error = 0
OptionDict = {}
BsfFd = open(BsfFile, "w")
BsfFd.write("%s\n" % (__copyright_bsf__ % date.today().year))
BsfFd.write("%s\n" % self._GlobalDataDef)
BsfFd.write("StructDef\n")
NextOffset = -1
for Item in self._CfgItemList:
if Item['find'] != '':
BsfFd.write('\n Find "%s"\n' % Item['find'])
NextOffset = Item['offset'] + Item['length']
if Item['name'] != '':
if NextOffset != Item['offset']:
BsfFd.write(" Skip %d bytes\n" % (Item['offset'] - NextOffset))
if len(Item['subreg']) > 0:
NextOffset = Item['offset']
BitsOffset = NextOffset * 8
for SubItem in Item['subreg']:
BitsOffset += SubItem['bitlength']
if SubItem['name'] == '':
if 'bitlength' in SubItem:
BsfFd.write(" Skip %d bits\n" % (SubItem['bitlength']))
else:
BsfFd.write(" Skip %d bytes\n" % (SubItem['length']))
else:
Options = self.WriteBsfStruct(BsfFd, SubItem)
if len(Options) > 0:
OptionDict[SubItem['space']+'_'+SubItem['cname']] = Options
NextBitsOffset = (Item['offset'] + Item['length']) * 8
if NextBitsOffset > BitsOffset:
BitsGap = NextBitsOffset - BitsOffset
BitsRemain = BitsGap % 8
if BitsRemain:
BsfFd.write(" Skip %d bits\n" % BitsRemain)
BitsGap -= BitsRemain
BytesRemain = int(BitsGap / 8)
if BytesRemain:
BsfFd.write(" Skip %d bytes\n" % BytesRemain)
NextOffset = Item['offset'] + Item['length']
else:
NextOffset = Item['offset'] + Item['length']
Options = self.WriteBsfStruct(BsfFd, Item)
if len(Options) > 0:
OptionDict[Item['space']+'_'+Item['cname']] = Options
BsfFd.write("\nEndStruct\n\n")
BsfFd.write("%s" % self._BuidinOptionTxt)
for Each in OptionDict:
BsfFd.write("List &%s\n" % Each)
for Item in OptionDict[Each]:
BsfFd.write(' Selection %s , "%s"\n' % (Item[0], Item[1]))
BsfFd.write("EndList\n\n")
BsfFd.write("BeginInfoBlock\n")
BsfFd.write(' PPVer "%s"\n' % (self._CfgBlkDict['ver']))
BsfFd.write(' Description "%s"\n' % (self._CfgBlkDict['name']))
BsfFd.write("EndInfoBlock\n\n")
for Each in self._CfgPageDict:
BsfFd.write('Page "%s"\n' % self._CfgPageDict[Each])
BsfItems = []
for Item in self._CfgItemList:
if Item['name'] != '':
if Item['page'] != Each:
continue
if len(Item['subreg']) > 0:
for SubItem in Item['subreg']:
if SubItem['name'] != '':
BsfItems.append(SubItem)
else:
BsfItems.append(Item)
BsfItems.sort(key=lambda x: x['order'])
for Item in BsfItems:
self.WriteBsfOption (BsfFd, Item)
BsfFd.write("EndPage\n\n")
BsfFd.close()
return Error
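# Illustrative sketch (added for clarity; not part of the original tool): the usual
# programmatic flow mirrors Main() below -- parse macros, parse the DSC file,
# resolve sub-region defaults, then emit one of the output artifacts. The DSC and
# output paths used here are placeholders, not real build inputs.
def _example_generate_upd_txt(DscFile='FspPkg.dsc', FvDir='Build/FV'):
    Gen = CGenCfgOpt()
    if Gen.ParseMacros(['-D', 'CFG_DEBUG=1']) != 0:
        return Gen.Error
    if Gen.ParseDscFile(DscFile, FvDir) != 0:
        return Gen.Error
    if Gen.UpdateSubRegionDefaultValue() != 0:
        return Gen.Error
    # CreateHeaderFile() or GenerateBsfFile() could be called here instead.
    return Gen.CreateSplitUpdTxt('')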
def Usage():
print ("GenCfgOpt Version 0.56")
print ("Usage:")
print (" GenCfgOpt UPDTXT PlatformDscFile BuildFvDir [-D Macros]")
print (" GenCfgOpt HEADER PlatformDscFile BuildFvDir InputHFile [-D Macros]")
print (" GenCfgOpt GENBSF PlatformDscFile BuildFvDir BsfOutFile [-D Macros]")
def Main():
#
# Parse the options and args
#
i = 1
GenCfgOpt = CGenCfgOpt()
while i < len(sys.argv):
if sys.argv[i].strip().lower() == "--pcd":
BuildOptionPcd.append(sys.argv[i+1])
i += 1
i += 1
argc = len(sys.argv)
if argc < 4:
Usage()
return 1
else:
DscFile = sys.argv[2]
if not os.path.exists(DscFile):
print ("ERROR: Cannot open DSC file '%s' !" % DscFile)
return 2
OutFile = ''
if argc > 4:
if sys.argv[4][0] == '-':
Start = 4
else:
OutFile = sys.argv[4]
Start = 5
if argc > Start:
if GenCfgOpt.ParseMacros(sys.argv[Start:]) != 0:
print ("ERROR: Macro parsing failed !")
return 3
FvDir = sys.argv[3]
if not os.path.exists(FvDir):
os.makedirs(FvDir)
if GenCfgOpt.ParseDscFile(DscFile, FvDir) != 0:
print ("ERROR: %s !" % GenCfgOpt.Error)
return 5
if GenCfgOpt.UpdateSubRegionDefaultValue() != 0:
print ("ERROR: %s !" % GenCfgOpt.Error)
return 7
if sys.argv[1] == "UPDTXT":
Ret = GenCfgOpt.CreateSplitUpdTxt(OutFile)
if Ret != 0:
# No change is detected
if Ret == 256:
print ("INFO: %s !" % (GenCfgOpt.Error))
else :
print ("ERROR: %s !" % (GenCfgOpt.Error))
return Ret
elif sys.argv[1] == "HEADER":
Ret = GenCfgOpt.CreateHeaderFile(OutFile)
if Ret != 0:
# No change is detected
if Ret == 256:
print ("INFO: %s !" % (GenCfgOpt.Error))
else :
print ("ERROR: %s !" % (GenCfgOpt.Error))
return 8
return Ret
elif sys.argv[1] == "GENBSF":
Ret = GenCfgOpt.GenerateBsfFile(OutFile)
if Ret != 0:
# No change is detected
if Ret == 256:
print ("INFO: %s !" % (GenCfgOpt.Error))
else :
print ("ERROR: %s !" % (GenCfgOpt.Error))
return 9
return Ret
else:
if argc < 5:
Usage()
return 1
print ("ERROR: Unknown command '%s' !" % sys.argv[1])
Usage()
return 1
return 0
return 0
if __name__ == '__main__':
sys.exit(Main())
| 42.508096 | 170 | 0.461554 |
4a2489d999f6ea020a985ad463465b0c710e5946 | 10,201 | py | Python | tensorflow/contrib/learn/python/learn/estimators/rnn.py | larry-fuy/tensorflow_xeonphi | 787ab22d490e79ea8c06511d60d6cddf1b2dd2c2 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/learn/python/learn/estimators/rnn.py | larry-fuy/tensorflow_xeonphi | 787ab22d490e79ea8c06511d60d6cddf1b2dd2c2 | [
"Apache-2.0"
] | null | null | null | tensorflow/contrib/learn/python/learn/estimators/rnn.py | larry-fuy/tensorflow_xeonphi | 787ab22d490e79ea8c06511d60d6cddf1b2dd2c2 | [
"Apache-2.0"
] | 1 | 2019-03-21T15:49:35.000Z | 2019-03-21T15:49:35.000Z | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Recurrent Neural Network estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn import models
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators.base import TensorFlowEstimator
def null_input_op_fn(x):
"""This function does no transformation on the inputs, used as default."""
return x
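# Illustrative sketch (added for clarity; not part of the original module): a custom
# input_op_fn receives the raw input tensor and returns whatever the RNN should
# consume, e.g. an embedding lookup for word ids. The names below (tf, embeddings,
# vocabulary/embedding sizes) are assumptions supplied by the caller's graph code:
#
#   def embed_input_op_fn(x):
#       # 'embeddings' is a [vocab_size, embed_dim] variable created elsewhere.
#       return tf.nn.embedding_lookup(embeddings, x)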
class TensorFlowRNNClassifier(TensorFlowEstimator, _sklearn.ClassifierMixin):
"""TensorFlow RNN Classifier model."""
def __init__(self,
rnn_size,
n_classes,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
class_weight=None,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNClassifier instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
n_classes: Number of classes in the target.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=2, decay_rate=0.001)
````
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are
supposed to have weight one.
continue_training: when continue_training is True, once initialized
model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNClassifier, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
class_weight=class_weight,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.logistic_regression,
self.sequence_length, self.initial_state,
self.attn_length, self.attn_size,
self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('logistic_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('logistic_regression/weights')
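# Illustrative usage sketch (added for clarity; not part of the original module).
# The constructor values below are placeholders; fit() is inherited from the
# TensorFlowEstimator base class and is assumed to accept (x, y) as elsewhere in
# contrib.learn.
def _example_rnn_classifier(x_train, y_train):
    classifier = TensorFlowRNNClassifier(
        rnn_size=32, n_classes=2, cell_type='gru', num_layers=1,
        batch_size=32, steps=100, learning_rate=0.1)
    classifier.fit(x_train, y_train)
    return classifier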
class TensorFlowRNNRegressor(TensorFlowEstimator, _sklearn.RegressorMixin):
"""TensorFlow RNN Regressor model."""
def __init__(self,
rnn_size,
cell_type='gru',
num_layers=1,
input_op_fn=null_input_op_fn,
initial_state=None,
bidirectional=False,
sequence_length=None,
attn_length=None,
attn_size=None,
attn_vec_size=None,
n_classes=0,
batch_size=32,
steps=50,
optimizer='Adagrad',
learning_rate=0.1,
clip_gradients=5.0,
continue_training=False,
config=None,
verbose=1):
"""Initializes a TensorFlowRNNRegressor instance.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument x for input and returns transformed x.
bidirectional: boolean, Whether this is a bidirectional rnn.
sequence_length: If sequence_length is provided, dynamic calculation
is performed. This saves computational time when unrolling past max
sequence length.
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell state.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is constant float value, no decay function is
used. Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
````python
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step=global_step,
decay_steps=2, decay_rate=0.001)
````
continue_training: when continue_training is True, once initialized
model will be continually trained on every call of fit.
config: RunConfig object that controls the configurations of the
session, e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
* 0: the algorithm and debug information is muted.
* 1: trainer prints the progress.
* 2: log device placement is printed.
"""
self.rnn_size = rnn_size
self.cell_type = cell_type
self.input_op_fn = input_op_fn
self.bidirectional = bidirectional
self.num_layers = num_layers
self.sequence_length = sequence_length
self.initial_state = initial_state
self.attn_length = attn_length
self.attn_size = attn_size
self.attn_vec_size = attn_vec_size
super(TensorFlowRNNRegressor, self).__init__(
model_fn=self._model_fn,
n_classes=n_classes,
batch_size=batch_size,
steps=steps,
optimizer=optimizer,
learning_rate=learning_rate,
clip_gradients=clip_gradients,
continue_training=continue_training,
config=config,
verbose=verbose)
def _model_fn(self, x, y):
return models.get_rnn_model(self.rnn_size, self.cell_type, self.num_layers,
self.input_op_fn, self.bidirectional,
models.linear_regression, self.sequence_length,
self.initial_state, self.attn_length,
self.attn_size, self.attn_vec_size)(x, y)
@property
def bias_(self):
"""Returns bias of the rnn layer."""
return self.get_variable_value('linear_regression/bias')
@property
def weights_(self):
"""Returns weights of the rnn layer."""
return self.get_variable_value('linear_regression/weights')
| 40.967871 | 85 | 0.657975 |
4a248a156c2674c64730d1a0b3db72fc0918097c | 6,867 | py | Python | ui/visualizations/VolumeVisualizationSimple.py | berendkleinhaneveld/Registrationshop | 0d6f3ee5324865cdcb419369139f37c39dfe9a1c | [
"MIT"
] | 25 | 2015-11-08T16:36:54.000Z | 2022-01-20T16:03:28.000Z | ui/visualizations/VolumeVisualizationSimple.py | berendkleinhaneveld/Registrationshop | 0d6f3ee5324865cdcb419369139f37c39dfe9a1c | [
"MIT"
] | 2 | 2016-12-01T23:13:08.000Z | 2017-07-25T02:40:49.000Z | ui/visualizations/VolumeVisualizationSimple.py | berendkleinhaneveld/Registrationshop | 0d6f3ee5324865cdcb419369139f37c39dfe9a1c | [
"MIT"
] | 10 | 2016-07-05T14:39:16.000Z | 2022-01-01T02:05:55.000Z | """
VolumeVisualizationSimple
:Authors:
Berend Klein Haneveld
"""
from VolumeVisualization import VolumeVisualization
from VolumeVisualization import VisualizationTypeSimple
from vtk import vtkVolumeProperty
from vtk import vtkColorTransferFunction
from vtk import vtkPiecewiseFunction
from ui.widgets.SliderFloatWidget import SliderFloatWidget
from ui.widgets.ColorWidget import ColorChoiceWidget
from core.decorators import overrides
from PySide.QtGui import QWidget
from PySide.QtGui import QGridLayout
from PySide.QtGui import QGroupBox
from PySide.QtCore import Qt
class VolumeVisualizationSimple(VolumeVisualization):
"""
VolumeVisualization subclass for a simple visualization.
"""
def __init__(self):
super(VolumeVisualizationSimple, self).__init__()
self.visualizationType = VisualizationTypeSimple
# Create the volume property
self.volProp = vtkVolumeProperty()
self.volProp.SetIndependentComponents(True)
self.volProp.SetInterpolationTypeToLinear()
self.volProp.ShadeOn()
self.volProp.SetAmbient(0.3)
self.volProp.SetDiffuse(0.8)
self.volProp.SetSpecular(0.2)
self.volProp.SetSpecularPower(10.0)
self.volProp.SetScalarOpacityUnitDistance(0.8919)
self.minimum = 0
self.maximum = 1
self.lowerBound = 0
self.upperBound = 1
colors = [[255, 139, 0], [0, 147, 255], [0, 255, 147], [213, 100, 255], [255, 75, 75]]
self.colors = map(lambda x: [x[0] / 255.0, x[1] / 255.0, x[2] / 255.0], colors)
self.color = self.colors[0]
self.opacity = 1.0
self.colorFunction = None
self.opacityFunction = None
@overrides(VolumeVisualization)
def getParameterWidget(self):
"""
Returns a widget with sliders / fields with which properties of this
volume property can be adjusted.
:rtype: QWidget
"""
self.lowerBoundSlider = SliderFloatWidget()
self.lowerBoundSlider.setName("Lower:")
self.lowerBoundSlider.setRange([self.minimum, self.maximum])
self.lowerBoundSlider.setValue(self.lowerBound)
self.lowerBoundSlider.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lowerBoundSlider.valueChanged.connect(self.valueChanged)
self.upperBoundSlider = SliderFloatWidget()
self.upperBoundSlider.setName("Upper:")
self.upperBoundSlider.setRange([self.minimum, self.maximum])
self.upperBoundSlider.setValue(self.upperBound)
self.upperBoundSlider.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.upperBoundSlider.valueChanged.connect(self.valueChanged)
layoutGroup = QGridLayout()
layoutGroup.setAlignment(Qt.AlignTop)
layoutGroup.setContentsMargins(5, 0, 0, 0)
layoutGroup.setSpacing(0)
layoutGroup.addWidget(self.lowerBoundSlider)
layoutGroup.addWidget(self.upperBoundSlider)
groupbox = QGroupBox("Thresholds:")
groupbox.setLayout(layoutGroup)
self.opacitySlider = SliderFloatWidget()
self.opacitySlider.setName("Opacity:")
self.opacitySlider.setRange([0.0, 1.0])
self.opacitySlider.setValue(self.opacity)
self.opacitySlider.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.opacitySlider.valueChanged.connect(self.valueChanged)
self.colorChooser = ColorChoiceWidget()
self.colorChooser.setName("Color:")
self.colorChooser.setColors(self.colors)
self.colorChooser.setColor(self.color)
self.colorChooser.setMinimumHeight(self.upperBoundSlider.sizeHint().height())
self.colorChooser.label.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.colorChooser.valueChanged.connect(self.valueChanged)
otherLayoutGroup = QGridLayout()
otherLayoutGroup.setAlignment(Qt.AlignTop)
otherLayoutGroup.setContentsMargins(5, 0, 0, 0)
otherLayoutGroup.setSpacing(0)
otherLayoutGroup.addWidget(self.opacitySlider)
# otherLayoutGroup.addWidget(self.colorChooser)
# otherBox = QGroupBox("Color and opacity:")
otherBox = QGroupBox()
otherBox.setLayout(otherLayoutGroup)
layout = QGridLayout()
layout.setAlignment(Qt.AlignTop)
layout.setContentsMargins(0, 0, 0, 0)
layout.setHorizontalSpacing(0)
layout.addWidget(groupbox)
layout.addWidget(otherBox)
widget = QWidget()
widget.setLayout(layout)
try:
from ColumnResizer import ColumnResizer
self.columnResizer = ColumnResizer()
self.columnResizer.addWidgetsFromLayout(self.lowerBoundSlider.layout(), 0)
self.columnResizer.addWidgetsFromLayout(self.upperBoundSlider.layout(), 0)
self.columnResizer.addWidgetsFromLayout(self.colorChooser.layout(), 0)
self.columnResizer.addWidgetsFromLayout(self.opacitySlider.layout(), 0)
self.otherColRes = ColumnResizer()
self.otherColRes.addWidgetsFromLayout(self.lowerBoundSlider.layout(), 2)
self.otherColRes.addWidgetsFromLayout(self.upperBoundSlider.layout(), 2)
self.otherColRes.addWidgetsFromLayout(self.opacitySlider.layout(), 2)
except Exception, e:
print e
return widget
@overrides(VolumeVisualization)
def setImageData(self, imageData):
if imageData is None:
self.minimum = 0.0
self.maximum = 1.0
self.lowerBound = self.minimum
self.upperBound = self.maximum
self.opacity = 1.0
return
self.minimum, self.maximum = imageData.GetScalarRange()
self.lowerBound = self.minimum
self.upperBound = self.maximum
self.opacity = 1.0
@overrides(VolumeVisualization)
def setMapper(self, mapper):
pass
@overrides(VolumeVisualization)
def shaderType(self):
return 0
@overrides(VolumeVisualization)
def updateTransferFunction(self):
r, g, b = self.color
# Transfer functions and properties
if not self.colorFunction:
self.colorFunction = vtkColorTransferFunction()
else:
self.colorFunction.RemoveAllPoints()
self.colorFunction.AddRGBPoint(self.minimum, r*0.7, g*0.7, b*0.7)
self.colorFunction.AddRGBPoint(self.maximum, r, g, b)
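        # The opacity transfer function below is a box window: fully transparent
        # outside the [lowerBound, upperBound] threshold range and a constant,
        # user-chosen opacity inside it.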
if not self.opacityFunction:
self.opacityFunction = vtkPiecewiseFunction()
else:
self.opacityFunction.RemoveAllPoints()
self.opacityFunction.AddPoint(self.minimum, 0)
self.opacityFunction.AddPoint(self.lowerBound, 0)
self.opacityFunction.AddPoint(self.lowerBound+0.0001, self.opacity)
self.opacityFunction.AddPoint(self.upperBound-0.0001, self.opacity)
self.opacityFunction.AddPoint(self.upperBound, 0)
self.opacityFunction.AddPoint(self.maximum+0.0001, 0)
self.volProp.SetColor(self.colorFunction)
self.volProp.SetScalarOpacity(self.opacityFunction)
self.updatedTransferFunction.emit()
@overrides(VolumeVisualization)
def valueChanged(self, value):
"""
This method is called when the value of one of the sliders / fields is
adjusted. Argument value is unused. It is just there so that it can be
connected to the signals of the interface elements.
:type value: int
"""
self.lowerBound = min(self.lowerBoundSlider.value(), self.upperBoundSlider.value())
self.upperBound = max(self.lowerBoundSlider.value(), self.upperBoundSlider.value())
self.color = self.colorChooser.color
self.opacity = self.opacitySlider.value()
self.updateTransferFunction()
| 34.164179 | 88 | 0.778797 |
4a248b9fe7f379c3dbede93bddf1909ef2f1d975 | 12,806 | py | Python | mypy/options.py | deeper-x/mypy | 0f76dfcb9720472f0d8145bff81c01314e1bef84 | [
"PSF-2.0"
] | null | null | null | mypy/options.py | deeper-x/mypy | 0f76dfcb9720472f0d8145bff81c01314e1bef84 | [
"PSF-2.0"
] | null | null | null | mypy/options.py | deeper-x/mypy | 0f76dfcb9720472f0d8145bff81c01314e1bef84 | [
"PSF-2.0"
] | 1 | 2020-02-27T15:22:03.000Z | 2020-02-27T15:22:03.000Z | from collections import OrderedDict
import re
import pprint
import sys
from typing import Dict, List, Mapping, Optional, Pattern, Set, Tuple
from mypy import defaults
class BuildType:
STANDARD = 0
MODULE = 1
PROGRAM_TEXT = 2
class Options:
"""Options collected from flags."""
PER_MODULE_OPTIONS = {
"ignore_missing_imports",
"follow_imports",
"follow_imports_for_stubs",
"disallow_any_generics",
"disallow_any_unimported",
"disallow_any_expr",
"disallow_any_decorated",
"disallow_any_explicit",
"disallow_subclassing_any",
"disallow_untyped_calls",
"disallow_untyped_defs",
"check_untyped_defs",
"debug_cache",
"strict_optional_whitelist",
"show_none_errors",
"warn_no_return",
"warn_return_any",
"warn_unused_ignores",
"ignore_errors",
"strict_boolean",
"no_implicit_optional",
"always_true",
"always_false",
"strict_optional",
"disallow_untyped_decorators",
"local_partial_types",
}
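    # Every per-module option plus a few global ones feeds into the cache key;
    # debug_cache is excluded, presumably because it does not change the results
    # of type checking.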
OPTIONS_AFFECTING_CACHE = ((PER_MODULE_OPTIONS |
{"quick_and_dirty", "platform", "bazel"})
- {"debug_cache"})
def __init__(self) -> None:
# Cache for clone_for_module()
self.per_module_cache = None # type: Optional[Dict[str, Options]]
# -- build options --
self.build_type = BuildType.STANDARD
self.python_version = sys.version_info[:2] # type: Tuple[int, int]
# The executable used to search for PEP 561 packages. If this is None,
# then mypy does not search for PEP 561 packages.
self.python_executable = sys.executable # type: Optional[str]
self.platform = sys.platform
self.custom_typing_module = None # type: Optional[str]
self.custom_typeshed_dir = None # type: Optional[str]
self.mypy_path = [] # type: List[str]
self.report_dirs = {} # type: Dict[str, str]
self.ignore_missing_imports = False
self.follow_imports = 'normal' # normal|silent|skip|error
# Whether to respect the follow_imports setting even for stub files.
# Intended to be used for disabling specific stubs.
self.follow_imports_for_stubs = False # type: bool
# disallow_any options
self.disallow_any_generics = False
self.disallow_any_unimported = False
self.disallow_any_expr = False
self.disallow_any_decorated = False
self.disallow_any_explicit = False
# Disallow calling untyped functions from typed ones
self.disallow_untyped_calls = False
# Disallow defining untyped (or incompletely typed) functions
self.disallow_untyped_defs = False
# Disallow defining incompletely typed functions
self.disallow_incomplete_defs = False
# Type check unannotated functions
self.check_untyped_defs = False
# Disallow decorating typed functions with untyped decorators
self.disallow_untyped_decorators = False
# Disallow subclassing values of type 'Any'
self.disallow_subclassing_any = False
# Also check typeshed for missing annotations
self.warn_incomplete_stub = False
# Warn about casting an expression to its inferred type
self.warn_redundant_casts = False
# Warn about falling off the end of a function returning non-None
self.warn_no_return = True
# Warn about returning objects of type Any when the function is
# declared with a precise type
self.warn_return_any = False
# Warn about unused '# type: ignore' comments
self.warn_unused_ignores = False
# Warn about unused '[mypy-<pattern>] config sections
self.warn_unused_configs = False
# Files in which to ignore all non-fatal errors
self.ignore_errors = False
# Only allow booleans in conditions
self.strict_boolean = False
# Apply strict None checking
self.strict_optional = True
# Show "note: In function "foo":" messages.
self.show_error_context = False
# Files in which to allow strict-Optional related errors
# TODO: Kill this in favor of show_none_errors
self.strict_optional_whitelist = None # type: Optional[List[str]]
# Alternate way to show/hide strict-None-checking related errors
self.show_none_errors = True
# Don't assume arguments with default values of None are Optional
self.no_implicit_optional = False
# Variable names considered True
self.always_true = [] # type: List[str]
# Variable names considered False
self.always_false = [] # type: List[str]
# Use script name instead of __main__
self.scripts_are_modules = False
# Config file name
self.config_file = None # type: Optional[str]
# Write junit.xml to given file
self.junit_xml = None # type: Optional[str]
# Caching and incremental checking options
self.incremental = True
self.cache_dir = defaults.CACHE_DIR
self.debug_cache = False
self.quick_and_dirty = False
self.skip_version_check = False
self.fine_grained_incremental = False
# Include fine-grained dependencies in written cache files
self.cache_fine_grained = False
# Read cache files in fine-grained incremental mode (cache must include dependencies)
self.use_fine_grained_cache = False
# Paths of user plugins
self.plugins = [] # type: List[str]
# Per-module options (raw)
self.per_module_options = OrderedDict() # type: OrderedDict[str, Dict[str, object]]
self.glob_options = [] # type: List[Tuple[str, Pattern[str]]]
self.unused_configs = set() # type: Set[str]
# -- development options --
self.verbosity = 0 # More verbose messages (for troubleshooting)
self.pdb = False
self.show_traceback = False
self.dump_type_stats = False
self.dump_inference_stats = False
# -- test options --
# Stop after the semantic analysis phase
self.semantic_analysis_only = False
# Use stub builtins fixtures to speed up tests
self.use_builtins_fixtures = False
# -- experimental options --
self.shadow_file = None # type: Optional[List[Tuple[str, str]]]
self.show_column_numbers = False # type: bool
self.dump_graph = False
self.dump_deps = False
# If True, partial types can't span a module top level and a function
self.local_partial_types = False
# Some behaviors are changed when using Bazel (https://bazel.build).
self.bazel = False
# List of package roots -- directories under these are packages even
# if they don't have __init__.py.
self.package_root = [] # type: List[str]
self.cache_map = {} # type: Dict[str, Tuple[str, str]]
def snapshot(self) -> object:
"""Produce a comparable snapshot of this Option"""
d = dict(self.__dict__)
del d['per_module_cache']
return d
def __eq__(self, other: object) -> bool:
return self.__class__ == other.__class__ and self.__dict__ == other.__dict__
def __ne__(self, other: object) -> bool:
return not self == other
def __repr__(self) -> str:
return 'Options({})'.format(pprint.pformat(self.snapshot()))
def apply_changes(self, changes: Dict[str, object]) -> 'Options':
new_options = Options()
new_options.__dict__.update(self.__dict__)
new_options.__dict__.update(changes)
return new_options
def build_per_module_cache(self) -> None:
self.per_module_cache = {}
# Config precedence is as follows:
# 1. Concrete section names: foo.bar.baz
# 2. "Unstructured" glob patterns: foo.*.baz, in the order
# they appear in the file (last wins)
# 3. "Well-structured" wildcard patterns: foo.bar.*, in specificity order.
# Since structured configs inherit from structured configs above them in the hierarchy,
# we need to process per-module configs in a careful order.
# We have to process foo.* before foo.bar.* before foo.bar,
# and we need to apply *.bar to foo.bar but not to foo.bar.*.
# To do this, process all well-structured glob configs before non-glob configs and
# exploit the fact that foo.* sorts earlier ASCIIbetically (unicodebetically?)
# than foo.bar.*.
# (A section being "processed last" results in its config "winning".)
# Unstructured glob configs are stored and are all checked for each module.
unstructured_glob_keys = [k for k in self.per_module_options.keys()
if '*' in k[:-1]]
structured_keys = [k for k in self.per_module_options.keys()
if '*' not in k[:-1]]
wildcards = sorted(k for k in structured_keys if k.endswith('.*'))
concrete = [k for k in structured_keys if not k.endswith('.*')]
for glob in unstructured_glob_keys:
self.glob_options.append((glob, self.compile_glob(glob)))
# We (for ease of implementation) treat unstructured glob
# sections as used if any real modules use them or if any
# concrete config sections use them. This means we need to
# track which get used while constructing.
self.unused_configs = set(unstructured_glob_keys)
for key in wildcards + concrete:
# Find what the options for this key would be, just based
# on inheriting from parent configs.
options = self.clone_for_module(key)
# And then update it with its per-module options.
self.per_module_cache[key] = options.apply_changes(self.per_module_options[key])
# Add the more structured sections into unused configs, since
# they only count as used if actually used by a real module.
self.unused_configs.update(structured_keys)
def clone_for_module(self, module: str) -> 'Options':
"""Create an Options object that incorporates per-module options.
NOTE: Once this method is called all Options objects should be
considered read-only, else the caching might be incorrect.
"""
if self.per_module_cache is None:
self.build_per_module_cache()
assert self.per_module_cache is not None
# If the module just directly has a config entry, use it.
if module in self.per_module_cache:
self.unused_configs.discard(module)
return self.per_module_cache[module]
# If not, search for glob paths at all the parents. So if we are looking for
# options for foo.bar.baz, we search foo.bar.baz.*, foo.bar.*, foo.*,
# in that order, looking for an entry.
# This is technically quadratic in the length of the path, but module paths
# don't actually get all that long.
options = self
path = module.split('.')
for i in range(len(path), 0, -1):
key = '.'.join(path[:i] + ['*'])
if key in self.per_module_cache:
self.unused_configs.discard(key)
options = self.per_module_cache[key]
break
# OK and *now* we need to look for unstructured glob matches.
# We only do this for concrete modules, not structured wildcards.
if not module.endswith('.*'):
for key, pattern in self.glob_options:
if pattern.match(module):
self.unused_configs.discard(key)
options = options.apply_changes(self.per_module_options[key])
# We could update the cache to directly point to modules once
# they have been looked up, but in testing this made things
# slower and not faster, so we don't bother.
return options
def compile_glob(self, s: str) -> Pattern[str]:
# Compile one of the glob patterns to a regex so that '.*' can
# match *zero or more* module sections. This means we compile
# '.*' into '(\..*)?'.
parts = s.split('.')
expr = re.escape(parts[0]) if parts[0] != '*' else '.*'
for part in parts[1:]:
expr += re.escape('.' + part) if part != '*' else '(\..*)?'
return re.compile(expr + '\\Z')
def select_options_affecting_cache(self) -> Mapping[str, object]:
return {opt: getattr(self, opt) for opt in self.OPTIONS_AFFECTING_CACHE}
| 39.524691 | 95 | 0.634702 |
4a248c335fbd73344a65a5f9796954445480bbad | 8,396 | py | Python | tests/test_views.py | crazytruth/insanic | f9b61611317d873fe7688a5fd13eecb9a496ead5 | [
"MIT"
] | 4 | 2020-10-13T04:34:21.000Z | 2022-02-18T05:34:03.000Z | tests/test_views.py | crazytruth/insanic | f9b61611317d873fe7688a5fd13eecb9a496ead5 | [
"MIT"
] | 1 | 2020-09-29T06:59:36.000Z | 2020-09-29T06:59:36.000Z | tests/test_views.py | crazytruth/insanic | f9b61611317d873fe7688a5fd13eecb9a496ead5 | [
"MIT"
] | null | null | null | import pytest
from sanic.exceptions import _sanic_exceptions
from sanic.response import json
from insanic import Insanic, authentication, permissions, status
from insanic.choices import UserLevels
from insanic.errors import GlobalErrorCodes
from insanic.views import InsanicView
def test_view_allowed_methods():
class TestView(InsanicView):
def patch(self, request):
return
view = TestView()
assert view.allowed_methods == ["PATCH"]
def test_view_default_response_headers():
class TestView(InsanicView):
def patch(self, request):
return
view = TestView()
assert view.default_response_headers == {"Allow": "PATCH"}
def test_view_invalid_method():
app = Insanic("test")
response_body = {"insanic": "Gotta go insanely fast!"}
class DummyView(InsanicView):
authentication_classes = ()
permission_classes = ()
def get(self, request):
return json(response_body)
app.add_route(DummyView.as_view(), "/")
request, response = app.test_client.post("/")
assert response.status == status.HTTP_405_METHOD_NOT_ALLOWED
assert (
GlobalErrorCodes(response.json["error_code"]["value"])
== GlobalErrorCodes.method_not_allowed
)
def test_not_found():
app = Insanic("test")
class DummyView(InsanicView):
authentication_classes = ()
def get(self, request):
return json({})
app.add_route(DummyView.as_view(), "/")
request, response = app.test_client.get("/aaaa")
assert response.status == status.HTTP_404_NOT_FOUND
def test_view_only_json_authentication():
app = Insanic("test")
class DummyView(InsanicView):
authentication_classes = (authentication.JSONWebTokenAuthentication,)
def get(self, request):
return json({})
app.add_route(DummyView.as_view(), "/")
request, response = app.test_client.get("/")
assert response.status == status.HTTP_401_UNAUTHORIZED
assert (
GlobalErrorCodes(response.json["error_code"]["value"])
== GlobalErrorCodes.authentication_credentials_missing
)
def test_view_permission(test_user_token_factory):
app = Insanic("test")
response_body = {"insanic": "Gotta go insanely fast!"}
class DummyView(InsanicView):
authentication_classes = (authentication.JSONWebTokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
def get(self, request):
return json(response_body)
app.add_route(DummyView.as_view(), "/")
request, response = app.test_client.get("/")
assert response.status == status.HTTP_401_UNAUTHORIZED
assert (
GlobalErrorCodes(response.json["error_code"]["value"])
== GlobalErrorCodes.authentication_credentials_missing
)
request, response = app.test_client.get(
"/", headers={"Authorization": "Bearer lalfjafafa"}
)
assert response.status == status.HTTP_401_UNAUTHORIZED
assert (
GlobalErrorCodes(response.json["error_code"]["value"])
== GlobalErrorCodes.signature_not_decodable
)
user, token = test_user_token_factory(
level=UserLevels.BANNED, return_with_user=True
)
request, response = app.test_client.get(
"/", headers={"Authorization": token, "x-consumer-username": user.id}
)
assert response.status == status.HTTP_401_UNAUTHORIZED
assert (
GlobalErrorCodes(response.json["error_code"]["value"])
== GlobalErrorCodes.inactive_user
)
user, token = test_user_token_factory(
level=UserLevels.DEACTIVATED, return_with_user=True
)
request, response = app.test_client.get(
"/", headers={"Authorization": token, "x-consumer-username": user.id}
)
assert response.status == status.HTTP_401_UNAUTHORIZED
assert (
GlobalErrorCodes(response.json["error_code"]["value"])
== GlobalErrorCodes.inactive_user
)
user, token = test_user_token_factory(
level=UserLevels.ACTIVE, return_with_user=True
)
request, response = app.test_client.get(
"/", headers={"Authorization": token, "x-consumer-username": user.id}
)
assert response.status == status.HTTP_200_OK
assert response.json == response_body
user, token = test_user_token_factory(
level=UserLevels.STAFF, return_with_user=True
)
request, response = app.test_client.get(
"/", headers={"Authorization": token, "x-consumer-username": user.id}
)
assert response.status == status.HTTP_200_OK
assert response.json == response_body
@pytest.mark.parametrize(
"user_level", range(UserLevels.ACTIVE, UserLevels.STAFF, 10)
)
def test_permission_denied(test_user_token_factory, user_level, monkeypatch):
app = Insanic("test")
response_body = {"insanic": "Gotta go insanely fast!"}
class DummyView(InsanicView):
authentication_classes = (authentication.JSONWebTokenAuthentication,)
permission_classes = (permissions.IsAdminUser,)
def get(self, request):
return json(response_body)
app.add_route(DummyView.as_view(), "/")
user, token = test_user_token_factory(
level=user_level, return_with_user=True
)
request, response = app.test_client.get(
"/", headers={"Authorization": token, "x-consumer-username": user.id}
)
assert response.status == status.HTTP_403_FORBIDDEN
assert (
GlobalErrorCodes(response.json["error_code"]["value"])
== GlobalErrorCodes.permission_denied
)
@pytest.mark.parametrize(
"user_level", range(UserLevels.STAFF, UserLevels.STAFF + 100, 10)
)
def test_is_admin(test_user_token_factory, user_level):
app = Insanic("test")
response_body = {"insanic": "Gotta go insanely fast!"}
class DummyView(InsanicView):
authentication_classes = (authentication.JSONWebTokenAuthentication,)
permission_classes = (permissions.IsAdminUser,)
def get(self, request):
return json(response_body)
app.add_route(DummyView.as_view(), "/")
user, token = test_user_token_factory(
level=user_level, return_with_user=True
)
request, response = app.test_client.get(
"/", headers={"Authorization": token, "x-consumer-username": user.id}
)
assert response.status == status.HTTP_200_OK
assert response.json == response_body
def test_throttle():
app = Insanic("test")
wait_time = 1000
from insanic.throttles import BaseThrottle
class ForceThrottle(BaseThrottle):
async def allow_request(self, *args, **kwargs):
return False
def wait(self, *args, **kwargs):
return wait_time
class DummyView(InsanicView):
authentication_classes = ()
permission_classes = ()
throttle_classes = (ForceThrottle,)
def get(self, request):
return json({"hello": "bye"})
app.add_route(DummyView.as_view(), "/")
request, response = app.test_client.get("/")
assert response.status == status.HTTP_429_TOO_MANY_REQUESTS
assert str(wait_time) in response.json["description"]
@pytest.mark.parametrize("sanic_exception", _sanic_exceptions.values())
def test_sanic_error_handling(sanic_exception):
app = Insanic("test")
class ContentRange:
total = 120
if sanic_exception.status_code == 416:
raised_exception = sanic_exception("a", ContentRange())
elif sanic_exception.status_code == 405:
raised_exception = sanic_exception("a", "HEAD", ["GET"])
else:
raised_exception = sanic_exception("a")
class DummyView(InsanicView):
authentication_classes = ()
permission_classes = ()
def get(self, request):
raise raised_exception
app.add_route(DummyView.as_view(), "/")
request, response = app.test_client.get("/")
assert response.status == raised_exception.status_code
assert response.json["description"] == "a"
if hasattr(raised_exception, "headers"):
for k, v in raised_exception.headers.items():
if (
raised_exception.status_code == 405
and k.lower() == "content-length"
):
continue
assert k.lower() in response.headers.keys()
assert str(v) == response.headers[k]
| 28.65529 | 77 | 0.669485 |
4a248c8a8202bcfd03548f9e35a0e9b056e27d97 | 3,704 | py | Python | Keithley/Keithley6221_Control.py | bklebel/Cryostat-GUI | 2fd523b8358777d00a4ca6db655582e87796bc64 | [
"MIT"
] | null | null | null | Keithley/Keithley6221_Control.py | bklebel/Cryostat-GUI | 2fd523b8358777d00a4ca6db655582e87796bc64 | [
"MIT"
] | 1 | 2018-10-02T21:32:55.000Z | 2018-10-02T21:32:55.000Z | Keithley/Keithley6221_Control.py | bklebel/Cryostat-GUI | 2fd523b8358777d00a4ca6db655582e87796bc64 | [
"MIT"
] | 3 | 2018-08-27T12:50:48.000Z | 2018-09-28T09:08:42.000Z | # from PyQt5 import QtWidgets, QtGui
from PyQt5.QtCore import pyqtSignal, pyqtSlot
# from PyQt5.uic import loadUi
from Keithley.Keithley6221 import Keithley6221
from pyvisa.errors import VisaIOError
from copy import deepcopy
# from util import AbstractThread
from util import AbstractEventhandlingThread
from util import ExceptionHandling
class Keithley6221_Updater(AbstractEventhandlingThread):
"""This is the worker thread, which updates all instrument data of a Keithely 6221
For each method of the device class (except collecting data), there is a wrapping method,
which we can call by a signal, from the main thread. This wrapper sends
the corresponding value to the device.
There is a second method for all wrappers, which accepts
the corresponding value, and stores it, so it can be sent upon acknowledgment
The information from the device is collected in regular intervals (method "running"),
and subsequently sent to the main thread. It is packed in a dict,
the keys of which are displayed in the "sensors" dict in this class.
"""
sensors = dict(
Current_A=None,
# Start_Current = None,
# Step_Current = None,
# Stop_Current = None
)
def __init__(self, InstrumentAddress="", **kwargs):
super().__init__(**kwargs)
self.Keithley6221 = Keithley6221(InstrumentAddress=InstrumentAddress)
self.__name__ = "Keithley6221_Updater " + InstrumentAddress
self.Current_A_value = 0
self.Current_A_storage = 0 # if power is turned off
self.OutputOn = self.getstatus() # 0 == OFF, 1 == ON
# self.Start_Current_value = 0
# self.Step_Current_value = 0
# self.Stop_Current_value = 0
def getCurrent_A(self):
return self.Current_A_value
@pyqtSlot()
@ExceptionHandling
def disable(self):
self.Keithley6221.disable()
self.Current_A_storage = self.Current_A_value
# for logging/application running:
self.Current_A_value = 0
self.OutputOn = self.Keithley6221.getstatus()[0]
@pyqtSlot()
@ExceptionHandling
def enable(self):
self.Keithley6221.enable()
self.Current_A_value = self.Current_A_storage
self.setCurrent_A()
self.OutputOn = self.Keithley6221.getstatus()[0]
@pyqtSlot()
@ExceptionHandling
def getstatus(self):
return int(self.Keithley6221.getstatus()[0])
@ExceptionHandling
def toggle_frontpanel(self, bools, text="In sequence..."):
if bools:
self.Keithley6221.enable_frontpanel(text)
else:
self.Keithley6221.disable_frontpanel()
@pyqtSlot()
@ExceptionHandling
def setCurrent_A(self):
self.Keithley6221.setCurrent(self.Current_A_value)
self.sig_Infodata.emit(deepcopy(dict(Current_A=self.Current_A_value)))
@pyqtSlot()
@ExceptionHandling
def setSweep(self):
self.Keithley6221.SetupSweet(
self.Start_Current_value, self.Step_Current_value, self.Stop_Current_value
)
@pyqtSlot()
@ExceptionHandling
def startSweep(self):
self.Keithley6221.StartSweep()
@pyqtSlot(float)
def gettoset_Current_A(self, value):
self.Current_A_value = value
self.Current_A_storage = value
@pyqtSlot(float)
def gettoset_Start_Current(self, value):
self.Start_Current_value = value
@pyqtSlot(float)
def gettoset_Step_Current(self, value):
self.Step_Current_value = value
@pyqtSlot(float)
def gettoset_Stop_Current(self, value):
self.Stop_Current_value = value
| 31.65812 | 97 | 0.679806 |
4a248ee6b4f9becb760070ef1a8681cff766deb9 | 2,734 | py | Python | market-prices/market-prices.py | barrucadu/hledger-scripts | b9803e127e63da6a921015aca97d99050341a9b1 | [
"MIT"
] | 52 | 2018-06-05T16:14:50.000Z | 2022-03-25T16:00:49.000Z | market-prices/market-prices.py | barrucadu/hledger-scripts | b9803e127e63da6a921015aca97d99050341a9b1 | [
"MIT"
] | 4 | 2019-06-28T09:35:57.000Z | 2021-06-11T15:51:16.000Z | market-prices/market-prices.py | barrucadu/hledger-scripts | b9803e127e63da6a921015aca97d99050341a9b1 | [
"MIT"
] | 7 | 2019-09-07T23:31:02.000Z | 2021-08-08T18:58:15.000Z | #!/usr/bin/env python3
from html.parser import HTMLParser
import json
import sys
import time
import urllib.request
def get_coinbase(base, currency):
req = urllib.request.Request(
"https://api.coinbase.com/v2/prices/{}-{}/spot/".format(base, currency),
headers={"CB-VERSION": "2018-05-25"})
with urllib.request.urlopen(req) as response:
resp = json.load(response)
return resp['data']['amount']
def ft_find_price(url, currency):
class FTPriceFinder(HTMLParser):
def __init__(self):
HTMLParser.__init__(self)
self.found = None
self.isnext = False
def handle_data(self, data):
if data == 'Price ({})'.format(currency):
self.isnext = True
elif self.isnext:
self.found = data
self.isnext = False
req = urllib.request.Request(url)
with urllib.request.urlopen(req) as response:
html = response.read().decode('utf-8')
finder = FTPriceFinder()
finder.feed(html)
if finder.found is None:
raise Exception("could not find price")
else:
return finder.found
def get_ft_currency(base, currency):
return ft_find_price(
"https://markets.ft.com/data/currencies/tearsheet/summary?s={}{}".format(base, currency),
currency)
def get_ft_fund(isin, currency):
return ft_find_price(
"https://markets.ft.com/data/funds/tearsheet/summary?s={}:{}".format(isin, currency),
currency)
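# Expected stdin config shape, inferred from the keys read below (the concrete
# values here are only illustrative, not taken from a real config):
#   {"symbols": {"GBP": "£"},
#    "commodities": {"BTC": {"provider": "coinbase", "currency": "GBP"},
#                    "SOMEFUND": {"provider": "ft_fund", "isin": "GB00EXAMPLE0"}}}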
config = json.load(sys.stdin)
symbols = config.get('symbols', {})
for commodity, cconfig in config.get('commodities', {}).items():
try:
try:
provider = cconfig['provider']
except KeyError:
raise Exception("missing provider")
currency = cconfig.get('currency', 'GBP')
if provider == 'coinbase':
rate = get_coinbase(
cconfig.get('base', commodity),
currency)
elif provider == 'ft_currency':
rate = get_ft_currency(
cconfig.get('base', commodity),
currency)
elif provider == 'ft_fund':
rate = get_ft_fund(
cconfig.get('isin', commodity),
currency)
else:
raise Exception("unknown provider '{}'".format(provider))
date = time.strftime('%Y-%m-%d')
if currency in symbols:
print('P {} {} {}{}'.format(date, commodity, symbols[currency], rate))
else:
print('P {} {} {} {}'.format(date, commodity, rate, currency))
except Exception as e:
print("; error processing commodity '{}': {}".format(commodity, e))
| 30.719101 | 97 | 0.576079 |
4a2492248a0f84e1ed4c3375a8ba01f73e800853 | 579 | py | Python | sims-g2/recover-dg/a12/conv-rate.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 1 | 2019-12-19T16:21:13.000Z | 2019-12-19T16:21:13.000Z | sims-g2/recover-dg/a12/conv-rate.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | null | null | null | sims-g2/recover-dg/a12/conv-rate.py | ammarhakim/ammar-simjournal | 85b64ddc9556f01a4fab37977864a7d878eac637 | [
"MIT",
"Unlicense"
] | 2 | 2020-01-08T06:23:33.000Z | 2020-01-08T07:06:50.000Z | from pylab import *
import math
style.use('../code/postgkyl.mplstyle')
dat = loadtxt("error-dx.txt")
N = dat[:,0]*1.0
err = dat[:,1]
err1 = err- err[-1] # this gets rid of dt errors assuming dx is very small
for i in range(1,N.shape[0]-1):
dxOrder = math.log(err1[i-1]/err1[i])/log(N[i]/N[i-1])
print("%g %g" % (1/N[i], dxOrder))
fig, ax = plt.subplots(1,1)
ax.loglog(N[:-1], err1[:-1])
ax.set_xticks([2,4,8,16])
ax.set_xlim(0,20)
ax.get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
ax.set_xlabel(r'$N$')
ax.set_ylabel('Error')
grid()
show()
| 23.16 | 74 | 0.644214 |
4a249402f086eb3815c54b54411804cae6d49689 | 6,003 | py | Python | openstack_dashboard/test/integration_tests/tests/test_flavors.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 9 | 2016-06-03T03:53:24.000Z | 2017-05-20T16:53:23.000Z | openstack_dashboard/test/integration_tests/tests/test_flavors.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 1 | 2016-09-08T10:57:46.000Z | 2016-09-08T10:59:06.000Z | openstack_dashboard/test/integration_tests/tests/test_flavors.py | Mirantis/mos-horizon | d2444220d959c8b921436bd75459c2face0e71d2 | [
"Apache-2.0"
] | 4 | 2016-08-01T10:50:15.000Z | 2017-02-22T12:11:19.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack_dashboard.test.integration_tests import helpers
from openstack_dashboard.test.integration_tests.regions import messages
class TestFlavors(helpers.AdminTestCase):
FLAVOR_NAME = helpers.gen_random_resource_name("flavor")
def flavor_create(self, name=None, selected_projects=None):
name = name or self.FLAVOR_NAME
flavors_page = self.home_pg.go_to_system_flavorspage()
flavors_page.create_flavor(name=name, vcpus=1, ram=1024, root_disk=20,
ephemeral_disk=0, swap_disk=0,
selected_projects=selected_projects)
self.assertTrue(
flavors_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(flavors_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(flavors_page.is_flavor_present(name))
return flavors_page
def flavor_delete(self, name=None):
name = name or self.FLAVOR_NAME
flavors_page = self.home_pg.go_to_system_flavorspage()
flavors_page.delete_flavor(name)
self.assertTrue(
flavors_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(flavors_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(flavors_page.is_flavor_present(name))
def test_flavor_create(self):
"""tests the flavor creation and deletion functionalities:
* creates a new flavor
* verifies the flavor appears in the flavors table
* deletes the newly created flavor
* verifies the flavor does not appear in the table after deletion
"""
self.flavor_create()
self.flavor_delete()
def test_flavor_update_metadata(self):
"""Test update flavor metadata
* logs in as admin user
* creates a new flavor
* verifies the flavor appears in the flavors table
* verifies that Metadata column of the table contains 'No'
* invokes action 'Update Metadata' for the new flavor
        * adds custom field 'metadata'
        * adds value 'flavor' for the custom field 'metadata'
* verifies that Metadata column of the table is updated to Yes
* deletes the newly created flavor
* verifies the flavor does not appear in the table after deletion
"""
new_metadata = {'metadata1': helpers.gen_random_resource_name("value"),
'metadata2': helpers.gen_random_resource_name("value")}
flavors_page = self.flavor_create()
self.assertTrue(
flavors_page.get_metadata_column_value(self.FLAVOR_NAME) == 'No')
flavors_page.add_custom_metadata(self.FLAVOR_NAME, new_metadata)
self.assertTrue(
flavors_page.get_metadata_column_value(self.FLAVOR_NAME) == 'Yes')
results = flavors_page.check_flavor_metadata(self.FLAVOR_NAME,
new_metadata)
self.flavor_delete()
self.assertSequenceTrue(results) # custom matcher
def test_edit_flavor(self):
new_flavor_name = 'new-' + self.FLAVOR_NAME
flavors_page = self.flavor_create()
flavors_page.edit_flavor(name=self.FLAVOR_NAME,
new_name=new_flavor_name)
self.assertTrue(
flavors_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(flavors_page.find_message_and_dismiss(messages.ERROR))
self.assertTrue(flavors_page.is_flavor_present(new_flavor_name))
self.flavor_delete(new_flavor_name)
def test_modify_flavor_access(self):
self.flavor_create(selected_projects=['admin'])
assert self.FLAVOR_NAME in self._available_flavors()
self.home_pg.log_out()
self.home_pg = self.login_pg.login(self.DEMO_NAME, self.DEMO_PASSWORD)
self.home_pg.change_project(self.DEMO_PROJECT)
assert self.FLAVOR_NAME not in self._available_flavors()
self.home_pg.log_out()
self.home_pg = self.login_pg.login(self.ADMIN_NAME,
self.ADMIN_PASSWORD)
self.home_pg.change_project(self.ADMIN_PROJECT)
self.flavor_delete()
def _available_flavors(self):
instances_page = self.home_pg.go_to_compute_instancespage()
launch_instance_form = \
instances_page.instances_table.launch_instance_ng()
launch_instance_form.switch_to(2)
available_flavor_names = \
launch_instance_form.flavors.available_items.keys()
launch_instance_form.cancel()
return available_flavor_names
def test_delete_flavors(self):
names = [self.FLAVOR_NAME + str(i) for i in range(3)]
for name in names:
flavors_page = self.flavor_create(name)
name = names.pop()
flavors_page.delete_flavors(name)
self.assertTrue(
flavors_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(flavors_page.find_message_and_dismiss(messages.ERROR))
self.assertFalse(flavors_page.is_flavor_present(name))
flavors_page.delete_flavors(*names)
self.assertTrue(
flavors_page.find_message_and_dismiss(messages.SUCCESS))
self.assertFalse(flavors_page.find_message_and_dismiss(messages.ERROR))
self.assertSequenceFalse(
[flavors_page.is_flavor_present(name) for name in names])
| 44.139706 | 79 | 0.685157 |
4a24944d9c4d6221eed5b0efb9b18e315a7f06fa | 3,093 | py | Python | compareOldvNewTypeII.py | admar505/python-tools | 743c0e41e6700efa3817fdb09c451f8fffccd1b3 | [
"Apache-2.0"
] | null | null | null | compareOldvNewTypeII.py | admar505/python-tools | 743c0e41e6700efa3817fdb09c451f8fffccd1b3 | [
"Apache-2.0"
] | null | null | null | compareOldvNewTypeII.py | admar505/python-tools | 743c0e41e6700efa3817fdb09c451f8fffccd1b3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys,os,re,fileinput,argparse
import csv
parser = argparse.ArgumentParser(description="for TypeII RESULTS files, compare new with old. thanks")
parser.add_argument("--old",help="previous call file, bgzipped and tabixed as -p vcf",required=True)
parser.add_argument("--new",help="fresh call file",required=True)
parser.add_argument("--trns",help="file of transcripts, gene names, and positions.")
args = parser.parse_args()
oldfi = args.old
newfi = args.new
bedfi = args.trns
olds = open(oldfi,'r')
news = open(newfi,'r')
trnss = csv.DictReader(open(bedfi,'r'),delimiter='\t')
#parse results in a map or dict, or what??
#-------------------------------------here by DEFSgONS!!----------------------------------*
def getVal(arr,TAG,trn):
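    # Pull one value out of a tab-separated call line depending on TAG:
    # 'location' -> "chr:pos" (resolving chrN rows through the transcript dict trn),
    # 'EFF_HGVS' -> the accession-based HGVS annotation (falling back to column 5),
    # 'chrN'     -> the raw EFF_HGVS=... tag (again falling back to column 5).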
value = None
cols = arr.split('\t')
if cols[1] == 'NULL':
cols[1] = '1000'
if cols[0] == 'chr2' and cols[1] == '234668879':
cols[1] = '234668919'
def __getNM__(ar):
returncall = None
validnm = False
geteff = re.search('EFF_HGVS=(\S+)',ar)
getnm = re.search('([NVX][GMR]_\d+)\:\S+',geteff.group(1))
if getnm is not None:
returncall = getnm.group(1)
validnm = True
else:
returncall = geteff.group(1)
return [validnm,returncall]
if TAG == 'location':
if cols[0] == 'chrN':
nmid = __getNM__(arr)
if nmid[0] is True:
value = ":".join(trn[nmid[1]])
else:
value = "chrN:1000"
else:
value = cols[0] + ":" + cols[1]
elif TAG == 'EFF_HGVS':
getnm = re.search('(EFF_HGVS=([NVX][MGR]_\d+\:\S+))',arr)
if getnm is not None:
value = getnm.group(2)
else:
value = cols[4]
elif TAG == 'chrN':
getnm = re.search('(EFF_HGVS=\S+)',arr)
if getnm is not None:
value = getnm.group(1)
else:
value = cols[4]
return value
def callNotFound(new_input):
print "NEW_CALL_UNKNOWN\t" + new_input.strip()
#####----------------MAIN--------------#### #####----------------MAIN--------------####
transdat = {}
for line in trnss:
transdat[line['trans']] = [line['chr'],line['start']]
olddat = {}#stores the old calls. chr:pos
for oln in olds:
olddat[getVal(oln,'location',transdat)] = getVal(oln,'EFF_HGVS',transdat)
olddat[getVal(oln,'chrN',transdat)] = getVal(oln,'chrN',transdat)
#print str(getVal(oln,'chrN',transdat))
print "position\tnewcall\toldcall"
for nln in news:
col = nln.split('\t')
loc = col[0] + ":" + col[1]
if loc in olddat and col[0] != 'chrN':
effnew = getVal(nln,'EFF_HGVS',transdat)
print loc +"\t" + effnew + "\t" + olddat[loc]
elif col[0] == 'chrN':
effnew = getVal(nln,'chrN',transdat)
if effnew in olddat and effnew is not None:
print loc +"\t" + str(effnew) + "\t" + olddat[effnew]
else:
callNotFound(nln)
else:
callNotFound(nln)
| 22.413043 | 102 | 0.539606 |
4a249450015a0b7cf4cfb027dd692805268c6e5b | 4,129 | py | Python | eventmanagement/events/migrations/0001_initial.py | AvanishCodes/manage-it | 9f70337a74be9713fea9dbb64e4d55ae0972956b | [
"MIT"
] | 1 | 2020-11-22T08:48:50.000Z | 2020-11-22T08:48:50.000Z | eventmanagement/events/migrations/0001_initial.py | AvanishCodes/manage-it | 9f70337a74be9713fea9dbb64e4d55ae0972956b | [
"MIT"
] | null | null | null | eventmanagement/events/migrations/0001_initial.py | AvanishCodes/manage-it | 9f70337a74be9713fea9dbb64e4d55ae0972956b | [
"MIT"
] | null | null | null | # Generated by Django 3.1.2 on 2020-11-19 08:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Organization',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('orgName', models.CharField(blank=True, default='', max_length=200)),
('Al1', models.CharField(blank=True, default=models.CharField(blank=True, default='', max_length=200), max_length=100)),
('Al2', models.CharField(blank=True, default='', max_length=100)),
('Al3', models.CharField(blank=True, default='', max_length=100)),
('district', models.CharField(blank=True, default='', max_length=40)),
('state', models.CharField(blank=True, default='', max_length=100)),
('country', models.CharField(blank=True, default='India', max_length=100)),
('pinCode', models.IntegerField(default=0)),
('logo', models.ImageField(blank=True, upload_to='')),
],
),
migrations.CreateModel(
name='Speaker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('speakerName', models.CharField(max_length=100)),
('speakerPic', models.ImageField(blank=True, upload_to='')),
('speakerLinkedIn', models.URLField(blank=True)),
('speakerIG', models.URLField(blank=True)),
('speakerFB', models.URLField(blank=True)),
('speakerTwitter', models.URLField(blank=True)),
('speakerWebSite', models.URLField(blank=True)),
],
),
migrations.CreateModel(
name='Participant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('partName', models.CharField(max_length=100)),
('partCountry', models.CharField(max_length=100)),
('partContact', models.BigIntegerField(blank=True)),
('partMail', models.EmailField(blank=True, max_length=254)),
('partPic', models.ImageField(blank=True, upload_to='')),
('partOrg', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.DO_NOTHING, to='events.organization')),
],
),
migrations.CreateModel(
name='Organizer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('OrganizerName', models.CharField(max_length=100)),
('OrganizerContactNumber', models.BigIntegerField(blank=True)),
('OrganizerMail', models.EmailField(blank=True, max_length=254)),
('OrganizerPic', models.ImageField(blank=True, upload_to='')),
('OrganizerOrganization', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.DO_NOTHING, to='events.organization')),
],
),
migrations.CreateModel(
name='Event',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('eventName', models.CharField(default='', max_length=100)),
('eventDescription', models.TextField(blank=True, default='', max_length=1000)),
('eventStartTime', models.DateTimeField(blank=True)),
('eventEndTime', models.DateTimeField(blank=True)),
('eventVenue', models.CharField(blank=True, max_length=200)),
('eventURL', models.URLField(blank=True)),
('eventSpeaker', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.DO_NOTHING, to='events.speaker')),
],
),
]
| 51.6125 | 147 | 0.584645 |
4a2494cb80e6f714e0a67bfa8ba0327142b3406c | 400 | py | Python | tests/create_exchanges.py | jthiltges/gracc-archive | 8c4dca1b1e563983852333d18570dee2a241b48a | [
"Apache-2.0"
] | null | null | null | tests/create_exchanges.py | jthiltges/gracc-archive | 8c4dca1b1e563983852333d18570dee2a241b48a | [
"Apache-2.0"
] | 5 | 2017-04-06T18:06:56.000Z | 2017-12-19T21:04:29.000Z | tests/create_exchanges.py | jthiltges/gracc-archive | 8c4dca1b1e563983852333d18570dee2a241b48a | [
"Apache-2.0"
] | 3 | 2017-03-21T18:37:10.000Z | 2019-12-02T19:07:22.000Z | import pika
credentials = pika.PlainCredentials("guest", "guest")
parameters = pika.ConnectionParameters("localhost",
5672, "/", credentials)
conn = pika.adapters.blocking_connection.BlockingConnection(parameters)
channel = conn.channel()
channel.exchange_declare(exchange="gracc.osg.raw", exchange_type='fanout', durable=True, auto_delete=False)
| 30.769231 | 107 | 0.695 |
4a2495a218846d9f1f1a72f7686510909c00a853 | 7,747 | py | Python | cumulusci/core/tests/test_tasks.py | 1handclapping/CumulusCI | cb7b061d049c5f05503a4ef23ac198342496a949 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/core/tests/test_tasks.py | 1handclapping/CumulusCI | cb7b061d049c5f05503a4ef23ac198342496a949 | [
"BSD-3-Clause"
] | null | null | null | cumulusci/core/tests/test_tasks.py | 1handclapping/CumulusCI | cb7b061d049c5f05503a4ef23ac198342496a949 | [
"BSD-3-Clause"
] | null | null | null | """ Tests for the CumulusCI task module """
import unittest
import logging
import collections
import mock
from cumulusci.core.tasks import BaseTask
from cumulusci.core.flows import BaseFlow
from cumulusci.core.config import BaseGlobalConfig
from cumulusci.core.config import BaseProjectConfig
from cumulusci.core.config import TaskConfig
from cumulusci.core.config import OrgConfig
from cumulusci.core.config import FlowConfig
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.exceptions import TaskRequiresSalesforceOrg
from cumulusci.core.tests.utils import MockLoggingHandler
import cumulusci.core
ORG_ID = "00D000000000001"
USERNAME = "sample@example"
class _TaskHasResult(BaseTask):
def _run_task(self):
return -1
class _SfdcTask(BaseTask):
salesforce_task = True
def _run_task(self):
return -1
class TestBaseTaskCallable(unittest.TestCase):
""" Tests for the BaseTask callable interface.
BaseTask is a callable interface
BaseTask has return_values and results
BaseTask has basic logging
"""
task_class = BaseTask
@classmethod
def setUpClass(cls):
super(TestBaseTaskCallable, cls).setUpClass()
logger = logging.getLogger(cumulusci.core.tasks.__name__)
logger.setLevel(logging.DEBUG)
cls._task_log_handler = MockLoggingHandler(logging.DEBUG)
logger.addHandler(cls._task_log_handler)
def setUp(self):
self.global_config = BaseGlobalConfig()
self.project_config = BaseProjectConfig(
self.global_config, config={"noyaml": True}
)
self.org_config = OrgConfig({"username": USERNAME, "org_id": ORG_ID}, "test")
self.task_config = TaskConfig()
self._task_log_handler.reset()
self.task_log = self._task_log_handler.messages
@mock.patch("cumulusci.core.tasks.time.sleep", mock.Mock())
def test_retry_on_exception(self):
""" calling _retry() should call try until the task succeeds. """
task_config = TaskConfig(
{"options": {"retries": 5, "retry_interval": 1, "retry_interval_add": 1}}
)
task = BaseTask(self.project_config, task_config, self.org_config)
task._try = mock.Mock(side_effect=[Exception, Exception, 1])
task._retry()
self.assertEqual(task._try.call_count, 3)
@mock.patch("cumulusci.core.tasks.time.sleep", mock.Mock())
def test_retry_until_too_many(self):
""" calling _retry should call try until the retry count is exhausted. """
task_config = TaskConfig(
{"options": {"retries": 5, "retry_interval": 1, "retry_interval_add": 1}}
)
task = BaseTask(self.project_config, task_config, self.org_config)
task._try = mock.Mock(
side_effect=[
RuntimeError(5),
RuntimeError(4),
RuntimeError(3),
RuntimeError(2),
RuntimeError(1),
RuntimeError(0),
]
)
with self.assertRaises(RuntimeError) as cm:
task._retry()
self.assertEqual(cm.exception.args[0], 0) # assert it was the final call
self.assertEqual(task._try.call_count, 6)
self.assertEqual(task.options["retry_interval"], 6)
def test_task_is_callable(self):
""" BaseTask is Callable """
task = self.__class__.task_class(
self.project_config, self.task_config, self.org_config
)
self.assertIsInstance(task, collections.Callable)
def test_option_overrides(self):
task = self.__class__.task_class(
self.project_config, self.task_config, self.org_config, foo="bar"
)
self.assertEqual("bar", task.options["foo"])
def test_dynamic_options(self):
""" Option values can lookup values from project_config """
self.project_config.config["foo"] = {"bar": "baz"}
self.task_config.config["options"] = {"test_option": "$project_config.foo__bar"}
task = self.__class__.task_class(
self.project_config, self.task_config, self.org_config
)
self.assertEqual("baz", task.options["test_option"])
def test_validates_missing_options(self):
class Task(BaseTask):
task_options = {"test_option": {"required": True}}
with self.assertRaises(TaskOptionsError):
task = Task(self.project_config, self.task_config, self.org_config)
def test_get_return_values(self):
""" Callable interface returns retvals """
class _TaskReturnsStuff(BaseTask):
def _run_task(self):
self.return_values["name"] = "return!"
task = _TaskReturnsStuff(self.project_config, self.task_config, self.org_config)
return_values = task()
self.assertIn("name", return_values)
def test_get_task_result(self):
""" Task results available as an instance member """
task = _TaskHasResult(self.project_config, self.task_config, self.org_config)
task()
self.assertEqual(task.result, -1)
def test_task_logs_name_not_org(self):
""" A task logs the task class name to info (and not creds) """
task = _TaskHasResult(self.project_config, self.task_config, self.org_config)
task()
self.assertTrue(any("_TaskHasResult" in s for s in self.task_log["info"]))
self.assertFalse(any(ORG_ID in s for s in self.task_log["info"]))
def test_salesforce_task_logs_org_id(self):
""" A salesforce_task will also log the org id & username """
task = _SfdcTask(self.project_config, self.task_config, self.org_config)
task()
self.assertTrue(any(ORG_ID in s for s in self.task_log["info"]))
def test_salesforce_task_no_org(self):
with self.assertRaises(TaskRequiresSalesforceOrg):
_SfdcTask(self.project_config, self.task_config)
@mock.patch("cumulusci.core.flows.BaseFlow._init_org")
def test_no_id_if_run_from_flow(self, mock_class):
""" A salesforce_task will not log the org id if run from a flow """
mock_class.return_value = None
task = _SfdcTask(
self.project_config,
self.task_config,
self.org_config,
flow=BaseFlow(self.project_config, FlowConfig(), self.org_config),
)
task()
self.assertFalse(any(ORG_ID in s for s in self.task_log["info"]))
def test_run_task(self):
task = BaseTask(self.project_config, self.task_config, self.org_config)
with self.assertRaises(NotImplementedError):
task()
def test_try(self):
task = BaseTask(self.project_config, self.task_config, self.org_config)
with self.assertRaises(NotImplementedError):
task._try()
def test_is_retry_valid(self):
task = BaseTask(self.project_config, self.task_config, self.org_config)
self.assertTrue(task._is_retry_valid(None))
def test_poll_action(self):
task = BaseTask(self.project_config, self.task_config, self.org_config)
with self.assertRaises(NotImplementedError):
task._poll_action()
@mock.patch("cumulusci.core.tasks.time.sleep", mock.Mock())
def test_poll(self):
task = BaseTask(self.project_config, self.task_config, self.org_config)
task.i = 0
def mimic_polling():
task.i += 1
if task.i > 3:
task.poll_complete = True
task._poll_action = mock.Mock(side_effect=mimic_polling)
task._poll()
self.assertEqual(4, task.poll_count)
self.assertEqual(1, task.poll_interval_level)
self.assertEqual(2, task.poll_interval_s)
| 35.374429 | 88 | 0.662966 |
4a2495f19d9a49b418cc8243070b7dfda11aca9c | 593 | py | Python | paxos/2pc/examples/bootstrap.py | Sean10/Algorithm_code | 46ff1cb5b81400cbcc324dabdf4298bf7a55e5eb | [
"BSD-3-Clause"
] | null | null | null | paxos/2pc/examples/bootstrap.py | Sean10/Algorithm_code | 46ff1cb5b81400cbcc324dabdf4298bf7a55e5eb | [
"BSD-3-Clause"
] | 7 | 2021-03-19T04:41:21.000Z | 2021-10-19T15:46:36.000Z | paxos/paxos/examples/bootstrap.py | Sean10/Algorithm_code | 46ff1cb5b81400cbcc324dabdf4298bf7a55e5eb | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2021/1/24 1:59 AM
# @Author : sean10
# @Site :
# @File : bootstrap.py
# @Software: PyCharm
"""
"""
import subprocess
import yaml
import signal
def main():
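    # nodes.yaml is assumed to hold a top-level "nodes" list of "host:port"
    # strings (inferred from the split(':') below); one local node process is
    # spawned per entry.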
with open("nodes.yaml", "r") as f:
nodes = yaml.load(f, yaml.SafeLoader)
for i, node in enumerate(nodes["nodes"]):
# signal.signal(signal.SIGHUP, )
process = subprocess.Popen(f"python3 main.py --hostname node{i} -p {node.split(':')[-1]}", shell=True, stdin=subprocess.DEVNULL)
# process.
if __name__ == "__main__":
main() | 21.962963 | 136 | 0.596965 |
4a2497a62346334654c7b89205b1983c08eee44d | 41,161 | py | Python | conda_build/render.py | DerThorsten/conda-build | 729c0cea03677dae0e2e15b7ec6d98619b5d4401 | [
"BSD-3-Clause"
] | null | null | null | conda_build/render.py | DerThorsten/conda-build | 729c0cea03677dae0e2e15b7ec6d98619b5d4401 | [
"BSD-3-Clause"
] | null | null | null | conda_build/render.py | DerThorsten/conda-build | 729c0cea03677dae0e2e15b7ec6d98619b5d4401 | [
"BSD-3-Clause"
] | null | null | null | # (c) Continuum Analytics, Inc. / http://continuum.io
# All Rights Reserved
#
# conda is distributed under the terms of the BSD 3-clause license.
# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.
from collections import OrderedDict, defaultdict
from locale import getpreferredencoding
import json
import os
from os.path import isdir, isfile, abspath
import random
import re
import shutil
import string
import subprocess
import sys
import tarfile
import tempfile
import yaml
from .conda_interface import (PY3, UnsatisfiableError, ProgressiveFetchExtract,
TemporaryDirectory)
from .conda_interface import execute_actions
from .conda_interface import pkgs_dirs
from .conda_interface import conda_43
from .conda_interface import specs_from_url
from .conda_interface import memoized
from .utils import CONDA_PACKAGE_EXTENSION_V1, CONDA_PACKAGE_EXTENSION_V2
from conda_build import exceptions, utils, environ
from conda_build.metadata import MetaData, combine_top_level_metadata_with_output
import conda_build.source as source
from conda_build.variants import (get_package_variants, list_of_dicts_to_dict_of_lists,
filter_by_key_value)
from conda_build.exceptions import DependencyNeedsBuildingError
from conda_build.index import get_build_index
# from conda_build.jinja_context import pin_subpackage_against_outputs
def odict_representer(dumper, data):
return dumper.represent_dict(data.items())
yaml.add_representer(set, yaml.representer.SafeRepresenter.represent_list)
yaml.add_representer(tuple, yaml.representer.SafeRepresenter.represent_list)
yaml.add_representer(OrderedDict, odict_representer)
def bldpkg_path(m):
'''
Returns path to built package's tarball given its ``Metadata``.
'''
subdir = 'noarch' if m.noarch or m.noarch_python else m.config.host_subdir
if not hasattr(m, 'type'):
if m.config.conda_pkg_format == "2":
pkg_type = "conda_v2"
else:
pkg_type = "conda"
else:
pkg_type = m.type
# the default case will switch over to conda_v2 at some point
if pkg_type == "conda":
path = os.path.join(m.config.output_folder, subdir, f'{m.dist()}{CONDA_PACKAGE_EXTENSION_V1}')
elif pkg_type == "conda_v2":
path = os.path.join(m.config.output_folder, subdir, f'{m.dist()}{CONDA_PACKAGE_EXTENSION_V2}')
else:
path = f'{m.type} file for {m.name()} in: {os.path.join(m.config.output_folder, subdir)}'
return path
def actions_to_pins(actions):
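    """Turn the LINK entries of a conda action plan into exact 'name version build' pin strings."""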
specs = []
if conda_43:
spec_name = lambda x: x.dist_name
else:
spec_name = lambda x: str(x)
if 'LINK' in actions:
specs = [' '.join(spec_name(spec).split()[0].rsplit('-', 2)) for spec in actions['LINK']]
return specs
def _categorize_deps(m, specs, exclude_pattern, variant):
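    """Split specs into subpackage pins (outputs of this recipe), regular dependencies
    (filling in variant versions where none are given), and deps matched by exclude_pattern."""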
subpackages = []
dependencies = []
pass_through_deps = []
dash_or_under = re.compile("[-_]")
# ones that get filtered from actual versioning, to exclude them from the hash calculation
for spec in specs:
if not exclude_pattern or not exclude_pattern.match(spec):
is_subpackage = False
spec_name = spec.split()[0]
for entry in m.get_section('outputs'):
name = entry.get('name')
if name == spec_name:
subpackages.append(' '.join((name, m.version())))
is_subpackage = True
if not is_subpackage:
dependencies.append(spec)
# fill in variant version iff no version at all is provided
for key, value in variant.items():
# for sake of comparison, ignore dashes and underscores
if (dash_or_under.sub("", key) == dash_or_under.sub("", spec_name) and
not re.search(r'%s\s+[0-9a-zA-Z\_\.\<\>\=\*]' % spec_name, spec)):
dependencies.append(" ".join((spec_name, value)))
elif exclude_pattern.match(spec):
pass_through_deps.append(spec)
return subpackages, dependencies, pass_through_deps
def get_env_dependencies(m, env, variant, exclude_pattern=None,
permit_unsatisfiable_variants=False,
merge_build_host_on_same_platform=True):
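    """Solve the requirements of `env` for this variant and return a tuple of
    (specs plus subpackage/pass-through deps, the install action plan, unsatisfiable spec info)."""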
specs = m.get_depends_top_and_out(env)
# replace x.x with our variant's numpy version, or else conda tries to literally go get x.x
if env in ('build', 'host'):
no_xx_specs = []
for spec in specs:
if ' x.x' in spec:
pkg_name = spec.split()[0]
no_xx_specs.append(' '.join((pkg_name, variant.get(pkg_name, ""))))
else:
no_xx_specs.append(spec)
specs = no_xx_specs
subpackages, dependencies, pass_through_deps = _categorize_deps(m, specs, exclude_pattern, variant)
dependencies = set(dependencies)
unsat = None
random_string = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(10))
with TemporaryDirectory(prefix="_", suffix=random_string) as tmpdir:
try:
actions = environ.get_install_actions(tmpdir, tuple(dependencies), env,
subdir=getattr(m.config, f'{env}_subdir'),
debug=m.config.debug,
verbose=m.config.verbose,
locking=m.config.locking,
bldpkgs_dirs=tuple(m.config.bldpkgs_dirs),
timeout=m.config.timeout,
disable_pip=m.config.disable_pip,
max_env_retry=m.config.max_env_retry,
output_folder=m.config.output_folder,
channel_urls=tuple(m.config.channel_urls))
except (UnsatisfiableError, DependencyNeedsBuildingError) as e:
# we'll get here if the environment is unsatisfiable
if hasattr(e, 'packages'):
unsat = ', '.join(e.packages)
else:
unsat = e.message
if permit_unsatisfiable_variants:
actions = {}
else:
raise
specs = actions_to_pins(actions)
return (utils.ensure_list((specs + subpackages + pass_through_deps) or
m.meta.get('requirements', {}).get(env, [])),
actions, unsat)
def strip_channel(spec_str):
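    """Drop any 'channel::' prefix from a spec string."""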
if hasattr(spec_str, 'decode'):
spec_str = spec_str.decode()
if ':' in spec_str:
spec_str = spec_str.split("::")[-1]
return spec_str
def get_pin_from_build(m, dep, build_dep_versions):
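    """Pin a run dependency to the version resolved at build time, honoring
    pin_run_as_build settings and the 'numpy x.x' convention."""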
dep_split = dep.split()
dep_name = dep_split[0]
build = ''
if len(dep_split) >= 3:
build = dep_split[2]
pin = None
version = build_dep_versions.get(dep_name) or m.config.variant.get(dep_name)
if (version and dep_name in m.config.variant.get('pin_run_as_build', {}) and
not (dep_name == 'python' and (m.noarch or m.noarch_python)) and
dep_name in build_dep_versions):
pin_cfg = m.config.variant['pin_run_as_build'][dep_name]
if isinstance(pin_cfg, str):
# if pin arg is a single 'x.x', use the same value for min and max
pin_cfg = dict(min_pin=pin_cfg, max_pin=pin_cfg)
pin = utils.apply_pin_expressions(version.split()[0], **pin_cfg)
elif dep.startswith('numpy') and 'x.x' in dep:
if not build_dep_versions.get(dep_name):
raise ValueError("numpy x.x specified, but numpy not in build requirements.")
pin = utils.apply_pin_expressions(version.split()[0], min_pin='x.x', max_pin='x.x')
if pin:
dep = " ".join((dep_name, pin, build)).strip()
return dep
def _filter_run_exports(specs, ignore_list):
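    """Drop run_exports entries matched by build/ignore_run_exports ('*' ignores everything)."""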
filtered_specs = {}
for agent, specs_list in specs.items():
for spec in specs_list:
if hasattr(spec, 'decode'):
spec = spec.decode()
if not any((ignore_spec == '*' or spec == ignore_spec or
spec.startswith(ignore_spec + ' ')) for ignore_spec in ignore_list):
filtered_specs[agent] = filtered_specs.get(agent, []) + [spec]
return filtered_specs
def find_pkg_dir_or_file_in_pkgs_dirs(pkg_dist, m, files_only=False):
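    """Locate the extracted directory or tarball for pkg_dist in the package caches.
    With files_only=True, a tarball is created on demand from an extracted directory."""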
_pkgs_dirs = pkgs_dirs + list(m.config.bldpkgs_dirs)
pkg_loc = None
for pkgs_dir in _pkgs_dirs:
pkg_dir = os.path.join(pkgs_dir, pkg_dist)
pkg_file = os.path.join(pkgs_dir, pkg_dist + CONDA_PACKAGE_EXTENSION_V1)
if not files_only and os.path.isdir(pkg_dir):
pkg_loc = pkg_dir
break
elif os.path.isfile(pkg_file):
pkg_loc = pkg_file
break
elif files_only and os.path.isdir(pkg_dir):
pkg_loc = pkg_file
# create the tarball on demand. This is so that testing on archives works.
with tarfile.open(pkg_file, 'w:bz2') as archive:
for entry in os.listdir(pkg_dir):
archive.add(os.path.join(pkg_dir, entry), arcname=entry)
pkg_subdir = os.path.join(m.config.croot, m.config.host_subdir)
pkg_loc = os.path.join(pkg_subdir, os.path.basename(pkg_file))
shutil.move(pkg_file, pkg_loc)
return pkg_loc
@memoized
def _read_specs_from_package(pkg_loc, pkg_dist):
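    """Read run_exports metadata (JSON, YAML or legacy plain text) from an extracted
    package directory or a package file."""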
specs = {}
if pkg_loc and os.path.isdir(pkg_loc):
downstream_file = os.path.join(pkg_loc, 'info/run_exports')
if os.path.isfile(downstream_file):
with open(downstream_file) as f:
specs = {'weak': [spec.rstrip() for spec in f.readlines()]}
# a later attempt: record more info in the yaml file, to support "strong" run exports
elif os.path.isfile(downstream_file + '.yaml'):
with open(downstream_file + '.yaml') as f:
specs = yaml.safe_load(f)
elif os.path.isfile(downstream_file + '.json'):
with open(downstream_file + '.json') as f:
specs = json.load(f)
if not specs and pkg_loc and os.path.isfile(pkg_loc):
# switching to json for consistency in conda-build 4
specs_yaml = utils.package_has_file(pkg_loc, 'info/run_exports.yaml')
specs_json = utils.package_has_file(pkg_loc, 'info/run_exports.json')
if hasattr(specs_json, "decode"):
specs_json = specs_json.decode("utf-8")
if specs_json:
specs = json.loads(specs_json)
elif specs_yaml:
specs = yaml.safe_load(specs_yaml)
else:
legacy_specs = utils.package_has_file(pkg_loc, 'info/run_exports')
# exclude packages pinning themselves (makes no sense)
if legacy_specs:
weak_specs = set()
if hasattr(pkg_dist, "decode"):
pkg_dist = pkg_dist.decode("utf-8")
for spec in legacy_specs.splitlines():
if hasattr(spec, "decode"):
spec = spec.decode("utf-8")
if not spec.startswith(pkg_dist.rsplit('-', 2)[0]):
weak_specs.add(spec.rstrip())
specs = {'weak': sorted(list(weak_specs))}
return specs
def execute_download_actions(m, actions, env, package_subset=None, require_files=False):
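    """Download the packages referenced by the action plan (optionally restricted to
    package_subset) and return a mapping of package -> (location, dist name)."""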
index, _, _ = get_build_index(getattr(m.config, f'{env}_subdir'), bldpkgs_dir=m.config.bldpkgs_dir,
output_folder=m.config.output_folder, channel_urls=m.config.channel_urls,
debug=m.config.debug, verbose=m.config.verbose, locking=m.config.locking,
timeout=m.config.timeout)
# this should be just downloading packages. We don't need to extract them -
download_actions = {k: v for k, v in actions.items() if k in ('FETCH', 'EXTRACT', 'PREFIX')}
if 'FETCH' in actions or 'EXTRACT' in actions:
# this is to force the download
execute_actions(download_actions, index, verbose=m.config.debug)
pkg_files = {}
packages = actions.get('LINK', [])
package_subset = utils.ensure_list(package_subset)
selected_packages = set()
if package_subset:
for pkg in package_subset:
if hasattr(pkg, 'name'):
if pkg in packages:
selected_packages.add(pkg)
else:
pkg_name = pkg.split()[0]
for link_pkg in packages:
if pkg_name == link_pkg.name:
selected_packages.add(link_pkg)
break
packages = selected_packages
for pkg in packages:
if hasattr(pkg, 'dist_name'):
pkg_dist = pkg.dist_name
else:
pkg = strip_channel(pkg)
pkg_dist = pkg.split(' ')[0]
pkg_loc = find_pkg_dir_or_file_in_pkgs_dirs(pkg_dist, m, files_only=require_files)
# ran through all pkgs_dirs, and did not find package or folder. Download it.
# TODO: this is a vile hack reaching into conda's internals. Replace with
# proper conda API when available.
if not pkg_loc and conda_43:
try:
pkg_record = [_ for _ in index if _.dist_name == pkg_dist][0]
# the conda 4.4 API uses a single `link_prefs` kwarg
# whereas conda 4.3 used `index` and `link_dists` kwargs
pfe = ProgressiveFetchExtract(link_prefs=(index[pkg_record],))
except TypeError:
# TypeError: __init__() got an unexpected keyword argument 'link_prefs'
pfe = ProgressiveFetchExtract(link_dists=[pkg], index=index)
with utils.LoggingContext():
pfe.execute()
for pkg_dir in pkgs_dirs:
_loc = os.path.join(pkg_dir, index[pkg].fn)
if os.path.isfile(_loc):
pkg_loc = _loc
break
pkg_files[pkg] = pkg_loc, pkg_dist
return pkg_files
def get_upstream_pins(m, actions, env):
"""Download packages from specs, then inspect each downloaded package for additional
downstream dependency specs. Return these additional specs."""
env_specs = m.meta.get('requirements', {}).get(env, [])
explicit_specs = [req.split(' ')[0] for req in env_specs] if env_specs else []
linked_packages = actions.get('LINK', [])
linked_packages = [pkg for pkg in linked_packages if pkg.name in explicit_specs]
ignore_pkgs_list = utils.ensure_list(m.get_value('build/ignore_run_exports_from'))
ignore_list = utils.ensure_list(m.get_value('build/ignore_run_exports'))
additional_specs = {}
for pkg in linked_packages:
if any(pkg.name in req.split(' ')[0] for req in ignore_pkgs_list):
continue
run_exports = None
if m.config.use_channeldata:
channeldata = utils.download_channeldata(pkg.channel)
# only use channeldata if requested, channeldata exists and contains
# a packages key, otherwise use run_exports from the packages themselves
if 'packages' in channeldata:
pkg_data = channeldata['packages'].get(pkg.name, {})
run_exports = pkg_data.get('run_exports', {}).get(pkg.version, {})
if run_exports is None:
loc, dist = execute_download_actions(m, actions, env=env, package_subset=pkg)[pkg]
run_exports = _read_specs_from_package(loc, dist)
specs = _filter_run_exports(run_exports, ignore_list)
if specs:
additional_specs = utils.merge_dicts_of_lists(additional_specs, specs)
return additional_specs
def _read_upstream_pin_files(m, env, permit_unsatisfiable_variants, exclude_pattern):
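    """Resolve the dependencies of `env` and collect the run_exports provided by the
    packages that end up in that environment."""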
deps, actions, unsat = get_env_dependencies(m, env, m.config.variant,
exclude_pattern,
permit_unsatisfiable_variants=permit_unsatisfiable_variants)
# extend host deps with strong build run exports. This is important for things like
# vc feature activation to work correctly in the host env.
extra_run_specs = get_upstream_pins(m, actions, env)
return list(set(deps)) or m.meta.get('requirements', {}).get(env, []), unsat, extra_run_specs
def add_upstream_pins(m, permit_unsatisfiable_variants, exclude_pattern):
"""Applies run_exports from any build deps to host and run sections"""
# if we have host deps, they're more important than the build deps.
requirements = m.meta.get('requirements', {})
build_deps, build_unsat, extra_run_specs_from_build = _read_upstream_pin_files(m, 'build',
permit_unsatisfiable_variants, exclude_pattern)
# is there a 'host' section?
if m.is_cross:
# this must come before we read upstream pins, because it will enforce things
# like vc version from the compiler.
host_reqs = utils.ensure_list(m.get_value('requirements/host'))
# ensure host_reqs is present, so in-place modification below is actually in-place
requirements = m.meta.setdefault('requirements', {})
requirements['host'] = host_reqs
if not host_reqs:
matching_output = [out for out in m.meta.get('outputs', []) if
out.get('name') == m.name()]
if matching_output:
requirements = utils.expand_reqs(matching_output[0].get('requirements', {}))
matching_output[0]['requirements'] = requirements
host_reqs = requirements.setdefault('host', [])
# in-place modification of above thingie
host_reqs.extend(extra_run_specs_from_build.get('strong', []))
host_deps, host_unsat, extra_run_specs_from_host = _read_upstream_pin_files(m, 'host',
permit_unsatisfiable_variants, exclude_pattern)
if m.noarch or m.noarch_python:
extra_run_specs = set(extra_run_specs_from_host.get('noarch', []))
extra_run_constrained_specs = set()
else:
extra_run_specs = set(extra_run_specs_from_host.get('strong', []) +
extra_run_specs_from_host.get('weak', []) +
extra_run_specs_from_build.get('strong', []))
extra_run_constrained_specs = set(
extra_run_specs_from_host.get('strong_constrains', []) +
extra_run_specs_from_host.get('weak_constrains', []) +
extra_run_specs_from_build.get('strong_constrains', [])
)
else:
host_deps = []
host_unsat = []
if m.noarch or m.noarch_python:
if m.build_is_host:
extra_run_specs = set(extra_run_specs_from_build.get('noarch', []))
extra_run_constrained_specs = set()
                # set.update() returns None, so take the union to keep the combined set
                build_deps = set(build_deps or []).union(extra_run_specs_from_build.get('noarch', []))
else:
extra_run_specs = set()
extra_run_constrained_specs = set()
build_deps = set(build_deps or [])
else:
extra_run_specs = set(extra_run_specs_from_build.get('strong', []))
extra_run_constrained_specs = set(extra_run_specs_from_build.get('strong_constrains', []))
if m.build_is_host:
extra_run_specs.update(extra_run_specs_from_build.get('weak', []))
extra_run_constrained_specs.update(extra_run_specs_from_build.get('weak_constrains', []))
                # set.update() returns None, so take the union to keep the combined set
                build_deps = set(build_deps or []).union(extra_run_specs_from_build.get('weak', []))
else:
host_deps = set(extra_run_specs_from_build.get('strong', []))
run_deps = extra_run_specs | set(utils.ensure_list(requirements.get('run')))
run_constrained_deps = extra_run_constrained_specs | set(utils.ensure_list(requirements.get('run_constrained')))
for section, deps in (
('build', build_deps), ('host', host_deps), ('run', run_deps), ('run_constrained', run_constrained_deps),
):
if deps:
requirements[section] = list(deps)
m.meta['requirements'] = requirements
return build_unsat, host_unsat
def _simplify_to_exact_constraints(metadata):
"""
For metapackages that are pinned exactly, we want to bypass all dependencies that may
be less exact.
"""
requirements = metadata.meta.get('requirements', {})
# collect deps on a per-section basis
for section in 'build', 'host', 'run':
deps = utils.ensure_list(requirements.get(section, []))
deps_dict = defaultdict(list)
for dep in deps:
spec_parts = utils.ensure_valid_spec(dep).split()
name = spec_parts[0]
if len(spec_parts) > 1:
deps_dict[name].append(spec_parts[1:])
else:
deps_dict[name].append([])
deps_list = []
for name, values in deps_dict.items():
exact_pins = []
for dep in values:
if len(dep) > 1:
version, build = dep[:2]
if not (any(c in version for c in ('>', '<', '*')) or '*' in build):
exact_pins.append(dep)
if len(values) == 1 and not any(values):
deps_list.append(name)
elif exact_pins:
if not all(pin == exact_pins[0] for pin in exact_pins):
raise ValueError(f"Conflicting exact pins: {exact_pins}")
else:
deps_list.append(' '.join([name] + exact_pins[0]))
else:
deps_list.extend(' '.join([name] + dep) for dep in values if dep)
if section in requirements and deps_list:
requirements[section] = deps_list
metadata.meta['requirements'] = requirements
def finalize_metadata(m, parent_metadata=None, permit_unsatisfiable_variants=False):
"""Fully render a recipe. Fill in versions for build/host dependencies."""
if not parent_metadata:
parent_metadata = m
if m.skip():
m.final = True
else:
exclude_pattern = None
excludes = set(m.config.variant.get('ignore_version', []))
for key in m.config.variant.get('pin_run_as_build', {}).keys():
if key in excludes:
excludes.remove(key)
output_excludes = set()
if hasattr(m, 'other_outputs'):
output_excludes = {name for (name, variant) in m.other_outputs.keys()}
if excludes or output_excludes:
exclude_pattern = re.compile(r'|'.join(fr'(?:^{exc}(?:\s|$|\Z))'
for exc in excludes | output_excludes))
parent_recipe = m.meta.get('extra', {}).get('parent_recipe', {})
# extract the topmost section where variables are defined, and put it on top of the
# requirements for a particular output
# Re-parse the output from the original recipe, so that we re-consider any jinja2 stuff
output = parent_metadata.get_rendered_output(m.name(), variant=m.config.variant)
is_top_level = True
if output:
if 'package' in output or 'name' not in output:
# it's just a top-level recipe
output = {'name': m.name()}
else:
is_top_level = False
if not parent_recipe or parent_recipe['name'] == m.name():
combine_top_level_metadata_with_output(m, output)
requirements = utils.expand_reqs(output.get('requirements', {}))
m.meta['requirements'] = requirements
if m.meta.get('requirements'):
utils.insert_variant_versions(m.meta['requirements'],
m.config.variant, 'build')
utils.insert_variant_versions(m.meta['requirements'],
m.config.variant, 'host')
m = parent_metadata.get_output_metadata(m.get_rendered_output(m.name()))
build_unsat, host_unsat = add_upstream_pins(m,
permit_unsatisfiable_variants,
exclude_pattern)
# getting this AFTER add_upstream_pins is important, because that function adds deps
# to the metadata.
requirements = m.meta.get('requirements', {})
# here's where we pin run dependencies to their build time versions. This happens based
# on the keys in the 'pin_run_as_build' key in the variant, which is a list of package
# names to have this behavior.
if output_excludes:
exclude_pattern = re.compile(r'|'.join(fr'(?:^{exc}(?:\s|$|\Z))'
for exc in output_excludes))
pinning_env = 'host' if m.is_cross else 'build'
build_reqs = requirements.get(pinning_env, [])
# if python is in the build specs, but doesn't have a specific associated
# version, make sure to add one
if build_reqs and 'python' in build_reqs:
build_reqs.append('python {}'.format(m.config.variant['python']))
m.meta['requirements'][pinning_env] = build_reqs
full_build_deps, _, _ = get_env_dependencies(m, pinning_env,
m.config.variant,
exclude_pattern=exclude_pattern,
permit_unsatisfiable_variants=permit_unsatisfiable_variants)
full_build_dep_versions = {dep.split()[0]: " ".join(dep.split()[1:])
for dep in full_build_deps}
if isfile(m.requirements_path) and not requirements.get('run'):
requirements['run'] = specs_from_url(m.requirements_path)
run_deps = requirements.get('run', [])
versioned_run_deps = [get_pin_from_build(m, dep, full_build_dep_versions)
for dep in run_deps]
versioned_run_deps = [utils.ensure_valid_spec(spec, warn=True)
for spec in versioned_run_deps]
requirements[pinning_env] = full_build_deps
requirements['run'] = versioned_run_deps
m.meta['requirements'] = requirements
# append other requirements, such as python.app, appropriately
m.append_requirements()
if m.pin_depends == 'strict':
m.meta['requirements']['run'] = environ.get_pinned_deps(
m, 'run')
test_deps = m.get_value('test/requires')
if test_deps:
versioned_test_deps = list({get_pin_from_build(m, dep, full_build_dep_versions)
for dep in test_deps})
versioned_test_deps = [utils.ensure_valid_spec(spec, warn=True)
for spec in versioned_test_deps]
m.meta['test']['requires'] = versioned_test_deps
extra = m.meta.get('extra', {})
extra['copy_test_source_files'] = m.config.copy_test_source_files
m.meta['extra'] = extra
# if source/path is relative, then the output package makes no sense at all. The next
# best thing is to hard-code the absolute path. This probably won't exist on any
# system other than the original build machine, but at least it will work there.
if m.meta.get('source'):
if 'path' in m.meta['source']:
source_path = m.meta['source']['path']
            source_path = os.path.expanduser(source_path)
if not os.path.isabs(source_path):
m.meta['source']['path'] = os.path.normpath(
os.path.join(m.path, source_path))
elif ('git_url' in m.meta['source'] and not (
# absolute paths are not relative paths
os.path.isabs(m.meta['source']['git_url']) or
# real urls are not relative paths
":" in m.meta['source']['git_url'])):
m.meta['source']['git_url'] = os.path.normpath(
os.path.join(m.path, m.meta['source']['git_url']))
if not m.meta.get('build'):
m.meta['build'] = {}
_simplify_to_exact_constraints(m)
if build_unsat or host_unsat:
m.final = False
log = utils.get_logger(__name__)
log.warn("Returning non-final recipe for {}; one or more dependencies "
"was unsatisfiable:".format(m.dist()))
if build_unsat:
log.warn(f"Build: {build_unsat}")
if host_unsat:
log.warn(f"Host: {host_unsat}")
else:
m.final = True
if is_top_level:
parent_metadata = m
return m
def try_download(metadata, no_download_source, raise_error=False):
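    """Provide the recipe source if it is missing, warning or raising when that is not possible."""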
if not metadata.source_provided and not no_download_source:
# this try/catch is for when the tool to download source is actually in
# meta.yaml, and not previously installed in builder env.
try:
source.provide(metadata)
except subprocess.CalledProcessError as error:
print("Warning: failed to download source. If building, will try "
"again after downloading recipe dependencies.")
print("Error was: ")
print(error)
if not metadata.source_provided:
if no_download_source:
raise ValueError("no_download_source specified, but can't fully render recipe without"
" downloading source. Please fix the recipe, or don't use "
"no_download_source.")
elif raise_error:
raise RuntimeError("Failed to download or patch source. Please see build log for info.")
def reparse(metadata):
"""Some things need to be parsed again after the build environment has been created
and activated."""
metadata.final = False
sys.path.insert(0, metadata.config.build_prefix)
sys.path.insert(0, metadata.config.host_prefix)
py_ver = '.'.join(metadata.config.variant['python'].split('.')[:2])
sys.path.insert(0, utils.get_site_packages(metadata.config.host_prefix, py_ver))
metadata.parse_until_resolved()
metadata = finalize_metadata(metadata)
return metadata
def distribute_variants(metadata, variants, permit_unsatisfiable_variants=False,
allow_no_other_outputs=False, bypass_env_check=False):
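    """Render one metadata copy per variant actually used by the recipe; returns a list of
    (metadata, need_source_download, need_reparse_in_env) tuples."""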
rendered_metadata = {}
need_source_download = True
# don't bother distributing python if it's a noarch package, and figure out
# which python version we prefer. `python_age` can use used to tweak which
# python gets used here.
if metadata.noarch or metadata.noarch_python:
from .conda_interface import VersionOrder
age = int(metadata.get_value('build/noarch_python_build_age', metadata.config.noarch_python_build_age))
versions = []
for variant in variants:
if 'python' in variant:
vo = variant['python']
if vo not in versions:
versions.append(vo)
version_indices = sorted(range(len(versions)), key=lambda k: VersionOrder(versions[k].split(' ')[0]))
if age < 0:
age = 0
elif age > len(versions) - 1:
age = len(versions) - 1
build_ver = versions[version_indices[len(versions) - 1 - age]]
variants = filter_by_key_value(variants, 'python', build_ver,
'noarch_python_reduction')
# store these for reference later
metadata.config.variants = variants
# These are always the full set. just 'variants' is the one that gets
# used mostly, and can be reduced
metadata.config.input_variants = variants
recipe_requirements = metadata.extract_requirements_text()
recipe_package_and_build_text = metadata.extract_package_and_build_text()
recipe_text = recipe_package_and_build_text + recipe_requirements
if PY3 and hasattr(recipe_text, 'decode'):
recipe_text = recipe_text.decode()
elif not PY3 and hasattr(recipe_text, 'encode'):
recipe_text = recipe_text.encode()
metadata.config.variant = variants[0]
used_variables = metadata.get_used_loop_vars(force_global=False)
top_loop = metadata.get_reduced_variant_set(used_variables)
for variant in top_loop:
from conda_build.build import get_all_replacements
get_all_replacements(variant)
mv = metadata.copy()
mv.config.variant = variant
pin_run_as_build = variant.get('pin_run_as_build', {})
if mv.numpy_xx and 'numpy' not in pin_run_as_build:
pin_run_as_build['numpy'] = {'min_pin': 'x.x', 'max_pin': 'x.x'}
conform_dict = {}
for key in used_variables:
# We use this variant in the top-level recipe.
# constrain the stored variants to only this version in the output
# variant mapping
conform_dict[key] = variant[key]
for key, values in conform_dict.items():
mv.config.variants = (filter_by_key_value(mv.config.variants, key, values,
'distribute_variants_reduction') or
mv.config.variants)
get_all_replacements(mv.config.variants)
pin_run_as_build = variant.get('pin_run_as_build', {})
if mv.numpy_xx and 'numpy' not in pin_run_as_build:
pin_run_as_build['numpy'] = {'min_pin': 'x.x', 'max_pin': 'x.x'}
numpy_pinned_variants = []
for _variant in mv.config.variants:
_variant['pin_run_as_build'] = pin_run_as_build
numpy_pinned_variants.append(_variant)
mv.config.variants = numpy_pinned_variants
mv.config.squished_variants = list_of_dicts_to_dict_of_lists(mv.config.variants)
if mv.needs_source_for_render and mv.variant_in_source:
mv.parse_again()
utils.rm_rf(mv.config.work_dir)
source.provide(mv)
mv.parse_again()
try:
mv.parse_until_resolved(allow_no_other_outputs=allow_no_other_outputs,
bypass_env_check=bypass_env_check)
except SystemExit:
pass
need_source_download = (not mv.needs_source_for_render or not mv.source_provided)
rendered_metadata[(mv.dist(),
mv.config.variant.get('target_platform', mv.config.subdir),
tuple((var, mv.config.variant.get(var))
for var in mv.get_used_vars()))] = \
(mv, need_source_download, None)
# list of tuples.
# each tuple item is a tuple of 3 items:
# metadata, need_download, need_reparse_in_env
return list(rendered_metadata.values())
def expand_outputs(metadata_tuples):
"""Obtain all metadata objects for all outputs from recipe. Useful for outputting paths."""
expanded_outputs = OrderedDict()
for (_m, download, reparse) in metadata_tuples:
from conda_build.build import get_all_replacements
get_all_replacements(_m.config)
from copy import deepcopy
for (output_dict, m) in deepcopy(_m).get_output_metadata_set(permit_unsatisfiable_variants=False):
get_all_replacements(m.config)
expanded_outputs[m.dist()] = (output_dict, m)
return list(expanded_outputs.values())
def render_recipe(recipe_path, config, no_download_source=False, variants=None,
permit_unsatisfiable_variants=True, reset_build_id=True, bypass_env_check=False):
"""Returns a list of tuples, each consisting of
(metadata-object, needs_download, needs_render_in_env)
You get one tuple per variant. Outputs are not factored in here (subpackages won't affect these
results returned here.)
"""
arg = recipe_path
# Don't use byte literals for paths in Python 2
if not PY3:
arg = arg.decode(getpreferredencoding() or 'utf-8')
if isfile(arg):
if arg.endswith(('.tar', '.tar.gz', '.tgz', '.tar.bz2')):
recipe_dir = tempfile.mkdtemp()
t = tarfile.open(arg, 'r:*')
t.extractall(path=recipe_dir)
t.close()
need_cleanup = True
elif arg.endswith('.yaml'):
recipe_dir = os.path.dirname(arg)
need_cleanup = False
else:
print("Ignoring non-recipe: %s" % arg)
return None, None
else:
recipe_dir = abspath(arg)
need_cleanup = False
if not isdir(recipe_dir):
sys.exit("Error: no such directory: %s" % recipe_dir)
try:
m = MetaData(recipe_dir, config=config)
except exceptions.YamlParsingError as e:
sys.stderr.write(e.error_msg())
sys.exit(1)
rendered_metadata = {}
# important: set build id *before* downloading source. Otherwise source goes into a different
# build folder.
if config.set_build_id:
m.config.compute_build_id(m.name(), m.version(), reset=reset_build_id)
# this source may go into a folder that doesn't match the eventual build folder.
# There's no way around it AFAICT. We must download the source to be able to render
# the recipe (from anything like GIT_FULL_HASH), but we can't know the final build
# folder until rendering is complete, because package names can have variant jinja2 in them.
if m.needs_source_for_render and not m.source_provided:
try_download(m, no_download_source=no_download_source)
if m.final:
if not hasattr(m.config, 'variants') or not m.config.variant:
m.config.ignore_system_variants = True
if os.path.isfile(os.path.join(m.path, 'conda_build_config.yaml')):
m.config.variant_config_files = [os.path.join(m.path, 'conda_build_config.yaml')]
m.config.variants = get_package_variants(m, variants=variants)
m.config.variant = m.config.variants[0]
rendered_metadata = [(m, False, False), ]
else:
# merge any passed-in variants with any files found
variants = get_package_variants(m, variants=variants)
# when building, we don't want to fully expand all outputs into metadata, only expand
# whatever variants we have (i.e. expand top-level variants, not output-only variants)
rendered_metadata = distribute_variants(m, variants,
permit_unsatisfiable_variants=permit_unsatisfiable_variants,
allow_no_other_outputs=True, bypass_env_check=bypass_env_check)
if need_cleanup:
utils.rm_rf(recipe_dir)
return rendered_metadata
# Keep this out of the function below so it can be imported by other modules.
FIELDS = ["package", "source", "build", "requirements", "test", "app", "outputs", "about", "extra"]
# Next bit of stuff is to support YAML output in the order we expect.
# http://stackoverflow.com/a/17310199/1170370
class _MetaYaml(dict):
fields = FIELDS
def to_omap(self):
return [(field, self[field]) for field in _MetaYaml.fields if field in self]
def _represent_omap(dumper, data):
return dumper.represent_mapping('tag:yaml.org,2002:map', data.to_omap())
def _unicode_representer(dumper, uni):
node = yaml.ScalarNode(tag='tag:yaml.org,2002:str', value=uni)
return node
class _IndentDumper(yaml.Dumper):
def increase_indent(self, flow=False, indentless=False):
return super().increase_indent(flow, False)
def ignore_aliases(self, data):
return True
yaml.add_representer(_MetaYaml, _represent_omap)
if PY3:
yaml.add_representer(str, _unicode_representer)
unicode = None # silence pyflakes about unicode not existing in py3
else:
yaml.add_representer(unicode, _unicode_representer)
def output_yaml(metadata, filename=None, suppress_outputs=False):
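    """Dump the rendered metadata back out as meta.yaml-style YAML, optionally writing it to a file."""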
local_metadata = metadata.copy()
if suppress_outputs and local_metadata.is_output and 'outputs' in local_metadata.meta:
del local_metadata.meta['outputs']
output = yaml.dump(_MetaYaml(local_metadata.meta), Dumper=_IndentDumper,
default_flow_style=False, indent=2)
if filename:
if any(sep in filename for sep in ('\\', '/')):
try:
os.makedirs(os.path.dirname(filename))
except OSError:
pass
with open(filename, "w") as f:
f.write(output)
return "Wrote yaml to %s" % filename
else:
return output
| 44.5948 | 116 | 0.616287 |
4a2497a68950887344ae1b3b96494b533bb0b85a | 1,107 | py | Python | script/export.py | rwhogg/what-the-hex | d4753aed76dfc792f46a5f4438318d19bf135581 | [
"Apache-2.0"
] | 2 | 2021-01-02T16:31:39.000Z | 2021-01-22T14:42:53.000Z | script/export.py | rwhogg/what-the-hex | d4753aed76dfc792f46a5f4438318d19bf135581 | [
"Apache-2.0"
] | 16 | 2021-01-30T17:50:21.000Z | 2021-05-26T14:45:34.000Z | script/export.py | rwhogg/what-the-hex | d4753aed76dfc792f46a5f4438318d19bf135581 | [
"Apache-2.0"
] | null | null | null | # Run like this: python3 script/export.py [platforms...]
import os.path
import re
import sys
if sys.version_info < (3, 6):
print("Run with Python 3 please")
sys.exit(1)
from configparser import ConfigParser
from os import chdir, getenv, mkdir
from subprocess import Popen
chdir("project")
if len(sys.argv) > 1:
export_names = filter(lambda s: not (s.startswith("python") or s.endswith(".py")), sys.argv)
else:
print("Exporting all")
cfg = ConfigParser()
cfg.read("export_presets.cfg")
exports = filter(lambda section: re.match(r"^preset.\d$", section) is not None, cfg.sections())
export_names = map(lambda export: cfg.get(export, "name").replace('"', ''), exports)
godot = getenv("GODOT")
if godot is None:
print("Set the environment variable GODOT")
sys.exit(1)
for export_name in export_names:
try:
mkdir(os.path.join("..", "exports", export_name))
except Exception:
print("Directory already exists")
print("Exporting for " + export_name)
process = Popen([godot, "--no-window", "--export", export_name])
process.wait(30)
| 28.384615 | 99 | 0.67299 |
4a2498206e673ae98df1677ea94aad18de2ce330 | 9,770 | py | Python | Deprecated/PythonClient/client_example.py | ruichen-v/carla | 387c448556c79d838ae0c2cb8a6caad3a78c3eba | [
"MIT"
] | 9 | 2019-05-20T05:17:25.000Z | 2021-10-31T08:31:32.000Z | Deprecated/PythonClient/client_example.py | ruichen-v/carla | 387c448556c79d838ae0c2cb8a6caad3a78c3eba | [
"MIT"
] | null | null | null | Deprecated/PythonClient/client_example.py | ruichen-v/carla | 387c448556c79d838ae0c2cb8a6caad3a78c3eba | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
"""Basic CARLA client example."""
from __future__ import print_function
import argparse
import logging
import random
import time
from carla.client import make_carla_client
from carla.sensor import Camera, Lidar
from carla.settings import CarlaSettings
from carla.tcp import TCPConnectionError
from carla.util import print_over_same_line
def run_carla_client(args):
# Here we will run 3 episodes with 300 frames each.
number_of_episodes = 1
frames_per_episode = 15000000
# We assume the CARLA server is already waiting for a client to connect at
# host:port. To create a connection we can use the `make_carla_client`
# context manager, it creates a CARLA client object and starts the
# connection. It will throw an exception if something goes wrong. The
# context manager makes sure the connection is always cleaned up on exit.
with make_carla_client(args.host, args.port) as client:
print('CarlaClient connected')
for episode in range(0, number_of_episodes):
# Start a new episode.
if args.settings_filepath is None:
# Create a CarlaSettings object. This object is a wrapper around
# the CarlaSettings.ini file. Here we set the configuration we
# want for the new episode.
settings = CarlaSettings()
settings.set(
SynchronousMode=True,
SendNonPlayerAgentsInfo=True,
NumberOfVehicles=0,
NumberOfPedestrians=0,
WeatherId=random.choice([1,]),
QualityLevel=args.quality_level)
settings.randomize_seeds()
# Now we want to add a couple of cameras to the player vehicle.
# We will collect the images produced by these cameras every
# frame.
# The default camera captures RGB images of the scene.
camera0 = Camera('CameraRGB')
# Set image resolution in pixels.
camera0.set_image_size(800, 600)
# Set its position relative to the car in meters.
camera0.set_position(0.30, 0, 1.30)
settings.add_sensor(camera0)
# Let's add another camera producing ground-truth depth.
camera1 = Camera('CameraDepth', PostProcessing='Depth')
camera1.set_image_size(800, 600)
camera1.set_position(0.30, 0, 1.30)
settings.add_sensor(camera1)
if args.lidar:
lidar = Lidar('Lidar32')
lidar.set_position(0, 0, 2.50)
lidar.set_rotation(0, 0, 0)
lidar.set(
Channels=32,
Range=50,
PointsPerSecond=100000,
RotationFrequency=10,
UpperFovLimit=10,
LowerFovLimit=-30)
settings.add_sensor(lidar)
else:
# Alternatively, we can load these settings from a file.
with open(args.settings_filepath, 'r') as fp:
settings = fp.read()
# Now we load these settings into the server. The server replies
# with a scene description containing the available start spots for
# the player. Here we can provide a CarlaSettings object or a
# CarlaSettings.ini file as string.
scene = client.load_settings(settings)
# Choose one player start at random.
number_of_player_starts = len(scene.player_start_spots)
player_start = random.randint(0, max(0, number_of_player_starts - 1))
# Notify the server that we want to start the episode at the
# player_start index. This function blocks until the server is ready
# to start the episode.
print('Starting new episode at %r...' % scene.map_name)
client.start_episode(0) # MARK modified from player_start
# Iterate every frame in the episode.
for frame in range(0, frames_per_episode):
# Read the data produced by the server this frame.
measurements, sensor_data = client.read_data()
# Print some of the measurements.
print_measurements(measurements)
# Save the images to disk if requested.
if args.save_images_to_disk:
for name, measurement in sensor_data.items():
filename = args.out_filename_format.format(episode, name, frame)
measurement.save_to_disk(filename)
# We can access the encoded data of a given image as numpy
# array using its "data" property. For instance, to get the
# depth value (normalized) at pixel X, Y
#
# depth_array = sensor_data['CameraDepth'].data
# value_at_pixel = depth_array[Y, X]
#
# Now we have to send the instructions to control the vehicle.
# If we are in synchronous mode the server will pause the
# simulation until we send this control.
if not args.autopilot:
client.send_control(
steer=random.uniform(-1.0, 1.0),
throttle=0.5,
brake=0.0,
hand_brake=False,
reverse=False)
else:
# Together with the measurements, the server has sent the
# control that the in-game autopilot would do this frame. We
# can enable autopilot by sending back this control to the
# server. We can modify it if wanted, here for instance we
# will add some noise to the steer.
control = measurements.player_measurements.autopilot_control
# MARK disable stochastic control
# control.steer += random.uniform(-0.1, 0.1)
# control.throttle *= 4
client.send_control(control)
def print_measurements(measurements):
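    """Build a one-line summary of the player's position, speed, collisions and lane state."""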
number_of_agents = len(measurements.non_player_agents)
player_measurements = measurements.player_measurements
message = 'Vehicle at ({pos_x:.1f}, {pos_y:.1f}), '
message += '{speed:.0f} km/h, '
message += 'Collision: {{vehicles={col_cars:.0f}, pedestrians={col_ped:.0f}, other={col_other:.0f}}}, '
message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road, '
message += '({agents_num:d} non-player agents in the scene)'
message = message.format(
pos_x=player_measurements.transform.location.x,
pos_y=player_measurements.transform.location.y,
speed=player_measurements.forward_speed * 3.6, # m/s -> km/h
col_cars=player_measurements.collision_vehicles,
col_ped=player_measurements.collision_pedestrians,
col_other=player_measurements.collision_other,
other_lane=100 * player_measurements.intersection_otherlane,
offroad=100 * player_measurements.intersection_offroad,
agents_num=number_of_agents)
# print_over_same_line(message) # MARK uncomment to enable in-game display
def main():
argparser = argparse.ArgumentParser(description=__doc__)
argparser.add_argument(
'-v', '--verbose',
action='store_true',
dest='debug',
help='print debug information')
argparser.add_argument(
'--host',
metavar='H',
default='localhost',
help='IP of the host server (default: localhost)')
argparser.add_argument(
'-p', '--port',
metavar='P',
default=2000,
type=int,
help='TCP port to listen to (default: 2000)')
argparser.add_argument(
'-a', '--autopilot',
action='store_true',
help='enable autopilot')
argparser.add_argument(
'-l', '--lidar',
action='store_true',
help='enable Lidar')
argparser.add_argument(
'-q', '--quality-level',
choices=['Low', 'Epic'],
type=lambda s: s.title(),
default='Epic',
help='graphics quality level, a lower level makes the simulation run considerably faster.')
argparser.add_argument(
'-i', '--images-to-disk',
action='store_true',
dest='save_images_to_disk',
help='save images (and Lidar data if active) to disk')
argparser.add_argument(
'-c', '--carla-settings',
metavar='PATH',
dest='settings_filepath',
default=None,
help='Path to a "CarlaSettings.ini" file')
args = argparser.parse_args()
log_level = logging.DEBUG if args.debug else logging.INFO
logging.basicConfig(format='%(levelname)s: %(message)s', level=log_level)
logging.info('listening to server %s:%s', args.host, args.port)
args.out_filename_format = '_out/episode_{:0>4d}/{:s}/{:0>6d}'
while True:
try:
run_carla_client(args)
print('Done.')
return
except TCPConnectionError as error:
logging.error(error)
time.sleep(1)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
print('\nCancelled by user. Bye!')
| 38.616601 | 107 | 0.590993 |
4a2498fe7b1c53c1bc2be4f370e357a627d61516 | 3,943 | py | Python | examples/inheritance2.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | examples/inheritance2.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | examples/inheritance2.py | hugovk/XlsxWriter | e97cc66637d9895480ee32cfb5e561d652d3787b | [
"BSD-2-Clause"
] | null | null | null | ##############################################################################
#
# Example of how to subclass the Workbook and Worksheet objects. See also the
# simpler inheritance1.py example.
#
# In this example we see an approach to implementing a simulated autofit in a
# user application. This works by overriding the write_string() method to
# track the maximum width string in each column and then set the column
# widths.
#
# Note: THIS ISN'T A FULLY FUNCTIONAL AUTOFIT EXAMPLE. It is only a proof or
# concept or a framework to try out solutions.
#
# SPDX-License-Identifier: BSD-2-Clause
# Copyright 2013-2022, John McNamara, [email protected]
#
from xlsxwriter.workbook import Workbook
from xlsxwriter.worksheet import Worksheet
from xlsxwriter.worksheet import convert_cell_args
def excel_string_width(str):
"""
Calculate the length of the string in Excel character units. This is only
an example and won't give accurate results. It will need to be replaced
by something more rigorous.
"""
string_width = len(str)
if string_width == 0:
return 0
else:
return string_width * 1.1
class MyWorksheet(Worksheet):
"""
Subclass of the XlsxWriter Worksheet class to override the default
write_string() method.
"""
@convert_cell_args
def write_string(self, row, col, string, cell_format=None):
# Overridden write_string() method to store the maximum string width
# seen in each column.
# Check that row and col are valid and store max and min values.
if self._check_dimensions(row, col):
return -1
# Set the min width for the cell. In some cases this might be the
# default width of 8.43. In this case we use 0 and adjust for all
# string widths.
min_width = 0
# Check if it the string is the largest we have seen for this column.
string_width = excel_string_width(string)
if string_width > min_width:
max_width = self.max_column_widths.get(col, min_width)
if string_width > max_width:
self.max_column_widths[col] = string_width
# Now call the parent version of write_string() as usual.
return super(MyWorksheet, self).write_string(row, col, string,
cell_format)
class MyWorkbook(Workbook):
"""
Subclass of the XlsxWriter Workbook class to override the default
Worksheet class with our custom class.
"""
def add_worksheet(self, name=None):
# Overwrite add_worksheet() to create a MyWorksheet object.
# Also add an Worksheet attribute to store the column widths.
worksheet = super(MyWorkbook, self).add_worksheet(name, MyWorksheet)
worksheet.max_column_widths = {}
return worksheet
def close(self):
# We apply the stored column widths for each worksheet when we close
# the workbook. This will override any other set_column() values that
# may have been applied. This could be handled in the application code
# below, instead.
for worksheet in self.worksheets():
for column, width in worksheet.max_column_widths.items():
worksheet.set_column(column, column, width)
return super(MyWorkbook, self).close()
# Create a new MyWorkbook object.
workbook = MyWorkbook('inheritance2.xlsx')
# The code from now on will be the same as a normal "Workbook" program.
worksheet = workbook.add_worksheet()
# Write some data to test column fitting.
worksheet.write('A1', 'F')
worksheet.write('B3', 'Foo')
worksheet.write('C1', 'F')
worksheet.write('C2', 'Fo')
worksheet.write('C3', 'Foo')
worksheet.write('C4', 'Food')
worksheet.write('D1', 'This is a longer string')
# Write a string in row-col notation.
worksheet.write(0, 4, 'Hello World')
# Write a number.
worksheet.write(0, 5, 123456)
workbook.close()
| 32.319672 | 78 | 0.66878 |
4a249c20481461864bb28969a121e6eed3e6cc58 | 1,524 | py | Python | pyxl/element.py | adamserafini/pyxl | 8278d222317e1ef2796899506ba37e357dff7c3c | [
"Apache-2.0"
] | 366 | 2015-01-02T06:20:07.000Z | 2022-01-10T01:57:10.000Z | pyxl/element.py | adamserafini/pyxl | 8278d222317e1ef2796899506ba37e357dff7c3c | [
"Apache-2.0"
] | 2 | 2016-07-08T09:28:44.000Z | 2017-10-05T04:47:12.000Z | pyxl/element.py | adamserafini/pyxl | 8278d222317e1ef2796899506ba37e357dff7c3c | [
"Apache-2.0"
] | 27 | 2015-01-10T00:32:05.000Z | 2021-06-05T05:38:23.000Z | #!/usr/bin/env python
from pyxl.base import x_base
class x_element(x_base):
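    """Base class for composite pyxl elements: subclasses implement render(), whose
    (cached) result is unwrapped and merged with any CSS classes set on the wrappers."""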
_element = None # render() output cached by _rendered_element()
def _get_base_element(self):
# Adding classes costs ~10%
out = self._rendered_element()
# Note: get_class() may return multiple space-separated classes.
cls = self.get_class()
classes = set(cls.split(' ')) if cls else set()
while isinstance(out, x_element):
new_out = out._rendered_element()
cls = out.get_class()
if cls:
classes.update(cls.split(' '))
out = new_out
if classes and isinstance(out, x_base):
classes.update(out.get_class().split(' '))
out.set_attr('class', ' '.join(filter(None, classes)))
return out
def _to_list(self, l):
self._render_child_to_list(self._get_base_element(), l)
def _rendered_element(self):
if self._element is None:
self.prerender()
self._element = self.render()
self.postrender(self._element)
return self._element
def render(self):
raise NotImplementedError()
def prerender(self):
"""
Hook to do things before the element is rendered. Default behavior is
to do nothing.
"""
pass
def postrender(self, element):
"""
Hook to do things after the element is rendered. Default behavior
is to do nothing
"""
pass
| 27.709091 | 78 | 0.584646 |
4a249c8fd27d73bd3bac05ffa7be80d08d6b9cbe | 20,060 | py | Python | src/_pytest/pathlib.py | theanalyst2020/pytest | d9c4ecaf62573e36d3d835b7830e41095c101f50 | [
"MIT"
] | 1 | 2020-09-19T06:23:11.000Z | 2020-09-19T06:23:11.000Z | src/_pytest/pathlib.py | doerwalter/pytest | d426a79a90351dff0492fbd40404b1256b24f91f | [
"MIT"
] | null | null | null | src/_pytest/pathlib.py | doerwalter/pytest | d426a79a90351dff0492fbd40404b1256b24f91f | [
"MIT"
] | null | null | null | import atexit
import contextlib
import fnmatch
import importlib.util
import itertools
import os
import shutil
import sys
import uuid
import warnings
from enum import Enum
from functools import partial
from os.path import expanduser
from os.path import expandvars
from os.path import isabs
from os.path import sep
from posixpath import sep as posix_sep
from types import ModuleType
from typing import Callable
from typing import Iterable
from typing import Iterator
from typing import Optional
from typing import Set
from typing import TypeVar
from typing import Union
import py
from _pytest.compat import assert_never
from _pytest.outcomes import skip
from _pytest.warning_types import PytestWarning
if sys.version_info[:2] >= (3, 6):
from pathlib import Path, PurePath
else:
from pathlib2 import Path, PurePath
__all__ = ["Path", "PurePath"]
LOCK_TIMEOUT = 60 * 60 * 3
_AnyPurePath = TypeVar("_AnyPurePath", bound=PurePath)
def get_lock_path(path: _AnyPurePath) -> _AnyPurePath:
return path.joinpath(".lock")
def ensure_reset_dir(path: Path) -> None:
"""Ensure the given path is an empty directory."""
if path.exists():
rm_rf(path)
path.mkdir()
def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
"""Handle known read-only errors during rmtree.
The returned value is used only by our own tests.
"""
exctype, excvalue = exc[:2]
# Another process removed the file in the middle of the "rm_rf" (xdist for example).
# More context: https://github.com/pytest-dev/pytest/issues/5974#issuecomment-543799018
if isinstance(excvalue, FileNotFoundError):
return False
if not isinstance(excvalue, PermissionError):
warnings.warn(
PytestWarning(
"(rm_rf) error removing {}\n{}: {}".format(path, exctype, excvalue)
)
)
return False
if func not in (os.rmdir, os.remove, os.unlink):
if func not in (os.open,):
warnings.warn(
PytestWarning(
"(rm_rf) unknown function {} when removing {}:\n{}: {}".format(
func, path, exctype, excvalue
)
)
)
return False
# Chmod + retry.
import stat
def chmod_rw(p: str) -> None:
mode = os.stat(p).st_mode
os.chmod(p, mode | stat.S_IRUSR | stat.S_IWUSR)
# For files, we need to recursively go upwards in the directories to
# ensure they all are also writable.
p = Path(path)
if p.is_file():
for parent in p.parents:
chmod_rw(str(parent))
# Stop when we reach the original path passed to rm_rf.
if parent == start_path:
break
chmod_rw(str(path))
func(path)
return True
def ensure_extended_length_path(path: Path) -> Path:
"""Get the extended-length version of a path (Windows).
On Windows, by default, the maximum length of a path (MAX_PATH) is 260
characters, and operations on paths longer than that fail. But it is possible
to overcome this by converting the path to "extended-length" form before
performing the operation:
https://docs.microsoft.com/en-us/windows/win32/fileio/naming-a-file#maximum-path-length-limitation
On Windows, this function returns the extended-length absolute version of path.
On other platforms it returns path unchanged.
"""
if sys.platform.startswith("win32"):
path = path.resolve()
path = Path(get_extended_length_path_str(str(path)))
return path
def get_extended_length_path_str(path: str) -> str:
"""Convert a path to a Windows extended length path."""
long_path_prefix = "\\\\?\\"
unc_long_path_prefix = "\\\\?\\UNC\\"
if path.startswith((long_path_prefix, unc_long_path_prefix)):
return path
# UNC
if path.startswith("\\\\"):
return unc_long_path_prefix + path[2:]
return long_path_prefix + path
def rm_rf(path: Path) -> None:
"""Remove the path contents recursively, even if some elements
are read-only."""
path = ensure_extended_length_path(path)
onerror = partial(on_rm_rf_error, start_path=path)
shutil.rmtree(str(path), onerror=onerror)
def find_prefixed(root: Path, prefix: str) -> Iterator[Path]:
"""Find all elements in root that begin with the prefix, case insensitive."""
l_prefix = prefix.lower()
for x in root.iterdir():
if x.name.lower().startswith(l_prefix):
yield x
def extract_suffixes(iter: Iterable[PurePath], prefix: str) -> Iterator[str]:
"""Return the parts of the paths following the prefix.
:param iter: Iterator over path names.
:param prefix: Expected prefix of the path names.
"""
p_len = len(prefix)
for p in iter:
yield p.name[p_len:]
def find_suffixes(root: Path, prefix: str) -> Iterator[str]:
"""Combine find_prefixes and extract_suffixes."""
return extract_suffixes(find_prefixed(root, prefix), prefix)
def parse_num(maybe_num) -> int:
"""Parse number path suffixes, returns -1 on error."""
try:
return int(maybe_num)
except ValueError:
return -1
def _force_symlink(
root: Path, target: Union[str, PurePath], link_to: Union[str, Path]
) -> None:
"""Helper to create the current symlink.
It's full of race conditions that are reasonably OK to ignore
for the context of best effort linking to the latest test run.
The presumption being that in case of much parallelism
the inaccuracy is going to be acceptable.
"""
current_symlink = root.joinpath(target)
try:
current_symlink.unlink()
except OSError:
pass
try:
current_symlink.symlink_to(link_to)
except Exception:
pass
def make_numbered_dir(root: Path, prefix: str) -> Path:
"""Create a directory with an increased number as suffix for the given prefix."""
for i in range(10):
# try up to 10 times to create the folder
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
new_number = max_existing + 1
new_path = root.joinpath("{}{}".format(prefix, new_number))
try:
new_path.mkdir()
except Exception:
pass
else:
_force_symlink(root, prefix + "current", new_path)
return new_path
else:
raise OSError(
"could not create numbered dir with prefix "
"{prefix} in {root} after 10 tries".format(prefix=prefix, root=root)
)
def create_cleanup_lock(p: Path) -> Path:
"""Create a lock to prevent premature folder cleanup."""
lock_path = get_lock_path(p)
try:
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
except FileExistsError as e:
raise OSError("cannot create lockfile in {path}".format(path=p)) from e
else:
pid = os.getpid()
spid = str(pid).encode()
os.write(fd, spid)
os.close(fd)
if not lock_path.is_file():
raise OSError("lock path got renamed after successful creation")
return lock_path
def register_cleanup_lock_removal(lock_path: Path, register=atexit.register):
"""Register a cleanup function for removing a lock, by default on atexit."""
pid = os.getpid()
def cleanup_on_exit(lock_path: Path = lock_path, original_pid: int = pid) -> None:
current_pid = os.getpid()
if current_pid != original_pid:
# fork
return
try:
lock_path.unlink()
except OSError:
pass
return register(cleanup_on_exit)
def maybe_delete_a_numbered_dir(path: Path) -> None:
"""Remove a numbered directory if its lock can be obtained and it does
not seem to be in use."""
path = ensure_extended_length_path(path)
lock_path = None
try:
lock_path = create_cleanup_lock(path)
parent = path.parent
garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
path.rename(garbage)
rm_rf(garbage)
except OSError:
# known races:
# * other process did a cleanup at the same time
# * deletable folder was found
# * process cwd (Windows)
return
finally:
# If we created the lock, ensure we remove it even if we failed
# to properly remove the numbered dir.
if lock_path is not None:
try:
lock_path.unlink()
except OSError:
pass
def ensure_deletable(path: Path, consider_lock_dead_if_created_before: float) -> bool:
"""Check if `path` is deletable based on whether the lock file is expired."""
if path.is_symlink():
return False
lock = get_lock_path(path)
try:
if not lock.is_file():
return True
except OSError:
# we might not have access to the lock file at all, in this case assume
# we don't have access to the entire directory (#7491).
return False
try:
lock_time = lock.stat().st_mtime
except Exception:
return False
else:
if lock_time < consider_lock_dead_if_created_before:
# We want to ignore any errors while trying to remove the lock such as:
# - PermissionDenied, like the file permissions have changed since the lock creation;
# - FileNotFoundError, in case another pytest process got here first;
# and any other cause of failure.
with contextlib.suppress(OSError):
lock.unlink()
return True
return False
def try_cleanup(path: Path, consider_lock_dead_if_created_before: float) -> None:
"""Try to cleanup a folder if we can ensure it's deletable."""
if ensure_deletable(path, consider_lock_dead_if_created_before):
maybe_delete_a_numbered_dir(path)
def cleanup_candidates(root: Path, prefix: str, keep: int) -> Iterator[Path]:
"""List candidates for numbered directories to be removed - follows py.path."""
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
max_delete = max_existing - keep
paths = find_prefixed(root, prefix)
paths, paths2 = itertools.tee(paths)
numbers = map(parse_num, extract_suffixes(paths2, prefix))
for path, number in zip(paths, numbers):
if number <= max_delete:
yield path
def cleanup_numbered_dir(
root: Path, prefix: str, keep: int, consider_lock_dead_if_created_before: float
) -> None:
"""Cleanup for lock driven numbered directories."""
for path in cleanup_candidates(root, prefix, keep):
try_cleanup(path, consider_lock_dead_if_created_before)
for path in root.glob("garbage-*"):
try_cleanup(path, consider_lock_dead_if_created_before)
def make_numbered_dir_with_cleanup(
root: Path, prefix: str, keep: int, lock_timeout: float
) -> Path:
"""Create a numbered dir with a cleanup lock and remove old ones."""
e = None
for i in range(10):
try:
p = make_numbered_dir(root, prefix)
lock_path = create_cleanup_lock(p)
register_cleanup_lock_removal(lock_path)
except Exception as exc:
e = exc
else:
consider_lock_dead_if_created_before = p.stat().st_mtime - lock_timeout
# Register a cleanup for program exit
atexit.register(
cleanup_numbered_dir,
root,
prefix,
keep,
consider_lock_dead_if_created_before,
)
return p
assert e is not None
raise e
def resolve_from_str(input: str, root: py.path.local) -> Path:
rootpath = Path(root)
input = expanduser(input)
input = expandvars(input)
if isabs(input):
return Path(input)
else:
return rootpath.joinpath(input)
def fnmatch_ex(pattern: str, path) -> bool:
"""A port of FNMatcher from py.path.common which works with PurePath() instances.
The difference between this algorithm and PurePath.match() is that the
latter matches "**" glob expressions for each part of the path, while
this algorithm uses the whole path instead.
For example:
"tests/foo/bar/doc/test_foo.py" matches pattern "tests/**/doc/test*.py"
with this algorithm, but not with PurePath.match().
This algorithm was ported to keep backward-compatibility with existing
settings which assume paths match according this logic.
References:
* https://bugs.python.org/issue29249
* https://bugs.python.org/issue34731
"""
path = PurePath(path)
iswin32 = sys.platform.startswith("win")
if iswin32 and sep not in pattern and posix_sep in pattern:
# Running on Windows, the pattern has no Windows path separators,
# and the pattern has one or more Posix path separators. Replace
# the Posix path separators with the Windows path separator.
pattern = pattern.replace(posix_sep, sep)
if sep not in pattern:
name = path.name
else:
name = str(path)
if path.is_absolute() and not os.path.isabs(pattern):
pattern = "*{}{}".format(os.sep, pattern)
return fnmatch.fnmatch(name, pattern)
def parts(s: str) -> Set[str]:
parts = s.split(sep)
return {sep.join(parts[: i + 1]) or sep for i in range(len(parts))}
def symlink_or_skip(src, dst, **kwargs):
"""Make a symlink, or skip the test in case symlinks are not supported."""
try:
os.symlink(str(src), str(dst), **kwargs)
except OSError as e:
skip("symlinks not supported: {}".format(e))
class ImportMode(Enum):
"""Possible values for `mode` parameter of `import_path`."""
prepend = "prepend"
append = "append"
importlib = "importlib"
class ImportPathMismatchError(ImportError):
"""Raised on import_path() if there is a mismatch of __file__'s.
This can happen when `import_path` is called multiple times with different filenames that has
the same basename but reside in packages
(for example "/tests1/test_foo.py" and "/tests2/test_foo.py").
"""
def import_path(
p: Union[str, py.path.local, Path],
*,
mode: Union[str, ImportMode] = ImportMode.prepend
) -> ModuleType:
"""Import and return a module from the given path, which can be a file (a module) or
a directory (a package).
The import mechanism used is controlled by the `mode` parameter:
* `mode == ImportMode.prepend`: the directory containing the module (or package, taking
`__init__.py` files into account) will be put at the *start* of `sys.path` before
being imported with `__import__.
* `mode == ImportMode.append`: same as `prepend`, but the directory will be appended
to the end of `sys.path`, if not already in `sys.path`.
* `mode == ImportMode.importlib`: uses more fine control mechanisms provided by `importlib`
to import the module, which avoids having to use `__import__` and muck with `sys.path`
at all. It effectively allows having same-named test modules in different places.
:raises ImportPathMismatchError:
If after importing the given `path` and the module `__file__`
are different. Only raised in `prepend` and `append` modes.
"""
mode = ImportMode(mode)
path = Path(str(p))
if not path.exists():
raise ImportError(path)
if mode is ImportMode.importlib:
module_name = path.stem
for meta_importer in sys.meta_path:
spec = meta_importer.find_spec(module_name, [str(path.parent)])
if spec is not None:
break
else:
spec = importlib.util.spec_from_file_location(module_name, str(path))
if spec is None:
raise ImportError(
"Can't find module {} at location {}".format(module_name, str(path))
)
mod = importlib.util.module_from_spec(spec)
spec.loader.exec_module(mod) # type: ignore[union-attr]
return mod
pkg_path = resolve_package_path(path)
if pkg_path is not None:
pkg_root = pkg_path.parent
names = list(path.with_suffix("").relative_to(pkg_root).parts)
if names[-1] == "__init__":
names.pop()
module_name = ".".join(names)
else:
pkg_root = path.parent
module_name = path.stem
# Change sys.path permanently: restoring it at the end of this function would cause surprising
# problems because of delayed imports: for example, a conftest.py file imported by this function
# might have local imports, which would fail at runtime if we restored sys.path.
if mode is ImportMode.append:
if str(pkg_root) not in sys.path:
sys.path.append(str(pkg_root))
elif mode is ImportMode.prepend:
if str(pkg_root) != sys.path[0]:
sys.path.insert(0, str(pkg_root))
else:
assert_never(mode)
importlib.import_module(module_name)
mod = sys.modules[module_name]
if path.name == "__init__.py":
return mod
ignore = os.environ.get("PY_IGNORE_IMPORTMISMATCH", "")
if ignore != "1":
module_file = mod.__file__
if module_file.endswith((".pyc", ".pyo")):
module_file = module_file[:-1]
if module_file.endswith(os.path.sep + "__init__.py"):
module_file = module_file[: -(len(os.path.sep + "__init__.py"))]
try:
is_same = os.path.samefile(str(path), module_file)
except FileNotFoundError:
is_same = False
if not is_same:
raise ImportPathMismatchError(module_name, module_file, path)
return mod
def resolve_package_path(path: Path) -> Optional[Path]:
"""Return the Python package path by looking for the last
directory upwards which still contains an __init__.py.
Returns None if it can not be determined.
"""
result = None
for parent in itertools.chain((path,), path.parents):
if parent.is_dir():
if not parent.joinpath("__init__.py").is_file():
break
if not parent.name.isidentifier():
break
result = parent
return result
def visit(
path: str, recurse: Callable[["os.DirEntry[str]"], bool]
) -> Iterator["os.DirEntry[str]"]:
"""Walk a directory recursively, in breadth-first order.
Entries at each directory level are sorted.
"""
entries = sorted(os.scandir(path), key=lambda entry: entry.name)
yield from entries
for entry in entries:
if entry.is_dir(follow_symlinks=False) and recurse(entry):
yield from visit(entry.path, recurse)
def absolutepath(path: Union[Path, str]) -> Path:
"""Convert a path to an absolute path using os.path.abspath.
Prefer this over Path.resolve() (see #6523).
Prefer this over Path.absolute() (not public, doesn't normalize).
"""
return Path(os.path.abspath(str(path)))
def commonpath(path1: Path, path2: Path) -> Optional[Path]:
"""Return the common part shared with the other path, or None if there is
no common part."""
try:
return Path(os.path.commonpath((str(path1), str(path2))))
except ValueError:
return None
def bestrelpath(directory: Path, dest: Path) -> str:
"""Return a string which is a relative path from directory to dest such
that directory/bestrelpath == dest.
If no such path can be determined, returns dest.
"""
if dest == directory:
return os.curdir
# Find the longest common directory.
base = commonpath(directory, dest)
# Can be the case on Windows.
if not base:
return str(dest)
reldirectory = directory.relative_to(base)
reldest = dest.relative_to(base)
return os.path.join(
# Back from directory to base.
*([os.pardir] * len(reldirectory.parts)),
# Forward from base to dest.
*reldest.parts,
)
| 32.724307 | 102 | 0.648554 |
4a249d6ea0ceeba1d2ef8344bdf4a5a1c0dff040 | 4,039 | py | Python | venv/Lib/site-packages/dataframe/pipeable_functions.py | kavanAdeshara/Expense_Tracker | b3e4810e858a7786e05cda6b91ba674b73b87981 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dataframe/pipeable_functions.py | kavanAdeshara/Expense_Tracker | b3e4810e858a7786e05cda6b91ba674b73b87981 | [
"Apache-2.0"
] | null | null | null | venv/Lib/site-packages/dataframe/pipeable_functions.py | kavanAdeshara/Expense_Tracker | b3e4810e858a7786e05cda6b91ba674b73b87981 | [
"Apache-2.0"
] | null | null | null | # dataframe: a data-frame implementation using method piping
#
# Copyright (C) 2016 Simon Dirmeier
#
# This file is part of dataframe.
#
# dataframe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dataframe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dataframe. If not, see <http://www.gnu.org/licenses/>.
#
#
# @author = 'Simon Dirmeier'
# @email = '[email protected]'
import dataframe
from dataframe import pipeable
def group(*args):
"""
Pipeable grouping method.
Takes either
- a dataframe and a tuple of strings for grouping,
- a tuple of strings if a dataframe has already been piped into.
:Example:
group(dataframe, "column")
:Example:
dataframe >> group("column")
:param args: tuple of arguments
:type args: tuple
:return: returns a grouped dataframe object
:rtype: GroupedDataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].group(*args[1:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.GROUP, *args)
def aggregate(*args):
"""
Pipeable aggregation method.
Takes either
- a dataframe and a tuple of arguments required for aggregation,
- a tuple of arguments if a dataframe has already been piped into.
In any case one argument has to be a class that extends callable.
:Example:
aggregate(dataframe, Function, "new_col_name", "old_col_name")
:Example:
dataframe >> aggregate(Function, "new_col_name", "old_col_name")
:param args: tuple of arguments
:type args: tuple
:return: returns a dataframe object
:rtype: DataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].aggregate(args[1], args[2], *args[3:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.AGGREGATE, *args)
def subset(*args):
"""
Pipeable subsetting method.
Takes either
- a dataframe and a tuple of arguments required for subsetting,
- a tuple of arguments if a dataframe has already been piped into.
:Example:
subset(dataframe, "column")
:Example:
dataframe >> subset("column")
:param args: tuple of arguments
:type args: tuple
:return: returns a dataframe object
:rtype: DataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].subset(*args[1:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.SUBSET, *args)
def modify(*args):
"""
Pipeable modification method
Takes either
- a dataframe and a tuple of arguments required for modification,
- a tuple of arguments if a dataframe has already been piped into.
In any case one argument has to be a class that extends callable.
:Example:
modify(dataframe, Function, "new_col_name", "old_col_name")
:Example:
dataframe >> modify(Function, "new_col_name", "old_col_name")
:param args: tuple of arguments
:type args: tuple
:return: returns a dataframe object
:rtype: DataFrame
"""
if args and isinstance(args[0], dataframe.DataFrame):
return args[0].modify(args[1], args[2], *args[3:])
elif not args:
raise ValueError("No arguments provided")
else:
return pipeable.Pipeable(pipeable.PipingMethod.MODIFY, *args)
| 27.107383 | 72 | 0.673929 |
4a249dc6272c1389fe41b83cec0f9974b1b59a50 | 7,257 | py | Python | mindsdb/libs/data_types/light_metadata.py | Glitchfix/mindsdb | e6c33d7085898c223030334962596ae8afa3fbd5 | [
"MIT"
] | null | null | null | mindsdb/libs/data_types/light_metadata.py | Glitchfix/mindsdb | e6c33d7085898c223030334962596ae8afa3fbd5 | [
"MIT"
] | null | null | null | mindsdb/libs/data_types/light_metadata.py | Glitchfix/mindsdb | e6c33d7085898c223030334962596ae8afa3fbd5 | [
"MIT"
] | null | null | null | # UNDER CONSTRUCTION !
light_metadata = {
"name": {
"type": "string"
},
"version": {
"type": "string"
},
"data_preparation": {
"type": "object",
"properties": {
"accepted_margin_of_error": {
"type": "number"
},
"total_row_count": {
"type": "number"
},
"used_row_count": {
"type": "number"
},
"test_row_count": {
"type": "number"
},
"train_row_count": {
"type": "number"
},
"validation_row_count": {
"type": "number"
}
}
},
"data_analysis": {
"type": "object",
"properties": {
"target_columns_metadata": {
"type": "array",
"items": {
"type": "object",
"properties": {
"column_name": {
"type": "string"
}
}
}
}
}
}
}
scores = ['duplicates_score','empty_cells_score','data_type_distribution_score',
'similarity_score','z_test_based_outlier_score','value_distribution_score'
,'variability_score','redundancy_score','consistency_score','consistency_score','quality_score']
def gen_score(score_name):
return [
score_name: {
"type": "object",
"properties": {
"score": {
"type": "number"
},
"description": {
"type": "string"
}
}
}
]
"data_analysis": {
"target_columns_metadata": [
{
"column_name": "string",
"importance_score": 0,
"data_type": "categorical",
"data_type_distribution": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"data_distribution": {
"data_histogram": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"clusters": [
{
"group": "string",
"members": [
"string"
]
}
],
"mean": "string"
},
"consistency": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
},
"completeness": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
},
"variability": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
}
}
],
"input_columns_metadata": [
{
"column_name": "string",
"importance_score": 0,
"data_type": "categorical",
"data_type_distribution": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"data_distribution": {
"data_histogram": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"clusters": [
{
"group": "string",
"members": [
"string"
]
}
],
"mean": "string"
},
"consistency": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
},
"completeness": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
},
"variability": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
}
}
]
},
"model_analysis": [
{
"column_name": "string",
"overall_input_importance": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"train_accuracy_over_time": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"test_accuracy_over_time": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"accuracy_histogram": {
"x": [
"string"
],
"y": [
0
],
"x_explained": [
[
{
"column_name": "string",
"importance_score": 0,
"data_type": "categorical",
"data_type_distribution": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"data_distribution": {
"data_histogram": {
"type": "categorical",
"x": [
"string"
],
"y": [
0
]
},
"clusters": [
{
"group": "string",
"members": [
"string"
]
}
],
"mean": "string"
},
"consistency": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
},
"completeness": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
},
"variability": {
"score": "string",
"metrics": [
{
"type": "error",
"score": 0,
"description": "string"
}
],
"description": "string"
}
}
]
]
}
}
]
})
| 22.398148 | 96 | 0.309219 |
4a249e054a53c4f4fba189ea57fd8cd100495bbe | 6,463 | py | Python | src/daain/backbones/esp_dropout_net/trainer/data.py | merantix/mxlabs-daain | 0e87df5dd6e678939374dfadf44fc360d34425bb | [
"Apache-2.0"
] | 14 | 2021-06-01T08:34:09.000Z | 2022-02-01T15:45:27.000Z | src/daain/backbones/esp_dropout_net/trainer/data.py | merantix/mxlabs-daain | 0e87df5dd6e678939374dfadf44fc360d34425bb | [
"Apache-2.0"
] | null | null | null | src/daain/backbones/esp_dropout_net/trainer/data.py | merantix/mxlabs-daain | 0e87df5dd6e678939374dfadf44fc360d34425bb | [
"Apache-2.0"
] | 2 | 2021-07-31T01:58:40.000Z | 2022-01-24T14:04:26.000Z | import os
import pickle
from typing import List, Optional, Union
import pytorch_lightning as pl
from torch.utils.data import DataLoader, random_split
from daain.backbones.esp_dropout_net.trainer.data_statistics import DataStatistics
from daain.backbones.esp_dropout_net.trainer.dataset_collate import ConcatTupleDataset, DatasetCollate
from daain.backbones.esp_dropout_net.trainer.dataset_collate import MultipleRandomSampler
from daain.backbones.esp_dropout_net.trainer.dataset_collate import MultipleSequentialSampler
from daain.backbones.esp_dropout_net.trainer.transformations import Compose, Normalize, RandomFlip
from daain.backbones.esp_dropout_net.trainer.transformations import RandomResizedCrop, Resize, ToTensor
from daain.config_schema.datasets.dataset import DatasetPaths
from daain.data.datasets import get_split_sizes
from daain.data.datasets.cityscapes_dataset import Cityscapes
def _create_transforms_(mean, std, scale_in):
"""Create the tranformations used in the original paper.
Note that this will then take some time to run."""
default_post = (
ToTensor(scale_in),
Normalize(mean=mean, std=std),
)
training_transforms = [
(Resize((512, 1024)),),
(Resize((512, 1024)), RandomResizedCrop(32), RandomFlip()),
(Resize((768, 1536)), RandomResizedCrop(128), RandomFlip()),
(Resize((720, 1280)), RandomResizedCrop(128), RandomFlip()),
(Resize((384, 768)), RandomResizedCrop(32), RandomFlip()),
(Resize((256, 512)), RandomFlip()),
]
val_transforms = [(Resize((512, 1024)),)]
training_transforms = [Compose((*ts, *default_post)) for ts in training_transforms]
val_transforms = [Compose((*ts, *default_post)) for ts in val_transforms]
return training_transforms, val_transforms
class AugmentedCityscapesLikeDataModule(pl.LightningDataModule):
def __init__(
self,
root: str,
paths: DatasetPaths,
batch_size: int,
scale_input: int,
mean=None,
std=None,
class_weights=None,
meta_data_path: str = None,
num_workers=1,
dataset=Cityscapes,
fast_dev_run=False,
):
"""Creates an Augmented Cityscape Like dataset, like with the meaning that the labels are the same as the
Cityscapes-dataset."""
super().__init__()
self.batch_size = batch_size
self.scale_input = scale_input
self.dataset = dataset
self.dataset_mean = mean
self.dataset_std = std
self.class_weights = class_weights
if fast_dev_run:
t = os.path.split(meta_data_path)
self.meta_data_path = os.path.join(t[0], f"DEBUG_{t[1]}")
else:
self.meta_data_path = meta_data_path
self.dataset_default = {"normalize": False, "fast_dev_run": fast_dev_run, "root": root, "paths": paths}
self.data_loader_default = {
"batch_size": batch_size,
# "shuffle": False,
"num_workers": num_workers,
"pin_memory": False,
}
self.train, self.test, self.val = None, None, None
self.fast_dev_run = fast_dev_run
self.ignore_index = None
self.num_classes = None
self.prepare_data()
self.setup()
def prepare_data(self):
if self.dataset_mean is None:
if self.meta_data_path and os.path.exists(self.meta_data_path):
with open(self.meta_data_path, "rb") as f:
loaded = pickle.load(f)
self.dataset_mean = loaded["mean"]
self.dataset_std = loaded["std"]
self.class_weights = loaded["class_weights"]
self.num_classes = loaded["num_classes"]
self.ignore_index = loaded["ignore_index"]
else:
data_statistics = DataStatistics(
loader=DataLoader(self.dataset(split="train", **self.dataset_default), **self.data_loader_default)
)
self.dataset_mean = data_statistics.mean
self.dataset_std = data_statistics.std
self.class_weights = data_statistics.class_weights
self.num_classes = data_statistics.num_classes
self.ignore_index = data_statistics.ignore_index
if self.meta_data_path:
with open(self.meta_data_path, "wb") as f:
pickle.dump(
{
"mean": self.dataset_mean,
"std": self.dataset_std,
"class_weights": self.class_weights,
"num_classes": self.num_classes,
"ignore_index": self.ignore_index,
},
f,
)
def setup(self, stage: Optional[str] = None):
training_transforms, val_transforms = _create_transforms_(
mean=self.dataset_mean, std=self.dataset_std, scale_in=self.scale_input
)
if stage == "fit" or stage is None:
dataset_full = ConcatTupleDataset(
[
self.dataset(split="train", transforms=transform, **self.dataset_default)
for transform in training_transforms
]
)
self.train, self.val = random_split(dataset_full, get_split_sizes(dataset_full))
if stage == "test" or stage is None:
self.test = self.dataset(split="test", transforms=val_transforms[0], **self.dataset_default)
def train_dataloader(self, *args, **kwargs) -> DataLoader:
return DataLoader(
self.train,
collate_fn=DatasetCollate(),
sampler=MultipleRandomSampler(self.train, num_times=len(self.train.dataset.datasets)),
**self.data_loader_default,
)
def val_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(
self.val,
collate_fn=DatasetCollate(),
sampler=MultipleSequentialSampler(self.val, num_times=len(self.val.dataset.datasets)),
**self.data_loader_default,
)
def test_dataloader(self, *args, **kwargs) -> Union[DataLoader, List[DataLoader]]:
return DataLoader(self.test, **self.data_loader_default)
| 39.895062 | 118 | 0.618753 |
4a249e8ba1b4469c47ce215736187d3f1657389d | 865 | py | Python | Policies/EmpericalMeans.py | setuc/multi-arm-bandit | e81603700f405beec47a1f03459bbdc4456635f6 | [
"MIT"
] | 2 | 2020-09-29T12:17:50.000Z | 2020-10-11T22:05:48.000Z | Policies/EmpericalMeans.py | setuc/multi-arm-bandit | e81603700f405beec47a1f03459bbdc4456635f6 | [
"MIT"
] | null | null | null | Policies/EmpericalMeans.py | setuc/multi-arm-bandit | e81603700f405beec47a1f03459bbdc4456635f6 | [
"MIT"
] | 1 | 2020-09-22T12:15:54.000Z | 2020-09-22T12:15:54.000Z | import numpy as np
try:
from .IndexPolicy import IndexPolicy
except ImportError:
from IndexPolicy import IndexPolicy
class EmpiricalMeans(IndexPolicy):
"""
The naive Empirical Means policy for bounded bandits: like UCB but without a bias correction term.
Note that it is equal to UCBalpha with alpha=0, only quicker."""
def computeIndex(self, arm):
"""
Compute the current index, at time t and after N pulls of arm k:
"""
if self.pulls[arm] < 1:
return float('+inf')
else:
return self.rewards[arm] / self.pulls[arm]
def computeAllIndex(self):
"""
Compute the current indexes for all arms, in a vectorized manner.
"""
indexes = self.rewards / self.pulls
indexes[self.pulls < 1] = float('+inf')
self.index[:] = indexes | 29.827586 | 103 | 0.620809 |
4a249ee7fb6635802b50098d45d2113765510984 | 184 | py | Python | Beginner/Balsa For The Three (BFTT)/three.py | anishsingh42/CodeChef | 50f5c0438516210895e513bc4ee959b9d99ef647 | [
"Apache-2.0"
] | 127 | 2020-10-13T18:04:35.000Z | 2022-02-17T10:56:27.000Z | Beginner/Balsa For The Three (BFTT)/three.py | anishsingh42/CodeChef | 50f5c0438516210895e513bc4ee959b9d99ef647 | [
"Apache-2.0"
] | 132 | 2020-10-13T18:06:53.000Z | 2021-10-17T18:44:26.000Z | Beginner/Balsa For The Three (BFTT)/three.py | anishsingh42/CodeChef | 50f5c0438516210895e513bc4ee959b9d99ef647 | [
"Apache-2.0"
] | 364 | 2020-10-13T18:04:52.000Z | 2022-03-04T14:34:53.000Z | t = int(input())
for i in range(t):
N = int(input())
s = N+1
while(True):
c = str(s)
if c.count('3') >= 3:
break
s = s+1
print(s)
| 14.153846 | 29 | 0.38587 |
4a249fa7d1bbe9593c9df747293e9191d497d8db | 1,719 | py | Python | pyecharts/charts/bar.py | yangxuan0261/pyecharts | 31cbd4dcd8c77988d1678a05b7f3bd0d845160bd | [
"MIT"
] | null | null | null | pyecharts/charts/bar.py | yangxuan0261/pyecharts | 31cbd4dcd8c77988d1678a05b7f3bd0d845160bd | [
"MIT"
] | null | null | null | pyecharts/charts/bar.py | yangxuan0261/pyecharts | 31cbd4dcd8c77988d1678a05b7f3bd0d845160bd | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#coding=utf-8
from pyecharts.base import Base
from pyecharts.option import get_all_options
class Bar(Base):
"""
<<< Bar chart >>>
Bar chart shows different data through the height of a bar,
which is used in rectangular coordinate with at least 1 category axis.
"""
def __init__(self, title="", subtitle="", **kwargs):
super(Bar, self).__init__(title, subtitle, **kwargs)
def add(self, *args, **kwargs):
self.__add(*args, **kwargs)
def __add(self, name, x_axis, y_axis,
is_stack=False,
**kwargs):
"""
:param name:
Series name used for displaying in tooltip and filtering with legend,
or updaing data and configuration with setOption.
:param x_axis:
data of xAixs
:param y_axis:
data of yAxis
:param is_stack:
It specifies whether to stack category axis.
:param kwargs:
"""
assert len(x_axis) == len(y_axis)
kwargs.update(x_axis=x_axis)
chart = get_all_options(**kwargs)
is_stack = "stack" if is_stack else ""
xaxis, yaxis = chart['xy_axis']
self._option.update(xAxis=xaxis, yAxis=yaxis)
self._option.get('legend')[0].get('data').append(name)
self._option.get('series').append({
"type": "bar",
"name": name,
"data": y_axis,
"stack": is_stack,
"label": chart['label'],
"markPoint": chart['mark_point'],
"markLine": chart['mark_line'],
"indexflag": self._option.get('_index_flag')
})
self._legend_visualmap_colorlst(**kwargs)
| 31.254545 | 81 | 0.572426 |
4a249faea8e0ebdcf653bead2d06a506edfe2455 | 1,303 | py | Python | python/doc/en/2.0/resources/plot_oat_tags.py | carlosduarteroa/smap | 5760631dfaf3e85da26ce68bf542bf254bb92c80 | [
"BSD-2-Clause"
] | 21 | 2015-02-06T21:55:59.000Z | 2021-04-29T11:23:18.000Z | python/doc/en/2.0/resources/plot_oat_tags.py | carlosduarteroa/smap | 5760631dfaf3e85da26ce68bf542bf254bb92c80 | [
"BSD-2-Clause"
] | 9 | 2015-02-03T10:41:35.000Z | 2020-02-18T12:46:10.000Z | python/doc/en/2.0/resources/plot_oat_tags.py | carlosduarteroa/smap | 5760631dfaf3e85da26ce68bf542bf254bb92c80 | [
"BSD-2-Clause"
] | 20 | 2015-02-06T00:09:19.000Z | 2020-01-10T13:27:06.000Z | """Example code plotting one day's worth of outside air time-series,
locating the streams using a metadata query.
@author Stephen Dawson-Haggerty <[email protected]>
"""
from smap.archiver.client import SmapClient
from smap.contrib import dtutil
from matplotlib import pyplot
from matplotlib import dates
# make a client
c = SmapClient("http://www.openbms.org/backend")
# start and end values are Unix timestamps
start = dtutil.dt2ts(dtutil.strptime_tz("1-1-2013", "%m-%d-%Y"))
end = dtutil.dt2ts(dtutil.strptime_tz("1-2-2013", "%m-%d-%Y"))
# download the data and metadata
tags = c.tags("Metadata/Extra/Type = 'oat'")
uuids, data = c.data("Metadata/Extra/Type = 'oat'", start, end)
# make a dict mapping uuids to data vectors
data_map = dict(zip(uuids, data))
# plot all the data
for timeseries in tags:
d = data_map[timeseries['uuid']]
# since we have the tags, we can add some metadata
label = "%s (%s)" % (timeseries['Metadata/SourceName'],
timeseries['Properties/UnitofMeasure'])
# we can plot all of the series in their appropriate time zones
pyplot.plot_date(dates.epoch2num(d[:, 0] / 1000), d[:, 1], '-',
label=label,
tz=timeseries['Properties/Timezone'])
pyplot.legend(loc="lower center")
pyplot.show()
| 32.575 | 68 | 0.688411 |
4a249fd12356546a3b634f2877453f87666ae3c3 | 2,433 | py | Python | test/client.py | davidhadas/Guard | 04ad14f5913ff30b53e32547944a1689c4042510 | [
"Apache-2.0"
] | null | null | null | test/client.py | davidhadas/Guard | 04ad14f5913ff30b53e32547944a1689c4042510 | [
"Apache-2.0"
] | null | null | null | test/client.py | davidhadas/Guard | 04ad14f5913ff30b53e32547944a1689c4042510 | [
"Apache-2.0"
] | null | null | null | import requests
import random
import string
defaults = {
"url": "sample-tests.sros-e621c7d733ece1fad737ff54a8912822-0000.us-south.containers.appdomain.cloud"
, "scheme": "http"
, "method": "GET"
, "path": "/"
, "data": ""
, "queryKeys": 2
, "queryVal": 2
, "headersKeys": 2
, "headersVal": 2
, "cookiesKeys": 2
, "cookiesVal": 2
, "bodyKeys": 2
, "bodyVal": 2
}
def send(param):
url = param.get("url") or defaults["url"]
method = param.get("method") or defaults["method"]
scheme = param.get("scheme") or defaults["scheme"]
path = param.get("path") or defaults["path"]
queryKeys = param.get("queryKeys") or defaults["queryKeys"]
queryVal = (param.get("queryVal") or defaults["queryVal"])*3
headersKeys = param.get("headersKeys") or defaults["headersKeys"]
headersVal = (param.get("headersVal") or defaults["headersVal"]) * 3
cookiesKeys = param.get("cookiesKeys") or defaults["cookiesKeys"]
cookiesVal = (param.get("cookiesVal") or defaults["cookiesVal"]) * 3
bodyKeys = param.get("bodyKeys") or defaults["bodyKeys"]
bodyVal = (param.get("bodyVal") or defaults["bodyVal"]) * 3
data = param.get("data") or defaults["data"]
query = {}
for i in range(queryKeys):
query["KEY-"+str(i)] = ''.join(random.choice(string.ascii_uppercase) for _ in range(random. randint(queryVal-1, queryVal+2)))
headers = {}
for i in range(headersKeys):
headers["HEADER-"+str(i)] = ''.join(random.choice(string.ascii_uppercase) for _ in range(random.randint(headersVal - 1, headersVal + 2)))
cookies = {}
for i in range(cookiesKeys):
cookies["COOKIE-" + str(i)] = ''.join(random.choice(string.ascii_uppercase) for _ in range(random.randint(cookiesVal - 1, cookiesVal + 2)))
if not data:
data = {}
for i in range(bodyKeys):
data["BODY-" + str(i)] = ''.join(random.choice(string.ascii_uppercase) for _ in range(random.randint(bodyVal - 1, bodyVal + 2)))
print(method, scheme, query, headers, cookies, data)
if (isinstance(data, dict)):
res = requests.request(method, scheme+"://"+url+path, params=query, headers=headers, cookies=cookies, json=data)
else:
res = requests.request(method, scheme + "://" + url + path, params=query, headers=headers, cookies=cookies, data=data)
print(res)
for i in range(1000):
send({"queryKeys": 3}) | 38.015625 | 147 | 0.635018 |
4a24a0a4a46f9129616bcbfcb68ca7dddbcc461a | 2,589 | py | Python | billiards/billiards/messages/assistant.py | zxkane/billiards | 809a37b111a0fdbf7a2b1176149256b93c43045f | [
"Apache-1.1"
] | null | null | null | billiards/billiards/messages/assistant.py | zxkane/billiards | 809a37b111a0fdbf7a2b1176149256b93c43045f | [
"Apache-1.1"
] | null | null | null | billiards/billiards/messages/assistant.py | zxkane/billiards | 809a37b111a0fdbf7a2b1176149256b93c43045f | [
"Apache-1.1"
] | 1 | 2021-02-08T13:19:34.000Z | 2021-02-08T13:19:34.000Z | # -*- coding: utf-8 -*-
# encoding: utf-8
'''
Created on 2015年2月21日
@author: kane
'''
from datetime import datetime
from billiards.models import DATETIME_FORMAT
from django.utils.timezone import localtime, utc
import pytz
from billiards.settings import TIME_ZONE
ORDER_CONFIRMATION=u'您的订单已确认,请您准时到场消费。\n\
预约助教:%s\n\
预约时间:%s,%s点-%s点;\n\
预约时长:%s小时\n\
预约单价:%s元/小时\n\
预约球房:%s;\n\
球房地址:%s \n\
消费金额:%s元;\n\
消费时,请将您的消费码告知助教。\n\
感谢您使用教练预约平台。\n\
请在我为台球狂微信公众平台—订单,查看预约详情。'
ORDER_PAY_SUCCESS=u'我们已经收到您的订单,等待助教确认。\n\
预约时间:%s,%s点-%s点;\n\
预约球房:%s;\n\
球房地址:%s\n\
消费金额:%s元;\n\
感谢您使用教练预约平台。\n\
请在我为台球狂微信公众平台—订单查看预约详情。'
ORDER_COMPLETE=u'您的订单已消费。\n\
消费时间:%s,%s\n\
感谢您使用教练预约平台。\n\
请在我为台球狂微信公众平台—订单查看预约详情。'
ORDER_ARRIVAL=u'有您的一个预约订单,请在微信内查看订单详情。\n\
预约助教:%s\n\
预约时间:%s,%s点-%s点;\n\
预约球房:%s;\n\
消费金额:%s元;\n\
客户微信昵称:%s\n\
客户联系电话:%s\n\
消费时,请向客户索要消费码。'
DATE_FORMAT = u'%Y年%m月%d日'.encode('utf-8')
TIME_FORMAT = u'%H'.encode('utf-8')
TIME2_FORMAT = u'%H点%m分'.encode('utf-8')
def orderConfirmationMsg(order):
starttime = datetime.strptime(order['starttime'], DATETIME_FORMAT)
return ORDER_CONFIRMATION %(order['assistant_name'], starttime.strftime(DATE_FORMAT).decode('utf-8'), starttime.strftime(TIME_FORMAT).decode('utf-8'),
datetime.strptime(order['endtime'], DATETIME_FORMAT).strftime(TIME_FORMAT).decode('utf-8'), order['duration'], order['price'],
order['poolroom_name'], order['poolroom_address'], order['payment'])
def orderPaySuccess(order):
starttime = datetime.strptime(order['starttime'], DATETIME_FORMAT)
return ORDER_PAY_SUCCESS %(starttime.strftime(DATE_FORMAT).decode('utf-8'), starttime.strftime(TIME_FORMAT).decode('utf-8'),
datetime.strptime(order['endtime'], DATETIME_FORMAT).strftime(TIME_FORMAT).decode('utf-8'),
order['poolroom_name'], order['poolroom_address'], order['payment'])
def orderComplete(order):
completetime = localtime(datetime.utcfromtimestamp(order['timestamp']).replace(tzinfo=utc), pytz.timezone(TIME_ZONE))
return ORDER_COMPLETE %(completetime.strftime(DATE_FORMAT).decode('utf-8'), completetime.strftime(TIME2_FORMAT).decode('utf-8'))
def orderArrival(order):
starttime = datetime.strptime(order['starttime'], DATETIME_FORMAT)
return ORDER_ARRIVAL %(order['assistant_name'], starttime.strftime(DATE_FORMAT).decode('utf-8'), starttime.strftime(TIME_FORMAT).decode('utf-8'),
datetime.strptime(order['endtime'], DATETIME_FORMAT).strftime(TIME_FORMAT).decode('utf-8'), order['poolroom_name'], order['payment'],
order['user_nickname'], order['user_cellphone'])
| 35.958333 | 154 | 0.719197 |
4a24a10ed7ba66c6488e8a6d5067a6f7f2595b8e | 1,063 | py | Python | setup.py | sanchopanca/address-book | 7e0da3f59840a6305d0a4093aec6efc78a99e53e | [
"MIT"
] | null | null | null | setup.py | sanchopanca/address-book | 7e0da3f59840a6305d0a4093aec6efc78a99e53e | [
"MIT"
] | null | null | null | setup.py | sanchopanca/address-book | 7e0da3f59840a6305d0a4093aec6efc78a99e53e | [
"MIT"
] | null | null | null | """
address_book
~~~~~~~~~~~~
address_book is a simple library for storing and querying data
about persons
"""
from setuptools import setup
setup(
name='address_book',
version='0.1.0',
url='https://github.com/sanchopanca/address-book',
license='MIT',
author='Aleksandr Kovalev',
author_email='[email protected]',
description='a simple library for storing and querying data about persons',
long_description=__doc__,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Software Development :: Libraries :: Python Modules',
],
packages=['address_book'],
)
| 29.527778 | 79 | 0.629351 |
4a24a1af38a382efb64a62fe7fa98fc6fdf5e55b | 1,742 | py | Python | OOPS/class_object.py | venkateshtantravahi/Python-0-to-hero- | dd1c5cb8a9693f72adb7e25ffc54c0492becea4a | [
"MIT"
] | 6 | 2021-09-19T09:33:20.000Z | 2022-01-03T03:57:14.000Z | OOPS/class_object.py | venkateshtantravahi/Python-0-to-hero- | dd1c5cb8a9693f72adb7e25ffc54c0492becea4a | [
"MIT"
] | 1 | 2021-12-18T14:19:03.000Z | 2021-12-18T14:19:03.000Z | OOPS/class_object.py | venkateshtantravahi/Python-0-to-hero- | dd1c5cb8a9693f72adb7e25ffc54c0492becea4a | [
"MIT"
] | null | null | null | """
class is like a blueprint that we provide to get the final output
whereas object is collections of variables and methods that are
inherited from the class, to act on data to provide some output
"""
# We create a class using 'class' keyword
class myNewClass:
"""This is a docstring. Created a new class"""
pass
class Person:
"This is a person class"
age = 10
def greet(self):
print("Hello")
print(Person.age)
print(Person.greet)
print(Person.__doc__)
harry = Person()
print(harry.greet)
harry.greet() # Person.greet(harry)
"""
methods in classes that starts with __ are having special defn and properties, these are called as constructors
"""
class ComplexNumbers:
def __init__(self, r=0, i=0):
self.real = r
self.imag = i
def get_data(self):
print(f"{self.real}+{self.imag}j")
# Create a new complex number object
num1 = ComplexNumbers(2, 3)
# call method get_data num1 instance
num1.get_data()
# creating another object with only one param
num2 = ComplexNumbers(5)
# call the method get_data for num2 instance
num2.get_data()
# deleting parameter imag of object
# del num1.imag
# num1.get_data()
"""
Traceback (most recent call last):
File ".\Classes&Objects.py", line 51, in <module>
num1.get_data()
File ".\Classes&Objects.py", line 35, in get_data
print(f'{self.real}+{self.imag}j')
AttributeError: 'ComplexNumbers' object has no attribute 'imag'
"""
# we can delete a entire method of class by referring to it
# del ComplexNumbers.get_data
# num1.get_data()
"""
Traceback (most recent call last):
File ".\Classes&Objects.py", line 63, in <module>
num1.get_data()
AttributeError: 'ComplexNumbers' object has no attribute 'get_data'
"""
| 22.921053 | 112 | 0.703789 |
4a24a1ddbbabfdc22a95abdcc2a8e2c730991a32 | 1,725 | py | Python | setup.py | MDiesing/pandapipes | ba5bea706bcbdb003be325de78a181817539ee0f | [
"BSD-3-Clause"
] | null | null | null | setup.py | MDiesing/pandapipes | ba5bea706bcbdb003be325de78a181817539ee0f | [
"BSD-3-Clause"
] | null | null | null | setup.py | MDiesing/pandapipes | ba5bea706bcbdb003be325de78a181817539ee0f | [
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2020 by Fraunhofer Institute for Energy Economics
# and Energy System Technology (IEE), Kassel. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
import io
import os
import re
from setuptools import find_packages
from setuptools import setup
with open('README.rst', 'rb') as f:
install = f.read().decode('utf-8')
with open('CHANGELOG.rst', 'rb') as f:
changelog = f.read().decode('utf-8')
classifiers = [
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6']
long_description = '\n\n'.join((install, changelog))
setup(
name='pandapipes',
version='1.0.4',
author='Dennis Cronbach, Daniel Lohmeier, Simon Ruben Drauz',
author_email='[email protected], [email protected], '
'[email protected]',
description='Convenient Power System Modelling and Analysis based on PYPOWER and pandas',
long_description=long_description,
url='http://www.pandapipes.org',
license='BSD',
install_requires=["pandapower>=2.0"],
extras_require={"docs": ["numpydoc", "sphinx", "sphinx_rtd_theme", "sphinxcontrib.bibtex"]},
python_requires='>=3, <4',
packages=find_packages(),
include_package_data=True,
classifiers=classifiers
)
| 34.5 | 99 | 0.691014 |
4a24a2424f7f7d1c6b715a7fd75399a48cc6d080 | 559 | py | Python | tests/utils/test_lab.py | danielkelshaw/GPyBO | 39e331953ec831ad1aca954fa5c9d63da9ca9d7d | [
"MIT"
] | 1 | 2021-09-01T11:31:17.000Z | 2021-09-01T11:31:17.000Z | tests/utils/test_lab.py | danielkelshaw/GPyBO | 39e331953ec831ad1aca954fa5c9d63da9ca9d7d | [
"MIT"
] | 64 | 2020-05-11T19:16:24.000Z | 2020-08-17T22:55:20.000Z | tests/utils/test_lab.py | danielkelshaw/GPyBO | 39e331953ec831ad1aca954fa5c9d63da9ca9d7d | [
"MIT"
] | null | null | null | import pytest
from gpybo.kernel.kernels import SquaredExponentialKernel
from gpybo.utils.lab import *
def test_pd_jitter_ve_sm() -> None:
k = torch.ones(5, 3)
with pytest.raises(ValueError):
ret = pd_jitter(k)
def test_pd_jitter_ve_pd() -> None:
k = torch.zeros(10, 10)
with pytest.raises(ValueError):
ret = pd_jitter(k)
def test_pd_jitter() -> None:
x = torch.rand(5, 1)
k = SquaredExponentialKernel()(x, x)
k_jit = pd_jitter(k)
assert isinstance(k_jit, Tensor)
assert k.shape == k_jit.shape
| 17.46875 | 57 | 0.661896 |
Subsets and Splits