filename | text
---|---
the-stack_106_31051 | """
mbed CMSIS-DAP debugger
Copyright (c) 2006-2015 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..core.target import Target
import logging
from struct import unpack
from time import time
from .flash_builder import FlashBuilder
DEFAULT_PAGE_PROGRAM_WEIGHT = 0.130
DEFAULT_PAGE_ERASE_WEIGHT = 0.048
DEFAULT_CHIP_ERASE_WEIGHT = 0.174
# Program to compute the CRC of sectors. This works on cortex-m processors.
# Code is relocatable and only needs to be on a 4 byte boundary.
# 200 bytes of executable data below + 1024 byte crc table = 1224 bytes
# Usage requirements:
# - In memory, reserve 0x600 bytes for the code & table
# - Make sure the data buffer is big enough to hold 4 bytes for each page that could be checked (i.e. >= num pages * 4)
analyzer = (
0x2780b5f0, 0x25004684, 0x4e2b2401, 0x447e4a2b, 0x0023007f, 0x425b402b, 0x40130868, 0x08584043,
0x425b4023, 0x40584013, 0x40200843, 0x40104240, 0x08434058, 0x42404020, 0x40584010, 0x40200843,
0x40104240, 0x08434058, 0x42404020, 0x40584010, 0x40200843, 0x40104240, 0x08584043, 0x425b4023,
0x40434013, 0xc6083501, 0xd1d242bd, 0xd01f2900, 0x46602301, 0x469c25ff, 0x00894e11, 0x447e1841,
0x88034667, 0x409f8844, 0x2f00409c, 0x2201d012, 0x4252193f, 0x34017823, 0x402b4053, 0x599b009b,
0x405a0a12, 0xd1f542bc, 0xc00443d2, 0xd1e74281, 0xbdf02000, 0xe7f82200, 0x000000b2, 0xedb88320,
0x00000042,
)
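# Worked size check (from the comments above): the analyzer is 49 words of
# code (~200 bytes) plus a 1024-byte CRC table, i.e. 1224 bytes, which fits
# comfortably in the reserved 0x600 (1536) bytes. The result buffer then
# needs 4 bytes per checked page, e.g. checking 256 pages needs 1 KiB.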
def _msb(n):
ndx = 0
while (1 < n):
n = (n >> 1)
ndx += 1
return ndx
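# Example: _msb(0x400) == 10 and _msb(0x1000) == 12; computeCrcs() below uses
# this to encode a power-of-two sector size as its log2.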
def _same(d1, d2):
if len(d1) != len(d2):
return False
for i in range(len(d1)):
if d1[i] != d2[i]:
return False
return True
class PageInfo(object):
def __init__(self):
self.base_addr = None # Start address of this page
self.erase_weight = None # Time it takes to erase a page
self.program_weight = None # Time it takes to program a page (Not including data transfer time)
self.size = None # Size of page
def __repr__(self):
return "<PageInfo@0x%x base=0x%x size=0x%x erswt=%g prgwt=%g>" \
% (id(self), self.base_addr, self.size, self.erase_weight, self.program_weight)
class FlashInfo(object):
def __init__(self):
self.rom_start = None # Starting address of ROM
self.erase_weight = None # Time it takes to perform a chip erase
self.crc_supported = None # Is the function computeCrcs supported?
def __repr__(self):
return "<FlashInfo@0x%x start=0x%x erswt=%g crc=%s>" \
% (id(self), self.rom_start, self.erase_weight, self.crc_supported)
class Flash(object):
"""
This class is responsible for flashing a new binary to a target
"""
def __init__(self, target, flash_algo):
self.target = target
self.flash_algo = flash_algo
self.flash_algo_debug = False
if flash_algo is not None:
self.is_valid = True
self.use_analyzer = flash_algo['analyzer_supported']
self.end_flash_algo = flash_algo['load_address'] + len(flash_algo) * 4
self.begin_stack = flash_algo['begin_stack']
self.begin_data = flash_algo['begin_data']
self.static_base = flash_algo['static_base']
self.min_program_length = flash_algo.get('min_program_length', 0)
# Check for double buffering support.
if 'page_buffers' in flash_algo:
self.page_buffers = flash_algo['page_buffers']
else:
self.page_buffers = [self.begin_data]
self.double_buffer_supported = len(self.page_buffers) > 1
else:
self.is_valid = False
self.use_analyzer = False
self.end_flash_algo = None
self.begin_stack = None
self.begin_data = None
self.static_base = None
self.min_program_length = 0
self.page_buffers = []
self.double_buffer_supported = False
@property
def minimumProgramLength(self):
return self.min_program_length
def init(self, reset=True):
"""
Download the flash algorithm into RAM
"""
self.target.halt()
if reset:
self.target.setTargetState("PROGRAM")
# update core register to execute the init subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_init'], init=True)
# check the return code
if result != 0:
logging.error('init error: %i', result)
def cleanup(self):
pass
def computeCrcs(self, sectors):
data = []
# Convert address, size pairs into commands
# for the CRC computation algorithm to perform
for addr, size in sectors:
size_val = _msb(size)
addr_val = addr // size
# Size must be a power of 2
assert (1 << size_val) == size
# Address must be a multiple of size
assert (addr % size) == 0
val = (size_val << 0) | (addr_val << 16)
data.append(val)
self.target.writeBlockMemoryAligned32(self.begin_data, data)
# update core register to execute the subroutine
result = self.callFunctionAndWait(self.flash_algo['analyzer_address'], self.begin_data, len(data))
# Read back the CRCs for each section
data = self.target.readBlockMemoryAligned32(self.begin_data, len(data))
return data
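# Worked example of the (addr, size) -> command encoding above, for a
# hypothetical 1 KiB sector at address 0x400:
#   size_val = _msb(0x400)     -> 10  (1 << 10 == 0x400)
#   addr_val = 0x400 // 0x400  -> 1   (sector index in units of its own size)
#   val      = (10 << 0) | (1 << 16) == 0x0001000A
# One 32-bit command word is written per sector; the analyzer (whose table is
# built from the standard CRC-32 polynomial 0xedb88320 embedded in the blob)
# replaces each word with the CRC of that sector, which is then read back.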
def eraseAll(self):
"""
Erase all the flash
"""
# update core register to execute the eraseAll subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_eraseAll'])
# check the return code
if result != 0:
logging.error('eraseAll error: %i', result)
def erasePage(self, flashPtr):
"""
Erase one page
"""
# update core register to execute the erasePage subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_erase_sector'], flashPtr)
# check the return code
if result != 0:
logging.error('erasePage(0x%x) error: %i', flashPtr, result)
def programPage(self, flashPtr, bytes):
"""
Flash one page
"""
# prevent security settings from locking the device
bytes = self.overrideSecurityBits(flashPtr, bytes)
# first transfer in RAM
self.target.writeBlockMemoryUnaligned8(self.begin_data, bytes)
# get info about this page
page_info = self.getPageInfo(flashPtr)
# update core register to execute the program_page subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_program_page'], flashPtr, len(bytes), self.begin_data)
# check the return code
if result != 0:
logging.error('programPage(0x%x) error: %i', flashPtr, result)
def getPageBufferCount(self):
return len(self.page_buffers)
def isDoubleBufferingSupported(self):
return self.double_buffer_supported
def startProgramPageWithBuffer(self, bufferNumber, flashPtr):
"""
Flash one page
"""
assert bufferNumber < len(self.page_buffers), "Invalid buffer number"
# get info about this page
page_info = self.getPageInfo(flashPtr)
# update core register to execute the program_page subroutine
result = self.callFunction(self.flash_algo['pc_program_page'], flashPtr, page_info.size, self.page_buffers[bufferNumber])
def loadPageBuffer(self, bufferNumber, flashPtr, bytes):
assert bufferNumber < len(self.page_buffers), "Invalid buffer number"
# prevent security settings from locking the device
bytes = self.overrideSecurityBits(flashPtr, bytes)
# transfer the buffer to device RAM
self.target.writeBlockMemoryUnaligned8(self.page_buffers[bufferNumber], bytes)
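# Minimal sketch of the double-buffered programming loop these two methods
# enable (method names are the ones defined above; addresses are made up):
#   flash.loadPageBuffer(0, addr, data0)
#   flash.startProgramPageWithBuffer(0, addr)            # program page 0 ...
#   flash.loadPageBuffer(1, addr + page_size, data1)     # ... while filling buffer 1
#   flash.waitForCompletion()
#   flash.startProgramPageWithBuffer(1, addr + page_size)
# Callers such as FlashBuilder can use this pattern whenever
# isDoubleBufferingSupported() returns True.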
def programPhrase(self, flashPtr, bytes):
"""
Flash a portion of a page.
"""
# Get min programming length. If one was not specified, use the page size.
if self.min_program_length:
min_len = self.min_program_length
else:
min_len = self.getPageInfo(flashPtr).size
# Require write address and length to be aligned to min write size.
if flashPtr % min_len:
raise RuntimeError("unaligned flash write address")
if len(bytes) % min_len:
raise RuntimeError("phrase length is unaligned or too small")
# prevent security settings from locking the device
bytes = self.overrideSecurityBits(flashPtr, bytes)
# first transfer in RAM
self.target.writeBlockMemoryUnaligned8(self.begin_data, bytes)
# update core register to execute the program_page subroutine
result = self.callFunctionAndWait(self.flash_algo['pc_program_page'], flashPtr, len(bytes), self.begin_data)
# check the return code
if result != 0:
logging.error('programPhrase(0x%x) error: %i', flashPtr, result)
def getPageInfo(self, addr):
"""
Get info about the page that contains this address
Override this function if variable page sizes are supported
"""
region = self.target.getMemoryMap().getRegionForAddress(addr)
if not region or not region.isFlash:
return None
info = PageInfo()
info.erase_weight = DEFAULT_PAGE_ERASE_WEIGHT
info.program_weight = DEFAULT_PAGE_PROGRAM_WEIGHT
info.size = region.blocksize
info.base_addr = addr - (addr % info.size)
return info
def getFlashInfo(self):
"""
Get info about the flash
Override this function to return different values
"""
boot_region = self.target.getMemoryMap().getBootMemory()
info = FlashInfo()
info.rom_start = boot_region.start if boot_region else 0
info.erase_weight = DEFAULT_CHIP_ERASE_WEIGHT
info.crc_supported = self.use_analyzer
return info
def getFlashBuilder(self):
return FlashBuilder(self, self.getFlashInfo().rom_start)
def flashBlock(self, addr, data, smart_flash=True, chip_erase=None, progress_cb=None, fast_verify=False):
"""
Flash a block of data
"""
flash_start = self.getFlashInfo().rom_start
fb = FlashBuilder(self, flash_start)
fb.addData(addr, data)
info = fb.program(chip_erase, progress_cb, smart_flash, fast_verify)
return info
def flashBinary(self, path_file, flashPtr=None, smart_flash=True, chip_erase=None, progress_cb=None, fast_verify=False):
"""
Flash a binary
"""
if flashPtr is None:
flashPtr = self.getFlashInfo().rom_start
with open(path_file, "rb") as f:
data = f.read()
data = unpack(str(len(data)) + 'B', data)
self.flashBlock(flashPtr, data, smart_flash, chip_erase, progress_cb, fast_verify)
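# Hedged usage sketch (how a caller might use the two helpers above; 'flash'
# is an instance of a Flash subclass bound to a target):
#   flash.flashBinary('firmware.bin')                 # program at the boot region start
#   flash.flashBlock(0x8000, data, smart_flash=True)  # program an arbitrary block
# Both delegate to FlashBuilder, which uses the page/chip erase and program
# weights defined at the top of this file as timing estimates.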
def callFunction(self, pc, r0=None, r1=None, r2=None, r3=None, init=False):
reg_list = []
data_list = []
if self.flash_algo_debug:
# Save vector catch state for use in waitForCompletion()
self._saved_vector_catch = self.target.getVectorCatch()
self.target.setVectorCatch(Target.CATCH_ALL)
if init:
# download flash algo in RAM
self.target.writeBlockMemoryAligned32(self.flash_algo['load_address'], self.flash_algo['instructions'])
if self.use_analyzer:
self.target.writeBlockMemoryAligned32(self.flash_algo['analyzer_address'], analyzer)
reg_list.append('pc')
data_list.append(pc)
if r0 is not None:
reg_list.append('r0')
data_list.append(r0)
if r1 is not None:
reg_list.append('r1')
data_list.append(r1)
if r2 is not None:
reg_list.append('r2')
data_list.append(r2)
if r3 is not None:
reg_list.append('r3')
data_list.append(r3)
if init:
reg_list.append('r9')
data_list.append(self.static_base)
if init:
reg_list.append('sp')
data_list.append(self.begin_stack)
reg_list.append('lr')
data_list.append(self.flash_algo['load_address'] + 1)
self.target.writeCoreRegistersRaw(reg_list, data_list)
# resume target
self.target.resume()
## @brief Wait until the breakpoint is hit.
def waitForCompletion(self):
while(self.target.getState() == Target.TARGET_RUNNING):
pass
if self.flash_algo_debug:
regs = self.target.readCoreRegistersRaw(list(range(19)) + [20])
logging.debug("Registers after flash algo: [%s]", " ".join("%08x" % r for r in regs))
expected_fp = self.flash_algo['static_base']
expected_sp = self.flash_algo['begin_stack']
expected_pc = self.flash_algo['load_address']
expected_flash_algo = self.flash_algo['instructions']
if self.use_analyzer:
expected_analyzer = analyzer
final_ipsr = self.target.readCoreRegister('xpsr') & 0xff
final_fp = self.target.readCoreRegister('r9')
final_sp = self.target.readCoreRegister('sp')
final_pc = self.target.readCoreRegister('pc')
#TODO - uncomment if Read/write and zero init sections can be moved into a separate flash algo section
#final_flash_algo = self.target.readBlockMemoryAligned32(self.flash_algo['load_address'], len(self.flash_algo['instructions']))
#if self.use_analyzer:
# final_analyzer = self.target.readBlockMemoryAligned32(self.flash_algo['analyzer_address'], len(analyzer))
error = False
if final_ipsr != 0:
logging.error("IPSR should be 0 but is 0x%02x", final_ipsr)
error = True
if final_fp != expected_fp:
# Frame pointer should not change
logging.error("Frame pointer should be 0x%x but is 0x%x" % (expected_fp, final_fp))
error = True
if final_sp != expected_sp:
# Stack pointer should return to original value after function call
logging.error("Stack pointer should be 0x%x but is 0x%x" % (expected_sp, final_sp))
error = True
if final_pc != expected_pc:
# PC should be pointing to breakpoint address
logging.error("PC should be 0x%x but is 0x%x" % (expected_pc, final_pc))
error = True
#TODO - uncomment if Read/write and zero init sections can be moved into a separate flash algo section
#if not _same(expected_flash_algo, final_flash_algo):
# logging.error("Flash algorithm overwritten!")
# error = True
#if self.use_analyzer and not _same(expected_analyzer, final_analyzer):
# logging.error("Analyzer overwritten!")
# error = True
assert error == False
self.target.setVectorCatch(self._saved_vector_catch)
return self.target.readCoreRegister('r0')
def callFunctionAndWait(self, pc, r0=None, r1=None, r2=None, r3=None, init=False):
self.callFunction(pc, r0, r1, r2, r3, init)
return self.waitForCompletion()
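# Register convention used by callFunction()/waitForCompletion() above:
#   pc     entry point inside the loaded flash algo
#   r0-r3  up to four integer arguments
#   r9     static base; sp: algo stack top (both set only when init=True)
#   lr     load_address + 1 (Thumb bit set), so returning from the routine
#          lands on the word at load_address - presumably a breakpoint-style
#          instruction that halts the core; waitForCompletion() polls for the
#          halt and, in debug mode, checks that pc == load_address.
# The routine's result is read back from r0.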
def setFlashAlgoDebug(self, enable):
"""
Turn on extra flash algorithm checking
When set this will greatly slow down flash algo performance
"""
self.flash_algo_debug = enable
def overrideSecurityBits(self, address, data):
return data
|
the-stack_106_31052 | # coding: utf-8
#
# Copyright © 2012-2015 Ejwa Software. All rights reserved.
#
# This file is part of gitinspector.
#
# gitinspector is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# gitinspector is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with gitinspector. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from __future__ import unicode_literals
import json
import sys
import textwrap
from ..localization import N_
from .. import format, gravatar, terminal
from ..blame import Blame
from .outputable import Outputable
BLAME_INFO_TEXT = N_("Below are the number of rows from each author that have survived and are still "
"intact in the current revision")
class BlameOutput(Outputable):
def __init__(self, changes, blame):
if format.is_interactive_format():
print("")
self.changes = changes
self.blame = blame
Outputable.__init__(self)
def output_html(self):
blame_xml = "<div><div class=\"box\">"
blame_xml += "<p>" + _(BLAME_INFO_TEXT) + ".</p><div><table id=\"blame\" class=\"git\">"
blame_xml += "<thead><tr> <th>{0}</th> <th>{1}</th> <th>{2}</th> <th>{3}</th> <th>{4}</th> </tr></thead>".format(
_("Author"), _("Rows"), _("Stability"), _("Age"), _("% in comments"))
blame_xml += "<tbody>"
chart_data = ""
blames = sorted(self.blame.get_summed_blames().items())
total_blames = 0
for i in blames:
total_blames += i[1].rows
for i, entry in enumerate(blames):
work_percentage = str("{0:.2f}".format(100.0 * entry[1].rows / total_blames))
blame_xml += "<tr " + ("class=\"odd\">" if i % 2 == 1 else ">")
if format.get_selected() == "html":
author_email = self.changes.get_latest_email_by_author(entry[0])
blame_xml += "<td><img src=\"{0}\"/>{1}</td>".format(gravatar.get_url(author_email), entry[0])
else:
blame_xml += "<td>" + entry[0] + "</td>"
blame_xml += "<td>" + str(entry[1].rows) + "</td>"
blame_xml += "<td>" + ("{0:.1f}".format(Blame.get_stability(entry[0], entry[1].rows, self.changes)) + "</td>")
blame_xml += "<td>" + "{0:.1f}".format(float(entry[1].skew) / entry[1].rows) + "</td>"
blame_xml += "<td>" + "{0:.2f}".format(100.0 * entry[1].comments / entry[1].rows) + "</td>"
blame_xml += "<td style=\"display: none\">" + work_percentage + "</td>"
blame_xml += "</tr>"
chart_data += "{{label: {0}, data: {1}}}".format(json.dumps(entry[0]), work_percentage)
if blames[-1] != entry:
chart_data += ", "
blame_xml += "<tfoot><tr> <td colspan=\"5\"> </td> </tr></tfoot></tbody></table>"
blame_xml += "<div class=\"chart\" id=\"blame_chart\"></div></div>"
blame_xml += "<script type=\"text/javascript\">"
blame_xml += " blame_plot = $.plot($(\"#blame_chart\"), [{0}], {{".format(chart_data)
blame_xml += " series: {"
blame_xml += " pie: {"
blame_xml += " innerRadius: 0.4,"
blame_xml += " show: true,"
blame_xml += " combine: {"
blame_xml += " threshold: 0.01,"
blame_xml += " label: \"" + _("Minor Authors") + "\""
blame_xml += " }"
blame_xml += " }"
blame_xml += " }, grid: {"
blame_xml += " hoverable: true"
blame_xml += " }"
blame_xml += " });"
blame_xml += "</script></div></div>"
print(blame_xml)
def output_json(self):
message_json = "\t\t\t\"message\": \"" + _(BLAME_INFO_TEXT) + "\",\n"
blame_json = ""
for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0])
name_json = "\t\t\t\t\"name\": \"" + i[0] + "\",\n"
email_json = "\t\t\t\t\"email\": \"" + author_email + "\",\n"
gravatar_json = "\t\t\t\t\"gravatar\": \"" + gravatar.get_url(author_email) + "\",\n"
rows_json = "\t\t\t\t\"rows\": " + str(i[1].rows) + ",\n"
stability_json = ("\t\t\t\t\"stability\": " + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows,
self.changes)) + ",\n")
age_json = ("\t\t\t\t\"age\": " + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + ",\n")
percentage_in_comments_json = ("\t\t\t\t\"percentage_in_comments\": " +
"{0:.2f}".format(100.0 * i[1].comments / i[1].rows) + "\n")
blame_json += ("{\n" + name_json + email_json + gravatar_json + rows_json + stability_json + age_json +
percentage_in_comments_json + "\t\t\t},")
else:
blame_json = blame_json[:-1]
print(",\n\t\t\"blame\": {\n" + message_json + "\t\t\t\"authors\": [\n\t\t\t" + blame_json + "]\n\t\t}", end="")
def output_text(self):
if sys.stdout.isatty() and format.is_interactive_format():
terminal.clear_row()
print(textwrap.fill(_(BLAME_INFO_TEXT) + ":", width=terminal.get_size()[0]) + "\n")
terminal.printb(terminal.ljust(_("Author"), 21) + terminal.rjust(_("Rows"), 10) + terminal.rjust(_("Stability"), 15) +
terminal.rjust(_("Age"), 13) + terminal.rjust(_("% in comments"), 20))
for i in sorted(self.blame.get_summed_blames().items()):
print(terminal.ljust(i[0], 20)[0:20 - terminal.get_excess_column_count(i[0])], end=" ")
print(str(i[1].rows).rjust(10), end=" ")
print("{0:.1f}".format(Blame.get_stability(i[0], i[1].rows, self.changes)).rjust(14), end=" ")
print("{0:.1f}".format(float(i[1].skew) / i[1].rows).rjust(12), end=" ")
print("{0:.2f}".format(100.0 * i[1].comments / i[1].rows).rjust(19))
def output_xml(self):
message_xml = "\t\t<message>" + _(BLAME_INFO_TEXT) + "</message>\n"
blame_xml = ""
for i in sorted(self.blame.get_summed_blames().items()):
author_email = self.changes.get_latest_email_by_author(i[0])
name_xml = "\t\t\t\t<name>" + i[0] + "</name>\n"
email_xml = "\t\t\t\t<email>" + author_email + "</email>\n"
gravatar_xml = "\t\t\t\t<gravatar>" + gravatar.get_url(author_email) + "</gravatar>\n"
rows_xml = "\t\t\t\t<rows>" + str(i[1].rows) + "</rows>\n"
stability_xml = ("\t\t\t\t<stability>" + "{0:.1f}".format(Blame.get_stability(i[0], i[1].rows,
self.changes)) + "</stability>\n")
age_xml = ("\t\t\t\t<age>" + "{0:.1f}".format(float(i[1].skew) / i[1].rows) + "</age>\n")
percentage_in_comments_xml = ("\t\t\t\t<percentage-in-comments>" + "{0:.2f}".format(100.0 * i[1].comments / i[1].rows) +
"</percentage-in-comments>\n")
blame_xml += ("\t\t\t<author>\n" + name_xml + email_xml + gravatar_xml + rows_xml + stability_xml +
age_xml + percentage_in_comments_xml + "\t\t\t</author>\n")
print("\t<blame>\n" + message_xml + "\t\t<authors>\n" + blame_xml + "\t\t</authors>\n\t</blame>")
|
the-stack_106_31053 | import unittest
import pickle
from common import MTurkCommon
class TestHITPersistence(MTurkCommon):
def create_hit_result(self):
return self.conn.create_hit(
question=self.get_question(), **self.get_hit_params()
)
def test_pickle_hit_result(self):
result = self.create_hit_result()
new_result = pickle.loads(pickle.dumps(result))
def test_pickle_deserialized_version(self):
"""
It seems the technique used to store and reload the object must
result in an equivalent object, or subsequent pickles may fail.
This tests a double-pickle to elicit that error.
"""
result = self.create_hit_result()
new_result = pickle.loads(pickle.dumps(result))
pickle.dumps(new_result)
if __name__ == '__main__':
unittest.main()
|
the-stack_106_31054 | import os
import sys
from math import gcd
from functools import reduce
#
# Complete the getTotalX function below.
#
def lcm(a, b):
a = int(a)
b = int(b)
return a * b // gcd(a, b)
def lcms(numbers):
return reduce(lcm, numbers)
def dividedByB(b, factor):
for i in b:
if i % factor != 0:
return False
return True
def getTotalX(a, b):
# Getting the LCM for a
LCM = lcms(a)
bmax = max(b)
# print(type(LCM))
counter = 0
multiplier = 1
while LCM * multiplier <= bmax:
if dividedByB(b, LCM*multiplier):
counter += 1
multiplier += 1
return counter
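# Worked example (the classic "Between Two Sets" sample): a = [2, 4], b = [16, 32, 96]
#   lcms(a) -> 4, max(b) -> 96
#   multiples of 4 up to 96 that divide every element of b: 4, 8, 16
#   getTotalX(a, b) -> 3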
if __name__ == '__main__':
# f = open(os.environ['OUTPUT_PATH'], 'w')
nm = input().split()
n = int(nm[0])
m = int(nm[1])
a = list(map(int, input().rstrip().split()))
b = list(map(int, input().rstrip().split()))
total = getTotalX(a, b)
print(total)
# f.write(str(total) + '\n')
# f.close()
|
the-stack_106_31057 | from shu.kanunu import KanunuScraper
from shu.base import Node
class MyScraper(KanunuScraper):
def get_title_and_links(self, doc):
table = doc('dl')
for anchor in table('dd a'):
yield (
anchor.text_content(),
str(self.base_url / anchor.get('href')))
def get_content_tree(self, doc):
root = Node(title='root')
for title, link in self.get_title_and_links(doc):
doc = self.get_doc(link)
chapter = Node(title=title)
paras = (p.text_content().strip() for p in doc('div.text p:not([align])'))
chapter.content = '\n\n'.join(paras)
root.append(chapter)
return root
index_url='https://www.kanunu8.com/book2/11011/index.html'
title='半生缘'
author='张爱玲'
output_file='books/ban_sheng_yuan'
formats=['.md', '.mobi']
scraper = MyScraper(index_url=index_url, title=title, author=author)
scraper.download()
scraper.build_ebook(output_file, formats=formats)
|
the-stack_106_31058 | import sys
import time
from typing import Optional
import click
from chia.rpc.full_node_rpc_client import FullNodeRpcClient
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.util.byte_types import hexstr_to_bytes
from chia.util.config import load_config
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.ints import uint16
from chia.util.bech32m import decode_puzzle_hash
@click.group("wallet", short_help="Manage your wallet")
def wallet_cmd() -> None:
pass
@wallet_cmd.command("get_transaction", short_help="Get a transaction")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-tx", "--tx_id", help="transaction id to search for", type=str, required=True)
@click.option("--verbose", "-v", count=True, type=int)
def get_transaction_cmd(wallet_rpc_port: Optional[int], fingerprint: int, id: int, tx_id: str, verbose: int) -> None:
extra_params = {"id": id, "tx_id": tx_id, "verbose": verbose}
import asyncio
from .wallet_funcs import execute_with_wallet, get_transaction
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transaction))
@wallet_cmd.command("get_transactions", short_help="Get all transactions")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option(
"-o",
"--offset",
help="Skip transactions from the beginning of the list",
type=int,
default=0,
show_default=True,
required=True,
)
@click.option("--verbose", "-v", count=True, type=int)
@click.option(
"--paginate/--no-paginate",
default=None,
help="Prompt for each page of data. Defaults to true for interactive consoles, otherwise false.",
)
def get_transactions_cmd(
wallet_rpc_port: Optional[int],
fingerprint: int,
id: int,
offset: int,
verbose: bool,
paginate: Optional[bool],
) -> None:
extra_params = {"id": id, "verbose": verbose, "offset": offset, "paginate": paginate}
import asyncio
from .wallet_funcs import execute_with_wallet, get_transactions
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_transactions))
# The flush/close avoids output like below when piping through `head -n 1`
# which will close stdout.
#
# Exception ignored in: <_io.TextIOWrapper name='<stdout>' mode='w' encoding='utf-8'>
# BrokenPipeError: [Errno 32] Broken pipe
sys.stdout.flush()
sys.stdout.close()
@wallet_cmd.command("send", short_help="Send sit to another wallet")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-a", "--amount", help="How much sit to send, in XCH", type=str, required=True)
@click.option(
"-m",
"--fee",
help="Set the fees for the transaction, in XCH",
type=str,
default="0",
show_default=True,
required=True,
)
@click.option("-t", "--address", help="Address to send the XCH", type=str, required=True)
@click.option(
"-o", "--override", help="Submits transaction without checking for unusual values", is_flag=True, default=False
)
def send_cmd(
wallet_rpc_port: Optional[int], fingerprint: int, id: int, amount: str, fee: str, address: str, override: bool
) -> None:
extra_params = {"id": id, "amount": amount, "fee": fee, "address": address, "override": override}
import asyncio
from .wallet_funcs import execute_with_wallet, send
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, send))
@wallet_cmd.command("send_from", short_help="Transfer all sit away from a specific puzzle hash")
@click.option(
"-p",
"--rpc-port",
help=(
"Set the port where the Full Node is hosting the RPC interface. "
"See the rpc_port under full_node in config.yaml"
),
type=int,
default=None,
show_default=True,
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-s", "--source", help="Address to send the XCH from", type=str, required=True)
@click.option("-t", "--address", help="Address to send the XCH", type=str, required=True)
def send_from_cmd(
rpc_port: Optional[int],
wallet_rpc_port: Optional[int],
fingerprint: int,
id: int,
source: str,
address: str,
) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet, send_from
extra_params = {"id": id, "source": source, "address": address, "rpc_port": rpc_port}
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, send_from))
@wallet_cmd.command("show", short_help="Show wallet information")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def show_cmd(wallet_rpc_port: Optional[int], fingerprint: int) -> None:
import asyncio
from .wallet_funcs import execute_with_wallet, print_balances
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, {}, print_balances))
@wallet_cmd.command("get_address", short_help="Get a wallet receive address")
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def get_address_cmd(wallet_rpc_port: Optional[int], id, fingerprint: int) -> None:
extra_params = {"id": id}
import asyncio
from .wallet_funcs import execute_with_wallet, get_address
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, get_address))
@wallet_cmd.command(
"delete_unconfirmed_transactions", short_help="Deletes all unconfirmed transactions for this wallet ID"
)
@click.option(
"-wp",
"--wallet-rpc-port",
help="Set the port where the Wallet is hosting the RPC interface. See the rpc_port under wallet in config.yaml",
type=int,
default=None,
)
@click.option("-i", "--id", help="Id of the wallet to use", type=int, default=1, show_default=True, required=True)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def delete_unconfirmed_transactions_cmd(wallet_rpc_port: Optional[int], id, fingerprint: int) -> None:
extra_params = {"id": id}
import asyncio
from .wallet_funcs import delete_unconfirmed_transactions, execute_with_wallet
asyncio.run(execute_with_wallet(wallet_rpc_port, fingerprint, extra_params, delete_unconfirmed_transactions))
async def do_recover_pool_nft(contract_hash: str, launcher_hash: str, fingerprint: int):
from .wallet_funcs import get_wallet
contract_hash_bytes32 = hexstr_to_bytes(contract_hash)
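# 604800 seconds == 7 days: coins younger than this are skipped below as not yet expired.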
delay = 604800
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
rpc_port = config["full_node"]["rpc_port"]
wallet_rpc_port = config["wallet"]["rpc_port"]
node_client = await FullNodeRpcClient.create(self_hostname, uint16(rpc_port), DEFAULT_ROOT_PATH, config)
wallet_client = await WalletRpcClient.create(self_hostname, uint16(wallet_rpc_port), DEFAULT_ROOT_PATH, config)
coin_records = await node_client.get_coin_records_by_puzzle_hash(contract_hash_bytes32, False)
# expired coins
coins = [coin_record.coin for coin_record in coin_records if coin_record.timestamp <= int(time.time()) - delay]
if not coins:
print("no expired coins")
return
print("found", len(coins), "expired coins, total amount:", sum(coin.amount for coin in coins))
wallet_client_f = await get_wallet(wallet_client, fingerprint=fingerprint)
tx = await wallet_client_f.recover_pool_nft(launcher_hash, contract_hash, coins)
await node_client.push_tx(tx)
print("tx pushed")
@wallet_cmd.command("recover_pool_nft", short_help="Recover coins in pool nft contract")
@click.option(
"--contract-hash",
help="Set the nft contract hash",
type=str,
default=None,
)
@click.option(
"--launcher-hash",
help="Set the launcher hash, you should get it from silicoin wallet",
type=str,
default=None,
)
@click.option("-f", "--fingerprint", help="Set the fingerprint to specify which wallet to use", type=int)
def recover_pool_nft(contract_hash: str, launcher_hash: str, fingerprint: int):
import asyncio
# Convert contract_hash to puzzle_hash
contract_puzzle_hash = decode_puzzle_hash(contract_hash).hex()
asyncio.run(do_recover_pool_nft(contract_puzzle_hash, launcher_hash, fingerprint))
|
the-stack_106_31059 | import sys
if sys.version_info.major == 2:
# the `flatten` function can be found in compiler library:
# `from compiler.ast import flatten`
from collections import Iterable
def flatten(iterable):
for i in iterable:
# if type(t) is list or type(t) is tuple: # strict check
# if isinstance(i, list) or isinstance(i, tuple): # with instances of lists or tuples
if isinstance(i, Iterable) and not isinstance(i, str): # non strict check
for j in flatten(i):
yield j
else:
yield i
elif sys.version_info.major == 3:
def flatten(iterable):
for i in iterable:
try:
iter(i)
except TypeError:
is_iterable = False
else:
is_iterable = True
if is_iterable and not isinstance(i, str):
yield from flatten(i)
else:
yield i
if __name__ == '__main__':
import unittest
class TC(unittest.TestCase):
def test_flatten(self):
iterable = [1, '2', [3, 4], [5, [('6', ), (('76', ), )]]]
expected_result = [1, '2', 3, 4, 5, '6', '76']
result = list(flatten(iterable))
self.assertEqual(result, expected_result)
unittest.main()
|
the-stack_106_31060 | #!/usr/bin/env python
#################################################################################
# Copyright 2016-2019 ARM Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#################################################################################
import json
import os
import logging
import sys
import re
import subprocess
import stat
import tarfile
import zipfile
from string import Template
import shutil
import platform
import tempfile
from contextlib import contextmanager
import requests
import click
logger = logging.getLogger('pal-platform')
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PAL_PLATFORM_ROOT = SCRIPT_DIR
PROG_NAME = os.path.basename(sys.argv[0])
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
PATCH_UTIL = 'patch.exe' if platform.system() == 'Windows' else 'patch'
AVAILABLE_TARGETS = []
BUILD_SYS_MIN_VER = 2
PLAT_CMAKE_TEMPLATE = '''
#################################################################################
# #
# THIS IS AN AUTO GENERATED FILE #
# #
#################################################################################
set (MBED_CLOUD_CLIENT_SDK $mbed_cloud_client_sdk)
set (MBED_CLOUD_CLIENT_OS $mbed_cloud_client_os)
set (MBED_CLOUD_CLIENT_DEVICE $mbed_cloud_client_device)
set (MBED_CLOUD_CLIENT_MIDDLEWARE $mbed_cloud_client_mw_list)
set (MBED_CLOUD_CLIENT_TOOLCHAIN $mbed_cloud_client_toolchain)
set (MBED_CLOUD_CLIENT_BUILD_SYS_MIN_VER $mbed_cloud_client_build_sys_min_ver)
'''
# for 2.7 compatibility:
# http://stackoverflow.com/questions/1713038/super-fails-with-error-typeerror-argument-1-must-be-type-not-classobj
__metaclass__ = type
class Config(object):
def __init__(self):
self.verbose = False
pass_config = click.make_pass_decorator(Config, ensure=True)
class DynamicChoice(click.Choice):
name = 'DynamicChoice'
def __init__(self, func, **kwargs):
self.choices = []
self.func = func
self.kwargs = kwargs
def get_metavar(self, param):
self.choices = self.func(**self.kwargs)
return super(DynamicChoice, self).get_metavar(param)
def get_missing_message(self, param):
self.choices = self.func(**self.kwargs)
return super(DynamicChoice, self).get_missing_message(param)
def convert(self, value, param, ctx):
self.choices = self.func(**self.kwargs)
return super(DynamicChoice, self).convert(value, param, ctx)
def __repr__(self):
self.choices = self.func(**self.kwargs)
return super(DynamicChoice, self).__repr__()
@contextmanager
def TemporaryDirectory():
name = tempfile.mkdtemp()
try:
yield name
finally:
shutil.rmtree(name, onerror=del_rw)
def del_rw(action, path, exc):
"""
Callback function for error handling in shutil.rmtree.
Will be called whenever shutil.rmtree catches an exception.
:param action: The internal action used inside shutil.rmtree
(os.listdir, os.remove, or os.rmdir)
:param path: The path of the file/directory
:param exc: The Exception info
"""
if not os.access(path, os.W_OK):
# Is the error an access error ?
os.chmod(path, stat.S_IWUSR)
action(path)
else:
raise
def is_git_pull_required(repo_dir, branch, **stream_kwargs):
"""
Check if a git pull is required. If local sources are modified the pull
would fail, so we check whether a pull is needed before attempting it
and failing the script.
http://stackoverflow.com/questions/3258243/check-if-pull-needed-in-git
:param repo_dir: Directory to check
:param branch: Branch name / hash tag
:param stream_kwargs:
* *stdout* --
Standard output handle
* *stderr* --
Standard error handle
:return: True/False
"""
logger.debug('Check if git pull required for %s', repo_dir)
active_branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=repo_dir).strip()
if active_branch.decode(encoding='utf-8') == 'HEAD':
return False
local_hash = check_output(['git', 'rev-parse', '@'], cwd=repo_dir, **stream_kwargs)
remote_hash = check_output(['git', 'rev-parse', '@{upstream}'], cwd=repo_dir, **stream_kwargs)
base_hash = check_output(['git', 'merge-base', '@', '@{upstream}'], cwd=repo_dir, **stream_kwargs)
if local_hash == remote_hash:
logger.info('%s is up-to-date with %s', repo_dir, branch)
return False
elif local_hash == base_hash:
return True
elif remote_hash == base_hash:
logger.warning('There are local commits - do not forget to push %s', repo_dir)
return False
else:
raise Exception('Local and remote repositories of %s have diverged', repo_dir)
def is_git_dir(_dir):
return os.path.isdir(_dir) and os.path.isdir(os.path.join(_dir, '.git'))
def extract_repo_name(url):
"""
Extract repository remote and name from a git URL, ignoring protocol and .git endings
"""
regex_git_url = r'^(git\://|ssh\://|https?\://|)(([^/:@]+)(\:([^/:@]+))?@)?([^/:]+)[:/](.+?)(\.git|\/?)$'
m = re.match(regex_git_url, url)
return m.group(7) or url
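# Illustrative behaviour (both URLs map to group 7 of the regex above):
#   extract_repo_name('git@github.com:ARMmbed/mbedtls.git')   -> 'ARMmbed/mbedtls'
#   extract_repo_name('https://github.com/ARMmbed/mbedtls')   -> 'ARMmbed/mbedtls'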
def git_fetch(git_url, tree_ref, dest_dir, **stream_kwargs):
"""
Fetch sources from a git url to a local directory
:param git_url: Url of git repository
:param tree_ref: Branch name / hash tag
:param dest_dir: Destination directory
:param stream_kwargs:
* *stdout* --
Standard output handle
* *stderr* --
Standard error handle
"""
is_hash = re.search('[a-fA-F0-9]{40}', tree_ref)
if not is_git_dir(dest_dir):
logger.info('Cloning from %s at %s to %s', git_url, tree_ref, dest_dir)
cmd = ['git', 'clone'] + (['--no-checkout'] if is_hash else ['-b', tree_ref])
check_cmd(cmd + [git_url, dest_dir], **stream_kwargs)
check_cmd(['git', 'config', 'core.longpaths', 'true'], cwd=dest_dir, **stream_kwargs)
if is_hash:
check_cmd(['git', 'config', 'advice.detachedHead', 'false'], cwd=dest_dir, **stream_kwargs)
check_cmd(['git', 'checkout', tree_ref], cwd=dest_dir, **stream_kwargs)
else:
logger.info('%s already exists, updating from %s', dest_dir, git_url)
remote_url = check_output(['git', 'ls-remote', '--get-url'], cwd=dest_dir).decode(encoding='utf-8').strip()
assert extract_repo_name(git_url) == extract_repo_name(remote_url), 'Trying to update %s from different remote (%s)' % (dest_dir, git_url)
check_cmd(['git', 'fetch', '--all'], cwd=dest_dir, **stream_kwargs)
if not is_hash:
active_branch = check_output(['git', 'rev-parse', '--abbrev-ref', 'HEAD'], cwd=dest_dir).strip()
if active_branch.decode(encoding='utf-8') == tree_ref:
if is_git_pull_required(dest_dir, tree_ref, **stream_kwargs):
check_cmd(['git', 'pull', '--rebase', '--tags', '--all'], cwd=dest_dir, **stream_kwargs)
else:
return
logger.info('Checking out from %s at %s', git_url, tree_ref)
check_cmd(['git', 'checkout', tree_ref], cwd=dest_dir, **stream_kwargs)
if not is_hash and is_git_pull_required(dest_dir, tree_ref, **stream_kwargs):
check_cmd(['git', 'pull', '--rebase', '--tags', '--all'], cwd=dest_dir, **stream_kwargs)
def download_file(url, dest_dir, file_name=None):
"""
Download a file from a url to a local directory
:param url: Url of the remote file
:param dest_dir: Destination directory
:param file_name: Local file name (Optional, if missing than a temporary name is given)
:return: Full path of the downloaded file
"""
assert (os.path.isdir(dest_dir)), '%s does not exist or not a directory' % dest_dir
r = requests.get(url, stream=True)
r.raise_for_status()
try:
if file_name:
fh = open(os.path.join(dest_dir, file_name), 'wb')
else:
fh = tempfile.NamedTemporaryFile(dir=dest_dir, delete=False)
name = fh.name
shutil.copyfileobj(r.raw, fh)
finally:
fh.close()
return name
def extract_file(path, to_directory):
"""
Extract an archive file (ZIP/TAR) to a local directory
:param path: Full path of the archive
:param to_directory: Destination directory
"""
if zipfile.is_zipfile(path):
archive_type, opener = 'ZIP', zipfile.ZipFile
elif tarfile.is_tarfile(path):
archive_type, opener = 'TAR', tarfile.open
else:
raise Exception('%s has unknown archive type' % path)
with opener(path) as archive:
if archive_type == 'ZIP':
root = archive.infolist()[0]
root_dir = root.filename
nested = root.filename.endswith('/')
elif archive_type == 'TAR':
root = archive.getmembers()[0]
root_dir = root.name
nested = root.isdir()
if nested:
with TemporaryDirectory() as temp_dir:
archive.extractall(path=temp_dir)
if os.path.isdir(to_directory):
shutil.rmtree(to_directory, onerror=del_rw)
shutil.copytree(os.path.join(temp_dir, root_dir), to_directory)
else:
archive.extractall(path=to_directory)
def apply_patch(patch_file, reverse=False, **stream_kwargs):
"""
Apply patch on a directory
:param patch_file: Patch file to apply
:param reverse: True if un-applying an already patched directory
:param stream_kwargs:
* *stdout* --
Standard output handle
* *stderr* --
Standard error handle
:return: True if the directory is already integrated, otherwise False
"""
logger.info('%s %s', 'Reverting' if reverse else 'Applying', patch_file)
_dir, filename = os.path.split(patch_file)
with open(patch_file, 'rt') as fh:
patch_source = fh.read()
match = re.search(r'^--- (\S+)', patch_source, re.MULTILINE)
if not match:
raise Exception('malformed patch file')
path_list = match.group(1).split('/')
strip_num = path_list.index(os.path.splitext(filename)[0])
logger.debug('patch file relative strip is %s', strip_num)
cmd = [PATCH_UTIL, '-p', str(strip_num), '-i', patch_file, '--binary']
cmd += ['--verbose'] if logger.isEnabledFor(logging.DEBUG) else ['--quiet']
is_integrated = False
try:
check_cmd_and_raise(cmd + ['--reverse', '--dry-run', '--force'], cwd=_dir, **stream_kwargs)
is_integrated = True
logger.info('%s already integrated, %s', patch_file, 'reverting' if reverse else 'no need to patch')
except subprocess.CalledProcessError:
pass
else: # exception was not raised
if reverse:
check_cmd(cmd + ['--reverse', '--force'], cwd=_dir, **stream_kwargs)
is_integrated = False
logger.info('Successfully un-applied %s to %s', patch_file, _dir)
if not is_integrated and not reverse:
try:
check_cmd_and_raise(cmd + ['--dry-run'], cwd=_dir, **stream_kwargs)
check_cmd_and_raise(cmd, cwd=_dir, **stream_kwargs)
except subprocess.CalledProcessError:
click.echo('Applying %s on %s failed, check target directory is clean' % (patch_file, _dir))
raise click.Abort
logger.info('Successfully applied %s to %s', patch_file, _dir)
return is_integrated
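# Worked example of the strip-level detection above, for a hypothetical patch
# file 'mbedtls.patch' whose first hunk header is '--- mbedtls/library/ssl_tls.c':
#   path_list = ['mbedtls', 'library', 'ssl_tls.c']
#   strip_num = path_list.index('mbedtls') -> 0, so patch runs with -p0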
def generate_plat_cmake(target):
"""
Generate target-dependent cmake files
:param target: The target to generate files to
:return: Full path of target-dependent output directory
"""
_os = target.os.name
if target.os.version:
_os += '_' + target.os.version
_device = target.device.name
_mw_list = [mw.name for mw in target.middleware]
_sdk = ' '
if target.name == 'K64F_FreeRTOS_mbedtls':
_os, _device = (' ', ' ')
_mw_list = []
_sdk = 'K64F_FreeRTOS'
out_dir_name = '__' + target.name
parent_dir = os.path.normpath(os.path.join(PAL_PLATFORM_ROOT, os.pardir))
out_dir = os.path.join(parent_dir, out_dir_name)
if os.path.exists(out_dir):
shutil.rmtree(out_dir, onerror=del_rw)
os.makedirs(out_dir)
autogen_file = os.path.join(out_dir, 'autogen.cmake')
cmake_template = Template(PLAT_CMAKE_TEMPLATE)
with open(autogen_file, 'wt') as fh:
fh.write(
cmake_template.safe_substitute(
mbed_cloud_client_sdk=_sdk,
mbed_cloud_client_os=_os,
mbed_cloud_client_device=_device,
mbed_cloud_client_mw_list=' '.join(_mw_list),
mbed_cloud_client_toolchain=' ',
mbed_cloud_client_build_sys_min_ver=BUILD_SYS_MIN_VER,
)
)
logger.info('Generated %s', autogen_file)
parent_cmake = os.path.join(parent_dir, 'CMakeLists.txt')
if not os.path.isfile(os.path.join(parent_cmake)):
with open(parent_cmake, 'wt') as fh:
fh.write('ADDSUBDIRS()\n')
logger.info('Generated %s', parent_cmake)
return out_dir
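# Illustrative output (filled in from PLAT_CMAKE_TEMPLATE above): for the special
# 'K64F_FreeRTOS_mbedtls' target, __K64F_FreeRTOS_mbedtls/autogen.cmake would
# contain roughly:
#   set (MBED_CLOUD_CLIENT_SDK K64F_FreeRTOS)
#   set (MBED_CLOUD_CLIENT_OS  )
#   set (MBED_CLOUD_CLIENT_DEVICE  )
#   set (MBED_CLOUD_CLIENT_MIDDLEWARE )
#   set (MBED_CLOUD_CLIENT_TOOLCHAIN  )
#   set (MBED_CLOUD_CLIENT_BUILD_SYS_MIN_VER 2)
# plus a one-line parent CMakeLists.txt containing ADDSUBDIRS().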
def check_cmd_and_raise(cmd, **kwargs):
"""
Wrapper function for subprocess.check_call
:param cmd: The command to execute
:param kwargs: See `https://docs.python.org/2/library/subprocess.html#subprocess.Popen`_
"""
logger.debug(" ".join(cmd))
subprocess.check_call(cmd, **kwargs)
def check_cmd(cmd, **kwargs):
"""
Wrapper function for subprocess.check_call
:param cmd: The command to execute
:param kwargs: See `https://docs.python.org/2/library/subprocess.html#subprocess.Popen`_
"""
logger.debug(" ".join(cmd))
try:
subprocess.check_call(cmd, **kwargs)
except Exception as e:
logger.error(e)
logger.error("** failed to run command %s **", cmd)
sys.exit()
def check_output(cmd, **kwargs):
"""
Wrapper function for subprocess.check_output
:param cmd: The command to execute
:param kwargs: See `https://docs.python.org/2/library/subprocess.html#subprocess.Popen`_
:return: Output of subprocess.check_output
"""
kwargs.pop('stdout', None)
logger.debug(" ".join(cmd))
try:
output = subprocess.check_output(cmd, **kwargs)
except Exception as e:
logger.error(e)
logger.error("** failed to run command %s **", cmd)
sys.exit()
return output
def check_output_and_raise(cmd, **kwargs):
"""
Wrapper function for subprocess.check_output
:param cmd: The command to execute
:param kwargs: See `https://docs.python.org/2/library/subprocess.html#subprocess.Popen`_
:return: Output of subprocess.check_output
"""
kwargs.pop('stdout', None)
logger.debug(" ".join(cmd))
return subprocess.check_output(cmd, **kwargs)
class Source:
def __init__(self, src, stream_kwargs):
self.location = src['location']
self.stream_kwargs = stream_kwargs
class GitSource(Source):
def __init__(self, src, stream_kwargs):
super(GitSource, self).__init__(src, stream_kwargs)
self.tag = src.get('tag', 'master')
def write(self, dst, out=sys.stdout):
out.write('- Clone %s at %s to %s' % (self.location, self.tag, dst))
def fetch(self, dst, name):
logger.info('Getting %s from git', name)
try:
git_fetch(self.location, self.tag, dst, **self.stream_kwargs)
except Exception as e:
logger.error(e)
logger.error("** failed to fetch %s from git - please check that remote is correct and avialable **", name)
sys.exit()
class LocalSource(Source):
def __init__(self, src, stream_kwargs):
super(LocalSource, self).__init__(src, stream_kwargs)
def write(self, dst, out=sys.stdout):
out.write('- Copy %s to %s' % (self.location, dst))
def fetch(self, dst, name):
assert os.path.isdir(self.location)
logger.info('Copying %s from local folder %s to %s', name, self.location, dst)
if os.path.isdir(dst):
logger.warning('%s already exists, overriding it..', dst)
shutil.rmtree(dst, onerror=del_rw)
shutil.copytree(self.location, dst)
class RemoteArchiveSource(Source):
def __init__(self, src, stream_kwargs):
super(RemoteArchiveSource, self).__init__(src, stream_kwargs)
def write(self, dst, out=sys.stdout):
out.write('- Download %s and extract to %s' % (self.location, dst))
def fetch(self, dst, name):
with TemporaryDirectory() as temp_dir:
logger.info('Downloading %s from %s to %s', name, self.location, temp_dir)
_file = download_file(self.location, temp_dir)
logger.info('Extracting %s to %s', _file, dst)
extract_file(_file, dst)
class RemoteFilesSource(Source):
def __init__(self, src, stream_kwargs):
super(RemoteFilesSource, self).__init__(src, stream_kwargs)
def write(self, dst, out=sys.stdout):
for location in self.location:
out.write('- Download %s to %s' % (location, dst))
def fetch(self, dst, name):
if not os.path.isdir(dst):
os.makedirs(dst)
logger.info('Getting %s files', name)
for location in self.location:
logger.info('Downloading %s to %s', location, dst)
download_file(location, dst, location.split('/')[-1])
class SourceFactory(object):
@staticmethod
def get_source(src, stream_kwargs):
sources = {
'git': GitSource,
'local': LocalSource,
'remote-archive': RemoteArchiveSource,
'remote-files': RemoteFilesSource
}
protocol = src['protocol']
assert protocol in sources.keys(), \
'%s is not a valid protocol, valid protocols are %s' % (protocol, sources.keys())
return sources[protocol](src, stream_kwargs)
class Element:
def __init__(self, data, stream_kwargs, name=None):
self.name = data.get('name', name)
self.version = data.get('version', None)
self.comment = data.get('comment', None)
patch_file = data.get('patch_file', None)
if patch_file:
patch_file = os.path.join(PAL_PLATFORM_ROOT, patch_file)
patch_file = patch_file.replace("/", "\\") if platform.system() == 'Windows' else patch_file
self.patch_file = patch_file
self.source = SourceFactory.get_source(data['from'], stream_kwargs) if 'from' in data else None
dst = data.get('to', None)
if dst:
assert self.source, 'missing "from" field in %s' % self.name
dst = os.path.join(PAL_PLATFORM_ROOT, dst)
dst = dst.replace("/", "\\") if platform.system() == 'Windows' else dst
else:
assert not self.source, 'missing "to" field in %s' % self.name
self.destination = dst
self.stream_kwargs = stream_kwargs
def is_fetch_needed(self):
return self.destination and not os.path.isdir(self.destination)
def write(self, out=sys.stdout):
if self.source:
out.write('\n')
name = self.name if self.name else ''
version = self.version if self.version else ''
out.write('%s\n' % ('#' * (len(name) + len(version) + 5)))
out.write('# %s %s\n' % (name, version))
out.write('%s\n' % ('#' * (len(name) + len(version) + 5)))
if self.comment:
out.write(self.comment + '\n\n')
self.source.write(self.destination, out)
if self.patch_file:
out.write('- Apply patch %s\n' % self.patch_file)
def fetch(self):
if self.source:
self.source.fetch(self.destination, self.name)
def delete(self):
if self.destination and os.path.isdir(self.destination):
logger.info('Deleting %s', self.destination)
shutil.rmtree(self.destination, onerror=del_rw)
def apply_patch(self):
if self.patch_file:
is_integrated = apply_patch(self.patch_file, **self.stream_kwargs)
if not is_integrated and self.source.__class__.__name__ == 'GitSource':
check_cmd(['git', 'config', 'user.name', 'pal-platform'], cwd=self.destination, **self.stream_kwargs)
check_cmd(['git', 'config', 'user.email', '<>'], cwd=self.destination, **self.stream_kwargs)
check_cmd(['git', 'add', '--all'], cwd=self.destination, **self.stream_kwargs)
message = 'applied patch: %s' % self.patch_file
check_cmd(['git', 'commit', '-m', message], cwd=self.destination, **self.stream_kwargs)
class Target(Element):
"""
Class which describes an mbed-cloud-client supported target and its operations.
For supported targets run::
pal-platform deploy -h
Target's operations are
* Writing deployment instructions to stdout/file.
* Fetching the required target-dependent sources according to pal-platform.json.
* Deleting fetched target-dependent sources according to pal-platform.json.
* Apply patches if needed on the deployed sources according to pal-platform.json.
:param name: Target name
:param data: Dictionary describing the target (read from pal-platform.json)
:param stream_kwargs:
* *stdout* --
Standard output handle
* *stderr* --
Standard error handle
"""
def __init__(self, name, data, stream_kwargs):
super(Target, self).__init__(data, stream_kwargs, name)
self.os = Element(data['os'], stream_kwargs)
self.device = Element(data['device'], stream_kwargs)
mw = data.get('middleware', {})
self.middleware = []
for k in mw:
self.middleware.append(Element(mw[k], stream_kwargs, k))
def is_fetch_needed(self):
fetch_needed = super(Target, self).is_fetch_needed() or \
self.os.is_fetch_needed() or \
self.device.is_fetch_needed()
for mw in self.middleware:
fetch_needed = fetch_needed or mw.is_fetch_needed()
return fetch_needed
def write_elements(self, out):
"""
Write instructions on how to deploy the elements of the target
:param out: Where to write the instructions to (stdout / file)
"""
out.write('%s %s %s\n' % ('~' * 30, self.name, '~' * (80 - (22 + len(self.name)))))
self.write(out)
self.os.write(out)
self.device.write(out)
for mw in self.middleware:
mw.write(out)
out.write('\n')
def fetch_elements(self):
"""
Fetch the required target-dependent sources for the target according to pal-platform.json
"""
self.fetch()
self.os.fetch()
self.device.fetch()
for mw in self.middleware:
mw.fetch()
def delete_elements(self):
"""
Delete the required target-dependent sources of the target according to pal-platform.json
"""
self.os.delete()
self.device.delete()
for mw in self.middleware:
mw.delete()
self.delete()
def patch_elements(self):
"""
Apply patches if needed on the deployed sources of the target according to pal-platform.json
"""
self.apply_patch()
self.os.apply_patch()
self.device.apply_patch()
for mw in self.middleware:
mw.apply_patch()
def json_read(file_name):
"""
Helper function that loads JSON file with all string values and keys represented as string objects
:param file_name: JSON file to read
:return: Dictionary representation of the JSON file
"""
with open(file_name, 'rt') as fh:
try:
return json.load(fh)
except ValueError as config_parse_exception:
raise Exception(
'Malformed %s - %s' % (file_name, config_parse_exception)
)
def get_available_targets():
return AVAILABLE_TARGETS
def get_available_toolchains():
return AVAILABLE_TOOLCHAINS
@click.group(context_settings=CONTEXT_SETTINGS, chain=True)
@click.option('-v', '--verbose', is_flag=True, help='Turn ON verbose mode')
@click.option(
'--from-file',
type=click.Path(exists=True, file_okay=True, readable=True, resolve_path=True),
default=os.path.join(SCRIPT_DIR, 'pal-platform.json'),
help='Path to a .json file containing the supported targets configuration.\n'
'Default is %s' % os.path.normpath(os.path.join(SCRIPT_DIR, 'pal-platform.json'))
)
@click.version_option(version='1.2')
@pass_config
def cli(config, verbose, from_file):
config.verbose = verbose
config.stream_kwargs = {'stdout': open(os.devnull, 'w'), 'stderr': subprocess.STDOUT}
config.targets = json_read(from_file)
global AVAILABLE_TARGETS
AVAILABLE_TARGETS = config.targets.keys()
parent_dir = os.path.normpath(os.path.join(from_file, os.pardir))
toolchain_dir = os.path.join(parent_dir, "Toolchain")
list = os.listdir(toolchain_dir)
global AVAILABLE_TOOLCHAINS
AVAILABLE_TOOLCHAINS = list
logging.basicConfig(
level=logging.DEBUG if verbose else logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
stream=sys.stdout
)
if logger.isEnabledFor(logging.DEBUG):
config.stream_kwargs = {}
@cli.command(
context_settings=CONTEXT_SETTINGS,
short_help='Deploy mbed-cloud-client files (run "%s deploy -h" for help)' % PROG_NAME
)
@click.option(
'--target',
'target_name',
help='The target to deploy platform-dependent files for',
required=True,
type=DynamicChoice(get_available_targets)
)
@click.option('--skip-update', is_flag=True, help='Skip Git Repositories update')
@click.option('-i', '--instructions', is_flag=True, help='Show deployment instructions for a given target and exit.')
@pass_config
def deploy(config, target_name, skip_update, instructions):
"""Deploy target-dependent files"""
config.target_name = target_name
config.skip_update = skip_update
target = Target(config.target_name, config.targets[target_name], config.stream_kwargs)
if instructions:
target.write_elements(sys.stdout)
click.get_current_context().exit()
if not config.skip_update:
target.fetch_elements()
target.patch_elements()
instructions_file = os.path.join(PAL_PLATFORM_ROOT, target.name + '.txt')
with open(instructions_file, 'wt') as fh:
target.write_elements(fh)
click.echo(click.style('Deployment for %s is successful.' % config.target_name, fg='green'))
click.echo(click.style('Deployment instructions are in %s.' % instructions_file, fg='green'))
@cli.command(
context_settings=CONTEXT_SETTINGS,
short_help='Generate platform-dependent files (run "%s generate -h" for help)' % PROG_NAME
)
@click.option(
'--target',
'target_name',
help='The target to generate platform-dependent files for',
type=DynamicChoice(get_available_targets)
)
@pass_config
def generate(config, target_name):
"""Generate files to be used by build-system"""
if target_name:
config.target_name = target_name
if not hasattr(config, 'target_name'):
ctx = click.get_current_context()
raise click.MissingParameter(ctx=ctx, param=ctx.command.params[0])
else:
target = Target(config.target_name, config.targets[config.target_name], config.stream_kwargs)
if target.is_fetch_needed():
click.echo(
'Target %s is not deployed, please run "%s deploy --target %s" first.' %
(config.target_name, PROG_NAME, config.target_name)
)
raise click.Abort
out_dir = generate_plat_cmake(target)
shutil.copy(
os.path.join(PAL_PLATFORM_ROOT, 'mbedCloudClientCmake.txt'),
os.path.join(out_dir, 'CMakeLists.txt')
)
click.echo(
click.style(
'Generation for %s is successful, please run cmake & make from %s' % (config.target_name, out_dir),
fg='green'
)
)
@cli.command(
context_settings=CONTEXT_SETTINGS,
short_help='Clean platform-dependent files (run "%s clean -h" for help)' % PROG_NAME
)
@click.option(
'--target',
'target_name',
help='The target to clean',
required=True,
type=DynamicChoice(get_available_targets)
)
@click.option(
'-k', '--keep-sources', is_flag=True,
help='Keep the deployed platform-dependent files (clean only generated files)'
)
@pass_config
def clean(config, target_name, keep_sources):
"""Clean target-dependent files"""
config.target_name = target_name
target = Target(config.target_name, config.targets[target_name], config.stream_kwargs)
out_dir_name = '__' + target.name
parent_dir = os.path.normpath(os.path.join(PAL_PLATFORM_ROOT, os.pardir))
out_dir = os.path.join(parent_dir, out_dir_name)
if os.path.isdir(out_dir):
logger.info('Deleting %s', out_dir)
shutil.rmtree(out_dir, onerror=del_rw)
if not keep_sources:
target.delete_elements()
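# Illustrative end-to-end flow (not part of the original script; <TARGET> and
# <TOOLCHAIN> are placeholders, and the script name pal-platform.py is an assumption).
# It mirrors what the deploy/generate/clean commands above and runCmakeAndMake below do:
#
#   python pal-platform.py deploy --target <TARGET>
#   python pal-platform.py generate --target <TARGET>
#   cd ../__<TARGET>
#   cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug \
#         -DCMAKE_TOOLCHAIN_FILE=../pal-platform/Toolchain/<TOOLCHAIN>/<TOOLCHAIN>.cmake
#   make
#   python pal-platform.py clean --target <TARGET>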
def runCmakeAndMake(folder, debug, toolchain, outdir, envPair, external, name, numOfBuildThreads):
logger.info('running cmake')
#"""cmake -G "Unix Makefiles" -DCMAKE_BUILD_TYPE=Debug -DCMAKE_TOOLCHAIN_FILE=../pal-platform/Toolchain/ARMGCC/ARMGCC.cmake -DEXTARNAL_DEFINE_FILE=../mbed-client-pal/Examples/PlatformBSP/mbedTLS/mbedTLS_cmake_config.txt"""
command = "cmake"
make_params = "-j1"
argument1 = "-G"
argument2 = "Unix Makefiles"
if debug == 1:
argument3 = "-DCMAKE_BUILD_TYPE=Debug"
else:
argument3 = "-DCMAKE_BUILD_TYPE=Release"
argument4 = "-DCMAKE_TOOLCHAIN_FILE=../pal-platform/Toolchain/"+toolchain+"/"+toolchain+".cmake"
if external != None:
argument5 = "-DEXTARNAL_DEFINE_FILE="+external
else:
argument5 = "-DEXTARNAL_DEFINE_FILE=../mbed-client-pal/Configs/pal_ext_configs.cmake"
if envPair != None:
logger.info('setting environment: %s = %s', envPair[0], envPair[1])
os.environ[envPair[0]] = envPair[1]
if numOfBuildThreads != None:
make_params = "-j" + str(numOfBuildThreads)
p = subprocess.Popen([command, argument1, argument2, argument3, argument4, argument5], cwd=folder)
returnCode = p.wait()
if returnCode == 0:
if name != None:
p = subprocess.Popen(["make", make_params, name], cwd=folder)
else:
p = subprocess.Popen(["make", make_params], cwd=folder)
returnCode = p.wait()
if returnCode == 0:
if debug == 1:
copyFrom = os.path.join(folder, "Debug")
else:
copyFrom = os.path.join(folder, "Release")
if not os.path.exists(outdir):
os.makedirs(outdir)
shutil.move(copyFrom, outdir)
else:
logger.error("** build failed **")
else:
logger.error("** CMAKE failed **")
def getPathForToolChainInPath(toolchain):
output = None
realname = toolchain
    if platform.system() == 'Windows':  # Windows-type OS
realname = toolchain + ".exe"
try:
found = check_output_and_raise(['where', realname]).strip()
except Exception as e:
logger.error(e)
logger.error("** Toolchain %s not found in path - make sure toolchain executable [%s] is in the path **", toolchain, realname)
return None
separator = "\\"
double_separator = "\\\\"
else: # assume linux type OS
try:
found = check_output_and_raise(['which', realname]).strip()
except Exception as e:
logger.error(e)
logger.error("** Toolchain %s not found in path - make sure toolchain executable [%s] is in the path **", toolchain, realname)
return None
separator = "/"
double_separator = "//"
    if found is not None:
        parent_dir = found
        while parent_dir.endswith(separator+realname) or parent_dir.endswith(separator+"bin") or parent_dir.endswith(separator+"bin"+separator) or parent_dir.endswith(double_separator+"bin"+separator) or parent_dir.endswith(separator+"bin"+double_separator) or parent_dir.endswith(double_separator+"bin"+double_separator):
parent_dir = os.path.normpath(os.path.join(parent_dir, os.pardir))
output = parent_dir
return output
def checkToolchainEnv(toolchain):
logger.info('Checking Environment for Toolchain - %s', toolchain)
    #toolchain_env structure: key == toolchain name, value is a tuple with two elements:
    #1. a tuple of relevant environment variables for the toolchain - Note: the first value is the one we want (we will export it if any of the values are found)
    #2. a string with the expected name of the compiler binary for the path search
toolchainEnv = {"ARMCC":(("ARMCC_DIR", "ARM_PATH", "MBED_ARM_PATH"), "armcc"),
"ARMGCC":(("ARMGCC_DIR", "GCC_ARM_PATH", "MBED_GCC_ARM_PATH"), "arm-none-eabi-gcc"),
"GCC": (("GCC_DIR",), "gcc"),
"GCC-OPENWRT": (("TOOLCHAIN_DIR",), "arm-openwrt-linux-gcc")}
toolchainInfo = toolchainEnv.get(toolchain, None)
if None == toolchainInfo:
logger.warning('toolchain environment not found for toolchain selected [%s] - please make sure toolchain is present and the correct environment variable is set', toolchain)
return None
for envVariable in toolchainInfo[0]:
path = os.getenv(envVariable, None)
if path != None:
logger.debug("env variable %s found", envVariable)
return (toolchainInfo[0][0], path)
path = getPathForToolChainInPath(toolchainInfo[1])
if path != None:
return (toolchainInfo[0][0], path)
logger.warning('toolchain environment not found for toolchain selected [%s] - please make sure toolchain is present and correct environment variable is set [%s]', toolchain, toolchainInfo[0][0])
return None
@cli.command(
context_settings=CONTEXT_SETTINGS,
short_help='[DEPRECATED] fullBuild deploy and build the project (run "%s fullBuild -h" for help)' % PROG_NAME)
@click.option(
'--target',
'target_name',
help='The target to deploy and build',
required=True,
type=DynamicChoice(get_available_targets)
)
@click.option(
'--toolchain',
'toolchain',
help='The toolchain to use for the build',
required=True,
type=DynamicChoice(get_available_toolchains)
)
@click.option(
'--external',
'external',
    help='The path of the external define CMAKE file to include',
required=False,
type=click.Path()
)
@click.option(
'--name',
'name',
help='name of the build target passed to the make command',
required=False,
type=click.Path()
)
@click.option(
'-k', '--keep-sources', is_flag=True,
help='Keep the deployed platform-dependent files (clean only generated files)'
)
@click.option(
'-j',
'numOfBuildThreads',
help='-j parallel make parameter (Example: -j4)',
required=False,
type=int
)
@pass_config
def fullbuild(config, target_name, toolchain, external, name, keep_sources, numOfBuildThreads):
"""deploy and build target files"""
config.target_name = target_name
    logger.info('fullBuild option has been DEPRECATED and will be removed in a future release.')
logger.info('fullBuild running for target = %s with toolchain = %s', target_name, toolchain)
ctx = click.get_current_context()
ctx.invoke(deploy, target_name=target_name, skip_update=None, instructions=None)
ctx.invoke(generate, target_name=target_name)
envPair = checkToolchainEnv(toolchain)
if (None == envPair):
logger.error("** Toolchain not found - exiting **")
return
target = Target(config.target_name, config.targets[target_name], config.stream_kwargs)
out_dir_name = '__' + target.name
parent_dir = os.path.normpath(os.path.join(PAL_PLATFORM_ROOT, os.pardir))
out_dir = os.path.join(parent_dir, out_dir_name)
isDebug = 1 # build debug version
output = os.path.join(parent_dir, "out")
if os.path.exists(output):
shutil.rmtree(output)
runCmakeAndMake(out_dir, isDebug, toolchain, output, envPair, external, name, numOfBuildThreads) # CMAKE + build debug version
isDebug = 0 # generate and build release version
if os.path.exists(out_dir):
shutil.rmtree(out_dir)
ctx.invoke(generate, target_name=target_name)
runCmakeAndMake(out_dir, isDebug, toolchain, output, envPair, external, name, numOfBuildThreads) # CMAKE + build release version
    logger.info('fullBuild option has been DEPRECATED and will be removed in a future release.')
logger.info('\nCompleted fullBuild running for target = %s\nWith toolchain = %s.\nOutput directory: %s\n', target_name, toolchain, output)
if __name__ == '__main__':
cli(sys.argv[1:])
|
the-stack_106_31062 | """
# Copyright 2022 Red Hat
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
# pylint: disable=no-member, protected-access
import os
import shutil
from unittest import TestCase
from unittest.mock import Mock, patch
from cibyl.models.attribute import AttributeDictValue
from cibyl.models.ci.base.job import Job
from cibyl.sources.jenkins_job_builder import JenkinsJobBuilder
def remove_fake_files():
"""Remove the fake xml files."""
shutil.rmtree("out_jjb_test")
def fake_xml_files():
"""Create two fake xml files to test the get_jobs method."""
path1 = "out_jjb_test/out-xml/fake_job1"
path2 = "out_jjb_test/out-xml/fake_job2"
os.makedirs(path1, exist_ok=True)
os.makedirs(path2, exist_ok=True)
file_name = os.path.join(path1, "config.xml")
with open(file_name, "w", encoding="utf-8") as fake1:
fake1.write('<?xml version="1.0" encoding="utf-8"?>\n')
fake1.write('<com.folder.Folder plugin="cloudbees-folder">\n')
fake1.write(' <icon class="com.folder.icons.StockFolderIcon"/>\n')
fake1.write(' <views/>\n')
fake1.write(' <scm class="hudson.scm.NullSCM"/>\n')
fake1.write(' <publishers/>\n')
fake1.write(' <buildWrappers/>\n')
fake1.write('</com.folder.Folder>\n')
file_name = os.path.join(path2, "config.xml")
with open(file_name, "w", encoding="utf-8") as fake2:
fake2.write('<?xml version="1.0" encoding="utf-8"?>\n')
fake2.write('<flow-definition plugin="workflow-job">\n')
fake2.write(' <definition plugin="workflow-cps" class="org.j">\n')
fake2.write(' </definition>\n')
fake2.write('</flow-definition> \n')
@patch('cibyl.sources.jenkins_job_builder.JenkinsJobBuilder.get_repo')
class TestJenkinsJobBuilderSource(TestCase):
"""Tests for :class:`JenkinsJobBuilder`."""
def setUp(self):
fake_xml_files()
def tearDown(self):
remove_fake_files()
# second argument is necessary to support patching of mock get_repo method
def test_with_all_args(self, _):
"""Checks that the object is built correctly when all arguments are
provided.
"""
url = 'url/to/repo'
dest = 'dest_folder'
branch = 'master'
repos = [{'url': url, 'dest': dest, 'branch': branch}]
jenkins = JenkinsJobBuilder(repos=repos)
self.assertEqual(dest, jenkins.repos[-1].get('dest'))
self.assertEqual(url, jenkins.repos[-1].get('url'))
self.assertEqual(branch, jenkins.repos[-1].get('branch'))
def test_with_no_dest(self, _):
"""Checks that object is built correctly when the dest is not
provided.
"""
url = 'url/to/repo/'
branch = 'master'
repos = [{'url': url, 'branch': branch}]
jenkins = JenkinsJobBuilder(repos=repos)
self.assertIsNone(jenkins.repos[-1].get('dest'))
jenkins.setup()
self.assertEqual(url, jenkins.repos[-1].get('url'))
self.assertEqual(branch, jenkins.repos[-1].get('branch'))
self.assertIsNotNone(jenkins.repos[-1].get('dest'))
self.assertTrue(os.path.isdir(jenkins.repos[-1].get('dest')))
def test_with_no_branch(self, _):
"""Checks that object is built correctly when the branch is not
provided.
"""
url = 'url/to/repo/'
dest = 'dest'
repos = [{'url': url, 'dest': dest}]
jenkins = JenkinsJobBuilder(repos)
self.assertEqual(url, jenkins.repos[-1].get('url'))
self.assertIsNone(jenkins.repos[-1].get('branch'))
self.assertEqual(dest, jenkins.repos[-1].get('dest'))
def test_get_jobs(self, _):
"""
Tests that the internal logic of :meth:`JenkinsJobBuilder.get_jobs`
        is correct. The XML-generation step is mocked so that the query runs
        against the fake config files created in setUp.
"""
repos = [{'dest': 'out_jjb_test'}]
jenkins = JenkinsJobBuilder(repos=repos)
jenkins._generate_xml = Mock()
jobs = jenkins.get_jobs()
job = Job(name="fake_job2")
result = AttributeDictValue("jobs", attr_type=Job,
value={"fake_job2": job})
self.assertEqual(jobs, result)
|
the-stack_106_31063 | """
flask_excel
~~~~~~~~~~~~~~~~~~~
A flask extension that provides one application programming interface
to read and write data in different excel file formats
:copyright: (c) 2015-2017 by Onni Software Ltd and its contributors
:license: New BSD License
"""
try:
# if in py2
from urllib import quote
_PY_VERSION = 2
except ImportError:
# else (aka in py3)
from urllib.parse import quote
_PY_VERSION = 3
from flask import Request, Response
import pyexcel_webio as webio
class ExcelRequest(webio.ExcelInputInMultiDict, Request):
"""
Mix in pyexcel's webio function signatures to Flask request
"""
def get_file_tuple(self, field_name):
"""
Implement Flask specific way of getting uploaded files
"""
filehandle = self.files[field_name]
filename = filehandle.filename
extension = filename.split(".")[-1]
if extension == filename:
raise IOError("Failed to find out file extension")
return extension, filehandle
def _make_response(content, content_type, status, file_name=None):
"""
Custom response function that is called by pyexcel-webio
"""
response = Response(content, content_type=content_type, status=status)
if file_name:
if _PY_VERSION == 2 and isinstance(file_name, unicode):
file_name = file_name.encode('utf-8')
url_encoded_file_name = quote(file_name)
response.headers["Content-Disposition"] = (
"attachment; filename=%s;filename*=utf-8''%s"
% (url_encoded_file_name, url_encoded_file_name)
)
return response
from pyexcel_webio import ( # noqa
make_response,
make_response_from_array,
make_response_from_dict,
make_response_from_records,
make_response_from_book_dict,
make_response_from_a_table,
make_response_from_query_sets,
make_response_from_tables
)
def init_excel(app):
app.request_class = ExcelRequest
webio.init_webio(_make_response)
return app
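if __name__ == '__main__':  # pragma: no cover
    # Illustrative sketch, not part of the original extension: wire the request
    # class into a Flask app and serve an array as a CSV download. The route,
    # data and file name are made up; make_response_from_array is re-exported
    # from pyexcel_webio above, so its exact keyword support follows that library.
    from flask import Flask
    app = Flask(__name__)
    init_excel(app)

    @app.route('/download')
    def download():
        return make_response_from_array([[1, 2], [3, 4]], "csv", file_name="example")

    app.run(debug=True)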
|
the-stack_106_31064 | import math
import os
import pickle
import shutil
import tempfile
from contextlib import contextmanager
from itertools import permutations
import pytest
from numpy.testing import assert_almost_equal
from pyproj import Geod
try:
from shapely.geometry import (
LinearRing,
LineString,
MultiLineString,
MultiPoint,
MultiPolygon,
Point,
Polygon,
)
from shapely.geometry.polygon import orient
SHAPELY_LOADED = True
except ImportError:
SHAPELY_LOADED = False
skip_shapely = pytest.mark.skipif(not SHAPELY_LOADED, reason="Missing shapely")
@contextmanager
def temporary_directory():
"""
Get a temporary directory
"""
temp_dir = tempfile.mkdtemp()
try:
yield temp_dir
finally:
shutil.rmtree(temp_dir)
def test_geod_inverse_transform():
gg = Geod(ellps="clrk66")
lat1pt = 42.0 + (15.0 / 60.0)
lon1pt = -71.0 - (7.0 / 60.0)
lat2pt = 45.0 + (31.0 / 60.0)
lon2pt = -123.0 - (41.0 / 60.0)
"""
distance between boston and portland, clrk66:
-66.531 75.654 4164192.708
distance between boston and portland, WGS84:
-66.530 75.654 4164074.239
testing pickling of Geod instance
distance between boston and portland, clrk66 (from pickle):
-66.531 75.654 4164192.708
distance between boston and portland, WGS84 (from pickle):
-66.530 75.654 4164074.239
inverse transform
from proj.4 invgeod:
b'-66.531\t75.654\t4164192.708\n'
"""
print("from pyproj.Geod.inv:")
az12, az21, dist = gg.inv(lon1pt, lat1pt, lon2pt, lat2pt)
assert_almost_equal((az12, az21, dist), (-66.531, 75.654, 4164192.708), decimal=3)
print("forward transform")
print("from proj.4 geod:")
endlon, endlat, backaz = gg.fwd(lon1pt, lat1pt, az12, dist)
assert_almost_equal((endlon, endlat, backaz), (-123.683, 45.517, 75.654), decimal=3)
print("intermediate points:")
print("from geod with +lat_1,+lon_1,+lat_2,+lon_2,+n_S:")
npts = 4
lonlats = gg.npts(lon1pt, lat1pt, lon2pt, lat2pt, npts)
lonprev = lon1pt
latprev = lat1pt
print(dist / (npts + 1))
print("%6.3f %7.3f" % (lat1pt, lon1pt))
result_dists = (
(-66.53059478766238, 106.79071710136431, 832838.5416198927),
(-73.20928289863558, 99.32289055927389, 832838.5416198935),
(-80.67710944072617, 91.36325611787134, 832838.5416198947),
(-88.63674388212858, 83.32809401477382, 832838.5416198922),
)
for (lon, lat), (res12, res21, resdist) in zip(lonlats, result_dists):
az12, az21, dist = gg.inv(lonprev, latprev, lon, lat)
assert_almost_equal((az12, az21, dist), (res12, res21, resdist))
latprev = lat
lonprev = lon
az12, az21, dist = gg.inv(lonprev, latprev, lon2pt, lat2pt)
assert_almost_equal(
(lat2pt, lon2pt, dist), (45.517, -123.683, 832838.542), decimal=3
)
def test_geod_cities():
# specify the lat/lons of some cities.
boston_lat = 42.0 + (15.0 / 60.0)
boston_lon = -71.0 - (7.0 / 60.0)
portland_lat = 45.0 + (31.0 / 60.0)
portland_lon = -123.0 - (41.0 / 60.0)
g1 = Geod(ellps="clrk66")
g2 = Geod(ellps="WGS84")
az12, az21, dist = g1.inv(boston_lon, boston_lat, portland_lon, portland_lat)
print("distance between boston and portland, clrk66:")
print("%7.3f %6.3f %12.3f" % (az12, az21, dist))
assert_almost_equal((az12, az21, dist), (-66.531, 75.654, 4164192.708), decimal=3)
print("distance between boston and portland, WGS84:")
az12, az21, dist = g2.inv(boston_lon, boston_lat, portland_lon, portland_lat)
assert_almost_equal((az12, az21, dist), (-66.530, 75.654, 4164074.239), decimal=3)
print("%7.3f %6.3f %12.3f" % (az12, az21, dist))
print("testing pickling of Geod instance")
with temporary_directory() as tmpdir:
with open(os.path.join(tmpdir, "geod1.pickle"), "wb") as gp1w:
pickle.dump(g1, gp1w, -1)
with open(os.path.join(tmpdir, "geod2.pickle"), "wb") as gp2w:
pickle.dump(g2, gp2w, -1)
with open(os.path.join(tmpdir, "geod1.pickle"), "rb") as gp1:
g3 = pickle.load(gp1)
with open(os.path.join(tmpdir, "geod2.pickle"), "rb") as gp2:
g4 = pickle.load(gp2)
az12, az21, dist = g3.inv(boston_lon, boston_lat, portland_lon, portland_lat)
assert_almost_equal((az12, az21, dist), (-66.531, 75.654, 4164192.708), decimal=3)
print("distance between boston and portland, clrk66 (from pickle):")
print("%7.3f %6.3f %12.3f" % (az12, az21, dist))
az12, az21, dist = g4.inv(boston_lon, boston_lat, portland_lon, portland_lat)
print("distance between boston and portland, WGS84 (from pickle):")
print("%7.3f %6.3f %12.3f" % (az12, az21, dist))
assert_almost_equal((az12, az21, dist), (-66.530, 75.654, 4164074.239), decimal=3)
g3 = Geod("+ellps=clrk66") # proj4 style init string
print("inverse transform")
lat1pt = 42.0 + (15.0 / 60.0)
lon1pt = -71.0 - (7.0 / 60.0)
lat2pt = 45.0 + (31.0 / 60.0)
lon2pt = -123.0 - (41.0 / 60.0)
az12, az21, dist = g3.inv(lon1pt, lat1pt, lon2pt, lat2pt)
print("%7.3f %6.3f %12.3f" % (az12, az21, dist))
assert_almost_equal((az12, az21, dist), (-66.531, 75.654, 4164192.708), decimal=3)
def test_line_length__single_point():
geod = Geod(ellps="WGS84")
assert geod.line_length(1, 1) == 0
def test_line_length__radians():
geod = Geod(ellps="WGS84")
total_length = geod.line_length([1, 2], [0.5, 1], radians=True)
assert_almost_equal(total_length, 5426061.32197463, decimal=3)
def test_line_lengths__single_point():
geod = Geod(ellps="WGS84")
assert geod.line_lengths(1, 1) == 0
def test_line_lengths__radians():
geod = Geod(ellps="WGS84")
line_lengths = geod.line_lengths([1, 2], [0.5, 1], radians=True)
assert_almost_equal(line_lengths, [5426061.32197463], decimal=3)
def test_polygon_area_perimeter__single_point():
geod = Geod(ellps="WGS84")
area, perimeter = geod.polygon_area_perimeter(1, 1)
assert area == 0
assert perimeter == 0
@skip_shapely
def test_geometry_length__point():
geod = Geod(ellps="WGS84")
assert geod.geometry_length(Point(1, 2)) == 0
@skip_shapely
def test_geometry_length__linestring():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_length(LineString([Point(1, 2), Point(3, 4)])),
313588.39721259556,
decimal=2,
)
@skip_shapely
def test_geometry_length__linestring__radians():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_length(
LineString(
[
Point(math.radians(1), math.radians(2)),
Point(math.radians(3), math.radians(4)),
]
),
radians=True,
),
313588.39721259556,
decimal=2,
)
@skip_shapely
def test_geometry_length__linearring():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_length(
LinearRing(LineString([Point(1, 2), Point(3, 4), Point(5, 2)]))
),
1072185.2103813463,
decimal=2,
)
@skip_shapely
def test_geometry_length__polygon():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_length(
Polygon(LineString([Point(1, 2), Point(3, 4), Point(5, 2)]))
),
1072185.2103813463,
decimal=2,
)
@skip_shapely
def test_geometry_length__polygon__radians():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_length(
Polygon(
LineString(
[
Point(math.radians(1), math.radians(2)),
Point(math.radians(3), math.radians(4)),
Point(math.radians(5), math.radians(2)),
]
)
),
radians=True,
),
1072185.2103813463,
decimal=2,
)
@skip_shapely
def test_geometry_length__multipolygon():
geod = Geod(ellps="WGS84")
polygon = Polygon(LineString([Point(1, 2), Point(3, 4), Point(5, 2)]))
assert_almost_equal(
geod.geometry_length(MultiPolygon([polygon, polygon])),
2 * 1072185.2103813463,
decimal=2,
)
@skip_shapely
def test_geometry_length__multipolygon__radians():
geod = Geod(ellps="WGS84")
polygon = Polygon(
LineString(
[
Point(math.radians(1), math.radians(2)),
Point(math.radians(3), math.radians(4)),
Point(math.radians(5), math.radians(2)),
]
)
)
assert_almost_equal(
geod.geometry_length(MultiPolygon([polygon, polygon]), radians=True),
2 * 1072185.2103813463,
decimal=2,
)
@skip_shapely
def test_geometry_length__multilinestring():
geod = Geod(ellps="WGS84")
line_string = LineString([Point(1, 2), Point(3, 4), Point(5, 2)])
assert_almost_equal(
geod.geometry_length(MultiLineString([line_string, line_string])),
1254353.5888503822,
decimal=2,
)
@skip_shapely
def test_geometry_length__multipoint():
geod = Geod(ellps="WGS84")
assert (
geod.geometry_length(MultiPoint([Point(1, 2), Point(3, 4), Point(5, 2)])) == 0
)
@skip_shapely
def test_geometry_area_perimeter__point():
geod = Geod(ellps="WGS84")
assert geod.geometry_area_perimeter(Point(1, 2)) == (0, 0)
@skip_shapely
def test_geometry_area_perimeter__linestring():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_area_perimeter(LineString([Point(1, 2), Point(3, 4)])),
(0.0, 627176.7944251911),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__linestring__radians():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_area_perimeter(
LineString(
[
Point(math.radians(1), math.radians(2)),
Point(math.radians(3), math.radians(4)),
]
),
radians=True,
),
(0.0, 627176.7944251911),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__linearring():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_area_perimeter(
LinearRing(LineString([Point(1, 2), Point(3, 4), Point(5, 2)]))
),
(-49187690467.58623, 1072185.2103813463),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__polygon():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_area_perimeter(
Polygon(LineString([Point(1, 2), Point(3, 4), Point(5, 2)]))
),
(-49187690467.58623, 1072185.2103813463),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__polygon__radians():
geod = Geod(ellps="WGS84")
assert_almost_equal(
geod.geometry_area_perimeter(
Polygon(
LineString(
[
Point(math.radians(1), math.radians(2)),
Point(math.radians(3), math.radians(4)),
Point(math.radians(5), math.radians(2)),
]
)
),
radians=True,
),
(-49187690467.58623, 1072185.2103813463),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__polygon__holes():
geod = Geod(ellps="WGS84")
polygon = Polygon(
LineString([Point(1, 1), Point(1, 10), Point(10, 10), Point(10, 1)]),
holes=[LineString([Point(1, 2), Point(3, 4), Point(5, 2)])],
)
assert_almost_equal(
geod.geometry_area_perimeter(orient(polygon, 1)),
(944373881400.3394, 3979008.0359657984),
decimal=2,
)
assert_almost_equal(
geod.geometry_area_perimeter(orient(polygon, -1)),
(-944373881400.3394, 3979008.0359657984),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__multipolygon():
geod = Geod(ellps="WGS84")
polygon = Polygon(LineString([Point(1, 2), Point(3, 4), Point(5, 2)]))
assert_almost_equal(
geod.geometry_area_perimeter(MultiPolygon([polygon, polygon])),
(-98375380935.17245, 2144370.4207626926),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__multipolygon__radians():
geod = Geod(ellps="WGS84")
polygon = Polygon(
LineString(
[
Point(math.radians(1), math.radians(2)),
Point(math.radians(3), math.radians(4)),
Point(math.radians(5), math.radians(2)),
]
)
)
assert_almost_equal(
geod.geometry_area_perimeter(MultiPolygon([polygon, polygon]), radians=True),
(-98375380935.17245, 2144370.4207626926),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__multilinestring():
geod = Geod(ellps="WGS84")
line_string = LineString([Point(1, 2), Point(3, 4), Point(5, 2)])
assert_almost_equal(
geod.geometry_area_perimeter(MultiLineString([line_string, line_string])),
(-98375380935.17245, 2144370.4207626926),
decimal=2,
)
@skip_shapely
def test_geometry_area_perimeter__multipoint():
geod = Geod(ellps="WGS84")
assert geod.geometry_area_perimeter(
MultiPoint([Point(1, 2), Point(3, 4), Point(5, 2)])
) == (0, 0)
@pytest.mark.parametrize(
"lon,lat,az", permutations([10.0, [10.0], (10.0,)])
) # 6 test cases
def test_geod_fwd_honours_input_types(lon, lat, az):
# 622
gg = Geod(ellps="clrk66")
outx, outy, outz = gg.fwd(lons=lon, lats=lat, az=az, dist=0)
assert isinstance(outx, type(lon))
assert isinstance(outy, type(lat))
assert isinstance(outz, type(az))
@pytest.mark.parametrize(
"lons1,lats1,lons2", permutations([10.0, [10.0], (10.0,)])
) # 6 test cases
def test_geod_inv_honours_input_types(lons1, lats1, lons2):
# 622
gg = Geod(ellps="clrk66")
outx, outy, outz = gg.inv(lons1=lons1, lats1=lats1, lons2=lons2, lats2=0)
assert isinstance(outx, type(lons1))
assert isinstance(outy, type(lats1))
assert isinstance(outz, type(lons2))
|
the-stack_106_31065 | import logging
import numpy as np
import re
logger = logging.getLogger(__name__)
def strike_symbol(strike):
R = np.zeros((2, 2))
R[0, 0] = np.cos(np.deg2rad(-strike))
R[0, 1] = -np.sin(np.deg2rad(-strike))
R[1, 0] = np.sin(np.deg2rad(-strike))
R[1, 1] = np.cos(np.deg2rad(-strike))
R = np.zeros((2, 2))
R[0, 0] = np.cos(np.deg2rad(-strike))
R[0, 1] = -np.sin(np.deg2rad(-strike))
R[1, 0] = np.sin(np.deg2rad(-strike))
R[1, 1] = np.cos(np.deg2rad(-strike))
vec = np.array([0, 1])
rotated = R @ vec
vec2 = np.array([-0.5, 0])
r2 = R @ vec2
return rotated, r2
def get_levels():
"""dict for converting to logger levels from string
Returns
-------
dict
contains all strings with corresponding logging levels.
"""
return {'info':logging.INFO,'warning':logging.WARNING,'error':logging.ERROR,'debug':logging.DEBUG}
def log_to_file(filename,level='info'):
"""Set the logging parameters for log file
Parameters
----------
filename : string
name of file or path to file
level : str, optional
'info', 'warning', 'error', 'debug' mapped to logging levels, by default 'info'
"""
levels = get_levels()
level = levels.get(level,logging.WARNING)
logging.basicConfig(level=level,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename=filename,
filemode='w')
def log_to_console(level='warning'):
"""Set the level of logging to the console
Parameters
----------
level : str, optional
'info', 'warning', 'error', 'debug' mapped to logging levels, by default 'info'
"""
levels = get_levels()
level = levels.get(level,logging.WARNING)
changed_level = False
for h in logging.getLogger().handlers:
if type(h) is logging.StreamHandler:
h.setLevel(level)
changed_level = True
if not changed_level:
console = logging.StreamHandler()
console.setLevel(level)
# add the handler to the root logger
logging.getLogger().addHandler(console)
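# Example wiring (illustrative; the log file name is arbitrary): send DEBUG and
# above to a file while keeping the console at WARNING.
#
#   log_to_file('run.log', level='debug')
#   log_to_console('warning')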
def read_voxet(voxetname,propertyfile):
"""
Read a gocad property file and the geometry information from the .vo file
voxetname - is the path to the voxet file
propertyfile is the path to the binary file
Returns
origin numpy array
voxet_extent - is the length of each axis of the voxet
N is the number of steps in the voxet
array is the property values
steps is the size of the step vector for the voxet
"""
array = np.fromfile(propertyfile,dtype='float32')
array = array.astype('<f4') # little endian
with open(voxetname,'r') as file:
for l in file:
if 'AXIS_O ' in l:
origin = np.array(re.findall(r"[-+]?\d*\.?\d+|[-+]?\d+",l)).astype(float)
if 'AXIS_U ' in l:
U = float(re.findall(r'[\d\.\d]+',l)[0])
if 'AXIS_V ' in l:
V = float(re.findall(r'[\d\.\d]+',l)[1])
if 'AXIS_W ' in l:
W = float(re.findall(r'[\d\.\d]+',l)[2])
if 'AXIS_N ' in l:
N = np.array(re.findall(r'[\d\.\d]+',l)).astype(int)
voxet_extent = np.array([U,V,W])
steps = (voxet_extent ) / (N-1)
return origin, voxet_extent, N, array, steps
def write_property_to_gocad_voxet(propertyfilename, propertyvalues):
"""
This function writes a numpy array into the right format for a gocad
voxet property file. This assumet there is a property already added to the .vo file,
and is just updating the file.
propertyfile - string giving the path to the file to write
propertyvalues - numpy array nz,ny,nx ordering and in float format
"""
propertyvalues = propertyvalues.astype('>f4') #big endian
# array = propertyvalues.newbyteorder()
propertyvalues.tofile(propertyfilename)
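# Minimal round-trip sketch (not part of the original module; file names are
# hypothetical and the nx/ny/nz ordering of AXIS_N is an assumption):
#
#   origin, voxet_extent, N, values, steps = read_voxet('model.vo', 'model_Strat@@')
#   values = values.reshape(N[::-1])  # assumed nz, ny, nx ordering for writing
#   write_property_to_gocad_voxet('model_StratCopy@@', values.astype('float32'))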
|
the-stack_106_31068 | import json.encoder as json_encoder
import types
from json import JSONEncoder
from typing import Final
# noinspection PyUnresolvedReferences
ENCODE_BASESTRING_ASCII: Final = json_encoder.encode_basestring_ascii
# noinspection PyUnresolvedReferences
ENCODE_BASESTRING: Final = json_encoder.encode_basestring
# noinspection PyUnresolvedReferences
INFINITY: Final = json_encoder.INFINITY
class GeneratorAwareJSONEncoder(JSONEncoder):
def iterencode(self, o, _one_shot=False):
"""Encode the given object and yield each string
representation as available.
For example::
for chunk in JSONEncoder().iterencode(bigobject):
mysocket.write(chunk)
"""
if self.check_circular:
markers = {}
else:
markers = None
if self.ensure_ascii:
_encoder = ENCODE_BASESTRING_ASCII
else:
_encoder = ENCODE_BASESTRING
# noinspection PyShadowingNames
def floatstr(o, allow_nan=self.allow_nan, _repr=float.__repr__, _inf=INFINITY, _neginf=-INFINITY):
# Check for specials. Note that this type of test is processor
# and/or platform-specific, so do tests which don't depend on the
# internals.
if o != o:
text = 'NaN'
elif o == _inf:
text = 'Infinity'
elif o == _neginf:
text = '-Infinity'
else:
# noinspection PyArgumentList
return _repr(o)
if not allow_nan:
raise ValueError(
"Out of range float values are not JSON compliant: " +
repr(o))
return text
_iterencode = _make_iterencode(
markers, self.default, _encoder, self.indent, floatstr,
self.key_separator, self.item_separator, self.sort_keys,
self.skipkeys, _one_shot)
return _iterencode(o, 0)
# noinspection PyPep8Naming,PyShadowingBuiltins
def _make_iterencode(
markers, _default, _encoder, _indent, _floatstr,
_key_separator, _item_separator, _sort_keys, _skipkeys, _one_shot,
# HACK: hand-optimized bytecode; turn globals into locals
ValueError=ValueError,
dict=dict,
float=float,
id=id,
int=int,
isinstance=isinstance,
list=list,
str=str,
tuple=tuple,
_intstr=int.__repr__,
):
if _indent is not None and not isinstance(_indent, str):
_indent = ' ' * _indent
def _iterencode_list(lst, _current_indent_level):
if not lst:
yield '[]'
return
if markers is not None:
markerid = id(lst)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = lst
buf = '['
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + _indent * _current_indent_level
separator = _item_separator + newline_indent
buf += newline_indent
else:
newline_indent = None
separator = _item_separator
first = True
for value in lst:
if first:
first = False
else:
buf = separator
if isinstance(value, str):
yield buf + _encoder(value)
elif value is None:
yield buf + 'null'
elif value is True:
yield buf + 'true'
elif value is False:
yield buf + 'false'
elif isinstance(value, int):
# Subclasses of int/float may override __repr__, but we still
# want to encode them as integers/floats in JSON. One example
# within the standard library is IntEnum.
# noinspection PyArgumentList
yield buf + _intstr(value)
elif isinstance(value, float):
# see comment above for int
yield buf + _floatstr(value)
else:
yield buf
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
yield from chunks
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + _indent * _current_indent_level
yield ']'
if markers is not None:
# noinspection PyUnboundLocalVariable
del markers[markerid]
def _iterencode_dict(dct, _current_indent_level):
if not dct:
yield '{}'
return
if markers is not None:
markerid = id(dct)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = dct
yield '{'
if _indent is not None:
_current_indent_level += 1
newline_indent = '\n' + _indent * _current_indent_level
item_separator = _item_separator + newline_indent
yield newline_indent
else:
newline_indent = None
item_separator = _item_separator
first = True
if _sort_keys:
items = sorted(dct.items())
else:
items = dct.items()
for key, value in items:
if isinstance(key, str):
pass
# JavaScript is weakly typed for these, so it makes sense to
# also allow them. Many encoders seem to do something like this.
elif isinstance(key, float):
# see comment for int/float in _make_iterencode
key = _floatstr(key)
elif key is True:
key = 'true'
elif key is False:
key = 'false'
elif key is None:
key = 'null'
elif isinstance(key, int):
# see comment for int/float in _make_iterencode
key = _intstr(key)
elif _skipkeys:
continue
else:
raise TypeError(f'keys must be str, int, float, bool or None, '
f'not {key.__class__.__name__}')
if first:
first = False
else:
yield item_separator
yield _encoder(key)
yield _key_separator
if isinstance(value, str):
yield _encoder(value)
elif value is None:
yield 'null'
elif value is True:
yield 'true'
elif value is False:
yield 'false'
elif isinstance(value, int):
# see comment for int/float in _make_iterencode
yield _intstr(value)
elif isinstance(value, float):
# see comment for int/float in _make_iterencode
yield _floatstr(value)
elif isinstance(value, types.GeneratorType):
for o in value:
yield from _iterencode(o, _current_indent_level)
else:
if isinstance(value, (list, tuple)):
chunks = _iterencode_list(value, _current_indent_level)
elif isinstance(value, dict):
chunks = _iterencode_dict(value, _current_indent_level)
else:
chunks = _iterencode(value, _current_indent_level)
yield from chunks
if newline_indent is not None:
_current_indent_level -= 1
yield '\n' + _indent * _current_indent_level
yield '}'
if markers is not None:
# noinspection PyUnboundLocalVariable
del markers[markerid]
def _iterencode(o, _current_indent_level):
if isinstance(o, str):
yield _encoder(o)
elif o is None:
yield 'null'
elif o is True:
yield 'true'
elif o is False:
yield 'false'
elif isinstance(o, int):
# see comment for int/float in _make_iterencode
yield _intstr(o)
elif isinstance(o, float):
# see comment for int/float in _make_iterencode
yield _floatstr(o)
elif isinstance(o, (list, tuple)):
yield from _iterencode_list(o, _current_indent_level)
elif isinstance(o, dict):
yield from _iterencode_dict(o, _current_indent_level)
elif isinstance(o, types.GeneratorType):
for it in o:
yield from _iterencode(it, _current_indent_level)
else:
if markers is not None:
markerid = id(o)
if markerid in markers:
raise ValueError("Circular reference detected")
markers[markerid] = o
o = _default(o)
yield from _iterencode(o, _current_indent_level)
if markers is not None:
# noinspection PyUnboundLocalVariable
del markers[markerid]
return _iterencode
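if __name__ == '__main__':  # pragma: no cover
    # Small demonstration sketch (not part of the original module). A generator
    # value is flushed item by item as it is consumed; note that, per the
    # implementation above, the items are emitted back-to-back without list
    # brackets or separators between them.
    def chunks():
        yield {"a": 1}
        yield {"b": 2}

    encoder = GeneratorAwareJSONEncoder()
    for piece in encoder.iterencode({"stream": chunks()}):
        print(piece, end='')
    print()  # prints: {"stream": {"a": 1}{"b": 2}}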
|
the-stack_106_31069 | import pytest
from dataclasses import dataclass
from uuid import uuid4
from tests.utils import (
do_rpc_call as do_rpc_call_fixture
)
from asyncio_rpc.models import RPCCall, RPCStack
from asyncio_rpc.client import WrappedException
from asyncio_rpc.server import DefaultExecutor
do_rpc_call = do_rpc_call_fixture
@dataclass
class CustomDataModel:
x: int
y: int
def multiply(self):
return self.x * self.y
class CustomException(Exception):
pass
class Service(object):
"""
Testing service that is register via the TestExecutor
on the RPCServer
"""
def __init__(self):
self._data = {'foo': 'bar'}
def multiply(self, x, y=1):
return x * y
@property
def data(self):
return self._data
def get_item(self, key):
return self._data[key]
def custom_error(self):
raise CustomException("Foobar")
def multiply_with_dataclass(self, x: CustomDataModel):
assert isinstance(x, CustomDataModel)
return x.multiply()
class ServiceClient(object):
"""
TestService client, exposing (rpc) functions
that can be called on the TestService instance.
"""
def __init__(self, client):
self.client = client
@property
async def data(self):
rpc_func_call = RPCCall('data', [], {})
rpc_func_stack = RPCStack(
uuid4().hex, 'TEST', 300, [rpc_func_call])
return await self.client.rpc_call(rpc_func_stack)
async def multiply(self, x, y=100):
rpc_func_call = RPCCall('multiply', [x], {'y': y})
rpc_func_stack = RPCStack(
uuid4().hex, 'TEST', 300, [rpc_func_call])
return await self.client.rpc_call(rpc_func_stack)
async def get_item(self, key):
rpc_func_call = RPCCall('get_item', [key], {})
rpc_func_stack = RPCStack(
uuid4().hex, 'TEST', 300, [rpc_func_call])
return await self.client.rpc_call(rpc_func_stack)
async def custom_error(self):
rpc_func_call = RPCCall('custom_error', [], {})
rpc_func_stack = RPCStack(
uuid4().hex, 'TEST', 300, [rpc_func_call])
return await self.client.rpc_call(rpc_func_stack)
async def multiply_with_dataclass(self, x: CustomDataModel):
assert isinstance(x, CustomDataModel)
rpc_func_call = RPCCall('multiply_with_dataclass', [x], {})
rpc_func_stack = RPCStack(
uuid4().hex, 'TEST', 300, [rpc_func_call])
return await self.client.rpc_call(rpc_func_stack)
@pytest.mark.asyncio
async def test_simple_call(do_rpc_call):
test_service_client = ServiceClient(None)
result = await do_rpc_call(
test_service_client,
DefaultExecutor("TEST", Service()),
test_service_client.multiply(100, 100))
assert result == 100 * 100
@pytest.mark.asyncio
async def test_simple_call_with_client_processing(do_rpc_call):
test_service_client = ServiceClient(None)
result = await do_rpc_call(
test_service_client,
DefaultExecutor("TEST", Service()),
test_service_client.multiply(100, 100),
client_processing=True)
assert result == 100 * 100
@pytest.mark.asyncio
async def test_simple_call2(do_rpc_call):
test_service_client = ServiceClient(None)
result = await do_rpc_call(
test_service_client,
DefaultExecutor("TEST", Service()),
test_service_client.get_item('foo'))
assert result == 'bar'
@pytest.mark.asyncio
async def test_property(do_rpc_call):
test_service_client = ServiceClient(None)
result = await do_rpc_call(
test_service_client,
DefaultExecutor("TEST", Service()),
test_service_client.data)
assert result == {'foo': 'bar'}
@pytest.mark.asyncio
async def test_key_error(do_rpc_call):
test_service_client = ServiceClient(None)
with pytest.raises(KeyError):
await do_rpc_call(
test_service_client,
DefaultExecutor("TEST", Service()),
test_service_client.get_item('bar'))
@pytest.mark.asyncio
async def test_not_builtin_exception(do_rpc_call):
test_service_client = ServiceClient(None)
with pytest.raises(WrappedException):
await do_rpc_call(
test_service_client,
DefaultExecutor("TEST", Service()),
test_service_client.custom_error())
@pytest.mark.asyncio
async def test_custom_data_model(do_rpc_call):
test_service_client = ServiceClient(None)
value = CustomDataModel(100, 100)
result = await do_rpc_call(
test_service_client,
DefaultExecutor("TEST", Service()),
test_service_client.multiply_with_dataclass(value),
custom_dataclasses=[CustomDataModel])
assert result == value.multiply()
|
the-stack_106_31070 | import torch
import torch.nn.functional as F
import torch.optim as optim
from torchvision.models import vgg16
import time
import os
import psutil
import numpy as np
from exp_config import random_input_generator, MONITOR_INTERVAL, NUM_ITERS, BATCH_SIZE, LERANING_RATE
# set gpu_id 0
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# system monitor
info = psutil.virtual_memory()
monitor_interval = MONITOR_INTERVAL
avg_mem_usage = 0
max_mem_usage = 0
count = 0
total_time = 0
# get the whole model
vgg = vgg16()
start_time = time.time()
vgg = vgg.to(device)
total_time += time.time() - start_time
# training setting
num_iter = NUM_ITERS
batch_size = BATCH_SIZE
optimizer = optim.Adam(vgg.parameters(), lr=LERANING_RATE)
# data generator
gen = random_input_generator(num_iter, batch_size, format='NCHW')
# begin training
for idx, data in enumerate(gen):
x_batch = torch.Tensor(data[0])
y_batch = torch.Tensor(data[1]).long()
start_time = time.time()
x_batch = x_batch.to(device)
y_batch = y_batch.to(device)
# forward + backward
outputs = vgg(x_batch)
loss = F.cross_entropy(outputs, y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
end_time = time.time()
consume_time = end_time - start_time
total_time += consume_time
if idx % monitor_interval == 0:
cur_usage = psutil.Process(os.getpid()).memory_info().rss
max_mem_usage = max(cur_usage, max_mem_usage)
avg_mem_usage += cur_usage
count += 1
print("[*] {} iteration: memory usage {:.2f}MB, consume time {:.4f}s".format(
idx, cur_usage / (1024 * 1024), consume_time))
print('consumed time:', total_time)
avg_mem_usage = avg_mem_usage / count / (1024 * 1024)
max_mem_usage = max_mem_usage / (1024 * 1024)
print('average memory usage: {:.2f}MB'.format(avg_mem_usage))
print('maximum memory usage: {:.2f}MB'.format(max_mem_usage))
|
the-stack_106_31072 | #-*- coding: UTF-8 -*-
import theano
import theano.tensor as T
import numpy
import cPickle
class UsrEmbLayer(object):
def __init__(self, rng, n_usr, dim, name, prefix=None):
self.name = name
if prefix == None:
U_values = numpy.zeros((n_usr+1,dim),dtype=numpy.float32)
U = theano.shared(value=U_values, name='U', borrow=True)
else:
f = file(prefix + name + '.save', 'rb')
U = cPickle.load(f)
f.close()
self.U = U
self.output = self.U
self.params = [self.U]
def save(self, prefix):
f = file(prefix + self.name + '.save', 'wb')
for obj in self.params:
cPickle.dump(obj, f, protocol=cPickle.HIGHEST_PROTOCOL)
f.close()
|
the-stack_106_31073 | import Logs
import Options
import Utils
class CompilerTraits(object):
def get_warnings_flags(self, level):
"""get_warnings_flags(level) -> list of cflags"""
raise NotImplementedError
def get_optimization_flags(self, level):
"""get_optimization_flags(level) -> list of cflags"""
raise NotImplementedError
def get_debug_flags(self, level):
"""get_debug_flags(level) -> (list of cflags, list of cppdefines)"""
raise NotImplementedError
class GccTraits(CompilerTraits):
def __init__(self):
super(GccTraits, self).__init__()
# cumulative list of warnings per level
self.warnings_flags = [['-Wall'], ['-Werror'], ['-Wextra']]
def get_warnings_flags(self, level):
warnings = []
for l in range(level):
if l < len(self.warnings_flags):
warnings.extend(self.warnings_flags[l])
else:
break
return warnings
def get_optimization_flags(self, level):
if level == 0:
return ['-O0']
elif level == 1:
return ['-O']
elif level == 2:
return ['-O2']
elif level == 3:
return ['-O3']
def get_debug_flags(self, level):
if level == 0:
return (['-g0'], ['NDEBUG'])
elif level == 1:
return (['-g'], [])
elif level >= 2:
return (['-ggdb', '-g3'], ['_DEBUG'])
class IccTraits(CompilerTraits):
def __init__(self):
super(IccTraits, self).__init__()
# cumulative list of warnings per level
# icc is _very_ verbose with -Wall, -Werror is barely achievable
self.warnings_flags = [[], [], ['-Wall']]
def get_warnings_flags(self, level):
warnings = []
for l in range(level):
if l < len(self.warnings_flags):
warnings.extend(self.warnings_flags[l])
else:
break
return warnings
def get_optimization_flags(self, level):
if level == 0:
return ['-O0']
elif level == 1:
return ['-O']
elif level == 2:
return ['-O2']
elif level == 3:
return ['-O3']
def get_debug_flags(self, level):
if level == 0:
return (['-g0'], ['NDEBUG'])
elif level == 1:
return (['-g'], [])
elif level >= 2:
return (['-ggdb', '-g3'], ['_DEBUG'])
class MsvcTraits(CompilerTraits):
def __init__(self):
super(MsvcTraits, self).__init__()
# cumulative list of warnings per level
self.warnings_flags = [['/W2'], ['/WX'], ['/Wall']]
def get_warnings_flags(self, level):
warnings = []
for l in range(level):
if l < len(self.warnings_flags):
warnings.extend(self.warnings_flags[l])
else:
break
return warnings
def get_optimization_flags(self, level):
if level == 0:
return ['/Od']
elif level == 1:
return []
elif level == 2:
return ['/O2']
elif level == 3:
return ['/Ox']
def get_debug_flags(self, level):
if level == 0:
return ([], ['NDEBUG'])
elif level == 1:
return (['/ZI', '/RTC1'], [])
elif level >= 2:
return (['/ZI', '/RTC1'], ['_DEBUG'])
gcc = GccTraits()
icc = IccTraits()
msvc = MsvcTraits()
# how to map env['COMPILER_CC'] or env['COMPILER_CXX'] into a traits object
compiler_mapping = {
'gcc': gcc,
'g++': gcc,
'msvc': msvc,
'icc': icc,
'icpc': icc,
}
profiles = {
# profile name: [optimization_level, warnings_level, debug_level]
'default': [2, 1, 1],
'debug': [0, 2, 3],
'release': [3, 1, 0],
}
default_profile = 'default'
def options(opt):
assert default_profile in profiles
opt.add_option('-d', '--build-profile',
action='store',
default=default_profile,
help=("Specify the build profile. "
"Build profiles control the default compilation flags"
" used for C/C++ programs, if CCFLAGS/CXXFLAGS are not"
" set set in the environment. [Allowed Values: %s]"
% ", ".join([repr(p) for p in profiles.keys()])),
choices=profiles.keys(),
dest='build_profile')
def configure(conf):
cc = conf.env['COMPILER_CC'] or None
cxx = conf.env['COMPILER_CXX'] or None
if not (cc or cxx):
raise Utils.WafError("neither COMPILER_CC nor COMPILER_CXX are defined; "
"maybe the compiler_cc or compiler_cxx tool has not been configured yet?")
try:
compiler = compiler_mapping[cc]
except KeyError:
try:
compiler = compiler_mapping[cxx]
except KeyError:
Logs.warn("No compiler flags support for compiler %r or %r"
% (cc, cxx))
return
opt_level, warn_level, dbg_level = profiles[Options.options.build_profile]
optimizations = compiler.get_optimization_flags(opt_level)
debug, debug_defs = compiler.get_debug_flags(dbg_level)
warnings = compiler.get_warnings_flags(warn_level)
conf.env.append_value('CXXFLAGS', "-Wno-write-strings")
if cc and not conf.env['CCFLAGS']:
conf.env.append_value('CCFLAGS', optimizations)
conf.env.append_value('CCFLAGS', debug)
conf.env.append_value('CCFLAGS', warnings)
conf.env.append_value('CCDEFINES', debug_defs)
if cxx and not conf.env['CXXFLAGS']:
conf.env.append_value('CXXFLAGS', optimizations)
conf.env.append_value('CXXFLAGS', debug)
conf.env.append_value('CXXFLAGS', warnings)
conf.env.append_value('CXXDEFINES', debug_defs)
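# Quick illustration (not part of the original tool): the profiles table above maps
# directly onto the per-compiler traits, e.g. for gcc the 'debug' profile
# [opt=0, warn=2, dbg=3] expands to:
#
#   gcc.get_optimization_flags(0)   # ['-O0']
#   gcc.get_warnings_flags(2)       # ['-Wall', '-Werror']
#   gcc.get_debug_flags(3)          # (['-ggdb', '-g3'], ['_DEBUG'])
#
# and would typically be selected with:  ./waf configure -d debug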
|
the-stack_106_31075 | # Copyright 2013-2014, Simon Kennedy, [email protected]
#
# Part of 'hiss' the asynchronous notification library
"""
Currently the following schemes are supported
========= ================================
``gntp``  Growl Network Transfer Protocol
``pb`` Pushbullet
``po`` Pushover
``prowl`` Prowl
``snp`` Snarl Network Protocol
``kodi`` KODI
========= ================================
For the ``snp``, ``gntp`` and ``kodi`` schemes, targets are specified
using a URL like string of the form ::
scheme://[username:[password@]]host[:port]
If no port number is specified then the default port for the target type
will be used.
For the Prowl, Pushbullet and Pushover schemes, targets are specified using
an API Key with an optional filter to target specific devices ::
scheme://apikey[:filter]
"""
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
SNP_SCHEME = 'snp'
GNTP_SCHEME = 'gntp'
KODI_SCHEME = 'kodi'
PROWL_SCHEME = 'prowl'
PUSHBULLET_SCHEME = 'pb'
PUSHOVER_SCHEME = 'po'
DEFAULT_SCHEME = SNP_SCHEME
URL_SCHEMES = [SNP_SCHEME, GNTP_SCHEME, KODI_SCHEME]
TOKEN_SCHEMES = [PROWL_SCHEME, PUSHOVER_SCHEME, PUSHBULLET_SCHEME]
ALL_SCHEMES = URL_SCHEMES + TOKEN_SCHEMES
def add_urlparse_schemes():
"""Allow urlparse to understand our protocol schemes"""
for s in ALL_SCHEMES:
if s not in urlparse.uses_relative:
urlparse.uses_relative.append(s)
if s not in urlparse.uses_netloc:
urlparse.uses_netloc.append(s)
if s not in urlparse.uses_params:
urlparse.uses_params.append(s)
if s not in urlparse.uses_query:
urlparse.uses_query.append(s)
|
the-stack_106_31076 | import unittest
from hstest.check_result import correct
from hstest.dynamic.dynamic_test import dynamic_test
from hstest.dynamic.output.infinite_loop_detector import loop_detector
from hstest.stage_test import StageTest
from hstest.testing.tested_program import TestedProgram
class InfiniteLoopTestNotWorking(StageTest):
@dynamic_test
def test(self):
main = TestedProgram('main')
main.start()
return correct()
class Test(unittest.TestCase):
def test(self):
prev = loop_detector.working
loop_detector.working = False
try:
status, feedback = InfiniteLoopTestNotWorking().run_tests()
self.assertEqual(
"test OK",
feedback)
self.assertEqual(status, 0)
finally:
loop_detector.working = prev
if __name__ == '__main__':
Test().test()
|
the-stack_106_31077 | '''
Dshell external file class/utils
for use in rippers, dumpers, etc.
@author: amm
'''
import os
from dshell import Blob
from shutil import move
from hashlib import md5
'''
Mode Constants
'''
FILEONDISK = 1 # Object refers to file already written to disk
FILEINMEMORY = 2 # Object contains file contents in data member
'''
dfile -- Dshell file class.
Extends blob for offset based file chunk (segment) reassembly.
Removes time and directionality from segments.
Decoders can instantiate this class and pass it to
output modules or other decoders.
Decoders can choose to pass a file in memory or already
written to disk.
A dfile object can have one of the following modes:
FILEONDISK
FILEINMEMORY
'''
class dfile(Blob):
def __init__(self,mode=FILEINMEMORY,name=None,data=None,**kwargs):
# Initialize Segments
# Only really used in memory mode
self.segments={}
self.startoffset=0
self.endoffset=0
# Initialize consistent info members
self.mode=mode
self.name=name
self.diskpath=None
self.info_keys = ['mode','name','diskpath','startoffset','endoffset']
#update with additional info
self.info(**kwargs)
#update data
if data != None:
self.update(data)
def __iter__(self):
'''
Undefined
'''
pass
def __str__(self):
'''
Returns filename (string)
'''
return self.name
def __repr__(self):
'''
Returns filename (string)
'''
return self.name
def md5(self):
'''
Returns md5 of file
Calculate based on reassembly from FILEINMEMORY
or loads from FILEONDISK
'''
if self.mode == FILEINMEMORY:
return md5(self.data()).hexdigest()
elif self.mode == FILEONDISK:
m = md5()
fh = open(self.diskpath, 'r')
m.update(fh.read())
fh.close()
return m.hexdigest()
else:
return None
def load(self):
'''
Load file from disk. Converts object to mode FILEINMEMORY
'''
if not self.mode == FILEONDISK: return False
try:
fh = open(self.diskpath, 'r')
self.update(fh.read())
fh.close()
self.mode = FILEINMEMORY
except:
return False
def write(self,path='.',name=None,clobber=False,errorHandler=None,padding=None,overlap=True):
'''
Write file contents at location relative to path.
Name on disk will be based on internal name unless one is provided.
        For mode FILEINMEMORY, data() will be called for reconstruction.
After writing to disk, mode will be changed to FILEONDISK.
If mode is already FILEONDISK, file will be moved to new location.
'''
olddiskpath = self.diskpath
if name == None: name=self.name
self.diskpath = self.__localfilename(name, path, clobber)
if self.mode == FILEINMEMORY:
fh = open(self.diskpath, 'w')
fh.write(self.data())
            fh.close()
            self.mode = FILEONDISK  # file is now on disk, as the docstring states
            self.segments={}
self.startoffset=0
self.endoffset=0
return self.diskpath
elif self.mode == FILEONDISK:
move(olddiskpath, self.diskpath)
return self.diskpath
def update(self,data,offset=None):
if self.mode != FILEINMEMORY: return
#if offsets are not being provided, just keep packets in wire order
if offset==None: offset=self.endoffset
#don't buffer duplicate packets
if offset not in self.segments: self.segments[offset]=data
#update the end offset if this packet goes at the end
if offset >= self.endoffset: self.endoffset=offset+len(data)
#
# Generate a local (extracted) filename based on the original
#
def __localfilename(self, origname, path = '.', clobber = False):
tmp = origname.replace("\\", "_")
tmp = tmp.replace("/", "_")
tmp = tmp.replace(":", "_")
tmp = tmp.replace("?", "_")
tmp = tmp.lstrip('_')
localname = ''
for c in tmp:
if ord(c) > 32 and ord(c) < 127:
localname += c
else:
localname += "%%%02X" % ord(c)
# Truncate (from left) to max filename length on filesystem (-3 in case we need to add a suffix)
localname = localname[os.statvfs(path).f_namemax*-1:]
# Empty filename not allowed
if localname == '': localname = 'blank'
localname = os.path.realpath(os.path.join(path,localname))
if clobber: return localname
# No Clobber mode, check to see if file exists
suffix = ''
i = 0
while os.path.exists(localname+suffix):
i += 1
suffix = "_%02d" % i
return localname+suffix
|
the-stack_106_31080 | # qubit number=4
# total number=32
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += CNOT(0,3) # number=29
prog += X(3) # number=30
prog += CNOT(0,3) # number=31
prog += RX(-1.9352210746113125,3) # number=14
prog += CNOT(1,2) # number=22
prog += H(1) # number=2
prog += H(2) # number=3
prog += H(3) # number=4
prog += Y(2) # number=13
prog += Y(2) # number=28
prog += RX(0.13823007675795101,2) # number=24
prog += H(0) # number=5
prog += H(1) # number=6
prog += H(2) # number=7
prog += H(3) # number=8
prog += H(0) # number=9
prog += RX(-1.9069467407290044,2) # number=20
prog += H(3) # number=21
prog += H(3) # number=27
prog += Y(2) # number=10
prog += H(1) # number=17
prog += CZ(3,1) # number=18
prog += H(1) # number=19
prog += Y(2) # number=11
prog += CNOT(1,0) # number=15
prog += CNOT(1,0) # number=16
prog += Z(3) # number=23
prog += Y(1) # number=25
prog += Y(1) # number=26
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil2996.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
the-stack_106_31083 | import sys
from sklearn.linear_model import Ridge
import numpy as np
from modules.utils import overlap_ratio
class BBRegressor():
def __init__(self, img_size, alpha=1000, overlap=[0.6, 1], scale=[1, 2]):
self.img_size = img_size
self.alpha = alpha
self.overlap_range = overlap
self.scale_range = scale
self.model = Ridge(alpha=self.alpha)
def train(self, X, bbox, gt):
X = X.cpu().numpy()
bbox = np.copy(bbox)
gt = np.copy(gt)
if gt.ndim==1:
gt = gt[None,:]
r = overlap_ratio(bbox, gt)
s = np.prod(bbox[:,2:], axis=1) / np.prod(gt[0,2:])
idx = (r >= self.overlap_range[0]) * (r <= self.overlap_range[1]) * \
(s >= self.scale_range[0]) * (s <= self.scale_range[1])
X = X[idx]
bbox = bbox[idx]
Y = self.get_examples(bbox, gt)
self.model.fit(X, Y)
def predict(self, X, bbox):
X = X.cpu().numpy()
bbox_ = np.copy(bbox)
Y = self.model.predict(X)
bbox_[:,:2] = bbox_[:,:2] + bbox_[:,2:]/2
bbox_[:,:2] = Y[:,:2] * bbox_[:,2:] + bbox_[:,:2]
bbox_[:,2:] = np.exp(Y[:,2:]) * bbox_[:,2:]
bbox_[:,:2] = bbox_[:,:2] - bbox_[:,2:]/2
bbox_[:,:2] = np.maximum(bbox_[:,:2], 0)
bbox_[:,2:] = np.minimum(bbox_[:,2:], self.img_size - bbox[:,:2])
return bbox_
def get_examples(self, bbox, gt):
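        # Standard bounding-box regression targets: with proposal center (bx, by),
        # size (bw, bh) and ground-truth center (gx, gy), size (gw, gh), the code
        # below computes
        #   dx = (gx - bx) / bw,  dy = (gy - by) / bh,
        #   dw = log(gw / bw),    dh = log(gh / bh)
        # which predict() inverts to map proposals back onto refined boxes.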
bbox[:,:2] = bbox[:,:2] + bbox[:,2:]/2
gt[:,:2] = gt[:,:2] + gt[:,2:]/2
dst_xy = (gt[:,:2] - bbox[:,:2]) / bbox[:,2:]
dst_wh = np.log(gt[:,2:] / bbox[:,2:])
Y = np.concatenate((dst_xy, dst_wh), axis=1)
return Y
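
# Hedged usage sketch (added; not part of the original module, and it assumes the
# modules.utils.overlap_ratio import above resolves). It only illustrates the expected
# shapes and calling order: features are a torch tensor, boxes/gt are numpy (x, y, w, h)
# arrays, and the jittered boxes are generated so they survive the overlap/scale filter
# in train(). Values are random, so the fitted regressor itself is meaningless.
if __name__ == '__main__':
    import torch
    np.random.seed(0)
    gt = np.array([20., 20., 40., 40.])                 # ground-truth box
    noise = np.random.uniform(-2, 2, (60, 4))
    noise[:, 2:] = np.random.uniform(0, 4, (60, 2))     # keep w, h >= gt so the scale filter passes
    boxes = gt[None, :] + noise                         # 60 jittered sample boxes
    feats = torch.rand(60, 256)                         # stand-in for pooled CNN features
    reg = BBRegressor(img_size=np.array([120., 120.]))
    reg.train(feats, boxes, gt)
    print(reg.predict(feats[:5], boxes[:5]).shape)      # -> (5, 4) refined boxes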
|
the-stack_106_31085 | import tkinter as tk
import tkinter.ttk as ttk
import logging
from cep_price_console.utils.log_utils import CustomAdapter, debug
class CntrUploadTab (object):
logger = CustomAdapter(logging.getLogger(str(__name__)), None)
@debug(lvl=logging.NOTSET, prefix='')
def __init__(self, master, tab_text, tab_state='normal'):
self.master = master
self.cont = self.master.vc
self.nbook = self.master.nb
self.tab_text = tab_text
self.__tab_id = self.master.tab_id_assignment()
self.tab_state = tab_state
self.frame_base = ttk.Frame(self.nbook)
self.frame_cmd = ttk.Frame(self.frame_base)
self.frame_main = ttk.Frame(self.frame_base)
self.btn_next = ttk.Button(self.frame_cmd)
self.btn_prev = ttk.Button(self.frame_cmd)
self.widgets = []
self.manager = BusyManager(self.frame_main)
# region tab_text
@property
@debug(lvl=logging.NOTSET, prefix='')
def tab_text(self):
return self.__tab_text
@tab_text.setter
@debug(lvl=logging.NOTSET, prefix='')
def tab_text(self, value):
if isinstance(value, str):
self.__tab_text = value
else:
CntrUploadTab.logger.error("CntrUploadTab: tab_text not string: {0}".format(str(type(value))))
# endregion
# region tab_id
@property
@debug(lvl=logging.NOTSET, prefix='')
def tab_id(self):
return self.__tab_id
@tab_id.setter
@debug(lvl=logging.DEBUG, prefix='')
def tab_id(self, value):
if isinstance(value, int):
self.__tab_id = value
else:
CntrUploadTab.logger.error("CntrUploadTab: tab_id not integer: {0}".format(str(type(value))))
# endregion
# region tab_state
@property
@debug(lvl=logging.NOTSET, prefix='')
def tab_state(self):
return self.__tab_state
@tab_state.setter
@debug(lvl=logging.NOTSET, prefix='')
def tab_state(self, value):
states = ('normal', 'disabled', 'hidden')
if value in states:
self.__tab_state = value
else:
CntrUploadTab.logger.error("Tab State ({0}) not a valid value: {1}".format(value, states))
# endregion
@debug(lvl=logging.NOTSET, prefix='')
def nbook_add(self):
CntrUploadTab.logger.log(logging.NOTSET, "Tab ID: {0}".format(str(self.tab_id)))
self.nbook.add(self.frame_base, text=self.tab_text, state=self.tab_state)
self.frame_base.columnconfigure(0, weight=1)
self.frame_base.rowconfigure(0, weight=1)
self.frame_main.grid(row=0, column=0, sticky=tk.NSEW)
self.frame_cmd.grid(row=1, column=0, sticky=tk.SE)
if self.tab_id != self.master.max_tab_id:
self.btn_next.config(text="Proceed")
self.btn_next.state(['disabled'])
self.btn_next.grid(row=0, column=2)
if self.tab_id != 0:
self.btn_prev.config(text="Previous", command=lambda: self.master.tab_switcher(self.tab_id - 1))
self.btn_prev.grid(row=0, column=1)
@debug(lvl=logging.DEBUG, prefix='')
def add_widget(self, widget: object):
self.widgets.append(widget)
@debug(lvl=logging.NOTSET, prefix='')
def toggle_tab(self, tab_state: str):
self.tab_state = tab_state
self.nbook.tab(self.tab_id, state=self.tab_state)
class BusyManager:
def __init__(self, widget):
self.toplevel = widget.winfo_toplevel()
self.widgets = {}
def busy(self, widget=None):
# attach busy cursor to toplevel, plus all windows
# that define their own cursor.
if widget is None:
w = self.toplevel # myself
else:
w = widget
if not self.widgets.get(str(w)):
try:
# attach cursor to this widget
cursor = w.cget("cursor")
if cursor != "watch":
self.widgets[str(w)] = (w, cursor)
w.config(cursor="watch")
else:
pass
except tk.TclError:
pass
else:
pass
for w in w.children.values():
self.busy(w)
def not_busy(self):
# restore cursors
for w, cursor in self.widgets.values():
try:
w.config(cursor=cursor)
except tk.TclError:
pass
self.widgets = {}
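
# Hedged usage sketch (added; assumes the cep_price_console imports above resolve and a
# display is available). BusyManager only swaps every widget cursor to "watch" and back,
# so it can be exercised on a bare Tk window without the rest of the upload-tab machinery.
if __name__ == "__main__":
    root = tk.Tk()
    ttk.Button(root, text="demo").pack()
    manager = BusyManager(root)
    manager.busy()           # every widget in the window now shows the busy cursor
    root.update_idletasks()
    manager.not_busy()       # original cursors restored
    root.destroy()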
|
the-stack_106_31087 | import pathlib
from setuptools import setup
VERSION = '0.1.10'
HERE = pathlib.Path(__file__).parent
README = (HERE / "README.md").read_text()
setup(name='google_nest_sdm_jm',
version=VERSION,
description='Library for the Google Nest SDM API',
long_description=README,
long_description_content_type="text/markdown",
keywords='google nest sdm camera therostat security doorbell',
author='Allen Porter, Jonathan Melcher',
author_email='[email protected]',
url='https://github.com/allenporter/python-google-nest-sdm',
packages=['google_nest_sdm_jm'],
include_package_data=True,
install_requires=[
'aiohttp>=3.6.2',
'google-auth>=1.22.0',
'google-auth-oauthlib>=0.4.1',
'google-cloud-pubsub>=2.1.0',
'requests-oauthlib>=1.3.0',
],
entry_points = {
'console_scripts': [
'google_nest=google_nest_sdm_jm.google_nest:main',
],
},
tests_require=[
'pytest_aiohttp>=0.3.0',
])
|
the-stack_106_31088 | import sys
from typing import List, Any
from argparse import ArgumentParser, Namespace, _SubParsersAction
from pathlib import Path
from os.path import join
from utils import shellcode_encoder
from cli.enums import SHELLCODE_HELP
from inject import Injector
def run(args: List[Any]):
options = {
"should_restore": not args["no_restore"],
"enter": args["enter"],
"cave": args["cave"],
"nop_restore_data": args["nop_restore_data"],
}
manager = Injector(args["shellcode"], args["file"], args["output"], options)
return manager.inject()
def apply(subparser: _SubParsersAction) -> ArgumentParser:
parser = subparser.add_parser("build", help="build injected binary")
parser.add_argument(
"-s", "--shellcode", type=str, help=SHELLCODE_HELP, required=True,
)
parser.add_argument(
"-f", "--file", type=str, help="path to source pe file", required=True
)
parser.add_argument(
"-o", "--output", type=str, help="path to newly created pe file"
)
parser.add_argument(
"-F",
"--force",
action="store_true",
help="force overwrite output",
default=False,
)
parser.add_argument(
"--no-restore",
action="store_true",
help="do not fix the payload with popa and pusha",
default=False,
)
parser.add_argument(
"--nop-restore-data",
action="store_true",
help="fill replaced/removed original instructions with NOPs instead of appending them to shellcode",
default=False,
)
parser.add_argument(
"-c",
"--cave",
action="store",
choices=["auto", "cave", "new-section"],
default="auto",
help="where to write the shellcode. defaults to auto",
)
parser.add_argument(
"-e",
"--enter",
action="store",
choices=["jump", "new-section"],
default="jump",
help="how to handle the entrypoing. defaults to 'jump' where the executable uses 'jmp' to move to new section",
)
return parser
def normalize(args: Namespace) -> dict:
items = vars(args)
items["shellcode"] = shellcode_encoder(items["shellcode"])
p_file = Path(items["file"])
if not p_file.is_file():
print(f"[!] File not found at {items['file']}")
sys.exit(1)
items["file"] = p_file
if not args.output or Path(args.output).is_dir():
        if args.output and Path(args.output).is_dir():
parent = args.output
else:
parent = p_file.parent
parts = p_file.name.split(".")
if len(parts) > 1:
output = (
"".join(parts[: len(parts) - 1]) + "-injected." + parts[len(parts) - 1]
)
else:
output = p_file.name + "-injected"
items["output"] = join(parent, output)
if items["output"] in ["stdout", "/proc/self/fd/1"]:
print("[!] Writing to stdout not supported")
sys.exit(1)
p_output = Path(items["output"])
if p_output.is_file() and not items["force"]:
print("[!] Output file already exists. Delete it or use '--force' to overwrite")
sys.exit(1)
items["output"] = p_output
return items
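
# Hedged wiring sketch (added): roughly how a parent CLI is expected to plug these three
# helpers together. The "inject" program name is illustrative only, and the project-local
# imports at the top of this module (utils, cli.enums, inject) must be importable.
if __name__ == "__main__":
    root_parser = ArgumentParser(prog="inject")
    subparsers = root_parser.add_subparsers(dest="command")
    apply(subparsers)                      # registers the "build" subcommand defined above
    parsed = root_parser.parse_args()
    if parsed.command == "build":
        run(normalize(parsed))             # normalize() validates paths, run() performs the injection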
|
the-stack_106_31089 | """
This sample demonstrates a simple skill built with the Amazon Alexa Skills Kit.
The Intent Schema, Custom Slots, and Sample Utterances for this skill, as well
as testing instructions are located at http://amzn.to/1LzFrj6
For additional samples, visit the Alexa Skills Kit Getting Started guide at
http://amzn.to/1LGWsLG
"""
from __future__ import print_function
import urllib.request
# --------------- Helpers that build all of the responses ----------------------
def build_speechlet_response(title, output, reprompt_text, should_end_session, debug):
return {
'outputSpeech': {
'type': 'PlainText',
'text': output
},
'card': {
'type': 'Simple',
'title': "SessionSpeechlet - " + title,
'content': "SessionSpeechlet - " + output
},
'reprompt': {
'outputSpeech': {
'type': 'PlainText',
'text': reprompt_text
}
},
'shouldEndSession': should_end_session,
'debug': debug
}
def build_response(session_attributes, speechlet_response):
return {
'version': '1.0',
'sessionAttributes': session_attributes,
'response': speechlet_response
}
# --------------- Functions that control the skill's behavior ------------------
def get_welcome_response():
""" If we wanted to initialize the session to have some attributes we could
add those here
"""
session_attributes = {}
card_title = "Welcome"
speech_output = "I will " + \
"stop transcribing"
reprompt_text = "You can ask me to start or stop transcribing by saying, " \
"start transcribing or stop transcribing."
debug = "stoping reading"
captioning = urllib.request.urlopen("https://1d496ef7.ngrok.io/stop").read()
#print(captioning)
print(speech_output)
should_end_session = True
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session, debug))
def handle_session_end_request():
card_title = "Session Ended"
speech_output = "Thank you for trying the Alexa Skills for transcribing. " \
"Bye! "
# Setting this to true ends the session and exits the skill.
should_end_session = True
debug = " "
return build_response({}, build_speechlet_response(
card_title, speech_output, None, should_end_session, debug))
def create_transcribe_attribute(stopTrans):
return {"stopTrans": stopTrans}
def set_transcribe_in_session(intent, session):
card_title = intent['name']
session_attributes = {}
should_end_session = False
speech_output = "I will " + \
"stop transcribing" + \
". You can ask me to start transcribing anytime. "
reprompt_text = "You can ask me to start or stop transcribing by saying, " \
"start transcribing or stop transcribing."
debug = "stoping reading"
captioning = urllib.request.urlopen("https://1d496ef7.ngrok.io/stop").read()
#print(captioning)
print(speech_output)
should_end_session = True
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, should_end_session, debug))
def get_transcribe_from_session(intent, session):
session_attributes = {}
reprompt_text = None
if session.get('attributes', {}) and "stopTrans" in session.get('attributes', {}):
stopTrans = session['attributes']['stopTrans']
speech_output = "You can " + stopTrans + \
". Goodbye."
should_end_session = True
else:
speech_output = "I'm not sure what you mean. " \
"Please try again."
should_end_session = False
debug = " "
# Setting reprompt_text to None signifies that we do not want to reprompt
# the user. If the user does not respond or says something that is not
# understood, the session will end.
return build_response(session_attributes, build_speechlet_response(
intent['name'], speech_output, reprompt_text, should_end_session, debug))
# --------------- Events ------------------ (Alexa is called)
def on_session_started(session_started_request, session):
""" Called when the session starts """
print("on_session_started requestId=" + session_started_request['requestId']
+ ", sessionId=" + session['sessionId'])
def on_launch(launch_request, session):
""" Called when the user launches the skill without specifying what they
want
"""
print("on_launch requestId=" + launch_request['requestId'] +
", sessionId=" + session['sessionId'])
# Dispatch to your skill's launch
return get_welcome_response()
def on_intent(intent_request, session):
""" Called when the user specifies an intent for this skill """
print("on_intent requestId=" + intent_request['requestId'] +
", sessionId=" + session['sessionId'])
intent = intent_request['intent']
intent_name = intent_request['intent']['name']
# Dispatch to your skill's intent handlers
if intent_name == "stopTransIsIntent":
return set_transcribe_in_session(intent, session)
elif intent_name == "AMAZON.HelpIntent":
return get_welcome_response()
elif intent_name == "AMAZON.CancelIntent" or intent_name == "AMAZON.StopIntent":
return handle_session_end_request()
else:
raise ValueError("Invalid intent")
def on_session_ended(session_ended_request, session):
""" Called when the user ends the session.
Is not called when the skill returns should_end_session=true
"""
print("on_session_ended requestId=" + session_ended_request['requestId'] +
", sessionId=" + session['sessionId'])
# add cleanup logic here
# --------------- Main handler ------------------
def lambda_handler(event, context):
""" Route the incoming request based on type (LaunchRequest, IntentRequest,
etc.) The JSON body of the request is provided in the event parameter.
"""
print("event.session.application.applicationId=" +
event['session']['application']['applicationId'])
"""
Uncomment this if statement and populate with your skill's application ID to
prevent someone else from configuring a skill that sends requests to this
function.
"""
# if (event['session']['application']['applicationId'] !=
# "amzn1.echo-sdk-ams.app.[unique-value-here]"):
# raise ValueError("Invalid Application ID")
if event['session']['new']:
on_session_started({'requestId': event['request']['requestId']},
event['session'])
if event['request']['type'] == "LaunchRequest":
return on_launch(event['request'], event['session'])
elif event['request']['type'] == "IntentRequest":
return on_intent(event['request'], event['session'])
elif event['request']['type'] == "SessionEndedRequest":
return on_session_ended(event['request'], event['session']) |
the-stack_106_31090 | #!/usr/bin/env python
import io
import os
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
# get Image out of pillow
from PIL import Image
# Instantiates a client
client = vision.ImageAnnotatorClient()
if __name__ == "__main__":
# The name of the image file to annotate
file_name = os.path.join(
os.path.dirname(__file__),
'resources/f_one.jpg')
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
content = image_file.read()
image_one = types.Image(content=content)
im_one = Image.open(file_name)
### Performs label detection on the image file
response_one = client.label_detection(image=image_one)
labels_one = response_one.label_annotations
#
print('Labels:')
for label in labels_one:
print(label.description)
# Performs label detection on the image file
face_response_one = client.face_detection(image=image_one)
print("face_re")
print(dir(face_response_one))
print(face_response_one.face_annotations)
face_one_box = face_response_one.face_annotations[0].fd_bounding_poly.vertices
faceone_box = ( face_one_box[0].x, face_one_box[0].y, face_one_box[1].x, face_one_box[2].y )
im_one_box = im_one.crop(faceone_box)
file_two = os.path.join(
os.path.dirname(__file__),
'resources/f_two.jpg')
# Loads the image into memory
with io.open(file_two, 'rb') as image_file:
content_two = image_file.read()
image_two = types.Image(content=content_two)
im_two = Image.open(file_two)
# Performs face detection on the image file
face_response_two = client.face_detection(image=image_two)
print("face_re_two")
print(face_response_two.face_annotations)
face_box = face_response_two.face_annotations[0].fd_bounding_poly.vertices
facetwo_box = ( face_box[0].x, face_box[0].y, face_box[1].x, face_box[2].y )
im_two_box = im_two.crop(facetwo_box)
# Write out the new image
Image.blend(im_one_box, im_two_box.resize(im_one_box.size), 0.5 ).save("/tmp/comp_2.jpg") |
the-stack_106_31094 | import sys
import py2app
__all__ = ['infoPlistDict']
def infoPlistDict(CFBundleExecutable, plist={}):
CFBundleExecutable = CFBundleExecutable
NSPrincipalClass = ''.join(CFBundleExecutable.split())
version = sys.version[:3]
pdict = dict(
CFBundleDevelopmentRegion='English',
CFBundleDisplayName=plist.get('CFBundleName', CFBundleExecutable),
CFBundleExecutable=CFBundleExecutable,
CFBundleIconFile=CFBundleExecutable,
CFBundleIdentifier='org.pythonmac.unspecified.%s' % (NSPrincipalClass,),
CFBundleInfoDictionaryVersion='6.0',
CFBundleName=CFBundleExecutable,
CFBundlePackageType='BNDL',
CFBundleShortVersionString=plist.get('CFBundleVersion', '0.0'),
CFBundleSignature='????',
CFBundleVersion='0.0',
LSHasLocalizedDisplayName=False,
NSAppleScriptEnabled=False,
NSHumanReadableCopyright='Copyright not specified',
        NSMainNibFile='MainMenu',
NSPrincipalClass=NSPrincipalClass,
PyMainFileNames=['__boot__'],
PyResourcePackages=[ (s % version) for s in [
'lib/python%s',
'lib/python%s/lib-dynload',
'lib/python%s/site-packages.zip',
]] + [ 'lib/python%s.zip' % version.replace('.', '') ],
PyRuntimeLocations=[(s % version) for s in [
'@executable_path/../Frameworks/Python.framework/Versions/%s/Python',
'~/Library/Frameworks/Python.framework/Versions/%s/Python',
'/Library/Frameworks/Python.framework/Versions/%s/Python',
'/Network/Library/Frameworks/Python.framework/Versions/%s/Python',
'/System/Library/Frameworks/Python.framework/Versions/%s/Python',
]],
)
pdict.update(plist)
pythonInfo = pdict.setdefault('PythonInfoDict', {})
pythonInfo.update(dict(
PythonLongVersion=sys.version,
PythonShortVersion=sys.version[:3],
PythonExecutable=sys.executable,
))
py2appInfo = pythonInfo.setdefault('py2app', {}).update(dict(
version=py2app.__version__,
template='bundle',
))
return pdict
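
# Hedged usage sketch (added; requires py2app to be importable, since this module reads
# its version). The helper only builds a defaults dictionary and lets the caller's plist win.
if __name__ == '__main__':
    d = infoPlistDict('My App', {'CFBundleVersion': '1.2'})
    print(d['CFBundleIdentifier'])   # org.pythonmac.unspecified.MyApp
    print(d['CFBundleVersion'])      # 1.2 (caller-supplied value overrides the '0.0' default)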
|
the-stack_106_31096 | import re
import sqlite3
from datetime import datetime
class GamePipeline:
@classmethod
def from_crawler(cls, crawler):
settings = crawler.settings
db = settings.get('DATABASE')
drop = settings.getbool('DROP')
return cls(db, drop)
def __init__(self, db, drop):
self.db = db
self.drop = drop
self.con = None
self.cur = None
def open_spider(self, spider):
if spider.name == 'games' and self.db is not None:
self.con = sqlite3.connect(self.db)
self.cur = self.con.cursor()
if self.drop:
self.cur.executescript('''
DROP TABLE IF EXISTS betting;
CREATE TABLE betting(GAME_ID TEXT, HOME_SPREAD REAL, HOME_SPREAD_WL TEXT,
OVER_UNDER REAL, OU_RESULT TEXT);
''')
def close_spider(self, spider):
if spider.name == 'games' and self.db is not None:
self.cur.execute('VACUUM')
self.con.close()
def process_item(self, item, spider):
if spider.name == 'games' and self.db is not None:
self.store_item(item)
return item
def store_item(self, item):
# only store home games to avoid duplicating data
if item['home']:
# map team abbreviations to those in the database
team_abbr = {
'BK': 'BKN',
'CHAR': 'CHA',
'GS': 'GSW',
'NETS': 'BKN',
'NJ': 'BKN',
'NO': 'NOP',
'NY': 'NYK',
'PHO': 'PHX',
'SA': 'SAS'
}
opponent = item['opponent'].upper()
if opponent in team_abbr:
opponent = team_abbr[opponent]
# find opponent ID by abbreviation
self.cur.execute(f'SELECT ID FROM teams WHERE ABBREVIATION IS "{opponent}"')
opp_id = self.cur.fetchone()[0]
# format game date to match games table
response = item['response_url']
start_year, end_year = re.search(r'(\d+)-(\d+)', response).group(1, 2)
start_months = ['Oct', 'Nov', 'Dec']
date = item['date']
year = start_year if date.split()[0] in start_months else end_year
date = datetime.strptime(f'{date} {year}', '%b %d %Y')
date = date.strftime('%Y-%m-%d')
# find game by opponent and date
self.cur.execute(f'SELECT ID FROM games WHERE AWAY_TEAM_ID == {opp_id} AND GAME_DATE IS "{date}"')
game_id = self.cur.fetchone()
# raise exception if no matching game found
if game_id is None:
raise ValueError('No game found')
# insert row into database
values = (game_id[0], item['spread'], item['spread_result'], item['over_under'], item['over_under_result'])
self.cur.execute('''INSERT INTO betting(GAME_ID, HOME_SPREAD, HOME_SPREAD_WL, OVER_UNDER, OU_RESULT)
VALUES(?, ?, ?, ?, ?)''', values)
self.con.commit()
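
# Hedged configuration sketch (added; the setting names below match what from_crawler reads,
# but the module path and values are assumptions, not part of this file). A Scrapy project
# using this pipeline would carry something like the following in settings.py:
#
#     ITEM_PIPELINES = {"nba.pipelines.GamePipeline": 300}
#     DATABASE = "nba.sqlite"   # sqlite file that already contains the teams and games tables
#     DROP = True               # recreate the betting table before the "games" spider runs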
|
the-stack_106_31098 | # Process html tags
from .state_inline import StateInline
from ..common.html_re import HTML_TAG_RE
from ..common.utils import charCodeAt
def isLetter(ch: int):
lc = ch | 0x20 # to lower case
# /* a */ and /* z */
return (lc >= 0x61) and (lc <= 0x7A)
def html_inline(state: StateInline, silent: bool):
pos = state.pos
if not state.md.options.get("html", None):
return False
# Check start
maximum = state.posMax
if charCodeAt(state.src, pos) != 0x3C or pos + 2 >= maximum: # /* < */
return False
# Quick fail on second char
ch = charCodeAt(state.src, pos + 1)
if (
ch != 0x21
and ch != 0x3F # /* ! */
and ch != 0x2F # /* ? */
and not isLetter(ch) # /* / */
):
return False
match = HTML_TAG_RE.search(state.src[pos:])
if not match:
return False
if not silent:
token = state.push("html_inline", "", 0)
token.content = state.src[pos : pos + len(match.group(0))]
state.pos += len(match.group(0))
return True
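
# Hedged usage note (added): this module mirrors markdown-it-py's html_inline rule and is
# normally exercised through the package's inline rule chain rather than called directly,
# e.g. (assuming the markdown_it package is installed):
#
#     from markdown_it import MarkdownIt
#     MarkdownIt("commonmark").renderInline("hi <em class='x'>there</em>")
#     # -> "hi <em class='x'>there</em>"  (the raw tag is passed through as an html_inline token)
#
# When the "html" option is falsy the rule returns False and the tag is escaped as plain text.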
|
the-stack_106_31099 | import googleMapApiAdapter as gMapApi
from loc import loc
from RVGraph import RVGraph
from RTVGraph import RTVGraph
from assignTrips import AssignTrips
class DynamicTripVehicleAssignmentMatcher:
def __init__(self, constraints_param, useGridWorld=False):
'''
constraints_param:
{
# max distance between driver's location and request's start point allowed to be matched
"maxMatchDistance": number
}
useGridWorld:
True or False, indicate whether use grid world to do testing
'''
#self.maxMatchDistance = constraints_param['maxMatchDistance']
self.constraints_param = constraints_param
self.useGridWorld = useGridWorld
def match(self, requests, drivers, currentTime=None, showDetails=False):
'''
Input
requests format:
[{ "id": string,
"userId": string,
"startLocation": {
"latitude": number,
"longitude": number
},
"endLocation": {
"latitude": number,
"longitude": number
}
"timestamp": number,
"isOnCar": False,
]
drivers format:
[{ "userId": string,
"location": {
"latitude": number,
"longitude": number
},
"ongoingRide": [ {
"id": string,
"userId": string,
"startLocation": {
"latitude": number,
"longitude": number
},
"endLocation": {
"latitude": number,
"longitude": number
}
"timestamp": number,
"isOnCar": boolean, <---- if passenger on the car
} ],
"capacity": number,
"timestamp": number }]
'''
# print('entered RV')
g = RVGraph(self.constraints_param, self.useGridWorld)
g.RVGraphPairwiseRequests(requests)
if showDetails:
print("rrGraph: ", g.requestsGraph)
g.RVGraphPairwiseDriverRequest(requests, drivers)
if showDetails:
print("rvGraph: ",g.rvGraph)
# print('entered rtv')
driversInRV = []
for d,_,_ in g.rvGraph:
if d not in driversInRV:
driversInRV.append(d)
g2 = RTVGraph(self.constraints_param, self.useGridWorld)
g2.RTVGraphFindFeasibleTrips(g, driversInRV)
if showDetails:
print("rtvGraph: ",g2.rtvGraph)
# print('entered assignment')
g3=AssignTrips(self.constraints_param["maxCost"], self.useGridWorld)
#g3.assignment_ilp(g2.rtvGraph, showDetails=showDetails)
g3.assignment(g2.rtvGraph, showDetails=showDetails)
if showDetails:
print("assignment: ",g3.assignList)
print("assigned V: ",g3. assignedV)
print("assigned R: ",g3. assignedR)
for r,d in g3.assignList:
d["ongoingRide"].append(r)
remainingReq = [ r for r in requests if r not in g3.assignedR ]
return (g3.assignList, remainingReq)
def Test():
requests = [
{
"id": '1',
"userId": 'eric',
"startLocation": loc['city_one'],
"endLocation": loc['sai_ying_pun_station'],
"timestamp": 1553701760965,
"isOnCar": False
},
{
"id": '2',
"userId": 'tony',
"startLocation": loc['cu'],
"endLocation": loc['hku'],
"timestamp": 1553701760965,
"isOnCar": False
},
{
"id": '3',
"userId": 'alex',
"startLocation": loc['cu'],
"endLocation": loc['city_one'],
"timestamp": 1553701760965,
"isOnCar": False
},
]
onGoingReq1 = {
"id": '4',
"userId": 'David',
"startLocation": loc['cu'],
"endLocation": loc['hku'],
"timestamp": 1553701060965,
"isOnCar": False
}
drivers = [
{
"userId": 'Antony',
"location": loc['cu'],
"ongoingRide": [],
"capacity": 2
},
{
"userId": 'Elven',
"location": loc['science_park'],
"ongoingRide": [],
"capacity": 2
}
]
dMatcher = DynamicTripVehicleAssignmentMatcher({ 'maxMatchDistance': 5000, 'maxCost': 5000 })
M, R = dMatcher.match(requests, drivers)
for r, d in M:
print(r["userId"], '->', d["userId"])
print('remaining request: ', len(R))
if __name__ == "__main__":
Test() |
the-stack_106_31100 | # Import
import numpy as np
import time
import json
import torch
import argparse
from torch import nn
from torch import optim
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets, transforms, models
from PIL import Image
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# input
def get_input_args():
# options
parser = argparse.ArgumentParser(description='Predict flower')
parser.add_argument('--img', type=str, default='flowers/test/1/image_06760.jpg', help='Choose image ')
parser.add_argument('--check_point', type=str, default='checkpoint.pth', help='Choose the checkpoint')
parser.add_argument('--gpu', type=bool, default=False, help='train with gpu')
parser.add_argument('--topK', type=int, default=5, help='Print the top K classes ')
parser.add_argument('--category_to_name', type=str, default='cat_to_name.json', help='Load the JSON file to find names')
return parser.parse_args()
# checkpoint
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
model = models.vgg19(pretrained=True) if checkpoint['arch'] == 'VGG' else models.densenet161(pretrained=True)
model.to(device)
model.classifier = checkpoint['classifier']
model.load_state_dict(checkpoint['state_dict'])
model.class_to_idx = checkpoint['class_to_idx']
return model
def process_image(image):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
im=Image.open(image)
width=im.size[0]
height=im.size[1]
AspectRatio=width/height
if width <= height:
im=im.resize((256,int(256/AspectRatio)))
else:
im=im.resize((int(256*AspectRatio),256))
midWidth=im.size[0]/2
midHeight=im.size[1]/2
cropped_im=im.crop((midWidth-112, midHeight-112, midWidth+112, midHeight+112))
np_image=np.asarray(cropped_im)/255
means=np.array([0.485, 0.456, 0.406])
std=np.array([0.229, 0.224, 0.225])
normalized_image=(np_image-means)/std
final_image=normalized_image.transpose((2, 0, 1))
return torch.from_numpy(final_image)
def predict(image_path, model, topk=5):
''' Predict the class of image using the trained model.
'''
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# predict the class from the image file
image = process_image(image_path)
image = image.unsqueeze(0).float()
image = image.to(device)
model = load_checkpoint(model)
model.eval()
with torch.no_grad():
output = model.forward(image)
ps = torch.exp(output)
probs, indices = torch.topk(ps, topk)
Probs = np.array(probs.data[0])
Indices = np.array(indices.data[0])
with open(args.category_to_name, 'r') as f:
cat_to_name = json.load(f)
# convert the class_to_idx
idx_to_class = {idx:Class for Class,idx in model.class_to_idx.items()}
classes = [idx_to_class[i] for i in Indices]
labels = [cat_to_name[Class] for Class in classes]
return Probs,labels
# Print the top K classes
args = get_input_args()
probs,classes = predict(args.img, args.check_point, args.topK)
print('Left: Possible Type Right: Probability')
for prob, Class in zip(probs, classes):
print("%20s: %f" % (Class, prob)) |
the-stack_106_31101 |
import os
for dirs in os.listdir(os.curdir):
if os.path.isfile(dirs):
continue
count = 0
file1 = open(dirs + "/protection_curve.csv")
for line in file1:
count+=1
if count == 11:
trident_11 = float(line)
if count == 22:
trident_22 = float(line)
print(dirs + "," +str(trident_11) + "," +str(trident_22)) |
the-stack_106_31103 | import pathlib
import setuptools
# The directory containing this file
HERE = pathlib.Path(__file__).parent
# The text of the README file
README = (HERE / "README.md").read_text()
# This call to setup() does all the work
setuptools.setup(
name="rx-scheduler",
version="0.0.1",
description="Function interval runner based on rxpy and asyncio",
long_description=README,
long_description_content_type="text/markdown",
url="https://github.com/ryazantseff/rxpy-scheduler",
author="Maxim Ryazantsev",
author_email="[email protected]",
license="MIT",
keywords = ['Scheduler', 'rxpy', 'async'],
install_requires=[
'asyncio',
'rx',
],
packages=setuptools.find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
)
|
the-stack_106_31105 | #!coding: utf-8
import os
import shutil
import textwrap
from . import engines
from . import provision
from .. import util
from ..script import Script
from ..script import ScriptDirectory
from ..util.compat import get_current_bytecode_suffixes
from ..util.compat import has_pep3147
from ..util.compat import u
def _get_staging_directory():
if provision.FOLLOWER_IDENT:
return "scratch_%s" % provision.FOLLOWER_IDENT
else:
return "scratch"
def staging_env(create=True, template="generic", sourceless=False):
from alembic import command, script
cfg = _testing_config()
if create:
path = os.path.join(_get_staging_directory(), "scripts")
if os.path.exists(path):
shutil.rmtree(path)
command.init(cfg, path, template=template)
if sourceless:
try:
# do an import so that a .pyc/.pyo is generated.
util.load_python_file(path, "env.py")
except AttributeError:
# we don't have the migration context set up yet
# so running the .env py throws this exception.
# theoretically we could be using py_compiler here to
# generate .pyc/.pyo without importing but not really
# worth it.
pass
assert sourceless in (
"pep3147_envonly",
"simple",
"pep3147_everything",
), sourceless
make_sourceless(
os.path.join(path, "env.py"),
"pep3147" if "pep3147" in sourceless else "simple",
)
sc = script.ScriptDirectory.from_config(cfg)
return sc
def clear_staging_env():
shutil.rmtree(_get_staging_directory(), True)
def script_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), "scripts")
path = os.path.join(dir_, "script.py.mako")
with open(path, "w") as f:
f.write(txt)
def env_file_fixture(txt):
dir_ = os.path.join(_get_staging_directory(), "scripts")
txt = (
"""
from alembic import context
config = context.config
"""
+ txt
)
path = os.path.join(dir_, "env.py")
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
with open(path, "w") as f:
f.write(txt)
def _sqlite_file_db(tempname="foo.db"):
dir_ = os.path.join(_get_staging_directory(), "scripts")
url = "sqlite:///%s/%s" % (dir_, tempname)
return engines.testing_engine(url=url)
def _sqlite_testing_config(sourceless=False):
dir_ = os.path.join(_get_staging_directory(), "scripts")
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file(
"""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
% (dir_, url, "true" if sourceless else "false")
)
def _multi_dir_testing_config(sourceless=False, extra_version_location=""):
dir_ = os.path.join(_get_staging_directory(), "scripts")
url = "sqlite:///%s/foo.db" % dir_
return _write_config_file(
"""
[alembic]
script_location = %s
sqlalchemy.url = %s
sourceless = %s
version_locations = %%(here)s/model1/ %%(here)s/model2/ %%(here)s/model3/ %s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
% (
dir_,
url,
"true" if sourceless else "false",
extra_version_location,
)
)
def _no_sql_testing_config(dialect="postgresql", directives=""):
"""use a postgresql url with no host so that
connections guaranteed to fail"""
dir_ = os.path.join(_get_staging_directory(), "scripts")
return _write_config_file(
"""
[alembic]
script_location = %s
sqlalchemy.url = %s://
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
% (dir_, dialect, directives)
)
def _write_config_file(text):
cfg = _testing_config()
with open(cfg.config_file_name, "w") as f:
f.write(text)
return cfg
def _testing_config():
from alembic.config import Config
if not os.access(_get_staging_directory(), os.F_OK):
os.mkdir(_get_staging_directory())
return Config(os.path.join(_get_staging_directory(), "test_alembic.ini"))
def write_script(
scriptdir, rev_id, content, encoding="ascii", sourceless=False
):
old = scriptdir.revision_map.get_revision(rev_id)
path = old.path
content = textwrap.dedent(content)
if encoding:
content = content.encode(encoding)
with open(path, "wb") as fp:
fp.write(content)
pyc_path = util.pyc_file_from_path(path)
if pyc_path:
os.unlink(pyc_path)
script = Script._from_path(scriptdir, path)
old = scriptdir.revision_map.get_revision(script.revision)
if old.down_revision != script.down_revision:
raise Exception(
"Can't change down_revision " "on a refresh operation."
)
scriptdir.revision_map.add_revision(script, _replace=True)
if sourceless:
make_sourceless(
path, "pep3147" if sourceless == "pep3147_everything" else "simple"
)
def make_sourceless(path, style):
import py_compile
py_compile.compile(path)
if style == "simple" and has_pep3147():
pyc_path = util.pyc_file_from_path(path)
suffix = get_current_bytecode_suffixes()[0]
filepath, ext = os.path.splitext(path)
simple_pyc_path = filepath + suffix
shutil.move(pyc_path, simple_pyc_path)
pyc_path = simple_pyc_path
elif style == "pep3147" and not has_pep3147():
raise NotImplementedError()
else:
assert style in ("pep3147", "simple")
pyc_path = util.pyc_file_from_path(path)
assert os.access(pyc_path, os.F_OK)
os.unlink(path)
def three_rev_fixture(cfg):
a = util.rev_id()
b = util.rev_id()
c = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(a, "revision a", refresh=True)
write_script(
script,
a,
"""\
"Rev A"
revision = '%s'
down_revision = None
from alembic import op
def upgrade():
op.execute("CREATE STEP 1")
def downgrade():
op.execute("DROP STEP 1")
"""
% a,
)
script.generate_revision(b, "revision b", refresh=True)
write_script(
script,
b,
u(
"""# coding: utf-8
"Rev B, méil, %3"
revision = '{}'
down_revision = '{}'
from alembic import op
def upgrade():
op.execute("CREATE STEP 2")
def downgrade():
op.execute("DROP STEP 2")
"""
).format(b, a),
encoding="utf-8",
)
script.generate_revision(c, "revision c", refresh=True)
write_script(
script,
c,
"""\
"Rev C"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 3")
def downgrade():
op.execute("DROP STEP 3")
"""
% (c, b),
)
return a, b, c
def multi_heads_fixture(cfg, a, b, c):
"""Create a multiple head fixture from the three-revs fixture"""
d = util.rev_id()
e = util.rev_id()
f = util.rev_id()
script = ScriptDirectory.from_config(cfg)
script.generate_revision(
d, "revision d from b", head=b, splice=True, refresh=True
)
write_script(
script,
d,
"""\
"Rev D"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 4")
def downgrade():
op.execute("DROP STEP 4")
"""
% (d, b),
)
script.generate_revision(
e, "revision e from d", head=d, splice=True, refresh=True
)
write_script(
script,
e,
"""\
"Rev E"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 5")
def downgrade():
op.execute("DROP STEP 5")
"""
% (e, d),
)
script.generate_revision(
f, "revision f from b", head=b, splice=True, refresh=True
)
write_script(
script,
f,
"""\
"Rev F"
revision = '%s'
down_revision = '%s'
from alembic import op
def upgrade():
op.execute("CREATE STEP 6")
def downgrade():
op.execute("DROP STEP 6")
"""
% (f, b),
)
return d, e, f
def _multidb_testing_config(engines):
"""alembic.ini fixture to work exactly with the 'multidb' template"""
dir_ = os.path.join(_get_staging_directory(), "scripts")
databases = ", ".join(engines.keys())
engines = "\n\n".join(
"[%s]\n" "sqlalchemy.url = %s" % (key, value.url)
for key, value in engines.items()
)
return _write_config_file(
"""
[alembic]
script_location = %s
sourceless = false
databases = %s
%s
[loggers]
keys = root
[handlers]
keys = console
[logger_root]
level = WARN
handlers = console
qualname =
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatters]
keys = generic
[formatter_generic]
format = %%(levelname)-5.5s [%%(name)s] %%(message)s
datefmt = %%H:%%M:%%S
"""
% (dir_, databases, engines)
)
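
# Hedged usage sketch (added): alembic's test suite typically drives these helpers in the
# following order; the exact pattern varies per test, so treat this as an illustration only.
#
#     env = staging_env()               # scratch scripts directory built from the generic template
#     cfg = _sqlite_testing_config()    # ini file pointing the env at a scratch sqlite database
#     a, b, c = three_rev_fixture(cfg)  # linear revision history a -> b -> c
#     ...                               # run the commands under test against cfg
#     clear_staging_env()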
|
the-stack_106_31107 | #runas solve(500)
#pythran export solve(int)
def solve(nfact):
prime_list = [2, 3, 5, 7, 11, 13, 17, 19, 23] # Ensure that this is initialised with at least 1 prime
prime_dict = dict.fromkeys(prime_list, 1)
def _isprime(n):
''' Raw check to see if n is prime. Assumes that prime_list is already populated '''
isprime = n >= 2 and 1 or 0
for prime in prime_list: # Check for factors with all primes
if prime * prime > n: break # ... up to sqrt(n)
if not n % prime:
isprime = 0
break
if isprime: prime_dict[n] = 1 # Maintain a dictionary for fast lookup
return isprime
def prime(x):
''' Returns the xth prime '''
lastn = prime_list[-1]
while len(prime_list) <= x: # Keep working until we've got the xth prime
lastn = lastn + 1 # Check the next number
if _isprime(lastn):
prime_list.append(lastn) # Maintain a list for sequential access
return prime_list[x]
def num_factors(n):
''' Returns the number of factors of n, including 1 and n '''
div = 1
x = 0
while n > 1:
c = 1
while not n % prime(x):
c = c + 1
n = n // prime(x)
x = x + 1
div = div * c
return div
for i in range(1, 1000000000):
n = i * (i+1) // 2
if num_factors(n) > nfact:
return n
break
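
# Hedged usage example (added): solve(n) returns the first triangular number with more than
# n divisors (Project Euler 12 asks for n = 500, per the #runas comment above).
if __name__ == '__main__':
    print(solve(5))   # -> 28, the first triangular number with more than five divisors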
|
the-stack_106_31108 | # Search for lines that contain 'New Revision: ' followed by a number
# Then turn the number into a float and append it to nums
# Finally print the length and the average of nums
import re
fname = input('Enter file:')
hand = open(fname)
nums = list()
for line in hand:
line = line.rstrip()
x = re.findall('New Revision: ([0-9]+)', line)
if len(x) == 1:
val = float(x[0])
nums.append(val)
print(len(nums))
print(int(sum(nums)/len(nums)))
|
the-stack_106_31110 | #@+leo-ver=5-thin
#@+node:mork.20041010095009: * @file ../plugins/xsltWithNodes.py
#@+<< docstring >>
#@+node:ekr.20050226120104: ** << docstring >>
""" Adds the Outline:XSLT menu containing XSLT-related commands.
This menu contains the following items:
- Set StyleSheet Node:
- Selects the current node as the xsl stylesheet the plugin will use.
- Process Node with Stylesheet Node:
- Processes the current node as an xml document,
resolving section references and Leo directives.
- Creates a sibling containing the results.
Requires 4Suite 1.0a3 or better, downloadable from http://4Suite.org.
"""
#@-<< docstring >>
#@@language python
#@@tabwidth -4
#@+<< imports >>
#@+node:mork.20041025113509: ** << imports >>
import leo.core.leoGlobals as g
from xml.dom import minidom
import io
StringIO = io.StringIO
try:
import Ft
from Ft.Xml import InputSource
from Ft.Xml.Xslt.Processor import Processor
except ImportError:
g.cantImport("Ft",__name__)
Ft = None
import weakref
#@-<< imports >>
#@+<<parser problems>>
#@+node:mork.20041024091024: ** <<parser problems>>
#@@killcolor
#@+at
# 1. Having space before the start of the document caused it not to work. I fixed
# this by stripping the whitespace from the start and end of the data at xslt
# time.
#
# 2. having a @ right before a tag causes it to not process.
# It appears to be safe to follow this pattern:
# @ </end>
# but not:
# @</end>
#
# I don't know at this point if it's just illegal xml, or a problem in the parser. ??
#@-<<parser problems>>
#@+<<future directions>>
#@+node:mork.20041025101943: ** <<future directions>>
#@+at
# 1. Add more XSLT boilerplate insertions.( done in .3 )
# 2. Maybe add a well-formedness check. (done in .3, test with minidom )
#@-<<future directions>>
__version__ = '0.6'
#@+<< version history >>
#@+node:mork.20041025113211: ** << version history >>
#@@killcolor
#@+at
#
# 0.1: Original code.
#
# 0.2 EKR: Converted to outline.
#
# 0.3: Added more XSLT boilerplate. Added Test with Minidom Discovered parser problem(?).
#
# 0.4 EKR:
# - Added init function.
# 0.5 EKR:
# - Remove 'start2' hook & haveseen dict.
# - Use keywords.get('c') instead of g.top().
# 0.6 EKR:
# - Removed g.top from example code.
#@-<< version history >>
#@+others
#@+node:ekr.20050226120104.1: ** init
def init():
'''Return True if the plugin has loaded successfully.'''
ok = Ft
if ok:
g.registerHandler(('menu2',"new"),addMenu)
g.plugin_signon(__name__)
return ok
#@+node:mork.20041025115037: ** xslt elements
#This dict contains elements that go into a stylesheet
xslt = {
'apply-imports': '<xsl:apply-imports/>',
'apply-templates': "<xsl:apply-templates select ='' mode='' />",
'attribute': "<xsl:attribute name=''> </xsl:attribute>",
'attribute-set': "<xsl:attribute-set name=''> </xsl:attribute-set>",
'call-template': "<xsl:call-template name=''> </xsl:call-template>",
'choose': "<xsl:choose> </xsl:choose>",
'comment': "<xsl:comment> </xsl:comment>",
'copy': "<xsl:copy> </xsl:copy>",
'copy-of': "<xsl:copy-of select='' />",
'decimal-format' : "<xsl:decimal-format />",
'element': "<xsl:element name='' > </xsl:element>",
'fallback': "<xsl:fallback> </xsl:fallback>",
'for-each': "<xsl:for-each select='' > </xsl:for-each>",
'if': "<xsl:if test=''> </xsl:if>",
'import': "<xsl:import href='' />",
'include': "<xsl:include href='' />",
'key': "<xsl:key name='' match='' use='' />",
'message': "<xsl:message> </xsl:message>",
'namespace-alias': "<xsl:namespace-alias stylesheet-prefix='' result-prefix='' />",
'number': "<xsl:number />",
'otherwise': "<xsl:otherwise> </xsl:otherwise>",
'output': "<xsl:output />",
'param': "<xsl:param name='' > </xsl:param>",
'preserve-space': "<xsl:preserve-space elements='' />",
'processing-instruction': "<xsl:processing-instruction name='' > </xsl:processing-instruction>",
'sort': "<xsl:sort />",
'strip-space': "<xsl:strip-space elements='' />",
'stylesheet': "<xsl:stylesheet xmlns:xsl='' version='' > </xsl:stylesheet>",
'template': "<xsl:template > </xsl:template>",
'text': "<xsl:text > </xsl:text>",
'transform': "<xsl:transform > </xsl:transform>",
'value-of': "<xsl:value-of select='' />",
'variable': "<xsl:variable name=''> </xsl:variable>",
    'when': "<xsl:when test='' > </xsl:when>",
'with-param': "<xsl:with-param name=''> </xsl:with-param>",
}
#@+node:mork.20041010095202: ** setStyleNode
stylenodes = weakref.WeakKeyDictionary()
def setStyleNode( c ):
'''this command sets what the current style node is'''
position = c.p
stylenodes[ c ] = position
#@+node:mork.20041010095202.1: ** processDocumentNode
def processDocumentNode( c ):
'''this executes the stylesheet node against the current node'''
try:
if not styleNodeSelected( c ): return
proc = Processor()
stylenode = stylenodes[ c ]
pos = c.p
c.selectPosition( stylenode )
sIO = getString( c )
mdom1 = minidom.parseString( sIO )
sIO = str( mdom1.toxml() )
hstring = str( stylenode.h )
if hstring == "": hstring = "no headline"
stylesource = InputSource.DefaultFactory.fromString( sIO, uri = hstring)
proc.appendStylesheet( stylesource )
c.selectPosition( pos )
xmlnode = pos.v
xIO = getString( c )
mdom2 = minidom.parseString( xIO )
xIO = str( mdom2.toxml())
xhead = str( xmlnode.headString )
if xhead == "": xhead = "no headline"
xmlsource = InputSource.DefaultFactory.fromString( xIO, uri = xhead )
result = proc.run( xmlsource )
nhline = "xsl:transform of " + str( xmlnode.headString )
p2 = pos.insertAfter() # tnode )
p2.setBodyString(result)
p2.setHeadString(nhline)
c.redraw()
except Exception as x:
g.es( 'exception ' + str( x ))
c.redraw()
#@+node:mork.20041025121608: ** addXSLTNode
def addXSLTNode (c):
'''creates a node and inserts some xslt boilerplate'''
pos = c.p
#body = '''<?xml version="1.0"?>'''
# body = '''<?xml version="1.0"?>
#<xsl:transform xmlns:xsl="http:///www.w3.org/1999/XSL/Transform" version="1.0">'''
body = '''<?xml version="1.0"?>
<xsl:transform xmlns:xsl="http:///www.w3.org/1999/XSL/Transform" version="1.0">
</xsl:transform>'''
p2 = pos.insertAfter() # tnode)
p2.setBodyString(body)
p2.setHeadString("xslt stylesheet")
c.redraw()
#@+node:mork.20041010110121: ** addXSLTElement
def addXSLTElement( c , element):
'''adds some xslt to the text node'''
w = c.frame.body.wrapper
w.insert( 'insert', element )
#@+node:mork.20041025113021: ** getString (xsltWithNodes.py)
def getString (c):
'''
This def turns a node into a string using Leo's file-nosent write logic.
'''
at = c.atFileCommands
# EKR: 2017/04/10: needs testing.
at.toString = True
at.writeOpenFile(c.p, sentinels=False)
return cleanString(at.stringOutput)
#@+node:mork.20041025120706: ** doMinidomTest
def doMinidomTest( c ):
'''
This def performs a simple test on a node.
Can the data be successfully parsed by minidom or not?
Results are output to the log.
'''
s = getString( c )
try:
minidom.parseString( s )
except Exception as x:
g.error("Minidom could not parse node because of:\n %s" % x)
return
g.blue("Minidom could parse the node")
#@+node:mork.20041025090303: ** cleanString
def cleanString( data ):
'''This method cleans a string up for the processor. It currently just removes
leading and trailing whitespace'''
val = data.strip()
return val
#@+node:mork.20041010125444: ** jumpToStyleNode
def jumpToStyleNode( c ):
'''Simple method that jumps us to the current XSLT node'''
if not styleNodeSelected( c ): return
pos = stylenodes[ c ]
c.selectPosition( pos )
c.redraw()
#@+node:mork.20041010125444.1: ** styleNodeSelected
def styleNodeSelected( c ):
'''Determines if a XSLT Style node has not been selected'''
if c not in stylenodes:
g.es( "No Style Node selected" )
return False
return True
#@+node:mork.20041010100633: ** addMenu
def addMenu( tag, keywords ):
# pylint: disable=undefined-variable
# c *is* defined.
c = keywords.get('c')
if not c: return
mc = c.frame.menu
# men = men.getMenu( 'Outline' )
# xmen = Tk.Menu(men,tearoff = False)
xmen = mc.createNewMenu ('XSLT',"Outline")
c.add_command(xmen,
label = "Set Stylesheet Node",
command = lambda c = c : setStyleNode(c))
c.add_command(xmen,
label = "Jump To Style Node",
command = lambda c = c: jumpToStyleNode(c))
c.add_command(xmen,
label = "Process Node with Stylesheet Node",
command = lambda c=c : processDocumentNode(c))
xmen.add_separator(xmen)
c.add_command(xmen,
label = "Create Stylesheet Node",
command = lambda c = c : addXSLTNode(c))
# elmen= Tk.Menu( xmen, tearoff = False )
# xmen.add_cascade( label = "Insert XSL Element", menu = elmen )
m2 = mc.createNewMenu ('Insert XSL Element','XSLT')
xsltkeys = list(xslt.keys())
xsltkeys.sort()
for z in xsltkeys:
# pylint: disable=cell-var-from-loop
c.add_command(m2,
label = z,
command = lambda c=c,element=xslt[ z ]: addXSLTElement(c,element))
# men.add_cascade(menu = xmen, label = "XSLT-Node Commands")
m3 = mc.createNewMenu('XSLT-Node Commands','XSLT')
c.add_command(m3,
label = 'Test Node with Minidom',
command = lambda c=c: doMinidomTest(c))
#@+node:mork.20041025100716: ** examples/tests
#@+at
# table.leo contains the xml. xslt is in the other node.
#
# To test this plugin, set the xslt node to be the xslt node.
#
# Process it against the table.leo node.
#@@c
# pylint: disable=pointless-string-statement
r'''
#@+others
#@+node:ekr.20140906065955.18786: *3* table.leo
#@@path /boboo/leo-4.2-final/plugins
#@+node:ekr.20140906065955.18787: *4* @@nosent table.py
import io
StringIO = io.StringIO
import Tkinter as Tk
import tktable as tktab
import leo.core.leoGlobals as g
import csv
import weakref
import Pmw
class CSVVisualizer:
arrays = []
#@+others
#@+node:ekr.20140906065955.18788: *5* init
def __init__( self, c ):
self.c = c
self.arr = tktab.ArrayVar()
CSVVisualizer.arrays.append( self.arr )
self.rows = 0
self.columns = 0
self.type = 'excel'
#@+node:ekr.20140906065955.18789: *5* addData
def addData( self ):
arr = self.arr
reader = self.readData()
hc = False
for n, d in enumerate( reader ):
for n1, d2 in enumerate( d ):
arr.set( "%s,%s" %( n, n1 ), str(d2) )
self.columns = n1 + 1
self.rows = n + 1
return self.columns, self.rows
#@+node:ekr.20140906065955.18790: *5* readData
def readData( self ):
c = self.c
pos = c.p
data = pos.b
cS = StringIO()
cS.write( data )
cS.seek( 0 )
sniff = csv.Sniffer()
self.type = sniff.sniff( data )
reader = csv.reader( cS, self.type )
return reader
#@+node:ekr.20140906065955.18791: *5* writeData
def writeData( self, save ):
pos = self.c.p
n2 = self.rows
n = self.columns
data = []
for z in range( n2 ):
ndata = []
for z2 in range( n ):
ndata.append( self.arr.get( "%s,%s" % ( z, z2 ) ) )
data.append( ndata )
cS = StringIO()
csv_write = csv.writer( cS, self.type )
for z in data:
csv_write.writerow( z )
cS.seek( 0 )
if not save:
p2 = pos.insertAfter() # tnd )
p2.setBodyString(cS.getvalue())
p2.setHeadString("Save of Edited " + str( pos.h))
else:
# pos.setTnodeText( cS.getvalue() )
pos.setBodyString(cS.getvalue())
self.c.redraw()
#@+node:ekr.20140906065955.18792: *5* addColumn
def addColumn( self, tab ):
self.columns = self.columns + 1
tab.configure( cols = self.columns )
for z in range( self.rows ):
self.arr.set( '%s,%s' %( z , self.columns -1 ), "" )
#@+node:ekr.20140906065955.18793: *5* deleteColumn
def deleteColumn( self, tab ):
i = tab.index( 'active' )
if i:
tab.delete_cols( i[ 1 ], 1 )
self.columns = self.columns - 1
#@+node:ekr.20140906065955.18794: *5* addRow
def addRow( self , tab ):
self.rows = self.rows + 1
tab.configure( rows = self.rows )
rc = '%s,0' % (self.rows -1 )
for z in range( self.columns ):
self.arr.set( '%s,%s' %( self.rows - 1, z ), "" )
tab.activate( rc )
tab.focus_set()
#@+node:ekr.20140906065955.18795: *5* deleteRow
def deleteRow( self, tab ):
i = tab.index( 'active' )
if i:
tab.delete_rows( i[ 0 ], 1 )
self.rows = self.rows - 1
#@+node:ekr.20140906065955.18796: *5* createDefaultRecord
def createDefaultRecord( self, rows, columns ):
self.rows = rows
self.columns = columns
for z in range( rows ):
for z1 in range( columns ):
self.arr.set( '%s,%s' %( z, z1 ), "" )
#@+node:ekr.20140906065955.18797: *5* newTable
def newTable( c ):
pos = c.p
npos = pos.insertAfter() # tnd )
npos.setHeadString('New Table')
c.redraw()
c.selectPosition( npos )
viewTable( c , True )
#@+node:ekr.20140906065955.18798: *5* viewTable
def viewTable( c, new = False ):
pos = c.p
dialog = createDialog( pos )
csvv = CSVVisualizer( c )
sframe = Pmw.ScrolledFrame( dialog.interior() )
sframe.pack()
tab = createTable( sframe.interior(), csvv.arr )
createBBox( dialog.interior(), csvv, tab )
if not new:
n = csvv.addData()
else:
n = ( 4, 1 )
csvv.createDefaultRecord( n[ 1 ], n[ 0 ] )
tab.configure( cols = n[ 0 ], rows = n[ 1 ] )
dialog.configure( command = lambda name, d = dialog, csvv = csvv:
fireButton( name, d, csvv ) )
dialog.activate()
#@+node:ekr.20140906065955.18799: *5* fireButton
def fireButton( name, dialog, csvv ):
if name == "Close":
dialog.deactivate()
dialog.destroy()
elif name == "Write To New":
csvv.writeData( False )
elif name == "Save To Current":
csvv.writeData( True )
#@+node:ekr.20140906065955.18800: *5* createDialog
def createDialog( pos ):
dialog = Pmw.Dialog( title = "Table Editor for " + str( pos.h),
buttons = [ 'Save To Current', 'Write To New', 'Close' ] )
dbbox = dialog.component( 'buttonbox' )
for z in range( dbbox.numbuttons() ):
dbbox.button( z ).configure( background = 'white', foreground = 'blue' )
return dialog
#@+node:ekr.20140906065955.18801: *5* createTable
def createTable( parent , arr ):
tab = tktab.Table( parent , rows = 0, cols = 0, variable = arr, sparsearray=1,
background = 'white', foreground = 'blue', selecttype = 'row' )
tab.tag_configure( 'active', background = '#FFE7C6', foreground = 'blue' )
tab.tag_configure( 'sel', background = '#FFE7C6', foreground = 'blue', bd =2 )
tab.pack()
return tab
#@+node:ekr.20140906065955.18802: *5* createBBox
def createBBox( parent, csvv, tab ):
bbox = Pmw.ButtonBox( parent )
bconfig = ( ( "Add Row", lambda tab = tab : csvv.addRow( tab ) ),
( "Delete Row", lambda tab = tab: csvv.deleteRow( tab ) ),
( "Add Column", lambda tab = tab: csvv.addColumn( tab ) ),
( "Delete Column", lambda tab = tab: csvv.deleteColumn( tab ) ) )
for z in bconfig:
bbox.add( z[ 0 ], command = z[ 1 ], background = 'white', foreground = 'blue' )
bbox.pack()
#@+node:ekr.20140906065955.18803: *5* addMenu
haveseen = weakref.WeakKeyDictionary()
def addMenu( tag, keywords ):
c = keywords.get('c') or keywords.get('new_c')
if c in haveseen:
return
haveseen[ c ] = None
men = c.frame.menu
men = men.getMenu( 'Outline' )
tmen = Tk.Menu( men, tearoff = 0 )
men.add_cascade( menu = tmen, label = "Table Commands" )
c.add_command(tmen, label = "Edit Node With Table", command = lambda c = c: viewTable( c ) )
c.add_command(tmen, label = "Create New Table", command = lambda c = c: newTable( c ) )
#@+node:ekr.20140906065955.18804: *5* if 1:
if 1:
registerHandler( ('start2' , 'open2', "new") , addMenu )
__version__ = ".125"
g.plugin_signon( __name__ )
#@-others
#@+node:mork.20041025100851.1: *3* xslt to turn leo file into html
<?xml version="1.0"?>
<xsl:transform xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method = 'xml' />
<xsl:preserve-space elements='leo_file/tnodes/t'/>
<xsl:template match='v'>
<ul type='square'>
<xsl:variable name ='t' select ='@t' />
<h1><xsl:value-of select='vh'/></h1>
<xsl:for-each select='ancestor::leo_file/tnodes/t'>
<xsl:if test="./attribute::tx=$t">
<li>
<pre>
<xsl:value-of select='.' />
</pre>
</li>
</xsl:if>
</xsl:for-each>
<xsl:if test ='./v' >
<xsl:apply-templates select = 'v'/>
</xsl:if>
</ul>
</xsl:template>
<xsl:template match ='leo_file'>
<html><head>
<style>
ul{ position:relative;right=25;
border:thin ridge blue}
li{ position:relative;right=25}
pre{ background:#FFE7C6 }
</style>
</head>
<body>
<xsl:apply-templates select='vnodes'/>
</body>
</html>
</xsl:template>
<xsl:template match = 'vnodes'>
<xsl:for-each select = 'v'>
<frame>
<xsl:apply-templates select ='.'/>
</frame>
</xsl:for-each>
</xsl:template>
</xsl:transform>
#@-others
'''
#@-others
#@-leo
|
the-stack_106_31111 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
from __future__ import unicode_literals, division, absolute_import, print_function
from .compatibility_utils import PY2, bstr, utf8_str
if PY2:
range = xrange
import os
import struct
# note: struct pack, unpack, unpack_from all require bytestring format
# data all the way up to at least python 2.7.5, python 3 okay with bytestring
import re
# note: re requires the pattern to be the exact same type as the data to be searched in python3
# but u"" is not allowed for the pattern itself only b""
from .mobi_index import MobiIndex
from .mobi_utils import fromBase32
from .unipath import pathof
_guide_types = [b'cover',b'title-page',b'toc',b'index',b'glossary',b'acknowledgements',
b'bibliography',b'colophon',b'copyright-page',b'dedication',
b'epigraph',b'foreward',b'loi',b'lot',b'notes',b'preface',b'text']
# locate beginning and ending positions of tag with specific aid attribute
def locate_beg_end_of_tag(ml, aid):
pattern = utf8_str(r'''<[^>]*\said\s*=\s*['"]%s['"][^>]*>''' % aid)
aid_pattern = re.compile(pattern,re.IGNORECASE)
for m in re.finditer(aid_pattern, ml):
plt = m.start()
pgt = ml.find(b'>',plt+1)
return plt, pgt
return 0, 0
# iterate over all tags in block in reverse order, i.e. last tag to first tag
def reverse_tag_iter(block):
end = len(block)
while True:
pgt = block.rfind(b'>', 0, end)
if pgt == -1:
break
plt = block.rfind(b'<', 0, pgt)
if plt == -1:
break
yield block[plt:pgt+1]
end = plt
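# Hedged example (added): reverse_tag_iter walks the tags of a byte block from the end, so
# reverse_tag_iter(b'<p><b>x</b></p>') yields b'</p>', b'</b>', b'<b>', b'<p>' in that order.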
class K8Processor:
def __init__(self, mh, sect, files, debug=False):
self.sect = sect
self.files = files
self.mi = MobiIndex(sect)
self.mh = mh
self.skelidx = mh.skelidx
self.fragidx = mh.fragidx
self.guideidx = mh.guideidx
self.fdst = mh.fdst
self.flowmap = {}
self.flows = None
self.flowinfo = []
self.parts = None
self.partinfo = []
self.linked_aids = set()
self.fdsttbl= [0,0xffffffff]
self.DEBUG = debug
# read in and parse the FDST info which is very similar in format to the Palm DB section
# parsing except it provides offsets into rawML file and not the Palm DB file
# this is needed to split up the final css, svg, etc flow section
# that can exist at the end of the rawML file
if self.fdst != 0xffffffff:
header = self.sect.loadSection(self.fdst)
if header[0:4] == b"FDST":
num_sections, = struct.unpack_from(b'>L', header, 0x08)
self.fdsttbl = struct.unpack_from(bstr('>%dL' % (num_sections*2)), header, 12)[::2] + (mh.rawSize, )
sect.setsectiondescription(self.fdst,"KF8 FDST INDX")
if self.DEBUG:
print("\nFDST Section Map: %d sections" % num_sections)
for j in range(num_sections):
print("Section %d: 0x%08X - 0x%08X" % (j, self.fdsttbl[j],self.fdsttbl[j+1]))
else:
print("\nError: K8 Mobi with Missing FDST info")
# read/process skeleton index info to create the skeleton table
skeltbl = []
if self.skelidx != 0xffffffff:
# for i in range(2):
# fname = 'skel%04d.dat' % i
# data = self.sect.loadSection(self.skelidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
outtbl, ctoc_text = self.mi.getIndexData(self.skelidx, "KF8 Skeleton")
fileptr = 0
for [text, tagMap] in outtbl:
# file number, skeleton name, fragtbl record count, start position, length
skeltbl.append([fileptr, text, tagMap[1][0], tagMap[6][0], tagMap[6][1]])
fileptr += 1
self.skeltbl = skeltbl
if self.DEBUG:
print("\nSkel Table: %d entries" % len(self.skeltbl))
print("table: filenum, skeleton name, frag tbl record count, start position, length")
for j in range(len(self.skeltbl)):
print(self.skeltbl[j])
# read/process the fragment index to create the fragment table
fragtbl = []
if self.fragidx != 0xffffffff:
# for i in range(3):
# fname = 'frag%04d.dat' % i
# data = self.sect.loadSection(self.fragidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
outtbl, ctoc_text = self.mi.getIndexData(self.fragidx, "KF8 Fragment")
for [text, tagMap] in outtbl:
# insert position, ctoc offset (aidtext), file number, sequence number, start position, length
ctocoffset = tagMap[2][0]
ctocdata = ctoc_text[ctocoffset]
fragtbl.append([int(text), ctocdata, tagMap[3][0], tagMap[4][0], tagMap[6][0], tagMap[6][1]])
self.fragtbl = fragtbl
if self.DEBUG:
print("\nFragment Table: %d entries" % len(self.fragtbl))
print("table: file position, link id text, file num, sequence number, start position, length")
for j in range(len(self.fragtbl)):
print(self.fragtbl[j])
# read / process guide index for guide elements of opf
guidetbl = []
if self.guideidx != 0xffffffff:
# for i in range(3):
# fname = 'guide%04d.dat' % i
# data = self.sect.loadSection(self.guideidx + i)
# with open(pathof(fname), 'wb') as f:
# f.write(data)
            outtbl, ctoc_text = self.mi.getIndexData(self.guideidx, "KF8 Guide elements")
for [text, tagMap] in outtbl:
# ref_type, ref_title, frag number
ctocoffset = tagMap[1][0]
ref_title = ctoc_text[ctocoffset]
ref_type = text
fileno = None
if 3 in tagMap:
fileno = tagMap[3][0]
if 6 in tagMap:
fileno = tagMap[6][0]
guidetbl.append([ref_type, ref_title, fileno])
self.guidetbl = guidetbl
if self.DEBUG:
print("\nGuide Table: %d entries" % len(self.guidetbl))
print("table: ref_type, ref_title, fragtbl entry number")
for j in range(len(self.guidetbl)):
print(self.guidetbl[j])
def buildParts(self, rawML):
# now split the rawML into its flow pieces
self.flows = []
for j in range(0, len(self.fdsttbl)-1):
start = self.fdsttbl[j]
end = self.fdsttbl[j+1]
self.flows.append(rawML[start:end])
# the first piece represents the xhtml text
text = self.flows[0]
self.flows[0] = b''
# walk the <skeleton> and fragment tables to build original source xhtml files
# *without* destroying any file position information needed for later href processing
# and create final list of file separation start: stop points and etc in partinfo
if self.DEBUG:
print("\nRebuilding flow piece 0: the main body of the ebook")
self.parts = []
self.partinfo = []
fragptr = 0
baseptr = 0
cnt = 0
for [skelnum, skelname, fragcnt, skelpos, skellen] in self.skeltbl:
baseptr = skelpos + skellen
skeleton = text[skelpos: baseptr]
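            # splice each of this skeleton's fragments back in at its recorded insert position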
for i in range(fragcnt):
[insertpos, idtext, filenum, seqnum, startpos, length] = self.fragtbl[fragptr]
aidtext = idtext[12:-2]
if i == 0:
filename = 'part%04d.xhtml' % filenum
slice = text[baseptr: baseptr + length]
insertpos = insertpos - skelpos
head = skeleton[:insertpos]
tail = skeleton[insertpos:]
actual_inspos = insertpos
if (tail.find(b'>') < tail.find(b'<') or head.rfind(b'>') < head.rfind(b'<')):
# There is an incomplete tag in either the head or tail.
# This can happen for some badly formed KF8 files
print('The fragment table for %s has incorrect insert position. Calculating manually.' % skelname)
bp, ep = locate_beg_end_of_tag(skeleton, aidtext)
if bp != ep:
actual_inspos = ep + 1 + startpos
if insertpos != actual_inspos:
print("fixed corrupt fragment table insert position", insertpos+skelpos, actual_inspos+skelpos)
insertpos = actual_inspos
self.fragtbl[fragptr][0] = actual_inspos + skelpos
skeleton = skeleton[0:insertpos] + slice + skeleton[insertpos:]
baseptr = baseptr + length
fragptr += 1
cnt += 1
self.parts.append(skeleton)
self.partinfo.append([skelnum, 'Text', filename, skelpos, baseptr, aidtext])
assembled_text = b''.join(self.parts)
if self.DEBUG:
outassembled = os.path.join(self.files.k8dir, 'assembled_text.dat')
with open(pathof(outassembled),'wb') as f:
f.write(assembled_text)
# The primary css style sheet is typically stored next followed by any
# snippets of code that were previously inlined in the
# original xhtml but have been stripped out and placed here.
        # This can include local CDATA snippets and svg sections.
# The problem is that for most browsers and ereaders, you can not
# use <img src="imageXXXX.svg" /> to import any svg image that itself
# properly uses an <image/> tag to import some raster image - it
# should work according to the spec but does not for almost all browsers
        # and ereaders, and causes epub validation issues because those raster
        # images are in the manifest but not in the xhtml text - since they are
        # only referenced from an svg image.
# So we need to check the remaining flow pieces to see if they are css
# or svg images. if svg images, we must check if they have an <image />
# and if so inline them into the xhtml text pieces.
# there may be other sorts of pieces stored here but until we see one
# in the wild to reverse engineer we won't be able to tell
self.flowinfo.append([None, None, None, None])
svg_tag_pattern = re.compile(br'''(<svg[^>]*>)''', re.IGNORECASE)
image_tag_pattern = re.compile(br'''(<image[^>]*>)''', re.IGNORECASE)
for j in range(1,len(self.flows)):
flowpart = self.flows[j]
nstr = '%04d' % j
m = re.search(svg_tag_pattern, flowpart)
if m is not None:
# svg
ptype = b'svg'
start = m.start()
m2 = re.search(image_tag_pattern, flowpart)
if m2 is not None:
pformat = b'inline'
pdir = None
fname = None
# strip off anything before <svg if inlining
flowpart = flowpart[start:]
else:
pformat = b'file'
pdir = "Images"
fname = 'svgimg' + nstr + '.svg'
else:
# search for CDATA and if exists inline it
if flowpart.find(b'[CDATA[') >= 0:
ptype = b'css'
flowpart = b'<style type="text/css">\n' + flowpart + b'\n</style>\n'
pformat = b'inline'
pdir = None
fname = None
else:
                    # css - assume a standalone css file
ptype = b'css'
pformat = b'file'
pdir = "Styles"
fname = 'style' + nstr + '.css'
self.flows[j] = flowpart
self.flowinfo.append([ptype, pformat, pdir, fname])
if self.DEBUG:
print("\nFlow Map: %d entries" % len(self.flowinfo))
for fi in self.flowinfo:
print(fi)
print("\n")
print("\nXHTML File Part Position Information: %d entries" % len(self.partinfo))
for pi in self.partinfo:
print(pi)
if False: # self.Debug:
# dump all of the locations of the aid tags used in TEXT
# find id links only inside of tags
# inside any < > pair find all "aid=' and return whatever is inside the quotes
# [^>]* means match any amount of chars except for '>' char
# [^'"] match any amount of chars except for the quote character
# \s* means match any amount of whitespace
print("\npositions of all aid= pieces")
id_pattern = re.compile(br'''<[^>]*\said\s*=\s*['"]([^'"]*)['"][^>]*>''',re.IGNORECASE)
for m in re.finditer(id_pattern, rawML):
[filename, partnum, start, end] = self.getFileInfo(m.start())
[seqnum, idtext] = self.getFragTblInfo(m.start())
value = fromBase32(m.group(1))
print(" aid: %s value: %d at: %d -> part: %d, start: %d, end: %d" % (m.group(1), value, m.start(), partnum, start, end))
print(" %s fragtbl entry %d" % (idtext, seqnum))
return
# get information fragment table entry by pos
def getFragTblInfo(self, pos):
for j in range(len(self.fragtbl)):
[insertpos, idtext, filenum, seqnum, startpos, length] = self.fragtbl[j]
if pos >= insertpos and pos < (insertpos + length):
            # the 'in:' / 'before:' prefix indicates whether pos falls inside or before this fragment
return seqnum, b'in: ' + idtext
if pos < insertpos:
return seqnum, b'before: ' + idtext
return None, None
# get information about the part (file) that exists at pos in original rawML
def getFileInfo(self, pos):
for [partnum, pdir, filename, start, end, aidtext] in self.partinfo:
if pos >= start and pos < end:
return filename, partnum, start, end
return None, None, None, None
# accessor functions to properly protect the internal structure
def getNumberOfParts(self):
return len(self.parts)
def getPart(self,i):
if i >= 0 and i < len(self.parts):
return self.parts[i]
return None
def getPartInfo(self, i):
if i >= 0 and i < len(self.partinfo):
return self.partinfo[i]
return None
def getNumberOfFlows(self):
return len(self.flows)
def getFlow(self,i):
# note flows[0] is empty - it was all of the original text
if i > 0 and i < len(self.flows):
return self.flows[i]
return None
def getFlowInfo(self,i):
# note flowinfo[0] is empty - it was all of the original text
if i > 0 and i < len(self.flowinfo):
return self.flowinfo[i]
return None
def getIDTagByPosFid(self, posfid, offset):
# first convert kindle:pos:fid and offset info to position in file
# (fromBase32 can handle both string types on input)
row = fromBase32(posfid)
off = fromBase32(offset)
[insertpos, idtext, filenum, seqnm, startpos, length] = self.fragtbl[row]
pos = insertpos + off
fname, pn, skelpos, skelend = self.getFileInfo(pos)
if fname is None:
# pos does not exist
# default to skeleton pos instead
print("Link To Position", pos, "does not exist, retargeting to top of target")
pos = self.skeltbl[filenum][3]
fname, pn, skelpos, skelend = self.getFileInfo(pos)
# an existing "id=" or "name=" attribute must exist in original xhtml otherwise it would not have worked for linking.
# Amazon seems to have added its own additional "aid=" inside tags whose contents seem to represent
        # some position information encoded into a Base32 name.
        # so find the closest "id=" attribute before this position by actually searching within that file
idtext = self.getIDTag(pos)
return fname, idtext
def getIDTag(self, pos):
# find the first tag with a named anchor (name or id attribute) before pos
fname, pn, skelpos, skelend = self.getFileInfo(pos)
if pn is None and skelpos is None:
print("Error: getIDTag - no file contains ", pos)
textblock = self.parts[pn]
npos = pos - skelpos
        # if npos is inside a tag then search all text before its end-of-tag marker
pgt = textblock.find(b'>',npos)
plt = textblock.find(b'<',npos)
if plt == npos or pgt < plt:
npos = pgt + 1
# find id and name attributes only inside of tags
# use a reverse tag search since that is faster
# inside any < > pair find "id=" and "name=" attributes return it
# [^>]* means match any amount of chars except for '>' char
# [^'"] match any amount of chars except for the quote character
# \s* means match any amount of whitespace
textblock = textblock[0:npos]
id_pattern = re.compile(br'''<[^>]*\sid\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
name_pattern = re.compile(br'''<[^>]*\sname\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
aid_pattern = re.compile(br'''<[^>]+\s(?:aid|AID)\s*=\s*['"]([^'"]+)['"]''')
for tag in reverse_tag_iter(textblock):
# any ids in the body should default to top of file
if tag[0:6] == b'<body ':
return b''
if tag[0:6] != b'<meta ':
m = id_pattern.match(tag) or name_pattern.match(tag)
if m is not None:
return m.group(1)
m = aid_pattern.match(tag)
if m is not None:
self.linked_aids.add(m.group(1))
return b'aid-' + m.group(1)
return b''
# do we need to do deep copying
def setParts(self, parts):
assert(len(parts) == len(self.parts))
for i in range(len(parts)):
self.parts[i] = parts[i]
# do we need to do deep copying
def setFlows(self, flows):
assert(len(flows) == len(self.flows))
for i in range(len(flows)):
self.flows[i] = flows[i]
# get information about the part (file) that exists at pos in original rawML
def getSkelInfo(self, pos):
for [partnum, pdir, filename, start, end, aidtext] in self.partinfo:
if pos >= start and pos < end:
return [partnum, pdir, filename, start, end, aidtext]
return [None, None, None, None, None, None]
# fileno is actually a reference into fragtbl (a fragment)
def getGuideText(self):
guidetext = b''
for [ref_type, ref_title, fileno] in self.guidetbl:
if ref_type == b'thumbimagestandard':
continue
if ref_type not in _guide_types and not ref_type.startswith(b'other.'):
if ref_type == b'start':
ref_type = b'text'
else:
ref_type = b'other.' + ref_type
[pos, idtext, filenum, seqnm, startpos, length] = self.fragtbl[fileno]
[pn, pdir, filename, skelpos, skelend, aidtext] = self.getSkelInfo(pos)
idtext = self.getIDTag(pos)
linktgt = filename.encode('utf-8')
if idtext != b'':
linktgt += b'#' + idtext
guidetext += b'<reference type="'+ref_type+b'" title="'+ref_title+b'" href="'+utf8_str(pdir)+b'/'+linktgt+b'" />\n'
# opf is encoded utf-8 so must convert any titles properly
guidetext = (guidetext.decode(self.mh.codec)).encode("utf-8")
return guidetext
def getPageIDTag(self, pos):
# find the first tag with a named anchor (name or id attribute) before pos
        # but page map offsets need a little more leeway, so if the offset points
        # into a tag, look for the next ending tag "/>" or "</" and start the search from there.
fname, pn, skelpos, skelend = self.getFileInfo(pos)
if pn is None and skelpos is None:
print("Error: getIDTag - no file contains ", pos)
textblock = self.parts[pn]
npos = pos - skelpos
# if npos inside a tag then search all text before next ending tag
pgt = textblock.find(b'>',npos)
plt = textblock.find(b'<',npos)
if plt == npos or pgt < plt:
# we are in a tag
# so find first ending tag
pend1 = textblock.find(b'/>', npos)
pend2 = textblock.find(b'</', npos)
if pend1 != -1 and pend2 != -1:
pend = min(pend1, pend2)
else:
pend = max(pend1, pend2)
if pend != -1:
npos = pend
else:
npos = pgt + 1
# find id and name attributes only inside of tags
# use a reverse tag search since that is faster
# inside any < > pair find "id=" and "name=" attributes return it
# [^>]* means match any amount of chars except for '>' char
# [^'"] match any amount of chars except for the quote character
# \s* means match any amount of whitespace
textblock = textblock[0:npos]
id_pattern = re.compile(br'''<[^>]*\sid\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
name_pattern = re.compile(br'''<[^>]*\sname\s*=\s*['"]([^'"]*)['"]''',re.IGNORECASE)
for tag in reverse_tag_iter(textblock):
# any ids in the body should default to top of file
if tag[0:6] == b'<body ':
return b''
if tag[0:6] != b'<meta ':
m = id_pattern.match(tag) or name_pattern.match(tag)
if m is not None:
return m.group(1)
return b''
|
the-stack_106_31112 | # coding=utf-8
"""SQLAlchemy session."""
from contextlib import contextmanager
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from sqlalchemy.pool import NullPool
from kombu.utils.compat import register_after_fork
ModelBase = declarative_base()
@contextmanager
def session_cleanup(session):
try:
yield
except Exception:
session.rollback()
raise
finally:
session.close()
def _after_fork_cleanup_session(session):
session._after_fork()
class SessionManager(object):
"""Manage SQLAlchemy sessions."""
def __init__(self):
self._engines = {}
self._sessions = {}
self.forked = False
self.prepared = False
if register_after_fork is not None:
register_after_fork(self, _after_fork_cleanup_session)
def _after_fork(self):
self.forked = True
def get_engine(self, dburi, **kwargs):
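        # after a fork, cache one engine per URI inside the child process; before
        # forking, hand back a throwaway NullPool engine so no pooled connections
        # are shared across forked workers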
if self.forked:
try:
return self._engines[dburi]
except KeyError:
engine = self._engines[dburi] = create_engine(dburi, **kwargs)
return engine
else:
return create_engine(dburi, poolclass=NullPool)
def create_session(self, dburi, short_lived_sessions=False, **kwargs):
engine = self.get_engine(dburi, **kwargs)
if self.forked:
if short_lived_sessions or dburi not in self._sessions:
self._sessions[dburi] = sessionmaker(bind=engine)
return engine, self._sessions[dburi]
else:
return engine, sessionmaker(bind=engine)
def prepare_models(self, engine):
if not self.prepared:
ModelBase.metadata.create_all(engine)
self.prepared = True
def session_factory(self, dburi, **kwargs):
engine, session = self.create_session(dburi, **kwargs)
self.prepare_models(engine)
return session()
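# Example usage (sketch; the SQLite URI below is illustrative only):
#     session = SessionManager().session_factory('sqlite:///results.db')
#     ...  # query/insert through the returned SQLAlchemy session, then session.close()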
|
the-stack_106_31115 | #!/usr/bin/env python3
# -*-coding:utf-8-*-
import os
import core.template
content = """
Character sheet
===============
Name: {name}
Name again: {name}
Age: {age}
"""
def test_get_tags(tmpdir):
template_file = os.path.join(str(tmpdir.realpath()), 'test_template.md')
with open(template_file, 'w') as out:
out.write(content)
template = core.template.Template(template_file)
tags = template.tags
assert 'name' in tags
assert 'age' in tags
assert len(tags) == 2
|
the-stack_106_31116 | #coding=utf-8
# Copyright (c) 2018 Baidu, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import tensorflow as tf
import layers.tf_layers as layers
from utils.utility import seq_length
class LSTM(object):
"""
    LSTM-based text matching network
"""
def __init__(self, config):
self.vocab_size = int(config['vocabulary_size'])
self.emb_size = int(config['embedding_dim'])
self.rnn_hidden_size = int(config['rnn_hidden_size'])
self.hidden_size = int(config['hidden_size'])
self.left_name, self.seq_len1 = config['left_slots'][0]
self.right_name, self.seq_len2 = config['right_slots'][0]
self.task_mode = config['training_mode']
self.emb_layer = layers.EmbeddingEnhancedLayer(self.vocab_size,
self.emb_size, zero_pad=True, scale=False)
self.rnn = layers.LSTMLayer(self.rnn_hidden_size)
self.extract = layers.ExtractLastLayer()
if self.task_mode == "pointwise":
self.n_class = int(config['n_class'])
self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 2, self.hidden_size)
self.fc2_layer = layers.FCLayer(self.hidden_size, self.n_class)
elif self.task_mode == "pairwise":
self.fc1_layer = layers.FCLayer(self.rnn_hidden_size * 1, self.hidden_size)
self.cos_layer = layers.CosineLayer()
else:
logging.error("training mode not supported")
def predict(self, left_slots, right_slots):
"""
predict graph of this net
"""
left = left_slots[self.left_name]
right = right_slots[self.right_name]
left_emb = self.emb_layer.ops(left) # (N, len, D)
right_emb = self.emb_layer.ops(right) # (N, len, D)
## left
left_length = seq_length(left)
left_encoder = self.rnn.ops(left_emb, left_length)
left_rep = self.extract.ops(left_encoder, left_length)
right_length = seq_length(right)
right_encoder = self.rnn.ops(right_emb, right_length)
right_rep = self.extract.ops(right_encoder, right_length)
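        # pointwise: classify the concatenated pair representation;
        # pairwise: score the pair with cosine similarity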
if self.task_mode == "pointwise":
rep_concat = tf.concat([left_rep, right_rep], -1)
hidden1 = self.fc1_layer.ops(rep_concat)
pred = self.fc2_layer.ops(hidden1)
elif self.task_mode == "pairwise":
left_hidden1 = self.fc1_layer.ops(left_rep)
right_hidden1 = self.fc1_layer.ops(right_rep)
pred = self.cos_layer.ops(left_hidden1, right_hidden1)
return pred
|
the-stack_106_31118 | import sys
import pytest
import ibis
from pandas.util import testing as tm
pa = pytest.importorskip('pyarrow')
import pyarrow.parquet as pq # noqa: E402
from ibis.file.parquet import ParquetClient, ParquetTable # noqa: E402
from ibis.file.client import (
FileDatabase, execute_and_reset as execute) # noqa: E402
pytestmark = pytest.mark.skipif(sys.platform == 'win32',
reason='See ibis issue #1698')
@pytest.fixture
def transformed(parquet):
closes = parquet.pq.close
opens = parquet.pq.open
t = opens.inner_join(closes, ['time', 'ticker'])
t = t[opens, closes.close]
t = t.mutate(avg=(t.open + t.close) / 2)
t = t[['time', 'ticker', 'avg']]
return t
def test_creation(parquet):
# we have existing files in our dir
d = parquet.client.root
assert len(list(d.iterdir())) == 1
pqd = d / 'pq'
assert len(list(pqd.iterdir())) == 2
assert len(pq.read_table(str(pqd / 'open.parquet'))) == 50
assert len(pq.read_table(str(pqd / 'close.parquet'))) == 50
def test_client(tmpdir, data):
# construct with a path to a file
d = tmpdir / 'pq'
d.mkdir()
for k, v in data.items():
f = d / "{}.parquet".format(k)
table = pa.Table.from_pandas(v)
pq.write_table(table, str(f))
c = ParquetClient(tmpdir)
assert c.list_databases() == ['pq']
assert c.database().pq.list_tables() == ['close', 'open']
def test_navigation(parquet):
# directory navigation
assert isinstance(parquet, FileDatabase)
result = dir(parquet)
assert result == ['pq']
d = parquet.pq
assert isinstance(d, FileDatabase)
result = dir(d)
assert result == ['close', 'open']
result = d.list_tables()
assert result == ['close', 'open']
opens = d.open
assert isinstance(opens.op(), ParquetTable)
closes = d.close
assert isinstance(closes.op(), ParquetTable)
def test_read(parquet, data):
closes = parquet.pq.close
assert str(closes) is not None
result = closes.execute()
expected = data['close']
tm.assert_frame_equal(result, expected)
result = execute(closes)
tm.assert_frame_equal(result, expected)
def test_write(transformed, tmpdir):
t = transformed
expected = execute(t)
tpath = tmpdir / 'new_dir'
tpath.mkdir()
path = tpath / 'foo.parquet'
assert not path.exists()
t = transformed[['time', 'ticker', 'avg']]
c = ibis.parquet.connect(tpath)
c.insert('foo.parquet', t)
execute(t)
assert path.exists()
# readback
c = ParquetClient(str(tpath)).database()
result = c.list_databases()
assert result == []
result = c.foo.execute()
tm.assert_frame_equal(result, expected)
path = tpath / 'foo.parquet'
assert path.exists()
|
the-stack_106_31119 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import sys
import click
from polyaxon_cli.cli.check import check_polyaxonfile, check_polyaxonfile_kind
from polyaxon_cli.cli.project import get_project_or_local
from polyaxon_cli.client import PolyaxonClient
from polyaxon_cli.client.exceptions import (
PolyaxonClientException,
PolyaxonHTTPError,
PolyaxonShouldExitError
)
from polyaxon_cli.logger import clean_outputs
from polyaxon_cli.schemas import kinds
from polyaxon_cli.utils import indentation
from polyaxon_cli.utils.formatting import Printer
def get_tensorboard_url(user, project_name, experiment=None, group=None):
if experiment:
return "{}/tensorboard/{}/{}/experiments/{}/\n".format(
PolyaxonClient().api_config.http_host,
user,
project_name,
experiment)
if group:
return "{}/tensorboard/{}/{}/groups/{}/\n".format(
PolyaxonClient().api_config.http_host,
user,
project_name,
group)
return "{}/tensorboard/{}/{}/\n".format(PolyaxonClient().api_config.http_host,
user,
project_name)
@click.group()
@click.option('--project', '-p', type=str, help="The project name, e.g. 'mnist' or 'adam/mnist'.")
@click.option('--group', '-g', type=int, help="The group id number.")
@click.option('--experiment', '-xp', type=int, help="The experiment id number.")
@click.pass_context
@clean_outputs
def tensorboard(ctx, project, group, experiment):
ctx.obj = ctx.obj or {}
ctx.obj['project'] = project
ctx.obj['group'] = group
ctx.obj['experiment'] = experiment
@tensorboard.command()
@click.pass_context
@clean_outputs
def url(ctx):
"""Prints the tensorboard url for project/experiment/experiment group.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples for project tensorboards:
\b
```bash
$ polyaxon tensorboard url
```
\b
```bash
$ polyaxon tensorboard -p mnist url
```
Examples for experiment tensorboards:
\b
```bash
$ polyaxon tensorboard -xp 1 url
```
Examples for experiment group tensorboards:
\b
```bash
$ polyaxon tensorboard -g 1 url
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
group = ctx.obj.get('group')
experiment = ctx.obj.get('experiment')
if experiment:
try:
response = PolyaxonClient().experiment.get_experiment(
username=user,
project_name=project_name,
experiment_id=experiment)
obj = 'experiment {}'.format(experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get experiment `{}`.'.format(experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
elif group:
try:
response = PolyaxonClient().experiment_group.get_experiment_group(
username=user,
project_name=project_name,
group_id=group)
obj = 'group `{}`.'.format(group)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get group `{}`.'.format(group))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
else:
try:
response = PolyaxonClient().project.get_project(
username=user,
project_name=project_name)
obj = 'project `{}`.'.format(project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not get project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.has_tensorboard:
click.echo(get_tensorboard_url(user=user,
project_name=project_name,
experiment=experiment,
group=group))
else:
Printer.print_warning('This `{}` does not have a running tensorboard'.format(obj))
click.echo('You can start tensorboard with this command: polyaxon tensorboard start --help')
@tensorboard.command()
@click.option('--file', '-f', multiple=True, type=click.Path(exists=True),
help='The polyaxon files to run.')
@click.pass_context
@clean_outputs
def start(ctx, file): # pylint:disable=redefined-builtin
"""Start a tensorboard deployment for project/experiment/experiment group.
Project tensorboard will aggregate all experiments under the project.
Experiment group tensorboard will aggregate all experiments under the group.
Experiment tensorboard will show all metrics for an experiment.
Uses [Caching](/references/polyaxon-cli/#caching)
Example: using the default tensorflow image 1.4.1.
\b
```bash
$ polyaxon tensorboard start
```
Example: with custom image and resources
\b
```bash
$ polyaxon tensorboard start -f file -f file_override ...
```
Example: starting a tensorboard for an experiment group
\b
```bash
$ polyaxon tensorboard -g 1 start -f file
```
Example: starting a tensorboard for an experiment
\b
```bash
$ polyaxon tensorboard -xp 112 start -f file
```
"""
specification = None
job_content = None
if file:
specification = check_polyaxonfile(file, log=False).specification
if specification:
# pylint:disable=protected-access
check_polyaxonfile_kind(specification=specification, kind=kinds.TENSORBOARD)
job_content = specification.raw_data
user, project_name = get_project_or_local(ctx.obj.get('project'))
group = ctx.obj.get('group')
experiment = ctx.obj.get('experiment')
if experiment:
try:
response = PolyaxonClient().experiment.start_tensorboard(
username=user,
project_name=project_name,
experiment_id=experiment,
content=job_content,
is_managed=True,
)
obj = 'experiment `{}`'.format(experiment)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard experiment `{}`.'.format(experiment))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
elif group:
try:
response = PolyaxonClient().experiment_group.start_tensorboard(
username=user,
project_name=project_name,
group_id=group,
content=job_content,
is_managed=True,
)
obj = 'group `{}`'.format(group)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard group `{}`.'.format(group))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
else:
try:
response = PolyaxonClient().project.start_tensorboard(
username=user,
project_name=project_name,
content=job_content,
is_managed=True,
)
obj = 'project `{}`'.format(project_name)
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not start tensorboard project `{}`.'.format(project_name))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
if response.status_code == 200:
Printer.print_header("A tensorboard for this {} is already running on:".format(obj))
click.echo(get_tensorboard_url(user=user,
project_name=project_name,
experiment=experiment,
group=group))
sys.exit(0)
if response.status_code != 201:
Printer.print_error('Something went wrong, Tensorboard was not created.')
sys.exit(1)
Printer.print_success('Tensorboard is being deployed for {}'.format(obj))
indentation.puts("It may take some time before you can access tensorboard.\n")
indentation.puts("Your tensorboard will be available on:\n")
with indentation.indent(4):
indentation.puts(get_tensorboard_url(user, project_name, experiment, group))
@tensorboard.command()
@click.option('--yes', '-y', is_flag=True, default=False,
help='Automatic yes to prompts. '
'Assume "yes" as answer to all prompts and run non-interactively.')
@click.pass_context
@clean_outputs
def stop(ctx, yes):
"""Stops the tensorboard deployment for project/experiment/experiment group if it exists.
Uses [Caching](/references/polyaxon-cli/#caching)
Examples: stopping project tensorboard
\b
```bash
$ polyaxon tensorboard stop
```
Examples: stopping experiment group tensorboard
\b
```bash
$ polyaxon tensorboard -g 1 stop
```
Examples: stopping experiment tensorboard
\b
```bash
$ polyaxon tensorboard -xp 112 stop
```
"""
user, project_name = get_project_or_local(ctx.obj.get('project'))
group = ctx.obj.get('group')
experiment = ctx.obj.get('experiment')
if experiment:
obj = 'experiment `{}`'.format(experiment)
elif group:
obj = 'group `{}`'.format(group)
else:
obj = 'project `{}/{}`'.format(user, project_name)
    if not yes and not click.confirm("Are you sure you want to stop tensorboard "
                                     "for {}".format(obj)):
        click.echo('Exiting without stopping tensorboard.')
sys.exit(1)
if experiment:
try:
PolyaxonClient().experiment.stop_tensorboard(
username=user,
project_name=project_name,
experiment_id=experiment)
Printer.print_success('Tensorboard is being deleted')
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop tensorboard {}.'.format(obj))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
elif group:
try:
PolyaxonClient().experiment_group.stop_tensorboard(
username=user,
project_name=project_name,
group_id=group)
Printer.print_success('Tensorboard is being deleted')
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop tensorboard {}.'.format(obj))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
else:
try:
PolyaxonClient().project.stop_tensorboard(
username=user,
project_name=project_name)
Printer.print_success('Tensorboard is being deleted')
except (PolyaxonHTTPError, PolyaxonShouldExitError, PolyaxonClientException) as e:
Printer.print_error('Could not stop tensorboard {}.'.format(obj))
Printer.print_error('Error message `{}`.'.format(e))
sys.exit(1)
|
the-stack_106_31123 | """
Postgresql workload class
"""
import logging
import random
from prettytable import PrettyTable
from ocs_ci.ocs.ripsaw import RipSaw
from ocs_ci.utility.utils import TimeoutSampler, run_cmd
from ocs_ci.ocs.utils import get_pod_name_by_pattern
from ocs_ci.utility import utils, templating
from ocs_ci.ocs.exceptions import UnexpectedBehaviour, CommandFailed, ResourceWrongStatusException
from ocs_ci.ocs.resources.ocs import OCS
from ocs_ci.ocs import constants
from subprocess import CalledProcessError
from ocs_ci.ocs.resources.pod import get_all_pods, get_pod_obj, get_operator_pods
from tests.helpers import wait_for_resource_state
from ocs_ci.ocs.constants import RIPSAW_NAMESPACE, RIPSAW_CRD
from ocs_ci.utility.spreadsheet.spreadsheet_api import GoogleSpreadSheetAPI
log = logging.getLogger(__name__)
class Postgresql(RipSaw):
"""
Postgresql workload operation
"""
def __init__(self, **kwargs):
"""
Initializer function
"""
super().__init__(**kwargs)
self._apply_crd(crd=RIPSAW_CRD)
def _apply_crd(self, crd):
"""
Apply the CRD
Args:
crd (str): yaml to apply
"""
RipSaw.apply_crd(self, crd=crd)
def setup_postgresql(self, replicas):
"""
Deploy postgres sql server
Args:
replicas (int): Number of postgresql pods to be deployed
Raises:
CommandFailed: If PostgreSQL server setup fails
"""
log.info("Deploying postgres database")
try:
pgsql_service = templating.load_yaml(
constants.PGSQL_SERVICE_YAML
)
pgsql_cmap = templating.load_yaml(
constants.PGSQL_CONFIGMAP_YAML
)
pgsql_sset = templating.load_yaml(
constants.PGSQL_STATEFULSET_YAML
)
pgsql_sset['spec']['replicas'] = replicas
self.pgsql_service = OCS(**pgsql_service)
self.pgsql_service.create()
self.pgsql_cmap = OCS(**pgsql_cmap)
self.pgsql_cmap.create()
self.pgsql_sset = OCS(**pgsql_sset)
self.pgsql_sset.create()
self.pod_obj.wait_for_resource(
condition='Running',
selector='app=postgres',
resource_count=replicas,
timeout=3600
)
except (CommandFailed, CalledProcessError) as cf:
log.error('Failed during setup of PostgreSQL server')
raise cf
self.pgsql_is_setup = True
log.info("Successfully deployed postgres database")
def create_pgbench_benchmark(
self, replicas, clients=None, threads=None,
transactions=None, scaling_factor=None,
timeout=None
):
"""
Create pgbench benchmark pods
Args:
replicas (int): Number of pgbench pods to be deployed
clients (int): Number of clients
threads (int): Number of threads
transactions (int): Number of transactions
scaling_factor (int): scaling factor
timeout (int): Time in seconds to wait
Returns:
List: pgbench pod objects list
"""
pg_obj_list = []
for i in range(replicas):
log.info("Create resource file for pgbench workload")
pg_data = templating.load_yaml(constants.PGSQL_BENCHMARK_YAML)
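            # patch the benchmark CR so each pgbench pod gets a unique name and
            # targets its own postgres service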
pg_data['metadata']['name'] = 'pgbench-benchmark' + f"{i}"
pg_data['spec']['workload']['args']['databases'][0][
'host'
] = "postgres-" + f"{i}" + ".postgres"
if clients is not None:
pg_data['spec']['workload']['args']['clients'][0] = clients
if threads is not None:
pg_data['spec']['workload']['args']['threads'] = threads
if transactions is not None:
pg_data[
'spec'
]['workload']['args']['transactions'] = transactions
if scaling_factor is not None:
pg_data[
'spec'
]['workload']['args']['scaling_factor'] = scaling_factor
pg_obj = OCS(**pg_data)
pg_obj_list.append(pg_obj)
pg_obj.create()
# Confirm that expected pgbench pods are spinned
log.info("Searching the pgbench pods by its name pattern")
timeout = timeout if timeout else 300
for pgbench_pods in TimeoutSampler(
timeout, replicas, get_pod_name_by_pattern,
'pgbench-1-dbs-client', RIPSAW_NAMESPACE
):
try:
if len(pgbench_pods) == replicas:
log.info(
f"Expected number of pgbench pods are "
f"found: {replicas}"
)
break
except IndexError:
log.info(
f'Expected number of pgbench pods are {replicas} '
f'but only found {len(pgbench_pods)}'
)
return pg_obj_list
def get_postgres_pods(self):
"""
Get all postgres pods
Returns:
List: postgres pod objects list
"""
return get_all_pods(
namespace=RIPSAW_NAMESPACE, selector=['postgres']
)
def get_pgbench_pods(self):
"""
Get all pgbench pods
Returns:
List: pgbench pod objects list
"""
return [
get_pod_obj(
pod, RIPSAW_NAMESPACE
) for pod in get_pod_name_by_pattern('pgbench', RIPSAW_NAMESPACE)
]
def delete_pgbench_pods(self, pg_obj_list):
"""
Delete all pgbench pods on cluster
Returns:
bool: True if deleted, False otherwise
"""
log.info("Delete pgbench Benchmark")
for pgbench_pod in pg_obj_list:
pgbench_pod.delete(force=True)
def is_pgbench_running(self):
"""
Check if pgbench is running
Returns:
bool: True if pgbench is running; False otherwise
"""
        pod_objs = self.get_pgbench_pods()
        for pod in pod_objs:
            state = pod.get().get(
                'status'
            ).get('containerStatuses')[0].get('state')
            # 'state' is a dict keyed by the current phase, e.g. {'running': {...}}
            if 'running' in state:
                log.info("One or more pgbench pods are in running state")
                return True
        return False
def get_pgbench_status(self, pgbench_pod_name):
"""
Get pgbench status
Args:
pgbench_pod_name (str): Name of the pgbench pod
Returns:
str: state of pgbench pod (running/completed)
"""
pod_obj = get_pod_obj(pgbench_pod_name, namespace=RIPSAW_NAMESPACE)
status = pod_obj.get().get(
'status'
).get('containerStatuses')[0].get('state')
return 'running' if list(status.keys())[0] == 'running' else status[
'terminated'
]['reason']
def wait_for_postgres_status(
self, status=constants.STATUS_RUNNING, timeout=300
):
"""
Wait for postgres pods status to reach running/completed
Args:
status (str): status to reach Running or Completed
timeout (int): Time in seconds to wait
"""
log.info(f"Waiting for postgres pods to be reach {status} state")
postgres_pod_objs = self.get_postgres_pods()
for postgres_pod_obj in postgres_pod_objs:
wait_for_resource_state(
resource=postgres_pod_obj, state=status, timeout=timeout
)
def wait_for_pgbench_status(self, status, timeout=None):
"""
Wait for pgbench benchmark pods status to reach running/completed
Args:
status (str): status to reach Running or Completed
timeout (int): Time in seconds to wait
"""
"""
Sometimes with the default values in the benchmark yaml the pgbench pod is not
getting completed within the specified time and the tests are failing.
I think it is varying with the infrastructure.
So, for now we set the timeout to 30 mins and will start monitoring each pg bench
pods for each run.Based on the results we will define the timeout again
"""
timeout = timeout if timeout else 1800
# Wait for pg_bench pods to initialized and running
log.info(f"Waiting for pgbench pods to be reach {status} state")
pgbench_pod_objs = self.get_pgbench_pods()
for pgbench_pod_obj in pgbench_pod_objs:
try:
wait_for_resource_state(
resource=pgbench_pod_obj, state=status, timeout=timeout
)
except ResourceWrongStatusException:
output = run_cmd(f'oc logs {pgbench_pod_obj.name}')
error_msg = f'{pgbench_pod_obj.name} did not reach to {status} state after {timeout} sec\n{output}'
log.error(error_msg)
raise UnexpectedBehaviour(error_msg)
def validate_pgbench_run(self, pgbench_pods, print_table=True):
"""
Validate pgbench run
Args:
pgbench pods (list): List of pgbench pods
Returns:
pg_output (list): pgbench outputs in list
"""
all_pgbench_pods_output = []
for pgbench_pod in pgbench_pods:
log.info(f"pgbench_client_pod===={pgbench_pod.name}====")
output = run_cmd(f'oc logs {pgbench_pod.name} -n {RIPSAW_NAMESPACE}')
pg_output = utils.parse_pgsql_logs(output)
log.info(
"*******PGBench output log*********\n"
f"{pg_output}"
)
# for data in all_pgbench_pods_output:
for data in pg_output:
run_id = list(data.keys())
latency_avg = data[run_id[0]]['latency_avg']
if not latency_avg:
raise UnexpectedBehaviour(
"PGBench failed to run, "
"no data found on latency_avg"
)
log.info(f"PGBench on {pgbench_pod.name} completed successfully")
all_pgbench_pods_output.append((pg_output, pgbench_pod.name))
if print_table:
pgbench_pod_table = PrettyTable()
pgbench_pod_table.field_names = [
'pod_name', 'scaling_factor', 'num_clients', 'num_threads',
'trans_client', 'actually_trans', 'latency_avg', 'lat_stddev',
'tps_incl', 'tps_excl'
]
for pgbench_pod_out in all_pgbench_pods_output:
for pod_output in pgbench_pod_out[0]:
for pod in pod_output.values():
pgbench_pod_table.add_row(
[pgbench_pod_out[1], pod['scaling_factor'],
pod['num_clients'], pod['num_threads'],
pod['number_of_transactions_per_client'],
pod['number_of_transactions_actually_processed'],
pod['latency_avg'], pod['lat_stddev'],
pod['tps_incl'], pod['tps_excl']]
)
log.info(f'\n{pgbench_pod_table}\n')
return all_pgbench_pods_output
def get_pgsql_nodes(self):
"""
Get nodes that contain a pgsql app pod
Returns:
list: Cluster node OCP objects
"""
pgsql_pod_objs = self.pod_obj.get(
selector=constants.PGSQL_APP_LABEL, all_namespaces=True
)
log.info("Create a list of nodes that contain a pgsql app pod")
nodes_set = set()
for pod in pgsql_pod_objs['items']:
log.info(
f"pod {pod['metadata']['name']} located on "
f"node {pod['spec']['nodeName']}"
)
nodes_set.add(pod['spec']['nodeName'])
return list(nodes_set)
def respin_pgsql_app_pod(self):
"""
Respin the pgsql app pod
Returns:
pod status
"""
app_pod_list = get_operator_pods(
constants.PGSQL_APP_LABEL, constants.RIPSAW_NAMESPACE
)
        app_pod = random.choice(app_pod_list)
log.info(f"respin pod {app_pod.name}")
app_pod.delete(wait=True, force=False)
wait_for_resource_state(
resource=app_pod, state=constants.STATUS_RUNNING, timeout=300
)
def get_pgbech_pod_status_table(self, pgbench_pods):
"""
Get pgbench pod data and print results on a table
Args:
pgbench pods (list): List of pgbench pods
"""
pgbench_pod_table = PrettyTable()
pgbench_pod_table.field_names = [
'pod_name', 'scaling_factor', 'num_clients', 'num_threads',
'trans_client', 'actually_trans', 'latency_avg', 'lat_stddev',
'tps_incl', 'tps_excl'
]
for pgbench_pod in pgbench_pods:
output = run_cmd(f'oc logs {pgbench_pod.name}')
pg_output = utils.parse_pgsql_logs(output)
for pod_output in pg_output:
for pod in pod_output.values():
pgbench_pod_table.add_row(
[pgbench_pod.name, pod['scaling_factor'],
pod['num_clients'], pod['num_threads'],
pod['number_of_transactions_per_client'],
pod['number_of_transactions_actually_processed'],
pod['latency_avg'], pod['lat_stddev'],
pod['tps_incl'], pod['tps_excl']]
)
log.info(f'\n{pgbench_pod_table}\n')
def export_pgoutput_to_googlesheet(self, pg_output, sheet_name, sheet_index):
"""
Collect pgbench output to google spreadsheet
Args:
pg_output (list): pgbench outputs in list
sheet_name (str): Name of the sheet
sheet_index (int): Index of sheet
"""
# Collect data and export to Google doc spreadsheet
g_sheet = GoogleSpreadSheetAPI(
sheet_name=sheet_name, sheet_index=sheet_index
)
log.info("Exporting pgoutput data to google spreadsheet")
for pgbench_pod in range(len(pg_output)):
for run in range(len(pg_output[pgbench_pod][0])):
run_id = list(pg_output[pgbench_pod][0][run].keys())[0]
lat_avg = pg_output[
pgbench_pod
][0][run][run_id]['latency_avg']
lat_stddev = pg_output[
pgbench_pod
][0][run][run_id]['lat_stddev']
tps_incl = pg_output[
pgbench_pod
                ][0][run][run_id]['tps_incl']
tps_excl = pg_output[pgbench_pod][0][run][run_id]['tps_excl']
g_sheet.insert_row(
[f"Pgbench-pod{pg_output[pgbench_pod][1]}-run-{run_id}",
int(lat_avg),
int(lat_stddev),
int(tps_incl),
int(tps_excl)], 2
)
g_sheet.insert_row(
["", "latency_avg", "lat_stddev", "lat_stddev", "tps_excl"], 2
)
# Capturing versions(OCP, OCS and Ceph) and test run name
g_sheet.insert_row(
[f"ocp_version:{utils.get_cluster_version()}",
f"ocs_build_number:{utils.get_ocs_build_number()}",
f"ceph_version:{utils.get_ceph_version()}",
f"test_run_name:{utils.get_testrun_name()}"], 2
)
def cleanup(self):
"""
Clean up
"""
log.info("Deleting postgres pods and configuration")
if self.pgsql_is_setup:
self.pgsql_sset.delete()
self.pgsql_cmap.delete()
self.pgsql_service.delete()
log.info("Deleting pgbench pods")
pods_obj = self.get_pgbench_pods()
for pod in pods_obj:
pod.delete()
pod.ocp.wait_for_delete(pod.name)
log.info("Deleting ripsaw configuration")
RipSaw.cleanup(self)
|
the-stack_106_31125 | """Copyright 2020 ETH Zurich, Seonwook Park
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from collections import OrderedDict
import logging
import math
import os
import socket
import time
import traceback
import gspread
import numpy as np
from oauth2client.service_account import ServiceAccountCredentials
from ..core import DefaultConfig
config = DefaultConfig()
logger = logging.getLogger(__name__)
class GoogleSheetLogger:
"""Log selected outputs to a predefined Google Sheet.
Many thanks to Emre for the idea and the initial code!
"""
first_column_name = 'Identifier'
def __init__(self, model):
self.__model = model
to_write = self.fill_in_basic_info()
if config.resume_from == '':
to_write['Start Time'] = time.strftime('%Y/%m/%d %H:%M:%S')
# Write experiment information to create row for future logging
try:
self.ready = True
self.update_or_append_row(to_write)
except Exception:
self.ready = False
traceback.print_exc()
return
def fill_in_basic_info(self):
to_write = OrderedDict()
to_write['Identifier'] = self.__model.identifier
to_write['Last Updated'] = time.strftime('%Y/%m/%d %H:%M:%S')
# Write config parameters
config_kv = config.get_all_key_values()
config_kv = dict([
(k, v) for k, v in config_kv.items()
if not k.startswith('datasrc_')
and not k.startswith('gsheet_')
])
for k in sorted(list(config_kv.keys())):
to_write[k] = config_kv[k]
# Get hostname
to_write['hostname'] = socket.getfqdn()
# Get LSF job ID if exists
if 'LSB_JOBID' in os.environ:
to_write['LSF Job ID'] = os.environ['LSB_JOBID']
return to_write
def update_or_append_row(self, values):
assert isinstance(values, dict)
if not self.ready: # Silently skip if init failed
return
if not os.path.isfile(config.gsheet_secrets_json_file):
logger.info('Not logging to Google Sheets due to missing authentication information.')
logger.info('> Please set the configuration entry: "gsheet_secrets_json_file".')
return
if len(config.gsheet_workbook_key) == 0:
logger.info('Not logging to Google Sheets due to missing workbook key.')
logger.info('> Please set the configuration entry: "gsheet_workbook_key".')
return
# Add some missing info automatically
basic_info = self.fill_in_basic_info()
for k, v in basic_info.items():
values[k] = v
# Authenticate
try:
credentials = ServiceAccountCredentials.from_json_keyfile_name(
filename=config.gsheet_secrets_json_file,
scopes=[
'https://www.googleapis.com/auth/spreadsheets',
],
)
client = gspread.authorize(credentials)
except: # noqa
logger.debug('Could not authenticate with Drive API.')
traceback.print_exc()
return
# Decide on sheet name to select
sheet_name = self.__model.identifier.split('/')[0]
# Find a workbook by name.
workbook = client.open_by_key(config.gsheet_workbook_key)
try:
sheet = workbook.worksheet(sheet_name)
except: # noqa
try:
sheet = workbook.add_worksheet(title=sheet_name,
rows=1000, cols=20)
except: # noqa
logger.debug('Could not access/add worksheet.')
traceback.print_exc()
return
try:
current_values = sheet.get_all_values()
except: # noqa
logger.debug('Could not get values from worksheet.')
traceback.print_exc()
return
if len(current_values) == 0:
try:
sheet.update_cell(1, 1, self.first_column_name)
except: # noqa
logger.debug('Could not insert first cell.')
traceback.print_exc()
return
header = [self.first_column_name]
else:
header = current_values[0]
identifier = values[self.first_column_name]
# Construct new row
is_header_changed = False
new_row = [None] * len(header)
for key, value in values.items():
if key not in header:
header.append(key)
new_row.append(None)
is_header_changed = True
index = header.index(key)
new_row[index] = value
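            # normalize values that gspread cannot serialize: NaN/Inf, numpy
            # scalars, 0-d arrays and other sequences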
if isinstance(value, float) or isinstance(value, int):
if math.isnan(value):
new_row[index] = 'NaN'
elif isinstance(value, np.generic):
if np.any(np.isnan(value)):
new_row[index] = 'NaN'
elif np.isinf(value):
new_row[index] = 'Inf'
else:
new_row[index] = np.asscalar(value)
elif isinstance(value, np.ndarray) and value.ndim == 0:
new_row[index] = value.item()
elif hasattr(value, '__len__') and len(value) > 0:
new_row[index] = str(value)
# Update header as necessary
cells_to_update = []
if is_header_changed:
cells_to_update += [
gspread.models.Cell(1, col+1, value)
for col, value in enumerate(header)
]
# Either update an existing row or append new row
try:
row_index = [r[0] for r in current_values].index(identifier)
cells_to_update += [
gspread.models.Cell(row_index+1, col_index+1, value=value)
for col_index, value in enumerate(new_row)
if value is not None # Don't remove existing values
]
except: # noqa
sheet.append_row(new_row)
# Run all necessary update operations
if len(cells_to_update) > 0:
try:
sheet.update_cells(cells_to_update)
except: # noqa
logger.debug('Error in API call to update cells.')
traceback.print_exc()
return
|
the-stack_106_31126 | '''Base sequence classes.'''
import collections
import coral
from coral.sequence._sequence import Sequence
from coral.constants.molecular_bio import COMPLEMENTS
class NucleicAcid(Sequence):
'''Abstract sequence container for a single nucleic acid sequence
molecule.'''
def __init__(self, sequence, material, circular=False, run_checks=True,
any_char='N'):
'''
:param sequence: Input sequence.
:type sequence: str
:param material: Material type (dna, rna)
:type material: str
:param circular: The topology of the sequence - if the ends connect,
(a circular sequence), set to True. Otherwise, set to
False. Enables operations like .rotate().
:type circular: bool
:param run_checks: Check inputs / formats (disabling increases speed):
alphabet check
case
:param any_char: Character representing \'any\', e.g. N for DNA.
:type any_char: str
:type run_checks: bool
:returns: coral.sequence.Sequence instance.
'''
super(NucleicAcid, self).__init__(sequence, material,
run_checks=run_checks,
any_char=any_char)
self.circular = circular
def copy(self):
return type(self)(self.seq, self.material, circular=self.circular,
run_checks=False)
def circularize(self):
'''Circularize the sequence, if linear.
:returns: A circularized version of the current sequence.
:rtype: coral.sequence._sequence.Sequence
'''
copy = self.copy()
copy.circular = True
return copy
def complement(self):
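        '''Return the per-base complement of the sequence (same orientation).'''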
copy = self.copy()
code = dict(COMPLEMENTS[self.material])
copy.seq = ''.join([code[str(base)] for base in copy])
return copy
def gc(self):
'''Find the frequency of G and C in the current sequence.'''
gc = len([base for base in self.seq if base == 'C' or base == 'G'])
return float(gc) / len(self)
def is_palindrome(self):
seq_len = len(self.seq)
if seq_len % 2 == 0:
# Sequence has even number of bases, can test non-overlapping seqs
            wing = seq_len // 2
l_wing = self[0: wing]
r_wing = self[wing:]
if l_wing == r_wing.reverse_complement():
return True
else:
return False
else:
# Sequence has odd number of bases and cannot be a palindrome
return False
def is_rotation(self, other):
'''Determine whether two sequences are the same, just at different
rotations.
:param other: The sequence to check for rotational equality.
:type other: coral.sequence._sequence.Sequence
'''
if len(self) != len(other):
return False
for i in range(len(self)):
if self.rotate(i) == other:
return True
return False
def linearize(self, index=0):
'''Linearize the Sequence at an index.
:param index: index at which to linearize.
:type index: int
:returns: A linearized version of the current sequence.
:rtype: coral.sequence._sequence.Sequence
:raises: ValueError if the input is a linear sequence.
'''
if not self.circular and index != 0:
raise ValueError('Cannot relinearize a linear sequence.')
copy = self.copy()
# Snip at the index
if index:
return copy[index:] + copy[:index]
copy.circular = False
return copy
def locate(self, pattern):
'''Find sequences matching a pattern.
:param pattern: Sequence for which to find matches.
:type pattern: str
:returns: Indices of pattern matches.
:rtype: list of ints
'''
if self.circular:
if len(pattern) >= 2 * len(self):
raise ValueError('Search pattern longer than searchable ' +
'sequence.')
seq = self + self[:len(pattern) - 1]
return super(NucleicAcid, seq).locate(pattern)
else:
return super(NucleicAcid, self).locate(pattern)
def mw(self):
'''Calculate the molecular weight.
:returns: The molecular weight of the current sequence in amu.
:rtype: float
'''
counter = collections.Counter(self.seq.lower())
mw_a = counter['a'] * 313.2
mw_t = counter['t'] * 304.2
mw_g = counter['g'] * 289.2
mw_c = counter['c'] * 329.2
mw_u = counter['u'] * 306.2
if self.material == 'dna':
return mw_a + mw_t + mw_g + mw_c + 79.0
else:
return mw_a + mw_u + mw_g + mw_c + 159.0
def rotate(self, n):
'''Rotate Sequence by n bases.
:param n: Number of bases to rotate.
:type n: int
:returns: The current sequence reoriented at `index`.
:rtype: coral.sequence._sequence.Sequence
:raises: ValueError if applied to linear sequence or `index` is
negative.
'''
if not self.circular and n != 0:
raise ValueError('Cannot rotate a linear sequence')
else:
rotated = self[-n:] + self[:-n]
return rotated.circularize()
def rotate_to(self, index):
'''Orient Sequence to index (only applies to circular sequences).
:param index: Position at which to re-zero the Sequence.
:type index: int
:returns: The current sequence reoriented at `index`.
:rtype: coral.sequence._sequence.Sequence
:raises: ValueError if applied to linear sequence or `index` is
negative.
'''
return self.rotate(-index)
def reverse(self):
return self[::-1]
def reverse_complement(self):
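        '''Return the reverse complement of the current sequence.'''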
copy = self.copy()
copy.seq = str(self.reverse().complement())
return copy
def tm(self, parameters='cloning'):
'''Find the melting temperature.
:param parameters: The tm method to use (cloning, santalucia98,
breslauer86)
:type parameters: str
'''
return coral.analysis.tm(self, parameters=parameters)
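# Example usage (sketch): NucleicAcid is normally used through a concrete
# subclass such as coral.DNA; direct construction below is illustrative only.
#     seq = NucleicAcid('atgcatgc', 'dna', circular=True)
#     seq.rotate(2).reverse_complement().gc()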
|
the-stack_106_31128 | # -*- coding: utf-8 -*-
"""
pygments.styles.native
~~~~~~~~~~~~~~~~~~~~~~
pygments version of my "native" vim theme.
:copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Token, Whitespace
class NativeStyle(Style):
"""
Pygments version of the "native" vim theme.
"""
background_color = '#202020'
highlight_color = '#404040'
styles = {
Token: '#d0d0d0',
Whitespace: '#666666',
Comment: 'italic #999999',
Comment.Preproc: 'noitalic bold #cd2828',
Comment.Special: 'noitalic bold #e50808 bg:#520000',
Keyword: 'bold #6ab825',
Keyword.Pseudo: 'nobold',
Operator.Word: 'bold #6ab825',
String: '#ed9d13',
String.Other: '#ffa500',
Number: '#3677a9',
Name.Builtin: '#24909d',
Name.Variable: '#40ffff',
Name.Constant: '#40ffff',
Name.Class: 'underline #447fcf',
Name.Function: '#447fcf',
Name.Namespace: 'underline #447fcf',
Name.Exception: '#bbbbbb',
Name.Tag: 'bold #6ab825',
Name.Attribute: '#bbbbbb',
Name.Decorator: '#ffa500',
Generic.Heading: 'bold #ffffff',
Generic.Subheading: 'underline #ffffff',
Generic.Deleted: '#d22323',
Generic.Inserted: '#589819',
Generic.Error: '#d22323',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: '#aaaaaa',
Generic.Output: '#cccccc',
Generic.Traceback: '#d22323',
Error: 'bg:#e3d2d2 #a61717'
}
|
the-stack_106_31130 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2014, Brian Coca <[email protected]>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['stableinterface'],
'supported_by': 'core'}
DOCUMENTATION = '''
---
module: getent
short_description: a wrapper to the unix getent utility
description:
    - Runs getent against one of its various databases and returns information into
the host's facts, in a getent_<database> prefixed variable
version_added: "1.8"
options:
database:
required: True
description:
- the name of a getent database supported by the target system (passwd, group,
hosts, etc).
key:
required: False
default: ''
description:
- key from which to return values from the specified database, otherwise the
full contents are returned.
split:
required: False
default: None
description:
- "character used to split the database values into lists/arrays such as ':' or '\t', otherwise it will try to pick one depending on the database"
fail_key:
required: False
default: True
description:
- If a supplied key is missing this will make the task fail if True
notes:
- "Not all databases support enumeration, check system documentation for details"
requirements: [ ]
author: "Brian Coca (@bcoca)"
'''
EXAMPLES = '''
# get root user info
- getent:
database: passwd
key: root
- debug:
var: getent_passwd
# get all groups
- getent:
database: group
split: ':'
- debug:
var: getent_group
# get all hosts, split by tab
- getent:
database: hosts
- debug:
var: getent_hosts
# get http service info, no error if missing
- getent:
database: services
key: http
fail_key: False
- debug:
var: getent_services
# get user password hash (requires sudo/root)
- getent:
database: shadow
key: www-data
split: ':'
- debug:
var: getent_shadow
'''
import traceback
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
def main():
module = AnsibleModule(
argument_spec = dict(
database = dict(required=True),
key = dict(required=False, default=None),
split = dict(required=False, default=None),
fail_key = dict(required=False, type='bool', default=True),
),
supports_check_mode = True,
)
colon = [ 'passwd', 'shadow', 'group', 'gshadow' ]
database = module.params['database']
key = module.params.get('key')
split = module.params.get('split')
fail_key = module.params.get('fail_key')
getent_bin = module.get_bin_path('getent', True)
if key is not None:
cmd = [ getent_bin, database, key ]
else:
cmd = [ getent_bin, database ]
if split is None and database in colon:
split = ':'
try:
rc, out, err = module.run_command(cmd)
except Exception as e:
module.fail_json(msg=to_native(e), exception=traceback.format_exc())
msg = "Unexpected failure!"
dbtree = 'getent_%s' % database
results = { dbtree: {} }
if rc == 0:
for line in out.splitlines():
record = line.split(split)
results[dbtree][record[0]] = record[1:]
module.exit_json(ansible_facts=results)
elif rc == 1:
msg = "Missing arguments, or database unknown."
elif rc == 2:
msg = "One or more supplied key could not be found in the database."
if not fail_key:
results[dbtree][key] = None
module.exit_json(ansible_facts=results, msg=msg)
elif rc == 3:
msg = "Enumeration not supported on this database."
module.fail_json(msg=msg)
if __name__ == '__main__':
main()
|
the-stack_106_31133 | # (c) 2012-2018, Ansible by Red Hat
#
# This file is part of Ansible Galaxy
#
# Ansible Galaxy is free software: you can redistribute it and/or modify
# it under the terms of the Apache License as published by
# the Apache Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# Ansible Galaxy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# Apache License for more details.
#
# You should have received a copy of the Apache License
# along with Galaxy. If not, see <http://www.apache.org/licenses/>.
from .apb import APBLoader
from .module import ModuleLoader
from .module_utils import ModuleUtilsLoader
from .plugin import PluginLoader
from .role import RoleLoader
ALL_LOADERS = [
APBLoader,
ModuleLoader,
ModuleUtilsLoader,
PluginLoader,
RoleLoader,
]
def get_loader(content_type):
"""Returns loader class for specified content type.
:type content_type: constants.ContentType
:param content_type: Content type.
:returns: Loader class for specified content type.
:raise ValueError: If no loader found for specified content type.
"""
for loader_cls in ALL_LOADERS:
content_types = loader_cls.content_types
if not isinstance(loader_cls.content_types, (list, tuple)):
content_types = [content_types]
if content_type in content_types:
return loader_cls
raise ValueError('Loader for content type "{0}" not found'
.format(content_type))
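# Hypothetical usage sketch (the import path for `constants` is an assumption;
# only its ContentType enum is referenced by the docstring above):
#   from galaxy import constants
#   loader_cls = get_loader(constants.ContentType.ROLE)
#   loader = loader_cls(...)  # constructor arguments depend on the concrete loader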
|
the-stack_106_31135 | import glob
import os
import dill
import matplotlib.pyplot as plt
import mosaiks.config as c
import numpy as np
import pandas as pd
import seaborn as sns
from cartopy import crs as ccrs
from matplotlib import ticker
# plotting variables
cs = c.world_app_order
c_by_app = [getattr(c, i) for i in cs]
applications = [config["application"] for config in c_by_app]
variables = [config["variable"] for config in c_by_app]
sample_types = [config["sampling"] for config in c_by_app]
disp_names = [config["disp_name"] for config in c_by_app]
logged = [config["logged"] for config in c_by_app]
colorbar_bounds = [config["world_bounds_colorbar"] for config in c_by_app]
clip_bounds = [config["world_bounds_pred"] for config in c_by_app]
units = [config["units_disp"] for config in c_by_app]
c_plotting = getattr(c, "plotting")
colors = [config["color"] for config in c_by_app]
cmap_fxn = c_plotting["cmap_fxn"]
cmaps = [cmap_fxn(color) for color in colors]
# matches the order the data is stored in
task_to_data_idxs = {"treecover": 0, "elevation": 1, "population": 2, "nightlights": 3}
task_to_cfg_idxs = {"treecover": 0, "elevation": 1, "population": 2, "nightlights": 3}
# matches each task to the continent it falls into for the continent specific model
task_to_continent_idxs = {
"treecover": 3,
"elevation": 2,
"population": 4,
"nightlights": 2,
}
def plot_world_binned(
latlons,
y_true,
y_pred,
vmin,
vmax,
task_name="Title Me!",
log_cbar=False,
cmap_this="viridis",
sub_select=False,
show_coasts=True,
agg_scale=10.0,
units_this="units?",
land_mass=None,
proj=ccrs.Robinson(central_longitude=0),
):
# parse data input
if sub_select:
sub_ids = np.random.choice(len(latlons), 5000)
lls = latlons[sub_ids, :]
preds = y_pred[sub_ids]
labels = y_true[sub_ids].ravel()
else:
lls = latlons
preds = y_pred
labels = y_true.ravel()
fig = plt.figure(figsize=(18, 6))
gs = fig.add_gridspec(1, 2, width_ratios=[1, 1], wspace=0.01, hspace=0.05)
x0, y0, labels_binned = points_to_bin(lls[:, 1], lls[:, 0], labels, scale=agg_scale)
x0, y0, preds_binned = points_to_bin(lls[:, 1], lls[:, 0], preds, scale=agg_scale)
ax_truth = fig.add_subplot(gs[0, 0], projection=proj)
ax_truth.outline_patch.set_visible(False)
ax_truth.background_patch.set_visible(False)
ax_colorbar = ax_truth.pcolormesh(
x0,
y0,
labels_binned,
transform=ccrs.PlateCarree(),
cmap=cmap_this,
vmin=vmin,
vmax=vmax,
edgecolors="none",
)
ax_pred = fig.add_subplot(gs[0, 1], projection=proj)
ax_pred.outline_patch.set_visible(False)
ax_pred.background_patch.set_visible(False)
ax_pred.pcolormesh(
x0,
y0,
preds_binned,
transform=ccrs.PlateCarree(),
cmap=cmap_this,
vmin=vmin,
vmax=vmax,
edgecolors="none",
)
if land_mass is not None:
ax_pred.add_geometries(
[land_mass],
crs=ccrs.PlateCarree(),
facecolor="grey",
edgecolor="none",
zorder=-100,
)
ax_truth.add_geometries(
[land_mass],
crs=ccrs.PlateCarree(),
facecolor="grey",
edgecolor="none",
zorder=-100,
)
if show_coasts:
ax_truth.coastlines(color="grey", linewidth=0.5)
ax_pred.coastlines(color="grey", linewidth=0.5)
ax_truth.set_title("Labels", fontsize=24)
ax_pred.set_title("Predicted", fontsize=24)
# colorbar for the first two
bb_truth = ax_truth.get_position()
bb_pred = ax_pred.get_position()
height = bb_truth.height * 0.05
width = (bb_pred.x1 - bb_truth.x0) * 0.95
y0 = bb_truth.y0 - height
x0 = bb_truth.x0 + width * 0.025
ax_cbar = fig.add_axes((x0, y0, width, height))
cb = fig.colorbar(ax_colorbar, cax=ax_cbar, orientation="horizontal")
cb.locator = ticker.MaxNLocator(nbins=6, integer=True)
cb.update_ticks()
ax_cbar.set_xlabel(units_this, labelpad=1.0)
return fig
def points_to_bin(x, y, vals, scale=10.0):
"""args:
x,y: nx1 arrays of locations in 1 dimension each
    vals: nx1 array of values to be averaged
scale: the edge of a bin/box in {x,y} units.
returns:
x0, y0: kx1, mx1 arrays of the x and y gridpoints
vals_grid: (m-1)x(k-1) resulting aggregated values
"""
x_range = np.max(x) - np.min(x)
y_range = np.max(y) - np.min(y)
bin_shapes = [int(y_range / scale), int(x_range / scale)]
sums_grid, y0, x0 = np.histogram2d(y, x, bins=bin_shapes, weights=vals)
counts, y1, x1 = np.histogram2d(y, x, bins=bin_shapes)
vals_grid = sums_grid / counts
vals_grid = np.ma.masked_invalid(vals_grid)
return x0, y0, vals_grid
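# Minimal usage sketch for points_to_bin (synthetic inputs, not from the real pipeline):
#   lon = np.random.uniform(-180, 180, 1000)
#   lat = np.random.uniform(-60, 60, 1000)
#   vals = np.random.rand(1000)
#   x0, y0, grid = points_to_bin(lon, lat, vals, scale=10.0)
#   # `grid` holds per-cell means of `vals`, masked where a cell received no points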
def task_to_world_bin_plot(task, world_data, agg_scale):
assert task in task_to_data_idxs.keys(), print(
"task name not reconized, options are {0}".format(task_to_data_idxs.keys())
)
# grab data for this task
this_idx_data = task_to_data_idxs[task]
this_idx_config = task_to_cfg_idxs[task]
latlons_this = world_data["latlons_train"]
y_this = world_data["y_train"][:, this_idx_data]
y_pred_this = world_data["y_pred_train_cross_val"][:, this_idx_data]
proj = ccrs.Robinson(central_longitude=0)
vmin_this, vmax_this = colorbar_bounds[this_idx_config]
disp_name_this = disp_names[this_idx_config]
cmap_this = cmaps[this_idx_config]
units_this = units[this_idx_config]
sns.set(
rc={
"axes.facecolor": "lightgrey",
"figure.facecolor": "lightgrey",
"axes.grid": False,
}
)
fig = plot_world_binned(
latlons_this,
y_this,
y_pred_this,
vmin_this,
vmax_this,
sub_select=False,
task_name=disp_name_this,
cmap_this=cmap_this,
units_this=units_this,
agg_scale=agg_scale,
show_coasts=True,
land_mass=None,
proj=proj,
)
return fig
def predict_y_dense_sample(task, world_wts, labels_to_run):
# get features for each zoom
feats = [None] * len(labels_to_run)
for tt in labels_to_run:
path = os.path.join(c.features_dir, "dense_" + tt + ".pkl")
with open(path, "rb") as f:
data = dill.load(f)
tloc = task_to_data_idxs[tt]
feats[tloc] = data["X"].astype("float64")
# get weights estimated from continent model (from optimal hyperparameter)
idx = task_to_data_idxs[task]
# clip predictions for this task
mylb = clip_bounds[idx][0]
myub = clip_bounds[idx][1]
ypreds = [None] * len(labels_to_run)
# for each zoom, predict for this task
for z in range(len(labels_to_run)):
# this is the continent needed for this zoom
zcont = task_to_continent_idxs[task]
# get the right weights for this zoom and task
mywts = world_wts["weights"][zcont][idx]
# predictions
ypreds[z] = np.dot(feats[z], mywts)
ypreds[z][ypreds[z] < mylb] = mylb
ypreds[z][ypreds[z] > myub] = myub
return ypreds
def merge_zoompreds(zoom, labels_to_run, allpreds):
fl = glob.glob(os.path.join(c.grid_dir, "*" + zoom + "*"))
file = np.load(fl[0])
# Create pandas dataframe from npz
sampdf = pd.DataFrame(file["ID"])
sampdf["lon"] = file["lon"]
sampdf["lat"] = file["lat"]
sampdf.columns = ["ID", "lon", "lat"]
# which entry in allpreds[][] is this zoom?
idz = task_to_data_idxs[zoom]
for task in labels_to_run:
# where is this task located in the task vector
idx = task_to_data_idxs[task]
# pull the predictions for this task and zoom
sampdf[task] = allpreds[idx][idz]
return sampdf
|
the-stack_106_31136 | # -*- coding: utf-8 -*-
__doc__="将Rhino中的Mesh 导入 Revit 中"
from rpw.extras.rhino import Rhino as rc
from pyrevit import forms ,DB,UI,_HostApplication,revit
from RhinoToRevit import RhinoToRevit as RhToRe
import rpw
from rpw import db
from rpw.ui.forms import FlexForm, Label, ComboBox, TextBox, TextBox,Separator, Button,SelectFromList
from Helper import *
uidoc = __revit__.ActiveUIDocument
doc = __revit__.ActiveUIDocument.Document
hostapp = _HostApplication(__revit__)
print(hostapp.app.Language)
if hostapp.app.Language.ToString()=="English_USA":
ParameterName=LG_EUN()
elif hostapp.app.Language.ToString()=="Chinese_Simplified":
ParameterName = LG_CHS()
#RhToRe.rhMeshToMesh(1)
#Read Rhino File
finlename=forms.pick_file(file_ext='3dm', files_filter='', init_dir='', restore_dir=True, multi_file=False, unc_paths=False)
Materials = rpw.db.Collector(of_category='OST_Materials', is_type=False).get_elements(wrapped=False)
Materials_options = {t.Name: t for t in Materials}
CategoryID_options = {"Generic Model": DB.BuiltInCategory.OST_GenericModel,
                      "Wall": DB.BuiltInCategory.OST_Walls}
# Form input section
components = [
    Label('Material'),
    ComboBox('Material', Materials_options),
    Label('Category'),
    ComboBox('Category', CategoryID_options),
    Label('Rhino Layer'),
    TextBox('Layer', Text="Default"),
    Button('OK')
]
form = FlexForm('Structure', components)
form.show()
Value=form.values
Mat=Value['Material'].Id
Category=Value['Category']
RhinoFile=rc.FileIO.File3dm.Read(finlename)
def GetOBjectByLayer(RehinoFile,LayerName):
Objects=RehinoFile.Objects.FindByLayer(LayerName)
return Objects
RhinoOBject=GetOBjectByLayer(RhinoFile,Value['Layer'])
Mesh=[i.Geometry for i in RhinoOBject]
#NewLine=[RhToRe.rhLineToLine(i.Geometry) for i in RhinoOBject]
@rpw.db.Transaction.ensure('Create Mesh From Rhino')
def CreateMesh(GeometricalObjects):
ds = DB.DirectShape.CreateElement(doc, DB.ElementId(Category))
ds.ApplicationId = "Application id"
ds.ApplicationDataId = "Geometry object id"
ds.SetShape(GeometricalObjects)
print("Id:{id} 创建成功".format(id=ds.Id))
for i in Mesh:
try:
CreateMesh(RhToRe.rhMeshToMesh(i,Mat))
except:
pass
|
the-stack_106_31137 | #!/usr/bin/python
"""
(C) Copyright 2020 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import time
import threading
import uuid
from itertools import product
from apricot import TestWithServers
from write_host_file import write_host_file
from test_utils_pool import TestPool
from test_utils_container import TestContainer
from ior_utils import IorCommand
from job_manager_utils import Mpirun
from command_utils_base import CommandFailure
from mpio_utils import MpioUtils
try:
# python 3.x
import queue as queue
except ImportError:
# python 2.7
import Queue as queue
class NvmePoolCapacity(TestWithServers):
    # pylint: disable=too-many-ancestors
"""Test class Description: Verify NOSPC
condition is reported when accessing data beyond
pool size.
:avocado: recursive
"""
def setUp(self):
"""Set up for test case."""
super(NvmePoolCapacity, self).setUp()
self.ior_flags = self.params.get("ior_flags", '/run/ior/iorflags/*')
self.ior_apis = self.params.get("ior_api", '/run/ior/iorflags/*')
self.ior_test_sequence = self.params.get(
"ior_test_sequence", '/run/ior/iorflags/*')
self.ior_dfs_oclass = self.params.get(
"obj_class", '/run/ior/iorflags/*')
# Recreate the client hostfile without slots defined
self.hostfile_clients = write_host_file(
self.hostlist_clients, self.workdir, None)
self.pool = None
self.out_queue = queue.Queue()
def ior_thread(self, pool, oclass, api, test, flags, results):
"""Start threads and wait until all threads are finished.
Args:
pool (object): pool handle
oclass (str): IOR object class
            api (str): IOR API
test (list): IOR test sequence
flags (str): IOR flags
results (queue): queue for returning thread results
Returns:
None
"""
processes = self.params.get("slots", "/run/ior/clientslots/*")
container_info = {}
mpio_util = MpioUtils()
if mpio_util.mpich_installed(self.hostlist_clients) is False:
self.fail("Exiting Test: Mpich not installed")
self.pool = pool
# Define the arguments for the ior_runner_thread method
ior_cmd = IorCommand()
ior_cmd.get_params(self)
ior_cmd.set_daos_params(self.server_group, self.pool)
ior_cmd.dfs_oclass.update(oclass)
ior_cmd.api.update(api)
ior_cmd.transfer_size.update(test[2])
ior_cmd.block_size.update(test[3])
ior_cmd.flags.update(flags)
container_info["{}{}{}"
.format(oclass,
api,
test[2])] = str(uuid.uuid4())
# Define the job manager for the IOR command
self.job_manager = Mpirun(ior_cmd, mpitype="mpich")
key = "{}{}{}".format(oclass, api, test[2])
self.job_manager.job.dfs_cont.update(container_info[key])
env = ior_cmd.get_default_env(str(self.job_manager))
self.job_manager.assign_hosts(self.hostlist_clients, self.workdir, None)
self.job_manager.assign_processes(processes)
self.job_manager.assign_environment(env, True)
# run IOR Command
try:
self.job_manager.run()
except CommandFailure as _error:
results.put("FAIL")
def test_create_delete(self, num_pool=2, num_cont=5, total_count=100,
scm_size=100000000000, nvme_size=300000000000):
"""
Test Description:
This method is used to create/delete pools
for a long run. It verifies the NVME free space
during this process.
Args:
num_pool (int): Total pools for running test
num_cont (int): Total containers created on each pool
total_count (int): Total times the test is run in a loop
scm_size (int): SCM size used in the testing
nvme_size (int): NVME size used in the testing
Returns:
None
"""
pool = {}
cont = {}
for loop_count in range(0, total_count):
self.log.info("Running test %s", loop_count)
for val in range(0, num_pool):
pool[val] = TestPool(self.context, self.get_dmg_command())
pool[val].get_params(self)
# Split total SCM and NVME size for creating multiple pools.
temp = int(scm_size) / num_pool
pool[val].scm_size.update(str(temp))
temp = int(nvme_size) / num_pool
pool[val].nvme_size.update(str(temp))
pool[val].create()
self.pool = pool[val]
display_string = "pool{} space at the Beginning".format(val)
self.pool.display_pool_daos_space(display_string)
nvme_size_begin = self.pool.get_pool_free_space("NVME")
for cont_val in range(0, num_cont):
cont[cont_val] = TestContainer(pool[val])
m_leak = 0
for val in range(0, num_pool):
display_string = "Pool{} space at the End".format(val)
self.pool = pool[val]
self.pool.display_pool_daos_space(display_string)
nvme_size_end = self.pool.get_pool_free_space("NVME")
pool[val].destroy()
if (nvme_size_begin != nvme_size_end) and (m_leak == 0):
m_leak = val + 1
# After destroying pools, check memory leak for each test loop.
if m_leak != 0:
self.fail("Memory leak : iteration {0} \n".format(m_leak))
def test_run(self, num_pool=1):
"""
Method Description:
This method is called with different test_cases.
Args:
num_pool (int): Total pools for running a test.
Returns:
None
"""
num_jobs = self.params.get("no_parallel_job", '/run/ior/*')
# Create a pool
pool = {}
# Iterate through IOR different ior test sequence
for oclass, api, test, flags in product(self.ior_dfs_oclass,
self.ior_apis,
self.ior_test_sequence,
self.ior_flags):
# Create the IOR threads
threads = []
for val in range(0, num_pool):
pool[val] = TestPool(self.context, self.get_dmg_command())
pool[val].get_params(self)
# Split total SCM and NVME size for creating multiple pools.
pool[val].scm_size.value = int(test[0]) / num_pool
pool[val].nvme_size.value = int(test[1]) / num_pool
pool[val].create()
display_string = "pool{} space at the Beginning".format(val)
self.pool = pool[val]
self.pool.display_pool_daos_space(display_string)
for thrd in range(0, num_jobs):
# Add a thread for these IOR arguments
threads.append(threading.Thread(target=self.ior_thread,
kwargs={"pool": pool[val],
"oclass": oclass,
"api": api,
"test": test,
"flags": flags,
"results":
self.out_queue}))
# Launch the IOR threads
for thrd in threads:
self.log.info("Thread : %s", thrd)
thrd.start()
time.sleep(5)
# Wait to finish the threads
for thrd in threads:
thrd.join()
# Verify the queue and make sure no FAIL for any IOR run
# Test should fail with ENOSPC.
while not self.out_queue.empty():
if (self.out_queue.get() == "FAIL" and test[4] == "PASS") \
or (self.out_queue.get() != "FAIL" and test[4] == "FAIL"):
self.fail("FAIL")
for val in range(0, num_pool):
display_string = "Pool{} space at the End".format(val)
self.pool = pool[val]
self.pool.display_pool_daos_space(display_string)
self.pool.destroy()
def test_nvme_pool_capacity(self):
"""Jira ID: DAOS-2085.
Test Description:
Purpose of this test is to verify whether DAOS stack
report NOSPC when accessing data beyond pool size.
Use Cases
Test Case 1 or 2:
1. Perform IO less than entire SSD disk space.
2. Perform IO beyond entire SSD disk space.
Test Case 3:
3. Create Pool/Container and destroy them several times.
Use case:
:avocado: tags=all,hw,medium,ib2,nvme,full_regression
:avocado: tags=nvme_pool_capacity
"""
# Run test with one pool.
self.log.info("Running Test Case 1 with one Pool")
self.test_run(1)
time.sleep(5)
# Run test with two pools.
self.log.info("Running Test Case 1 with two Pools")
self.test_run(2)
time.sleep(5)
# Run Create/delete pool/container
self.log.info("Running Test Case 3: Pool/Cont Create/Destroy")
self.test_create_delete(10, 50, 100)
|
the-stack_106_31138 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# -----------------------------------------------------------------------------------
import json
import asyncio
from azure.eventprocessorhost.lease import Lease
class AzureBlobLease(Lease):
"""
Azure Blob Lease
"""
def __init__(self):
"""
Init Azure Blob Lease.
"""
super()
Lease.__init__(self)
self.offset = None
self.state = lambda: None
def serializable(self):
"""
        Returns a serializable copy of `__dict__`.
"""
serial = self.__dict__.copy()
del serial['state']
return serial
def with_lease(self, lease):
"""
        Init with existing lease.
"""
super().with_source(lease)
def with_blob(self, blob):
"""
Init Azure Blob Lease with existing blob.
"""
content = json.loads(blob.content)
self.partition_id = content["partition_id"]
self.owner = content["owner"]
self.token = content["token"]
self.epoch = content["epoch"]
self.offset = content["offset"]
self.sequence_number = content["sequence_number"]
def with_source(self, lease):
"""
Init Azure Blob Lease from existing.
"""
super().with_source(lease)
self.offset = lease.offset
self.sequence_number = lease.sequence_number
async def is_expired(self):
"""
Check and return Azure Blob Lease state using Storage API.
"""
if asyncio.iscoroutinefunction(self.state):
current_state = await self.state()
else:
current_state = self.state()
if current_state:
return current_state != "leased"
return False
|
the-stack_106_31143 | import logging
import sys
import urllib.request
import urllib.parse
import urllib.error
import requests
from requests.auth import HTTPBasicAuth
from six import string_types, text_type
from redash.query_runner import *
from redash.utils import json_dumps, json_loads
try:
import http.client as http_client
except ImportError:
# Python 2
    import httplib as http_client
logger = logging.getLogger(__name__)
ELASTICSEARCH_TYPES_MAPPING = {
"integer": TYPE_INTEGER,
"long": TYPE_INTEGER,
"float": TYPE_FLOAT,
"double": TYPE_FLOAT,
"boolean": TYPE_BOOLEAN,
"string": TYPE_STRING,
"date": TYPE_DATE,
"object": TYPE_STRING,
# "geo_point" TODO: Need to split to 2 fields somehow
}
ELASTICSEARCH_BUILTIN_FIELDS_MAPPING = {"_id": "Id", "_score": "Score"}
PYTHON_TYPES_MAPPING = {
str: TYPE_STRING,
text_type: TYPE_STRING,
bool: TYPE_BOOLEAN,
int: TYPE_INTEGER,
float: TYPE_FLOAT,
}
class BaseElasticSearch(BaseQueryRunner):
should_annotate_query = False
DEBUG_ENABLED = False
@classmethod
def configuration_schema(cls):
return {
"type": "object",
"properties": {
"server": {"type": "string", "title": "Base URL"},
"basic_auth_user": {"type": "string", "title": "Basic Auth User"},
"basic_auth_password": {
"type": "string",
"title": "Basic Auth Password",
},
},
"order": ["server", "basic_auth_user", "basic_auth_password"],
"secret": ["basic_auth_password"],
"required": ["server"],
}
@classmethod
def enabled(cls):
return False
def __init__(self, configuration):
super(BaseElasticSearch, self).__init__(configuration)
self.syntax = "json"
if self.DEBUG_ENABLED:
http_client.HTTPConnection.debuglevel = 1
# you need to initialize logging, otherwise you will not see anything from requests
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(logging.DEBUG)
requests_log.propagate = True
logger.setLevel(logging.DEBUG)
self.server_url = self.configuration["server"]
if self.server_url[-1] == "/":
self.server_url = self.server_url[:-1]
basic_auth_user = self.configuration.get("basic_auth_user", None)
basic_auth_password = self.configuration.get("basic_auth_password", None)
self.auth = None
if basic_auth_user and basic_auth_password:
self.auth = HTTPBasicAuth(basic_auth_user, basic_auth_password)
def _get_mappings(self, url):
mappings = {}
error = None
try:
r = requests.get(url, auth=self.auth)
r.raise_for_status()
mappings = r.json()
except requests.HTTPError as e:
logger.exception(e)
error = "Failed to execute query. Return Code: {0} Reason: {1}".format(
r.status_code, r.text
)
mappings = None
except requests.exceptions.RequestException as e:
logger.exception(e)
error = "Connection refused"
mappings = None
return mappings, error
def _get_query_mappings(self, url):
mappings_data, error = self._get_mappings(url)
if error:
return mappings_data, error
mappings = {}
for index_name in mappings_data:
index_mappings = mappings_data[index_name]
for m in index_mappings.get("mappings", {}):
if "properties" not in index_mappings["mappings"][m]:
continue
for property_name in index_mappings["mappings"][m]["properties"]:
property_data = index_mappings["mappings"][m]["properties"][
property_name
]
if property_name not in mappings:
property_type = property_data.get("type", None)
if property_type:
if property_type in ELASTICSEARCH_TYPES_MAPPING:
mappings[property_name] = ELASTICSEARCH_TYPES_MAPPING[
property_type
]
else:
mappings[property_name] = TYPE_STRING
# raise Exception("Unknown property type: {0}".format(property_type))
return mappings, error
def get_schema(self, *args, **kwargs):
def parse_doc(doc, path=None):
"""Recursively parse a doc type dictionary
"""
path = path or []
result = []
for field, description in doc["properties"].items():
if "properties" in description:
result.extend(parse_doc(description, path + [field]))
else:
result.append(".".join(path + [field]))
return result
schema = {}
url = "{0}/_mappings".format(self.server_url)
mappings, error = self._get_mappings(url)
if mappings:
# make a schema for each index
# the index contains a mappings dict with documents
# in a hierarchical format
for name, index in mappings.items():
columns = []
schema[name] = {"name": name}
for doc, items in index["mappings"].items():
columns.extend(parse_doc(items))
# remove duplicates
# sort alphabetically
schema[name]["columns"] = sorted(set(columns))
return list(schema.values())
def _parse_results(
self, mappings, result_fields, raw_result, result_columns, result_rows
):
def add_column_if_needed(
mappings, column_name, friendly_name, result_columns, result_columns_index
):
if friendly_name not in result_columns_index:
result_columns.append(
{
"name": friendly_name,
"friendly_name": friendly_name,
"type": mappings.get(column_name, "string"),
}
)
result_columns_index[friendly_name] = result_columns[-1]
def get_row(rows, row):
if row is None:
row = {}
rows.append(row)
return row
def collect_value(mappings, row, key, value, type):
if result_fields and key not in result_fields_index:
return
mappings[key] = type
add_column_if_needed(
mappings, key, key, result_columns, result_columns_index
)
row[key] = value
def collect_aggregations(
mappings, rows, parent_key, data, row, result_columns, result_columns_index
):
if isinstance(data, dict):
for key, value in data.items():
val = collect_aggregations(
mappings,
rows,
parent_key if key == "buckets" else key,
value,
row,
result_columns,
result_columns_index,
)
if val:
row = get_row(rows, row)
collect_value(mappings, row, key, val, "long")
for data_key in ["value", "doc_count"]:
if data_key not in data:
continue
if "key" in data and len(list(data.keys())) == 2:
key_is_string = "key_as_string" in data
collect_value(
mappings,
row,
data["key"] if not key_is_string else data["key_as_string"],
data[data_key],
"long" if not key_is_string else "string",
)
else:
return data[data_key]
elif isinstance(data, list):
for value in data:
result_row = get_row(rows, row)
collect_aggregations(
mappings,
rows,
parent_key,
value,
result_row,
result_columns,
result_columns_index,
)
if "doc_count" in value:
collect_value(
mappings,
result_row,
"doc_count",
value["doc_count"],
"integer",
)
if "key" in value:
if "key_as_string" in value:
collect_value(
mappings,
result_row,
parent_key,
value["key_as_string"],
"string",
)
else:
collect_value(
mappings, result_row, parent_key, value["key"], "string"
)
return None
result_columns_index = {c["name"]: c for c in result_columns}
result_fields_index = {}
if result_fields:
for r in result_fields:
result_fields_index[r] = None
if "error" in raw_result:
error = raw_result["error"]
if len(error) > 10240:
error = error[:10240] + "... continues"
raise Exception(error)
elif "aggregations" in raw_result:
if result_fields:
for field in result_fields:
add_column_if_needed(
mappings, field, field, result_columns, result_columns_index
)
for key, data in raw_result["aggregations"].items():
collect_aggregations(
mappings,
result_rows,
key,
data,
None,
result_columns,
result_columns_index,
)
logger.debug("result_rows %s", str(result_rows))
logger.debug("result_columns %s", str(result_columns))
elif "hits" in raw_result and "hits" in raw_result["hits"]:
if result_fields:
for field in result_fields:
add_column_if_needed(
mappings, field, field, result_columns, result_columns_index
)
for h in raw_result["hits"]["hits"]:
row = {}
column_name = "_source" if "_source" in h else "fields"
for column in h[column_name]:
if result_fields and column not in result_fields_index:
continue
add_column_if_needed(
mappings, column, column, result_columns, result_columns_index
)
value = h[column_name][column]
row[column] = (
value[0]
if isinstance(value, list) and len(value) == 1
else value
)
result_rows.append(row)
else:
raise Exception(
"Redash failed to parse the results it got from Elasticsearch."
)
def test_connection(self):
try:
r = requests.get(
"{0}/_cluster/health".format(self.server_url), auth=self.auth
)
r.raise_for_status()
except requests.HTTPError as e:
logger.exception(e)
raise Exception(
"Failed to execute query. Return Code: {0} Reason: {1}".format(
r.status_code, r.text
)
)
except requests.exceptions.RequestException as e:
logger.exception(e)
raise Exception("Connection refused")
class Kibana(BaseElasticSearch):
@classmethod
def enabled(cls):
return True
def _execute_simple_query(
self, url, auth, _from, mappings, result_fields, result_columns, result_rows
):
url += "&from={0}".format(_from)
r = requests.get(url, auth=self.auth)
r.raise_for_status()
raw_result = r.json()
self._parse_results(
mappings, result_fields, raw_result, result_columns, result_rows
)
total = raw_result["hits"]["total"]
result_size = len(raw_result["hits"]["hits"])
logger.debug("Result Size: {0} Total: {1}".format(result_size, total))
return raw_result["hits"]["total"]
def run_query(self, query, user):
try:
error = None
logger.debug(query)
query_params = json_loads(query)
index_name = query_params["index"]
query_data = query_params["query"]
size = int(query_params.get("size", 500))
limit = int(query_params.get("limit", 500))
result_fields = query_params.get("fields", None)
sort = query_params.get("sort", None)
if not self.server_url:
error = "Missing configuration key 'server'"
return None, error
url = "{0}/{1}/_search?".format(self.server_url, index_name)
mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)
mappings, error = self._get_query_mappings(mapping_url)
if error:
return None, error
if sort:
url += "&sort={0}".format(urllib.parse.quote_plus(sort))
url += "&q={0}".format(urllib.parse.quote_plus(query_data))
logger.debug("Using URL: {0}".format(url))
logger.debug("Using Query: {0}".format(query_data))
result_columns = []
result_rows = []
if isinstance(query_data, string_types):
_from = 0
while True:
query_size = size if limit >= (_from + size) else (limit - _from)
total = self._execute_simple_query(
url + "&size={0}".format(query_size),
self.auth,
_from,
mappings,
result_fields,
result_columns,
result_rows,
)
_from += size
if _from >= limit:
break
else:
# TODO: Handle complete ElasticSearch queries (JSON based sent over HTTP POST)
raise Exception("Advanced queries are not supported")
json_data = json_dumps({"columns": result_columns, "rows": result_rows})
except KeyboardInterrupt:
error = "Query cancelled by user."
json_data = None
except requests.HTTPError as e:
logger.exception(e)
error = "Failed to execute query. Return Code: {0} Reason: {1}".format(
                e.response.status_code, e.response.text
)
json_data = None
except requests.exceptions.RequestException as e:
logger.exception(e)
error = "Connection refused"
json_data = None
return json_data, error
class ElasticSearch(BaseElasticSearch):
@classmethod
def enabled(cls):
return True
@classmethod
def name(cls):
return "Elasticsearch"
def run_query(self, query, user):
try:
error = None
logger.debug(query)
query_dict = json_loads(query)
index_name = query_dict.pop("index", "")
result_fields = query_dict.pop("result_fields", None)
if not self.server_url:
error = "Missing configuration key 'server'"
return None, error
url = "{0}/{1}/_search".format(self.server_url, index_name)
mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)
mappings, error = self._get_query_mappings(mapping_url)
if error:
return None, error
logger.debug("Using URL: %s", url)
logger.debug("Using query: %s", query_dict)
r = requests.get(url, json=query_dict, auth=self.auth)
r.raise_for_status()
logger.debug("Result: %s", r.json())
result_columns = []
result_rows = []
self._parse_results(
mappings, result_fields, r.json(), result_columns, result_rows
)
json_data = json_dumps({"columns": result_columns, "rows": result_rows})
except KeyboardInterrupt:
            logger.exception("Query cancelled by user.")
error = "Query cancelled by user."
json_data = None
except requests.HTTPError as e:
logger.exception(e)
error = "Failed to execute query. Return Code: {0} Reason: {1}".format(
r.status_code, r.text
)
json_data = None
except requests.exceptions.RequestException as e:
logger.exception(e)
error = "Connection refused"
json_data = None
return json_data, error
register(Kibana)
register(ElasticSearch)
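# Illustrative query shapes these runners accept (index names and field values are
# made-up examples, not part of the original module):
#
# Kibana runner -- a Lucene query string plus paging/sort hints:
#   {"index": "logstash-*", "query": "status:500", "fields": ["timestamp", "path"],
#    "size": 500, "limit": 500, "sort": "timestamp:desc"}
#
# ElasticSearch runner -- a full search body; "index" and "result_fields" are popped
# before the body is sent to /<index>/_search:
#   {"index": "logstash-*", "result_fields": ["path"],
#    "query": {"match": {"status": 500}}, "size": 10}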
|
the-stack_106_31145 | import argparse
import json
def validate_parallel_run_config(parallel_run_config):
max_concurrency = 20
if (parallel_run_config.process_count_per_node * parallel_run_config.node_count) > max_concurrency:
print("Please decrease concurrency to maximum of 20 as currently AutoML does not support it.")
raise ValueError("node_count*process_count_per_node must be between 1 and max_concurrency {}"
.format(max_concurrency))
def get_automl_environment():
from azureml.core import Environment
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.runconfig import DEFAULT_CPU_IMAGE
train_env = Environment(name="many_models_environment_automl")
train_conda_deps = CondaDependencies.create(pip_packages=['azureml-sdk[automl]', 'joblib', 'pyarrow==0.14'])
train_conda_deps.add_conda_package('pandas==0.23.4')
train_conda_deps.add_conda_package('numpy==1.16.2')
train_conda_deps.add_conda_package('fbprophet==0.5')
train_conda_deps.add_conda_package('py-xgboost==0.90')
train_env.python.conda_dependencies = train_conda_deps
train_env.docker.enabled = True
train_env.docker.base_image = DEFAULT_CPU_IMAGE
return train_env
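# Illustrative wiring of the two helpers above (ParallelRunConfig keyword names other
# than node_count/process_count_per_node are assumptions, not validated here):
#   parallel_run_config = ParallelRunConfig(..., node_count=4, process_count_per_node=5,
#                                           environment=get_automl_environment())
#   validate_parallel_run_config(parallel_run_config)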
|
the-stack_106_31147 | import argparse
def parse_args():
parser = argparse.ArgumentParser(
description='Mimic image or video in your terminal')
parser.add_argument(
'path',
type=str,
        help=('the path of the picture/video; use "{0}" like "frame{0}.jpg" if '
              'the images are named frame1.jpg, frame2.jpg, ...'))
parser.add_argument('--ascii',
help='whether using ascii characters or ansi escape code',
action='store_true')
parser.add_argument(
'--char',
help=('pass a character to get fixed character instead of some '
'random text (must be used it with "--ascii" opt)'),
type=str,
default=None)
    parser.add_argument('--fps', type=int, help='maximum fps', default=60)
parser.add_argument('--start-frame',
type=int,
default=1,
metavar='NUMBER',
help='set the first frame played')
parser.add_argument('--last-frame',
type=int,
default=None,
metavar='NUMBER',
help='set the last frame played (not work for video)')
parser.add_argument('-v',
'--verbose',
help='show some information',
action='store_true')
dim_group = parser.add_mutually_exclusive_group()
dim_group.add_argument(
'--dim',
help='fixed dimension to display in terminal (example: 480x360)',
metavar='WIDTHxHEIGHT',
type=str,
default=None)
dim_group.add_argument(
'--aspect-ratio',
help=
('preferred aspect ratio to fix stretched image while printing in terminal, '
'result will change dynamically based on terminal size (usage: 16:9, 4:3, 10:5, ...). '
'Try increasing the height value if the image stretched vertically and '
'vice versa if streched horizontally.'),
metavar='RATIO',
type=str,
default=None)
return parser.parse_args()
|
the-stack_106_31150 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 06/13/2021
# Author: Sian Xiao & Hao Tian
"""
Two examples for function atomCount
1PJ3:
fileDirection = '../data/pockets/1PJ3_out/pockets/pocket1_atm.pdb'
info = 'Chain A:GLN64,ARG67,ILE88,ARG91,LEU95; Chain B:PHE1127,ARG1128'
return = 10
1AO0:
fileDirection = '../data/pockets/1AO0_out/pockets/pocket1_atm.pdb'
info = 'Chain A:HIS25,TYR242,SER244,ARG245,PRO246,ARG259,PRO281,ASP282,SER283,LYS305,LYS328; \
Chain B:ILE304,LYS305,ASN306,ARG307'
return = 31
"""
def atomCount(fileDirection: str, info: str) -> int:
"""compare how many matched heavy atoms
Args:
fileDirection (str): file location '../data/pockets/{pdb}_out/pockets/*.pdb'
        info (str): corresponding allosteric info from ASD
Returns:
int: how many matched heavy atoms
"""
# collect allosteric info
atomTarget = dict()
info = info.split("\t")[-1].strip().split(";") # 'allosteric_site_residue'
for chains in info:
chains = chains.strip()
chainID = chains[6]
atoms = chains[8:].split(",")
# map atoms to chain ID
for atom in atoms:
atomTarget[atom] = chainID
# count matched atoms
pocket = open(fileDirection, "r").readlines()
count = 0
for line in pocket:
if line.startswith("ATOM"):
# Chain identifier
chainID = line[21]
# Residue name and Residue sequence number
atom = line[17:20] + line[22:26].strip()
# same atom and same chain ID
if atom in atomTarget and atomTarget[atom] == chainID:
count += 1
return count
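if __name__ == "__main__":
    # Quick check mirroring the first docstring example; the pdb file must exist
    # locally for this to run (the docstring states the expected count is 10).
    file_direction = "../data/pockets/1PJ3_out/pockets/pocket1_atm.pdb"
    info = "Chain A:GLN64,ARG67,ILE88,ARG91,LEU95; Chain B:PHE1127,ARG1128"
    print(atomCount(file_direction, info))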
|
the-stack_106_31151 | """
@author: magician
@file: card_demo.py
@date: 2020/9/28
"""
import collections
import random
Card = collections.namedtuple('Card', ['rank', 'suit'])
class FrenchDeck:
"""
FrenchDeck
"""
ranks = [str(n) for n in range(2, 11)] + list('JQKA')
suits = 'spades diamonds clubs hearts'.split()
def __init__(self):
self._cards = [Card(rank, suit) for suit in self.suits for rank in self.ranks]
def __len__(self):
return len(self._cards)
def __getitem__(self, position):
return self._cards[position]
suit_values = dict(spades=3, hearts=2, diamonds=1, clubs=0)
def spades_high(card):
"""
spades_high
@param card:
@return:
"""
rank_value = FrenchDeck.ranks.index(card.rank)
return rank_value * len(suit_values) + suit_values[card.suit]
if __name__ == '__main__':
beer_card = Card('7', 'diamonds')
print(beer_card)
deck = FrenchDeck()
print(len(deck))
print(deck[0])
print(deck[-1])
# random
print(random.choice(deck))
print(random.choice(deck))
print(random.choice(deck))
# slice
print(deck[:3])
print(deck[12::13])
# iterator
for card in deck:
print(card)
for card in reversed(deck):
print(card)
# in
print(Card('Q', 'hearts') in deck)
print(Card('Q', 'beasts') in deck)
# sort
for card in sorted(deck, key=spades_high):
print(card)
|
the-stack_106_31152 | from concurrent.futures import ThreadPoolExecutor, as_completed
from datetime import datetime
from urllib import request
from urllib.parse import urljoin, urlparse, quote
from lxml import html
import logging
import redis
import sys
import os
import re
__r = redis.StrictRedis(host='localhost', port=6379, db=0)
__pool = ThreadPoolExecutor(os.cpu_count() * 2)
__future_callback = {}
# logger = multiprocessing.log_to_stderr()
# logger.setLevel(multiprocessing.SUBDEBUG)
SITE_CONFIGS = [
{
"name": "Thanh Nien",
"id": "TN",
"url_xpath": "//*[contains(@class, 'zone--timeline') or contains(@class, 'timeline')]//article//h2/a/@href",
"content_xpath": "//*[contains(@id, 'main_detail')]//div/text()",
"next_page_pattern": "trang-{}.html",
"page_regex": r"trang-(\d+)\.html",
"categories": [
{ "name": "Pháp luật", "id": "PL", "urls": ["https://thanhnien.vn/thoi-su/phap-luat/"] },
{ "name": "Kinh Doanh", "id": "KD", "urls": ["https://thanhnien.vn/tai-chinh-kinh-doanh/"] },
{ "name": "Thể Thao", "id": "TT", "urls": ["http://thethao.thanhnien.vn/bong-da-viet-nam/",
"http://thethao.thanhnien.vn/bong-da-quoc-te/",
"http://thethao.thanhnien.vn/binh-luan/",
"http://thethao.thanhnien.vn/quan-vot/",
"http://thethao.thanhnien.vn/hau-truong/",
"http://thethao.thanhnien.vn/toan-canh-the-thao/",
"https://thethao.thanhnien.vn/world-cup-2018/"] },
{ "name": "Công Nghệ", "id": "CN", "urls": ["https://thanhnien.vn/cong-nghe/"] },
{ "name": "Sức Khoẻ", "id": "SK", "urls": ["https://thanhnien.vn/suc-khoe/"] },
{ "name": "Văn Hoá", "id": "VH", "urls": ["https://thanhnien.vn/van-hoa/"] },
]
},
{
"name": "Dan Tri",
"id": "DT",
"url_xpath": "//*[contains(@id, 'listcheckepl')]/div//h2/a/@href",
"content_xpath": "//*[contains(@id, 'divNewsContent')]//p/text()",
"next_page_pattern": "trang-{}.htm",
"page_regex": r"trang-(\d+)\.htm",
"categories": [
{ "name": "Pháp luật", "id": "PL", "urls": ["http://dantri.com.vn/phap-luat.htm"] },
{ "name": "Kinh Doanh", "id": "KD", "urls": ["http://dantri.com.vn/kinh-doanh.htm"] },
{ "name": "Thể Thao", "id": "TT", "urls": ["http://dantri.com.vn/the-thao.htm"] },
{ "name": "Công Nghệ", "id": "CN", "urls": ["http://dantri.com.vn/suc-manh-so.htm"] },
{ "name": "Sức Khoẻ", "id": "SK", "urls": ["http://dantri.com.vn/suc-khoe.htm"] },
{ "name": "Văn Hoá", "id": "VH", "urls": ["http://dantri.com.vn/van-hoa.htm"] },
]
},
{
"name": "VNExpress",
"id": "VNE",
"url_xpath": "//article[contains(@class, 'list_news')]/h4/a[1]/@href",
"content_xpath": "//article[contains(@class, 'content_detail')]/p//text()",
"next_page_pattern": "p{}",
"page_regex": r"p(\d+)",
"categories": [
{ "name": "Pháp luật", "id": "PL", "urls": ["https://vnexpress.net/phap-luat-"] },
{ "name": "Kinh Doanh", "id": "KD", "urls": ["https://vnexpress.net/kinh-doanh/"]},
{ "name": "Thể Thao", "id": "TT", "urls": ["https://vnexpress.net/the-thao/"] },
{ "name": "Công Nghệ", "id": "CN", "urls": ["https://vnexpress.net/so-hoa/"] },
{ "name": "Sức Khoẻ", "id": "SK", "urls": ["https://vnexpress.net/suc-khoe/"] },
{ "name": "Văn Hoá", "id": "VH", "urls": ["https://vnexpress.net/giai-tri/"] },
]
},
{
"name": "Vietnamnet",
"id": "VNN",
"url_xpath": "//*[contains(@class, 'd-ib')]/h3/a/@href",
"content_xpath": "//*[contains(@id, 'ArticleContent')]/p//text()",
"next_page_pattern": "trang{}/index.html",
"page_regex": r"trang(\d+)/index\.html",
"categories": [
{ "name": "Pháp luật", "id": "PL", "urls": ["http://vietnamnet.vn/vn/phap-luat/"] },
{ "name": "Kinh Doanh", "id": "KD", "urls": ["http://vietnamnet.vn/vn/kinh-doanh/"] },
{ "name": "Thể Thao", "id": "TT", "urls": ["http://vietnamnet.vn/vn/the-thao/"] },
{ "name": "Công Nghệ", "id": "CN", "urls": ["http://vietnamnet.vn/vn/cong-nghe/"] },
{ "name": "Sức Khoẻ", "id": "SK", "urls": ["http://vietnamnet.vn/vn/suc-khoe/"] },
{ "name": "Văn Hoá", "id": "VH", "urls": ["http://vietnamnet.vn/vn/giai-tri/"] },
]
}
]
REDIS_VISITED_SET = "visited_urls"
FILE_NAME_PATTERN = "{cate_id}_{site_id}_{post_id}"
CURRENT_DIR = os.getcwd()
DATA_DIR = os.path.join(CURRENT_DIR, "data")
LOG_FILE = os.path.join(CURRENT_DIR, "log.txt")
def write_log(content):
t = str(datetime.now())
with open(LOG_FILE, "a") as f:
log_content = "{} - {}\n".format(t, content)
f.write(log_content)
def push_task(func, args=(), callback=None):
try:
future = __pool.submit(func, *args) # concurrent.futures.Future
__future_callback[future] = callback
except:
print("[ERROR]: {}".format(sys.exec_info()[0]))
def extract_urls(root_url, doc, url_xpath):
"""
Arguments:
:param root_url:
:param doc: HTML content extracted from page.
:param url_xpath: Identity of post urls in doc.
:type root_url: str
:type doc: lxml.html.HtmlElement
:type url_xpath: str
Returns:
:return: List of extracted urls.
:rtype: list[str]
"""
urls = doc.xpath(url_xpath)
filtered_urls = []
for url in urls:
url = "{}{}".format(root_url, urlparse(url).path)
url = quote(url)
if not __r.sismember(REDIS_VISITED_SET, url):
filtered_urls.append(url)
return filtered_urls
def init_page_url(page_url, next_page_pattern):
pattern = re.compile(r".*\.html|.*\.htm")
if len(pattern.findall(page_url)) > 0:
if len(page_url.rsplit(".html", 1)) > 1:
page_url = page_url.rsplit(".html", 1)[0]
elif len(page_url.rsplit(".htm", 1)) > 1:
page_url = page_url.rsplit(".htm", 1)[0] + "/"
if page_url[-1] == "-":
page_url = page_url + next_page_pattern.format(1)
else:
page_url = urljoin(page_url, next_page_pattern.format(1))
return page_url
def get_next_page_url(current_url, page_regex, next_page_pattern):
pattern = re.compile(page_regex)
next_page = None
if len(pattern.findall(current_url)) > 0:
next_page = int(pattern.findall(current_url)[0]) + 1
else:
next_page = 2
next_page_url = re.sub(page_regex, next_page_pattern.format(next_page), current_url)
print("Next page of {}: {}".format(current_url, next_page_url))
return next_page_url
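# Example of the rewrite performed above (the URL is illustrative):
#   get_next_page_url("http://dantri.com.vn/phap-luat/trang-3.htm",
#                     r"trang-(\d+)\.htm", "trang-{}.htm")
#   -> "http://dantri.com.vn/phap-luat/trang-4.htm"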
def extract_content(doc, content_xpath):
"""
Arguments:
:param doc: HTML content from the page.
:param content_xpath: Identity of content in doc.
:type doc: lxml.html.HtmlElement
:type content_xpath: str
Returns:
:return: The news content.
:rtype: str
"""
content = doc.xpath(content_xpath)
content = " ".join(content)
content = re.sub("\s\s+", " ", content).strip()
return content
def persist_content(site_id, cate_id, post_id, content):
"""
Arguments:
:type site_id: str
:type cate_id: str
:type post_id: str
:type content: str
Return:
:rtype: bool
"""
try:
if not os.path.isdir(DATA_DIR):
os.makedirs(DATA_DIR)
cate_dir = os.path.join(DATA_DIR, cate_id)
if not os.path.isdir(cate_dir):
os.makedirs(cate_dir)
file_name = FILE_NAME_PATTERN.format(site_id=site_id, cate_id=cate_id, post_id=post_id)
file_path = os.path.join(cate_dir, file_name)
with open(file_path, "w") as f:
f.write(content)
except OSError as e:
print("OS error: {}".format(e))
return False
except:
print("Unexpected error: {}".format(sys.exc_info()[0]))
return False
return True
def process_post_content(post_url, content_xpath, site_id, cate_id):
"""
Arguments:
:type post_url: str
:type content_xpath: str
:type site_id: str
:type cate_id: str
Returns:
:rtype: dict
"""
print("Processing: {}".format(post_url))
result = {
"post_url": post_url,
"is_success": False,
"error": None
}
try:
post_id_pattern = re.compile(r"-(.\d+)\.htm")
post_id = post_id_pattern.findall(post_url)[0]
page = request.urlopen(post_url, timeout=5)
doc = html.fromstring(page.read())
content = extract_content(doc, content_xpath)
success = persist_content(site_id, cate_id, post_id, content)
if not success:
result["is_success"] = False
result["error"] = "Could not store content."
return result
url_elements = urlparse(post_url)
__r.sadd(REDIS_VISITED_SET, url_elements.netloc + url_elements.path)
except Exception as e:
print("Error: {}".format(str(e)))
print("Error from url: {}".format(post_url))
result["is_success"] = False
result["error"] = str(e)
return result
result["is_success"] = True
return result
def process_post_callback(result):
print("Logging: {}".format(result["post_url"]))
content = "[INFO]" if result["is_success"] else "[ERROR]"
if not result["is_success"]:
content += " - {}".format(result["error"])
content += " - {}".format(result["post_url"])
write_log(content)
def process_page(site_id, cate_id, page_url, url_xpath, content_xpath, page_regex, next_page_pattern):
"""
Arguments:
:type site_id: str
:type cate_id: str
:type page_url: str
:type url_xpath: str
:type content_xpath: str
:type page_regex: str
:type next_page_pattern: str
"""
print("Processing page: {}".format(page_url))
post_args = []
url_elements = urlparse(page_url)
page = request.urlopen(page_url, timeout=5)
doc = html.fromstring(page.read())
next_page_url = get_next_page_url(page_url, page_regex, next_page_pattern)
urls = extract_urls(url_elements.netloc, doc, url_xpath)
for url in urls:
url = "{}://{}".format(url_elements.scheme, url)
push_task(process_post_content, (url, content_xpath, site_id, cate_id), callback=process_post_callback)
push_task(process_page, (site_id, cate_id, next_page_url, url_xpath, content_xpath, page_regex, next_page_pattern))
if __name__ == "__main__":
for config in SITE_CONFIGS:
site_id = config["id"]
url_xpath = config["url_xpath"]
content_xpath = config["content_xpath"]
page_regex = config["page_regex"]
next_page_pattern = config["next_page_pattern"]
for category in config["categories"]:
cate_id = category["id"]
for url in category["urls"]:
url = init_page_url(url, next_page_pattern)
args = (site_id, cate_id, url, url_xpath, content_xpath, page_regex, next_page_pattern)
push_task(process_page, args)
while True:
for future in as_completed(__future_callback):
func = __future_callback[future]
if func is not None:
f_result = future.result()
push_task(func, (f_result, ))
del __future_callback[future]
|
the-stack_106_31153 | """Adding new operation types
Revision ID: 1cf750b30c08
Revises: e35c7cf01cb4
Create Date: 2021-11-02 23:51:07.308510
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '1cf750b30c08'
down_revision = 'e35c7cf01cb4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("INSERT INTO operation_type VALUES (20, 'Mascota'), (21, 'Salud'), (22, 'Inversión')")
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.execute("DELETE FROM operation_type WHERE id in (20,21,22)")
# ### end Alembic commands ###
|
the-stack_106_31155 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 10:34:50 2019
@author: GAllison
This script performs the overall task of creating a FracFocus database from
the raw excel collection and creating the tables used to make data sets.
Change the file handles at the top of core.Data_set_constructor to point to appropriate
directories.
Within Open-FF, there are three "working" FracFocus raw files. When new data is
downloaded from FracFocus, we put it in a file named "test_data.zip" (If we
are performing a tripwire scan, we move the previous "test_data.zip" into
"test_data_last.zip""). These two files are handled in the script:
"get_new_raw_file.py" and is performed frequently to document new disclosures.
That script also saves a copy of test_data.zip into an archive directory once
a week - usually the same day that the weekly blog post is generated.
The third working raw file in Open-FF is "curentData.zip". This is simply a
copy of "test_data.zip" made once I'm ready to start a curation process. This
separation of files allows Open-FF to continue to download data into test_data.zip
while I am working on the curation and repository process (which can take several
days). The currentData.zip file is typically what is used in this script to
build a file eventually ready for a repository.
# 3/2022 - separating Skytruth archive from main data
# 3/2022 - adding FFV1_scrape as an additional separate data set
"""
## For standard processing, set the following to 'bulk'
data_source = 'SkyTruth' # can be 'bulk', 'FFV1_scrape' or 'SkyTruth'
# or 'NM_scrape_2022_05'
bulk_fn = 'currentData' # name of raw bulk archive file
construct_from_scratch = True # normally True
do_end_tests = True # normally True, only for 'bulk' set, ignore otherwise
make_output_files = False # True for final bulk runs, adds lots of compile time
do_abbrev = False # normally False, for some testing purposes
# used to control files to read from the bulk download. If less than full
# set, this process is performed in "test" mode.
startfile = 0 # 0 for full set
endfile = None # None for no upper limit
if (startfile!=0) | (endfile!=None) :
# test mode does not overwrite production mode pickles
mode = 'TEST'
print('\n'+30*'-'+ 'Performing in TEST mode!'+30*'-'+'\n')
else:
mode = 'PRODUCTION'
import core.Data_set_constructor as set_const
import core.Analysis_set as ana_set
def run_build(bulk_fn = bulk_fn,
mode=mode,
data_source=data_source,
make_output_files=make_output_files,
startfile=startfile,
endfile=endfile,
do_abbrev=do_abbrev,
do_end_tests=do_end_tests,
construct_from_scratch=construct_from_scratch):
if construct_from_scratch:
# this can be skipped when testing if pickles already made
t = set_const.Data_set_constructor(bulk_fn=bulk_fn, const_mode=mode,
make_files=make_output_files,
startfile=startfile,
endfile=endfile,
data_source=data_source,
abbreviated=do_abbrev)\
.create_full_set()
if do_end_tests&(mode=='PRODUCTION')&(data_source=='bulk'):
import core.Tests_of_final as tests
print('\nStarting tests of final product')
print(' Creating test set of FULL data')
df = ana_set.Full_set(bulk_fn=bulk_fn,
pkl_when_creating=False).get_set()
tests.final_test(df).run_all_tests()
if (make_output_files == True) &(data_source=='bulk'):
print('\n\n -- Generating output data sets\n')
ana_set.Standard_data_set(bulk_fn=bulk_fn,
pkl_when_creating=False).save_compressed()
ana_set.Full_set(bulk_fn=bulk_fn,
pkl_when_creating=False).save_compressed()
print('\nBuild completed\n')
try:
return t
except:
print('No data set constructor to return.')
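# The module-level defaults can also be overridden per build, e.g. (the archive
# file name below is illustrative):
#   t = run_build(bulk_fn='archive_2022_03_12', data_source='bulk', make_output_files=True)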
if __name__ == '__main__':
t = run_build() # build using the defaults at the top. |
the-stack_106_31156 | from corehq.apps.reports.filters.dates import DatespanFilter
from corehq.apps.reports.standard import CustomProjectReport, ProjectReportParametersMixin, DatespanMixin
from custom.intrahealth.filters import FicheLocationFilter2
from custom.intrahealth.reports.utils import IntraHealthLocationMixin, IntraHealthReportConfigMixin
from custom.intrahealth.sqldata import FicheData2
from memoized import memoized
from corehq.apps.locations.models import SQLLocation
class MultiReport(CustomProjectReport, IntraHealthLocationMixin, IntraHealthReportConfigMixin,
ProjectReportParametersMixin, DatespanMixin):
title = ''
base_template_path = "intrahealth/base_multi_report.html"
report_template_path = "intrahealth/multi_report.html"
flush_layout = True
export_format_override = 'csv'
@property
@memoized
def rendered_report_title(self):
return self.title
@property
@memoized
def data_providers(self):
return []
@property
def report_context(self):
context = {
'reports': [self.get_report_context(dp) for dp in self.data_providers],
'title': self.title
}
return context
def get_report_context(self, data_provider):
total_row = []
self.data_source = data_provider
if self.needs_filters:
headers = []
rows = []
else:
rows = data_provider.rows
headers = data_provider.headers
total_row = data_provider.total_row
context = dict(
report_table=dict(
title=data_provider.title,
slug=data_provider.slug,
headers=headers,
rows=rows,
total_row=total_row,
default_rows=self.default_rows,
datatables=data_provider.datatables,
fix_column=data_provider.fix_left_col
)
)
return context
@property
def export_table(self):
reports = [r['report_table'] for r in self.report_context['reports']]
return [self._export_table(r['title'], r['headers'], r['rows'], total_row=r['total_row']) for r in reports]
def _export_table(self, export_sheet_name, headers, formatted_rows, total_row=None):
def _unformat_row(row):
return [col.get("sort_key", col) if isinstance(col, dict) else col for col in row]
table = headers.as_export_table
rows = [_unformat_row(row) for row in formatted_rows]
replace = ''
#make headers and subheaders consistent
for k, v in enumerate(table[0]):
if v != ' ':
replace = v
else:
table[0][k] = replace
table.extend(rows)
if total_row:
table.append(_unformat_row(total_row))
return [export_sheet_name, table]
class FicheConsommationReport2(MultiReport):
name = "Fiche Consommation NEW"
slug = 'fiche_consommation2'
title = "Fiche Consommation NEW"
fields = [DatespanFilter, FicheLocationFilter2]
export_format_override = 'csv'
default_rows = 10
exportable = True
@property
@memoized
def data_providers(self):
config = self.report_config
locations = []
if 'region_id' in config:
locations = tuple(SQLLocation.objects.get(
location_id=config['region_id']
).archived_descendants().values_list('location_id', flat=True))
elif 'district_id' in config:
locations = tuple(SQLLocation.objects.get(
location_id=config['district_id']
).archived_descendants().values_list('location_id', flat=True))
if locations:
config.update({'archived_locations': locations})
return [
FicheData2(config=config),
]
|
the-stack_106_31157 | """Parallel coordinates plot showing posterior points with and without divergences marked."""
import numpy as np
from scipy.stats import rankdata
from ..data import convert_to_dataset
from ..labels import BaseLabeller
from ..sel_utils import xarray_to_ndarray
from ..rcparams import rcParams
from ..stats.stats_utils import stats_variance_2d as svar
from ..utils import _numba_var, _var_names, get_coords
from .plot_utils import get_plotting_function
def plot_parallel(
data,
var_names=None,
filter_vars=None,
coords=None,
figsize=None,
textsize=None,
legend=True,
colornd="k",
colord="C1",
shadend=0.025,
labeller=None,
ax=None,
norm_method=None,
backend=None,
backend_config=None,
backend_kwargs=None,
show=None,
):
"""
Plot parallel coordinates plot showing posterior points with and without divergences.
Described by https://arxiv.org/abs/1709.01449
Parameters
----------
data: obj
Any object that can be converted to an :class:`arviz.InferenceData` object
refer to documentation of :func:`arviz.convert_to_dataset` for details
var_names: list of variable names
Variables to be plotted, if `None` all variables are plotted. Can be used to change the
order of the plotted variables. Prefix the variables by ``~`` when you want to exclude
them from the plot.
filter_vars: {None, "like", "regex"}, optional, default=None
If `None` (default), interpret var_names as the real variables names. If "like",
interpret var_names as substrings of the real variables names. If "regex",
interpret var_names as regular expressions on the real variables names. A la
``pandas.filter``.
coords: mapping, optional
Coordinates of ``var_names`` to be plotted.
Passed to :meth:`xarray.Dataset.sel`.
figsize: tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be autoscaled based
on ``figsize``.
legend: bool
Flag for plotting legend (defaults to True)
colornd: valid matplotlib color
color for non-divergent points. Defaults to 'k'
colord: valid matplotlib color
color for divergent points. Defaults to 'C1'
shadend: float
Alpha blending value for non-divergent points, between 0 (invisible) and 1 (opaque).
Defaults to .025
labeller : labeller instance, optional
Class providing the method ``make_label_vert`` to generate the labels in the plot.
Read the :ref:`label_guide` for more details and usage examples.
ax: axes, optional
Matplotlib axes or bokeh figures.
norm_method: str
Method for normalizing the data. Methods include normal, minmax and rank.
Defaults to none.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_config: dict, optional
Currently specifies the bounds to use for bokeh axes.
Defaults to value set in ``rcParams``.
backend_kwargs: bool, optional
These are kwargs specific to the backend being used, passed to
:func:`matplotlib.pyplot.subplots` or
:func:`bokeh.plotting.figure`.
show: bool, optional
Call backend show function.
Returns
-------
axes: matplotlib axes or bokeh figures
See Also
--------
plot_pair : Plot a scatter, kde and/or hexbin matrix with (optional) marginals on the diagonal.
plot_trace : Plot distribution (histogram or kernel density estimates) and sampled values
or rank plot
Examples
--------
Plot default parallel plot
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('centered_eight')
>>> az.plot_parallel(data, var_names=["mu", "tau"])
Plot parallel plot with normalization
.. plot::
:context: close-figs
>>> az.plot_parallel(data, var_names=["mu", "tau"], norm_method='normal')
"""
if coords is None:
coords = {}
if labeller is None:
labeller = BaseLabeller()
# Get diverging draws and combine chains
divergent_data = convert_to_dataset(data, group="sample_stats")
_, diverging_mask = xarray_to_ndarray(
divergent_data,
var_names=("diverging",),
combined=True,
)
diverging_mask = np.squeeze(diverging_mask)
# Get posterior draws and combine chains
posterior_data = convert_to_dataset(data, group="posterior")
var_names = _var_names(var_names, posterior_data, filter_vars)
var_names, _posterior = xarray_to_ndarray(
get_coords(posterior_data, coords),
var_names=var_names,
combined=True,
label_fun=labeller.make_label_vert,
)
if len(var_names) < 2:
raise ValueError("Number of variables to be plotted must be 2 or greater.")
if norm_method is not None:
if norm_method == "normal":
mean = np.mean(_posterior, axis=1)
if _posterior.ndim <= 2:
standard_deviation = np.sqrt(_numba_var(svar, np.var, _posterior, axis=1))
else:
standard_deviation = np.std(_posterior, axis=1)
for i in range(0, np.shape(mean)[0]):
_posterior[i, :] = (_posterior[i, :] - mean[i]) / standard_deviation[i]
elif norm_method == "minmax":
min_elem = np.min(_posterior, axis=1)
max_elem = np.max(_posterior, axis=1)
for i in range(0, np.shape(min_elem)[0]):
_posterior[i, :] = ((_posterior[i, :]) - min_elem[i]) / (max_elem[i] - min_elem[i])
elif norm_method == "rank":
_posterior = rankdata(_posterior, axis=1, method="average")
else:
raise ValueError(f"{norm_method} is not supported. Use normal, minmax or rank.")
parallel_kwargs = dict(
ax=ax,
colornd=colornd,
colord=colord,
shadend=shadend,
diverging_mask=diverging_mask,
posterior=_posterior,
textsize=textsize,
var_names=var_names,
legend=legend,
figsize=figsize,
backend_kwargs=backend_kwargs,
backend_config=backend_config,
show=show,
)
if backend is None:
backend = rcParams["plot.backend"]
backend = backend.lower()
# TODO: Add backend kwargs
plot = get_plotting_function("plot_parallel", "parallelplot", backend)
ax = plot(**parallel_kwargs)
return ax
|
the-stack_106_31161 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Nekozilla is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nekozilla is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nekozilla. If not, see <https://www.gnu.org/licenses/>.
"""
Modules to load internally.
"""
import importlib
import inspect
import pkgutil
import typing
from neko3 import algorithms
from neko3 import logging_utils
from neko3 import properties
from neko3 import singleton
DEFAULT_START = "neko3.features"
class ModuleDetectionService(logging_utils.Loggable, metaclass=singleton.SingletonMeta):
@properties.cached_property()
def extension_candidates(self) -> typing.List[str]:
queue = [importlib.import_module(DEFAULT_START)]
successful_candidates = []
while queue:
module = queue.pop(0)
if inspect.isfunction(getattr(module, "setup", None)):
successful_candidates.append(module.__name__)
self.logger.debug("%s is a valid extension", module.__name__)
else:
self.logger.debug("%s is not a valid extension", module.__name__)
if hasattr(module, "__path__"):
for _, fqn, is_package in pkgutil.walk_packages(module.__path__, prefix=f"{module.__name__}."):
try:
queue.append(importlib.import_module(fqn))
except Exception as ex:
self.logger.exception("Failed to import %s", fqn, exc_info=ex)
return successful_candidates
def auto_load_modules(self, bot) -> typing.List[typing.Tuple[BaseException, str]]:
"""
Auto-loads any modules into the given bot.
If any extensions fail to load, then we do not halt. A traceback is printed
and we continue. Any errors are returned in a collection of 2-tuples paired
with the name of the corresponding extension that caused the error.
"""
errors = []
modules = self.extension_candidates
if not modules:
self.logger.warning("No modules were discovered.")
else:
with algorithms.TimeIt() as overall_timer:
for module in modules:
try:
with algorithms.TimeIt() as timer:
bot.load_extension(module)
except KeyboardInterrupt as ex:
raise ex from None
except Exception as ex:
self.logger.exception(f"Failed to load extension {module}", exc_info=ex)
errors.append((ex, module))
else:
self.logger.info(f"Loaded module {module} in {timer.time_taken * 1000:,.2f}ms")
self.logger.info(
f"Loaded {len(modules) - len(errors)}/{len(modules)} "
f"modules successfully in {overall_timer.time_taken * 1000:,.2f}ms. Bot now has {len(bot.extensions)} "
f"extensions loaded, with a total of {len(bot.cogs)} cogs and {len(bot.all_commands)} commands! "
f"Will now start bot."
)
return errors
|
the-stack_106_31162 | """
# SOURCE
https://github.com/parzival-roethlein/prmaya
# DESCRIPTION
Temporarily sets (Panel > Show > types) while:
- dragging the translate/rotate/scale tools
- timeline dragging
- timeline playback
The purpose is to have a clear view of the deforming geometry
Technical: Creates a scriptJob (SelectionChanged) and OpenMaya.MConditionMessage (playingBack)
# INSTALLATION
Copy this file ("prPanelCtx.py") into your ".../maya/scripts" folder.
# USAGE (It's recommended to run it in your userSetup file, so you don't have to think about it and treat it like a Maya setting)
import prPanelCtx
# AND EITHER
prPanelCtx.enable(manipulators=False) # prPanelCtx.disable()
# OR
prPanelCtx.toggle(manipulators=False)
# USAGE EXAMPLE: ANIMATION
import prPanelCtx
prPanelCtx.enable(manipulators=False, nurbsCurves=False, locators=False, controllers=False)
# USAGE EXAMPLE: RIGGING / if you want different settings for manipulator and playback
import prPanelCtx
prPanelCtx.enable(manipCtxKwargs={'manipulators': False}, playbackCtxKwargs={'nurbsCurves': False, 'locators': False, 'controllers': False})
# TODO
- UI
- shadingCtx (xray joints, default material, ...)
- LightingCtx
- switch scriptJob creation to onFileOpen and delete onFileClose? so playbackId does not get lost / multiple playback scriptjobs created
- (could not find an event for this) timeline context to start on mousedown, not only after time changes
- compare and maybe switch to MEvent version of manipScriptjob
# TODO (impossible without custom Maya API plugin as far as I know)
- camera orbit ctx (orbitCtx, draggerContext, panZoomCtx)
- manipCtx component selection support
- channelBox attribute drag support: mc.draggerContext doesn't seem to trigger from channelBox drag
- Universal Manipulator support: Doesn't seem to have a command, also tried mc.draggerContext('xformManipContext', ..)
"""
from collections import defaultdict
from functools import wraps
import logging
import maya.api.OpenMaya as om
import maya.cmds as mc
import maya.mel as mm
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
SCENE_PANEL_VALUES = defaultdict(dict)
MANIP_NODE_TYPE = None
MANIP_CTX_ID = None
PLAYBACK_CTX_ID = None
TOGGLE_STATUS = False
def enable(manipCtxKwargs=None, playbackCtxKwargs=None, **allCtxKwargs):
"""
:param manipCtxKwargs: settings for only the manipulators (translate, rotate, scale tools)
:param playbackCtxKwargs: settings for only the timeline interaction (drag, playback)
:param allCtxKwargs: settings for both: manipulators / timeline interaction
:return:
"""
global TOGGLE_STATUS
TOGGLE_STATUS = True
manipCtxKwargs = dict(allCtxKwargs.items() + (manipCtxKwargs or {}).items())
playbackCtxKwargs = dict(allCtxKwargs.items() + (playbackCtxKwargs or {}).items())
createManipCtx(**manipCtxKwargs)
createPlaybackCtx(**playbackCtxKwargs)
def disable():
global TOGGLE_STATUS
TOGGLE_STATUS = False
deleteManipCtx()
deletePlaybackCtx()
def toggle(displayInfo=True, **enableKwargs):
"""
:param displayInfo: display toggle status
:param enableKwargs: see def enable(..)
:return:
"""
global TOGGLE_STATUS
if not TOGGLE_STATUS:
enable(**enableKwargs)
if displayInfo:
om.MGlobal.displayInfo('ENABLED prPanelCtx')
else:
disable()
if displayInfo:
om.MGlobal.displayInfo('DISABLED prPanelCtx')
def preCommand(withFocus=False, **flags):
"""
:param withFocus: only affect panel with focus: cmds.getPanel(withFocus=...)
:param flags: see cmds.modelEditor() documentation
:return: list of affected panels
"""
global SCENE_PANEL_VALUES
SCENE_PANEL_VALUES.clear()
panels = mc.getPanel(type='modelPanel')
if withFocus:
focusedPanel = mc.getPanel(withFocus=True)
if focusedPanel not in panels:
# is this even possible?
logger.debug('focusedPanel: "{0}" not a modelPanel: [{1}]'.format(focusedPanel, panels))
return []
panels = [focusedPanel]
for panel in panels:
for flag, value in flags.iteritems():
sceneValue = mc.modelEditor(panel, q=True, **{flag: True})
if sceneValue != value:
mc.modelEditor(panel, e=True, **{flag: value})
SCENE_PANEL_VALUES[panel][flag] = sceneValue
return panels
def postCommand():
for panel, flags in SCENE_PANEL_VALUES.iteritems():
for flag, value in flags.iteritems():
mc.modelEditor(panel, e=True, **{flag: value})
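# Minimal usage sketch of the pre/post pair (the flags shown are examples of
# cmds.modelEditor show-flags, the same ones used in the module docstring):
#   panels = preCommand(withFocus=True, manipulators=False, nurbsCurves=False)
#   ...  # interact with the simplified viewport
#   postCommand()  # restores each panel's original Show settings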
def log(func):
@wraps(func)
def wrapper(*args, **kwargs):
logger.debug('{0}(args: {1}, kwargs: {2})'.format(func.__name__, args, kwargs))
result = func(*args, **kwargs)
logger.debug(' {0} output = {1}'.format(func.__name__, result))
return result
return wrapper
def setManipCommands(nodeType='transform', preFunc=str, postFunc=str):
mc.manipMoveContext('Move', e=True, preDragCommand=[preFunc, nodeType])
mc.manipMoveContext('Move', e=True, postDragCommand=[postFunc, nodeType])
mc.manipRotateContext('Rotate', e=True, preDragCommand=[preFunc, nodeType])
mc.manipRotateContext('Rotate', e=True, postDragCommand=[postFunc, nodeType])
mc.manipScaleContext('Scale', e=True, preDragCommand=[preFunc, nodeType])
mc.manipScaleContext('Scale', e=True, postDragCommand=[postFunc, nodeType])
# the drag commands are only active on reentering the context
currentCtx = mc.currentCtx()
if currentCtx in ['moveSuperContext', 'RotateSuperContext', 'scaleSuperContext']:
# only set context if needed
mc.setToolTo(currentCtx)
def manipCtxNodeTypeChange():
global MANIP_NODE_TYPE
selectedNodeTypes = mc.ls(sl=True, showType=True, type='transform')[1::2]
if not selectedNodeTypes or MANIP_NODE_TYPE in selectedNodeTypes:
return False
MANIP_NODE_TYPE = selectedNodeTypes[0]
return True
@log
def createManipCtx(**preCommandKwargs):
def createManipCtxDeferred():
deleteManipCtx()
def prPanelCtxManipScriptJob():
if manipCtxNodeTypeChange():
global MANIP_NODE_TYPE
setManipCommands(nodeType=MANIP_NODE_TYPE, preFunc=lambda: preCommand(**preCommandKwargs), postFunc=postCommand)
prPanelCtxManipScriptJob()
global MANIP_CTX_ID
MANIP_CTX_ID = mc.scriptJob(event=["SelectionChanged", prPanelCtxManipScriptJob])
# evalDeferred to be able to run in Maya userSetup file
mc.evalDeferred(createManipCtxDeferred)
@log
def getManipCtx():
scriptJob_ids = []
for scriptJob in mc.scriptJob(listJobs=True) or []:
if 'prPanelCtxManipScriptJob' in scriptJob:
scriptJobId = int(scriptJob[:scriptJob.find(':')])
scriptJob_ids.append(scriptJobId)
return scriptJob_ids
@log
def deleteManipCtx():
global MANIP_CTX_ID
if MANIP_CTX_ID:
mc.scriptJob(kill=MANIP_CTX_ID, force=True)
MANIP_CTX_ID = None
global MANIP_NODE_TYPE
MANIP_NODE_TYPE = None
setManipCommands()
invalid_ids = getManipCtx()
if invalid_ids:
for id_ in invalid_ids:
mc.scriptJob(kill=id_, force=True)
mm.eval('warning "Deleted manipCtx ids that should not have existed : {}"'.format(invalid_ids))
@log
def createPlaybackCtx(**preCommandKwargs):
def createPlaybackCtxDeferred():
deletePlaybackCtx()
def prPanelCtxCondition(state, **preCommandKwargs):
if state:
preCommand(**preCommandKwargs)
else:
postCommand()
global PLAYBACK_CTX_ID
PLAYBACK_CTX_ID = om.MConditionMessage.addConditionCallback('playingBack', lambda state, *args: prPanelCtxCondition(state, **preCommandKwargs))
# evalDeferred to be able to run in Maya userSetup file
mc.evalDeferred(createPlaybackCtxDeferred)
@log
def deletePlaybackCtx():
global PLAYBACK_CTX_ID
if PLAYBACK_CTX_ID:
om.MMessage.removeCallback(PLAYBACK_CTX_ID)
PLAYBACK_CTX_ID = None
|
the-stack_106_31163 | '''
Swap the min and max elements of the array.
'''
def exchangeMinMax(a):
minEl = float('inf')
minId = -1
maxEl = float('-inf')
maxId = -1
for i in range(len(a)):
if (a[i] > maxEl):
maxEl = a[i]
maxId = i
if (a[i] < minEl):
minEl = a[i]
minId = i
(a[minId], a[maxId]) = (a[maxId], a[minId])
a = list(map(int, input().split()))
exchangeMinMax(a)
print(*a)
|
the-stack_106_31164 | #!/usr/bin/env python
#
# DRAGONS
# gempy.scripts
# showpars.py
# -----------------------------------------------------------------------------
import sys
import textwrap
from argparse import ArgumentParser
from importlib import import_module
import astrodata
import gemini_instruments # noqa
from gempy import __version__
from recipe_system.mappers import primitiveMapper
# -----------------------------------------------------------------------------
def main(args=None):
parser = ArgumentParser(
description=f"Primitive parameter display, v{__version__}")
parser.add_argument("-v", "--version", action="version",
version=f"v{__version__}")
parser.add_argument('filename', help="filename")
parser.add_argument('primitive', help="primitive name")
parser.add_argument("-d", "--doc", action="store_true",
help="show the full docstring")
parser.add_argument('--adpkg', help='Name of the astrodata instrument '
'package to use if not gemini_instruments')
parser.add_argument('--drpkg', help='Name of the DRAGONS instrument '
'package to use if not geminidr')
args = parser.parse_args(args)
pobj, tags = get_pars(args.filename, adpkg=args.adpkg, drpkg=args.drpkg)
return showpars(pobj, args.primitive, tags, args.doc)
def get_pars(filename, adpkg=None, drpkg=None):
if adpkg is not None:
import_module(adpkg)
ad = astrodata.open(filename)
dtags = set(list(ad.tags)[:])
instpkg = ad.instrument(generic=True).lower()
if drpkg is None:
pm = primitiveMapper.PrimitiveMapper(dtags, instpkg)
else:
pm = primitiveMapper.PrimitiveMapper(dtags, instpkg, drpkg=drpkg)
pclass = pm.get_applicable_primitives()
pobj = pclass([ad])
return pobj, dtags
def showpars(pobj, primname, tags, show_docstring):
print(f"Dataset tagged as {tags}")
print(f"\nSettable parameters on '{primname}':")
print("=" * 40)
print(f"{'Name':20s} {'Current setting':20s} Description\n")
params = pobj.params[primname]
for k, v in params.items():
if not k.startswith("debug"):
print(f"{k:20s} {v!r:20s} {params.doc(k)}")
if show_docstring:
print(f"\nDocstring for '{primname}':")
print("=" * 40)
print(textwrap.dedent(getattr(pobj, primname).__doc__))
if __name__ == '__main__':
sys.exit(main())
|
the-stack_106_31165 | from typing import TYPE_CHECKING, Dict, List, Union
from modules.base import K8sServiceModuleProcessor, LocalK8sModuleProcessor
from modules.linker_helper import LinkerHelper
from opta.core.kubernetes import create_namespace_if_not_exists, list_namespaces
from opta.exceptions import UserErrors
if TYPE_CHECKING:
from opta.layer import Layer
from opta.module import Module
class LocalK8sServiceProcessor(LocalK8sModuleProcessor, K8sServiceModuleProcessor):
def __init__(self, module: "Module", layer: "Layer"):
if (module.aliased_type or module.type) != "local-k8s-service":
raise Exception(
f"The module {module.name} was expected to be of type local k8s service"
)
super(LocalK8sServiceProcessor, self).__init__(module, layer)
def pre_hook(self, module_idx: int) -> None:
list_namespaces()
create_namespace_if_not_exists(self.layer.name)
super(LocalK8sServiceProcessor, self).pre_hook(module_idx)
def process(self, module_idx: int) -> None:
# Update the secrets
self.module.data["link_secrets"] = self.module.data.get("link_secrets", [])
if isinstance(self.module.data.get("public_uri"), str):
self.module.data["public_uri"] = [self.module.data["public_uri"]]
current_envars: Union[List, Dict[str, str]] = self.module.data.get("env_vars", [])
if isinstance(current_envars, dict):
self.module.data["env_vars"] = [
{"name": x, "value": y} for x, y in current_envars.items()
]
# Handle links
for link_data in self.module.data.get("links", []):
if type(link_data) is str:
target_module_name = link_data
link_permissions = []
elif type(link_data) is dict:
target_module_name = list(link_data.keys())[0]
link_permissions = list(link_data.values())[0]
else:
raise UserErrors(
f"Link data {link_data} must be a string or map holding the permissions"
)
module = self.layer.get_module(target_module_name, module_idx)
if module is None:
raise Exception(
f"Did not find the desired module {target_module_name} "
"make sure that the module you're referencing is listed before the k8s "
"app one"
)
module_type = module.aliased_type or module.type
if module_type == "local-postgres":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["db_user", "db_name", "db_password", "db_host"],
)
elif module_type == "local-redis":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["cache_host"],
)
elif module_type == "local-mongodb":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["db_user", "db_name", "db_password", "db_host"],
)
elif module_type == "local-mysql":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=["db_user", "db_name", "db_password", "db_host"],
)
elif module_type == "mongodb-atlas":
LinkerHelper.handle_link(
module=self.module,
linked_module=module,
link_permissions=link_permissions,
required_vars=[
"db_password",
"db_user",
"mongodb_atlas_connection_string",
],
)
else:
raise Exception(
f"Unsupported module type for k8s service link: {module_type}"
)
if "image_tag" in self.layer.variables:
self.module.data["tag"] = self.layer.variables["image_tag"]
if "image_digest" in self.layer.variables:
self.module.data["digest"] = self.layer.variables["image_digest"]
seen = set()
self.module.data["link_secrets"] = [
seen.add(obj["name"]) or obj # type: ignore
for obj in self.module.data["link_secrets"]
if obj["name"] not in seen
]
super(LocalK8sServiceProcessor, self).process(module_idx)
|
the-stack_106_31166 | """The type file for image collection."""
from snovault import (
collection,
load_schema,
)
from .base import (
Item,
# lab_award_attribution_embed_list
)
from snovault.attachment import ItemWithAttachment
@collection(
name='images',
unique_key='image:filename',
properties={
'title': 'Image',
'description': 'Listing of portal images',
})
class Image(ItemWithAttachment, Item):
"""Class image,defines accepted file types."""
item_type = 'image'
schema = load_schema('encoded:schemas/image.json')
schema['properties']['attachment']['properties']['type']['enum'] = [
'image/png',
'image/jpeg',
'image/gif',
]
embedded_list = Item.embedded_list # + lab_award_attribution_embed_list
def unique_keys(self, properties):
"""smth."""
keys = super(Image, self).unique_keys(properties)
value = properties['attachment']['download']
keys.setdefault('image:filename', []).append(value)
return keys
|
the-stack_106_31171 | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe, json
from frappe.model.document import Document
from frappe import _
class DeletedDocument(Document):
pass
@frappe.whitelist()
def restore(name):
deleted = frappe.get_doc('Deleted Document', name)
doc = frappe.get_doc(json.loads(deleted.data))
try:
doc.insert()
except frappe.DocstatusTransitionError:
frappe.msgprint(_("Cancelled Document restored as Draft"))
doc.docstatus = 0
doc.insert()
doc.add_comment('Edit', _('restored {0} as {1}').format(deleted.deleted_name, doc.name))
deleted.new_name = doc.name
deleted.restored = 1
deleted.db_update()
	frappe.msgprint(_('Document Restored'))
|
the-stack_106_31172 | """Functions to rescale data depending on the user's needs"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import scale
################################################################################
# Functions for pandas objects
################################################################################
def standardize_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Return a copy of the original dataframe where all the numerical columns
have been individually standardized."""
new_cols = [
standardize_series(dataframe[col])
if pd.api.types.is_numeric_dtype(dataframe[col])
else dataframe[col]
for col in dataframe.columns
]
return pd.concat(new_cols, axis=1)
def standardize_columns(dataframe: pd.DataFrame, columns: list[str]) -> pd.DataFrame:
"""Return a copy of the original dataframe where the columns passed in the
columns argument have been individually standardized."""
new_cols = [
standardize_series(dataframe[col]) if col in columns else dataframe[col]
for col in dataframe.columns
]
return pd.concat(new_cols, axis=1)
def standardize_series(series: pd.Series) -> pd.Series:
"""Return the standardized version of the passed pandas series. If the
series is constant, a 0-filled series is returned."""
try:
sd = 1 if series.std() == 0 else series.std()
return (series - series.mean()) / sd
except TypeError as e:
raise TypeError(
"Cannot standardize a non-numerical series."
f"Series {series.name} is of type {series.dtype}"
) from e
def normalize_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Return a copy of the original dataframe where all the numerical columns
have been individually normalized.
    Normalization happens by dividing each element of the series by the maximum
value of the series itself."""
new_cols = [
normalize_series(dataframe[col])
if pd.api.types.is_numeric_dtype(dataframe[col])
else dataframe[col]
for col in dataframe.columns
]
    return pd.concat(new_cols, axis=1)
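def normalize_series(series: pd.Series) -> pd.Series:
    """Return the normalized version of the passed pandas series.
    Sketch of the helper referenced by normalize_dataframe above (it was
    missing here): per that docstring, each element is divided by the maximum
    value of the series; a zero maximum falls back to a divisor of 1.
    """
    try:
        max_value = series.max()
        divisor = 1 if max_value == 0 else max_value
        return series / divisor
    except TypeError as e:
        raise TypeError(
            "Cannot normalize a non-numerical series."
            f"Series {series.name} is of type {series.dtype}"
        ) from e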
################################################################################
# Functions for array objects
################################################################################
def standardize_array(array: np.ndarray, axis: int = 0) -> np.ndarray:
"""Return the standardized version of the passed numpy array.
Parameters
----------
array : np.ndarray
The array to be standardized.
axis : int, optional
If array is unidimensional - array.ndim == 1 - this parameter is
ignored. Otherwise, axis = 0 should be used if standardization is to
happen by column, 1 otherwise.
For general use, axis=0 is used when row represent the observations and
columns represent the variables.
Returns
-------
np.ndarray
The standardized version of the array.
"""
axis = 0 if array.ndim == 1 else axis
return scale(array, axis=axis)
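# Example (sketch): column-wise standardization (axis=0); each column of the
# result has zero mean and unit variance.
# >>> standardize_array(np.array([[1.0, 2.0], [3.0, 4.0]]))
# array([[-1., -1.],
#        [ 1.,  1.]])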
|
the-stack_106_31175 | # encoding='utf-8'
'''
/**
 * This is the solution to problem No. 79 on LeetCode;
 * the problem page is as follows:
 * https://leetcode-cn.com/problems/word-search
 * <p>
 * The description of the problem is as follows:
 * ==========================================================================================================
 * Given a 2D grid and a word, find out whether the word exists in the grid.
 * <p>
 * The word must be constructed from letters of sequentially adjacent cells, where "adjacent" cells are
 * those horizontally or vertically neighboring. The same cell may not be used more than once.
 * <p>
 * Example:
 * <p>
 * board =
 * [
 * ['A','B','C','E'],
 * ['S','F','C','S'],
 * ['A','D','E','E']
 * ]
 * <p>
 * Source: LeetCode
 * Link: https://leetcode-cn.com/problems/word-search
 * Copyright belongs to LeetCode (Lingkou Network). Please contact LeetCode for authorization before
 * commercial reprinting, and cite the source for non-commercial reprinting.
* ==========================================================================================================
*
* @author zhangyu ([email protected])
*/
'''
from typing import List
class Solution:
def word_search(self, board: List[List[str]], word: str) -> bool:
        '''
        Search the board for the given word.
        Args:
            board: 2-D list of characters
            word: target word
        Returns:
            bool: True if the word exists in the board
        '''
if not word or len(word) < 1:
return True
m, n = len(board), len(board[0])
visited = [[False for i in range(n)] for j in range(m)]
for i in range(m):
for j in range(n):
if board[i][j] == word[0] and self.dfs(board, word, i, j, 0, visited):
return True
return False
def dfs(self, board, word, i, j, index, visited):
        '''
        Depth-first search from cell (i, j) for word[index:].
        Args:
            board: 2-D list of characters
            word: target word
            i: row index
            j: column index
            index: index of the current character in word
            visited: matrix marking cells already in use on the current path
        Returns:
            bool
        '''
if index == len(word):
return True
if i >= len(board) or i < 0 or j >= len(board[0]) or j < 0 or board[i][j] != word[index] or visited[i][j]:
return False
visited[i][j] = True
if self.dfs(board, word, i - 1, j, index + 1, visited) or \
self.dfs(board, word, i + 1, j, index + 1, visited) or \
self.dfs(board, word, i, j - 1, index + 1, visited) or \
                self.dfs(board, word, i, j + 1, index + 1, visited):
return True
visited[i][j] = False
return False
if __name__ == '__main__':
board = [
['a', 'b'],
['c', 'd']
]
word = "abdc"
solution = Solution()
result = solution.word_search(board, word)
print(result)
assert result == True
|
the-stack_106_31176 | """`Domain models` setup script."""
import os
import re
from setuptools import setup
from setuptools import Command
# Getting description:
with open('README.rst') as readme_file:
description = readme_file.read()
# Getting requirements:
with open('requirements.txt') as requirements_file:
    requirements = requirements_file.readlines()
# Getting version:
with open('domain_models/__init__.py') as init_file:
version = re.search('VERSION = \'(.*?)\'', init_file.read()).group(1)
class PublishCommand(Command):
"""Setuptools `publish` command."""
description = "Publish current distribution to PyPi and create tag"
user_options = []
def initialize_options(self):
"""Init options."""
def finalize_options(self):
"""Finalize options."""
def run(self):
"""Command execution."""
self.run_command('sdist')
self.run_command('upload')
os.system('git tag -a {0} -m \'version {0}\''.format(version))
os.system('git push --tags')
setup(name='domain-models',
version=version,
description='Domain models framework for Python projects',
long_description=description,
author='ETS Labs',
author_email='[email protected]',
maintainer='ETS Labs',
maintainer_email='[email protected]',
url='https://github.com/ets-labs/python-domain-models',
bugtrack_url='https://github.com/ets-labs/python-domain-models/issues',
download_url='https://pypi.python.org/pypi/domain_models',
license='BSD New',
packages=['domain_models'],
platforms=['any'],
zip_safe=True,
install_requires=requirements,
cmdclass={
'publish': PublishCommand,
},
keywords=[
'Domain models',
'Domain modelling',
'Domain driven design',
'Domain driven development',
'DDD',
'Models',
],
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
])
|
the-stack_106_31179 | import numpy as np
import cv2
from enum import Enum
class Models(Enum):
ssd_lite = 'ssd_lite'
tiny_yolo = 'tiny_yolo'
tf_lite = 'tf_lite'
def __str__(self):
return self.value
@staticmethod
def from_string(s):
try:
return Models[s]
except KeyError:
raise ValueError()
MAX_AREA = 0.019 # max area from train set
RATIO_MEAN = 4.17
RATIO_STD = 1.06
def load_image_into_numpy_array(image_path):
image = cv2.imread(image_path)
return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
def affine_tile_corners(x0, y0, theta, wp, hp):
"""
Find corners of tile defined by affine transformation.
Find corners in original image for tile defined by affine transformation,
i.e. a rotation and translation, given (x0, y0) the upper left corner of
the tile, theta, the rotation angle of the tile in degrees, and the tile
width wp, and height hp.
Args:
x0 Horizontal coordinate of tile upper left corner (pixels)
y0 Vertical coordinate of tile upper left corner (pixels)
theta Rotation angle (degrees clockwise from vertical)
wp Tile width (pixels)
hp Tile height (pixels)
Returns:
corners Corner points, in clockwise order starting from upper left
corner, ndarray size (4, 2)
"""
rot_angle = np.radians(theta)
corners = np.array(
[[x0, y0],
[x0 + wp * np.cos(rot_angle), y0 + wp * np.sin(rot_angle)],
[x0 + wp * np.cos(rot_angle) - hp * np.sin(rot_angle),
y0 + wp * np.sin(rot_angle) + hp * np.cos(rot_angle)],
[x0 - hp * np.sin(rot_angle), y0 + hp * np.cos(rot_angle)]])
return corners
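# Example (no rotation): a 100x50 pixel tile anchored at (10, 20) gives the
# axis-aligned corners below, clockwise from the upper left.
# >>> affine_tile_corners(10, 20, 0, 100, 50)
# array([[ 10.,  20.],
#        [110.,  20.],
#        [110.,  70.],
#        [ 10.,  70.]])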
def tile_images(tiling_params, img):
res = []
original_sizes = []
offset = []
for cur_pt, cur_theta, cur_multiplier in zip(
tiling_params["upper_left_pts"],
tiling_params["thetas"],
tiling_params["multipliers"]):
cur_x0, cur_y0 = cur_pt
corners = affine_tile_corners(
cur_x0, cur_y0, cur_theta,
int(cur_multiplier * tiling_params["wp"]),
int(cur_multiplier * tiling_params["hp"])).astype(int)
top = min(corners[:, 1])
left = min(corners[:, 0])
bottom = max(corners[:, 1])
right = max(corners[:, 0])
h = bottom - top
w = right - left
tile = np.zeros((h, w, 3)).astype(np.uint8)
# crop tile from image
tmp = img[top: bottom, left: right]
tile[:tmp.shape[0], :tmp.shape[1], :3] = tmp
# resize the tile
tile = cv2.resize(tile, (tiling_params["wp"], tiling_params["hp"]),
interpolation=cv2.INTER_NEAREST)
# rotate the tile
image_center = tuple(np.array(tile.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, cur_theta, 1.0)
tmp = cv2.warpAffine(tile, rot_mat, (tile.shape[1::-1]),
flags=cv2.INTER_LINEAR)
original_sizes.append((bottom - top, right - left))
offset.append((top, left))
res.append(tmp)
return res, original_sizes, offset
def rotate_points(points, rotation_matrix):
# add ones
points_ones = np.append(points, 1)
# transform points
transformed_points = rotation_matrix.dot(points_ones)
return transformed_points# [:,::-1]
def split_img(img, m, n):
h, w, _ = img.shape
tile_h = h // m
tile_w = w // n
padding_h = tile_h // 10
padding_w = int(tile_w * 0.15)
res = []
original_sizes = []
offset = []
for i in range(0, m):
top = i * tile_h
bottom = min(h, (i + 1) * tile_h + padding_h)
for j in range(0, n):
left = j * tile_w
right = min(w, (j + 1) * tile_w + padding_w)
original_sizes.append((bottom - top, right - left))
offset.append((top, left))
res.append(cv2.resize(img[top: bottom, left: right, :],
(tile_w, tile_h),
interpolation=cv2.INTER_NEAREST))
return res, original_sizes, offset
def get_global_coord(point, img_size, original_size, offset):
return [int(point[0] / img_size[1] * original_size[1] + offset[1]), \
int(point[1] / img_size[0] * original_size[0] + offset[0])]
def non_max_suppression_fast(boxes, labels, overlap_thresh=0.5):
# if there are no boxes, return an empty list
boxes = np.array(boxes)
if len(boxes) == 0:
return [], []
# initialize the list of picked indexes
pick = []
# grab the coordinates of the bounding boxes
x1 = boxes[:, 0]
y1 = boxes[:, 1]
x2 = boxes[:, 2]
y2 = boxes[:, 3]
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(y2)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = 1. * (w * h) / area[idxs[:last]]
        # delete all indexes from the index list whose overlap exceeds the threshold
idxs = np.delete(idxs, np.concatenate(
([last], np.where(overlap > overlap_thresh)[0])))
# return only the bounding boxes that were picked using the
# integer data type
return boxes[pick], [labels[i] for i in pick]
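# Usage sketch (hypothetical boxes in (x1, y1, x2, y2) pixel order): the two
# strongly overlapping boxes collapse into one detection, the distant box is
# kept.
#   boxes = [[0, 0, 10, 10], [1, 1, 11, 11], [50, 50, 60, 60]]
#   kept_boxes, kept_labels = non_max_suppression_fast(boxes, ['a', 'b', 'c'])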
def filter_bb_by_size(bbs, labels, img_area):
res_bbs = []
res_labels = []
for bb, l in zip(bbs, labels):
s = (bb[2] - bb[0]) * (bb[3] - bb[1]) / img_area
r = (bb[3] - bb[1]) / (bb[2] - bb[0])
        if s < MAX_AREA * 1.1 and RATIO_MEAN - 3 * RATIO_STD < r < RATIO_MEAN + 3 * RATIO_STD:
res_bbs.append(bb)
res_labels.append(l)
return res_bbs, res_labels
|
the-stack_106_31180 | # -*- coding: utf-8 -*-
"""Functions for loading STARS and ACCACIA datasets of PMCs."""
from octant.core import OctantTrack
import pandas as pd
import mypaths
def read_stars_file(fname=mypaths.starsdir / "PolarLow_tracks_North_2002_2011"):
"""Read data into a `pandas.DataFrame` from the standard file."""
def _date_parser(*x):
return pd.datetime.strptime(" ".join(x), "%Y %m %d %H %M")
dtype_tuple = (int,) + 5 * (str,) + 4 * (float,)
dtypes = {k: v for k, v in enumerate(dtype_tuple)}
df = pd.read_csv(
fname,
dtype=dtypes,
sep=r"\s+",
skiprows=5,
date_parser=_date_parser,
parse_dates={"time": [1, 2, 3, 4, 5]},
)
return df
def read_all_stars():
"""Read both North and South subsets of STARS."""
df_n = read_stars_file(fname=mypaths.starsdir / "PolarLow_tracks_North_2002_2011")
df_s = read_stars_file(fname=mypaths.starsdir / "PolarLow_tracks_South_2002_2011")
df_s.N += df_n.N.values[-1]
return df_n.append(df_s).reset_index(drop=True)
def read_all_accacia():
"""Load ACCACIA tracks as `pandas.DataFrame`"""
def _date_parser(x):
return pd.datetime.strptime(x, "%Y%m%d%H%M")
df = pd.read_csv(
mypaths.acctracks,
delimiter="\t",
names=["N", "time", "lon", "lat"],
parse_dates=["time"],
date_parser=_date_parser,
)
return df
def prepare_tracks(obs_df, filter_funcs=[]):
"""Make a list of those tracks that satisfy the list of conditions."""
selected = []
for i, df in obs_df.groupby("N"):
ot = OctantTrack.from_df(df)
flag = True
for func in filter_funcs:
flag &= func(ot)
if flag:
selected.append(ot)
return selected
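# Usage sketch (hypothetical filter): keep only STARS tracks that last at
# least five time steps.
#   long_tracks = prepare_tracks(read_all_stars(),
#                                filter_funcs=[lambda ot: len(ot) >= 5])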
|
the-stack_106_31181 | """
!!!!!!!!!!!!!!!!!!!!!!!!!!!
DEPRECATED! DON'T USE THIS
!!!!!!!!!!!!!!!!!!!!!!!!!!!
This example is just here for aiding in migration to v0.2.0.
see examples/napari_image_arithmetic.py instead
"""
from enum import Enum
import numpy
from napari import Viewer, gui_qt
from napari.layers import Image
from magicgui import magicgui
class Operation(Enum):
"""A set of valid arithmetic operations for image_arithmetic.
To create nice dropdown menus with magicgui, it's best (but not required) to use
Enums. Here we make an Enum class for all of the image math operations we want to
allow.
"""
add = numpy.add
subtract = numpy.subtract
multiply = numpy.multiply
divide = numpy.divide
with gui_qt():
# create a viewer and add a couple image layers
viewer = Viewer()
viewer.add_image(numpy.random.rand(20, 20), name="Layer 1")
viewer.add_image(numpy.random.rand(20, 20), name="Layer 2")
# use the magic decorator! This takes a function, generates a custom Widget class
# using the function signature, and adds that class as an attribute named "Gui" on
# the function.
@magicgui(call_button="execute")
def image_arithmetic(layerA: Image, operation: Operation, layerB: Image) -> Image:
"""Add, subtracts, multiplies, or divides to image layers with equal shape."""
return operation.value(layerA.data, layerB.data)
# Gui() is DEPRECATED
# you should now just add the decorated function directly:
# viewer.window.add_dock_widget(image_arithmetic)
gui = image_arithmetic.Gui()
viewer.window.add_dock_widget(gui.native)
# NOTE: gui.native will not be necessary after
# https://github.com/napari/napari/pull/1994
# Use `reset_choices` instead now:
# viewer.layers.events.inserted.connect(image_arithmetic.reset_choices)
# viewer.layers.events.removed.connect(image_arithmetic.reset_choices)
viewer.layers.events.inserted.connect(gui.refresh_choices)
viewer.layers.events.removed.connect(gui.refresh_choices)
|
the-stack_106_31183 | from __future__ import print_function, absolute_import
from future.standard_library import hooks
import os
import copy
from shutil import rmtree
from tempfile import mkdtemp
from datetime import datetime
from numpy import empty, float32, datetime64, timedelta64, argmin, abs, array
from rasterio import open as rasopen
from rasterio.crs import CRS
from rasterio.transform import Affine
from rasterio.mask import mask
from rasterio.warp import reproject, Resampling
from rasterio.warp import calculate_default_transform as cdt
from xlrd.xldate import xldate_from_date_tuple
from xarray import open_dataset
from pandas import date_range, DataFrame
from bounds import GeoBounds
with hooks():
from urllib.parse import urlunparse
class Thredds(object):
""" Unidata's Thematic Real-time Environmental Distributed Data Services (THREDDS)
"""
def __init__(self, start=None, end=None, date=None,
bounds=None, target_profile=None, lat=None, lon=None
):
self.start = start
self.end = end
self.date = date
self.src_bounds_wsen = None
self.target_profile = target_profile
self.bbox = bounds
self.lat = lat
self.lon = lon
def conform(self, subset, out_file=None):
if subset.dtype != float32:
subset = array(subset, dtype=float32)
self._project(subset)
self._reproject()
self._mask()
result = self._resample()
if out_file:
self.save_raster(result, self.target_profile, output_filename=out_file)
return result
def _project(self, subset):
proj_path = os.path.join(self.temp_dir, 'tiled_proj.tif')
setattr(self, 'projection', proj_path)
profile = copy.deepcopy(self.target_profile)
profile['dtype'] = float32
bb = self.bbox.as_tuple()
if self.src_bounds_wsen:
bounds = self.src_bounds_wsen
else:
bounds = (bb[0], bb[1],
bb[2], bb[3])
dst_affine, dst_width, dst_height = cdt(CRS({'init': 'epsg:4326'}),
CRS({'init': 'epsg:4326'}),
subset.shape[1],
subset.shape[2],
*bounds,
)
profile.update({'crs': CRS({'init': 'epsg:4326'}),
'transform': dst_affine,
'width': dst_width,
'height': dst_height})
with rasopen(proj_path, 'w', **profile) as dst:
dst.write(subset)
def _reproject(self):
reproj_path = os.path.join(self.temp_dir, 'reproj.tif')
setattr(self, 'reprojection', reproj_path)
with rasopen(self.projection, 'r') as src:
src_profile = src.profile
src_bounds = src.bounds
src_array = src.read(1)
dst_profile = copy.deepcopy(self.target_profile)
dst_profile['dtype'] = float32
bounds = src_bounds
dst_affine, dst_width, dst_height = cdt(src_profile['crs'],
dst_profile['crs'],
src_profile['width'],
src_profile['height'],
*bounds)
dst_profile.update({'crs': dst_profile['crs'],
'transform': dst_affine,
'width': dst_width,
'height': dst_height})
with rasopen(reproj_path, 'w', **dst_profile) as dst:
dst_array = empty((1, dst_height, dst_width), dtype=float32)
reproject(src_array, dst_array, src_transform=src_profile['transform'],
src_crs=src_profile['crs'], dst_crs=self.target_profile['crs'],
dst_transform=dst_affine, resampling=Resampling.nearest,
num_threads=2)
dst.write(dst_array.reshape(1, dst_array.shape[1], dst_array.shape[2]))
def _mask(self):
mask_path = os.path.join(self.temp_dir, 'masked.tif')
with rasopen(self.reprojection) as src:
out_arr, out_trans = mask(src, self.clip_feature, crop=True,
all_touched=True)
out_meta = src.meta.copy()
out_meta.update({'driver': 'GTiff',
'height': out_arr.shape[1],
'width': out_arr.shape[2],
'transform': out_trans})
with rasopen(mask_path, 'w', **out_meta) as dst:
dst.write(out_arr)
setattr(self, 'mask', mask_path)
delattr(self, 'reprojection')
def _resample(self):
# home = os.path.expanduser('~')
# resample_path = os.path.join(home, 'images', 'sandbox', 'thredds', 'resamp_twx_{}.tif'.format(var))
resample_path = os.path.join(self.temp_dir, 'resample.tif')
with rasopen(self.mask, 'r') as src:
array = src.read(1)
profile = src.profile
res = src.res
try:
target_affine = self.target_profile['affine']
except KeyError:
target_affine = self.target_profile['transform']
target_res = target_affine.a
res_coeff = res[0] / target_res
new_array = empty(shape=(1, round(array.shape[0] * res_coeff),
round(array.shape[1] * res_coeff)), dtype=float32)
aff = src.affine
new_affine = Affine(aff.a / res_coeff, aff.b, aff.c, aff.d, aff.e / res_coeff, aff.f)
profile['transform'] = self.target_profile['transform']
profile['width'] = self.target_profile['width']
profile['height'] = self.target_profile['height']
profile['dtype'] = str(new_array.dtype)
delattr(self, 'mask')
with rasopen(resample_path, 'w', **profile) as dst:
reproject(array, new_array, src_transform=aff, dst_transform=new_affine, src_crs=src.crs,
dst_crs=src.crs, resampling=Resampling.nearest)
dst.write(new_array)
with rasopen(resample_path, 'r') as src:
arr = src.read()
return arr
def _date_index(self):
date_ind = date_range(self.start, self.end, freq='d')
return date_ind
@staticmethod
def _dtime_to_dtime64(dtime):
dtnumpy = datetime64(dtime).astype(datetime64)
return dtnumpy
@staticmethod
def save_raster(arr, geometry, output_filename):
try:
arr = arr.reshape(1, arr.shape[1], arr.shape[2])
except IndexError:
arr = arr.reshape(1, arr.shape[0], arr.shape[1])
geometry['dtype'] = str(arr.dtype)
with rasopen(output_filename, 'w', **geometry) as dst:
dst.write(arr)
return None
class TopoWX(Thredds):
""" Twix
TopoWX Surface Temperature, return as numpy array in daily stack unless modified.
    Available variables: ['tmin', 'tmax']
    ----------
    Observation elements to access. Currently available elements:
    - 'tmin' : daily minimum air temperature [C]
    - 'tmax' : daily maximum air temperature [C]
:param start: datetime object start of period of data
:param end: datetime object end of period of data
    :param variables: List of available variables. At least one.
:param date: single-day datetime date object
:param bounds: met.misc.BBox object representing spatial bounds, default to conterminous US
:return: numpy.ndarray """
def __init__(self, **kwargs):
Thredds.__init__(self)
self.temp_dir = mkdtemp()
for key, val in kwargs.items():
setattr(self, key, val)
self.service = 'cida.usgs.gov'
self.scheme = 'https'
self.variables = ['tmin', 'tmax']
if self.date:
self.start = self.date
self.end = self.date
self.year = self.start.year
def get_data_subset(self, grid_conform=False, var='tmax',
out_file=None, temp_units_out='C'):
if var not in self.variables:
raise TypeError('Must choose from "tmax" or "tmin"..')
url = self._build_url(var)
xray = open_dataset(url)
start = self._dtime_to_dtime64(self.start)
end = self._dtime_to_dtime64(self.end)
if self.date:
end = end + timedelta64(1, 'D')
# find index and value of bounds
        # pad the bounding box by 1 degree to add a small buffer for this 800 m res data
north_ind = argmin(abs(xray.lat.values - (self.bbox.north + 1.)))
south_ind = argmin(abs(xray.lat.values - (self.bbox.south - 1.)))
west_ind = argmin(abs(xray.lon.values - (self.bbox.west - 1.)))
east_ind = argmin(abs(xray.lon.values - (self.bbox.east + 1.)))
north_val = xray.lat.values[north_ind]
south_val = xray.lat.values[south_ind]
west_val = xray.lon.values[west_ind]
east_val = xray.lon.values[east_ind]
setattr(self, 'src_bounds_wsen', (west_val, south_val,
east_val, north_val))
subset = xray.loc[dict(time=slice(start, end),
lat=slice(north_val, south_val),
lon=slice(west_val, east_val))]
date_ind = self._date_index()
subset['time'] = date_ind
if not grid_conform:
setattr(self, var, subset)
else:
if var == 'tmin':
arr = subset.tmin.values
elif var == 'tmax':
arr = subset.tmax.values
else:
arr = None
if temp_units_out == 'K':
arr += 273.15
conformed_array = self.conform(arr, out_file=out_file)
return conformed_array
def _build_url(self, var):
# ParseResult('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
url = urlunparse([self.scheme, self.service,
'/thredds/dodsC/topowx?crs,lat[0:1:3249],lon[0:1:6999],{},'
'time'.format(var),
'', '', ''])
return url
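# Usage sketch (hypothetical inputs): `bounds` is a GeoBounds instance and
# `target_profile`/`clip_feature` describe the target grid; none of these
# names come from this module.
#   twx = TopoWX(date=datetime(2011, 7, 1), bbox=bounds,
#                target_profile=target_profile, clip_feature=clip_feature)
#   tmax = twx.get_data_subset(grid_conform=True, var='tmax',
#                              out_file='tmax_20110701.tif')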
class GridMet(Thredds):
""" U of I Gridmet
Return as numpy array per met variable in daily stack unless modified.
Available variables: ['bi', 'elev', 'erc', 'fm100', fm1000', 'pdsi', 'pet', 'pr', 'rmax', 'rmin', 'sph', 'srad',
'th', 'tmmn', 'tmmx', 'vs']
----------
Observation elements to access. Currently available elements:
- 'bi' : burning index [-]
- 'elev' : elevation above sea level [m]
- 'erc' : energy release component [-]
- 'fm100' : 100-hour dead fuel moisture [%]
- 'fm1000' : 1000-hour dead fuel moisture [%]
    - 'pdsi' : Palmer Drought Severity Index [-]
- 'pet' : daily reference potential evapotranspiration [mm]
- 'pr' : daily accumulated precipitation [mm]
- 'rmax' : daily maximum relative humidity [%]
- 'rmin' : daily minimum relative humidity [%]
- 'sph' : daily mean specific humidity [kg/kg]
- 'prcp' : daily total precipitation [mm]
- 'srad' : daily mean downward shortwave radiation at surface [W m-2]
- 'th' : daily mean wind direction clockwise from North [degrees]
- 'tmmn' : daily minimum air temperature [K]
- 'tmmx' : daily maximum air temperature [K]
- 'vs' : daily mean wind speed [m -s]
:param start: datetime object start of period of data
:param end: datetime object end of period of data
    :param variables: List of available variables. At least one.
:param date: single-day datetime date object
:param bbox: met.misc.BBox object representing spatial bounds, default to conterminous US
:return: numpy.ndarray
Must have either start and end, or date.
Must have at least one valid variable. Invalid variables will be excluded gracefully.
note: NetCDF dates are in xl '1900' format, i.e., number of days since 1899-12-31 23:59
xlrd.xldate handles this for the time being
"""
def __init__(self, variable=None, date=None, start=None, end=None, bbox=None,
target_profile=None, clip_feature=None):
Thredds.__init__(self)
self.date = date
self.start = start
self.end = end
self.bbox = bbox
self.target_profile = target_profile
self.service = 'thredds.northwestknowledge.net:8080'
self.scheme = 'http'
self.temp_dir = mkdtemp()
self.variable = variable
self.available = ['elev', 'pr', 'rmax', 'rmin', 'sph', 'srad',
'th', 'tmmn', 'tmmx', 'pet', 'vs', 'erc', 'bi',
'fm100', 'pdsi']
if self.variable not in self.available:
Warning('Variable {} is not available'.
format(self.variable))
self.kwords = {'bi': 'burning_index_g',
'elev': '',
'erc': 'energy_release_component-g',
'fm100': 'dead_fuel_moisture_100hr',
'fm1000': 'dead_fuel_moisture_1000hr',
'pdsi': 'palmer_drought_severity_index',
'etr': 'potential_evapotranspiration',
'pet': 'potential_evapotranspiration',
'pr': 'precipitation_amount',
'rmax': 'relative_humidity',
'rmin': 'relative_humidity',
'sph': 'specific_humidity',
'srad': 'surface_downwelling_shortwave_flux_in_air',
'th': 'wind_from_direction',
'tmmn': 'air_temperature',
'tmmx': 'air_temperature',
'vs': 'wind_speed', }
if self.date:
self.start = self.date
self.end = self.date
self.year = self.start.year
if not self.bbox and not self.lat:
self.bbox = GeoBounds()
def subset_daily_tif(self, out_filename=None):
url = self._build_url()
url = url + '#fillmismatch'
xray = open_dataset(url)
north_ind = argmin(abs(xray.lat.values - (self.bbox.north + 1.)))
south_ind = argmin(abs(xray.lat.values - (self.bbox.south - 1.)))
west_ind = argmin(abs(xray.lon.values - (self.bbox.west - 1.)))
east_ind = argmin(abs(xray.lon.values - (self.bbox.east + 1.)))
north_val = xray.lat.values[north_ind]
south_val = xray.lat.values[south_ind]
west_val = xray.lon.values[west_ind]
east_val = xray.lon.values[east_ind]
setattr(self, 'src_bounds_wsen', (west_val, south_val,
east_val, north_val))
if self.variable != 'elev':
xray = xray.rename({'day': 'time'})
subset = xray.loc[dict(time=slice(self.start, self.end),
lat=slice(north_val, south_val),
lon=slice(west_val, east_val))]
date_ind = self._date_index()
subset['time'] = date_ind
setattr(self, 'width', subset.dims['lon'])
setattr(self, 'height', subset.dims['lat'])
arr = subset[self.kwords[self.variable]].values
arr = arr.reshape(arr.shape[1], arr.shape[2]).transpose()
arr = arr.reshape(1, arr.shape[0], arr.shape[1])
arr = self.conform(arr, out_file=out_filename)
rmtree(self.temp_dir)
return arr
else:
subset = xray.loc[dict(lat=slice((self.bbox.north + 1),
(self.bbox.south - 1)),
lon=slice((self.bbox.west - 1),
(self.bbox.east + 1)))]
setattr(self, 'width', subset.dims['lon'])
setattr(self, 'height', subset.dims['lat'])
arr = subset.elevation.values
arr = self.conform(arr, out_file=out_filename)
return arr
def subset_nc(self, out_filename=None, return_array=False):
url = self._build_url()
url = url + '#fillmismatch'
xray = open_dataset(url)
north_ind = argmin(abs(xray.lat.values - (self.bbox.north + 1.)))
south_ind = argmin(abs(xray.lat.values - (self.bbox.south - 1.)))
west_ind = argmin(abs(xray.lon.values - (self.bbox.west - 1.)))
east_ind = argmin(abs(xray.lon.values - (self.bbox.east + 1.)))
north_val = xray.lat.values[north_ind]
south_val = xray.lat.values[south_ind]
west_val = xray.lon.values[west_ind]
east_val = xray.lon.values[east_ind]
setattr(self, 'src_bounds_wsen', (west_val, south_val,
east_val, north_val))
if self.variable != 'elev':
xray = xray.rename({'day': 'time'})
subset = xray.loc[dict(time=slice(self.start, self.end),
lat=slice(north_val, south_val),
lon=slice(west_val, east_val))]
date_ind = self._date_index()
subset['time'] = date_ind
if out_filename:
subset.to_netcdf(out_filename)
if return_array:
return subset
else:
subset = xray.loc[dict(lat=slice((self.bbox.north + 1),
(self.bbox.south - 1)),
lon=slice((self.bbox.west - 1),
(self.bbox.east + 1)))]
if out_filename:
subset.to_netcdf(out_filename)
if return_array:
return subset
def get_point_timeseries(self):
url = self._build_url()
xray = open_dataset(url)
subset = xray.sel(lon=self.lon, lat=self.lat, method='nearest')
subset = subset.loc[dict(day=slice(self.start, self.end))]
subset.rename({'day': 'time'}, inplace=True)
date_ind = self._date_index()
subset['time'] = date_ind
time = subset['time'].values
series = subset[self.kwords[self.variable]].values
df = DataFrame(data=series, index=time)
df.columns = [self.variable]
return df
def _build_url(self):
# ParseResult('scheme', 'netloc', 'path', 'params', 'query', 'fragment')
if self.variable == 'elev':
url = urlunparse([self.scheme, self.service,
'/thredds/dodsC/MET/{0}/metdata_elevationdata.nc'.format(self.variable),
'', '', ''])
else:
url = urlunparse([self.scheme, self.service,
'/thredds/dodsC/MET/{0}/{0}_{1}.nc'.format(self.variable, self.year),
'', '', ''])
return url
def write_netcdf(self, outputroot):
url = self._build_url()
xray = open_dataset(url)
if self.variable != 'elev':
subset = xray.loc[dict(day=slice(self.start, self.end))]
subset.rename({'day': 'time'}, inplace=True)
else:
subset = xray
subset.to_netcdf(path=outputroot, engine='netcdf4')
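# Usage sketch (hypothetical period and point): a daily precipitation series
# at a single lon/lat; lat/lon are plain attributes here, set after
# construction since the constructor does not accept them.
#   gridmet = GridMet(variable='pr', start=datetime(2015, 1, 1),
#                     end=datetime(2015, 12, 31))
#   gridmet.lat, gridmet.lon = 46.6, -112.0
#   df = gridmet.get_point_timeseries()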
# ========================= EOF ====================================================================
|
the-stack_106_31184 | ################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
import time
from functools import reduce
from itertools import chain
from typing import List, Tuple, Any, Dict
from apache_beam.coders import PickleCoder
from pyflink.datastream.state import ValueStateDescriptor, ValueState, ListStateDescriptor, \
ListState, MapStateDescriptor, MapState, ReducingStateDescriptor, ReducingState, \
AggregatingStateDescriptor, AggregatingState
from pyflink.datastream import TimeDomain, TimerService
from pyflink.datastream.functions import RuntimeContext, ProcessFunction, KeyedProcessFunction
from pyflink.fn_execution import flink_fn_execution_pb2, operation_utils
from pyflink.fn_execution.state_data_view import extract_data_view_specs
from pyflink.fn_execution.beam.beam_coders import DataViewFilterCoder
from pyflink.fn_execution.operation_utils import extract_user_defined_aggregate_function
from pyflink.fn_execution.state_impl import RemoteKeyedStateBackend
try:
from pyflink.fn_execution.aggregate_fast import RowKeySelector, SimpleAggsHandleFunction, \
GroupAggFunction, DistinctViewDescriptor, SimpleTableAggsHandleFunction, \
GroupTableAggFunction
except ImportError:
from pyflink.fn_execution.aggregate_slow import RowKeySelector, SimpleAggsHandleFunction, \
GroupAggFunction, DistinctViewDescriptor, SimpleTableAggsHandleFunction,\
GroupTableAggFunction
from pyflink.metrics.metricbase import GenericMetricGroup
from pyflink.table import FunctionContext, Row
# table operations
SCALAR_FUNCTION_URN = "flink:transform:scalar_function:v1"
TABLE_FUNCTION_URN = "flink:transform:table_function:v1"
STREAM_GROUP_AGGREGATE_URN = "flink:transform:stream_group_aggregate:v1"
STREAM_GROUP_TABLE_AGGREGATE_URN = "flink:transform:stream_group_table_aggregate:v1"
PANDAS_AGGREGATE_FUNCTION_URN = "flink:transform:aggregate_function:arrow:v1"
PANDAS_BATCH_OVER_WINDOW_AGGREGATE_FUNCTION_URN = \
"flink:transform:batch_over_window_aggregate_function:arrow:v1"
# datastream operations
DATA_STREAM_STATELESS_FUNCTION_URN = "flink:transform:datastream_stateless_function:v1"
PROCESS_FUNCTION_URN = "flink:transform:process_function:v1"
KEYED_PROCESS_FUNCTION_URN = "flink:transform:keyed_process_function:v1"
class Operation(abc.ABC):
def __init__(self, spec):
super(Operation, self).__init__()
self.spec = spec
self.func, self.user_defined_funcs = self.generate_func(self.spec.serialized_fn)
if self.spec.serialized_fn.metric_enabled:
self.base_metric_group = GenericMetricGroup(None, None)
else:
self.base_metric_group = None
def open(self):
for user_defined_func in self.user_defined_funcs:
if hasattr(user_defined_func, 'open'):
user_defined_func.open(FunctionContext(self.base_metric_group))
def close(self):
for user_defined_func in self.user_defined_funcs:
if hasattr(user_defined_func, 'close'):
user_defined_func.close()
def finish(self):
self._update_gauge(self.base_metric_group)
def _update_gauge(self, base_metric_group):
if base_metric_group is not None:
for name in base_metric_group._flink_gauge:
flink_gauge = base_metric_group._flink_gauge[name]
beam_gauge = base_metric_group._beam_gauge[name]
beam_gauge.set(flink_gauge())
for sub_group in base_metric_group._sub_groups:
self._update_gauge(sub_group)
@abc.abstractmethod
def generate_func(self, serialized_fn) -> Tuple:
pass
class ScalarFunctionOperation(Operation):
def __init__(self, spec):
super(ScalarFunctionOperation, self).__init__(spec)
def generate_func(self, serialized_fn):
"""
Generates a lambda function based on udfs.
:param serialized_fn: serialized function which contains a list of the proto
representation of the Python :class:`ScalarFunction`
:return: the generated lambda function
"""
scalar_functions, variable_dict, user_defined_funcs = reduce(
lambda x, y: (
','.join([x[0], y[0]]),
dict(chain(x[1].items(), y[1].items())),
x[2] + y[2]),
[operation_utils.extract_user_defined_function(udf) for udf in serialized_fn.udfs])
generate_func = eval('lambda value: [%s]' % scalar_functions, variable_dict)
return generate_func, user_defined_funcs
class TableFunctionOperation(Operation):
def __init__(self, spec):
super(TableFunctionOperation, self).__init__(spec)
def generate_func(self, serialized_fn):
"""
Generates a lambda function based on udtfs.
:param serialized_fn: serialized function which contains the proto representation of
the Python :class:`TableFunction`
:return: the generated lambda function
"""
table_function, variable_dict, user_defined_funcs = \
operation_utils.extract_user_defined_function(serialized_fn.udfs[0])
generate_func = eval('lambda value: %s' % table_function, variable_dict)
return generate_func, user_defined_funcs
class PandasAggregateFunctionOperation(Operation):
def __init__(self, spec):
super(PandasAggregateFunctionOperation, self).__init__(spec)
def generate_func(self, serialized_fn):
pandas_functions, variable_dict, user_defined_funcs = reduce(
lambda x, y: (
','.join([x[0], y[0]]),
dict(chain(x[1].items(), y[1].items())),
x[2] + y[2]),
[operation_utils.extract_user_defined_function(udf, True)
for udf in serialized_fn.udfs])
variable_dict['wrap_pandas_result'] = operation_utils.wrap_pandas_result
generate_func = eval('lambda value: wrap_pandas_result([%s])' %
pandas_functions, variable_dict)
return generate_func, user_defined_funcs
class PandasBatchOverWindowAggregateFunctionOperation(Operation):
def __init__(self, spec):
super(PandasBatchOverWindowAggregateFunctionOperation, self).__init__(spec)
self.windows = [window for window in self.spec.serialized_fn.windows]
# the index among all the bounded range over window
self.bounded_range_window_index = [-1 for _ in range(len(self.windows))]
# Whether the specified position window is a bounded range window.
self.is_bounded_range_window = []
window_types = flink_fn_execution_pb2.OverWindow
bounded_range_window_nums = 0
for i, window in enumerate(self.windows):
window_type = window.window_type
if (window_type is window_types.RANGE_UNBOUNDED_PRECEDING) or (
window_type is window_types.RANGE_UNBOUNDED_FOLLOWING) or (
window_type is window_types.RANGE_SLIDING):
self.bounded_range_window_index[i] = bounded_range_window_nums
self.is_bounded_range_window.append(True)
bounded_range_window_nums += 1
else:
self.is_bounded_range_window.append(False)
def generate_func(self, serialized_fn):
user_defined_funcs = []
self.window_indexes = []
self.mapper = []
for udf in serialized_fn.udfs:
pandas_agg_function, variable_dict, user_defined_func, window_index = \
operation_utils.extract_over_window_user_defined_function(udf)
user_defined_funcs.extend(user_defined_func)
self.window_indexes.append(window_index)
self.mapper.append(eval('lambda value: %s' % pandas_agg_function, variable_dict))
return self.wrapped_over_window_function, user_defined_funcs
def wrapped_over_window_function(self, boundaries_series):
import pandas as pd
OverWindow = flink_fn_execution_pb2.OverWindow
input_series = boundaries_series[-1]
# the row number of the arrow format data
input_cnt = len(input_series[0])
results = []
# loop every agg func
for i in range(len(self.window_indexes)):
window_index = self.window_indexes[i]
# the over window which the agg function belongs to
window = self.windows[window_index]
window_type = window.window_type
func = self.mapper[i]
result = []
if self.is_bounded_range_window[window_index]:
window_boundaries = boundaries_series[
self.bounded_range_window_index[window_index]]
if window_type is OverWindow.RANGE_UNBOUNDED_PRECEDING:
# range unbounded preceding window
for j in range(input_cnt):
end = window_boundaries[j]
series_slices = [s.iloc[:end] for s in input_series]
result.append(func(series_slices))
elif window_type is OverWindow.RANGE_UNBOUNDED_FOLLOWING:
# range unbounded following window
for j in range(input_cnt):
start = window_boundaries[j]
series_slices = [s.iloc[start:] for s in input_series]
result.append(func(series_slices))
else:
# range sliding window
for j in range(input_cnt):
start = window_boundaries[j * 2]
end = window_boundaries[j * 2 + 1]
series_slices = [s.iloc[start:end] for s in input_series]
result.append(func(series_slices))
else:
# unbounded range window or unbounded row window
if (window_type is OverWindow.RANGE_UNBOUNDED) or (
window_type is OverWindow.ROW_UNBOUNDED):
series_slices = [s.iloc[:] for s in input_series]
func_result = func(series_slices)
result = [func_result for _ in range(input_cnt)]
elif window_type is OverWindow.ROW_UNBOUNDED_PRECEDING:
# row unbounded preceding window
window_end = window.upper_boundary
for j in range(input_cnt):
end = min(j + window_end + 1, input_cnt)
series_slices = [s.iloc[: end] for s in input_series]
result.append(func(series_slices))
elif window_type is OverWindow.ROW_UNBOUNDED_FOLLOWING:
# row unbounded following window
window_start = window.lower_boundary
for j in range(input_cnt):
start = max(j + window_start, 0)
series_slices = [s.iloc[start: input_cnt] for s in input_series]
result.append(func(series_slices))
else:
# row sliding window
window_start = window.lower_boundary
window_end = window.upper_boundary
for j in range(input_cnt):
start = max(j + window_start, 0)
end = min(j + window_end + 1, input_cnt)
series_slices = [s.iloc[start: end] for s in input_series]
result.append(func(series_slices))
results.append(pd.Series(result))
return results
class StatefulFunctionOperation(Operation):
def __init__(self, spec, keyed_state_backend):
self.keyed_state_backend = keyed_state_backend
super(StatefulFunctionOperation, self).__init__(spec)
def finish(self):
super().finish()
if self.keyed_state_backend:
self.keyed_state_backend.commit()
TRIGGER_TIMER = 1
class AbstractStreamGroupAggregateOperation(StatefulFunctionOperation):
def __init__(self, spec, keyed_state_backend):
self.generate_update_before = spec.serialized_fn.generate_update_before
self.grouping = [i for i in spec.serialized_fn.grouping]
self.group_agg_function = None
        # If the upstream generates retract messages, we need to add an additional count1() agg
        # to track the current accumulated message count. If all the messages are retracted, we
        # need to send a DELETE message downstream.
self.index_of_count_star = spec.serialized_fn.index_of_count_star
self.count_star_inserted = spec.serialized_fn.count_star_inserted
self.state_cache_size = spec.serialized_fn.state_cache_size
self.state_cleaning_enabled = spec.serialized_fn.state_cleaning_enabled
self.data_view_specs = extract_data_view_specs(spec.serialized_fn.udfs)
super(AbstractStreamGroupAggregateOperation, self).__init__(spec, keyed_state_backend)
def open(self):
self.group_agg_function.open(FunctionContext(self.base_metric_group))
def close(self):
self.group_agg_function.close()
def generate_func(self, serialized_fn):
user_defined_aggs = []
input_extractors = []
filter_args = []
# stores the indexes of the distinct views which the agg functions used
distinct_indexes = []
# stores the indexes of the functions which share the same distinct view
# and the filter args of them
distinct_info_dict = {}
for i in range(len(serialized_fn.udfs)):
user_defined_agg, input_extractor, filter_arg, distinct_index = \
extract_user_defined_aggregate_function(
i, serialized_fn.udfs[i], distinct_info_dict)
user_defined_aggs.append(user_defined_agg)
input_extractors.append(input_extractor)
filter_args.append(filter_arg)
distinct_indexes.append(distinct_index)
distinct_view_descriptors = {}
for agg_index_list, filter_arg_list in distinct_info_dict.values():
if -1 in filter_arg_list:
# If there is a non-filter call, we don't need to check filter or not before
# writing the distinct data view.
filter_arg_list = []
# use the agg index of the first function as the key of shared distinct view
distinct_view_descriptors[agg_index_list[0]] = DistinctViewDescriptor(
input_extractors[agg_index_list[0]], filter_arg_list)
key_selector = RowKeySelector(self.grouping)
if len(self.data_view_specs) > 0:
state_value_coder = DataViewFilterCoder(self.data_view_specs)
else:
state_value_coder = PickleCoder()
self.group_agg_function = self.create_process_function(
user_defined_aggs, input_extractors, filter_args, distinct_indexes,
distinct_view_descriptors, key_selector, state_value_coder)
return self.process_element_or_timer, []
def process_element_or_timer(self, input_datas: List[Tuple[int, Row, int, Row]]):
# the structure of the input data:
# [element_type, element(for process_element), timestamp(for timer), key(for timer)]
# all the fields are nullable except the "element_type"
for input_data in input_datas:
if input_data[0] != TRIGGER_TIMER:
self.group_agg_function.process_element(input_data[1])
else:
self.group_agg_function.on_timer(input_data[3])
return self.group_agg_function.finish_bundle()
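    # Illustrative shape of `input_datas` (values are hypothetical): a normal
    # element arrives as (element_type, row, None, None) with element_type
    # different from TRIGGER_TIMER, while a timer firing arrives as
    # (TRIGGER_TIMER, None, timestamp, key).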
@abc.abstractmethod
def create_process_function(self, user_defined_aggs, input_extractors, filter_args,
distinct_indexes, distinct_view_descriptors, key_selector,
state_value_coder):
pass
class StreamGroupAggregateOperation(AbstractStreamGroupAggregateOperation):
def __init__(self, spec, keyed_state_backend):
super(StreamGroupAggregateOperation, self).__init__(spec, keyed_state_backend)
def create_process_function(self, user_defined_aggs, input_extractors, filter_args,
distinct_indexes, distinct_view_descriptors, key_selector,
state_value_coder):
aggs_handler_function = SimpleAggsHandleFunction(
user_defined_aggs,
input_extractors,
self.index_of_count_star,
self.count_star_inserted,
self.data_view_specs,
filter_args,
distinct_indexes,
distinct_view_descriptors)
return GroupAggFunction(
aggs_handler_function,
key_selector,
self.keyed_state_backend,
state_value_coder,
self.generate_update_before,
self.state_cleaning_enabled,
self.index_of_count_star)
class StreamGroupTableAggregateOperation(AbstractStreamGroupAggregateOperation):
def __init__(self, spec, keyed_state_backend):
super(StreamGroupTableAggregateOperation, self).__init__(spec, keyed_state_backend)
def create_process_function(self, user_defined_aggs, input_extractors, filter_args,
distinct_indexes, distinct_view_descriptors, key_selector,
state_value_coder):
aggs_handler_function = SimpleTableAggsHandleFunction(
user_defined_aggs,
input_extractors,
self.data_view_specs,
filter_args,
distinct_indexes,
distinct_view_descriptors)
return GroupTableAggFunction(
aggs_handler_function,
key_selector,
self.keyed_state_backend,
state_value_coder,
self.generate_update_before,
self.state_cleaning_enabled,
self.index_of_count_star)
class DataStreamStatelessFunctionOperation(Operation):
def __init__(self, spec):
super(DataStreamStatelessFunctionOperation, self).__init__(spec)
def open(self):
for user_defined_func in self.user_defined_funcs:
if hasattr(user_defined_func, 'open'):
runtime_context = RuntimeContext(
self.spec.serialized_fn.runtime_context.task_name,
self.spec.serialized_fn.runtime_context.task_name_with_subtasks,
self.spec.serialized_fn.runtime_context.number_of_parallel_subtasks,
self.spec.serialized_fn.runtime_context.max_number_of_parallel_subtasks,
self.spec.serialized_fn.runtime_context.index_of_this_subtask,
self.spec.serialized_fn.runtime_context.attempt_number,
{p.key: p.value for p in self.spec.serialized_fn.runtime_context.job_parameters}
)
user_defined_func.open(runtime_context)
def generate_func(self, serialized_fn):
func, user_defined_func = operation_utils.extract_data_stream_stateless_function(
serialized_fn)
return func, [user_defined_func]
class InternalRuntimeContext(RuntimeContext):
def __init__(self,
task_name: str,
task_name_with_subtasks: str,
number_of_parallel_subtasks: int,
max_number_of_parallel_subtasks: int,
index_of_this_subtask: int,
attempt_number: int,
job_parameters: Dict[str, str],
keyed_state_backend: RemoteKeyedStateBackend):
super(InternalRuntimeContext, self).__init__(
task_name, task_name_with_subtasks, number_of_parallel_subtasks,
max_number_of_parallel_subtasks, index_of_this_subtask, attempt_number,
job_parameters)
self._keyed_state_backend = keyed_state_backend
def get_state(self, state_descriptor: ValueStateDescriptor) -> ValueState:
return self._keyed_state_backend.get_value_state(state_descriptor.name, PickleCoder())
def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListState:
return self._keyed_state_backend.get_list_state(state_descriptor.name, PickleCoder())
def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapState:
return self._keyed_state_backend.get_map_state(state_descriptor.name, PickleCoder(),
PickleCoder())
def get_reducing_state(self, state_descriptor: ReducingStateDescriptor) -> ReducingState:
return self._keyed_state_backend.get_reducing_state(
state_descriptor.get_name(), PickleCoder(), state_descriptor.get_reduce_function())
def get_aggregating_state(
self, state_descriptor: AggregatingStateDescriptor) -> AggregatingState:
return self._keyed_state_backend.get_aggregating_state(
state_descriptor.get_name(), PickleCoder(), state_descriptor.get_agg_function())
class ProcessFunctionOperation(DataStreamStatelessFunctionOperation):
def __init__(self, spec):
self.timer_service = ProcessFunctionOperation.InternalTimerService()
self.function_context = ProcessFunctionOperation.InternalProcessFunctionContext(
self.timer_service)
super(ProcessFunctionOperation, self).__init__(spec)
def generate_func(self, serialized_fn) -> tuple:
func, proc_func = operation_utils.extract_process_function(
serialized_fn, self.function_context)
return func, [proc_func]
class InternalProcessFunctionContext(ProcessFunction.Context):
"""
Internal implementation of ProcessFunction.Context.
"""
def __init__(self, timer_service: TimerService):
self._timer_service = timer_service
self._timestamp = None
def timer_service(self):
return self._timer_service
def timestamp(self) -> int:
return self._timestamp
def set_timestamp(self, ts: int):
self._timestamp = ts
class InternalTimerService(TimerService):
"""
Internal implementation of TimerService.
"""
def __init__(self):
self._current_watermark = None
def current_processing_time(self) -> int:
return int(time.time() * 1000)
def current_watermark(self):
return self._current_watermark
def set_current_watermark(self, wm):
self._current_watermark = wm
def register_processing_time_timer(self, t: int):
raise Exception("Register timers is only supported on a keyed stream.")
def register_event_time_timer(self, t: int):
raise Exception("Register timers is only supported on a keyed stream.")
class KeyedProcessFunctionOperation(StatefulFunctionOperation):
def __init__(self, spec, keyed_state_backend):
self._collector = KeyedProcessFunctionOperation.InternalCollector()
internal_timer_service = KeyedProcessFunctionOperation.InternalTimerService(
self._collector, keyed_state_backend)
self.function_context = KeyedProcessFunctionOperation.InternalKeyedProcessFunctionContext(
internal_timer_service)
self.on_timer_ctx = KeyedProcessFunctionOperation\
.InternalKeyedProcessFunctionOnTimerContext(internal_timer_service)
super(KeyedProcessFunctionOperation, self).__init__(spec, keyed_state_backend)
def generate_func(self, serialized_fn) -> Tuple:
func, proc_func = operation_utils.extract_keyed_process_function(
serialized_fn, self.function_context, self.on_timer_ctx, self._collector,
self.keyed_state_backend)
return func, [proc_func]
def open(self):
for user_defined_func in self.user_defined_funcs:
if hasattr(user_defined_func, 'open'):
runtime_context = InternalRuntimeContext(
self.spec.serialized_fn.runtime_context.task_name,
self.spec.serialized_fn.runtime_context.task_name_with_subtasks,
self.spec.serialized_fn.runtime_context.number_of_parallel_subtasks,
self.spec.serialized_fn.runtime_context.max_number_of_parallel_subtasks,
self.spec.serialized_fn.runtime_context.index_of_this_subtask,
self.spec.serialized_fn.runtime_context.attempt_number,
{p.key: p.value for p in
self.spec.serialized_fn.runtime_context.job_parameters},
self.keyed_state_backend)
user_defined_func.open(runtime_context)
class InternalCollector(object):
"""
        Internal implementation of the Collector. It uses a buffer list to store the data to be
        emitted. Each entry starts with a header flag: 0 means a processing-time timer
        registration request, 1 means an event-time timer registration request and 2 means
        normal data. When registering a timer, the corresponding key must be included along
        with it.
"""
def __init__(self):
self.buf = []
def collect_reg_proc_timer(self, a: Any, key: Any):
self.buf.append(
(operation_utils.KeyedProcessFunctionOutputFlag.REGISTER_PROC_TIMER.value,
a, key, None))
def collect_reg_event_timer(self, a: Any, key: Any):
self.buf.append(
(operation_utils.KeyedProcessFunctionOutputFlag.REGISTER_EVENT_TIMER.value,
a, key, None))
def collect_del_proc_timer(self, a: Any, key: Any):
self.buf.append(
(operation_utils.KeyedProcessFunctionOutputFlag.DEL_PROC_TIMER.value,
a, key, None))
def collect_del_event_timer(self, a: Any, key: Any):
self.buf.append(
(operation_utils.KeyedProcessFunctionOutputFlag.DEL_EVENT_TIMER.value,
a, key, None))
def collect(self, a: Any):
self.buf.append((operation_utils.KeyedProcessFunctionOutputFlag.NORMAL_DATA.value, a))
def clear(self):
self.buf.clear()
class InternalKeyedProcessFunctionOnTimerContext(KeyedProcessFunction.OnTimerContext):
"""
Internal implementation of ProcessFunction.OnTimerContext.
"""
def __init__(self, timer_service: TimerService):
self._timer_service = timer_service
self._time_domain = None
self._timestamp = None
self._current_key = None
def get_current_key(self):
return self._current_key
def set_current_key(self, current_key):
self._current_key = current_key
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._timestamp
def set_timestamp(self, ts: int):
self._timestamp = ts
def time_domain(self) -> TimeDomain:
return self._time_domain
def set_time_domain(self, td: TimeDomain):
self._time_domain = td
class InternalKeyedProcessFunctionContext(KeyedProcessFunction.Context):
"""
Internal implementation of KeyedProcessFunction.Context.
"""
def __init__(self, timer_service: TimerService):
self._timer_service = timer_service
self._timestamp = None
self._current_key = None
def get_current_key(self):
return self._current_key
def set_current_key(self, current_key):
self._current_key = current_key
def timer_service(self) -> TimerService:
return self._timer_service
def timestamp(self) -> int:
return self._timestamp
def set_timestamp(self, ts: int):
self._timestamp = ts
class InternalTimerService(TimerService):
"""
Internal implementation of TimerService.
"""
def __init__(self, collector, keyed_state_backend):
self._collector = collector
self._keyed_state_backend = keyed_state_backend
self._current_watermark = None
def current_processing_time(self) -> int:
return int(time.time() * 1000)
def current_watermark(self) -> int:
return self._current_watermark
def set_current_watermark(self, wm):
self._current_watermark = wm
def register_processing_time_timer(self, t: int):
current_key = self._keyed_state_backend.get_current_key()
self._collector.collect_reg_proc_timer(t, current_key)
def register_event_time_timer(self, t: int):
current_key = self._keyed_state_backend.get_current_key()
self._collector.collect_reg_event_timer(t, current_key)
def delete_processing_time_timer(self, t: int):
current_key = self._keyed_state_backend.get_current_key()
self._collector.collect_del_proc_timer(t, current_key)
def delete_event_time_timer(self, t: int):
current_key = self._keyed_state_backend.get_current_key()
self._collector.collect_del_event_timer(t, current_key)
|
the-stack_106_31185 | import time
import torch
from transformer.transformer import Transformer
if __name__ == '__main__':
checkpoint = 'BEST_checkpoint.tar'
print('loading {}...'.format(checkpoint))
start = time.time()
checkpoint = torch.load(checkpoint)
print('elapsed {} sec'.format(time.time() - start))
model = checkpoint['model']
print(type(model))
filename = 'chatbot-v2.pt'
print('saving {}...'.format(filename))
start = time.time()
torch.save(model.state_dict(), filename)
print('elapsed {} sec'.format(time.time() - start))
print('loading {}...'.format(filename))
start = time.time()
model = Transformer()
model.load_state_dict(torch.load(filename))
print('elapsed {} sec'.format(time.time() - start))
|
the-stack_106_31186 | import os
import multiprocessing
from ConfigParser import SafeConfigParser
class Config(object):
""" An object to load and represent the configuration of the current
scraper. This loads scraper configuration from the environment and a
    per-user configuration file (``~/.scrapekit.ini``). """
def __init__(self, scraper, config):
self.scraper = scraper
self.config = self._get_defaults()
self.config = self._get_file(self.config)
self.config = self._get_env(self.config)
if config is not None:
self.config.update(config)
def _get_defaults(self):
name = self.scraper.name
return {
'cache_policy': 'http',
'threads': multiprocessing.cpu_count() * 2,
'data_path': os.path.join(os.getcwd(), 'data', name),
'reports_path': None
}
def _get_env(self, config):
""" Read environment variables based on the settings defined in
the defaults. These are expected to be upper-case versions of
the actual setting names, prefixed by ``SCRAPEKIT_``. """
for option, value in config.items():
env_name = 'SCRAPEKIT_%s' % option.upper()
value = os.environ.get(env_name, value)
config[option] = value
return config
def _get_file(self, config):
""" Read a per-user .ini file, which is expected to have either
        a ``[scrapekit]`` or a ``[$SCRAPER_NAME]`` section. """
config_file = SafeConfigParser()
config_file.read([os.path.expanduser('~/.scrapekit.ini')])
if config_file.has_section('scrapekit'):
config.update(dict(config_file.items('scrapekit')))
if config_file.has_section(self.scraper.name):
config.update(dict(config_file.items(self.scraper.name)))
return config
def items(self):
return self.config.items()
def __getattr__(self, name):
if name != 'config' and name in self.config:
return self.config.get(name)
try:
return object.__getattribute__(self, name)
except AttributeError:
return None
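# Illustrative usage sketch (not part of scrapekit itself; the scraper stub and
# values below are hypothetical). Settings are resolved in order: built-in
# defaults -> ~/.scrapekit.ini -> SCRAPEKIT_* environment variables -> the
# explicit dict passed to the constructor.
class _ExampleScraper(object):
    name = 'example-scraper'
def _demo_config():
    config = Config(_ExampleScraper(), {'threads': 4})
    # e.g. SCRAPEKIT_CACHE_POLICY=none would override the 'http' default
    return config.threads, config.cache_policy, config.data_path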
|
the-stack_106_31187 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for internal use."""
# pylint: disable=g-direct-tensorflow-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.numpy_ops import np_arrays
from tensorflow.python.ops.numpy_ops import np_dtypes
from tensorflow.python.util import nest
tensor_to_ndarray = np_arrays.tensor_to_ndarray
def _canonicalize_axis(axis, rank):
return _canonicalize_axes([axis], rank)[0]
def _canonicalize_axes(axes, rank):
rank = _maybe_static(rank)
if isinstance(rank, ops.Tensor):
canonicalizer = (
lambda axis: cond(axis < 0, lambda: axis + rank, lambda: axis))
else:
canonicalizer = lambda axis: axis + rank if axis < 0 else axis
return [canonicalizer(axis) for axis in axes]
def _supports_signature():
return hasattr(inspect, 'signature')
def _to_tf_type(dtype):
"""Converts a native python or numpy type to TF DType.
Args:
dtype: Could be a python type, a numpy type or a TF DType.
Returns:
A tensorflow `DType`.
"""
return dtypes.as_dtype(dtype)
def _to_numpy_type(dtype):
"""Converts a native python or TF DType to numpy type.
Args:
dtype: Could be a python type, a numpy type or a TF DType.
Returns:
A NumPy `dtype`.
"""
if isinstance(dtype, dtypes.DType):
return dtype.as_numpy_dtype
return np.dtype(dtype)
def finfo(dtype):
"""Returns properties of floating point types.
Note that currently it just forwards to the numpy namesake, while tensorflow
and numpy dtypes may have different properties.
Args:
dtype: Could be a python type, a numpy type or a TF DType.
Returns:
A class describing properties of `dtype`, as described by
https://docs.scipy.org/doc/numpy/reference/generated/numpy.finfo.html
"""
return np.finfo(_to_numpy_type(dtype))
def isscalar(val):
"""Returns whether `val` is a scalar value or scalar Tensor."""
if isinstance(val, np_arrays.ndarray):
val = val.data
if isinstance(val, ops.Tensor):
ndims = val.shape.ndims
if ndims is not None:
return ndims == 0
else:
return math_ops.equal(array_ops.rank(val), 0)
else:
return np.isscalar(val)
# Can't use np_doc because np.result_type is a builtin function.
def result_type(*arrays_and_dtypes):
"""Returns the type resulting from applying NumPy type promotion to arguments.
Args:
*arrays_and_dtypes: A list of array_like objects or dtypes.
Returns:
A numpy dtype.
"""
def maybe_get_dtype(x):
# Don't put np.ndarray in this list, because np.result_type looks at the
# value (not just dtype) of np.ndarray to decide the result type.
if isinstance(
x, (np_arrays.ndarray, ops.Tensor, indexed_slices.IndexedSlices)):
return _to_numpy_type(x.dtype)
elif isinstance(x, dtypes.DType):
return _to_numpy_type(x)
return x
arrays_and_dtypes = [
maybe_get_dtype(x) for x in nest.flatten(arrays_and_dtypes)
]
if not arrays_and_dtypes:
# If arrays_and_dtypes is an empty list, let numpy decide what the dtype is.
arrays_and_dtypes = [np.asarray([])]
return np_dtypes._result_type(*arrays_and_dtypes) # pylint: disable=protected-access
def promote_types(type1, type2):
"""Returns the type resulting from applying NumPy type promotion.
Args:
type1: A numpy type.
type2: A numpy type.
Returns:
A numpy type.
"""
type1 = _to_numpy_type(type1)
type2 = _to_numpy_type(type2)
return np_dtypes.canonicalize_dtype(np.promote_types(type1, type2))
def _has_docstring(f):
return (f and hasattr(f, '__doc__') and isinstance(f.__doc__, str) and
f.__doc__)
def _add_blank_line(s):
if s.endswith('\n'):
return s + '\n'
else:
return s + '\n\n'
def _np_signature(f):
"""An enhanced inspect.signature that can handle numpy.ufunc."""
# TODO(wangpeng): consider migrating away from inspect.signature.
# inspect.signature is supported in Python 3.3.
if not hasattr(inspect, 'signature'):
return None
if f is None:
return None
if not isinstance(f, np.ufunc):
try:
return inspect.signature(f)
except ValueError:
return None
def names_from_num(prefix, n):
if n <= 0:
return []
elif n == 1:
return [prefix]
else:
return [prefix + str(i + 1) for i in range(n)]
input_names = names_from_num('x', f.nin)
output_names = names_from_num('out', f.nout)
keyword_only_params = [('where', True), ('casting', 'same_kind'),
('order', 'K'), ('dtype', None), ('subok', True),
('signature', None), ('extobj', None)]
params = []
params += [
inspect.Parameter(name, inspect.Parameter.POSITIONAL_ONLY)
for name in input_names
]
if f.nout > 1:
params += [
inspect.Parameter(
name, inspect.Parameter.POSITIONAL_ONLY, default=None)
for name in output_names
]
params += [
inspect.Parameter(
'out',
inspect.Parameter.POSITIONAL_OR_KEYWORD,
default=None if f.nout == 1 else (None,) * f.nout)
]
params += [
inspect.Parameter(name, inspect.Parameter.KEYWORD_ONLY, default=default)
for name, default in keyword_only_params
]
return inspect.Signature(params)
# Python 2 doesn't allow keyword-only argument. Python prior to 3.8 doesn't
# allow positional-only argument. So we conflate positional-only, keyword-only
# and positional-or-keyword arguments here.
def _is_compatible_param_kind(a, b):
def relax(k):
if k in (inspect.Parameter.POSITIONAL_ONLY, inspect.Parameter.KEYWORD_ONLY):
return inspect.Parameter.POSITIONAL_OR_KEYWORD
return k
return relax(a) == relax(b)
def _prepare_np_fun_name_and_fun(np_fun_name, np_fun):
"""Mutually propagates information between `np_fun_name` and `np_fun`.
If one is None and the other is not, we'll try to make the former not None in
a best effort.
Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name should be set.
np_fun: the numpy function whose docstring will be used.
Returns:
Processed `np_fun_name` and `np_fun`.
"""
if np_fun_name is not None:
assert isinstance(np_fun_name, str)
if np_fun is not None:
assert not isinstance(np_fun, str)
if np_fun is None:
assert np_fun_name is not None
try:
np_fun = getattr(np, str(np_fun_name))
except AttributeError:
np_fun = None
if np_fun_name is None:
assert np_fun is not None
np_fun_name = np_fun.__name__
return np_fun_name, np_fun
def _np_doc_helper(f, np_f, np_fun_name=None, unsupported_params=None):
"""Helper to get docs."""
assert np_f or np_fun_name
if not np_fun_name:
np_fun_name = np_f.__name__
doc = 'TensorFlow variant of `numpy.%s`.\n\n' % np_fun_name
if unsupported_params:
doc += 'Unsupported arguments: ' + ', '.join(
'`' + name + '`' for name in unsupported_params) + '.\n\n'
if _has_docstring(f):
doc += f.__doc__
doc = _add_blank_line(doc)
if _has_docstring(np_f):
doc += 'Documentation for `numpy.%s`:\n\n' % np_f.__name__
# TODO(wangpeng): It looks like code snippets in numpy doc don't work
# correctly with doctest. Fix that and remove the reformatting of the np_f
# comment.
doc += np_f.__doc__.replace('>>>', '>')
return doc
def np_doc(np_fun_name, np_fun=None):
"""Attachs numpy docstring to a function.
Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name should be set.
np_fun: (optional) the numpy function whose docstring will be used.
Returns:
A function decorator that attaches the docstring from `np_fun` to the
decorated function.
"""
np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)
np_sig = _np_signature(np_fun)
def decorator(f):
"""The decorator."""
unsupported_params = []
if hasattr(inspect, 'signature') and np_sig is not None:
try:
sig = inspect.signature(f)
except ValueError:
sig = None
# TODO(wangpeng): Enable this.
# Looks like this may not work with different versions of numpy.
# if sig is not None:
# for name, param in sig.parameters.items():
# np_param = np_sig.parameters.get(name)
# if np_param is None:
# raise TypeError('Cannot find parameter "%s" in the numpy
# function\'s ' 'signature' % name)
# if not _is_compatible_param_kind(param.kind, np_param.kind):
# raise TypeError(
# 'Parameter "%s" is of kind %s while in numpy it is of '
# 'kind %s' % (name, param.kind, np_param.kind))
# has_default = (param.default != inspect.Parameter.empty)
# np_has_default = (np_param.default != inspect.Parameter.empty)
# if has_default != np_has_default:
# raise TypeError('Parameter "%s" should%s have a default value' %
# (name, '' if np_has_default else ' not'))
# for name in np_sig.parameters:
# if name not in sig.parameters:
# unsupported_params.append(name)
f.__doc__ = _np_doc_helper(
f,
np_fun,
np_fun_name=np_fun_name,
unsupported_params=unsupported_params)
return f
return decorator
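# Illustrative usage sketch (the helper below is hypothetical and not part of
# this module): `np_doc` is applied as a decorator so that the TF-based variant
# inherits numpy's docstring, prefixed with "TensorFlow variant of `numpy.<name>`.".
def _example_np_doc_usage():
  """Returns the generated docstring of a decorated example function."""
  @np_doc('transpose')
  def transpose(a, axes=None):
    return array_ops.transpose(a, axes)
  return transpose.__doc__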
def np_doc_only(np_fun_name, np_fun=None):
"""Attachs numpy docstring to a function.
This differs from np_doc in that it doesn't check for a match in signature.
Args:
    np_fun_name: name for the np_fun symbol. At least one of np_fun or
      np_fun_name should be set.
np_fun: (optional) the numpy function whose docstring will be used.
Returns:
A function decorator that attaches the docstring from `np_fun` to the
decorated function.
"""
np_fun_name, np_fun = _prepare_np_fun_name_and_fun(np_fun_name, np_fun)
def decorator(f):
f.__doc__ = _np_doc_helper(f, np_fun, np_fun_name=np_fun_name)
return f
return decorator
def tf_broadcast(*args):
"""Broadcast tensors.
Args:
*args: a list of tensors whose shapes are broadcastable against each other.
Returns:
Tensors broadcasted to the common shape.
"""
if len(args) <= 1:
return args
sh = array_ops.shape(args[0])
for arg in args[1:]:
sh = array_ops.broadcast_dynamic_shape(sh, array_ops.shape(arg))
return [array_ops.broadcast_to(arg, sh) for arg in args]
# TODO(wangpeng): Move the following functions to a separate file and check for
# float dtypes in each of them.
def get_static_value(x):
"""A version of tf.get_static_value that returns None on float dtypes.
It returns None on float dtypes in order to avoid breaking gradients.
Args:
x: a tensor.
Returns:
Same as `tf.get_static_value`, except that it returns None when `x` has a
float dtype.
"""
if isinstance(x, ops.Tensor) and (x.dtype.is_floating or x.dtype.is_complex):
return None
return tensor_util.constant_value(x)
def _maybe_static(x):
value = get_static_value(x)
if value is None:
return x
else:
return value
# All the following functions exist because get_static_value can't handle
# their TF counterparts.
def cond(pred, true_fn, false_fn):
"""A version of tf.cond that tries to evaluate the condition."""
v = get_static_value(pred)
if v is None:
return control_flow_ops.cond(pred, true_fn, false_fn)
if v:
return true_fn()
else:
return false_fn()
def add(a, b):
"""A version of tf.add that eagerly evaluates if possible."""
return _maybe_static(a) + _maybe_static(b)
def subtract(a, b):
"""A version of tf.subtract that eagerly evaluates if possible."""
return _maybe_static(a) - _maybe_static(b)
def greater(a, b):
"""A version of tf.greater that eagerly evaluates if possible."""
return _maybe_static(a) > _maybe_static(b)
def greater_equal(a, b):
"""A version of tf.greater_equal that eagerly evaluates if possible."""
return _maybe_static(a) >= _maybe_static(b)
def less_equal(a, b):
"""A version of tf.less_equal that eagerly evaluates if possible."""
return _maybe_static(a) <= _maybe_static(b)
def logical_and(a, b):
"""A version of tf.logical_and that eagerly evaluates if possible."""
a_value = get_static_value(a)
if a_value is not None:
if np.isscalar(a_value):
if a_value:
return _maybe_static(b)
else:
return a_value
else:
return a_value & _maybe_static(b)
else:
return a & _maybe_static(b)
def logical_or(a, b):
"""A version of tf.logical_or that eagerly evaluates if possible."""
a_value = get_static_value(a)
if a_value is not None:
if np.isscalar(a_value):
if a_value:
return a_value
else:
return _maybe_static(b)
else:
return a_value | _maybe_static(b)
else:
return a | _maybe_static(b)
def getitem(a, slice_spec):
"""A version of __getitem__ that eagerly evaluates if possible."""
return _maybe_static(a)[slice_spec]
def reduce_all(input_tensor, axis=None, keepdims=False):
"""A version of tf.reduce_all that eagerly evaluates if possible."""
v = get_static_value(input_tensor)
if v is None:
return math_ops.reduce_all(input_tensor, axis=axis, keepdims=keepdims)
else:
return v.all(axis=axis, keepdims=keepdims)
def reduce_any(input_tensor, axis=None, keepdims=False):
"""A version of tf.reduce_any that eagerly evaluates if possible."""
v = get_static_value(input_tensor)
if v is None:
return math_ops.reduce_any(input_tensor, axis=axis, keepdims=keepdims)
else:
return v.any(axis=axis, keepdims=keepdims)
|
the-stack_106_31189 | import numpy as np
import pandas as pd
import re
def stack_chunks(dat_list):
'''
    Concatenate a list of DataFrame chunks while preserving categorical dtypes.
    If you concat categoricals with different levels (or even the same levels
    in a different order), pandas silently converts them to object.
'''
columns, dtypes = dat_list[0].columns, dat_list[0].dtypes
# preserve categories
levels = {col: set() for col in columns}
for col, dt in zip(columns, dtypes):
if str(dt) == 'category':
for d in dat_list:
levels[col] = set.union(levels[col], d[col].cat.categories)
for d in dat_list:
_newlevels = list(levels[col] - set(d[col].cat.categories))
d[col] = d[col].cat.add_categories(_newlevels)
                d[col] = d[col].cat.reorder_categories(sorted(levels[col]))
# recombine the chunks and return the result
return pd.concat(dat_list)
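# Illustrative sketch of what stack_chunks guards against (toy data, not part
# of the original module): plain pd.concat would silently turn these two
# category columns into object because their category levels differ.
def _demo_stack_chunks():
    d1 = pd.DataFrame({'state': pd.Categorical(['NY', 'CA'])})
    d2 = pd.DataFrame({'state': pd.Categorical(['TX'])})
    stacked = stack_chunks([d1, d2])
    return stacked['state'].dtype  # stays a CategoricalDtype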
def read_hcup(data_file, sas_script, chunksize=500000, combine_chunks=True,
return_meta=False, strings_to_categorical=True, **kwargs):
'''
Arguments:
data_file (str): Path of fixed-width text data file
sas_script (str): Path of the accompanying SAS load file
chunksize (int, default 500K): Break data into chunks of size chunksize
and read/process each chunk separately (for lower memory usage)
combine_chunks (bool, default True): Return single DataFrame with all
chunks combined (True), or return list of DataFrame chunks (False)
return_meta (bool, default False): Return the data + a DataFrame of
column metadata (True), or just return the processed data (False)
strings_to_categorical (bool, default True): Convert variables defined
as CHAR in SAS script to pd.Categorical upon import
kwargs: passed on to pandas.read_fwf()
Returns:
Default: a single pandas DataFrame
If combine_chunks=False: Generator of pandas DataFrames
If return_meta=True: Return metadata (widths, dtypes, etc. ) *instead
of* the data
'''
# what dtype to use for text columns
text = 'category' if strings_to_categorical else 'object'
# read in the sas script
with open(sas_script) as f:
sas = f.readlines()
# grab the lines that define the fields. returns three match groups:
# 0 = starting position, 1 = field name, 2 = variable type
fields = [re.search(r'@\s*(\d+)\s+(\S+)\s+(\S+)\s?', x) for x in sas]
fields = [x.groups() for x in fields if x]
# from those, grab the names and starting positions, and infer the dtypes
starts = [int(x[0]) for x in fields]
names = [x[1] for x in fields]
# use different dtypes based on whether user requests metadata or data.
# in the latter case we just make everything a category for max compression
# for numerics, must use floats since int columns can't have missing values
# but it's okay because floats hardly use more space than ints
if return_meta:
dtype = [text if re.search(r'CHAR', x[2]) else float for x in fields]
else:
# keep KEY_NIS as numeric so it can be safely sorted on
dtype = [text if col != 'KEY_NIS' else float for col in names]
# convert dtype list into dictionary (for pd.read_fwf)
    dtypes = dict(zip(names, dtype))
# compute the variable widths
maxcols = int(re.search(r'LRECL = (.+);', ''.join(sas)).group(1))
widths = np.diff(starts + [maxcols+1])
# grab all the missing value codes
na_vals = re.findall(r'\'(.+)\' = \S+', ''.join(sas))
na_vals += ['.']
# return meta-data if requested
if return_meta:
return {'names': names, 'starts': starts, 'widths': widths,
'dtypes': dtype, 'na_values': na_vals}
# get a generator that reads the data in chunks
dat = pd.read_fwf(data_file, header=None, names=names, widths=widths,
                      dtype=dtypes, na_values=na_vals, chunksize=chunksize,
**kwargs)
# return generator if requested
if not combine_chunks:
return dat
# convert generator to list and stack the dataframes if applicable
dat = list(dat)
if len(dat) > 1:
dat = stack_chunks(dat)
else:
dat = dat[0]
return dat
def read_mhos(sas_script, data_file=None, chunksize=500000, combine_chunks=True,
return_meta=False, strings_to_categorical=True, **kwargs):
'''
Arguments:
data_file (str): Path of fixed-width text data file
sas_script (str): Path of the accompanying SAS load file
chunksize (int, default 500K): Break data into chunks of size chunksize
and read/process each chunk separately (for lower memory usage)
combine_chunks (bool, default True): Return single DataFrame with all
chunks combined (True), or return list of DataFrame chunks (False)
return_meta (bool, default False): Return the data + a DataFrame of
column metadata (True), or just return the processed data (False)
strings_to_categorical (bool, default True): Convert variables defined
as CHAR in SAS script to pd.Categorical upon import
kwargs: passed on to pandas.read_fwf()
Returns:
Default: a single pandas DataFrame
If combine_chunks=False: Generator of pandas DataFrames
If return_meta=True: Return metadata (colspecs, dtypes, etc. ) *instead
of* the data
'''
if data_file is None:
return_meta = True
# what dtype to use for text columns
text = 'category' if strings_to_categorical else 'object'
# read in the sas script
with open(sas_script) as f:
sas = f.readlines()
# match groups (indexed from 1, not 0)
# 1 = prefix, 2 = field name, 3 = string, 4 = start position,
# 5 = end position, 6 = field number, 7 = field description
regex = r'^\s+(&[c|C].|&[r|R].|&[p|P].)?(\S+)\s+(\$)?\s*(\d{1,3})-?(\d{1,3})?\S*\s*/\*\s+(\d{1,3})(.*)\*/'
fields = [re.search(regex, x) for x in sas if re.search(regex, x)]
# check that we matched all and only the the right field numbers
assert [int(x.group(6)) for x in fields if x] \
== list(range(1, len(fields)+1))
# extract the meta-data
prefix = [x.group(1) for x in fields]
names = [x.group(2).lower() for x in fields]
dtypes = [str if x.group(2)=='CASE_ID' else text if x.group(3) else float
for x in fields]
starts = [int(x.group(4))-1 for x in fields]
ends = [int(x.group(5)) if x.group(5) else int(x.group(4)) for x in fields]
descriptions = [x.group(7).strip() for x in fields]
# handle duplicate names
vc = pd.Series(names).value_counts()
dupes = list(vc.index[vc > 1])
dupes = [x in dupes for x in names]
names = [prefix+name if dupe else name
for prefix, name, dupe in zip(prefix, names, dupes)]
# convert dtype list into dictionary (for pd.read_fwf)
dtypes = dict(zip(names, dtypes))
# return meta-data if requested
if return_meta:
return {'names': names, 'starts': starts, 'ends': ends,
'dtypes': dtypes, 'descriptions': descriptions}
# get a generator that reads the data in chunks
dat = pd.read_fwf(data_file, header=None, names=names,
colspecs=list(zip(starts, ends)), dtype=dtypes,
chunksize=chunksize, **kwargs)
# return generator if requested
if not combine_chunks:
return dat
# convert generator to list and stack the dataframes if applicable
dat = list(dat)
if len(dat) > 1:
dat = stack_chunks(dat)
else:
dat = dat[0]
return dat
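# Illustrative usage sketch (file paths are hypothetical placeholders; both
# readers need the fixed-width data file plus its accompanying SAS load script):
if __name__ == '__main__':
    nis = read_hcup('NIS_2014_Core.ASC', 'NIS_2014_Core.sas', chunksize=100000)
    mhos_meta = read_mhos('mhos_layout.sas')  # no data file -> metadata only
    print(nis.shape, len(mhos_meta['names']))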
|
the-stack_106_31190 | """
Pairwise sequence alignment of Avidin with Streptavidin
=======================================================
This script performs a pairwise sequence alignment of
avidin (*Gallus gallus*)
with streptavidin (*Streptomyces lavendulae*).
"""
# Code source: Patrick Kunzmann
# License: BSD 3 clause
import matplotlib.pyplot as plt
import biotite.sequence as seq
import biotite.sequence.align as align
import biotite.sequence.io.fasta as fasta
import biotite.database.entrez as entrez
import biotite.sequence.graphics as graphics
# Download and parse protein sequences of avidin and streptavidin
fasta_file = fasta.FastaFile.read(entrez.fetch_single_file(
["CAC34569", "ACL82594"], None, "protein", "fasta"
))
for name, sequence in fasta_file.items():
if "CAC34569" in name:
avidin_seq = seq.ProteinSequence(sequence)
elif "ACL82594" in name:
streptavidin_seq = seq.ProteinSequence(sequence)
# Get BLOSUM62 matrix
matrix = align.SubstitutionMatrix.std_protein_matrix()
# Perform pairwise sequence alignment with affine gap penalty
# Terminal gaps are not penalized
alignments = align.align_optimal(avidin_seq, streptavidin_seq, matrix,
gap_penalty=(-10, -1), terminal_penalty=False)
# Draw first and only alignment
# The color intensity indicates the similarity
fig = plt.figure(figsize=(8.0, 2.5))
ax = fig.add_subplot(111)
graphics.plot_alignment_similarity_based(
ax, alignments[0], matrix=matrix, labels=["Avidin", "Streptavidin"],
show_numbers=True, show_line_position=True
)
fig.tight_layout()
plt.show()
|
the-stack_106_31192 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = [
'altgraph==0.17',
'asn1crypto==1.3.0',
'certifi==2019.11.28',
'cffi==1.13.2',
'chardet==3.0.4',
'cryptography==2.8',
'dis3==0.1.3',
'future==0.18.2',
'idna==2.8',
'ipaddress==1.0.23',
'macholib==1.14',
'pefile==2019.4.18',
'pycparser==2.19',
'PyInstaller==3.6',
'pyOpenSSL==19.1.0',
'requests>=2.22.0',
'urllib3>=1.25.8',
]
setup_requirements = ['pytest-runner', ]
test_requirements = ['pytest', ]
setup(
author="Aidas Bendoraitis",
author_email='[email protected]',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Search in API is a script that allows you to search among multiple pages of an API endpoint.",
entry_points={
'console_scripts': [
'search_in_api=search_in_api.search_in_api:main',
],
},
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='search_in_api',
name='search_in_api',
packages=find_packages(include=['search_in_api']),
setup_requires=setup_requirements,
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/archatas/search_in_api',
version='1.0.1',
zip_safe=False,
)
|
the-stack_106_31193 | class Animal:
def __init__(self, kind, color, name):
        # constructor method; creates a new instance of Animal
        # self plays the role of `this` in JS
self.kind = kind
self.name = name
self.color = color
def description(self):
print("%s is a %s with color %s" % (self.name, self.kind, self.color))
# or f"{self.name} is a {self.kind} with a color {self.color}"
cat = Animal("cat", "orange", "CAty")
dog = Animal("dog", "black", "HIM")
print(cat.kind)
print(dog.color)
cat.description()
|
the-stack_106_31194 | """Tests using pytest_resilient_circuits"""
# -*- coding: utf-8 -*-
# Copyright © IBM Corporation 2010, 2019
from __future__ import print_function, unicode_literals
import pytest
from resilient_circuits.util import get_config_data, get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from test_helper import TasksResilientMock
PACKAGE_NAME = "fn_task_utils"
FUNCTION_NAME = "task_utils_create"
# Read the default configuration-data section from the package
config_data = get_config_data(PACKAGE_NAME)
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = TasksResilientMock
def call_task_utils_create_function(circuits, function_params, timeout=10):
# Fire a message to the function
evt = SubmitTestFunction("task_utils_create", function_params)
circuits.manager.fire(evt)
# circuits will fire an "exception" event if an exception is raised in the FunctionComponent
# return this exception if it is raised
exception_event = circuits.watcher.wait("exception", parent=None)
if exception_event is not False:
exception = exception_event.args[1].args[1]
raise exception
# else return the FunctionComponent's results
else:
event = circuits.watcher.wait("task_utils_create_result", parent=evt, timeout=timeout)
assert event
assert isinstance(event.kwargs["result"], FunctionResult)
pytest.wait_for(event, "complete", True)
return event.kwargs["result"].value
class TestTskUtilsCreate:
""" Tests for the tsk_utils_create function"""
def test_function_definition(self):
""" Test that the package provides customization_data that defines the function """
func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
assert func is not None
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "New Task Name", {"type": "text", "content": '{\n"required": false\n}'}),
(123, "My New Task", {"type": "text", "content": '{\n"required": false\n}'})
])
def test_success(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert(results["content"]["task"])
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "Η Θ Ι Κ Λ Μ Ν Ξ Ο Π Ρ", {"type": "text", "content": '{\n"required": false\n}'}),
(123, " Й К Л М Н О П Р С Т ", {"type": "text", "content": '{\n"required": false\n}'})
])
def test_success_unicode(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert (results["content"]["task"])
assert function_params["task_name"] == results["content"]["task"]["name"]
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "text", {"type": "text", "content": '{\n"required": false\n}'}),
])
def test_owner_as_email_user(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert (results["content"]["task"])
@pytest.mark.parametrize("incident_id, task_name, task_utils_payload", [
(123, "text", {"type": "text", "content": '{\n"required": false\n}'}),
(123, "text", {"type": "text", "content": '{\n"required": false\n}'})
])
def test_owner_as_user_id(self, circuits_app, incident_id, task_name, task_utils_payload):
""" Test calling with sample values for the parameters """
function_params = {
"incident_id": incident_id,
"task_name": task_name,
"task_utils_payload": task_utils_payload
}
results = call_task_utils_create_function(circuits_app, function_params)
assert results["content"]["task"]
|
the-stack_106_31195 | #!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# In this example, an image is centered at (0,0,0) before a
# rotation is applied to ensure that the rotation occurs about
# the center of the image.
reader = vtk.vtkPNGReader()
reader.SetDataSpacing(0.8,0.8,1.5)
reader.SetFileName("" + str(VTK_DATA_ROOT) + "/Data/fullhead15.png")
reader.Update()
# first center the image at (0,0,0)
reslice = vtk.vtkImageReslice()
reslice.SetResliceAxesDirectionCosines([0,1,0,-1,0,0,0,0,1])
reslice.SetInputConnection(reader.GetOutputPort())
reslice.SetInformationInput(reader.GetOutput())
# reset the image back to the way it was (you don't have
# to do this, it is just put in as an example)
information2 = vtk.vtkImageChangeInformation()
information2.SetInputConnection(reslice.GetOutputPort())
information2.SetInformationInputData(reader.GetOutput())
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(information2.GetOutputPort())
viewer.SetColorWindow(2000)
viewer.SetColorLevel(1000)
viewer.Render()
# --- end of script --
|
the-stack_106_31196 | import json
import logging
from hashlib import sha1
from .score_cache import ScoreCache
logger = logging.getLogger("ores.score_caches.redis")
TTL = 60 * 60 * 24 * 365 * 16 # 16 years
PREFIX = "ores"
class Redis(ScoreCache):
def __init__(self, redis, ttl=None, prefix=None):
self.redis = redis
self.ttl = int(ttl or TTL)
self.prefix = str(prefix or PREFIX)
def lookup(self, wiki, model, rev_id, version=None, cache=None):
key = self._generate_key(wiki, model, rev_id, version=version,
cache=cache)
logger.debug("Looking up score at {0}".format(key))
value = self.redis.get(key)
if value is None:
raise KeyError(key)
else:
return json.loads(str(value, 'utf-8'))
def store(self, wiki, model, rev_id, score, version=None, cache=None):
key = self._generate_key(wiki, model, rev_id, version=version,
cache=cache)
logger.debug("Storing score at {0}".format(key))
self.redis.setex(key, self.ttl, bytes(json.dumps(score), 'utf-8'))
def _generate_key(self, wiki, model, rev_id, version=None, cache=None):
if cache is None or len(cache) == 0:
key_values = [self.prefix, wiki, model, rev_id, version]
else:
cache_hash = self.hash_cache(cache)
key_values = [self.prefix, wiki, model, rev_id, version,
cache_hash]
return ":".join(str(v) for v in key_values)
@classmethod
def from_parameters(cls, *args, ttl=None, prefix=None, **kwargs):
try:
import redis
except ImportError:
raise ImportError("Could not find redis-py. This packages is " +
"required when using ores.score_caches.Redis.")
return cls(redis.StrictRedis(*args, **kwargs), ttl=ttl, prefix=prefix)
@classmethod
def from_config(cls, config, name, section_key="score_caches"):
"""
score_caches:
redis_cache:
class: ores.score_caches.Redis
host: localhost
prefix: ores-derp
ttl: 9001
"""
logger.info("Loading Redis '{0}' from config.".format(name))
section = config[section_key][name]
kwargs = {k: v for k, v in section.items() if k != "class"}
return cls.from_parameters(**kwargs)
@classmethod
def hash_cache(cls, cache):
sorted_tuple = tuple(sorted(cache.items()))
return sha1(bytes(str(sorted_tuple), 'utf8')).hexdigest()
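# Illustrative usage sketch (host, names and IDs below are hypothetical; store()
# and lookup() require a reachable Redis server):
if __name__ == '__main__':
    cache = Redis.from_parameters(host='localhost', port=6379, ttl=3600,
                                  prefix='ores-demo')
    cache.store('enwiki', 'damaging', 12345, {'score': 0.92}, version='0.0.1')
    print(cache.lookup('enwiki', 'damaging', 12345, version='0.0.1'))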
|
the-stack_106_31200 | from xml.etree import ElementTree
root = ElementTree.fromstring(input())
colors = {"red": 0, "green": 0, "blue": 0}
def getcubes(root, value):
colors[root.attrib['color']] += value
for child in root:
getcubes(child, value+1)
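# Illustrative example (hypothetical input): for
#   <cube color="red"><cube color="green"/><cube color="blue"><cube color="green"/></cube></cube>
# each cube contributes its nesting depth (root = 1), so the output is "1 5 2".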
getcubes(root,1)
print(colors["red"], colors["green"], colors["blue"])
|
the-stack_106_31201 | import re
from asteval import Interpreter
import astropy.units as u
from astropy.modeling import models
from qtpy.QtCore import QSortFilterProxyModel, Qt, Signal
from qtpy.QtGui import QStandardItem, QStandardItemModel, QValidator
class ModelFittingModel(QStandardItemModel):
"""
Internel Qt model containing all instanes of the
:class:`astropy.modeling.FittableModel` classes in use in the model editor.
Each item in the model is a :class:`specviz.plugins.model_editor.items.ModelDataItem`
instance.
Attributes
----------
status_changed : :class:`qtpy.QtCore.Signal`
Signal raised when the validator state changes.
"""
status_changed = Signal(QValidator.State, str)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._equation = ""
self.setHorizontalHeaderLabels(["Name", "Value", "Unit", "Fixed"])
@property
def items(self):
"""
The list of all :class:`specviz.plugins.model_editor.items.ModelDataItem`
instances associated with this model.
"""
return [self.item(idx) for idx in range(self.rowCount())]
@property
def equation(self):
"""
The equation used when parsing the set of models into a single
compound model.
"""
return self._equation
@equation.setter
def equation(self, value):
self._equation = value
self.evaluate()
def compose_fittable_models(self):
"""
        Generate the set of models with parameters updated by what the user
        has entered in the GUI.
Returns
-------
fittable_models : dict
Collection of models with updated parameters where each key is the
model name displayed in the gui, and the value is the model.
"""
# Recompose the model objects with the current values in each of its
# parameter rows.
fittable_models = {}
for model_item in self.items:
model = model_item.data()
model_name = model_item.text()
model_kwargs = {'name': model_name, 'fixed': {}}
if isinstance(model, models.PolynomialModel):
model_args = [model.degree]
else:
model_args = []
# For each of the children `StandardItem`s, parse out their
# individual stored values
for cidx in range(model_item.rowCount()):
param_name = model_item.child(cidx, 0).data()
param_value = model_item.child(cidx, 1).data()
param_unit = model_item.child(cidx, 2).data()
param_fixed = model_item.child(cidx, 3).checkState() == Qt.Checked
model_kwargs[param_name] = (u.Quantity(param_value, param_unit)
if param_unit is not None else param_value)
model_kwargs.get('fixed').setdefault(param_name, param_fixed)
new_model = model.__class__(*model_args, **model_kwargs)
fittable_models[model_name] = new_model
return fittable_models
@property
def fittable_models(self):
"""
Dictionary mapping displayed model name to the model instance with
parameters updated by parsing the user input in the ui.
"""
return self.compose_fittable_models()
def add_model(self, model):
"""
Adds a model to the internal Qt model.
Parameters
----------
model : :class:`astropy.modeling.FittableModel`
The model instance to add.
Returns
-------
:class:`qtpy.QtCore.QModelIndex`
The index in the Qt model where the new model been added.
"""
model_name = model.__class__.name
model_count = len([self.item(idx) for idx in range(self.rowCount())
if model.__class__.name in self.item(idx).text()])
model_name = model_name + str(model_count) if model_count > 0 else model_name
model_item = QStandardItem(model_name)
model_item.setData(model, Qt.UserRole + 1)
for para_name in model.param_names:
# Retrieve the parameter object from the model
parameter = getattr(model, para_name)
# Store the name value
param_name = QStandardItem(parameter.name)
param_name.setData(parameter.name, Qt.UserRole + 1)
param_name.setEditable(False)
# Store the data value of the parameter
param_value = QStandardItem("{:.5g}".format(parameter.value))
param_value.setData(parameter.value, Qt.UserRole + 1)
# Store the unit information
# param_unit = QStandardItem("{}".format(parameter.unit))
param_unit = QStandardItem("Plot Units")
param_unit.setData(parameter.unit, Qt.UserRole + 1)
param_unit.setEditable(False)
# Store the fixed state of the unit
param_fixed = QStandardItem()
param_fixed.setData(parameter.fixed, Qt.UserRole + 1)
param_fixed.setCheckable(True)
param_fixed.setEditable(False)
model_item.appendRow([param_name, param_value, param_unit, param_fixed])
self.appendRow([model_item, None, None, None])
# Add this model to the model equation string. By default, all models
# are simply added together
self._equation += " + {}".format(model_name) \
if len(self._equation) > 0 else "{}".format(model_name)
return model_item.index()
def remove_model(self, row):
"""
Remove an astropy model from the internal qt data model.
Parameters
----------
row : int
The row in the qt model that is to be removed.
"""
# Get the model first so that we can re-parse the equation
model_item = self.item(row, 0)
# Remove the model name from the equation
self.equation = re.sub(
"(\+|-|\*|\/|=|>|<|>=|<=|&|\||%|!|\^|\(|\))*\s*?({})".format(
model_item.text()),
"", self._equation)
# Remove the model item from the internal qt model
self.removeRow(row)
def reset_equation(self):
"""
Resets and reconstructs the equation used when parsing the set of models
into a single model.
"""
self._equation = ""
for item in self.items:
self._equation += " + {}".format(item.text()) \
if len(self._equation) > 0 else "{}".format(item.text())
def evaluate(self):
"""
Validate the input to the equation editor.
Parameters
----------
string : str
Plain text representation of the current equation text edit box.
fittable_models : dict
Mapping of tree view model variables names to their model instances.
"""
fittable_models = self.compose_fittable_models()
# Create an evaluation namespace for use in parsing the string
namespace = {}
namespace.update(fittable_models)
# Create a quick class to dump err output instead of piping to the
# user's terminal. Seems this cannot be None, and must be an object
# that has a `write` method.
aeval = Interpreter(usersyms=namespace,
err_writer=type("FileDump", (object,),
{'write': lambda x: None}))
result = aeval(self.equation)
if len(aeval.error) > 0 or not any((self.equation.find(x) >= 0
for x in fittable_models.keys())):
if len(aeval.error) > 0:
status_text = "<font color='red'>Invalid input: {}</font>".format(
str(aeval.error[0].get_error()[1]).split('\n')[-1])
else:
status_text = "<font color='red'>Invalid input: at least one model must be " \
"used in the equation.</font>"
state = QValidator.Invalid
else:
status_text = "<font color='green'>Valid input.</font>"
state = QValidator.Acceptable
self.status_changed.emit(state, status_text)
return result
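# Usage sketch (illustrative): QStandardItemModel normally requires a Qt
# application instance to exist, so this only sketches the non-GUI
# model/equation handling.
#
#   from astropy.modeling import models
#   mfm = ModelFittingModel()
#   mfm.add_model(models.Gaussian1D(amplitude=1, mean=0, stddev=0.5))
#   mfm.equation           # -> "Gaussian1D"
#   mfm.fittable_models    # -> {"Gaussian1D": <Gaussian1D(...)>}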
|
the-stack_106_31203 | # Copyright (c) 2017 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import absolute_import
from enum import IntEnum, unique
from .device import YubiKey
from .driver_ccid import APDUError, SW
from .util import (
AID, Tlv, parse_tlvs,
is_cve201715361_vulnerable_firmware_version,
ensure_not_cve201715361_vulnerable_firmware_version)
from cryptography import x509
from cryptography.exceptions import InvalidSignature
from cryptography.utils import int_to_bytes, int_from_bytes
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.constant_time import bytes_eq
from cryptography.hazmat.primitives.asymmetric import rsa, ec, padding
from cryptography.hazmat.primitives.serialization import Encoding, PublicFormat
from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
from cryptography.hazmat.backends import default_backend
from cryptography.x509.oid import NameOID
from collections import OrderedDict
from threading import Timer
import logging
import struct
import six
import os
logger = logging.getLogger(__name__)
@unique
class INS(IntEnum):
VERIFY = 0x20
CHANGE_REFERENCE = 0x24
RESET_RETRY = 0x2c
GENERATE_ASYMMETRIC = 0x47
AUTHENTICATE = 0x87
SEND_REMAINING = 0xc0
GET_DATA = 0xcb
PUT_DATA = 0xdb
SET_MGMKEY = 0xff
IMPORT_KEY = 0xfe
GET_VERSION = 0xfd
RESET = 0xfb
SET_PIN_RETRIES = 0xfa
ATTEST = 0xf9
@unique
class ALGO(IntEnum):
TDES = 0x03
RSA1024 = 0x06
RSA2048 = 0x07
ECCP256 = 0x11
ECCP384 = 0x14
@classmethod
def from_public_key(cls, key):
if isinstance(key, rsa.RSAPublicKey):
return getattr(cls, 'RSA%d' % key.key_size)
elif isinstance(key, ec.EllipticCurvePublicKey):
curve_name = key.curve.name
if curve_name == 'secp256r1':
return cls.ECCP256
elif curve_name == 'secp384r1':
return cls.ECCP384
raise UnsupportedAlgorithm(
'Unsupported key type: %s' % type(key), key=key)
@classmethod
def is_rsa(cls, algorithm_int):
# Implemented as "not not RSA" to reduce risk of false negatives if
# more algorithms are added
return not (
algorithm_int == cls.TDES
or algorithm_int == cls.ECCP256
or algorithm_int == cls.ECCP384
)
@unique
class SLOT(IntEnum):
AUTHENTICATION = 0x9a
CARD_MANAGEMENT = 0x9b
SIGNATURE = 0x9c
KEY_MANAGEMENT = 0x9d
CARD_AUTH = 0x9e
RETIRED1 = 0x82
RETIRED2 = 0x83
RETIRED3 = 0x84
RETIRED4 = 0x85
RETIRED5 = 0x86
RETIRED6 = 0x87
RETIRED7 = 0x88
RETIRED8 = 0x89
RETIRED9 = 0x8a
RETIRED10 = 0x8b
RETIRED11 = 0x8c
RETIRED12 = 0x8d
RETIRED13 = 0x8e
RETIRED14 = 0x8f
RETIRED15 = 0x90
RETIRED16 = 0x91
RETIRED17 = 0x92
RETIRED18 = 0x93
RETIRED19 = 0x94
RETIRED20 = 0x95
ATTESTATION = 0xf9
@unique
class OBJ(IntEnum):
CAPABILITY = 0x5fc107
CHUID = 0x5fc102
AUTHENTICATION = 0x5fc105 # cert for 9a key
FINGERPRINTS = 0x5fc103
SECURITY = 0x5fc106
FACIAL = 0x5fc108
SIGNATURE = 0x5fc10a # cert for 9c key
KEY_MANAGEMENT = 0x5fc10b # cert for 9d key
CARD_AUTH = 0x5fc101 # cert for 9e key
DISCOVERY = 0x7e
KEY_HISTORY = 0x5fc10c
IRIS = 0x5fc121
RETIRED1 = 0x5fc10d
RETIRED2 = 0x5fc10e
RETIRED3 = 0x5fc10f
RETIRED4 = 0x5fc110
RETIRED5 = 0x5fc111
RETIRED6 = 0x5fc112
RETIRED7 = 0x5fc113
RETIRED8 = 0x5fc114
RETIRED9 = 0x5fc115
RETIRED10 = 0x5fc116
RETIRED11 = 0x5fc117
RETIRED12 = 0x5fc118
RETIRED13 = 0x5fc119
RETIRED14 = 0x5fc11a
RETIRED15 = 0x5fc11b
RETIRED16 = 0x5fc11c
RETIRED17 = 0x5fc11d
RETIRED18 = 0x5fc11e
RETIRED19 = 0x5fc11f
RETIRED20 = 0x5fc120
PIVMAN_DATA = 0x5fff00
PIVMAN_PROTECTED_DATA = 0x5fc109 # Use slot for printed information.
ATTESTATION = 0x5fff01
@classmethod
def from_slot(cls, slot):
return getattr(cls, SLOT(slot).name)
@unique
class TAG(IntEnum):
DYN_AUTH = 0x7c
OBJ_ID = 0x5c
OBJ_DATA = 0x53
CERTIFICATE = 0x70
CERT_INFO = 0x71
ALGO = 0x80
PIN_POLICY = 0xaa
TOUCH_POLICY = 0xab
LRC = 0xfe
@unique
class PIN_POLICY(IntEnum):
DEFAULT = 0x0
NEVER = 0x1
ONCE = 0x2
ALWAYS = 0x3
@unique
class TOUCH_POLICY(IntEnum):
DEFAULT = 0x0
NEVER = 0x1
ALWAYS = 0x2
CACHED = 0x3
class AuthenticationFailed(Exception):
def __init__(self, message, sw, applet_version):
super(AuthenticationFailed, self).__init__(message)
self.tries_left = (
tries_left(sw, applet_version)
if is_verify_fail(sw, applet_version)
else None)
class AuthenticationBlocked(AuthenticationFailed):
def __init__(self, message, sw):
# Dummy applet_version since sw will always be "authentication blocked"
super(AuthenticationBlocked, self).__init__(message, sw, ())
class BadFormat(Exception):
def __init__(self, message, bad_value):
super(BadFormat, self).__init__(message)
self.bad_value = bad_value
class KeypairMismatch(Exception):
def __init__(self, slot, cert):
super(KeypairMismatch, self).__init__(
'The certificate does not match the private key in slot %s.' % slot)
self.slot = slot
self.cert = cert
class UnsupportedAlgorithm(Exception):
def __init__(self, message, algorithm_id=None, key=None, ):
super(UnsupportedAlgorithm, self).__init__(message)
if algorithm_id is None and key is None:
raise ValueError(
'At least one of algorithm_id and key must be given.')
self.algorithm_id = algorithm_id
self.key = key
class WrongPin(AuthenticationFailed):
def __init__(self, sw, applet_version):
super(WrongPin, self).__init__(
'Incorrect PIN', sw, applet_version)
class WrongPuk(AuthenticationFailed):
def __init__(self, sw, applet_version):
super(WrongPuk, self).__init__(
'Incorrect PUK', sw, applet_version)
PIN = 0x80
PUK = 0x81
# 010203040506070801020304050607080102030405060708
DEFAULT_MANAGEMENT_KEY = b'\x01\x02\x03\x04\x05\x06\x07\x08' \
+ b'\x01\x02\x03\x04\x05\x06\x07\x08' \
+ b'\x01\x02\x03\x04\x05\x06\x07\x08'
def _parse_tlv_dict(data):
return dict((tlv.tag, tlv.value) for tlv in parse_tlvs(data))
def _pack_pin(pin):
if isinstance(pin, six.text_type):
pin = pin.encode('utf8')
if len(pin) > 8:
raise BadFormat(
'PIN/PUK too large (max 8 bytes, was %d)' % len(pin), pin)
return pin.ljust(8, b'\xff')
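# Illustrative example: PINs shorter than eight bytes are right-padded
# with 0xff, e.g. _pack_pin('123456') == b'123456\xff\xff'.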
def _get_key_data(key):
if isinstance(key, rsa.RSAPrivateKey):
if key.public_key().public_numbers().e != 65537:
raise UnsupportedAlgorithm(
'Unsupported RSA exponent: %d'
% key.public_key().public_numbers().e,
key=key)
if key.key_size == 1024:
algo = ALGO.RSA1024
ln = 64
elif key.key_size == 2048:
algo = ALGO.RSA2048
ln = 128
else:
raise UnsupportedAlgorithm(
'Unsupported RSA key size: %d' % key.key_size, key=key)
priv = key.private_numbers()
data = Tlv(0x01, int_to_bytes(priv.p, ln)) + \
Tlv(0x02, int_to_bytes(priv.q, ln)) + \
Tlv(0x03, int_to_bytes(priv.dmp1, ln)) + \
Tlv(0x04, int_to_bytes(priv.dmq1, ln)) + \
Tlv(0x05, int_to_bytes(priv.iqmp, ln))
elif isinstance(key, ec.EllipticCurvePrivateKey):
if isinstance(key.curve, ec.SECP256R1):
algo = ALGO.ECCP256
ln = 32
elif isinstance(key.curve, ec.SECP384R1):
algo = ALGO.ECCP384
ln = 48
else:
raise UnsupportedAlgorithm(
                'Unsupported elliptic curve: %s' % key.curve, key=key)
priv = key.private_numbers()
data = Tlv(0x06, int_to_bytes(priv.private_value, ln))
else:
raise UnsupportedAlgorithm('Unsupported key type!', key=key)
return algo, data
def _dummy_key(algorithm):
if algorithm == ALGO.RSA1024:
return rsa.generate_private_key(65537, 1024, default_backend())
if algorithm == ALGO.RSA2048:
return rsa.generate_private_key(65537, 2048, default_backend())
if algorithm == ALGO.ECCP256:
return ec.generate_private_key(ec.SECP256R1(), default_backend())
if algorithm == ALGO.ECCP384:
return ec.generate_private_key(ec.SECP384R1(), default_backend())
raise UnsupportedAlgorithm(
'Unsupported algorithm: %s' % algorithm, algorithm_id=algorithm)
def _pkcs1_15_pad(algorithm, message):
h = hashes.Hash(hashes.SHA256(), default_backend())
h.update(message)
t = b'\x30\x31\x30\x0d\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05' + \
b'\x00\x04\x20' + h.finalize()
em_len = 128 if algorithm == ALGO.RSA1024 else 256
f_len = em_len - len(t) - 3
return b'\0\1' + b'\xff' * f_len + b'\0' + t
_sign_len_conditions = {
ALGO.RSA1024: lambda ln: ln == 128,
ALGO.RSA2048: lambda ln: ln == 256,
ALGO.ECCP256: lambda ln: ln <= 32,
ALGO.ECCP384: lambda ln: ln <= 48
}
_decrypt_len_conditions = {
ALGO.RSA1024: lambda ln: ln == 128,
ALGO.RSA2048: lambda ln: ln == 256,
ALGO.ECCP256: lambda ln: ln == 65,
ALGO.ECCP384: lambda ln: ln == 97
}
def _derive_key(pin, salt):
kdf = PBKDF2HMAC(hashes.SHA1(), 24, salt, 10000, default_backend())
return kdf.derive(pin.encode('utf-8'))
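# Illustrative example of deriving a PIN-protected management key; the salt
# value here is hypothetical.
#
#   salt = os.urandom(16)
#   mgm_key = _derive_key('123456', salt)
#   len(mgm_key)  # -> 24, i.e. a full triple-DES management key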
def generate_random_management_key():
return os.urandom(24)
def is_verify_fail(sw, applet_version):
if applet_version < (1, 0, 4):
return 0x6300 <= sw <= 0x63ff
else:
return SW.is_verify_fail(sw)
def tries_left(sw, applet_version):
if applet_version < (1, 0, 4):
if sw == SW.AUTH_METHOD_BLOCKED:
return 0
if not is_verify_fail(sw, applet_version):
raise ValueError(
'Cannot read remaining tries from status word: %x' % sw)
return sw & 0xff
else:
return SW.tries_left(sw)
class PivmanData(object):
def __init__(self, raw_data=Tlv(0x80)):
data = _parse_tlv_dict(Tlv(raw_data).value)
self._flags = struct.unpack(
'>B', data[0x81])[0] if 0x81 in data else None
self.salt = data.get(0x82)
self.pin_timestamp = struct.unpack('>I', data[0x83]) \
if 0x83 in data else None
def _get_flag(self, mask):
return bool((self._flags or 0) & mask)
def _set_flag(self, mask, value):
if value:
self._flags = (self._flags or 0) | mask
elif self._flags is not None:
self._flags &= ~mask
@property
def puk_blocked(self):
return self._get_flag(0x01)
@puk_blocked.setter
def puk_blocked(self, value):
self._set_flag(0x01, value)
@property
def mgm_key_protected(self):
return self._get_flag(0x02)
@mgm_key_protected.setter
def mgm_key_protected(self, value):
self._set_flag(0x02, value)
def get_bytes(self):
data = b''
if self._flags is not None:
data += Tlv(0x81, struct.pack('>B', self._flags))
if self.salt is not None:
data += Tlv(0x82, self.salt)
if self.pin_timestamp is not None:
data += Tlv(0x83, struct.pack('>I', self.pin_timestamp))
return Tlv(0x80, data)
class PivmanProtectedData(object):
def __init__(self, raw_data=Tlv(0x88)):
data = _parse_tlv_dict(Tlv(raw_data).value)
self.key = data.get(0x89)
def get_bytes(self):
data = b''
if self.key is not None:
data += Tlv(0x89, self.key)
return Tlv(0x88, data)
class PivController(object):
def __init__(self, driver):
driver.select(AID.PIV)
self._authenticated = False
self._driver = driver
self._version = self._read_version()
self._update_pivman_data()
def _update_pivman_data(self):
try:
self._pivman_data = PivmanData(self.get_data(OBJ.PIVMAN_DATA))
except APDUError:
self._pivman_data = PivmanData()
@property
def version(self):
return self._version
@property
def has_protected_key(self):
return self.has_derived_key or self.has_stored_key
@property
def has_derived_key(self):
return self._pivman_data.salt is not None
@property
def has_stored_key(self):
return self._pivman_data.mgm_key_protected
@property
def puk_blocked(self):
return self._pivman_data.puk_blocked
def send_cmd(self, ins, p1=0, p2=0, data=b'', check=SW.OK):
while len(data) > 0xff:
self._driver.send_apdu(0x10, ins, p1, p2, data[:0xff])
data = data[0xff:]
resp, sw = self._driver.send_apdu(0, ins, p1, p2, data, check=None)
while (sw >> 8) == SW.MORE_DATA:
more, sw = self._driver.send_apdu(
0, INS.SEND_REMAINING, 0, 0, b'', check=None)
resp += more
if check is None:
return resp, sw
elif sw != check:
raise APDUError(resp, sw)
return resp
def _read_version(self):
return tuple(six.iterbytes(self.send_cmd(INS.GET_VERSION)))
def _init_pivman_protected(self):
try:
self._pivman_protected_data = PivmanProtectedData(
self.get_data(OBJ.PIVMAN_PROTECTED_DATA))
except APDUError as e:
if e.sw == SW.NOT_FOUND:
# No data there, initialise a new object.
self._pivman_protected_data = PivmanProtectedData()
else:
raise
def verify(self, pin, touch_callback=None):
try:
self.send_cmd(INS.VERIFY, 0, PIN, _pack_pin(pin))
except APDUError as e:
if e.sw == SW.AUTH_METHOD_BLOCKED:
raise AuthenticationBlocked('PIN is blocked.', e.sw)
elif is_verify_fail(e.sw, self.version):
raise WrongPin(e.sw, self.version)
raise
if self.has_derived_key and not self._authenticated:
self.authenticate(
_derive_key(pin, self._pivman_data.salt), touch_callback)
self.verify(pin, touch_callback)
if self.has_stored_key and not self._authenticated:
self._init_pivman_protected()
self.authenticate(self._pivman_protected_data.key, touch_callback)
self.verify(pin, touch_callback)
def change_pin(self, old_pin, new_pin):
try:
self.send_cmd(INS.CHANGE_REFERENCE, 0, PIN,
_pack_pin(old_pin) + _pack_pin(new_pin))
except APDUError as e:
if e.sw == SW.AUTH_METHOD_BLOCKED:
raise AuthenticationBlocked('PIN is blocked.', e.sw)
elif is_verify_fail(e.sw, self.version):
raise WrongPin(e.sw, self.version)
raise
if self.has_derived_key:
if not self._authenticated:
self.authenticate(_derive_key(old_pin, self._pivman_data.salt))
self.use_derived_key(new_pin)
def change_puk(self, old_puk, new_puk):
try:
self.send_cmd(INS.CHANGE_REFERENCE, 0, PUK,
_pack_pin(old_puk) + _pack_pin(new_puk))
except APDUError as e:
if e.sw == SW.AUTH_METHOD_BLOCKED:
raise AuthenticationBlocked('PUK is blocked.', e.sw)
elif is_verify_fail(e.sw, self.version):
raise WrongPuk(e.sw, self.version)
raise
def unblock_pin(self, puk, new_pin):
try:
self.send_cmd(
INS.RESET_RETRY, 0, PIN, _pack_pin(puk) + _pack_pin(new_pin))
except APDUError as e:
if e.sw == SW.AUTH_METHOD_BLOCKED:
raise AuthenticationBlocked('PUK is blocked.', e.sw)
elif is_verify_fail(e.sw, self.version):
raise WrongPuk(e.sw, self.version)
raise
def set_pin_retries(self, pin_retries, puk_retries):
self.send_cmd(INS.SET_PIN_RETRIES, pin_retries, puk_retries)
def use_derived_key(self, pin, touch=False):
self.verify(pin)
if not self.puk_blocked:
self._block_puk()
self._pivman_data.puk_blocked = True
new_salt = os.urandom(16)
new_key = _derive_key(pin, new_salt)
self.send_cmd(INS.SET_MGMKEY, 0xff, 0xfe if touch else 0xff,
six.int2byte(ALGO.TDES) +
Tlv(SLOT.CARD_MANAGEMENT, new_key))
self._pivman_data.salt = new_salt
self.put_data(OBJ.PIVMAN_DATA, self._pivman_data.get_bytes())
def set_pin_timestamp(self, timestamp):
self._pivman_data.pin_timestamp = timestamp
self.put_data(OBJ.PIVMAN_DATA, self._pivman_data.get_bytes())
def authenticate(self, key, touch_callback=None):
ct1 = self.send_cmd(INS.AUTHENTICATE, ALGO.TDES, SLOT.CARD_MANAGEMENT,
Tlv(TAG.DYN_AUTH, Tlv(0x80)))[4:12]
backend = default_backend()
try:
cipher_key = algorithms.TripleDES(key)
except ValueError:
raise BadFormat('Management key must be exactly 24 bytes long, '
'was: {}'.format(len(key)), None)
cipher = Cipher(cipher_key, modes.ECB(), backend)
decryptor = cipher.decryptor()
pt1 = decryptor.update(ct1) + decryptor.finalize()
ct2 = os.urandom(8)
if touch_callback is not None:
touch_timer = Timer(0.500, touch_callback)
touch_timer.start()
try:
pt2 = self.send_cmd(
INS.AUTHENTICATE, ALGO.TDES, SLOT.CARD_MANAGEMENT,
Tlv(TAG.DYN_AUTH, Tlv(0x80, pt1) + Tlv(0x81, ct2))
)[4:12]
except APDUError as e:
if e.sw == SW.SECURITY_CONDITION_NOT_SATISFIED:
raise AuthenticationFailed(
'Incorrect management key', e.sw, self.version)
logger.error('Failed to authenticate management key.', exc_info=e)
raise
except Exception as e:
logger.error('Failed to authenticate management key.', exc_info=e)
raise
finally:
if touch_callback is not None:
touch_timer.cancel()
encryptor = cipher.encryptor()
pt2_cmp = encryptor.update(ct2) + encryptor.finalize()
if not bytes_eq(pt2, pt2_cmp):
raise ValueError('Device challenge did not match!')
self._authenticated = True
def set_mgm_key(self, new_key, touch=False, store_on_device=False):
# If the key should be protected by PIN and no key is given,
# we generate a random key.
if not new_key:
if store_on_device:
new_key = generate_random_management_key()
else:
raise ValueError('new_key was not given and '
'store_on_device was not True')
if len(new_key) != 24:
raise BadFormat(
'Management key must be exactly 24 bytes long, was: {}'.format(
len(new_key)),
new_key)
if store_on_device or (not store_on_device and self.has_stored_key):
# Ensure we have access to protected data before overwriting key
try:
self._init_pivman_protected()
except Exception as e:
logger.debug('Failed to initialize protected pivman data',
exc_info=e)
if store_on_device:
raise
# Set the new management key
self.send_cmd(
INS.SET_MGMKEY, 0xff, 0xfe if touch else 0xff,
six.int2byte(ALGO.TDES) + Tlv(SLOT.CARD_MANAGEMENT, new_key))
if self.has_derived_key:
# Clear salt for old derived keys.
self._pivman_data.salt = None
# Set flag for stored or not stored key.
self._pivman_data.mgm_key_protected = store_on_device
# Update readable pivman data
self.put_data(OBJ.PIVMAN_DATA, self._pivman_data.get_bytes())
if store_on_device:
# Store key in protected pivman data
self._pivman_protected_data.key = new_key
self.put_data(
OBJ.PIVMAN_PROTECTED_DATA,
self._pivman_protected_data.get_bytes())
elif not store_on_device and self.has_stored_key:
# If new key should not be stored and there is an old stored key,
# try to clear it.
try:
self._pivman_protected_data.key = None
self.put_data(
OBJ.PIVMAN_PROTECTED_DATA,
self._pivman_protected_data.get_bytes())
except APDUError as e:
logger.debug("No PIN provided, can't clear key..", exc_info=e)
# Update CHUID and CCC if not set
try:
self.get_data(OBJ.CAPABILITY)
except APDUError as e:
if e.sw == SW.NOT_FOUND:
self.update_ccc()
else:
logger.debug('Failed to read CCC...', exc_info=e)
try:
self.get_data(OBJ.CHUID)
except APDUError as e:
if e.sw == SW.NOT_FOUND:
self.update_chuid()
else:
logger.debug('Failed to read CHUID...', exc_info=e)
def get_pin_tries(self):
"""
        Returns the number of PIN retries left, or 0 if PIN
        authentication is blocked. Note that 15 is the highest value
        that will be returned even if more tries remain.
"""
# Verify without PIN gives number of tries left.
_, sw = self.send_cmd(INS.VERIFY, 0, PIN, check=None)
return tries_left(sw, self.version)
def _get_puk_tries(self):
# A failed unblock pin will return number of PUK tries left,
# but also uses one try.
_, sw = self.send_cmd(INS.RESET_RETRY, 0, PIN, _pack_pin('')*2,
check=None)
return tries_left(sw, self.version)
def _block_pin(self):
while self.get_pin_tries() > 0:
self.send_cmd(INS.VERIFY, 0, PIN, _pack_pin(''), check=None)
def _block_puk(self):
while self._get_puk_tries() > 0:
self.send_cmd(INS.RESET_RETRY, 0, PIN, _pack_pin('')*2, check=None)
def reset(self):
self._block_pin()
self._block_puk()
self.send_cmd(INS.RESET)
self._update_pivman_data()
def get_data(self, object_id):
id_bytes = struct.pack(b'>I', object_id).lstrip(b'\0')
tlv = Tlv(self.send_cmd(INS.GET_DATA, 0x3f, 0xff,
Tlv(TAG.OBJ_ID, id_bytes)))
if tlv.tag not in [TAG.OBJ_DATA, OBJ.DISCOVERY]:
raise ValueError('Wrong tag in response data!')
return tlv.value
def put_data(self, object_id, data):
id_bytes = struct.pack(b'>I', object_id).lstrip(b'\0')
self.send_cmd(INS.PUT_DATA, 0x3f, 0xff, Tlv(TAG.OBJ_ID, id_bytes) +
Tlv(TAG.OBJ_DATA, data))
def generate_key(self, slot, algorithm, pin_policy=PIN_POLICY.DEFAULT,
touch_policy=TOUCH_POLICY.DEFAULT):
if ALGO.is_rsa(algorithm):
ensure_not_cve201715361_vulnerable_firmware_version(self.version)
if algorithm not in self.supported_algorithms:
raise UnsupportedAlgorithm(
'Algorithm not supported on this YubiKey: {}'
.format(algorithm),
algorithm_id=algorithm)
data = Tlv(TAG.ALGO, six.int2byte(algorithm))
if pin_policy:
data += Tlv(TAG.PIN_POLICY, six.int2byte(pin_policy))
if touch_policy:
data += Tlv(TAG.TOUCH_POLICY, six.int2byte(touch_policy))
data = Tlv(0xac, data)
resp = self.send_cmd(INS.GENERATE_ASYMMETRIC, 0, slot, data)
if algorithm in [ALGO.RSA1024, ALGO.RSA2048]:
data = _parse_tlv_dict(Tlv(resp[1:]).value)
return rsa.RSAPublicNumbers(
int_from_bytes(data[0x82], 'big'),
int_from_bytes(data[0x81], 'big')
).public_key(default_backend())
elif algorithm in [ALGO.ECCP256, ALGO.ECCP384]:
curve = ec.SECP256R1 if algorithm == ALGO.ECCP256 else ec.SECP384R1
try:
# Added in cryptography 2.5
return ec.EllipticCurvePublicKey.from_encoded_point(
curve(),
resp[5:]
)
except AttributeError:
return ec.EllipticCurvePublicNumbers.from_encoded_point(
curve(),
resp[5:]
).public_key(default_backend())
raise UnsupportedAlgorithm(
'Invalid algorithm: {}'.format(algorithm),
algorithm_id=algorithm)
def generate_self_signed_certificate(
self, slot, public_key, common_name, valid_from, valid_to,
touch_callback=None):
algorithm = ALGO.from_public_key(public_key)
builder = x509.CertificateBuilder()
builder = builder.public_key(public_key)
builder = builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, common_name), ]))
# Same as subject on self-signed certificates.
builder = builder.issuer_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, common_name), ]))
# x509.random_serial_number added in cryptography 1.6
serial = int_from_bytes(os.urandom(20), 'big') >> 1
builder = builder.serial_number(serial)
builder = builder.not_valid_before(valid_from)
builder = builder.not_valid_after(valid_to)
try:
cert = self.sign_cert_builder(
slot, algorithm, builder, touch_callback)
except APDUError as e:
logger.error('Failed to generate certificate for slot %s', slot,
exc_info=e)
raise
self.import_certificate(slot, cert, verify=False)
def generate_certificate_signing_request(self, slot, public_key, subject,
touch_callback=None):
builder = x509.CertificateSigningRequestBuilder()
builder = builder.subject_name(
x509.Name([x509.NameAttribute(NameOID.COMMON_NAME, subject), ]))
try:
return self.sign_csr_builder(
slot, public_key, builder, touch_callback=touch_callback)
except APDUError as e:
logger.error(
'Failed to generate Certificate Signing Request for slot %s',
slot, exc_info=e)
raise
def import_key(self, slot, key, pin_policy=PIN_POLICY.DEFAULT,
touch_policy=TOUCH_POLICY.DEFAULT):
algorithm, data = _get_key_data(key)
if pin_policy:
data += Tlv(TAG.PIN_POLICY, six.int2byte(pin_policy))
if touch_policy:
data += Tlv(TAG.TOUCH_POLICY, six.int2byte(touch_policy))
self.send_cmd(INS.IMPORT_KEY, algorithm, slot, data)
return algorithm
def import_certificate(
self, slot, certificate, verify=False, touch_callback=None):
cert_data = certificate.public_bytes(Encoding.DER)
if verify:
# Verify that the public key used in the certificate
# is from the same keypair as the private key.
try:
public_key = certificate.public_key()
test_data = b'test'
if touch_callback is not None:
touch_timer = Timer(0.500, touch_callback)
touch_timer.start()
test_sig = self.sign(
slot, ALGO.from_public_key(public_key), test_data)
if touch_callback is not None:
touch_timer.cancel()
if isinstance(public_key, rsa.RSAPublicKey):
public_key.verify(
test_sig, test_data, padding.PKCS1v15(),
certificate.signature_hash_algorithm)
elif isinstance(public_key, ec.EllipticCurvePublicKey):
public_key.verify(
test_sig, test_data, ec.ECDSA(hashes.SHA256()))
else:
                    raise ValueError('Unknown key type: ' + str(type(public_key)))
except APDUError as e:
if e.sw == SW.INCORRECT_PARAMETERS:
raise KeypairMismatch(slot, certificate)
raise
except InvalidSignature:
raise KeypairMismatch(slot, certificate)
self.put_data(OBJ.from_slot(slot), Tlv(TAG.CERTIFICATE, cert_data) +
Tlv(TAG.CERT_INFO, b'\0') + Tlv(TAG.LRC))
self.update_chuid()
def read_certificate(self, slot):
data = _parse_tlv_dict(self.get_data(OBJ.from_slot(slot)))
if TAG.CERT_INFO in data: # Not available in attestation slot
if data[TAG.CERT_INFO] != b'\0':
raise ValueError('Compressed certificates are not supported!')
return x509.load_der_x509_certificate(data[TAG.CERTIFICATE],
default_backend())
def delete_certificate(self, slot):
self.put_data(OBJ.from_slot(slot), b'')
def attest(self, slot):
return x509.load_der_x509_certificate(self.send_cmd(INS.ATTEST, slot),
default_backend())
def _raw_sign_decrypt(self, slot, algorithm, payload, condition):
if not condition(len(payload.value)):
raise BadFormat(
'Input has invalid length for algorithm %s' % algorithm,
len(payload.value))
data = Tlv(TAG.DYN_AUTH, Tlv(0x82) + payload)
resp = self.send_cmd(INS.AUTHENTICATE, algorithm, slot, data)
return Tlv(Tlv(resp).value).value
def sign_raw(self, slot, algorithm, message):
return self._raw_sign_decrypt(slot, algorithm, Tlv(0x81, message),
_sign_len_conditions[algorithm])
def sign(self, slot, algorithm, message):
if algorithm in (ALGO.RSA1024, ALGO.RSA2048):
message = _pkcs1_15_pad(algorithm, message)
elif algorithm in (ALGO.ECCP256, ALGO.ECCP384):
h = hashes.Hash(hashes.SHA256(), default_backend())
h.update(message)
message = h.finalize()
return self.sign_raw(slot, algorithm, message)
def decrypt_raw(self, slot, algorithm, message):
return self._raw_sign_decrypt(slot, algorithm, Tlv(0x85, message),
_decrypt_len_conditions[algorithm])
def list_certificates(self):
certs = OrderedDict()
for slot in set(SLOT) - {SLOT.CARD_MANAGEMENT, SLOT.ATTESTATION}:
try:
certs[slot] = self.read_certificate(slot)
except APDUError:
pass
return certs
def update_chuid(self):
# Non-Federal Issuer FASC-N
# [9999-9999-999999-0-1-0000000000300001]
FASC_N = b'\xd4\xe7\x39\xda\x73\x9c\xed\x39\xce\x73\x9d\x83\x68' + \
b'\x58\x21\x08\x42\x10\x84\x21\xc8\x42\x10\xc3\xeb'
# Expires on: 2030-01-01
EXPIRY = b'\x32\x30\x33\x30\x30\x31\x30\x31'
self.put_data(
OBJ.CHUID,
Tlv(0x30, FASC_N) +
Tlv(0x34, os.urandom(16)) +
Tlv(0x35, EXPIRY) +
Tlv(0x3e) +
Tlv(TAG.LRC)
)
def update_ccc(self):
self.put_data(
OBJ.CAPABILITY,
Tlv(0xf0, b'\xa0\x00\x00\x01\x16\xff\x02' + os.urandom(14)) +
Tlv(0xf1, b'\x21') +
Tlv(0xf2, b'\x21') +
Tlv(0xf3) +
Tlv(0xf4, b'\x00') +
Tlv(0xf5, b'\x10') +
Tlv(0xf6) +
Tlv(0xf7) +
Tlv(0xfa) +
Tlv(0xfb) +
Tlv(0xfc) +
Tlv(0xfd) +
Tlv(TAG.LRC)
)
def sign_cert_builder(self, slot, algorithm, builder, touch_callback=None):
dummy_key = _dummy_key(algorithm)
cert = builder.sign(dummy_key, hashes.SHA256(), default_backend())
if touch_callback is not None:
touch_timer = Timer(0.500, touch_callback)
touch_timer.start()
sig = self.sign(slot, algorithm, cert.tbs_certificate_bytes)
if touch_callback is not None:
touch_timer.cancel()
seq = parse_tlvs(Tlv(cert.public_bytes(Encoding.DER)).value)
# Replace signature, add unused bits = 0
seq[2] = Tlv(seq[2].tag, b'\0' + sig)
# Re-assemble sequence
der = Tlv(0x30, b''.join(seq))
return x509.load_der_x509_certificate(der, default_backend())
def sign_csr_builder(self, slot, public_key, builder, touch_callback=None):
algorithm = ALGO.from_public_key(public_key)
dummy_key = _dummy_key(algorithm)
csr = builder.sign(dummy_key, hashes.SHA256(), default_backend())
seq = parse_tlvs(Tlv(csr.public_bytes(Encoding.DER)).value)
# Replace public key
pub_format = PublicFormat.PKCS1 if algorithm.name.startswith('RSA') \
else PublicFormat.SubjectPublicKeyInfo
dummy_bytes = dummy_key.public_key().public_bytes(
Encoding.DER, pub_format)
pub_bytes = public_key.public_bytes(Encoding.DER, pub_format)
seq[0] = seq[0].replace(dummy_bytes, pub_bytes)
if touch_callback is not None:
touch_timer = Timer(0.500, touch_callback)
touch_timer.start()
sig = self.sign(slot, algorithm, seq[0])
if touch_callback is not None:
touch_timer.cancel()
# Replace signature, add unused bits = 0
seq[2] = Tlv(seq[2].tag, b'\0' + sig)
# Re-assemble sequence
der = Tlv(0x30, b''.join(seq))
return x509.load_der_x509_csr(der, default_backend())
@property
def supports_pin_policies(self):
return self.version >= (4, 0, 0)
@property
def supported_touch_policies(self):
if self.version < (4, 0, 0):
return [] # Touch policy not supported on NEO.
elif self.version < (4, 3, 0):
return [TOUCH_POLICY.DEFAULT, TOUCH_POLICY.NEVER,
TOUCH_POLICY.ALWAYS] # Cached policy was added in 4.3
else:
return [policy for policy in TOUCH_POLICY]
@property
def supported_algorithms(self):
return [
alg for alg in ALGO
if not alg == ALGO.TDES
if not (ALGO.is_rsa(alg) and
is_cve201715361_vulnerable_firmware_version(self.version))
if not (alg == ALGO.ECCP384 and self.version < (4, 0, 0))
if not (alg == ALGO.RSA1024 and
YubiKey.is_fips_version(self.version))
]
|
the-stack_106_31206 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: HJK
@file: basic.py
@time: 2019-05-07
"""
"""
Basic song object
"""
import os
import re
import datetime
import logging
import click
import requests
from . import config
from .utils import colorize
class BasicSong:
"""
Define the basic properties and methods of a song.
Such as title, name, singer etc.
"""
def __init__(self):
self.idx = 0
self.id = 0
self._title = ""
self._singer = ""
self.ext = "mp3"
self.album = ""
self.size = ""
self.rate = ""
self._duration = ""
self.source = ""
self._song_url = ""
# self.song_file = ""
self.cover_url = ""
# self.cover_file = ""
self.lyrics_url = ""
self.lyrics_text = ""
# self.lyrics_file = ""
self._fullname = ""
self.logger = logging.getLogger(__name__)
def __repr__(self):
""" Abstract of the song """
source = colorize("%s" % self.source.upper(), self.source)
return "%s #%s %s-%s-%s \n %s \n" % (
source,
self.id,
self.title,
self.singer,
self.album,
self.song_url,
)
def __str__(self):
""" Song details """
source = colorize("%s" % self.source.upper(), self.source)
return _(
" -> Source: {source} #{id}\n"
" -> Title: {title}\n"
" -> Singer: {singer}\n"
" -> Album: {album}\n"
" -> Duration: {duration}\n"
" -> Size: {size}MB\n"
" -> Bit Rate: {rate}\n"
" -> Song URL: {song_url}\n"
" -> Lyrics URL: {lyrics_url}\n"
" -> Cover URL: {cover_url}\n"
).format(
source=source,
id=self.id,
title=self.title,
singer=self.singer,
album=self.album,
duration=self.duration,
size=self.size,
rate=self.rate,
song_url=self.song_url,
lyrics_url=self.lyrics_url,
cover_url=self.cover_url,
)
@property
def available(self) -> bool:
""" Not available when url is none or size equal 0 """
return bool(self.song_url and self.size)
@property
def name(self) -> str:
""" Song file name """
return "%s - %s.%s" % (self.singer, self.title, self.ext)
@property
def duration(self):
""" 持续时间 H:M:S """
return self._duration
@duration.setter
def duration(self, seconds):
self._duration = str(datetime.timedelta(seconds=int(seconds)))
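    # Illustrative example: setting `song.duration = 225` stores the
    # string "0:03:45".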
@property
def song_url(self) -> str:
return self._song_url
@song_url.setter
def song_url(self, url):
""" Set song url and update size. """
try:
r = requests.get(
url,
stream=True,
headers=config.get("wget_headers"),
proxies=config.get("proxies"),
)
self._song_url = url
size = int(r.headers.get("Content-Length", 0))
            # Convert to MB and keep two decimal places
self.size = round(size / 1048576, 2)
            # Set the full file name (without extension)
if not self._fullname:
self._set_fullname()
except Exception as e:
self.logger.info(_("Request failed: {url}").format(url=url))
self.logger.info(e)
@property
def row(self) -> list:
""" Song details in list form """
def highlight(s, k):
return s.replace(k, colorize(k, "xiami")).replace(
k.title(), colorize(k.title(), "xiami")
)
ht_singer = self.singer if len(self.singer) < 30 else self.singer[:30] + "..."
ht_title = self.title if len(self.title) < 30 else self.title[:30] + "..."
ht_album = self.album if len(self.album) < 20 else self.album[:20] + "..."
if config.get("keyword"):
keywords = re.split(";|,|\s|\*", config.get("keyword"))
for k in keywords:
if not k:
continue
ht_singer = highlight(ht_singer, k)
ht_title = highlight(ht_title, k)
ht_album = highlight(ht_album, k)
size = "%sMB" % self.size
ht_size = size if int(self.size) < 8 else colorize(size, "flac")
return [
colorize(self.idx, "baidu"),
ht_title,
ht_singer,
ht_size,
self.duration,
ht_album,
self.source.upper(),
]
@property
def title(self):
return self._title
@title.setter
def title(self, value):
value = re.sub(r'[\\/:*?"<>|]', "", value)
self._title = value
@property
def singer(self):
return self._singer
@singer.setter
def singer(self, value):
value = re.sub(r'[\\/:*?"<>|]', "", value)
self._singer = value
def _set_fullname(self):
""" Full name without suffix, to resolve file name conflicts"""
outdir = config.get("outdir")
outfile = os.path.abspath(os.path.join(outdir, self.name))
if os.path.exists(outfile):
name, ext = self.name.rsplit(".", 1)
names = [
x for x in os.listdir(outdir) if x.startswith(name) and x.endswith(ext)
]
names = [x.rsplit(".", 1)[0] for x in names]
suffixes = [x.replace(name, "") for x in names]
# filter suffixes that match ' (x)' pattern
suffixes = [
x[2:-1] for x in suffixes if x.startswith(" (") and x.endswith(")")
]
indexes = [int(x) for x in suffixes if set(x) <= set("0123456789")]
idx = 1
if indexes:
idx += sorted(indexes)[-1]
self._fullname = os.path.abspath(
os.path.join(outdir, "%s (%d)" % (name, idx))
)
else:
self._fullname = outfile.rpartition(".")[0]
@property
def song_fullname(self):
return self._fullname + "." + self.ext
@property
def lyrics_fullname(self):
return self._fullname + ".lrc"
@property
def cover_fullname(self):
return self._fullname + ".jpg"
def _download_file(self, url, outfile, stream=False):
"""
Helper function for download
:param url:
:param outfile:
:param stream: need process bar or not
:return:
"""
if not url:
self.logger.error("URL is empty.")
return
try:
r = requests.get(
url,
stream=stream,
headers=config.get("wget_headers"),
proxies=config.get("proxies"),
)
if stream:
total_size = int(r.headers["content-length"])
with click.progressbar(
length=total_size, label=_(" :: Downloading ...")
) as bar:
with open(outfile, "wb") as f:
for chunk in r.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
bar.update(len(chunk))
else:
with open(outfile, "wb") as f:
f.write(r.content)
click.echo(
_(" :: Saved to: {outfile}").format(
outfile=colorize(outfile, "highlight")
)
)
except Exception as e:
click.echo("")
self.logger.error(_("Download failed: ") + "\n")
self.logger.error(_("URL: {url}").format(url=url) + "\n")
self.logger.error(
_("File location: {outfile}").format(outfile=outfile) + "\n"
)
if config.get("verbose"):
self.logger.error(e)
def _save_lyrics_text(self):
with open(self.lyrics_fullname, "w", encoding="utf-8") as f:
f.write(self.lyrics_text)
click.echo(
_(" :: Saved to: {outfile}").format(
outfile=colorize(self.lyrics_fullname, "highlight")
)
)
def download_song(self):
if self.song_url:
self._download_file(self.song_url, self.song_fullname, stream=True)
def download_lyrics(self):
if self.lyrics_url:
self._download_file(self.lyrics_url, self.lyrics_fullname, stream=False)
def download_cover(self):
if self.cover_url:
self._download_file(self.cover_url, self.cover_fullname, stream=False)
def download(self):
""" Main download function """
click.echo("===============================================================")
if config.get("verbose"):
click.echo(str(self))
else:
click.echo(" | ".join(self.row))
self.download_song()
if config.get("lyrics"):
self.download_lyrics()
if config.get("cover"):
self.download_cover()
click.echo("===============================================================\n")
|
the-stack_106_31208 | import sys
import requests
import json
from random import randrange
def updateMap(num = None):
if(num is not None): #is parameter passed
dictToSend = num
else:
dictToSend = str(randrange(1, 9)) #update using random number
    json.loads(dictToSend)  # validate that the payload parses as JSON (result is discarded)
output = None
try:
res = requests.post('http://192.168.1.100:5000/update', json=dictToSend) #flask endpoint
output = res.json() #success
except requests.ConnectionError as e:
output = "Connection error: " + str(e) #connection refused / aborted
except Exception as e:
output = "Unknown error: " + str(sys.exc_info()[0]) #unexpected
finally:
print(output) #print result (success or error)
if __name__ == "__main__": #if in main module (not imported)
if(len(sys.argv) - 1): #if there are script arguments
updateMap(sys.argv[1]) #update using script arguments
else:
updateMap() #default |
the-stack_106_31209 | from cv2 import flip
from scipy.ndimage import rotate
import numpy as np
rotate_angles = [0, 90, 180, 270]
def tta(image):
images = []
for rotate_angle in rotate_angles:
img = rotate(image, rotate_angle) if rotate_angle != 0 else image
images.append(img)
return np.array(images)
def back_tta(images):
backed = []
i = 0
for rotate_angle in rotate_angles:
image = images[i]
i += 1
img = rotate(image, 360 - rotate_angle) if rotate_angle != 0 else image
backed.append(img)
return backed
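# Usage sketch (illustrative): round-tripping a square image through
# tta/back_tta should recover the input, up to interpolation error from
# scipy.ndimage.rotate.
#
#   img = np.random.rand(64, 64, 3)
#   augmented = tta(img)            # shape: (4, 64, 64, 3)
#   restored = back_tta(augmented)  # list of 4 arrays, each ~equal to img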
|
the-stack_106_31211 | import os
def make_tsv(metadata, save_path):
metadata = [str(x) for x in metadata]
with open(os.path.join(save_path, 'metadata.tsv'), 'w') as f:
for x in metadata:
f.write(x + '\n')
# https://github.com/tensorflow/tensorboard/issues/44 image label will be squared
def make_sprite(label_img, save_path):
import math
import torch
import torchvision
# this ensures the sprite image has correct dimension as described in
# https://www.tensorflow.org/get_started/embedding_viz
nrow = int(math.ceil((label_img.size(0)) ** 0.5))
# augment images so that #images equals nrow*nrow
label_img = torch.cat((label_img, torch.randn(nrow ** 2 - label_img.size(0), *label_img.size()[1:]) * 255), 0)
# Dirty fix: no pixel are appended by make_grid call in save_image (https://github.com/pytorch/vision/issues/206)
xx = torchvision.utils.make_grid(torch.Tensor(1, 3, 32, 32), padding=0)
if xx.size(2) == 33:
sprite = torchvision.utils.make_grid(label_img, nrow=nrow, padding=0)
sprite = sprite[:, 1:, 1:]
torchvision.utils.save_image(sprite, os.path.join(save_path, 'sprite.png'))
else:
torchvision.utils.save_image(label_img, os.path.join(save_path, 'sprite.png'), nrow=nrow, padding=0)
def append_pbtxt(metadata, label_img, save_path, global_step, tag):
with open(os.path.join(save_path, 'projector_config.pbtxt'), 'a') as f:
#step = os.path.split(save_path)[-1]
f.write('embeddings {\n')
f.write('tensor_name: "{}:{}"\n'.format(tag, global_step))
f.write('tensor_path: "{}"\n'.format(os.path.join(global_step,"tensors.tsv")))
if metadata is not None:
f.write('metadata_path: "{}"\n'.format(os.path.join(global_step,"metadata.tsv")))
if label_img is not None:
f.write('sprite {\n')
f.write('image_path: "{}"\n'.format(os.path.join(global_step,"sprite.png")))
f.write('single_image_dim: {}\n'.format(label_img.size(3)))
f.write('single_image_dim: {}\n'.format(label_img.size(2)))
f.write('}\n')
f.write('}\n')
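# Usage sketch (illustrative): writing a minimal embedding-projector
# checkpoint by hand; the paths and the tag name are hypothetical.
#
#   import torch
#   label_img = torch.rand(10, 3, 32, 32)
#   make_tsv(list(range(10)), '/tmp/run/00000')
#   make_sprite(label_img, '/tmp/run/00000')
#   make_mat(torch.randn(10, 4).tolist(), '/tmp/run/00000')
#   append_pbtxt(list(range(10)), label_img, '/tmp/run', '00000', 'default')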
def make_mat(matlist, save_path):
with open(os.path.join(save_path, 'tensors.tsv'), 'w') as f:
for x in matlist:
x = [str(i) for i in x]
f.write('\t'.join(x) + '\n') |
the-stack_106_31212 | from typing import Any, List
from fastapi import APIRouter, Depends, HTTPException
from app.api import crud, deps, push, schemas
from app.db.repository import Repository
router = APIRouter()
@router.get("/", response_model=List[schemas.BookInDBBase])
def get_books(db: Repository = Depends(deps.get_db)) -> Any:
"""
Retrieve books list.
"""
    books = crud.book.get_multi(db=db)
    return books
@router.get("/{id}", response_model=schemas.BookInDBBase)
def get_book_by_id(
*,
db: Repository = Depends(deps.get_db),
id: int,
) -> Any:
"""
Get book by ID
"""
result = crud.book.get(db=db, id=id)
if not result:
raise HTTPException(status_code=404, detail="Author not found")
return result
@router.post("/", response_model=schemas.BookInDBBase)
def create_book(
*,
db: Repository = Depends(deps.get_db),
    book_in: schemas.BookCreate,
) -> Any:
"""
Create new book.
"""
    result = crud.book.create(db=db, obj_in=book_in)
push.send_push(result.dict())
return result
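# Usage sketch (illustrative): assumes this router is mounted on a FastAPI
# app; the request body fields are hypothetical and depend on
# schemas.BookCreate.
#
#   from fastapi import FastAPI
#   from fastapi.testclient import TestClient
#
#   app = FastAPI()
#   app.include_router(router, prefix="/books")
#   client = TestClient(app)
#   client.post("/books/", json={"title": "Dune", "author_id": 1})
#   client.get("/books/1")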
|
the-stack_106_31213 | import unittest
import i18npy
import os
class TestModule(unittest.TestCase):
def __init__(self, *args, **kwargs):
global i18n
super().__init__(*args, **kwargs)
p = os.path.dirname(__file__)
jp_translation_path = os.path.join(p, "translations/jp.json")
i18n = i18npy.i18n_load(jp_translation_path)
self.lang_en = i18npy.load(os.path.join(p, "translations/en.json"))
self.lang_jp = i18npy.load(jp_translation_path)
self.lang_pl = i18npy.load(os.path.join(p, "translations/pl.json"))
def test_translations_simple(self):
KEY = "Cancel"
self.assertEqual(
i18n(KEY), "キャンセル",
"Should use global translator"
)
self.assertEqual(
self.lang_en.translate(KEY), "Cancel",
"Should use English instance of translator"
)
self.assertEqual(
self.lang_jp.translate(KEY), "キャンセル",
"Should use Japanes instance of translator"
)
self.assertEqual(
self.lang_pl.translate(KEY), "Anuluj",
"Should use Polish instance of translator"
)
def test_pluralism_simple(self):
KEY = "%n comments"
self.assertEqual(
i18n(KEY, 0), "0 コメント",
"Should return proper translation for num=0"
)
self.assertEqual(
i18n(KEY, 1), "1 コメント",
"Should return proper translation for num=1"
)
self.assertEqual(
i18n(KEY, 2), "2 コメント",
"Should return proper translation for num=2"
)
self.assertEqual(
self.lang_en.translate(KEY, None), "Comments disabled",
"Should show fallback translation when num=None"
)
self.assertEqual(
self.lang_en.translate(KEY, 0), "0 comments",
"Should return proper translation for num=0"
)
self.assertEqual(
self.lang_en.translate(KEY, 1), "1 comment",
"Should return proper translation for num=1"
)
self.assertEqual(
self.lang_en.translate(KEY, 2), "2 comments",
"Should return proper translation for num=2"
)
def test_pluralism_complex(self):
KEY = "Due in %n days"
self.assertEqual(
self.lang_en.translate(KEY, None), "Expired",
"Should show fallback translation when num=None"
)
self.assertEqual(
self.lang_en.translate(KEY, -2), "Due 2 days ago",
"Should return proper translation for num=-2"
)
self.assertEqual(
self.lang_en.translate(KEY, -1), "Due Yesterday",
"Should return proper translation for num=-1"
)
self.assertEqual(
self.lang_en.translate(KEY, 0), "Due Today",
"Should return proper translation for num=0"
)
self.assertEqual(
self.lang_en.translate(KEY, 1), "Due Tomorrow",
"Should return proper translation for num=1"
)
self.assertEqual(
self.lang_en.translate(KEY, 2), "Due in 2 days",
"Should return proper translation for num=2"
)
def test_replacements(self):
KEY = "Welcome %{name}"
self.assertEqual(
self.lang_en.translate(KEY, {"name": "John"}), "Welcome John",
"Replacement should work even if KEY is not in dictionary"
)
def test_short_keys(self):
self.assertEqual(
i18n("_short_key", "This is a long piece of text"), "This is a long piece of text",
"Should use default text"
)
self.assertEqual(
i18n("_monkeys"), "猿も木から落ちる",
"Should work normally"
)
def test_contexts_combined(self):
KEY = "%{name} uploaded %n photos to their %{album} album"
self.assertEqual(
i18n(KEY, 1, {
"name": "John",
"album": "Buck's Night"
}, {
"gender": "male"
}),
"Johnは彼のBuck's Nightアルバムに写真1枚をアップロードしました",
"Should use context for male gender"
)
self.assertEqual(
i18n(KEY, 3, {
"name": "Jane",
"album": "Hen's Night"
}, {
"gender": "female"
}),
"Janeは彼女のHen's Nightアルバムに写真3枚をアップロードしました",
"Should use context for female gender"
)
self.assertEqual(
self.lang_en.translate(KEY, 1, {
"name": "John",
"album": "Buck's Night"
}, {
"gender": "male"
}),
"John uploaded 1 photo to his Buck's Night album",
"Should use context for male gender"
)
self.assertEqual(
self.lang_en.translate(KEY, 3, {
"name": "Jane",
"album": "Hen's Night"
}, {
"gender": "female"
}),
"Jane uploaded 3 photos to her Hen's Night album",
"Should use context for female gender"
)
self.assertEqual(
self.lang_pl.translate(KEY, 1, {
"name": "John",
"album": "Buck's Night"
}, {
"gender": "male"
}),
"John przesłał 1 zdjęcie do jego albumu Buck's Night",
"Should use context for male gender"
)
self.assertEqual(
self.lang_pl.translate(KEY, 3, {
"name": "Jane",
"album": "Hen's Night"
}, {
"gender": "female"
}),
"Jane przesłała 3 zdjęcia do jej albumu Hen's Night",
"Should use context for female gender"
)
self.assertEqual(
self.lang_pl.translate(KEY, 5, {
"name": "John",
"album": "Buck's Night"
}, {
"gender": "male"
}),
"John przesłał 5 zdjęć do jego albumu Buck's Night",
"Should use context for male gender"
)
if __name__ == "__main__":
unittest.main()
|
the-stack_106_31216 | import numpy as np
import torch
import torch.nn.functional as F
from torchdistlog import logging
from scipy.special import comb
# sklearn
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from sklearn.decomposition import PCA
# faiss
try:
import faiss
except ModuleNotFoundError:
logging.warning("Faiss Package Not Found! (metrics package)")
_USE_FAISS_ = False
_NORMALIZE_ = True
from ..misc import utils
# cluster algorithm
def get_knn(ref_embeds, embeds, k, embeds_same_source=False, device_ids=None):
d = ref_embeds.shape[1]
if device_ids is not None:
if _USE_FAISS_:
index = faiss.IndexFlatL2(d)
index = utils.index_cpu_to_gpu_multiple(index, gpu_ids=device_ids)
index.add(ref_embeds)
distances, indices = index.search(embeds, k+1)
else:
# TODO: distributed calculation
device = torch.device("cuda:{}".format(device_ids[0]))
ref_embeds = torch.tensor(ref_embeds).to(device)
embeds = torch.tensor(embeds).to(device)
if _NORMALIZE_:
ref_embeds = F.normalize(ref_embeds, dim=-1)
embeds = F.normalize(embeds, dim=-1)
dist_mat = torch.cdist(embeds, ref_embeds, p=2)
topk_search = dist_mat.topk(k=k+1, dim=-1, largest=False)
distances, indices = topk_search.values.cpu().numpy(), topk_search.indices.cpu().numpy()
if embeds_same_source:
return indices[:, 1:], distances[:, 1:]
else:
return indices[:, :k], distances[:, :k]
else:
neigh = NearestNeighbors(n_neighbors=k)
neigh.fit(ref_embeds)
distances, indices = neigh.kneighbors(embeds, k + 1)
if embeds_same_source:
return indices[:, 1:], distances[:, 1:]
else:
return indices[:, :k], distances[:, :k]
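# Usage sketch (illustrative): the CPU path (device_ids=None) is backed by
# scikit-learn; each query's own row is excluded because
# embeds_same_source=True.
#
#   embeds = np.random.rand(100, 128).astype(np.float32)
#   indices, distances = get_knn(embeds, embeds, k=5, embeds_same_source=True)
#   indices.shape  # -> (100, 5)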
def get_knn_from_mat(metric_mat, k, embeds_same_source=False, is_min=True, device_ids=None):
device = torch.device("cuda:{}".format(device_ids[0]))
metric_mat = torch.from_numpy(metric_mat).to(device)
# sort
sorted_value, sorted_indices = torch.sort(metric_mat, dim=-1, descending=not is_min)
if embeds_same_source:
return (
sorted_indices[:, 1:(k+1)].cpu().numpy(),
sorted_value[:, 1:(k+1)].cpu().numpy()
)
else:
return (
sorted_indices[:, :k].cpu().numpy(),
sorted_value[:, :k].cpu().numpy()
)
def run_kmeans(x, num_clusters, device_ids=None):
_, d = x.shape
if device_ids is not None:
# faiss implementation of k-means
clus = faiss.Clustering(d, num_clusters)
clus.niter = 20
clus.max_points_per_centroid = 10000000
index = faiss.IndexFlatL2(d)
index = utils.index_cpu_to_gpu_multiple(index, gpu_ids=device_ids)
# perform the training
clus.train(x, index)
_, idxs = index.search(x, 1)
return np.array([int(n[0]) for n in idxs], dtype=np.int64)
else:
# k-means
kmeans = KMeans(n_clusters=num_clusters, random_state=0).fit(x)
return kmeans.labels_
def run_pca(x, out_dim, device_ids=None):
if device_ids is not None:
mat = faiss.PCAMatrix(x.shape[1], out_dim)
mat.train(x)
assert mat.is_trained
return mat.apply_py(x)
else:
pca = PCA(n_components=out_dim)
data_output = pca.fit_transform(x)
return data_output
# metrics functions: code from: github: pytorch-metric-learning
def get_relevance_mask(shape, gt_labels, embeds_same_source, label_counts):
    relevance_mask = np.zeros(shape=shape, dtype=int)
for k, v in label_counts.items():
matching_rows = np.where(gt_labels==k)[0]
max_column = v-1 if embeds_same_source else v
relevance_mask[matching_rows, :max_column] = 1
return relevance_mask
def get_label_counts(ref_labels):
unique_labels, label_counts = np.unique(ref_labels, return_counts=True)
num_k = min(1023, int(np.max(label_counts)))
return {k:v for k, v in zip(unique_labels, label_counts)}, num_k
def get_lone_query_labels(query_labels, ref_labels, ref_label_counts, embeds_same_source):
if embeds_same_source:
return np.array([k for k, v in ref_label_counts.items() if v <= 1])
else:
return np.setdiff1d(query_labels, ref_labels)
def r_precision(knn_labels, gt_labels, embeds_same_source, label_counts):
relevance_mask = get_relevance_mask(knn_labels.shape, gt_labels, embeds_same_source, label_counts)
matches_per_row = np.sum((knn_labels == gt_labels) * relevance_mask.astype(bool), axis=1)
max_possible_matches_per_row = np.sum(relevance_mask, axis=1)
accuracy_per_sample = matches_per_row / max_possible_matches_per_row
return np.mean(accuracy_per_sample)
def mean_average_precision_at_r(knn_labels, gt_labels, embeds_same_source, label_counts):
relevance_mask = get_relevance_mask(knn_labels.shape, gt_labels, embeds_same_source, label_counts)
num_samples, num_k = knn_labels.shape
equality = (knn_labels == gt_labels) * relevance_mask.astype(bool)
cumulative_correct = np.cumsum(equality, axis=1)
k_idx = np.tile(np.arange(1, num_k+1), (num_samples, 1))
precision_at_ks = (cumulative_correct * equality) / k_idx
summed_precision_pre_row = np.sum(precision_at_ks * relevance_mask, axis=1)
max_possible_matches_per_row = np.sum(relevance_mask, axis=1)
accuracy_per_sample = summed_precision_pre_row / max_possible_matches_per_row
return np.mean(accuracy_per_sample)
def precision_at_k(knn_labels, gt_labels, k):
curr_knn_labels = knn_labels[:, :k]
accuracy_per_sample = np.sum(curr_knn_labels == gt_labels, axis=1) / k
return np.mean(accuracy_per_sample)
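# Worked example (illustrative), for two queries with three retrieved
# neighbours each:
#
#   knn_labels = np.array([[1, 1, 2],
#                          [3, 2, 3]])
#   gt_labels = np.array([[1], [2]])
#   precision_at_k(knn_labels, gt_labels, k=2)
#   # query 1: 2/2 correct, query 2: 1/2 correct -> 0.75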
def recall_at_k(knn_labels, gt_labels, k):
accuracy_per_sample = np.array([float(gt_label in recalled_predictions[:k]) for gt_label, recalled_predictions in zip(gt_labels, knn_labels)])
return np.mean(accuracy_per_sample)
def f1_score(query_labels, cluster_labels):
# compute tp_plus_fp
qlabels_set, qlabels_counts = np.unique(query_labels, return_counts=True)
    tp_plus_fp = sum([comb(item, 2) for item in qlabels_counts if item > 1])
# compute tp
tp = sum([sum([comb(item, 2) for item in np.unique(cluster_labels[query_labels==query_label], return_counts=True)[1] if item > 1]) for query_label in qlabels_set])
# compute fp
    fp = tp_plus_fp - tp
# compute fn
fn = sum([comb(item, 2) for item in np.unique(cluster_labels, return_counts=True)[1] if item > 1]) - tp
# compute F1
P, R = tp / (tp+fp), tp / (tp+fn)
F1 = 2*P*R / (P+R)
return F1 |
the-stack_106_31217 | # --------------------------------------------------------
# SiamMask
# Licensed under The MIT License
# Written by Qiang Wang (wangqiang2015 at ia.ac.cn)
# --------------------------------------------------------
import glob
import time
import sys
sys.path.append("experiments/siammask_sharp")
sys.path.append(".")
from grasping.sensors import RealsenseSensor
from tools.test import *
parser = argparse.ArgumentParser(description='PyTorch Tracking Demo')
parser.add_argument('--resume', default='', type=str, required=True,
metavar='PATH',help='path to latest checkpoint (default: none)')
parser.add_argument('--config', dest='config', default='config_davis.json',
help='hyper-parameter of SiamMask in json format')
parser.add_argument('--base_path', default='../../data/tennis', help='datasets')
parser.add_argument('--cpu', action='store_true', help='cpu mode')
args = parser.parse_args()
if __name__ == '__main__':
# Setup device
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.backends.cudnn.benchmark = True
# Setup Model
cfg = load_config(args)
from custom import Custom
siammask = Custom(anchors=cfg['anchors'])
if args.resume:
assert isfile(args.resume), 'Please download {} first.'.format(args.resume)
siammask = load_pretrain(siammask, args.resume)
siammask.eval().to(device)
# Parse Image file
img_files = sorted(glob.glob(join(args.base_path, '*.jp*')))
ims = [cv2.imread(imf) for imf in img_files]
# Select ROI
cv2.namedWindow("SiamMask", cv2.WND_PROP_FULLSCREEN)
cam = RealsenseSensor("cfg/sensors/realsense_config.json")
# cv2.setWindowProperty("SiamMask", cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
cam.start()
time.sleep(0.5)
img, _ = cam.frames()
try:
init_rect = cv2.selectROI('SiamMask', img, False, False)
x, y, w, h = init_rect
except:
exit()
toc = 0
f = 0
while True:
tic = cv2.getTickCount()
im, _ = cam.frames()
if f == 0: # init
target_pos = np.array([x + w / 2, y + h / 2])
target_sz = np.array([w, h])
state = siamese_init(im, target_pos, target_sz, siammask, cfg['hp'], device=device) # init tracker
elif f > 0: # tracking
state = siamese_track(state, im, mask_enable=True, refine_enable=True, device=device) # track
location = state['ploygon'].flatten()
mask = state['mask'] > state['p'].seg_thr
im[:, :, 2] = (mask > 0) * 255 + (mask == 0) * im[:, :, 2]
cv2.polylines(im, [np.int0(location).reshape((-1, 1, 2))], True, (0, 255, 0), 3)
cv2.imshow('SiamMask', im)
key = cv2.waitKey(1)
if key > 0:
break
f += 1
toc += cv2.getTickCount() - tic
toc /= cv2.getTickFrequency()
fps = f / toc
    print('SiamMask Time: {:02.1f}s Speed: {:3.1f}fps (with visualization!)'.format(toc, fps))
|