content | sha1 | id
---|---|---|
from typing import Union
from typing import Optional
import numpy
from datetime import datetime
def extract_sicd(
img_header: Union[ImageSegmentHeader, ImageSegmentHeader0],
transpose: bool,
nitf_header: Optional[Union[NITFHeader, NITFHeader0]] = None) -> SICDType:
"""
Extract the best available SICD structure from relevant nitf header structures.
Parameters
----------
img_header : ImageSegmentHeader|ImageSegmentHeader0
transpose : bool
nitf_header : None|NITFHeader|NITFHeader0
Returns
-------
SICDType
"""
def get_collection_info() -> CollectionInfoType:
isorce = img_header.ISORCE.strip()
collector_name = None if len(isorce) < 1 else isorce
iid2 = img_header.IID2.strip()
core_name = img_header.IID1.strip() if len(iid2) < 1 else iid2
class_str = img_header.Security.CLAS
if class_str == 'T':
classification = 'TOPSECRET'
elif class_str == 'S':
classification = 'SECRET'
elif class_str == 'C':
classification = 'CONFIDENTIAL'
elif class_str == 'U':
classification = 'UNCLASSIFIED'
else:
classification = ''
ctlh = img_header.Security.CTLH.strip()
if len(ctlh) > 0:
classification += '//' + ctlh
code = img_header.Security.CODE.strip()
if len(code) > 0:
classification += '//' + code
return CollectionInfoType(
CollectorName=collector_name,
CoreName=core_name,
Classification=classification)
def get_image_data() -> ImageDataType:
pvtype = img_header.PVTYPE
if pvtype == 'C':
if img_header.NBPP != 64:
logger.warning(
'This NITF has complex bands that are not 64-bit.\n\t'
'This is not currently supported.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'R':
if img_header.NBPP == 64:
logger.warning(
'The real/imaginary data in the NITF are stored as 64-bit floating point.\n\t'
'The closest Pixel Type, RE32F_IM32F, will be used,\n\t'
'but there may be overflow issues if converting this file.')
pixel_type = 'RE32F_IM32F'
elif pvtype == 'SI':
pixel_type = 'RE16I_IM16I'
else:
raise ValueError('Got unhandled PVTYPE {}'.format(pvtype))
if transpose:
rows = img_header.NCOLS
cols = img_header.NROWS
else:
rows = img_header.NROWS
cols = img_header.NCOLS
return ImageDataType(
PixelType=pixel_type,
NumRows=rows,
NumCols=cols,
FirstRow=0,
FirstCol=0,
FullImage=(rows, cols),
SCPPixel=(0.5 * rows, 0.5 * cols))
def append_country_code(cc) -> None:
if len(cc) > 0:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CountryCodes=[cc, ])
elif the_sicd.CollectionInfo.CountryCodes is None:
the_sicd.CollectionInfo.CountryCodes = [cc, ]
elif cc not in the_sicd.CollectionInfo.CountryCodes:
the_sicd.CollectionInfo.CountryCodes.append(cc)
def set_image_corners(icps: numpy.ndarray, override: bool = False) -> None:
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(ImageCorners=icps)
elif the_sicd.GeoData.ImageCorners is None or override:
the_sicd.GeoData.ImageCorners = icps
def set_arp_position(arp_ecf: numpy.ndarray, override: bool = False) -> None:
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType(ARPPos=arp_ecf)
elif override:
# prioritize this information first - it should be more reliable than other sources
the_sicd.SCPCOA.ARPPos = arp_ecf
def set_scp(scp_ecf: numpy.ndarray, scp_pixel: Union[numpy.ndarray, list, tuple], override: bool = False) -> None:
def set_scppixel():
if the_sicd.ImageData is None:
the_sicd.ImageData = ImageDataType(SCPPixel=scp_pixel)
else:
the_sicd.ImageData.SCPPixel = scp_pixel
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType(SCP=SCPType(ECF=scp_ecf))
set_scppixel()
elif the_sicd.GeoData.SCP is None or override:
the_sicd.GeoData.SCP = SCPType(ECF=scp_ecf)
set_scppixel()
def set_collect_start(
collect_start: Union[str, datetime, numpy.datetime64], override: bool = False) -> None:
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType(CollectStart=collect_start)
elif the_sicd.Timeline.CollectStart is None or override:
the_sicd.Timeline.CollectStart = collect_start
def set_uvects(row_unit: numpy.ndarray, col_unit: numpy.ndarray) -> None:
if the_sicd.Grid is None:
the_sicd.Grid = GridType(
Row=DirParamType(UVectECF=row_unit),
Col=DirParamType(UVectECF=col_unit))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(UVectECF=row_unit)
elif the_sicd.Grid.Row.UVectECF is None:
the_sicd.Grid.Row.UVectECF = row_unit
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(UVectECF=col_unit)
elif the_sicd.Grid.Col.UVectECF is None:
the_sicd.Grid.Col.UVectECF = col_unit
def try_CMETAA() -> None:
# noinspection PyTypeChecker
tre = None if tres is None else tres['CMETAA'] # type: CMETAA
if tre is None:
return
cmetaa = tre.DATA
if the_sicd.GeoData is None:
the_sicd.GeoData = GeoDataType()
if the_sicd.SCPCOA is None:
the_sicd.SCPCOA = SCPCOAType()
if the_sicd.Grid is None:
the_sicd.Grid = GridType()
if the_sicd.Timeline is None:
the_sicd.Timeline = TimelineType()
if the_sicd.RadarCollection is None:
the_sicd.RadarCollection = RadarCollectionType()
if the_sicd.ImageFormation is None:
the_sicd.ImageFormation = ImageFormationType()
the_sicd.SCPCOA.SCPTime = 0.5*float(cmetaa.WF_CDP)
the_sicd.GeoData.SCP = SCPType(ECF=tre.get_scp())
the_sicd.SCPCOA.ARPPos = tre.get_arp()
the_sicd.SCPCOA.SideOfTrack = cmetaa.CG_LD.strip().upper()
the_sicd.SCPCOA.SlantRange = float(cmetaa.CG_SRAC)
the_sicd.SCPCOA.DopplerConeAng = float(cmetaa.CG_CAAC)
the_sicd.SCPCOA.GrazeAng = float(cmetaa.CG_GAAC)
the_sicd.SCPCOA.IncidenceAng = 90 - float(cmetaa.CG_GAAC)
if hasattr(cmetaa, 'CG_TILT'):
the_sicd.SCPCOA.TwistAng = float(cmetaa.CG_TILT)
if hasattr(cmetaa, 'CG_SLOPE'):
the_sicd.SCPCOA.SlopeAng = float(cmetaa.CG_SLOPE)
the_sicd.ImageData.SCPPixel = [int(cmetaa.IF_DC_IS_COL), int(cmetaa.IF_DC_IS_ROW)]
img_corners = tre.get_image_corners()
if img_corners is not None:
the_sicd.GeoData.ImageCorners = img_corners
if cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'S':
the_sicd.Grid.ImagePlane = 'SLANT'
elif cmetaa.CMPLX_SIGNAL_PLANE.upper() == 'G':
the_sicd.Grid.ImagePlane = 'GROUND'
else:
logger.warning(
'Got unexpected CMPLX_SIGNAL_PLANE value {},\n\t'
'setting ImagePlane to SLANT'.format(cmetaa.CMPLX_SIGNAL_PLANE))
the_sicd.Grid.Row = DirParamType(
SS=float(cmetaa.IF_RSS),
ImpRespWid=float(cmetaa.IF_RGRES),
Sgn=1 if cmetaa.IF_RFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_RFFT_SAMP)/(float(cmetaa.IF_RSS)*float(cmetaa.IF_RFFT_TOT)))
the_sicd.Grid.Col = DirParamType(
SS=float(cmetaa.IF_AZSS),
ImpRespWid=float(cmetaa.IF_AZRES),
Sgn=1 if cmetaa.IF_AFFTS.strip() == '-' else -1, # opposite sign convention
ImpRespBW=float(cmetaa.IF_AZFFT_SAMP)/(float(cmetaa.IF_AZSS)*float(cmetaa.IF_AZFFT_TOT)))
cmplx_weight = cmetaa.CMPLX_WEIGHT.strip().upper()
if cmplx_weight == 'UWT':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='UNIFORM')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='UNIFORM')
elif cmplx_weight == 'HMW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HAMMING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HAMMING')
elif cmplx_weight == 'HNW':
the_sicd.Grid.Row.WgtType = WgtTypeType(WindowName='HANNING')
the_sicd.Grid.Col.WgtType = WgtTypeType(WindowName='HANNING')
elif cmplx_weight == 'TAY':
the_sicd.Grid.Row.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_RNG_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_RNG_TAY_NBAR))})
the_sicd.Grid.Col.WgtType = WgtTypeType(
WindowName='TAYLOR',
Parameters={
'SLL': '-{0:d}'.format(int(cmetaa.CMPLX_AZ_SLL)),
'NBAR': '{0:d}'.format(int(cmetaa.CMPLX_AZ_TAY_NBAR))})
else:
logger.warning(
'Got unsupported CMPLX_WEIGHT value {}.\n\tThe resulting SICD will '
'not have valid weight array populated'.format(cmplx_weight))
the_sicd.Grid.Row.define_weight_function()
the_sicd.Grid.Col.define_weight_function()
# noinspection PyBroadException
try:
date_str = cmetaa.T_UTC_YYYYMMMDD
time_str = cmetaa.T_HHMMSSUTC
date_time = _iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
time_str[:2], time_str[2:4], time_str[4:6])
the_sicd.Timeline.CollectStart = numpy.datetime64(date_time, 'us')
except Exception:
logger.info('Failed extracting start time from CMETAA')
pass
the_sicd.Timeline.CollectDuration = float(cmetaa.WF_CDP)
the_sicd.Timeline.IPP = [
IPPSetType(TStart=0,
TEnd=float(cmetaa.WF_CDP),
IPPStart=0,
IPPEnd=numpy.floor(float(cmetaa.WF_CDP)*float(cmetaa.WF_PRF)),
IPPPoly=[0, float(cmetaa.WF_PRF)])]
the_sicd.RadarCollection.TxFrequency = TxFrequencyType(
Min=float(cmetaa.WF_SRTFR),
Max=float(cmetaa.WF_ENDFR))
the_sicd.RadarCollection.TxPolarization = cmetaa.POL_TR.upper()
the_sicd.RadarCollection.Waveform = [WaveformParametersType(
TxPulseLength=float(cmetaa.WF_WIDTH),
TxRFBandwidth=float(cmetaa.WF_BW),
TxFreqStart=float(cmetaa.WF_SRTFR),
TxFMRate=float(cmetaa.WF_CHRPRT)*1e12)]
tx_rcv_pol = '{}:{}'.format(cmetaa.POL_TR.upper(), cmetaa.POL_RE.upper())
the_sicd.RadarCollection.RcvChannels = [
ChanParametersType(TxRcvPolarization=tx_rcv_pol)]
the_sicd.ImageFormation.TxRcvPolarizationProc = tx_rcv_pol
if_process = cmetaa.IF_PROCESS.strip().upper()
if if_process == 'PF':
the_sicd.ImageFormation.ImageFormAlgo = 'PFA'
scp_ecf = tre.get_scp()
fpn_ned = numpy.array(
[float(cmetaa.CG_FPNUV_X), float(cmetaa.CG_FPNUV_Y), float(cmetaa.CG_FPNUV_Z)], dtype='float64')
ipn_ned = numpy.array(
[float(cmetaa.CG_IDPNUVX), float(cmetaa.CG_IDPNUVY), float(cmetaa.CG_IDPNUVZ)], dtype='float64')
fpn_ecf = ned_to_ecf(fpn_ned, scp_ecf, absolute_coords=False)
ipn_ecf = ned_to_ecf(ipn_ned, scp_ecf, absolute_coords=False)
the_sicd.PFA = PFAType(FPN=fpn_ecf, IPN=ipn_ecf)
elif if_process in ['RM', 'CD']:
the_sicd.ImageFormation.ImageFormAlgo = 'RMA'
# the remainder of this is guesswork to define required fields
the_sicd.ImageFormation.TStartProc = 0 # guess work
the_sicd.ImageFormation.TEndProc = float(cmetaa.WF_CDP)
the_sicd.ImageFormation.TxFrequencyProc = TxFrequencyProcType(
MinProc=float(cmetaa.WF_SRTFR), MaxProc=float(cmetaa.WF_ENDFR))
# all remaining guess work
the_sicd.ImageFormation.STBeamComp = 'NO'
the_sicd.ImageFormation.ImageBeamComp = 'SV' if cmetaa.IF_BEAM_COMP[0] == 'Y' else 'NO'
the_sicd.ImageFormation.AzAutofocus = 'NO' if cmetaa.AF_TYPE[0] == 'N' else 'SV'
the_sicd.ImageFormation.RgAutofocus = 'NO'
def try_AIMIDA() -> None:
tre = None if tres is None else tres['AIMIDA']
if tre is None:
return
aimida = tre.DATA
append_country_code(aimida.COUNTRY.strip())
create_time = datetime.strptime(aimida.CREATION_DATE, '%d%b%y')
if the_sicd.ImageCreation is None:
the_sicd.ImageCreation = ImageCreationType(DateTime=create_time)
elif the_sicd.ImageCreation.DateTime is None:
the_sicd.ImageCreation.DateTime = create_time
collect_start = datetime.strptime(aimida.MISSION_DATE+aimida.TIME, '%d%b%y%H%M')
set_collect_start(collect_start, override=False)
def try_AIMIDB() -> None:
tre = None if tres is None else tres['AIMIDB']
if tre is None:
return
aimidb = tre.DATA
append_country_code(aimidb.COUNTRY.strip())
if the_sicd.ImageFormation is not None and the_sicd.ImageFormation.SegmentIdentifier is None:
the_sicd.ImageFormation.SegmentIdentifier = aimidb.CURRENT_SEGMENT.strip()
date_str = aimidb.ACQUISITION_DATE
collect_start = numpy.datetime64(_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
set_collect_start(collect_start, override=False)
def try_ACFT() -> None:
if tres is None:
return
tre = tres['ACFTA']
if tre is None:
tre = tres['ACFTB']
if tre is None:
return
acft = tre.DATA
sensor_id = acft.SENSOR_ID.strip()
if len(sensor_id) > 1:
if the_sicd.CollectionInfo is None:
the_sicd.CollectionInfo = CollectionInfoType(CollectorName=sensor_id)
elif the_sicd.CollectionInfo.CollectorName is None:
the_sicd.CollectionInfo.CollectorName = sensor_id
row_ss = float(acft.ROW_SPACING)
col_ss = float(acft.COL_SPACING)
if hasattr(acft, 'ROW_SPACING_UNITS') and acft.ROW_SPACING_UNITS.strip().lower() == 'f':
row_ss *= foot
if hasattr(acft, 'COL_SPACING_UNITS') and acft.COL_SPACING_UNITS.strip().lower() == 'f':
col_ss *= foot
# NB: these values are actually ground plane values, and should be
# corrected to slant plane if possible
if the_sicd.SCPCOA is not None:
if the_sicd.SCPCOA.GrazeAng is not None:
col_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.GrazeAng))
if the_sicd.SCPCOA.TwistAng is not None:
row_ss *= numpy.cos(numpy.deg2rad(the_sicd.SCPCOA.TwistAng))
if the_sicd.Grid is None:
the_sicd.Grid = GridType(Row=DirParamType(SS=row_ss), Col=DirParamType(SS=col_ss))
return
if the_sicd.Grid.Row is None:
the_sicd.Grid.Row = DirParamType(SS=row_ss)
elif the_sicd.Grid.Row.SS is None:
the_sicd.Grid.Row.SS = row_ss
if the_sicd.Grid.Col is None:
the_sicd.Grid.Col = DirParamType(SS=col_ss)
elif the_sicd.Grid.Col.SS is None:
the_sicd.Grid.Col.SS = col_ss
def try_BLOCKA() -> None:
tre = None if tres is None else tres['BLOCKA']
if tre is None:
return
blocka = tre.DATA
icps = []
for fld_name in ['FRFC_LOC', 'FRLC_LOC', 'LRLC_LOC', 'LRFC_LOC']:
value = getattr(blocka, fld_name)
# noinspection PyBroadException
try:
lat_val = float(value[:10])
lon_val = float(value[10:21])
except ValueError:
lat_val = lat_lon_parser(value[:10])
lon_val = lat_lon_parser(value[10:21])
icps.append([lat_val, lon_val])
set_image_corners(icps, override=False)
def try_MPDSRA() -> None:
def valid_array(arr):
return numpy.all(numpy.isfinite(arr)) and numpy.any(arr != 0)
tre = None if tres is None else tres['MPDSRA']
if tre is None:
return
mpdsra = tre.DATA
scp_ecf = foot*numpy.array(
[float(mpdsra.ORO_X), float(mpdsra.ORO_Y), float(mpdsra.ORO_Z)], dtype='float64')
if valid_array(scp_ecf):
set_scp(scp_ecf, (int(mpdsra.ORP_COLUMN) - 1, int(mpdsra.ORP_ROW) - 1), override=False)
arp_pos_ned = foot*numpy.array(
[float(mpdsra.ARP_POS_N), float(mpdsra.ARP_POS_E), float(mpdsra.ARP_POS_D)], dtype='float64')
arp_vel_ned = foot*numpy.array(
[float(mpdsra.ARP_VEL_N), float(mpdsra.ARP_VEL_E), float(mpdsra.ARP_VEL_D)], dtype='float64')
arp_acc_ned = foot*numpy.array(
[float(mpdsra.ARP_ACC_N), float(mpdsra.ARP_ACC_E), float(mpdsra.ARP_ACC_D)], dtype='float64')
arp_pos = ned_to_ecf(arp_pos_ned, scp_ecf, absolute_coords=True) if valid_array(arp_pos_ned) else None
set_arp_position(arp_pos, override=False)
arp_vel = ned_to_ecf(arp_vel_ned, scp_ecf, absolute_coords=False) if valid_array(arp_vel_ned) else None
if the_sicd.SCPCOA.ARPVel is None:
the_sicd.SCPCOA.ARPVel = arp_vel
arp_acc = ned_to_ecf(arp_acc_ned, scp_ecf, absolute_coords=False) if valid_array(arp_acc_ned) else None
if the_sicd.SCPCOA.ARPAcc is None:
the_sicd.SCPCOA.ARPAcc = arp_acc
if the_sicd.PFA is not None and the_sicd.PFA.FPN is None:
# TODO: is this already in meters?
fpn_ecf = numpy.array(
[float(mpdsra.FOC_X), float(mpdsra.FOC_Y), float(mpdsra.FOC_Z)], dtype='float64') # *foot
if valid_array(fpn_ecf):
the_sicd.PFA.FPN = fpn_ecf
def try_MENSRB() -> None:
tre = None if tres is None else tres['MENSRB']
if tre is None:
return
mensrb = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensrb.ACFT_LOC[:12]),
lat_lon_parser(mensrb.ACFT_LOC[12:25]),
foot*float(mensrb.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensrb.RP_LOC[:12]),
lat_lon_parser(mensrb.RP_LOC[12:25]),
foot*float(mensrb.RP_ELV)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
set_scp(scp_ecf, (int(mensrb.RP_COL)-1, int(mensrb.RP_ROW)-1), override=False)
row_unit_ned = numpy.array(
[float(mensrb.C_R_NC), float(mensrb.C_R_EC), float(mensrb.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensrb.C_AZ_NC), float(mensrb.C_AZ_EC), float(mensrb.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def try_MENSRA() -> None:
tre = None if tres is None else tres['MENSRA']
if tre is None:
return
mensra = tre.DATA
arp_llh = numpy.array(
[lat_lon_parser(mensra.ACFT_LOC[:10]),
lat_lon_parser(mensra.ACFT_LOC[10:21]),
foot*float(mensra.ACFT_ALT)], dtype='float64')
scp_llh = numpy.array(
[lat_lon_parser(mensra.CP_LOC[:10]),
lat_lon_parser(mensra.CP_LOC[10:21]),
foot*float(mensra.CP_ALT)], dtype='float64')
# TODO: handle the conversion from msl to hae
arp_ecf = geodetic_to_ecf(arp_llh)
scp_ecf = geodetic_to_ecf(scp_llh)
set_arp_position(arp_ecf, override=True)
# TODO: is this already zero based?
set_scp(geodetic_to_ecf(scp_llh), (int(mensra.CCRP_COL), int(mensra.CCRP_ROW)), override=False)
row_unit_ned = numpy.array(
[float(mensra.C_R_NC), float(mensra.C_R_EC), float(mensra.C_R_DC)], dtype='float64')
col_unit_ned = numpy.array(
[float(mensra.C_AZ_NC), float(mensra.C_AZ_EC), float(mensra.C_AZ_DC)], dtype='float64')
set_uvects(ned_to_ecf(row_unit_ned, scp_ecf, absolute_coords=False),
ned_to_ecf(col_unit_ned, scp_ecf, absolute_coords=False))
def extract_corners() -> None:
icps = extract_image_corners(img_header)
if icps is None:
return
# TODO: include symmetry transform issue
set_image_corners(icps, override=False)
def extract_start() -> None:
# noinspection PyBroadException
try:
date_str = img_header.IDATIM
collect_start = numpy.datetime64(
_iso_date_format.format(
date_str[:4], date_str[4:6], date_str[6:8],
date_str[8:10], date_str[10:12], date_str[12:14]), 'us')
except Exception:
logger.info('failed extracting start time from IDATIM tre')
return
set_collect_start(collect_start, override=False)
# noinspection PyUnresolvedReferences
tres = None if img_header.ExtendedHeader.data is None \
else img_header.ExtendedHeader.data # type: Union[None, TREList]
collection_info = get_collection_info()
image_data = get_image_data()
the_sicd = SICDType(
CollectionInfo=collection_info,
ImageData=image_data)
# apply the various tres and associated logic
# NB: this should generally be in order of preference
try_CMETAA()
try_AIMIDB()
try_AIMIDA()
try_ACFT()
try_BLOCKA()
try_MPDSRA()
try_MENSRA()
try_MENSRB()
extract_corners()
extract_start()
return the_sicd | 536c95597ec426da168d3ca05317c9c8515dc73c | 3,659,300 |
import torch
from typing import Optional, Tuple
def ppo_clip_policy_loss(
logps: torch.Tensor,
logps_old: torch.Tensor,
advs: torch.Tensor,
clipratio: Optional[float] = 0.2
) -> Tuple[torch.Tensor, float]:
"""
Loss function for a PPO-clip policy.
See paper for full loss function math: https://arxiv.org/abs/1707.06347
Args:
- logps (torch.Tensor): Action log-probabilities under the current policy.
- logps_old (torch.Tensor): Action log-probabilities under the old (pre-update) policy.
- advs (torch.Tensor): Advantage estimates for the actions taken.
- clipratio (float): Clipping parameter for the PPO-clip loss; the default of 0.2 is generally fine.
Returns:
- ppo_loss (torch.Tensor): Loss term for PPO agent.
- kl (torch.Tensor): KL-divergence estimate between new and old policies.
"""
policy_ratio = torch.exp(logps - logps_old)
clipped_adv = torch.clamp(policy_ratio, 1 - clipratio, 1 + clipratio) * advs
ppo_loss = -(torch.min(policy_ratio * advs, clipped_adv)).mean()
kl = (logps_old - logps).mean().item()
return ppo_loss, kl | 203c4072e1c04db9cceb9fa58f70b9af512ffb1c | 3,659,301 |
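# Usage sketch for ppo_clip_policy_loss (illustrative only; the tensors below are random
# stand-ins, and the function is assumed to be importable from the module above).
import torch
logps = torch.randn(8)                              # log-probs under the current policy
logps_old = logps.detach() + 0.05 * torch.randn(8)  # log-probs under the pre-update policy
advs = torch.randn(8)                               # advantage estimates
loss, kl = ppo_clip_policy_loss(logps, logps_old, advs, clipratio=0.2)
print(loss.item(), kl)                              # scalar loss to minimise, plus a KL estimate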
def reconstruct_modelseed_model(genome_id, model_id, template_reference=None):
""" Reconstruct a draft ModelSEED model for an organism.
Parameters
----------
genome_id : str
Genome ID or workspace reference to genome
model_id : str
ID of output model
template_reference : str, optional
Workspace reference to template model
Returns
-------
dict
Dictionary of current model statistics
"""
# Confirm genome ID is available in PATRIC.
get_genome_summary(genome_id)
# Set input parameters for method.
params = dict()
params['genome'] = 'PATRICSOLR:' + genome_id
# params['fulldb'] = 0
params['output_file'] = model_id
if template_reference is not None:
params['template_model'] = template_reference
params['gapfill'] = 0
params['predict_essentiality'] = 0
# Workaround for ModelSEED workspace bug. The user's modelseed folder must exist before saving
# the model. Otherwise the type of the folder created for the model is not "modelfolder" and
# subsequent operations on the model will fail.
if ms_client.username is None:
ms_client.set_authentication_token()
folder_reference = '/{0}/{1}'.format(ms_client.username, model_folder)
try:
get_workspace_object_meta(folder_reference)
except ObjectNotFoundError:
put_workspace_object(folder_reference, 'folder')
LOGGER.info('Created modelseed folder in workspace for "%s"', ms_client.username)
# Run the server method.
try:
job_id = ms_client.call('ModelReconstruction', params)
LOGGER.info('Started job %s to run model reconstruction for "%s"', job_id, params['genome'])
_wait_for_job(job_id)
except ServerError as e:
references = None
if template_reference is not None:
references = [template_reference]
raise handle_server_error(e, references)
# Get the model statistics for the model.
stats = get_modelseed_model_stats(model_id)
if stats['num_genes'] == 0: # ModelSEED does not return an error if the genome ID is invalid
warn('Model for genome ID {0} has no genes, verify genome ID is valid'.format(genome_id))
return stats | 3547088922fe2df2f81622a8065368227a1948bc | 3,659,302 |
import numpy as np
from timeseries import timeseries, loadDBstation
from datetime import datetime
def tide_pred_correc(modfile,lon,lat,time,dbfile,ID,z=None,conlist=None):
"""
Performs a tidal prediction at all points in [lon,lat] at times in vector [time]
Applies an amplitude and phase correction based on a time series
"""
print('Calculating tidal correction factors from time series...')
# Load using the timeseries module
t0 = datetime.strftime(time[0],'%Y%m%d.%H%M%S')
t1 = datetime.strftime(time[-1],'%Y%m%d.%H%M%S')
dt = time[1]-time[0]
print(t0, t1, dt.total_seconds())
timeinfo = (t0,t1,dt.total_seconds())
TS,meta = loadDBstation(dbfile,ID,'waterlevel',timeinfo=timeinfo,filttype='low',cutoff=2*3600,output_meta=True)
lonpt=meta['longitude']
latpt=meta['latitude']
print(lonpt,latpt)
# Extract the OTIS tide prediction
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lonpt,latpt)
h_amp = np.abs(h_re+1j*h_im)[:,0]
h_phs = np.angle(h_re+1j*h_im)[:,0]
# Harmonic analysis of observation time series
amp, phs, frq, frqnames, htide = TS.tidefit(frqnames=conlist)
TS_harm = timeseries(time,htide)
residual = TS.y - htide
# Calculate the amp and phase corrections
dphs = phs - h_phs + np.pi
damp = amp/h_amp
# Extract the data along the specified points
u_re, u_im, v_re, v_im, h_re, h_im, omega, conlist = extract_HC(modfile,lon,lat,z=z,conlist=conlist)
h_amp = np.abs(h_re+1j*h_im)
h_phs = np.angle(h_re+1j*h_im)
u_amp = np.abs(u_re+1j*u_im)
u_phs = np.angle(u_re+1j*u_im)
v_amp = np.abs(v_re+1j*v_im)
v_phs = np.angle(v_re+1j*v_im)
# Initialise the output arrays
sz = lon.shape
nx = np.prod(sz)
nt = time.shape[0]
h=np.zeros((nt,nx))
u=np.zeros((nt,nx))
v=np.zeros((nt,nx))
# Rebuild the time series
#tsec=TS_harm.tsec - TS_harm.tsec[0]
tsec = othertime.SecondsSince(time,basetime=time[0])
print(tsec[0])
for nn,om in enumerate(omega):
for ii in range(0,nx):
h[:,ii] += damp[nn]*h_amp[nn,ii] * np.cos(om*tsec - (h_phs[nn,ii] + dphs[nn]))
u[:,ii] += damp[nn]*u_amp[nn,ii] * np.cos(om*tsec - (u_phs[nn,ii] + dphs[nn]))
v[:,ii] += damp[nn]*v_amp[nn,ii] * np.cos(om*tsec - (v_phs[nn,ii] + dphs[nn]))
szo = (nt,)+sz
return h.reshape(szo), u.reshape(szo), v.reshape(szo), residual | 883ac85787a700a785a0b0a08f521aaf6ad821d1 | 3,659,303 |
def generate_new_filename(this_key):
"""Generates filename for processed data from information in this_key."""
[_, _, source_id, experiment_id, _, _] = this_key.split('.')
this_fname = THIS_VARIABLE_ID+'_'+experiment_id+'_'+source_id
return this_fname | 2e9edb4730257e8fc4c68bbcbb32cce133041bb8 | 3,659,304 |
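# Usage sketch for generate_new_filename. THIS_VARIABLE_ID is a module-level constant in the
# original code; the value and the example key below are purely hypothetical.
THIS_VARIABLE_ID = 'tas'
example_key = 'CMIP.NASA-GISS.GISS-E2-1-G.historical.r1i1p1f1.Amon'
print(generate_new_filename(example_key))  # -> 'tas_historical_GISS-E2-1-G'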
import cx_Oracle
def get_connection(user, pwd):
""" Gets a connection to Oracle """
try:
connection = cx_Oracle.connect(user + '/' + pwd + '@' +
config.FISCO_CONNECTION_STRING)
connection.autocommit = False
print('Connection Opened')
return connection
except Exception as e:
print('Exception: ' + str(e)) | ba7ca784f0778fb06843e3c68af63e6348406735 | 3,659,305 |
from pyspark.sql import functions as F, types as T
def _generate_select_expression_for_extended_string_unix_timestamp_ms_to_timestamp(source_column, name):
"""
More robust conversion from StringType to TimestampType. It is assumed that the
timezone is already set to UTC in spark / java to avoid implicit timezone conversions.
Is able to additionally handle (compared to implicit Spark conversion):
* Unix timestamps in milliseconds
* Preceding whitespace
* Trailing whitespace
* Preceeding and trailing whitespace
Hint
----
Please have a look at the tests to get a better feeling how it behaves under
tests/unit/transformer/test_mapper_custom_data_types.py::TestExtendedStringConversions and
tests/data/test_fixtures/mapper_custom_data_types_fixtures.py
Example
-------
>>> from spooq.transformer import Mapper
>>>
>>> input_df.head(3)
[Row(input_string="2020-08-12T12:43:14+0000"),
Row(input_string="1597069446000"),
Row(input_string="2020-08-12")]
>>> mapping = [("output_value", "input_string", "extended_string_to_timestamp")]
>>> output_df = Mapper(mapping).transform(input_df)
>>> output_df.head(3)
[Row(input_string=datetime.datetime(2020, 8, 12, 12, 43, 14)),
Row(input_string=datetime.datetime(2020, 8, 10, 14, 24, 6)),
Row(input_string=datetime.datetime(2020, 8, 12, 0, 0, 0))]
"""
return (
F.when(
F.trim(source_column).cast(T.LongType()).isNotNull(), (F.trim(source_column) / 1000).cast(T.TimestampType())
)
.otherwise(F.trim(source_column).cast(T.TimestampType()))
.alias(name)
) | 22a1220c260406c82ede127b87fa23356d2f5192 | 3,659,306 |
import xmlrpclib
from getpass import getpass
def get_webf_session():
"""
Return an instance of a Webfaction server and a session for authentication
to make further API calls.
"""
server = xmlrpclib.ServerProxy("https://api.webfaction.com/")
print("Logging in to Webfaction as %s." % env.user)
if env.password is None:
env.password = getpass(
"Enter Webfaction password for user %s: " % env.user)
session, account = server.login(env.user, env.password)
print("Succesfully logged in as %s." % env.user)
return server, session, account | e6ebe8ad51cdf4a33fcfa70e5f1983ede6f66d31 | 3,659,307 |
from typing import Dict
from typing import List
def _unpack_school_column_aliases() -> Dict[str, List[str]]:
"""
Unpack the known aliases for lookup table of alias_column_name -> schema_column_name.
:return: lookup table.
:raises: ValueError if an alias has more than one mapping to a schema column
"""
result = dict()
# add to the lookup table all the known aliases from School_aliases module
for (schema_column_name, aliases) in School_aliases.items():
for alias_column_name in aliases:
k = alias_column_name.lower()
v = schema_column_name.lower()
if result.get(k) is not None:
raise ValueError(f"duplicate alias {v} for column name: {k}")
result[k] = v
return result | 3c6cff79c2ce50dff655bee7ac1626277b00e155 | 3,659,308 |
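# Illustrative sketch of the School_aliases structure the function expects (the real mapping
# lives in the School_aliases module; the entries below are hypothetical).
School_aliases = {
'school_name': ['School', 'Name_Of_School'],
'district_id': ['District', 'District_Code'],
}
# _unpack_school_column_aliases() would then return, with keys and values lower-cased:
# {'school': 'school_name', 'name_of_school': 'school_name',
#  'district': 'district_id', 'district_code': 'district_id'}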
import numpy as np
def suntimecorr(ra, dec, obst, coordtable, verbose=False):
"""
This function calculates the light-travel time correction from
observer to a standard location. It uses the 2D coordinates (RA
and DEC) of the object being observed and the 3D position of the
observer relative to the standard location. The latter (and the
former, for solar-system objects) may be gotten from JPL's
Horizons system.
Parameters:
-----------
ra : Float
Right ascension of target object in radians.
dec : Float
Declination of target object in radians.
obst : Float or Numpy Float array
Time of observation in Julian Date (may be a vector)
coordtable : String
Filename of output table from JPL HORIZONS specifying
the position of the observatory relative to the
standard position.
verbose : Boolean
If True, print X,Y,Z coordinates.
Returns:
--------
This function returns the time correction in seconds to be ADDED
to the observation time to get the time when the observed photons
would have reached the plane perpendicular to their travel and
containing the reference position.
Notes:
------
The position vectors from coordtable are given in the following
coordinate system:
Reference epoch: J2000.0
xy-plane: plane of the Earth's mean equator at the reference epoch
x-axis : out along ascending node of instantaneous plane of the Earth's
orbit and the Earth's mean equator at the reference epoch
z-axis : along the Earth mean north pole at the reference epoch
Ephemerides are often calculated for BJD, barycentric Julian date.
That is, they are correct for observations taken at the solar
system barycenter's distance from the target. The BJD of our
observation is the time the photons we observe would have crossed
the sphere centered on the object and containing the barycenter.
We must thus add the light-travel time from our observatory to
this sphere. For non-solar-system observations, we approximate
the sphere as a plane, and calculate the dot product of the vector
from the barycenter to the telescope and a unit vector to from the
barycenter to the target, and divide by the speed of light.
Properly, the coordinates should point from the standard location
to the object. Practically, for objects outside the solar system,
the adjustment from, e.g., geocentric (RA-DEC) coordinates to
barycentric coordinates has a negligible effect on the trig
functions used in the routine.
The horizons file in coordtable should be in the form of the
following example, with a subject line of JOB:
!$$SOF
!
! Example e-mail command file. If mailed to "[email protected]"
! with subject "JOB", results will be mailed back.
!
! This example demonstrates a subset of functions. See main doc for
! full explanation. Send blank e-mail with subject "BATCH-LONG" to
! [email protected] for complete example.
!
EMAIL_ADDR = '[email protected]' ! Send output to this address
! (can be blank for auto-reply)
COMMAND = '-79' ! Target body, closest apparition
OBJ_DATA = 'YES' ! No summary of target body data
MAKE_EPHEM = 'YES' ! Make an ephemeris
START_TIME = '2005-Aug-24 06:00' ! Start of table (UTC default)
STOP_TIME = '2005-Aug-25 02:00' ! End of table
STEP_SIZE = '1 hour' ! Table step-size
TABLE_TYPE = 'VECTOR' ! Specify VECTOR ephemeris table type
CENTER = '@10' ! Set observer (coordinate center)
REF_PLANE = 'FRAME' ! J2000 equatorial plane
VECT_TABLE = '3' ! Selects output type (3=all).
OUT_UNITS = 'KM-S' ! Vector units# KM-S, AU-D, KM-D
CSV_FORMAT = 'NO' ! Comma-separated output (YES/NO)
VEC_LABELS = 'YES' ! Label vectors in output (YES/NO)
VECT_CORR = 'NONE' ! Correct for light-time (LT),
! or lt + stellar aberration (LT+S),
! or (NONE) return geometric
! vectors only.
!$$EOF
Example:
---------
>>> # Spitzer is in nearly the Earth's orbital plane. Light coming from
>>> # the north ecliptic pole should hit the observatory and the sun at
>>> # about the same time.
>>> import suntimecorr as sc
>>> ra = 18.0 * np.pi / 12 # ecliptic north pole coordinates in radians
>>> dec = 66.5 * np.pi / 180 # "
>>> obst = np.array([2453607.078]) # Julian date of 2005-08-24 14:00
>>> print( sc.suntimecorr(ra, dec, obst,
'/home/esp01/ancil/horizons/cs41_spitzer.vec') )
1.00810877 # about 1 sec, close to zero
>>> # If the object has the RA and DEC of Spitzer, light time should be
>>> # about 8 minutes to the sun.
>>> obs = np.array([111093592.8346969, -97287023.315796047,
-42212080.826677799])
>>> # vector to the object
>>> obst = np.array([2453602.5])
>>> print( np.sqrt(np.sum(obs**2.0)) )
153585191.481 # about 1 AU, good
>>> raobs = np.arctan(obs[1]/ obs[0])
>>> decobs = np.arctan(obs[2]/ np.sqrt(obs[0]**2 + obs[1]**2))
>>> print(raobs, decobs)
-0.7192383661, -0.2784282118
>>> print( sc.suntimecorr(raobs, decobs, obst,
'/home/esp01/ancil/horizons/cs41_spitzer.vec') / 60.0)
8.5228630 # good, about 8 minutes light time to travel 1 AU
Modification History:
---------------------
2005-12-01 statia Written by Statia Luszcz.
2006-03-09 jh Corrected 90deg error in algorithm, renamed,
updated header, made Coordtable a positional
arg since it's required, switched to radians.
2007-06-28 jh Renamed to suntimecorr since we now use
barycentric Julian date.
2009-01-28 jh Change variables to long, use spline instead
of linfit so we can use one HORIZONS file for
the whole mission.
2009-02-22 jh Reshape spline results to shape of obst. Make
it handle unsorted unput data properly.
Header update.
2010-07-10 patricio Converted to python. ([email protected])
2010-11-01 patricio Docstring updated.
"""
start_data = '$$SOE'
end_data = '$$EOE'
# Read in whole table as an list of strings, one string per line
ctable = open(coordtable, 'r')
wholetable = ctable.readlines()
ctable.close()
# Find start and end line
i = 0
# while end has not been found:
while wholetable[i].find(end_data) == -1:
# if start is found get the index of next line:
if wholetable[i].find(start_data) != -1:
start = i + 1
i += 1
# Chop table
data = wholetable[start:i-2]
# Extract values:
x, y, z, time = getcoords(data)
# Interpolate to observing times:
# We must preserve the shape and order of obst. Spline takes
# monotonic input and produces linear output. x, y, z, time are
# sorted as HORIZONS produces them.
# Save shape of obst
tshape = np.shape(obst)
# Reshape to 1D and sort
obstime = obst.flatten()
ti = np.argsort(obstime) # indexes of sorted array by time
tsize = np.size(obstime)
# Allocate output arrays
obsx = np.zeros(tsize)
obsy = np.zeros(tsize)
obsz = np.zeros(tsize)
# Interpolate sorted arrays
obsx[ti] = splinterp(obstime[ti], time, x)
obsy[ti] = splinterp(obstime[ti], time, y)
obsz[ti] = splinterp(obstime[ti], time, z)
if verbose:
print( 'X, Y, Z = ', obsx, obsy, obsz)
# Change ra and dec into unit vector n_hat
object_unit_x = np.cos(dec) * np.cos(ra)
object_unit_y = np.cos(dec) * np.sin(ra)
object_unit_z = np.sin(dec)
# Dot product the vectors with n_hat
rdotnhat = ( obsx * object_unit_x +
obsy * object_unit_y +
obsz * object_unit_z )
# Reshape back to the original shape
rdotnhat = rdotnhat.reshape(tshape)
# Time correction is: dt = length/velocity
# Divide by the speed of light and return
return rdotnhat / ( c / 1000.0 ) | c214a065e41ddc85713ab351e537ab08d521f090 | 3,659,309 |
from typing import Union
from typing import Tuple
VERBOSE = False  # module-level debug flag (assumed default)
import re
def normalize_address(address: str, asHex: bool=False) -> Union[Tuple[str, str], Tuple[str, bytes]]:
"""Takes an address as raw byte or id__ and provides both formats back"""
try:
# convert recipient to raw if provided as id__
if address.startswith("id__"):
address_raw = NyzoStringEncoder.decode(address).get_bytes().hex()
if VERBOSE:
print(f"Raw address is {address_raw}")
else:
raise RuntimeWarning("Not an id__")
except:
if VERBOSE:
print(f"address was not a proper id_ nyzostring")
address_raw = re.sub(r"[^0-9a-f]", "", address.lower())
# print(address_raw)
if len(address_raw) != 64:
raise ValueError("Wrong address format. 64 bytes as hex or id_ nyzostring required")
if VERBOSE:
print(f"Trying with {address_raw}")
address = NyzoStringEncoder.encode(NyzoStringPublicIdentifier.from_hex(address_raw))
# Here we should have both recipient and recipient_raw in all cases.
if asHex:
return address, address_raw
else:
return address, bytes.fromhex(address_raw) | ce574f1b1c5231b97a444d3f3e767016cd879d6e | 3,659,310 |
def get_welcome_response():
""" Prompt the user for the prayer
"""
session_attributes = {}
card_title = "Welcome"
speech_output = "What would you like me to pray with you? I can pray the Rosary and the Divine Mercy Chaplet."
reprompt_text = "What would you like me to pray with you?"
should_end_session = False
return build_response(session_attributes, build_speechlet_response(
card_title, speech_output, reprompt_text, speech_output,
should_end_session, [])) | 9bcf20bcc2afd96e1b3b93abb47ba654e2051884 | 3,659,311 |
from typing import Optional
import math
import time
import pandas as pd
import requests
import textwrap
def get_news(
limit: int = 60,
post_kind: str = "news",
filter_: Optional[str] = None,
region: str = "en",
) -> pd.DataFrame:
"""Get recent posts from CryptoPanic news aggregator platform. [Source: https://cryptopanic.com/]
Parameters
----------
limit: int
number of news to fetch
post_kind: str
Filter by category of news. Available values: news or media.
filter_: Optional[str]
Filter by kind of news. One from list: rising|hot|bullish|bearish|important|saved|lol
region: str
Filter news by regions. Available regions are: en (English), de (Deutsch), nl (Dutch), es (Español),
fr (Français), it (Italiano), pt (Português), ru (Русский)
Returns
-------
pd.DataFrame
DataFrame with recent news from different sources filtered by provided parameters.
"""
if post_kind not in CATEGORIES:
post_kind = "news"
results = []
response = make_request(post_kind=post_kind, filter_=filter_, region=region)
if response:
data, next_page, _ = (
response["results"],
response.get("next"),
response.get("count"),
)
for post in data:
results.append(_parse_post(post))
number_of_pages = math.ceil(limit / 20)
counter = 0
while counter < number_of_pages and next_page:
counter += 1
try:
time.sleep(0.2)
res = requests.get(next_page).json()
for post in res["results"]:
results.append(_parse_post(post))
next_page = res.get("next")
except Exception as e: # noqa: F841
logger.exception(str(e))
console.print(
"[red]Something went wrong while fetching news from API[/red]\n"
)
return pd.DataFrame()
try:
df = pd.DataFrame(results)
df["title"] = df["title"].apply(
lambda x: "\n".join(textwrap.wrap(x, width=66))
if isinstance(x, str)
else x
)
return df
except Exception as e: # noqa: F841
logger.exception(str(e))
console.print("[red]Something went wrong with DataFrame creation[/red]\n")
return pd.DataFrame()
return pd.DataFrame() | 8535965e4058ce4bd76b2ef44a060e4083f2128e | 3,659,312 |
def login():
""" Display log in form and handle user login."""
email = request.args.get('emailLogin')
password = request.args.get('passwordLogin')
user = User.query.filter(User.email == email).first()
if user is None or not check_password_hash(user.password, password):
flash('Invalid email or password')
return redirect('/')
else:
login_user(user)
flash('Welcome back!')
return redirect('/')
return render_template('login.html') | 9368ccc0e8d02baa58ce434e37e7ca7a2deb53e2 | 3,659,313 |
import _queue
def model_from_queue(model):
""" Returns the model dict if model is enqueued, else None."""
return _queue.get(model, None) | 46eea9b8a218181b000308b080a8c9dad7e866b2 | 3,659,314 |
import codecs
import os
import re
def get_email(package):
"""
Return package email as listed in `__email__` in `init.py`.
"""
init_py = codecs.open(os.path.abspath(os.path.join(package, '__init__.py')), encoding='utf-8').read()
return re.search("^__email__ = ['\"]([^'\"]+)['\"]", init_py, re.MULTILINE).group(1) | e07a2460f0229c16853ba09cf88e59b95659ee77 | 3,659,315 |
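# Usage sketch for get_email: write a throwaway package with an __email__ attribute and read it back.
import os
import tempfile
pkg = os.path.join(tempfile.mkdtemp(), 'demo_pkg')
os.makedirs(pkg)
open(os.path.join(pkg, '__init__.py'), 'w', encoding='utf-8').write('__email__ = "maintainer@example.com"\n')
print(get_email(pkg))  # -> maintainer@example.com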
def get_class_name(obj, instance=True):
"""
Given a class or instance of a class, returns a string representing the
fully specified path of the class.
Parameters
----------
obj : object
An instance of any object
instance: bool
Indicates whether given object is an instance of the class to be named
"""
typ = type(obj) if instance else obj
return "{}.{}".format(typ.__module__, typ.__name__) | 3a7ebd1fb2682ec5dff6d42cd2cccf918d67f9a0 | 3,659,316 |
from os import path
import pathlib
def pipeline(opts):
"""Construct the pipeline"""
outdir = path.join(opts.outdir, opts.cancer)
pTCGADownload.input = [opts.snpmani]
pTCGADownload.args.nthread = opts.nthread
pTCGADownload.args.token = opts.token
pTCGADownload.config.export_dir = outdir
pTCGADownload.cache = 'export'
pSample2SubmitterID.depends = pTCGADownload
pSample2SubmitterID.input = lambda ch: ch.cbind(opts.snpmeta)
pSample2SubmitterID.args.len = 16
# remove normal-like
pShell.depends = pSample2SubmitterID
pShell.args.cmd = '''
mkdir {{o.outfile}}
for gtfile in {{i.infile}}/*.txt; do
if [[ $(basename $gtfile | cut -c14) == "0" ]]; then
ln -s $gtfile {{o.outfile}}/
fi
done
'''
pGtFiles2Mat.depends = pShell
pGtFiles2Mat.input = lambda ch: [ch.expand(pattern='*.txt').flatten()]
pGtFiles2Mat.config.echo_jobs = [0]
pGtFiles2Mat.config.echo_types = 'all'
pGtFiles2Mat.config.export_dir = outdir
pGtFiles2Mat.output = 'outfile:file:TCGA-%s.gt.txt' % opts.cancer
pDownloadGet.input = [
'https://gdc.xenahubs.net/download/'
'TCGA-%s.htseq_counts.tsv.gz' % opts.cancer,
'https://gdc.xenahubs.net/download/'
'TCGA-%s.GDC_phenotype.tsv.gz' % opts.cancer,
'https://gdc.xenahubs.net/download/'
'TCGA-%s.survival.tsv.gz' % opts.cancer
]
pDownloadGet.config.export_dir = outdir
pDownloadGet.config.export_part = ['*phenotype.tsv.gz',
'*.survival.tsv.gz']
# remove normal-like
pTsvColSelect.depends = pDownloadGet
pTsvColSelect.input = lambda ch: ch.row_at(0)
pTsvColSelect.args.cols = ('lambda cnames: [name for name in cnames '
'if name[-3:-2] == "0" or name[:5] != "TCGA-"]')
# convert ENSG to gene symbols
pGeneNameNorm.depends = pTsvColSelect
pGeneNameNorm.output = 'outfile:file:TCGA-%s.expr.txt' % opts.cancer
pGeneNameNorm.args.inopts.cnames = True
pGeneNameNorm.args.frm = 'ensembl.gene'
pGeneNameNorm.args.notfound = 'skip'
pGeneNameNorm.config.export_dir = outdir
return pTCGADownload, pDownloadGet | e0475e459d7eb403c2cc292682d956acb4dfb4f0 | 3,659,317 |
from rdkit import Chem
def mol_to_smiles(molecule, isomeric=True, explicit_hydrogen=True, mapped=True):
"""
Generate canonical SMILES with RDKit
Parameters
----------
molecule: RDKit Chem.rdchem.Mol instance
The molecule to generate SMILES for
isomeric: bool
If True, SMILES will have isomeric information. If molecule already has isomeric information, this will be retained.
If no isomeric information exists, this function will perceive it and assign the CW (clockwise) flag for chiral
centers and the E-isomer for stereo bonds.
explicit_hydrogen: bool
If True, SMILES will have explicit hydrogens
mapped: bool
If True, SMILES will have map indices. (+1 because the map is 1 indexed)
Returns
-------
smiles: str
The canonical SMILES
"""
if mapped and not explicit_hydrogen:
raise Warning("Tagged SMILES must include hydrogens to retain order")
if mapped and not isomeric:
raise Warning("Tagged SMILES must include stereochemistry ")
if explicit_hydrogen:
# Add explicit hydrogens
molecule = Chem.AddHs(molecule)
if not explicit_hydrogen:
molecule = Chem.RemoveHs(molecule)
try:
json_geometry = int(molecule.GetProp("_json_geometry"))
except KeyError:
json_geometry = False
if isomeric and not has_stereo_defined(molecule):
raise ValueError("Some stereochemistry is not defined")
# Get canonical order for map
if mapped:
if json_geometry:
# keep original ordering
#ToDo this looks like a potential bug that only json_geometry gets atom maps
for i in range(molecule.GetNumAtoms()):
molecule.GetAtomWithIdx(i).SetAtomMapNum(i+1)
else:
molecule = canonical_order_atoms(molecule)
smiles = Chem.MolToSmiles(molecule, allHsExplicit=explicit_hydrogen, isomericSmiles=isomeric, canonical=True)
return smiles | 1727dd4260fda0e9b741dab2b35c66562a8ee007 | 3,659,318 |
import numpy as np
def _plot_events_nday(ax, grid, events, scale_factor=1.0):
"""
Plot a map of the total number of days spent in dry spell events.
Parameters
----------
ax : <matplotlib.axes.Axes> instance.
The axes to which the map will be drawn.
grid : <geo_grid.LandGrid> instance
Object describing the spatial grid.
events : list of lists of <event.Event> instances
Catalogue of dry spell events from file_eves.
scale_factor : float, optional
Totals are multipled by this number before plotting. Typically
used to convert from total to per year.
Returns
-------
PCM : mappable
E.g., <matplotlib.collections.QuadMesh>.
"""
nday = []
for eves in events[0]:
nday.append(sum(e.duration for e in eves))
nday = np.ma.masked_less(nday, 1)
nday = grid.expand(nday) * scale_factor
levs = np.linspace(0, 360, 13)
cmap = _get_cmap("cividis", levs, over="orange")
PCM = _plot_map(ax, grid, nday, levs, cmap, ticks=levs[::3])
ax.set_title("Number of days per year spent in dry spells")
ax.add_feature(LAND, facecolor="lightgrey")
return PCM | dea821d98afe95790742636408b22b4c4fdc9688 | 3,659,319 |
import numpy as np
def rank_transform(arr: np.ndarray, centered=True) -> np.ndarray:
"""
Transform a 1-dim ndarray with arbitrary scalar values to an array with equally spaced rank values.
This is a nonlinear transform.
:param arr: input array
:param centered: if the transform should by centered around zero
:return: transformed array
"""
assert isinstance(arr, np.ndarray)
assert arr.ndim == 1
# Create array to sort in
ranks = np.zeros_like(arr)
# Ascending sort
idcs_sort = np.argsort(arr)
# Rearrange to an equal-step array from -0.5 (or 0) to 0.5 (or 1)
if centered:
ranks[idcs_sort] = np.linspace(-.5, .5, idcs_sort.size, endpoint=True)
else:
ranks[idcs_sort] = np.linspace(0., 1., idcs_sort.size, endpoint=True)
return ranks | d6d4cbf1e191c1fa61e58309d478547f88f0550f | 3,659,320 |
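# Usage sketch for rank_transform: arbitrary scalars become equally spaced rank values.
import numpy as np
x = np.array([10.0, -3.0, 0.5, 7.0])
print(rank_transform(x))                   # centered: [ 0.5, -0.5, -0.1667, 0.1667] (approximately)
print(rank_transform(x, centered=False))   # uncentered: ranks spread over [0, 1]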
from datetime import datetime
def edit_post(id, alias):
"""Edit an existing post.
User has to be logged in and be either:
- Author of the post
- Editor (role)
- Administrator (role)
"""
post = Post.query.get_or_404(id)
if current_user != post.author and not (
current_user.has_role('Administrator') or current_user.has_role('Editor')
):
abort(403)
form = PostForm()
if form.validate_on_submit():
upload = Upload.query.filter_by(filename=form.image.data).first()
category = Category.query.filter_by(id=form.category.data).first()
post.title = form.title.data
post.alias = sanitize_alias(form.alias.data)
post.timestamp = form.timestamp.data
post.description = form.description.data
post.body = form.body.data
post.image = upload
post.featured = form.featured.data
post.commenting = form.commenting.data
post.category = category
db.session.add(post)
# update tags
new_tags = sanitize_tags(form.tags.data)
old_tags = sanitize_tags(', '.join([c.title for c in post.tags.all()]))
added_tag_titles, removed_tag_titles = get_added_removed(new_tags, old_tags)
# add new tags
added_tag_aliases = [sanitize_alias(c) for c in added_tag_titles]
for c in zip(added_tag_titles, added_tag_aliases):
tag_alias = c[1]
tag = Tag.query.filter(Tag.alias == tag_alias).first()
# if tag doesn't exist in the db, add it
if not tag:
tag = Tag(title=c[0], alias=c[1])
db.session.add(tag)
# add relation between the Post and the Tag
# flush session to obtain tag.id, if the tag has been added recently
db.session.flush()
cl = Tagification(tag_id=tag.id, post_id=id)
db.session.add(cl)
# remove obsolete tags
removed_tag_aliases = [sanitize_alias(c) for c in removed_tag_titles]
for c in zip(removed_tag_titles, removed_tag_aliases):
tag_alias = c[1]
tag = Tag.query.filter(Tag.alias == tag_alias).first()
# remove relations
old_cl = Tagification.query.filter(
Tagification.tag_id == tag.id, Tagification.post_id == id
).first()
db.session.delete(old_cl)
# remove tag, if it's not used in other posts
other_cl = Tagification.query.filter(
Tagification.tag_id == tag.id, Tagification.post_id != id
).first()
if not other_cl:
db.session.delete(tag)
flash('The post has been updated.', 'success')
return redirect(
url_for(
'main.post', category=post.category.alias, id=post.id, alias=post.alias
)
)
form.title.data = post.title
form.alias.data = post.alias
form.timestamp.data = post.timestamp
form.description.data = post.description
form.body.data = post.body
if post.image:
form.image.data = post.image.filename
form.featured.data = post.featured
form.commenting.data = post.commenting
form.category.data = post.category
form.tags.data = ', '.join([c.title for c in post.tags.all()])
return render_template(
'ctrl/edit_post.html', form=form, datetimepicker=datetime.utcnow()
) | 038b5f15125bd2263f8ea6a677f3de9a22edba04 | 3,659,321 |
import numpy as np
from matplotlib import dates
def _date_list_to_num(ifg_date_list):
"""Convert list of dates, or list of date pairs, numpy array of floats
for 'days since 1970'
Handles both strings and datetimes
"""
arr = np.array(ifg_date_list)
if isinstance(arr.ravel()[0], str):
return dates.datestr2num(ifg_date_list)
else:
return dates.date2num(ifg_date_list) | 20171703003227307b971e68f48e03d478c5fcf5 | 3,659,322 |
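# Usage sketch for _date_list_to_num: both date strings and datetime/date objects are accepted
# and converted to matplotlib's float day numbers.
from datetime import date
print(_date_list_to_num(['2020-01-01', '2020-01-02']))
print(_date_list_to_num([date(2020, 1, 1), date(2020, 1, 2)]))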
import os
def get_font():
"""
Sets up a font capable of rendering the characters
"""
path = os.path.join(os.getcwd(), 'scripts', 'assets', 'fonts', 'NotoSansCJKjp-Regular.otf')
return font_manager.FontProperties(fname=path) | 7b1e532de6c8e68cb62bbde0e8dc67fb1ec5bdf9 | 3,659,323 |
def maxindices(l):
"""
Get indices for all occurences of maximal element in list
:param l:
:return:
"""
max_indices = []
max_value = l[0] #Assume un-exhaustible iterator
for i, v in enumerate(l):
if v > max_value:
max_value = v
max_indices = [i]
elif v == max_value:
max_indices.append(i)
return max_indices | b2f155fa97455c0327b2717591ebea2176773012 | 3,659,324 |
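# Usage sketch for maxindices: every position holding the maximum value is returned.
print(maxindices([3, 1, 3, 2, 3]))  # -> [0, 2, 4]
print(maxindices([5]))              # -> [0]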
import numpy as np
def get_grid_extents(data, edges=True):
"""
Get min and max lat and lon from an input GEOS-Chem xarray dataset or grid dict
Args:
data: xarray Dataset or dict
A GEOS-Chem dataset or a grid dict
edges (optional): bool
Whether grid extents should use cell edges instead of centers
Default value: True
Returns:
minlon: float
Minimum longitude of data grid
maxlon: float
Maximum longitude of data grid
minlat: float
Minimum latitude of data grid
maxlat: float
Maximum latitude of data grid
"""
if isinstance(data, dict):
if "lon_b" in data and edges:
return np.min(
data["lon_b"]), np.max(
data["lon_b"]), np.min(
data["lat_b"]), np.max(
data["lat_b"])
elif not edges:
return np.min(
data["lon"]), np.max(
data["lon"]), np.min(
data["lat"]), np.max(
data["lat"])
else:
return -180, 180, -90, 90
elif "lat" in data.dims and "lon" in data.dims:
lat = data["lat"].values
lon = data["lon"].values
if lat.size / 6 == lon.size:
# No extents for CS plots right now
return -180, 180, -90, 90
else:
lat = np.sort(lat)
minlat = np.min(lat)
if abs(abs(lat[1]) - abs(lat[0])
) != abs(abs(lat[2]) - abs(lat[1])):
#pole is cutoff
minlat = minlat - 1
maxlat = np.max(lat)
if abs(abs(lat[-1]) - abs(lat[-2])
) != abs(abs(lat[-2]) - abs(lat[-3])):
maxlat = maxlat + 1
# add longitude res to max longitude
lon = np.sort(lon)
minlon = np.min(lon)
maxlon = np.max(lon) + abs(abs(lon[-1]) - abs(lon[-2]))
return minlon, maxlon, minlat, maxlat
else:
# GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim
return -180, 180, -90, 90 | c8cbef8b0dc3f6ce9c009955c2eff88fd5011ded | 3,659,325 |
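# Usage sketch for get_grid_extents with a grid dict carrying cell centers and edges
# (the coordinate values below are hypothetical).
import numpy as np
grid = {
'lon': np.array([-180.0, -177.5, -175.0]),
'lat': np.array([-89.0, -87.0, -85.0]),
'lon_b': np.array([-181.25, -178.75, -176.25, -173.75]),
'lat_b': np.array([-90.0, -88.0, -86.0, -84.0]),
}
print(get_grid_extents(grid))               # edge extents: (-181.25, -173.75, -90.0, -84.0)
print(get_grid_extents(grid, edges=False))  # center extents: (-180.0, -175.0, -89.0, -85.0)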
def millisToNanos(millis):
"""
Converts milliseconds to nanoseconds.
:param millis: (long) - The long milliseconds value to convert.
:return: (long) QueryConstants.NULL_LONG if the input is equal to QueryConstants.NULL_LONG. Throws
DBTimeUtils.DBDateTimeOverflowException if the input is too large for conversion. Otherwise returns a long of
the equivalent number of nanoseconds to the input.
"""
return _java_type_.millisToNanos(millis) | 4f659a6d994551c0ce72875009d688cb7c91571d | 3,659,326 |
def recursive_seed_part(
graph,
parts,
pop_target,
pop_col,
epsilon,
method=bipartition_tree,
node_repeats=1,
n=None,
ceil=None
):
"""
Returns a partition with ``num_dists`` districts balanced within ``epsilon`` of
``pop_target`` by recursively splitting graph using recursive_seed_part_inner.
:param graph: The graph
:param parts: Iterable of part labels (like ``[0,1,2]`` or ``range(4)``
:param pop_target: Target population for each part of the partition
:param pop_col: Node attribute key holding population data
:param epsilon: How far (as a percentage of ``pop_target``) from ``pop_target`` the parts
of the partition can be
:param method: Function used to find balanced partitions at the 2-district level
:param node_repeats: Parameter for :func:`~gerrychain.tree_methods.bipartition_tree` to use.
:param n: Either a positive integer (greater than 1) or None. If n is a positive integer,
this function will recursively create a seed plan by either biting off districts from graph
or dividing graph into n chunks and recursing into each of these. If n is None, this
function prime factors ``num_dists``=n_1*n_2*...*n_k (n_1 > n_2 > ... n_k) and recursively
partitions graph into n_1 chunks.
:param ceil: Either a positive integer (at least 2) or None. Relevant only if n is None. If
``ceil`` is a positive integer then finds the largest factor of ``num_dists`` less than or
equal to ``ceil``, and recursively splits graph into that number of chunks, or bites off a
district if that number is 1.
:return: New assignments for the nodes of ``graph``.
:rtype: dict
"""
flips = {}
assignment = recursive_seed_part_inner(
graph,
len(parts),
pop_target,
pop_col,
epsilon,
method=bipartition_tree,
node_repeats=node_repeats,
n=n,
ceil=ceil
)
for i in range(len(assignment)):
for node in assignment[i]:
flips[node] = parts[i]
return flips | 8d2517e74d8726696d865ea4993a10602af74450 | 3,659,327 |
def codegen_reload_data():
"""Parameters to codegen used to generate the fn_carbon_black_cloud_devices package"""
reload_params = {"package": u"fn_carbon_black_cloud_devices",
"incident_fields": [],
"action_fields": [],
"function_params": [u"carbon_black_device_query_string", u"incident_id"],
"datatables": [],
"message_destinations": [u"carbon_black_cloud"],
"functions": [u"carbon_black_cloud_devices_quarantine"],
"phases": [],
"automatic_tasks": [],
"scripts": [],
"workflows": [u"example_carbon_black_cloud_devices_quarantine"],
"actions": [u"Example: Run Carbon Black Device Quarantine"],
"incident_artifact_types": []
}
return reload_params | 0f116dd5c9f7496af86dde2afbdd3442904dc40f | 3,659,328 |
def validate_investment_amount(investment_amount, intent_request):
"""
Validates the investment_amount provided by the user.
"""
# Validate the investment_amount should be equal to or greater than 5000.
if investment_amount is not None:
investment_amount = parse_int(
investment_amount
) # Since parameters are strings it's important to cast values
if investment_amount < 5000:
return build_validation_result(
False,
"investmentAmount",
"The investmentAmount should be greater than or equal to 5000, "
"Please provide a correct investmentAmount in dollars.",
)
# A True results is returned if age or amount are valid
return build_validation_result(True, None, None) | 6f645d196e452377f4c16ae585ecc113ae5997b0 | 3,659,329 |
from bson import ObjectId
def new_id():
"""
Generates new bson ObjectId
"""
return str(ObjectId()) | aa02c802abf937720119f9843e55395d485b11c1 | 3,659,330 |
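# Usage sketch for new_id: each call yields a fresh 24-character hex ObjectId string
# (requires the bson package bundled with pymongo).
print(new_id())       # e.g. '65a1f0c2e4b0d1a2b3c4d5e6'
print(len(new_id()))  # 24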
import lumapi
import pathlib
import json
def gc_sweep(
session=None,
draw_function=gc2d,
dirpath=CONFIG["workspace"],
overwrite=False,
run=True,
base_fsp_path=str(CONFIG["grating_coupler_2D_base"]),
**kwargs
):
""" grating coupler sweep
grating_coupler_2D_base optimizes Transmission and does not calculate Sparameters
"""
function_name = draw_function.__name__ + "_sweep"
filename = kwargs.pop("name", get_function_name(function_name, **kwargs))
dirpath = pathlib.Path(dirpath) / function_name
dirpath.mkdir(exist_ok=True)
filepath = dirpath / filename
filepath_sim_settings = filepath.with_suffix(".settings.json")
filepath_json = filepath.with_suffix(".json")
filepath_fsp = str(filepath.with_suffix(".fsp"))
if filepath_json.exists() and not overwrite and run:
return json.loads(open(filepath_json).read())
s = session or lumapi.FDTD(hide=False)
simdict = draw_function(session=s, base_fsp_path=base_fsp_path, **kwargs)
s.save(filepath_fsp)
if not run:
return
s.run()
T = s.getresult("fom", "T")
results = dict(wavelength_nm=list(T["lambda"].ravel() * 1e9), T=list(T["T"]))
with open(filepath_json, "w") as f:
json.dump(results, f)
settings = simdict.get("settings")
if settings:
with open(filepath_sim_settings, "w") as f:
json.dump(settings, f)
return results | ae22a103adb2d440f25b14eac1d681d591237b67 | 3,659,331 |
def stem(word, stemmer=PORTER, **kwargs):
""" Returns the base form of the word when counting words in count().
With stemmer=PORTER, the Porter2 stemming algorithm is used.
With stemmer=LEMMA, either uses Word.lemma or inflect.singularize().
(with optional parameter language="en", pattern.en.inflect is used).
"""
if hasattr(word, "string") and stemmer in (PORTER, None):
word = word.string
if isinstance(word, basestring):
word = decode_utf8(word.lower())
if stemmer is None:
return word.lower()
if stemmer == PORTER:
return _stemmer.stem(word, **kwargs)
if stemmer == LEMMA:
if hasattr(word, "lemma"): # pattern.en.Word
w = word.string.lower()
if word.lemma is not None:
return word.lemma
if word.pos == "NNS":
return singularize(w)
if word.pos.startswith(("VB", "MD")):
return conjugate(w, "infinitive") or w
if word.pos.startswith(("JJ",)):
return predicative(w)
if word.pos.startswith(("DT", "PR", "WP")):
return singularize(w, pos=word.pos)
return w
return singularize(word, pos=kwargs.get("pos", "NN"))
if hasattr(stemmer, "__call__"):
return decode_utf8(stemmer(word))
return word.lower() | 71ac3a3ee30a226fcf13b2d4a3288003feeb7c3e | 3,659,332 |
def verify_bounce_message(msg):
"""
Verify an SES/SNS bounce notification message.
"""
verifier = BounceMessageVerifier(msg)
return verifier.is_verified() | c181e82d5748ed6a310650730bc1fc94cde8e33d | 3,659,333 |
def multiples(a, b):
"""This function checks if a number is a multiple of another."""
if type(a) != int or type(b) != int:
raise Exception('Values must be integers.')
elif a == 0:
raise Exception('0 is not valid.')
elif a == b:
raise Exception('Numbers should not be the same.')
else:
if b > a:
check = b % a
if not check:
return True
else:
return False
else:
raise Exception("Error! {0} isn't greater than {1}."
.format(b, a)) | 3f8bccd5429b5d307c0a018b7186bd75a76e996a | 3,659,334 |
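# A minimal usage sketch for multiples() above; the inputs are illustrative.
# The result is True when the second argument is an exact multiple of the first.
assert multiples(3, 12) is True   # 12 % 3 == 0
assert multiples(3, 14) is False  # 14 % 3 == 2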
import os
import datasets
import numpy as np
import tensorflow as tf
def train_gilbo(gan, sess, outdir, checkpoint_path, dataset, options):
"""Build and train GILBO model.
Args:
gan: GAN object.
sess: tf.Session.
outdir: Output directory. A pickle file will be written there.
checkpoint_path: Path where gan"s checkpoints are written. Only used to
ensure that GILBO files are written to a unique
subdirectory of outdir.
dataset: Name of dataset used to train the GAN.
options: Options dictionary.
Returns:
mean_eval_info: Mean GILBO computed over a large number of images generated
by the trained GAN
mean_train_consistency: Mean consistency of the trained GILBO model with
data from the training set.
mean_eval_consistency: Same consistency measure for the trained model with
data from the validation set.
mean_self_consistency: Same consistency measure for the trained model with
data generated by the trained model itself.
See the GILBO paper for an explanation of these metrics.
Raises:
ValueError: If the GAN has uninitialized variables.
"""
uninitialized = sess.run(tf.report_uninitialized_variables())
if uninitialized:
raise ValueError("Model has uninitialized variables!\n%r" % uninitialized)
outdir = os.path.join(outdir, checkpoint_path.replace("/", "_"))
tf.gfile.MakeDirs(outdir)
with tf.variable_scope("gilbo"):
ones = tf.ones((gan.batch_size, gan.z_dim))
# Get a distribution for the prior.
z_dist = ds.Independent(ds.Uniform(-ones, ones), 1)
z_sample = z_dist.sample()
epsneg = np.finfo("float32").epsneg
# Clip samples from the GAN uniform prior because the Beta distribution
# doesn"t include the top endpoint and has issues with the bottom endpoint.
ganz_clip = tf.clip_by_value(gan.z, -(1 - epsneg), 1 - epsneg)
# Get generated images from the model.
fake_images = gan.fake_images
# Build the regressor distribution that encodes images back to predicted
# samples from the prior.
with tf.variable_scope("regressor"):
z_pred_dist = _build_regressor(fake_images, gan.z_dim)
# Capture the parameters of the distributions for later analysis.
dist_p1 = z_pred_dist.distribution.distribution.concentration0
dist_p2 = z_pred_dist.distribution.distribution.concentration1
# info and avg_info compute the GILBO.
info = z_pred_dist.log_prob(ganz_clip) - z_dist.log_prob(ganz_clip)
avg_info = tf.reduce_mean(info)
# Set up training of the GILBO model.
lr = options.get("gilbo_learning_rate", 4e-4)
learning_rate = tf.get_variable(
"learning_rate", initializer=lr, trainable=False)
gilbo_step = tf.get_variable("gilbo_step", dtype=tf.int32, initializer=0,
trainable=False)
opt = tf.train.AdamOptimizer(learning_rate)
regressor_vars = tf.contrib.framework.get_variables("gilbo/regressor")
train_op = opt.minimize(-info, var_list=regressor_vars)
# Initialize the variables we just created.
uninitialized = plist(tf.report_uninitialized_variables().eval())
uninitialized_vars = uninitialized.apply(
tf.contrib.framework.get_variables_by_name)._[0]
tf.variables_initializer(uninitialized_vars).run()
saver = tf.train.Saver(uninitialized_vars, max_to_keep=1)
try:
checkpoint_path = tf.train.latest_checkpoint(outdir)
saver.restore(sess, checkpoint_path)
except ValueError:
      # Failing to restore just indicates that we don't have a valid checkpoint,
# so we will just start training a fresh GILBO model.
pass
_train_gilbo(sess, gan, saver, learning_rate, gilbo_step, z_sample, avg_info,
z_pred_dist, train_op, outdir, options)
mean_eval_info = _eval_gilbo(sess, gan, z_sample, avg_info,
dist_p1, dist_p2, fake_images, outdir, options)
# Collect encoded distributions on the training and eval set in order to do
# kl-nearest-neighbors on generated samples and measure consistency.
dataset = datasets.get_dataset(dataset)
x_train = dataset.load_dataset(split_name="train", num_threads=1)
x_train = x_train.batch(gan.batch_size, drop_remainder=True)
x_train = x_train.make_one_shot_iterator().get_next()[0]
x_train = tf.reshape(x_train, fake_images.shape)
x_eval = dataset.load_dataset(split_name="test", num_threads=1)
x_eval = x_eval.batch(gan.batch_size, drop_remainder=True)
x_eval = x_eval.make_one_shot_iterator().get_next()[0]
x_eval = tf.reshape(x_eval, fake_images.shape)
mean_train_consistency = _run_gilbo_consistency(
x_train, "train", extract_input_images=0,
save_consistency_images=20, num_batches=5, **locals())
mean_eval_consistency = _run_gilbo_consistency(
x_eval, "eval", extract_input_images=0,
save_consistency_images=20, num_batches=5, **locals())
mean_self_consistency = _run_gilbo_consistency(
fake_images, "self", extract_input_images=20,
save_consistency_images=20, num_batches=5, **locals())
return (mean_eval_info, mean_train_consistency, mean_eval_consistency,
mean_self_consistency) | bc018f2fd875dd267a33f1ba42cabe8a2127b861 | 3,659,335 |
from urllib import parse
def unquote_keys(data):
"""Restores initial view of 'quoted' keys in dictionary data
:param data: is a dictionary
:return: data with restored keys if they were 'quoted'.
"""
if isinstance(data, dict):
        for key, value in list(data.items()):  # iterate over a copy; keys are replaced below
if isinstance(value, dict):
unquote_keys(value)
if key.startswith('%24'):
k = parse.unquote(key)
data[k] = data.pop(key)
return data | de3802fdf0b278fcb39870f49ec7435ae5a63f38 | 3,659,336 |
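# A minimal usage sketch for unquote_keys() above (it relies on
# `from urllib import parse` as added to the snippet's imports).
# A key quoted as '%24inc' is restored to '$inc'.
doc = {'%24inc': {'count': 1}, 'plain': 2}
print(unquote_keys(doc))  # {'plain': 2, '$inc': {'count': 1}}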
def retr_amplslen(peri, radistar, masscomp, massstar):
"""
Calculate the self-lensing amplitude.
Arguments
peri: orbital period [days]
radistar: radius of the star [Solar radius]
masscomp: mass of the companion [Solar mass]
massstar: mass of the star [Solar mass]
Returns
amplslen: the fractional amplitude of the self-lensing
"""
amplslen = 7.15e-5 * radistar**(-2.) * peri**(2. / 3.) * masscomp * (masscomp + massstar)**(1. / 3.) * 1e3 # [ppt]
return amplslen | 32c0618f0e5965357fbcadd090443d0baf0e65bd | 3,659,337 |
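# A worked example for retr_amplslen() above with illustrative inputs:
# a 10-day orbit around a Sun-like star with a 0.6 solar-mass companion.
ampl = retr_amplslen(peri=10., radistar=1., masscomp=0.6, massstar=1.)
print('%.3g ppt' % ampl)  # roughly 0.23 ppt for these values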
import os
import tarfile
def main_work(indir, outdir, aux=None, hv=None):
"""
:param indir:
:param outdir:
:param aux:
:param hv:
:return:
"""
if not os.path.exists(outdir):
os.makedirs(outdir)
if hv is None:
hv_ = all_hv
else:
hv_ = [hv]
if aux is None:
aux_ = all_aux
else:
aux_ = [aux]
for tile in hv_:
get_extent = GetExtents(int(tile[0]), int(tile[1]))
out_file = None
tarlist = []
for prod in aux_:
print(f"\nWorking on TILE: {tile}\n\t\tAUX: {prod}")
out_file = make_filename(tile, prod, outdir)
src_file = f"{indir}{os.sep}{prod}.tif"
if not os.path.exists(out_file):
run_subset(src_file, out_file, get_extent.TILE_EXTENT)
tarlist.append(out_file)
archive = outdir + os.sep + os.path.basename(out_file)[:35] + ".tar"
with tarfile.open(archive, "w") as tar:
for f in tarlist:
tar.add(f, os.path.basename(f))
os.remove(f)
return None | fecdeec5b329d3a5b8ff809e947bd234ee6808e9 | 3,659,338 |
from datetime import datetime
def calculate_current_teach_week(semester_first_week_date='2021-3-08 08:00:00'):
"""
计算当前日期所属教学周,实现思路是:当前日期所属一年中的周 - 每学期的第一周
----
param: semester_first_week_date: 学期第一周的日期,例如 '2021-3-08 08:00:00'
return: 当前教学周
"""
# 获取指定日期属于当年的第几周, 返回字符串
semester_first_week = datetime.strptime(semester_first_week_date, '%Y-%m-%d %H:%M:%S').strftime('%W')
# 获取当前日期是一年中的第几周, 返回字符串
current_year_week = datetime.now().strftime('%W')
# 计算当前日期所属的教学周
# ( ) 中的减一表示第一周之前的周数
# 最后加一是因为计算周数是从索引00开始的,所以需要加1
current_teach_week = int(current_year_week) - (int(semester_first_week) - 1) + 1
return current_teach_week | 01a8df84b878e192dae1b1d0d38d78fb5c19f93e | 3,659,339 |
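# A minimal usage sketch for calculate_current_teach_week() above, using the
# same first-week date as the function's default argument.
print(calculate_current_teach_week('2021-3-08 08:00:00'))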
def get_first_model_each_manufacturer(cars=cars):
"""return a list of matching models (original ordering)"""
return [cars[key][0] for key in cars] | 639cd912a68149864f4d0ff6c1b2dc7bc911052f | 3,659,340 |
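# A minimal usage sketch with an illustrative manufacturer -> models mapping;
# the real module-level `cars` dict is assumed to have the same shape.
sample_cars = {'Ford': ['Falcon', 'Focus'], 'Toyota': ['Corolla', 'Camry']}
print(get_first_model_each_manufacturer(cars=sample_cars))  # ['Falcon', 'Corolla']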
import re
def get_sandbox_table_name(dataset_id, rule_name):
"""
A helper function to create a table in the sandbox dataset
:param dataset_id: the dataset_id to which the rule is applied
:param rule_name: the name of the cleaning rule
:return: the concatenated table name
"""
return '{dataset_id}_{rule_name}'.format(dataset_id=dataset_id,
rule_name=re.sub(
r'\W', '_', rule_name)) | ee07d40f885cb9d6d0d34cc0215620a2572b6b5f | 3,659,341 |
def index():
"""for i in range(0, 30):
data = QuizQuestions("math", None, "en_US", 7, "normal", "This is placeholder question number " + str(i), "c", "Answer A", "Answer B", "Answer C", "Answer D", True)
db.session.add(data)
db.session.commit()"""
return render_template("quiz_index.html") | 5be34099cead47e4d339edf14bdc39519e7242f5 | 3,659,342 |
from typing import Any
def _get_invoke_function_name() -> Any:
"""
Get invoke function Name.
Returns
-------
Function Name.
"""
props = get_properties()
functionName = f"orbit-{props['AWS_ORBIT_ENV']}-{props['AWS_ORBIT_TEAM_SPACE']}-container-runner"
return functionName | 387ed3d80e3fedcee4662ec9db62622bb8393aba | 3,659,343 |
import numpy as np
def sigmaG(a, axis=None, overwrite_input=False, keepdims=False):
"""
Compute the rank-based estimate of the standard deviation
Parameters
----------
a : array_like
Array containing numbers whose mean is desired. If `a` is not an
array, a conversion is attempted.
axis : int, optional
Axis along which the means are computed. The default is to compute
the mean of the flattened array.
overwrite_input : bool, optional
If True, then allow use of memory of input array `a` for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted.
Default is False. Note that, if `overwrite_input` is True and the
input is not already an array, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
median : ndarray, see dtype parameter above
array containing the median values
sigmaG : ndarray, see dtype parameter above.
array containing the robust estimator of the standard deviation
See Also
--------
median_sigmaG : robust rank-based estimate of mean and standard deviation
Notes
-----
This routine uses a single call to ``np.nanpercentile`` to find the
quartiles along the given axis, and uses these to compute the
sigmaG, a robust estimate of the standard deviation sigma:
sigmaG = 0.7413 * (q75 - q25)
where 0.7413 ~ 1 / (2 sqrt(2) erf^-1(0.5))
"""
q25, q75 = np.nanpercentile(a, [25, 75], axis=axis, overwrite_input=overwrite_input)
sigmaG = sigmaG_factor * (q75 - q25)
if keepdims:
if axis is None:
newshape = a.ndim * (1,)
else:
newshape = np.asarray(a.shape)
newshape[axis] = 1
sigmaG = sigmaG.reshape(newshape)
return sigmaG | 7731a1ad94f85baf02125479fc96e89a59c8b594 | 3,659,344 |
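# A minimal usage sketch for sigmaG() above: for Gaussian data it should
# approximate the true standard deviation (here sigma = 2). It assumes the
# module-level sigmaG_factor (~0.7413, see the docstring) is defined.
rng = np.random.RandomState(0)
sample = rng.normal(loc=0.0, scale=2.0, size=10000)
print(sigmaG(sample))  # close to 2.0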
def traverse_tagged_databases(
functional_unit, method, label="tag", default_tag="other", secondary_tags=[], fg_databases=None
):
"""Traverse a functional unit throughout its foreground database(s) or the
listed databses in fg_databses, and group impacts by tag label.
Contribution analysis work by linking impacts to individual activities.
However, you also might want to group impacts in other ways. For example,
give individual biosphere exchanges their own grouping, or aggregate two
activities together.
Consider this example system, where the letters are the tag labels, and the
numbers are exchange amounts. The functional unit is one unit of the tree
root.
.. image:: images/tagged-traversal.png
:alt: Example tagged supply chain
In this supply chain, tags are applied to activities and biosphere exchanges.
If a biosphere exchange is not tagged, it inherits the tag of its producing
activity. Similarly, links to other databases are assessed with the usual
LCA machinery, and the total LCA score is tagged according to its consuming
activity. If an activity does not have a tag, a default tag is applied.
We can change our visualization to show the use of the default tags:
.. image:: images/tagged-traversal-2.png
:alt: Example tagged supply chain
And then we can manually calculate the tagged impacts. Normally we would
need to know the actual biosphere flows and their respective
characterization factors (CF), but in this example we assume that each
CF is one. Our result, group by tags, would therefore be:
* **A**: :math:`6 + 27 = 33`
* **B**: :math:`30 + 44 = 74`
* **C**: :math:`5 + 16 + 48 = 69`
* **D**: :math:`14`
This function will only traverse the foreground database, i.e. the
database of the functional unit activity. A functional unit can have
multiple starting nodes; in this case, all foreground databases are
traversed.
Input arguments:
* ``functional_unit``: A functional unit dictionary, e.g. ``{("foo", "bar"): 42}``.
* ``method``: A method name, e.g. ``("foo", "bar")``
* ``label``: The label of the tag classifier. Default is ``"tag"``
* ``default_tag``: The tag classifier to use if none was given. Default is ``"other"``
* ``secondary_tags``: List of tuples in the format (secondary_label, secondary_default_tag). Default is empty list.
* ``fg_databases``: a list of foreground databases to be traversed, e.g. ['foreground', 'biomass', 'machinery']
It's not recommended to include all databases of a project in the list to be traversed, especially not ecoinvent itself
Returns:
Aggregated tags dictionary from ``aggregate_tagged_graph``, and tagged supply chain graph from ``recurse_tagged_database``.
"""
lca = LCA(functional_unit, method)
lca.lci()
lca.lcia()
method_dict = {o[0]: o[1] for o in Method(method).load()}
graph = [
recurse_tagged_database(
key, amount, method_dict, lca, label, default_tag, secondary_tags, fg_databases
)
for key, amount in functional_unit.items()
]
return aggregate_tagged_graph(graph), graph | 02edd2f9b33760a730ea7906240b48418059430c | 3,659,345 |
def graft(
repo,
ctx,
base=None,
labels=None,
keepparent=False,
keepconflictparent=False,
wctx=None,
):
"""Do a graft-like merge.
This is a merge where the merge ancestor is chosen such that one
or more changesets are grafted onto the current changeset. In
addition to the merge, this fixes up the dirstate to include only
a single parent (if keepparent is False) and tries to duplicate any
renames/copies appropriately.
ctx - changeset to rebase
base - merge base, or ctx.p1() if not specified
labels - merge labels eg ['local', 'graft']
keepparent - keep second parent if any
keepconflictparent - if unresolved, keep parent used for the merge
"""
# If we're grafting a descendant onto an ancestor, be sure to pass
# mergeancestor=True to update. This does two things: 1) allows the merge if
# the destination is the same as the parent of the ctx (so we can use graft
# to copy commits), and 2) informs update that the incoming changes are
# newer than the destination so it doesn't prompt about "remote changed foo
# which local deleted".
# We also pass mergeancestor=True when base is the same revision as p1. 2)
# doesn't matter as there can't possibly be conflicts, but 1) is necessary.
wctx = wctx or repo[None]
pctx = wctx.p1()
base = base or ctx.p1()
mergeancestor = (
repo.changelog.isancestor(pctx.node(), ctx.node())
or pctx.rev() == base.rev()
)
stats = _update(
repo,
ctx.node(),
True,
True,
base.node(),
mergeancestor=mergeancestor,
labels=labels,
wc=wctx,
)
if keepconflictparent and stats.unresolvedcount:
pother = ctx.node()
else:
pother = nullid
parents = ctx.parents()
if keepparent and len(parents) == 2 and base in parents:
parents.remove(base)
pother = parents[0].node()
# Never set both parents equal to each other
if pother == pctx.node():
pother = nullid
if wctx.isinmemory():
wctx.setparents(pctx.node(), pother)
# fix up dirstate for copies and renames
copies.graftcopies(wctx, ctx, base)
else:
with repo.dirstate.parentchange():
repo.setparents(pctx.node(), pother)
repo.dirstate.write(repo.currenttransaction())
# fix up dirstate for copies and renames
copies.graftcopies(wctx, ctx, base)
return stats | b715ead11ea83c61eff52a670396885cd6373739 | 3,659,346 |
def GetSiteFilters(filename):
""" Reader for a file of reportable sites.
The file contains 2 tokens: the site name and a normalization factor.
The method returns a hash table with the key being site and the value
the normalization factor to use.
"""
try:
#--- process the reportable sites file ---
sites = {}
fd = open(filename)
while 1:
filter = fd.readline()
if filter == "": # EOF
break
filter = filter.strip().strip("\n")
if filter.startswith("#"):
continue
if len(filter) == 0:
continue
site = filter.split()
if sites.has_key(site[0]):
raise Exception("System error: duplicate - site (%s) already set" % site[0])
factor = 0
if len(site) == 1:
raise Exception("System error: No normalization factory was provide for site: %s" % site[0])
elif len(site) > 1:
#-- verify the factor is an integer --
try:
tmp = int(site[1])
factor = float(site[1])/1000
except:
raise Exception("Error in %s file: 2nd token must be an integer (%s" % (filename,filter))
#-- set the factor --
sites[site[0]] = factor
else:
continue
#-- end of while loop --
fd.close()
#-- verify there is at least 1 site --
if len(sites) == 0:
raise Exception("Error in %s file: there are no sites to process" % filename)
return sites
except IOError, (errno,strerror):
raise Exception("IO error(%s): %s (%s)" % (errno,strerror,filename)) | 5f5e02b4213a060ca2ea2485d9f4ce4a09e9995f | 3,659,347 |
import warnings
def MiniMobileNetV2(input_shape=None,
alpha=1.0,
expansion_factor=6,
depth_multiplier=1,
dropout=0.,
weight_decay=0.,
include_top=True,
weights=None,
input_tensor=None,
pooling=None,
classes=10):
"""Instantiates the MobileNet architecture.
MobileNet V2 is from the paper:
- [Inverted Residuals and Linear Bottlenecks: Mobile Networks for Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381)
Note that only TensorFlow is supported for now,
therefore it only works with the data format
`image_data_format='channels_last'` in your Keras config
at `~/.keras/keras.json`.
To load a MobileNet model via `load_model`, import the custom
objects `relu6` and `DepthwiseConv2D` and pass them to the
`custom_objects` parameter.
E.g.
model = load_model('mobilenet.h5', custom_objects={
'relu6': mobilenet.relu6,
'DepthwiseConv2D': mobilenet.DepthwiseConv2D})
# Arguments
input_shape: optional shape tuple, only to be specified
if `include_top` is False (otherwise the input shape
has to be `(224, 224, 3)` (with `channels_last` data format)
or (3, 224, 224) (with `channels_first` data format).
It should have exactly 3 inputs channels,
and width and height should be no smaller than 32.
E.g. `(200, 200, 3)` would be one valid value.
alpha: controls the width of the network.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
expansion_factor: controls the expansion of the internal bottleneck
blocks. Should be a positive integer >= 1
depth_multiplier: depth multiplier for depthwise convolution
(also called the resolution multiplier)
dropout: dropout rate
weight_decay: Weight decay factor.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: `None` (random initialization) or
`imagenet` (ImageNet weights)
input_tensor: optional Keras tensor (i.e. output of
`layers.Input()`)
to use as image input for the model.
pooling: Optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model
will be the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a
2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
A Keras model instance.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
"""
if K.backend() != 'tensorflow':
raise RuntimeError('Only Tensorflow backend is currently supported, '
'as other backends do not support '
'depthwise convolution.')
if weights not in {'imagenet', None}:
raise ValueError('The `weights` argument should be either '
'`None` (random initialization) or `imagenet` '
'(pre-training on ImageNet).')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as ImageNet with `include_top` '
'as true, `classes` should be 1000')
# Determine proper input shape and default size.
if input_shape is None:
default_size = 224
else:
if K.image_data_format() == 'channels_first':
rows = input_shape[1]
cols = input_shape[2]
else:
rows = input_shape[0]
cols = input_shape[1]
if rows == cols and rows in [96, 128, 160, 192, 224]:
default_size = rows
else:
default_size = 224
input_shape = _obtain_input_shape(input_shape,
default_size=default_size,
min_size=32,
data_format=K.image_data_format(),
require_flatten=include_top or weights)
if K.image_data_format() == 'channels_last':
row_axis, col_axis = (0, 1)
else:
row_axis, col_axis = (1, 2)
rows = input_shape[row_axis]
cols = input_shape[col_axis]
if weights == 'imagenet':
if depth_multiplier != 1:
raise ValueError('If imagenet weights are being loaded, '
'depth multiplier must be 1')
if alpha not in [0.35, 0.50, 0.75, 1.0, 1.3, 1.4]:
raise ValueError('If imagenet weights are being loaded, '
'alpha can be one of'
'`0.35`, `0.50`, `0.75`, `1.0`, `1.3` and `1.4` only.')
if rows != cols or rows not in [96, 128, 160, 192, 224]:
raise ValueError('If imagenet weights are being loaded, '
'input must have a static square shape (one of '
                             '(96, 96), (128, 128), (160, 160), (192, 192), or '
'(224, 224)).Input shape provided = %s' % (input_shape,))
if K.image_data_format() != 'channels_last':
warnings.warn('The MobileNet family of models is only available '
'for the input data format "channels_last" '
'(width, height, channels). '
'However your settings specify the default '
'data format "channels_first" (channels, width, height).'
' You should set `image_data_format="channels_last"` '
'in your Keras config located at ~/.keras/keras.json. '
'The model being returned right now will expect inputs '
'to follow the "channels_last" data format.')
K.set_image_data_format('channels_last')
old_data_format = 'channels_first'
else:
old_data_format = None
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
x = _conv_block(img_input, 32, alpha, bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay)
x = _depthwise_conv_block_v2(x, 16, alpha, 1, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=1)
x = _depthwise_conv_block_v2(x, 24, alpha, expansion_factor, depth_multiplier, block_id=2,
bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, strides=(2, 2))
x = _depthwise_conv_block_v2(x, 24, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=3)
x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, block_id=4,
bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay)
x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=5)
x = _depthwise_conv_block_v2(x, 32, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=6)
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, block_id=7,
bn_epsilon=1e-3, bn_momentum=0.99, weight_decay=weight_decay, strides=(2, 2))
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=8)
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=9)
x = _depthwise_conv_block_v2(x, 64, alpha, expansion_factor, depth_multiplier, bn_epsilon=1e-3, bn_momentum=0.99,
weight_decay=weight_decay, block_id=10)
if alpha <= 1.0:
penultimate_filters = 1280
else:
penultimate_filters = int(1280 * alpha)
x = _conv_block(x, penultimate_filters, alpha=1.0, kernel=(1, 1), bn_epsilon=1e-3, bn_momentum=0.99,
block_id=18)
if include_top:
if K.image_data_format() == 'channels_first':
shape = (penultimate_filters, 1, 1)
else:
shape = (1, 1, penultimate_filters)
x = GlobalAveragePooling2D()(x)
x = Reshape(shape, name='reshape_1')(x)
x = Dropout(dropout, name='dropout')(x)
x = Conv2D(classes, (1, 1),
kernel_initializer=initializers.he_normal(),
kernel_regularizer=regularizers.l2(weight_decay),
padding='same', name='conv_preds')(x)
x = Activation('softmax', name='act_softmax')(x)
x = Reshape((classes,), name='reshape_2')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
# Create model.
model = Model(inputs, x, name='mobilenetV2_%0.2f_%s' % (alpha, rows))
# load weights
if weights == 'imagenet':
if K.image_data_format() == 'channels_first':
raise ValueError('Weights for "channels_last" format '
'are not available.')
if alpha == 1.0:
alpha_text = '1_0'
elif alpha == 1.3:
alpha_text = '1_3'
elif alpha == 1.4:
alpha_text = '1_4'
elif alpha == 0.75:
alpha_text = '7_5'
elif alpha == 0.50:
alpha_text = '5_0'
else:
alpha_text = '3_5'
if include_top:
model_name = 'mobilenet_v2_%s_%d_tf.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH_V2 + model_name
weights_path = get_file(model_name,
weigh_path,
cache_subdir='models')
else:
model_name = 'mobilenet_v2_%s_%d_tf_no_top.h5' % (alpha_text, rows)
weigh_path = BASE_WEIGHT_PATH_V2 + model_name
weights_path = get_file(model_name,
weigh_path,
cache_subdir='models')
model.load_weights(weights_path)
if old_data_format:
K.set_image_data_format(old_data_format)
return model | 695d592f3b8e75f9d416702c3e2b74f8f608e211 | 3,659,348 |
import logging
def update_forward_cnt(**kwargs):
"""
    Update the forward count, incrementing it by 1
:param kwargs: {'object_id': object_id}
:return:
"""
session = None
try:
session = get_session()
        # Increment the forward count by 1
session.query(SecondHand).filter(SecondHand.OBJECT_ID == kwargs['object_id']).update(
{SecondHand.FORWARD_CNT: SecondHand.FORWARD_CNT + 1})
        # Commit to the database
session.commit()
        logging.info('OK : second_hand.py--->update_forward_cnt() succeeded')
return RESULT_OK
except Exception as e:
session.rollback()
        logging.critical('Error : second_hand.py--->update_forward_cnt() failed: {}'.format(e))
return []
finally:
session.close() | f6525dfa61e6fb2a5c39e344a6528d5f581393cd | 3,659,349 |
import copy
def recursive_dict_merge(dict1, dict2):
"""
Merges dictionaries (of dictionaries).
Preference is given to the second dict, i.e. if a key occurs in both dicts, the value from `dict2` is used.
"""
result = copy.deepcopy(dict1)
for key in dict2:
if key in dict1 and isinstance(dict1[key], dict) and isinstance(dict2[key], dict):
result[key] = recursive_dict_merge(dict1[key], dict2[key])
else:
result[key] = dict2[key]
return result | fbcb51ad47de0dd4d1c95cd59873918187736b63 | 3,659,350 |
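# A minimal usage sketch for recursive_dict_merge() above: nested keys are
# merged recursively and, on conflict, values from the second dict win.
defaults = {'db': {'host': 'localhost', 'port': 5432}, 'debug': False}
overrides = {'db': {'port': 6543}, 'debug': True}
print(recursive_dict_merge(defaults, overrides))
# {'db': {'host': 'localhost', 'port': 6543}, 'debug': True}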
import torch
def define_loss(name, device="cuda"):
"""
Defines the loss function associated to the name.
    Supports losses in the LOSSES list, as well as the Lovasz loss.
Args:
name (str): Loss name.
device (str, optional): Device for torch. Defaults to "cuda".
Raises:
NotImplementedError: Specified loss name is not supported.
Returns:
torch loss: Loss function
"""
if name in LOSSES:
loss = getattr(torch.nn, name)(reduction="none")
elif name == "lovasz":
loss = lovasz_loss
else:
raise NotImplementedError
return loss | b3705a116af18dbe7d4a8cf8539c544e057a08d6 | 3,659,351 |
import os
def get_default_archive_path():
"""
Makeup default archive path.
Unify the archive path between local machine and cloud.
"""
if not XT_HWC_WORKSPACE:
return os.path.join(os.path.expanduser("~"), DEFAULT_ARCHIVE_DIR)
else:
return os.path.join(XT_HWC_WORKSPACE, DEFAULT_ARCHIVE_DIR) | 38ec50c5d841f470c37f1c5749a028e3ee4551a8 | 3,659,352 |
import os
def get_hosts_from_file(hostfile):
"""
Return the list of hosts from a given host file.
"""
hosts = []
if os.path.exists(hostfile):
for line in open(hostfile, "r").readlines():
hosts.append(line.split(' ', 1)[0])
return hosts | f49b7734caa679b328a9649c5fb6fd3009c20e18 | 3,659,353 |
import os
import errno
def process_exists(pid): # type: (int) -> bool
""" Checks if the processed with the given *pid* exists. Returns #True if
that is the case, #False otherwise. """
if pid == 0:
return False
try:
os.kill(pid, 0)
except OSError as exc:
if exc.errno == errno.ESRCH:
return False
return True | 80bc3de2270d69ca7b4b5e60e4e87d13253e2d11 | 3,659,354 |
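# A minimal usage sketch for process_exists() above (POSIX semantics of
# os.kill(pid, 0)): the interpreter's own PID exists, while PID 0 is
# rejected by convention in this helper.
print(process_exists(os.getpid()))  # True
print(process_exists(0))            # False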
import sys
import os
def get_application_name():
"""Attempts to find the application name using system arguments."""
if hasattr(sys, 'argv') and sys.argv[0]:
app_name = os.path.basename(sys.argv[0])
else:
app_name = None
return app_name | f37c0913b2e227e20a22e3d6bd8ba1fdf4b7f6f3 | 3,659,355 |
import pandas as pd
def get_atomate_wflows(wf_coll,
states,
seed_regex=None,
project_regex=None) -> pd.DataFrame:
"""Obtain workflow informaton for atomate jobs"""
return get_workflows(wf_coll, ['atomate-relax'],
states,
seed_regex=seed_regex,
project_regex=project_regex) | 131609b7360ed6f378235b2b0c34268bbce5b641 | 3,659,356 |
def the_H_function(sorted_citations_list, n=1):
"""from a list of integers [n1, n2 ..] representing publications citations,
return the max list-position which is >= integer
eg
>>> the_H_function([10, 8, 5, 4, 3]) => 4
>>> the_H_function([25, 8, 5, 3, 3]) => 3
>>> the_H_function([1000, 20]) => 2
"""
if sorted_citations_list and sorted_citations_list[0] >= n:
return the_H_function(sorted_citations_list[1:], n + 1)
else:
return n - 1 | 24ad3d85963ef0a9d4531ba552371d7e829f1c2a | 3,659,357 |
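# A minimal usage sketch reproducing the docstring examples of
# the_H_function() above; the citation counts must already be sorted
# in descending order.
print(the_H_function([10, 8, 5, 4, 3]))  # 4
print(the_H_function([1000, 20]))        # 2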
from pydantic import BaseModel # noqa: E0611
import importlib
def build_trainer(model: BaseModel,
params: Parameters,
dataset: Dataset,
target_processor: TargetProcessor,
batch_processor: BatchProcessor) \
-> BaseTrainer:
"""
Build a neural network trainer/optimizer based on different backend
:param model: Model (inherited from nnimgproc.model.BaseModel)
:param params: Parameters (from nnimgproc.util.parameters),
training parameter set such as learning rate
:param dataset: Dataset (from nnimgproc.dataset), image provider
:param target_processor: TargetProcessor (from nnimgproc.processor)
:param batch_processor: BatchProcessor (from nnimgproc.processor)
:return: Trainer (from nnimgproc.trainer)
"""
lib = importlib.import_module('nnimgproc.backend.%s' % model.backend)
return lib.Trainer(model, params, dataset,
target_processor, batch_processor) | b9a1ede91818e024b4a0932f13e37cda4b9b6d28 | 3,659,358 |
import random
from bs4 import BeautifulSoup
def get_random_quote(quotes_list):
"""Return a random quote to user."""
upper_limit = len(quotes_list)-1
select = random.randint(0, upper_limit)
selected_quote = quotes_list[select]
soup = BeautifulSoup(selected_quote, 'html.parser')
return soup.text | c50f99640da88319c2b643b0fe1c386206c0c00b | 3,659,359 |
from unittest.mock import patch
def record_states(hass):
"""Record some test states.
We inject a bunch of state updates from media player, zone and
thermostat.
"""
mp = "media_player.test"
mp2 = "media_player.test2"
mp3 = "media_player.test3"
therm = "thermostat.test"
therm2 = "thermostat.test2"
zone = "zone.home"
script_c = "script.can_cancel_this_one"
def set_state(entity_id, state, **kwargs):
"""Set the state."""
hass.states.set(entity_id, state, **kwargs)
wait_recording_done(hass)
return hass.states.get(entity_id)
zero = dt_util.utcnow()
one = zero + timedelta(seconds=1)
two = one + timedelta(seconds=1)
three = two + timedelta(seconds=1)
four = three + timedelta(seconds=1)
states = {therm: [], therm2: [], mp: [], mp2: [], mp3: [], script_c: []}
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=one):
states[mp].append(
set_state(mp, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[mp].append(
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp2].append(
set_state(mp2, "YouTube", attributes={"media_title": str(sentinel.mt2)})
)
states[mp3].append(
set_state(mp3, "idle", attributes={"media_title": str(sentinel.mt1)})
)
states[therm].append(
set_state(therm, 20, attributes={"current_temperature": 19.5})
)
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=two):
# This state will be skipped only different in time
set_state(mp, "YouTube", attributes={"media_title": str(sentinel.mt3)})
# This state will be skipped because domain is excluded
set_state(zone, "zoning")
states[script_c].append(
set_state(script_c, "off", attributes={"can_cancel": True})
)
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 19.8})
)
states[therm2].append(
set_state(therm2, 20, attributes={"current_temperature": 19})
)
with patch("homeassistant.components.recorder.dt_util.utcnow", return_value=three):
states[mp].append(
set_state(mp, "Netflix", attributes={"media_title": str(sentinel.mt4)})
)
states[mp3].append(
set_state(mp3, "Netflix", attributes={"media_title": str(sentinel.mt3)})
)
# Attributes changed even though state is the same
states[therm].append(
set_state(therm, 21, attributes={"current_temperature": 20})
)
return zero, four, states | eb24d3ce56aa2ba4df9423107252bdd9142e0861 | 3,659,360 |
def ask_why(doc):
"""
Ask questions of the form “Why is ..x..?” where x is either a
combination of object and adjective or subject and adjective
or “Why ..prep.. the ..noun..”
"""
chunk = find_subj_chunk(doc)
if chunk != None and chunk["adjective"] != None:
subj = chunk["subject"]
adj = chunk["adjective"]
respond = "Why is {} {}?".format(subj, adj)
return respond
chunk = find_obj_chunk(doc)
if chunk != None and chunk["adjective"] != None:
subj = chunk["objective"]
adj = chunk["adjective"]
respond = "Why is {} {}?".format(subj, adj)
return respond
# I had similar experience in high school --> why in high school?
chunk = find_prep_chunk(doc)
if chunk != None:
subj = chunk["full_subject"]
prep = chunk["prep"]
respond = "Why {} the {}?".format(prep, subj)
return respond
return None | 64ef365f5ebd64dff4384cc558ecfa7856661fdd | 3,659,361 |
def get_extension(fname):
"""
Get file extension.
"""
return '.' + fname.split(".")[-1] | 9fa6f63d848aa7781b55e9cc384c9a8cb9665c69 | 3,659,362 |
import numpy as np
def large_xyz_to_luv_star(large_xyz, white_xyz):
"""
    # Overview
    Compute L*u*v* from XYZ.
    # Reference
https://en.wikipedia.org/wiki/CIELUV
"""
large_x, large_y, large_z = np.dsplit(large_xyz, 3)
white_xyz = np.array(white_xyz)
white_xyz = (white_xyz / white_xyz[1]).reshape((1, 1, 3))
x_n, y_n, z_n = np.dsplit(white_xyz, 3)
threshold = (6/29) ** 3
judge = (large_y / y_n)
l_lower = (judge <= threshold) * (((29/3) ** 3) * (large_y / y_n))
l_upper = (judge > threshold) * (116 * ((large_y / y_n) ** (1/3)) - 16)
l_star = l_lower + l_upper
u_dash, v_dash = np.dsplit(large_xyz_to_uv_dash(large_xyz), 2)
u_n_dash, v_n_dash = np.dsplit(large_xyz_to_uv_dash(white_xyz), 2)
u_star = 13 * l_star * (u_dash - u_n_dash)
v_star = 13 * l_star * (v_dash - v_n_dash)
return np.dstack((l_star, u_star, v_star)) | 689c13d99c7e8b279c6cd718aad33fce8baa5a67 | 3,659,363 |
def rotation(new_rotation=0):
"""Set the display rotation.
:param new_rotation: Specify the rotation in degrees: 0, 90, 180 or 270
"""
global _rotation
if new_rotation in [0, 90, 180, 270]:
_rotation = new_rotation
return True
else:
raise ValueError("Rotation: 0, 90, 180 or 270 degrees only") | 4f12a90e104ef66e50520523d23b3fff421fa991 | 3,659,364 |
def parse_url (url:str) -> str:
"""
    Normalize a URL
-> hello/world
<- /hello/world
"""
if url == "": url = "/"
    if not url.startswith ('/'): url = "/" + url # add a leading slash
    # if not url.endswith ("/"): url += "/" # add a trailing slash
return url | dd2ace64bd5926f2b20a77c81a1e885e8a4d3d2b | 3,659,365 |
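# A minimal usage sketch for parse_url() above: a leading slash is added
# when missing and an empty path normalizes to '/'.
print(parse_url('hello/world'))  # '/hello/world'
print(parse_url(''))             # '/'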
def bulk_edit(modeladmin, request, queryset):
""" Bulk edit selected items. """
form = None
if 'apply' in request.POST:
form = BulkEditForm(request.POST)
if form.is_valid():
property = form.cleaned_data['property']
cf_value = form.cleaned_data['cf_value']
ff_value = form.cleaned_data['ff_value']
inline_notes = form.cleaned_data['inline_notes']
footnotes = form.cleaned_data['footnotes']
overwrite = form.cleaned_data['overwrite']
delete_only = form.cleaned_data['delete_only']
if queryset.model is Subject:
entity_type = 'SO'
elif queryset.model is Location:
entity_type = 'SL'
elif queryset.model is Media:
entity_type = 'MP'
elif queryset.model is File:
entity_type = 'MF'
else:
entity_type = 'PO'
if not delete_only and (property.control_field and not cf_value):
modeladmin.message_user(request, 'UPDATE FAILED: If you would like to update a Controlled Property, you must selected a Controlled Term', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
elif not delete_only and (not property.control_field and (not ff_value or ff_value == '')):
modeladmin.message_user(request, 'UPDATE FAILED: If you would like to update a Free-Form Property, you must provide a Free-Form value', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
if property.primary_type != 'AL' and property.primary_type != entity_type:
modeladmin.message_user(request, 'UPDATE FAILED: You selected a property which is not available for this Entity. If you would like to make it available, go to the Descriptive Property table and change Primary Type to All', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
if cf_value and cf_value.type != property:
modeladmin.message_user(request, 'UPDATE FAILED: You selected a Controlled Term that is not a value for the selected Property', level=messages.ERROR)
return HttpResponseRedirect(request.get_full_path())
for item in queryset:
if queryset.model is Subject:
if property.control_field:
if overwrite or delete_only:
control_props = SubjectControlProperty.objects.filter(subject = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = SubjectControlProperty(subject = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = SubjectProperty.objects.filter(subject = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = SubjectProperty(subject = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is Location:
if property.control_field:
if overwrite or delete_only:
control_props = LocationControlProperty.objects.filter(location = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = LocationControlProperty(location = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = LocationProperty.objects.filter(location = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = LocationProperty(location = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is Media:
if property.control_field:
if overwrite or delete_only:
control_props = MediaControlProperty.objects.filter(media = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = MediaControlProperty(media = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = MediaProperty.objects.filter(media = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = MediaProperty(media = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is PersonOrg:
if property.control_field:
if overwrite or delete_only:
control_props = PersonOrgControlProperty.objects.filter(person_org = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = PersonOrgControlProperty(person_org = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = PersonOrgProperty.objects.filter(person_org = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = PersonOrgProperty(person_org = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
elif queryset.model is File:
if property.control_field:
if overwrite or delete_only:
control_props = FileControlProperty.objects.filter(file = item, control_property = property)
for cp in control_props:
cp.delete()
if not delete_only:
new_cp = FileControlProperty(file = item, control_property = property, control_property_value = cf_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_cp.save()
else:
if overwrite or delete_only:
ff_props = FileProperty.objects.filter(file = item, property = property)
for fp in ff_props:
fp.delete()
if not delete_only:
new_fp = FileProperty(file = item, property = property, property_value = ff_value, notes = footnotes, inline_notes = inline_notes, last_mod_by = request.user)
new_fp.save()
modeladmin.message_user(request, _("%s %s." % ('Selected property edited: ', property)))
return HttpResponseRedirect(request.get_full_path())
if not form:
form = BulkEditForm(initial={'_selected_action': request.POST.getlist(admin.ACTION_CHECKBOX_NAME)})
opts = queryset.model._meta
app_label = opts.app_label
return render_to_response(
'admin/bulk_edit.html',
{'items': queryset, 'bulk_edit_form': form, "opts": opts, "app_label": app_label},
context_instance = RequestContext(request)
) | 069eeb1a32d91bf7eeb055fa4014210f52e4792b | 3,659,366 |
import numpy as np
def smooth_GF(C, S, avg_rad, start_deg):
"""from Wahr et al: Time-variable gravity recovery from space eq. 34.
This is Jekeli's [1981] smoothing method."""
C_smooth = C
S_smooth = S
Re = 6378.1363; # Radius of Earth in km
b = np.log(2) / (1 - np.cos(avg_rad / Re))
W=[]
W.append(1 / (2 * np.pi))
W.append(1 / (2 * np.pi) * ((1 + np.exp(-2 * b)) / (1 - np.exp(-2 * b)) - 1 / b))
for j in range(start_deg,C.shape[0]):
w = (-(2*(j-1)+1)/b*W[j-1]) + W[j-2]
W.append(w)
if W[j] < 0.: W[j] = 0.
if W[j-1] == 0.: W[j] = 0.
for i in range(start_deg-1,C.shape[0]):
C_smooth[i,:]=C[i,:]*W[i]*2.*np.pi
S_smooth[i,:] = S[i,:]*W[i]*2.*np.pi
return C_smooth, S_smooth | 89c0edfe4af3ef476485517e06363acb09be4fda | 3,659,367 |
def get_vmstat():
"""
Get and format the content of /proc/vmstat
"""
buf = open("/proc/vmstat").read()
buf = [v.replace(' ', ":") for v in buf.split("\n")]
buf = ";".join(buf)
return buf | b2db72bbc3b143ff1ba37ee7e2dcc33295d4a4ea | 3,659,368 |
def upcomingIPOs(
symbol="",
exactDate="",
token="",
version="stable",
filter="",
format="json",
):
"""This will return all upcoming estimates, dividends, splits for a given symbol or the market. If market is passed for the symbol, IPOs will also be included.
https://iexcloud.io/docs/api/#upcoming-events
Args:
symbol (str): Symbol to look up
exactDate (str): exactDate Optional. Exact date for which to get data
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""
_raiseIfNotStr(symbol)
symbol = _quoteSymbols(symbol)
if symbol:
url = "stock/{}/upcoming-ipos".format(symbol)
else:
url = "stock/market/upcoming-ipos"
if exactDate:
url += "?exactDate={}".format(exactDate)
return _get(url, token, version, filter) | 0e456c36fb4cbb11eb863f22ae06fb01589fc21a | 3,659,369 |
from typing import Iterable
from typing import List
def sort_tokens(tokens: Iterable[Cwf]) -> List[Cwf]:
"""Sort tokens by natural order (sent, offset)"""
return sorted(tokens, key=lambda t: (t.get_sent(), int(t.get_offset()))) | 6b9ced6bdb72a1f53c7e721f5212894caa2c4756 | 3,659,370 |
def deep_seq_map(xss, fun, keys=None, fun_name=None, expand=False):
"""Applies fun to list of or dict of lists; adds the results in-place.
Usage: Transform a corpus iteratively by applying functions like
`tokenize`, `lower`, or vocabulary functions (word -> embedding id) to it.
from jtr.sisyphos.vocab import Vocab
vocab = Vocab()
keys = ['question', 'support']
corpus = deep_map(corpus, lambda x: x.lower(), keys)
corpus = deep_map(corpus, tokenize, keys)
corpus = deep_map(corpus, vocab, keys)
corpus = deep_map(corpus, vocab._normalize, keys=keys)
-> through tokenize we go from a dict of sentences to
a dict of words (list of lists), thus we now apply deep_seq_map for
processing to add start of and end of sentence tags:
corpus = deep_seq_map(corpus, lambda xs: ["<SOS>"] + xs +
["<EOS>"],
['question', 'support'])
-> From here we can create batches from the corpus and feed it into a model.
In case `expand==False` each top-level entry of `xs` to be transformed
replaces the original entry.
`deep_map` supports `xs` to be a dictionary or a list/tuple:
- In case `xs` is a dictionary, its transformed value is also a dictionary, and `keys` contains the keys of the
values to be transformed.
- In case `xs` is a list/tuple, `keys` contains the indices of the entries to be transformed
The function `deep_map` is recursively applied to the values of `xs`;
the function `fun` takes a sequence as input, and is applied at the one but deepest level,
where the entries are sequences of objects (no longer sequences of sequences).
This is the only difference with `deep_map`
Args:
`xs`: a sequence (list/tuple) of objects or sequences of objects.
`fun`: a function to transform sequences
`keys`: seq with keys if `xs` is dict; seq with integer indices if `xs` is seq.
For entries not in `keys`, the original `xs` value is retained.
`fun_name`: default value 'trf'; string with function tag (e.g. 'lengths'),
used if '''expand==True''' and '''isinstance(xs,dict)'''
Say for example fun_name='count', and `keys` contains 'sentence', then the transformed dict would look like
'''{'sentence':[sentences], 'sentence_lengths':[fun(sentences)] ...}'''
Returns:
Transformed sequence or dictionary.
Example:
>>> dave = [
... "All work and no play makes Jack a dull boy",
... "All work and no play makes Jack a dull boy.",
... "All work and no play makes Jack a very dull boy!"]
>>> jack = [
... "I'm sorry Dave, I'm afraid I can't do that!",
... "I'm sorry Dave, I'm afraid I can't do that",
... "I'm sorry Dave, I'm afraid I cannot do that"]
>>> support = [
... ["Play makes really dull", "really dull"],
... ["Dave is human"],
... ["All work", "all dull", "dull"]]
>>> data2 = {'dave': dave, 'jack': jack, 'support': support}
>>> vocab2 = Vocab()
>>> data2_processed = deep_map(data2, lambda x: tokenize(x.lower()))
>>> data2_ids = deep_map(data2_processed, vocab2)
>>> data2_ids_with_lengths = deep_seq_map(data2_ids, lambda xs: len(xs), keys=['dave','jack','support'],
... fun_name='lengths', expand=True)
>>> pprint.pprint(data2_ids_with_lengths)
{'dave': [[1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
[1, 2, 3, 4, 5, 6, 7, 8, 12, 9, 10, 13]],
'dave_lengths': [10, 11, 12],
'jack': [[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24, 13],
[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 21, 15, 22, 23, 24],
[14, 15, 16, 17, 18, 19, 14, 15, 16, 20, 14, 25, 23, 24]],
'jack_lengths': [17, 16, 14],
'support': [[[5, 6, 26, 9], [26, 9]], [[18, 27, 28]], [[1, 2], [1, 9], [9]]],
'support_lengths': [[4, 2], [3], [2, 2, 1]]}
"""
if isinstance(xss, list) and all([not isinstance(xs, list) for xs in xss]):
return fun(xss)
else:
if isinstance(xss, dict):
xss_mapped = {}
for k, xs in xss.items():
if keys is None or k in keys:
if expand:
xss_mapped[k] = xs
k = '%s_%s' % (str(k), str(fun_name) if fun_name is not None else 'trf')
if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]):
xss_mapped[k] = fun(xs)
else:
xss_mapped[k] = deep_seq_map(xs, fun) # fun_name not needed, because expand==False
else:
xss_mapped[k] = xs
else:
xss_mapped = []
for k, xs in enumerate(xss):
if keys is None or k in keys:
if expand:
xss_mapped.append(xs)
if isinstance(xs, list) and all([not isinstance(x, list) for x in xs]):
xss_mapped.append(fun(xs))
else:
xss_mapped.append(deep_seq_map(xs, fun))
else:
xss_mapped.append(xs)
return xss_mapped | 59406ae1ee87bfea82f4b22fb3d5fb96c29ccda6 | 3,659,371 |
def create_user(steamid, admin):
"""Create a user"""
steamid = string_to_steamid(steamid)
if not steamid.is_valid() or not steamid.type == EType.Individual:
echo('Invalid steam ID')
return 1
user = User(steamid64=steamid.as_64, admin=admin)
user.refresh_name()
if user.name is not None:
db.session.add(user)
db.session.commit()
echo('added ' + user.name)
else:
echo('No such steam user')
return 1 | 22f6a63d85d57e7df0ae5dad0e62e41ee7c6388a | 3,659,372 |
import json
def get_vocabularies():
"""
Return the currently used ontology
:return:
"""
vocabs = vocs.get_vocabularies()
vocabs = [(x, url_for('get_vocabulary', vid=x, _external=True)) for x in vocabs]
response = make_response(json.dumps(dict(vocabs)))
response.headers['Content-Type'] = 'application/json'
return response | 3b447b297209e8d6fa238ba8f0cf932f0e3eed84 | 3,659,373 |
def do3byte(*args):
"""do3byte(ea_t ea, asize_t length) -> bool"""
return _idaapi.do3byte(*args) | fa82f7dc5bfa5dcaea7db604ea4c7f1fc5883794 | 3,659,374 |
import logging
import gin
def compute_sap(ground_truth_data,
representation_function,
random_state,
num_train=gin.REQUIRED,
num_test=gin.REQUIRED,
batch_size=16,
continuous_factors=gin.REQUIRED):
"""Computes the SAP score.
Args:
ground_truth_data: GroundTruthData to be sampled from.
representation_function: Function that takes observations as input and
outputs a dim_representation sized representation for each observation.
random_state: Numpy random state used for randomness.
num_train: Number of points used for training.
num_test: Number of points used for testing discrete variables.
batch_size: Batch size for sampling.
continuous_factors: Factors are continuous variable (True) or not (False).
Returns:
Dictionary with SAP score.
"""
logging.info("Generating training set.")
mus, ys = utils.generate_batch_factor_code(
ground_truth_data, representation_function, num_train,
random_state, batch_size)
mus_test, ys_test = utils.generate_batch_factor_code(
ground_truth_data, representation_function, num_test,
random_state, batch_size)
logging.info("Computing score matrix.")
score_matrix = _compute_score_matrix(mus, ys, mus_test,
ys_test, continuous_factors)
# Score matrix should have shape [num_latents, num_factors].
assert score_matrix.shape[0] == mus.shape[0]
assert score_matrix.shape[1] == ys.shape[0]
scores_dict = {}
scores_dict["SAP_score"] = _compute_avg_diff_top_two(score_matrix)
logging.info("SAP score: %.2g", scores_dict["SAP_score"])
return scores_dict | f9aad7f12597491cb57d5b7180debb76da3bc01f | 3,659,375 |
import re
def clean_cmd(cmd):
"""Removes multiple spaces and whitespace at beginning or end of command.
Args:
cmd (str): A string containing the command to clean.
Returns:
A cleaned command string.
"""
    return re.sub(r'\s{2,}', ' ', cmd).strip(' \t\n\r') | d98f4fea9791cbb5936b306ee74335efc6515902 | 3,659,376
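# A minimal usage sketch for clean_cmd() above: runs of whitespace collapse
# to single spaces and leading/trailing whitespace is stripped.
print(clean_cmd('  git   commit  -m "msg" \n'))  # 'git commit -m "msg"'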
def multiply_scalar(mat, value):
""" Multiplies every element in the matrix by the given scalar value.
Args:
mat (Matrix): The input matrix.
value (int or float): The number that mat will be multipled by.
Returns:
Matrix: The resulting matrix from the multiplication of mat and value.
"""
mat_1d = flatten(mat.data)
result = [x * value for x in mat_1d]
return reshape(Matrix(result), mat.shape()) | 3b2469213ddb93e06ce210ee082a403f5ed2cc4a | 3,659,377 |
import numpy as np
def bin4D(data4D, bin_factor):
"""
Bin 4D data in spectral dimensions
Parameters
----------
data4D: ndarray of shape (4,4)
the first two dimensions are Fourier
space, while the next two dimensions
are real space
bin_factor: int
Value by which to bin data
Returns
-------
binned_data: ndarray of shape (4,4)
Data binned in the spectral dimensions
Notes
-----
The data is binned in the first two dimensions - which are
the Fourier dimensions using the internal numba functions
`resizer2D_numbaopt` and `resizer1D_numbaopt`
See Also
--------
resizer1D_numbaopt
resizer2D_numbaopt
"""
data4D_flat = np.reshape(
data4D, (data4D.shape[0], data4D.shape[1], data4D.shape[2] * data4D.shape[3])
)
datashape = np.asarray(data4D_flat.shape)
res_shape = np.copy(datashape)
res_shape[0:2] = np.round(datashape[0:2] / bin_factor)
data4D_res = np.zeros(res_shape.astype(int), dtype=data4D_flat.dtype)
resampled_x = np.zeros((datashape[0], res_shape[1]), data4D_flat.dtype)
resampled_f = np.zeros(res_shape[0:2], dtype=data4D_flat.dtype)
for zz in range(data4D_flat.shape[-1]):
data4D_res[:, :, zz] = resizer2D_numbaopt(
data4D_flat[:, :, zz], resampled_x, resampled_f, bin_factor
)
binned_data = np.reshape(
data4D_res,
(resampled_f.shape[0], resampled_f.shape[1], data4D.shape[2], data4D.shape[3]),
)
return binned_data | baa00278bb5e4c7fcef6f1a78019f227533de586 | 3,659,378 |
def lorentz(x, a, mu, ga):
""" Input: x - value and a=I, mu=x_0, ga - lorentz f. coeffitients (float)
Return: value of function with desired parameters in x (float)
Descr.: Calculate L-type function for given x and parameters"""
return (a * ga ** 2) / ((x - mu) ** 2 + ga ** 2) | 1af83bdca1ff14f25da86cb0f3dacbd36409f1e1 | 3,659,379 |
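# A minimal usage sketch for lorentz() above: with amplitude 1, centre 0 and
# half-width 2 the function is at half maximum at +/- the half-width.
print([lorentz(x, a=1.0, mu=0.0, ga=2.0) for x in (-2.0, 0.0, 2.0)])  # [0.5, 1.0, 0.5]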
def main(arguments):
"""
    if you call this then it will create and return the thunder obj for you
    :param arguments: a thunder object or a dictionary to initialise the thunder obj
:return:
"""
thunder = Thunder(deepcopy(arguments)) # load object
return thunder | d628d6b1fb3550b1d1056613680437b847ab7102 | 3,659,380 |
def make_train_func(
model,
loss_func,
optimizer,
dtype=None,
device=None,
call_model=None,
get_train_loss=None,
):
"""Create a train func for ``ignite``.
This function assumes that each batch is of the form ``(x, y)`` with no assumptions placed on ``x`` or ``y``.
Each batch will be transformed into a ``torch.Tensor``.
:param model:
the model to optimize, it will be called with the features
:param loss_func:
the loss function to optimize, it will be called with the
model output and the targets. The return value of the loss
function must be compatible with ``get_train_loss``.
:param optimizer:
the optimizer to use
:param dtype:
the dtype of the batch, can be a structured object, e.g., a tuple of dtypes
:param device:
the device to assign to the batch, can be a structured object, e.g.,
a tuple of devices
:param call_model:
instead of calling the model directly, ``call_model(model, x)`` it will be used.
If not given a default implementation is used, that passes ``x`` as varargs if it is a tuple,
as keyword args if it is a dict and directly otherwise.
:param get_train_loss:
The output of ``loss_func`` will be passed through ``get_train_loss`` before calling backward.
If not given a default implementation is used that takes the first item of the loss if it is a tuple
and the loss directly otherwise.
"""
def default_get_train_loss(loss):
return loss[0] if isinstance(loss, tuple) else loss
if call_model is None:
call_model = default_call_model
if get_train_loss is None:
get_train_loss = default_get_train_loss
def train_func(engine, batch):
x, y = n2t(batch, dtype=dtype, device=device)
optimizer.zero_grad()
pred = call_model(model, x)
loss = loss_func(pred, y)
train_loss = get_train_loss(loss)
train_loss.backward()
optimizer.step()
return t2n(loss)
return train_func | 5ceb230e7c6fce891a23416883b12bd09c83ccd5 | 3,659,381 |
import time
import pyautogui
def waitAndLocate(btn_img, params):
"""
Function to locate a button in the window
:param btn_img: path to the image of the button to look for
:return: coordinates + dimensions of the button
"""
start = time.time()
while True:
if time.time() - start > (3*60):
print("Timeout Error")
raise TimeoutError(f"wait and locate exceeded {str(time.time()-start)}")
# Find window and maximize
if 'no_fullscreen' not in params or params['no_fullscreen'] == False:
maximizeWindows(params)
# Make foreground window full screen - replaced with exact window name lookup
# win32gui.ShowWindow(win32gui.GetForegroundWindow(), win32con.SW_MAXIMIZE)
# Look for the button on the screen
res = pyautogui.locateOnScreen(btn_img, confidence=0.75)
# If button is found, return the location
if (res):
return res
# Wait 0.5 seconds before retrying to keep CPU usage low
time.sleep(0.5) | daf7c32d67f1d958c8dcd15e3721823863b44365 | 3,659,382 |
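# A hedged usage sketch for waitAndLocate(); "images/start_button.png" is a
# hypothetical asset path, and the call assumes pyautogui plus the module-level
# maximizeWindows() helper are available. It blocks until the button appears or
# a TimeoutError is raised after three minutes.
try:
    box = waitAndLocate("images/start_button.png", {"no_fullscreen": True})
    print("Button found at", box)
except TimeoutError as err:
    print("Gave up waiting:", err)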
from typing import List
from typing import Tuple
import torch
import torch.nn as nn
def make_preds_batch(classifier: nn.Module,
batch_elements: List[SentenceEvidence],
device: str=None,
criterion: nn.Module=None,
tensorize_model_inputs: bool=True) -> Tuple[float, List[float], List[int], List[int]]:
"""Batch predictions
Args:
classifier: a module that looks like an AttentiveClassifier
batch_elements: a list of elements to make predictions over. These must be SentenceEvidence objects.
device: Optional; what compute device this should run on
criterion: Optional; a loss function
tensorize_model_inputs: should we convert our data to tensors before passing it to the model? Useful if we have a model that performs its own tokenization
"""
# delete any "None" padding, if any (imposed by the use of the "grouper")
    batch_elements = [x for x in batch_elements if x is not None]
targets, queries, sentences = zip(*[(s.kls, s.query, s.sentence) for s in batch_elements])
ids = [(s.ann_id, s.docid, s.index) for s in batch_elements]
targets = torch.tensor(targets, dtype=torch.long, device=device)
if tensorize_model_inputs:
queries = [torch.tensor(q, dtype=torch.long) for q in queries]
sentences = [torch.tensor(s, dtype=torch.long) for s in sentences]
#queries = PaddedSequence.autopad(queries, device=device, batch_first=batch_first)
#sentences = PaddedSequence.autopad(sentences, device=device, batch_first=batch_first)
preds = classifier(queries, ids, sentences)
targets = targets.to(device=preds.device)
if criterion:
loss = criterion(preds, targets)
else:
loss = None
# .float() because pytorch 1.3 introduces a bug where argmax is unsupported for float16
hard_preds = torch.argmax(preds.float(), dim=-1)
return loss, preds, hard_preds, targets | 8656bcb3c9895c25cd3a62428d9426a34fc2d324 | 3,659,383 |
def quick_sort(array):
"""
Not Inplace, but Standard version
"""
if array == []:
return []
else:
pivot = array[-1]
smaller = quick_sort([x for x in array[0:-1] if x <= pivot])
larger = quick_sort([x for x in array[0:-1] if x > pivot])
return smaller + [pivot] + larger | 40b969855394600a94ed264f5bffade95c72455e | 3,659,384 |
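# Quick sanity check for quick_sort(); the sample lists are illustrative.
print(quick_sort([3, 6, 1, 5, 2, 4]))  # [1, 2, 3, 4, 5, 6]
print(quick_sort([]))                  # []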
def calc_laplacian_matrix(D, W):
"""
给定图的度矩阵和相似度矩阵,计算拉普拉斯矩阵
:param W: 相似度矩阵
:param D: 图的度矩阵
:return: 拉普拉斯矩阵
"""
return D - W | 542efe382457a34587615d24935c040238098610 | 3,659,385 |
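# A small worked example (numpy assumed available): W is a toy symmetric
# affinity matrix and D is built from its row sums, so every row of the
# resulting unnormalised Laplacian sums to zero.
import numpy as np

W = np.array([[0.0, 1.0, 0.5],
              [1.0, 0.0, 0.2],
              [0.5, 0.2, 0.0]])
D = np.diag(W.sum(axis=1))
L = calc_laplacian_matrix(D, W)
print(L.sum(axis=1))  # [0. 0. 0.]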
def _potrf_mhlo(platform, gpu_solver, dtype, a, lower):
"""Cholesky decomposition."""
a_type = ir.RankedTensorType(a.type)
dims = a_type.shape
m, n = dims[-2:]
assert m == n
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
lwork, opaque = gpu_solver.build_potrf_descriptor(
np.dtype(dtype), lower, batch, n)
layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
info_layout = tuple(range(num_bd - 1, -1, -1))
i32_type = ir.IntegerType.get_signless(32)
info_type = ir.RankedTensorType.get(batch_dims, i32_type)
work_layout = [0]
out = custom_call(
f"{platform}solver_potrf",
[
a.type,
info_type,
ir.RankedTensorType.get([lwork], ir.IntegerType.get_signless(8)),
],
[a],
backend_config=opaque,
operand_layouts=[layout],
result_layouts=[layout, info_layout, work_layout])
return out[:2] | 6f9a8aef2bec2d063ebaf7c739b11879a67fd342 | 3,659,386 |
def _bin2bcd(value):
"""Convert a binary value to binary coded decimal.
:param value: the binary value to convert to BCD. (required, no default)
"""
return value + 6 * (value // 10) | 508383fe8964da3a09699ee8e68f36cea4162746 | 3,659,387 |
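# Worked examples for _bin2bcd(): adding 6 for every completed group of ten
# bridges the gap between decimal and hexadecimal carries for two-digit values.
print(hex(_bin2bcd(37)))  # 0x37
print(hex(_bin2bcd(59)))  # 0x59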
import requests
from bs4 import BeautifulSoup
def osm_get_info(idx):
"""Получаем информацию об административной территории
"""
link = 'https://www.openstreetmap.org/api/0.6/relation/' + str(idx)
response = requests.get(link)
if response.status_code == 200:
soup = BeautifulSoup(response.text, 'xml')
subarea_ids = [member.get('ref') for member in soup.find_all('member', {'role':'subarea'})]
name = soup.find('tag', {'k': 'name'})
name = name.get('v')
return name, subarea_ids
return False | 0d2ee403c6bcf3a5c5ee37d01b9b0925bfac6081 | 3,659,388 |
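# A hedged usage sketch for osm_get_info(); 1234567 is a placeholder relation ID
# (not from the original source), the call performs a live HTTP request, and the
# 'xml' parser requires lxml to be installed.
result = osm_get_info(1234567)
if result:
    name, subareas = result
    print(name, "has", len(subareas), "sub-areas")
else:
    print("Relation not found or request failed")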
import sqlite3
def get_test_cases_coverage(session_id):
"""
coverage by test case
"""
tc_stats={}
tc_stats_list=[]
total_executed=0
sql='SELECT DISTINCT(test_id) FROM stats WHERE session_id=:sid AND test_id!="null"'
params={"sid":session_id}
conn=sqlite3.connect(CONNECTION_STRING)
c=conn.cursor()
c.execute(sql,params)
tests=c.fetchall()
conn.close()
if len(tests)>0:
for t in tests:
total_executed=0
sql="SELECT DISTINCT(file_id) FROM stats WHERE session_id=:sid AND test_id=:tid"
params={"sid":session_id,"tid":t[0]}
conn=sqlite3.connect(CONNECTION_STRING)
c=conn.cursor()
c.execute(sql,params)
files=c.fetchall()
conn.close()
for f in files:
line_count=get_executable_lines_count_for_file(f[0])
# get executions
sql="SELECT COUNT(DISTINCT line_guid) FROM stats WHERE session_id= :sid AND file_id= :fid AND test_id=:tid"
params={"sid":session_id,"tid":t[0],"fid":f[0]}
conn=sqlite3.connect(CONNECTION_STRING)
c=conn.cursor()
c.execute(sql,params)
executed=c.fetchone()
conn.close()
total_executed+=executed[0]
            # save the test case and its total executed-line count (once per test)
            tc_stats = {}
            tc_stats["test_id"] = t[0]
            tc_stats["total_executed"] = total_executed
            tc_stats_list.append(tc_stats)
return tc_stats_list | acceee566f53b95316c9ccd2654b89f6a60c7a5a | 3,659,389 |
def can_minimize_file(file_path):
"""Check to see if we support minimization for this file."""
# If this is not a binary file, we should be able to minimize it in some way.
if not utils.is_binary_file(file_path):
return True
# Attempt to minimize IPC dumps.
if file_path.endswith(testcase_manager.IPCDUMP_EXTENSION):
return supports_ipc_minimization(file_path)
# Other binary file formats are not supported.
return False | 3d8f5e2ee4f834a353ce8973cd64da66712c8c1c | 3,659,390 |
def generate_new_xen_xml(VIRSH_TEMPLATE, vm_name,
disk_img,
mac_addr,
memory_size=1048576, # 1GB of memory
cpu_count=1):
"""
Given a name, disk, and mac, this will output the appropriate xml
config
"""
tmp = VIRSH_TEMPLATE
tmp = tmp.replace(REPLACE_STRINGS.vm_name, vm_name)
tmp = tmp.replace(REPLACE_STRINGS.disk_img, disk_img)
tmp = tmp.replace(REPLACE_STRINGS.mac_addr, mac_addr)
tmp = tmp.replace(REPLACE_STRINGS.memory_size, str(memory_size))
tmp = tmp.replace(REPLACE_STRINGS.cpu_count, str(cpu_count))
return tmp | 28eec535a5924a847bd887b903014aca4a97dd9b | 3,659,391 |
def literal_query(query):
"""Don't interprete any special query syntax
SQLite's FTS extensions support special query syntax for AND, OR and
prefix searches, as well as grouping and negation. There are not of much
use in the dictionary case, but they break some legitimate queries. So
let's treat all queries literally by enlosing them in quotes.
"""
return '"' + query.replace('"', '') + '"' | 65c5f3215a2d36fb15b54e5420ce52ac27d1b420 | 3,659,392 |
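# How literal_query() neutralises FTS operators; the phrases are illustrative.
print(literal_query('cat OR dog'))   # "cat OR dog"  (treated as one literal phrase)
print(literal_query('say "hello"'))  # "say hello"   (embedded quotes are stripped)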
from typing import Dict
import os
import pandas as pd
def gene2symbol(key: str, value: str) -> Dict[str, str]:
"""Map between S. pombe gene IDs, symbols, synonyms and names.
# Arguments
key: str, one of {"ID", "Symbol", "Synonym", "Name"}
value: str, one of {"ID", "Symbol", "Synonym", "Name"}
# Returns
dict: key value mapping
"""
df = pd.read_csv(os.path.join(data, "sysID2product.tsv"),
skiprows=1,
header=None,
sep="\t")
df.columns = ["ID", "Symbol", "Synonymns", "Name"]
return dictify(df, key, value) | 2625a07a832ba7d4bf05d2743f1f0b56c2923e1f | 3,659,393 |
def graph_from_string(s):
"""
Turn a string like "1 2; 1->2" into a graph.
"""
vertex_string, edge_string = s.split(';')
vertices = vertex_string.split()
edge_pairs = []
for edge_sequence in edge_string.split():
sequence_nodes = edge_sequence.split('->')
for tail, head in zip(sequence_nodes[:-1], sequence_nodes[1:]):
edge_pairs.append((tail, head))
return DirectedGraph.from_edge_pairs(vertices, edge_pairs) | 772c876eb4c38fb4d595ee57fb5192622c92e837 | 3,659,394 |
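# A hedged usage sketch for graph_from_string(); it assumes the DirectedGraph
# class used inside the function is importable alongside it.
g = graph_from_string("1 2 3; 1->2->3 1->3")
# Expected: vertices {"1", "2", "3"} with edges 1->2, 2->3 and 1->3.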
def WideResnetBlocknt(channels, strides=(1, 1), channel_mismatch=False, batchnorm='std', parameterization='ntk'):
    """A WideResnet block, with or without BatchNorm."""
    Main = stax_nt.serial(
        _batch_norm_internal(batchnorm),
        stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), strides, padding='SAME', parameterization=parameterization),
        _batch_norm_internal(batchnorm),
        stax_nt.Relu(),
        stax_nt.Conv(channels, (3, 3), padding='SAME', parameterization=parameterization))
    Shortcut = stax_nt.Identity() if not channel_mismatch else stax_nt.Conv(
        channels, (3, 3), strides, padding='SAME', parameterization=parameterization)
    return stax_nt.serial(stax_nt.FanOut(2), stax_nt.parallel(Main, Shortcut), stax_nt.FanInSum()) | d1786bf36703669627807f9bf881630ff1592ef5 | 3,659,395
import torch
def inverse_pinhole_matrix(pinhole, eps=1e-6):
"""
Returns the inverted pinhole matrix from a pinhole model
"""
assert len(pinhole.shape) == 2 and pinhole.shape[1] == 12, pinhole.shape
# unpack pinhole values
fx, fy, cx, cy = torch.chunk(pinhole[..., :4], 4, dim=1) # Nx1
# create output container
k = torch.eye(4, device=pinhole.device, dtype=pinhole.dtype)
k = k.view(1, 4, 4).repeat(pinhole.shape[0], 1, 1) # Nx4x4
# fill output with inverse values
k[..., 0, 0:1] = 1. / (fx + eps)
k[..., 1, 1:2] = 1. / (fy + eps)
k[..., 0, 2:3] = -1. * cx / (fx + eps)
k[..., 1, 2:3] = -1. * cy / (fy + eps)
return k | e2fd741598b858f9d8731e4dc2b0c79913941dbf | 3,659,396 |
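# A hedged usage sketch for inverse_pinhole_matrix(): each pinhole row is assumed
# to carry 12 values with fx, fy, cx, cy in the first four slots (the remaining
# entries are ignored by this function).
import torch

pinhole = torch.zeros(1, 12)
pinhole[0, :4] = torch.tensor([500.0, 500.0, 320.0, 240.0])  # fx, fy, cx, cy
K_inv = inverse_pinhole_matrix(pinhole)
print(K_inv.shape)  # torch.Size([1, 4, 4])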
from unittest.mock import patch
async def init_integration_empty_response(hass) -> MockConfigEntry:
"""Set up the Nightscout integration in Home Assistant."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_URL: "https://some.url:1234"},
)
with patch(
"homeassistant.components.nightscout.NightscoutAPI.get_sgvs", return_value=[]
), patch(
"homeassistant.components.nightscout.NightscoutAPI.get_server_status",
return_value=SERVER_STATUS,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry | 890230dabafbfa939433251fb1b4ba2a9b14a7bf | 3,659,397 |
def create_DCT_NETWORK_INFO(networkid: str) -> dict:
"""Computes dictionary DCT_NETWORK_INFO for XML file
:param networkid: network identifier
:type networkid: str
    :return: the updated DCT_NETWORK_INFO dictionary
    :rtype: dict
    """
DCT_NETWORK_INFO.update({"id": networkid})
return DCT_NETWORK_INFO | 96eeb48e35bebfc4bc1923685f8bb627dfc5f473 | 3,659,398 |
def retrieve_question(request, uuid):
"""
"""
try:
question = Question.objects.get(pk=uuid)
except (Question.DoesNotExist, ValueError):
response_data = {
"error": {
"state": "not found",
"details": "Question object with ID {} could not be found.".format(uuid)
}
}
return Response(response_data, status=status.HTTP_404_NOT_FOUND)
if question.survey.is_private:
if request.user.is_authenticated:
if request.user == question.survey.admin or request.user in question.survey.users.all():
serializer = QuestionSerializer(question, context={'request': request})
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK)
else:
return Response({"error": "This question is part of a private survey."}, status=status.HTTP_403_FORBIDDEN)
else:
return Response({"error": "Please login."}, status=status.HTTP_401_UNAUTHORIZED)
else:
serializer = QuestionSerializer(question, context={'request': request})
response_data = serializer.data
return Response(response_data, status=status.HTTP_200_OK) | ee6409c2b724977744d66d3fba7efa17fa75284c | 3,659,399 |