code (string, 22–1.05M chars) | apis (list, 1–3.31k items) | extract_api (string, 75–3.25M chars)
---|---|---|
from urllib.parse import urljoin
from qiniu import Auth,put_file
from swiper import config
def qn_upload(filename,filepath):
    '''Upload a file to Qiniu cloud storage.'''
    # Build the authentication object
    qn = Auth(config.QN_ACCESS_KEY,config.QN_SECRET_KEY)
    # Generate an upload token, valid for 1 hour
    token = qn.upload_token(config.QN_BUCKET,filename,3600)
    # Upload the file
ret,info = put_file(token,filename,filepath)
if info.ok():
url = urljoin(config.QN_BASEURL,filename)
return True,url
else:
return False,''
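# Illustrative usage sketch (not part of the original module); the key name and
# local path below are hypothetical, and the QN_* settings are assumed to be
# defined in swiper.config:
#   ok, url = qn_upload('avatar/42.jpg', '/tmp/avatar.jpg')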
|
[
"qiniu.put_file",
"urllib.parse.urljoin",
"qiniu.Auth"
] |
[((169, 217), 'qiniu.Auth', 'Auth', (['config.QN_ACCESS_KEY', 'config.QN_SECRET_KEY'], {}), '(config.QN_ACCESS_KEY, config.QN_SECRET_KEY)\n', (173, 217), False, 'from qiniu import Auth, put_file\n'), ((326, 361), 'qiniu.put_file', 'put_file', (['token', 'filename', 'filepath'], {}), '(token, filename, filepath)\n', (334, 361), False, 'from qiniu import Auth, put_file\n'), ((393, 429), 'urllib.parse.urljoin', 'urljoin', (['config.QN_BASEURL', 'filename'], {}), '(config.QN_BASEURL, filename)\n', (400, 429), False, 'from urllib.parse import urljoin\n')]
|
#!/usr/bin/env python
#
# Generated Mon Jun 10 11:49:52 2019 by generateDS.py version 2.32.0.
# Python 3.6.7 (default, Oct 22 2018, 11:32:17) [GCC 8.2.0]
#
# Command line options:
# ('-f', '')
# ('-o', 's3_api.py')
# ('-s', 's3_sub.py')
# ('--super', 's3_api')
#
# Command line arguments:
# schemas/AmazonS3.xsd
#
# Command line:
# generateDS.py -f -o "s3_api.py" -s "s3_sub.py" --super="s3_api" schemas/AmazonS3.xsd
#
# Current working directory (os.getcwd()):
# ks33requests
#
import os
import sys
from lxml import etree as etree_
from . import s3_api as supermod
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
parser = etree_.ETCompatXMLParser()
try:
if isinstance(infile, os.PathLike):
infile = os.path.join(infile)
except AttributeError:
pass
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
#
# Globals
#
ExternalEncoding = ''
#
# Data representation classes
#
class CreateBucketSub(supermod.CreateBucket):
def __init__(self, Bucket=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
**kwargs_):
super(CreateBucketSub, self).__init__(Bucket, AccessControlList, AWSAccessKeyId, Timestamp, Signature,
**kwargs_)
supermod.CreateBucket.subclass = CreateBucketSub
# end class CreateBucketSub
class MetadataEntrySub(supermod.MetadataEntry):
def __init__(self, Name=None, Value=None, **kwargs_):
super(MetadataEntrySub, self).__init__(Name, Value, **kwargs_)
supermod.MetadataEntry.subclass = MetadataEntrySub
# end class MetadataEntrySub
class CreateBucketResponseSub(supermod.CreateBucketResponse):
def __init__(self, CreateBucketReturn=None, **kwargs_):
super(CreateBucketResponseSub, self).__init__(CreateBucketReturn, **kwargs_)
supermod.CreateBucketResponse.subclass = CreateBucketResponseSub
# end class CreateBucketResponseSub
class StatusSub(supermod.Status):
def __init__(self, Code=None, Description=None, **kwargs_):
super(StatusSub, self).__init__(Code, Description, **kwargs_)
supermod.Status.subclass = StatusSub
# end class StatusSub
class ResultSub(supermod.Result):
def __init__(self, Status=None, extensiontype_=None, **kwargs_):
super(ResultSub, self).__init__(Status, extensiontype_, **kwargs_)
supermod.Result.subclass = ResultSub
# end class ResultSub
class CreateBucketResultSub(supermod.CreateBucketResult):
def __init__(self, BucketName=None, **kwargs_):
super(CreateBucketResultSub, self).__init__(BucketName, **kwargs_)
supermod.CreateBucketResult.subclass = CreateBucketResultSub
# end class CreateBucketResultSub
class DeleteBucketSub(supermod.DeleteBucket):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(DeleteBucketSub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.DeleteBucket.subclass = DeleteBucketSub
# end class DeleteBucketSub
class DeleteBucketResponseSub(supermod.DeleteBucketResponse):
def __init__(self, DeleteBucketResponse_member=None, **kwargs_):
super(DeleteBucketResponseSub, self).__init__(DeleteBucketResponse_member, **kwargs_)
supermod.DeleteBucketResponse.subclass = DeleteBucketResponseSub
# end class DeleteBucketResponseSub
class BucketLoggingStatusSub(supermod.BucketLoggingStatus):
def __init__(self, LoggingEnabled=None, **kwargs_):
super(BucketLoggingStatusSub, self).__init__(LoggingEnabled, **kwargs_)
supermod.BucketLoggingStatus.subclass = BucketLoggingStatusSub
# end class BucketLoggingStatusSub
class LoggingSettingsSub(supermod.LoggingSettings):
def __init__(self, TargetBucket=None, TargetPrefix=None, TargetGrants=None, **kwargs_):
super(LoggingSettingsSub, self).__init__(TargetBucket, TargetPrefix, TargetGrants, **kwargs_)
supermod.LoggingSettings.subclass = LoggingSettingsSub
# end class LoggingSettingsSub
class GetBucketLoggingStatusSub(supermod.GetBucketLoggingStatus):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(GetBucketLoggingStatusSub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential,
**kwargs_)
supermod.GetBucketLoggingStatus.subclass = GetBucketLoggingStatusSub
# end class GetBucketLoggingStatusSub
class GetBucketLoggingStatusResponseSub(supermod.GetBucketLoggingStatusResponse):
def __init__(self, GetBucketLoggingStatusResponse_member=None, **kwargs_):
super(GetBucketLoggingStatusResponseSub, self).__init__(GetBucketLoggingStatusResponse_member, **kwargs_)
supermod.GetBucketLoggingStatusResponse.subclass = GetBucketLoggingStatusResponseSub
# end class GetBucketLoggingStatusResponseSub
class SetBucketLoggingStatusSub(supermod.SetBucketLoggingStatus):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
BucketLoggingStatus=None, **kwargs_):
super(SetBucketLoggingStatusSub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential,
BucketLoggingStatus, **kwargs_)
supermod.SetBucketLoggingStatus.subclass = SetBucketLoggingStatusSub
# end class SetBucketLoggingStatusSub
class SetBucketLoggingStatusResponseSub(supermod.SetBucketLoggingStatusResponse):
def __init__(self, **kwargs_):
super(SetBucketLoggingStatusResponseSub, self).__init__(**kwargs_)
supermod.SetBucketLoggingStatusResponse.subclass = SetBucketLoggingStatusResponseSub
# end class SetBucketLoggingStatusResponseSub
class GetObjectAccessControlPolicySub(supermod.GetObjectAccessControlPolicy):
def __init__(self, Bucket=None, Key=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
**kwargs_):
super(GetObjectAccessControlPolicySub, self).__init__(Bucket, Key, AWSAccessKeyId, Timestamp, Signature,
Credential, **kwargs_)
supermod.GetObjectAccessControlPolicy.subclass = GetObjectAccessControlPolicySub
# end class GetObjectAccessControlPolicySub
class GetObjectAccessControlPolicyResponseSub(supermod.GetObjectAccessControlPolicyResponse):
def __init__(self, GetObjectAccessControlPolicyResponse_member=None, **kwargs_):
super(GetObjectAccessControlPolicyResponseSub, self).__init__(GetObjectAccessControlPolicyResponse_member,
**kwargs_)
supermod.GetObjectAccessControlPolicyResponse.subclass = GetObjectAccessControlPolicyResponseSub
# end class GetObjectAccessControlPolicyResponseSub
class GetBucketAccessControlPolicySub(supermod.GetBucketAccessControlPolicy):
def __init__(self, Bucket=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(GetBucketAccessControlPolicySub, self).__init__(Bucket, AWSAccessKeyId, Timestamp, Signature, Credential,
**kwargs_)
supermod.GetBucketAccessControlPolicy.subclass = GetBucketAccessControlPolicySub
# end class GetBucketAccessControlPolicySub
class GetBucketAccessControlPolicyResponseSub(supermod.GetBucketAccessControlPolicyResponse):
def __init__(self, GetBucketAccessControlPolicyResponse_member=None, **kwargs_):
super(GetBucketAccessControlPolicyResponseSub, self).__init__(GetBucketAccessControlPolicyResponse_member,
**kwargs_)
supermod.GetBucketAccessControlPolicyResponse.subclass = GetBucketAccessControlPolicyResponseSub
# end class GetBucketAccessControlPolicyResponseSub
class GranteeSub(supermod.Grantee):
def __init__(self, extensiontype_=None, **kwargs_):
super(GranteeSub, self).__init__(extensiontype_, **kwargs_)
supermod.Grantee.subclass = GranteeSub
# end class GranteeSub
class UserSub(supermod.User):
def __init__(self, extensiontype_=None, **kwargs_):
super(UserSub, self).__init__(extensiontype_, **kwargs_)
supermod.User.subclass = UserSub
# end class UserSub
class AmazonCustomerByEmailSub(supermod.AmazonCustomerByEmail):
def __init__(self, EmailAddress=None, **kwargs_):
super(AmazonCustomerByEmailSub, self).__init__(EmailAddress, **kwargs_)
supermod.AmazonCustomerByEmail.subclass = AmazonCustomerByEmailSub
# end class AmazonCustomerByEmailSub
class CanonicalUserSub(supermod.CanonicalUser):
def __init__(self, ID=None, DisplayName=None, **kwargs_):
super(CanonicalUserSub, self).__init__(ID, DisplayName, **kwargs_)
supermod.CanonicalUser.subclass = CanonicalUserSub
# end class CanonicalUserSub
class GroupSub(supermod.Group):
def __init__(self, URI=None, **kwargs_):
super(GroupSub, self).__init__(URI, **kwargs_)
supermod.Group.subclass = GroupSub
# end class GroupSub
class GrantSub(supermod.Grant):
def __init__(self, Grantee=None, Permission=None, **kwargs_):
super(GrantSub, self).__init__(Grantee, Permission, **kwargs_)
supermod.Grant.subclass = GrantSub
# end class GrantSub
class AccessControlListSub(supermod.AccessControlList):
def __init__(self, Grant=None, **kwargs_):
super(AccessControlListSub, self).__init__(Grant, **kwargs_)
supermod.AccessControlList.subclass = AccessControlListSub
# end class AccessControlListSub
class CreateBucketConfigurationSub(supermod.CreateBucketConfiguration):
def __init__(self, LocationConstraint=None, **kwargs_):
super(CreateBucketConfigurationSub, self).__init__(LocationConstraint, **kwargs_)
supermod.CreateBucketConfiguration.subclass = CreateBucketConfigurationSub
# end class CreateBucketConfigurationSub
class LocationConstraintSub(supermod.LocationConstraint):
def __init__(self, valueOf_=None, **kwargs_):
super(LocationConstraintSub, self).__init__(valueOf_, **kwargs_)
supermod.LocationConstraint.subclass = LocationConstraintSub
# end class LocationConstraintSub
class AccessControlPolicySub(supermod.AccessControlPolicy):
def __init__(self, Owner=None, AccessControlList=None, **kwargs_):
super(AccessControlPolicySub, self).__init__(Owner, AccessControlList, **kwargs_)
supermod.AccessControlPolicy.subclass = AccessControlPolicySub
# end class AccessControlPolicySub
class SetObjectAccessControlPolicySub(supermod.SetObjectAccessControlPolicy):
def __init__(self, Bucket=None, Key=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None,
Signature=None, Credential=None, **kwargs_):
super(SetObjectAccessControlPolicySub, self).__init__(Bucket, Key, AccessControlList, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.SetObjectAccessControlPolicy.subclass = SetObjectAccessControlPolicySub
# end class SetObjectAccessControlPolicySub
class SetObjectAccessControlPolicyResponseSub(supermod.SetObjectAccessControlPolicyResponse):
def __init__(self, **kwargs_):
super(SetObjectAccessControlPolicyResponseSub, self).__init__(**kwargs_)
supermod.SetObjectAccessControlPolicyResponse.subclass = SetObjectAccessControlPolicyResponseSub
# end class SetObjectAccessControlPolicyResponseSub
class SetBucketAccessControlPolicySub(supermod.SetBucketAccessControlPolicy):
def __init__(self, Bucket=None, AccessControlList=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
Credential=None, **kwargs_):
super(SetBucketAccessControlPolicySub, self).__init__(Bucket, AccessControlList, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.SetBucketAccessControlPolicy.subclass = SetBucketAccessControlPolicySub
# end class SetBucketAccessControlPolicySub
class SetBucketAccessControlPolicyResponseSub(supermod.SetBucketAccessControlPolicyResponse):
def __init__(self, **kwargs_):
super(SetBucketAccessControlPolicyResponseSub, self).__init__(**kwargs_)
supermod.SetBucketAccessControlPolicyResponse.subclass = SetBucketAccessControlPolicyResponseSub
# end class SetBucketAccessControlPolicyResponseSub
class GetObjectSub(supermod.GetObject):
def __init__(self, Bucket=None, Key=None, GetMetadata=None, GetData=None, InlineData=None, AWSAccessKeyId=None,
Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(GetObjectSub, self).__init__(Bucket, Key, GetMetadata, GetData, InlineData, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.GetObject.subclass = GetObjectSub
# end class GetObjectSub
class GetObjectResponseSub(supermod.GetObjectResponse):
def __init__(self, GetObjectResponse_member=None, **kwargs_):
super(GetObjectResponseSub, self).__init__(GetObjectResponse_member, **kwargs_)
supermod.GetObjectResponse.subclass = GetObjectResponseSub
# end class GetObjectResponseSub
class GetObjectResultSub(supermod.GetObjectResult):
def __init__(self, Status=None, Metadata=None, Data=None, LastModified=None, ETag=None, **kwargs_):
super(GetObjectResultSub, self).__init__(Status, Metadata, Data, LastModified, ETag, **kwargs_)
supermod.GetObjectResult.subclass = GetObjectResultSub
# end class GetObjectResultSub
class GetObjectExtendedSub(supermod.GetObjectExtended):
def __init__(self, Bucket=None, Key=None, GetMetadata=None, GetData=None, InlineData=None, ByteRangeStart=None,
ByteRangeEnd=None, IfModifiedSince=None, IfUnmodifiedSince=None, IfMatch=None, IfNoneMatch=None,
ReturnCompleteObjectOnConditionFailure=None, AWSAccessKeyId=None, Timestamp=None, Signature=None,
Credential=None, **kwargs_):
super(GetObjectExtendedSub, self).__init__(Bucket, Key, GetMetadata, GetData, InlineData, ByteRangeStart,
ByteRangeEnd, IfModifiedSince, IfUnmodifiedSince, IfMatch,
IfNoneMatch, ReturnCompleteObjectOnConditionFailure, AWSAccessKeyId,
Timestamp, Signature, Credential, **kwargs_)
supermod.GetObjectExtended.subclass = GetObjectExtendedSub
# end class GetObjectExtendedSub
class GetObjectExtendedResponseSub(supermod.GetObjectExtendedResponse):
def __init__(self, GetObjectResponse=None, **kwargs_):
super(GetObjectExtendedResponseSub, self).__init__(GetObjectResponse, **kwargs_)
supermod.GetObjectExtendedResponse.subclass = GetObjectExtendedResponseSub
# end class GetObjectExtendedResponseSub
class PutObjectSub(supermod.PutObject):
def __init__(self, Bucket=None, Key=None, Metadata=None, ContentLength=None, AccessControlList=None,
StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(PutObjectSub, self).__init__(Bucket, Key, Metadata, ContentLength, AccessControlList, StorageClass,
AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.PutObject.subclass = PutObjectSub
# end class PutObjectSub
class PutObjectResponseSub(supermod.PutObjectResponse):
def __init__(self, PutObjectResponse_member=None, **kwargs_):
super(PutObjectResponseSub, self).__init__(PutObjectResponse_member, **kwargs_)
supermod.PutObjectResponse.subclass = PutObjectResponseSub
# end class PutObjectResponseSub
class PutObjectResultSub(supermod.PutObjectResult):
def __init__(self, ETag=None, LastModified=None, **kwargs_):
super(PutObjectResultSub, self).__init__(ETag, LastModified, **kwargs_)
supermod.PutObjectResult.subclass = PutObjectResultSub
# end class PutObjectResultSub
class PutObjectInlineSub(supermod.PutObjectInline):
def __init__(self, Bucket=None, Key=None, Metadata=None, Data=None, ContentLength=None, AccessControlList=None,
StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(PutObjectInlineSub, self).__init__(Bucket, Key, Metadata, Data, ContentLength, AccessControlList,
StorageClass, AWSAccessKeyId, Timestamp, Signature, Credential,
**kwargs_)
supermod.PutObjectInline.subclass = PutObjectInlineSub
# end class PutObjectInlineSub
class PutObjectInlineResponseSub(supermod.PutObjectInlineResponse):
def __init__(self, PutObjectInlineResponse_member=None, **kwargs_):
super(PutObjectInlineResponseSub, self).__init__(PutObjectInlineResponse_member, **kwargs_)
supermod.PutObjectInlineResponse.subclass = PutObjectInlineResponseSub
# end class PutObjectInlineResponseSub
class DeleteObjectSub(supermod.DeleteObject):
def __init__(self, Bucket=None, Key=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None,
**kwargs_):
super(DeleteObjectSub, self).__init__(Bucket, Key, AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.DeleteObject.subclass = DeleteObjectSub
# end class DeleteObjectSub
class DeleteObjectResponseSub(supermod.DeleteObjectResponse):
def __init__(self, DeleteObjectResponse_member=None, **kwargs_):
super(DeleteObjectResponseSub, self).__init__(DeleteObjectResponse_member, **kwargs_)
supermod.DeleteObjectResponse.subclass = DeleteObjectResponseSub
# end class DeleteObjectResponseSub
class ListBucketSub(supermod.ListBucket):
def __init__(self, Bucket=None, Prefix=None, Marker=None, MaxKeys=None, Delimiter=None, AWSAccessKeyId=None,
Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(ListBucketSub, self).__init__(Bucket, Prefix, Marker, MaxKeys, Delimiter, AWSAccessKeyId, Timestamp,
Signature, Credential, **kwargs_)
supermod.ListBucket.subclass = ListBucketSub
# end class ListBucketSub
class ListBucketResponseSub(supermod.ListBucketResponse):
def __init__(self, ListBucketResponse_member=None, **kwargs_):
super(ListBucketResponseSub, self).__init__(ListBucketResponse_member, **kwargs_)
supermod.ListBucketResponse.subclass = ListBucketResponseSub
# end class ListBucketResponseSub
class ListVersionsResponseSub(supermod.ListVersionsResponse):
def __init__(self, ListVersionsResponse_member=None, **kwargs_):
super(ListVersionsResponseSub, self).__init__(ListVersionsResponse_member, **kwargs_)
supermod.ListVersionsResponse.subclass = ListVersionsResponseSub
# end class ListVersionsResponseSub
class ListEntrySub(supermod.ListEntry):
def __init__(self, Key=None, LastModified=None, ETag=None, Size=None, Owner=None, StorageClass=None, **kwargs_):
super(ListEntrySub, self).__init__(Key, LastModified, ETag, Size, Owner, StorageClass, **kwargs_)
supermod.ListEntry.subclass = ListEntrySub
# end class ListEntrySub
class VersionEntrySub(supermod.VersionEntry):
def __init__(self, Key=None, VersionId=None, IsLatest=None, LastModified=None, ETag=None, Size=None, Owner=None,
StorageClass=None, **kwargs_):
super(VersionEntrySub, self).__init__(Key, VersionId, IsLatest, LastModified, ETag, Size, Owner, StorageClass,
**kwargs_)
supermod.VersionEntry.subclass = VersionEntrySub
# end class VersionEntrySub
class DeleteMarkerEntrySub(supermod.DeleteMarkerEntry):
def __init__(self, Key=None, VersionId=None, IsLatest=None, LastModified=None, Owner=None, **kwargs_):
super(DeleteMarkerEntrySub, self).__init__(Key, VersionId, IsLatest, LastModified, Owner, **kwargs_)
supermod.DeleteMarkerEntry.subclass = DeleteMarkerEntrySub
# end class DeleteMarkerEntrySub
class PrefixEntrySub(supermod.PrefixEntry):
def __init__(self, Prefix=None, **kwargs_):
super(PrefixEntrySub, self).__init__(Prefix, **kwargs_)
supermod.PrefixEntry.subclass = PrefixEntrySub
# end class PrefixEntrySub
class ListBucketResultSub(supermod.ListBucketResult):
def __init__(self, Metadata=None, Name=None, Prefix=None, Marker=None, NextMarker=None, MaxKeys=None,
Delimiter=None, IsTruncated=None, Contents=None, CommonPrefixes=None, **kwargs_):
super(ListBucketResultSub, self).__init__(Metadata, Name, Prefix, Marker, NextMarker, MaxKeys, Delimiter,
IsTruncated, Contents, CommonPrefixes, **kwargs_)
supermod.ListBucketResult.subclass = ListBucketResultSub
# end class ListBucketResultSub
class ListVersionsResultSub(supermod.ListVersionsResult):
def __init__(self, Metadata=None, Name=None, Prefix=None, KeyMarker=None, VersionIdMarker=None, NextKeyMarker=None,
NextVersionIdMarker=None, MaxKeys=None, Delimiter=None, IsTruncated=None, Version=None,
DeleteMarker=None, CommonPrefixes=None, **kwargs_):
super(ListVersionsResultSub, self).__init__(Metadata, Name, Prefix, KeyMarker, VersionIdMarker, NextKeyMarker,
NextVersionIdMarker, MaxKeys, Delimiter, IsTruncated, Version,
DeleteMarker, CommonPrefixes, **kwargs_)
supermod.ListVersionsResult.subclass = ListVersionsResultSub
# end class ListVersionsResultSub
class ListAllMyBucketsSub(supermod.ListAllMyBuckets):
def __init__(self, AWSAccessKeyId=None, Timestamp=None, Signature=None, **kwargs_):
super(ListAllMyBucketsSub, self).__init__(AWSAccessKeyId, Timestamp, Signature, **kwargs_)
supermod.ListAllMyBuckets.subclass = ListAllMyBucketsSub
# end class ListAllMyBucketsSub
class ListAllMyBucketsResponseSub(supermod.ListAllMyBucketsResponse):
def __init__(self, ListAllMyBucketsResponse_member=None, **kwargs_):
super(ListAllMyBucketsResponseSub, self).__init__(ListAllMyBucketsResponse_member, **kwargs_)
supermod.ListAllMyBucketsResponse.subclass = ListAllMyBucketsResponseSub
# end class ListAllMyBucketsResponseSub
class ListAllMyBucketsEntrySub(supermod.ListAllMyBucketsEntry):
def __init__(self, Name=None, CreationDate=None, **kwargs_):
super(ListAllMyBucketsEntrySub, self).__init__(Name, CreationDate, **kwargs_)
supermod.ListAllMyBucketsEntry.subclass = ListAllMyBucketsEntrySub
# end class ListAllMyBucketsEntrySub
class ListAllMyBucketsResultSub(supermod.ListAllMyBucketsResult):
def __init__(self, Owner=None, Buckets=None, **kwargs_):
super(ListAllMyBucketsResultSub, self).__init__(Owner, Buckets, **kwargs_)
supermod.ListAllMyBucketsResult.subclass = ListAllMyBucketsResultSub
# end class ListAllMyBucketsResultSub
class ListAllMyBucketsListSub(supermod.ListAllMyBucketsList):
def __init__(self, Bucket=None, **kwargs_):
super(ListAllMyBucketsListSub, self).__init__(Bucket, **kwargs_)
supermod.ListAllMyBucketsList.subclass = ListAllMyBucketsListSub
# end class ListAllMyBucketsListSub
class PostResponseSub(supermod.PostResponse):
def __init__(self, Location=None, Bucket=None, Key=None, ETag=None, **kwargs_):
super(PostResponseSub, self).__init__(Location, Bucket, Key, ETag, **kwargs_)
supermod.PostResponse.subclass = PostResponseSub
# end class PostResponseSub
class CopyObjectSub(supermod.CopyObject):
def __init__(self, SourceBucket=None, SourceKey=None, DestinationBucket=None, DestinationKey=None,
MetadataDirective=None, Metadata=None, AccessControlList=None, CopySourceIfModifiedSince=None,
CopySourceIfUnmodifiedSince=None, CopySourceIfMatch=None, CopySourceIfNoneMatch=None,
StorageClass=None, AWSAccessKeyId=None, Timestamp=None, Signature=None, Credential=None, **kwargs_):
super(CopyObjectSub, self).__init__(SourceBucket, SourceKey, DestinationBucket, DestinationKey,
MetadataDirective, Metadata, AccessControlList, CopySourceIfModifiedSince,
CopySourceIfUnmodifiedSince, CopySourceIfMatch, CopySourceIfNoneMatch,
StorageClass, AWSAccessKeyId, Timestamp, Signature, Credential, **kwargs_)
supermod.CopyObject.subclass = CopyObjectSub
# end class CopyObjectSub
class CopyObjectResponseSub(supermod.CopyObjectResponse):
def __init__(self, CopyObjectResult=None, **kwargs_):
super(CopyObjectResponseSub, self).__init__(CopyObjectResult, **kwargs_)
supermod.CopyObjectResponse.subclass = CopyObjectResponseSub
# end class CopyObjectResponseSub
class CopyObjectResultSub(supermod.CopyObjectResult):
def __init__(self, LastModified=None, ETag=None, **kwargs_):
super(CopyObjectResultSub, self).__init__(LastModified, ETag, **kwargs_)
supermod.CopyObjectResult.subclass = CopyObjectResultSub
# end class CopyObjectResultSub
class RequestPaymentConfigurationSub(supermod.RequestPaymentConfiguration):
def __init__(self, Payer=None, **kwargs_):
super(RequestPaymentConfigurationSub, self).__init__(Payer, **kwargs_)
supermod.RequestPaymentConfiguration.subclass = RequestPaymentConfigurationSub
# end class RequestPaymentConfigurationSub
class VersioningConfigurationSub(supermod.VersioningConfiguration):
def __init__(self, Status=None, MfaDelete=None, **kwargs_):
super(VersioningConfigurationSub, self).__init__(Status, MfaDelete, **kwargs_)
supermod.VersioningConfiguration.subclass = VersioningConfigurationSub
# end class VersioningConfigurationSub
class NotificationConfigurationSub(supermod.NotificationConfiguration):
def __init__(self, TopicConfiguration=None, **kwargs_):
super(NotificationConfigurationSub, self).__init__(TopicConfiguration, **kwargs_)
supermod.NotificationConfiguration.subclass = NotificationConfigurationSub
# end class NotificationConfigurationSub
class TopicConfigurationSub(supermod.TopicConfiguration):
def __init__(self, Topic=None, Event=None, **kwargs_):
super(TopicConfigurationSub, self).__init__(Topic, Event, **kwargs_)
supermod.TopicConfiguration.subclass = TopicConfigurationSub
# end class TopicConfigurationSub
def get_root_tag(node):
tag = supermod.Tag_pattern_.match(node.tag).groups()[-1]
rootClass = None
rootClass = supermod.GDSClassesMapping.get(tag)
if rootClass is None and hasattr(supermod, tag):
rootClass = getattr(supermod, tag)
return tag, rootClass
def parse(inFilename, silence=False):
parser = None
doc = parsexml_(inFilename, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"',
pretty_print=True)
return rootObj
def parseEtree(inFilename, silence=False):
parser = None
doc = parsexml_(inFilename, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
if sys.version_info.major == 2:
from StringIO import StringIO
else:
from io import BytesIO as StringIO
parser = None
doc = parsexml_(StringIO(inString), parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='xmlns:tns="http://s3.amazonaws.com/doc/2006-03-01/"')
return rootObj
def parseLiteral(inFilename, silence=False):
parser = None
doc = parsexml_(inFilename, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'CreateBucket'
rootClass = supermod.CreateBucket
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from s3_api import *\n\n')
sys.stdout.write('import s3_api as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
USAGE_TEXT = """
Usage: python ???.py <infilename>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def main():
args = sys.argv[1:]
if len(args) != 1:
usage()
infilename = args[0]
parse(infilename)
if __name__ == '__main__':
# import pdb; pdb.set_trace()
main()
|
[
"sys.stdout.write",
"io.BytesIO",
"lxml.etree.tostring",
"lxml.etree.parse",
"lxml.etree.ETCompatXMLParser",
"os.path.join",
"sys.exit"
] |
[((945, 990), 'lxml.etree.parse', 'etree_.parse', (['infile'], {'parser': 'parser'}), '(infile, parser=parser, **kwargs)\n', (957, 990), True, 'from lxml import etree as etree_\n'), ((30208, 30219), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (30216, 30219), False, 'import sys\n'), ((773, 799), 'lxml.etree.ETCompatXMLParser', 'etree_.ETCompatXMLParser', ([], {}), '()\n', (797, 799), True, 'from lxml import etree as etree_\n'), ((27517, 27561), 'sys.stdout.write', 'sys.stdout.write', (['"""<?xml version="1.0" ?>\n"""'], {}), '(\'<?xml version="1.0" ?>\\n\')\n', (27533, 27561), False, 'import sys\n'), ((28368, 28459), 'lxml.etree.tostring', 'etree_.tostring', (['rootElement'], {'pretty_print': '(True)', 'xml_declaration': '(True)', 'encoding': '"""utf-8"""'}), "(rootElement, pretty_print=True, xml_declaration=True,\n encoding='utf-8')\n", (28383, 28459), True, 'from lxml import etree as etree_\n'), ((28489, 28514), 'sys.stdout.write', 'sys.stdout.write', (['content'], {}), '(content)\n', (28505, 28514), False, 'import sys\n'), ((28523, 28545), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (28539, 28545), False, 'import sys\n'), ((28813, 28831), 'io.BytesIO', 'StringIO', (['inString'], {}), '(inString)\n', (28821, 28831), True, 'from io import BytesIO as StringIO\n'), ((29182, 29226), 'sys.stdout.write', 'sys.stdout.write', (['"""<?xml version="1.0" ?>\n"""'], {}), '(\'<?xml version="1.0" ?>\\n\')\n', (29198, 29226), False, 'import sys\n'), ((29839, 29884), 'sys.stdout.write', 'sys.stdout.write', (['"""#from s3_api import *\n\n"""'], {}), "('#from s3_api import *\\n\\n')\n", (29855, 29884), False, 'import sys\n'), ((29893, 29940), 'sys.stdout.write', 'sys.stdout.write', (['"""import s3_api as model_\n\n"""'], {}), "('import s3_api as model_\\n\\n')\n", (29909, 29940), False, 'import sys\n'), ((29949, 29998), 'sys.stdout.write', 'sys.stdout.write', (['"""rootObj = model_.rootClass(\n"""'], {}), "('rootObj = model_.rootClass(\\n')\n", (29965, 29998), False, 'import sys\n'), ((30067, 30090), 'sys.stdout.write', 'sys.stdout.write', (['""")\n"""'], {}), "(')\\n')\n", (30083, 30090), False, 'import sys\n'), ((874, 894), 'os.path.join', 'os.path.join', (['infile'], {}), '(infile)\n', (886, 894), False, 'import os\n')]
|
from os import path
import numpy as np
from torch import nn
import torch
def get_embedding(embedding_path=None,
embedding_np=None,
num_embeddings=0, embedding_dim=0, freeze=True, **kargs):
"""Create embedding from:
1. saved numpy vocab array, embedding_path, freeze
2. numpy embedding array, embedding_np, freeze
3. raw embedding n_vocab, embedding_dim
"""
if isinstance(embedding_path, str) and path.exists(embedding_path):
embedding_np = np.load(embedding_path)
if embedding_np is not None:
return nn.Embedding.from_pretrained(torch.Tensor(embedding_np), freeze=freeze)
return nn.Embedding(num_embeddings, embedding_dim, **kargs)
# extract last output in last time step
def extract_last_timestep(output, lengths, batch_first):
"""Get the output of last time step.
output: seq_len x batch_size x dim if not batch_first. Else batch_size x seq_len x dim
length: one dimensional torch.LongTensor of lengths in a batch.
"""
idx = (lengths - 1).view(-1, 1).expand(len(lengths), output.size(2))
time_dimension = 1 if batch_first else 0
idx = idx.unsqueeze(time_dimension)
if output.is_cuda:
idx = idx.cuda(output.data.get_device())
return output.gather(time_dimension, idx).squeeze(time_dimension)
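# Illustrative usage sketch (not from the original module), showing the three
# creation paths described in the get_embedding docstring; the file name, array
# variable, and sizes below are hypothetical:
#   emb = get_embedding(embedding_path='vocab_vectors.npy')           # 1. saved numpy array on disk
#   emb = get_embedding(embedding_np=pretrained_array, freeze=False)  # 2. in-memory numpy array
#   emb = get_embedding(num_embeddings=10000, embedding_dim=300)      # 3. randomly initialized table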
|
[
"torch.nn.Embedding",
"torch.Tensor",
"os.path.exists",
"numpy.load"
] |
[((665, 717), 'torch.nn.Embedding', 'nn.Embedding', (['num_embeddings', 'embedding_dim'], {}), '(num_embeddings, embedding_dim, **kargs)\n', (677, 717), False, 'from torch import nn\n'), ((458, 485), 'os.path.exists', 'path.exists', (['embedding_path'], {}), '(embedding_path)\n', (469, 485), False, 'from os import path\n'), ((510, 533), 'numpy.load', 'np.load', (['embedding_path'], {}), '(embedding_path)\n', (517, 533), True, 'import numpy as np\n'), ((611, 637), 'torch.Tensor', 'torch.Tensor', (['embedding_np'], {}), '(embedding_np)\n', (623, 637), False, 'import torch\n')]
|
# coding=utf-8
"""
Copyright (c) 2021 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import binaryninja
from .amigahunk import AmigaHunk
from .constants import HUNKTYPES
class AmigaLoadSeg(AmigaHunk):
name :str = 'AmigaLoadSeg'
long_name :str = 'Amiga 500 LoadSeg format'
loadseg_magic :bytes = b"\x00\x00\x03\xf3"
def __init__(self, data)->None:
super().__init__(data)
if self.is_valid_for_data(self.data):
self.create_segments()
def create_segments(self)->None:
hunktypes :list = []
numhunks :int = 0
self.br.seek(0x08)
numhunks = self.br.read32be()
first_hunk :int = self.br.read32be()
last_hunk :int = self.br.read32be()
self.br.seek_relative(0x04)
binaryninja.log_debug("%d %d %d %d" % (len(self.data),numhunks, first_hunk, last_hunk))
for i in range(0, numhunks):
hunktypes.append(self.br.read32be())
self.parse_hunktype(hunktypes[i])
@classmethod
def is_valid_for_data(self, data)->bool:
header :bytes = data.read(0,8)
strings :bytes = header[4:8]
self.is_loadseg :bool = header[0:4] == b"\x00\x00\x03\xf3"
if strings != b"\x00\x00\x00\x00" and self.is_loadseg == True:
binaryninja.log_error("λ - Unsupported LOADSEG file")
return False
return self.is_loadseg
|
[
"binaryninja.log_error"
] |
[((2267, 2320), 'binaryninja.log_error', 'binaryninja.log_error', (['"""λ - Unsupported LOADSEG file"""'], {}), "('λ - Unsupported LOADSEG file')\n", (2288, 2320), False, 'import binaryninja\n')]
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from gym.envs.registration import register
register(
id='MB_FetchSlide-v1',
entry_point='pddm.envs.fetch.slide:FetchSlideEnv',
max_episode_steps=50,
)
register(
id='MB_FetchPush-v1',
entry_point='pddm.envs.fetch.push:FetchPushEnv',
max_episode_steps=50,
)
register(
id='MB_FetchPickAndPlace-v1',
entry_point='pddm.envs.fetch.pick_and_place:FetchPickAndPlaceEnv',
max_episode_steps=50,
)
register(
id='MB_FetchReach-v1',
entry_point='pddm.envs.fetch.reach:FetchReachEnv',
max_episode_steps=50,
)
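# Illustrative usage note (not part of the original file): once registered,
# these environments can be instantiated by id through the standard gym
# factory, e.g. `gym.make('MB_FetchSlide-v1')`, which is why this module only
# needs the register() calls above.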
|
[
"gym.envs.registration.register"
] |
[((620, 729), 'gym.envs.registration.register', 'register', ([], {'id': '"""MB_FetchSlide-v1"""', 'entry_point': '"""pddm.envs.fetch.slide:FetchSlideEnv"""', 'max_episode_steps': '(50)'}), "(id='MB_FetchSlide-v1', entry_point=\n 'pddm.envs.fetch.slide:FetchSlideEnv', max_episode_steps=50)\n", (628, 729), False, 'from gym.envs.registration import register\n'), ((741, 847), 'gym.envs.registration.register', 'register', ([], {'id': '"""MB_FetchPush-v1"""', 'entry_point': '"""pddm.envs.fetch.push:FetchPushEnv"""', 'max_episode_steps': '(50)'}), "(id='MB_FetchPush-v1', entry_point=\n 'pddm.envs.fetch.push:FetchPushEnv', max_episode_steps=50)\n", (749, 847), False, 'from gym.envs.registration import register\n'), ((859, 996), 'gym.envs.registration.register', 'register', ([], {'id': '"""MB_FetchPickAndPlace-v1"""', 'entry_point': '"""pddm.envs.fetch.pick_and_place:FetchPickAndPlaceEnv"""', 'max_episode_steps': '(50)'}), "(id='MB_FetchPickAndPlace-v1', entry_point=\n 'pddm.envs.fetch.pick_and_place:FetchPickAndPlaceEnv', max_episode_steps=50\n )\n", (867, 996), False, 'from gym.envs.registration import register\n'), ((1003, 1112), 'gym.envs.registration.register', 'register', ([], {'id': '"""MB_FetchReach-v1"""', 'entry_point': '"""pddm.envs.fetch.reach:FetchReachEnv"""', 'max_episode_steps': '(50)'}), "(id='MB_FetchReach-v1', entry_point=\n 'pddm.envs.fetch.reach:FetchReachEnv', max_episode_steps=50)\n", (1011, 1112), False, 'from gym.envs.registration import register\n')]
|
lastlineKEY = ""
lastlineTOKEN = ""
lastlinePORTALACCOUNT = ""
import json
try:
import requests
HAVE_REQUESTS = True
except ImportError:
HAVE_REQUESTS = False
from viper.common.abstracts import Module
from viper.core.session import __sessions__
BASE_URL = 'https://analysis.lastline.com'
SUBMIT_URL = '/analysis/submit/file'
class LastLine(Module):
cmd = 'lastline'
description = 'Submit files and retrieve reports from LastLine (default will print short summary) '
authors = ['gelos']
def __init__(self):
super(LastLine, self).__init__()
self.parser.add_argument('-s', '--submit', action='store_true', help='Submit file to LastLine')
self.parser.add_argument('-r','--report', action='store_true', help='Get report from LastLine')
def run(self):
super(LastLine, self).run()
if self.args is None:
return
if not HAVE_REQUESTS:
self.log('error', "Missing dependency, install requests (`pip install requests`)")
return
if not __sessions__.is_set():
self.log('error', "No session opened")
return
if self.args.submit:
try:
file = {'file' : open(__sessions__.current.file.path, 'rb').read()}
data = {'key':lastlineKEY, 'api_token':lastlineTOKEN,'push_to_portal_account':lastlinePORTALACCOUNT}
response = requests.post(BASE_URL+SUBMIT_URL,data=data,files=file)
response = response.json()
if response['success'] == 0:
self.log('error',response['error'])
return
if response['success'] == 1:
self.log('info','Successfully submitted file to LastLine, task UUID: '+response['data']['task_uuid'])
return
except Exception as e:
self.log('error', "Failed performing request: {0}".format(e))
return
try:
data = {'key':lastlineKEY, 'api_token':lastlineTOKEN,'md5':__sessions__.current.file.md5,'push_to_portal_account':lastlinePORTALACCOUNT}
response = requests.post(BASE_URL+SUBMIT_URL,data=data)
response = response.json()
if response['success'] == 0:
self.log('error',response['error'])
return
if response['success'] == 1:
self.log('info', "LastLine Report:")
if self.args.report:
self.log('',json.dumps(response,indent=4,sort_keys=False))
return
#file malicious scoring
if 'score' in response['data']:
self.log('info','Malicious score: '+str(response['data']['score']))
if 'submission' in response['data']:
self.log('info','Submission date: '+str(response['data']['submission']))
                #generating malicious activity list
if 'malicious_activity' in response['data']:
malicous_activity = []
i = 0
while(i < len(response['data']['malicious_activity'])):
malicous_activity.append([i,response['data']['malicious_activity'][i]])
i += 1
self.log('table', dict(header=['id', 'Malicious Activity'], rows=malicous_activity))
#generating url_summary list
if 'url_summary' in response['data']['report']:
url_summary = []
i = 0
while (i < len(response['data']['report']['url_summary'])):
url_summary.append([i,response['data']['report']['url_summary'][i]])
i += 1
self.log('table', dict(header=['id', 'URL Found'], rows=url_summary))
return
except Exception as e:
self.log('error', "Failed performing request: {0}".format(e))
|
[
"viper.core.session.__sessions__.is_set",
"requests.post",
"json.dumps"
] |
[((1062, 1083), 'viper.core.session.__sessions__.is_set', '__sessions__.is_set', ([], {}), '()\n', (1081, 1083), False, 'from viper.core.session import __sessions__\n'), ((2177, 2224), 'requests.post', 'requests.post', (['(BASE_URL + SUBMIT_URL)'], {'data': 'data'}), '(BASE_URL + SUBMIT_URL, data=data)\n', (2190, 2224), False, 'import requests\n'), ((1430, 1489), 'requests.post', 'requests.post', (['(BASE_URL + SUBMIT_URL)'], {'data': 'data', 'files': 'file'}), '(BASE_URL + SUBMIT_URL, data=data, files=file)\n', (1443, 1489), False, 'import requests\n'), ((2540, 2587), 'json.dumps', 'json.dumps', (['response'], {'indent': '(4)', 'sort_keys': '(False)'}), '(response, indent=4, sort_keys=False)\n', (2550, 2587), False, 'import json\n')]
|
from functools import wraps
from flask import session, url_for, request, redirect
def is_authenticated():
return 'username' in session
def login_required(f):
@wraps(f)
def wrapper(*args, **kwargs):
if is_authenticated():
return f(*args, **kwargs)
else:
return redirect(url_for('auth.display_login_form') + '?next={}'.format(request.path))
return wrapper
def redirect_if_authenticated(f):
@wraps(f)
def wrapper(*args, **kwargs):
if is_authenticated():
try:
return redirect(request.args['next'])
except KeyError:
return redirect(url_for('home_page.display_problem_list'))
else:
return f(*args, **kwargs)
return wrapper
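# Illustrative usage sketch (not part of the original module); the blueprint,
# route, and view name below are hypothetical:
#
#   @home_page.route('/problems')
#   @login_required
#   def display_problem_list():
#       ...
#
# Unauthenticated requests are redirected to the login form with a ?next=
# parameter so the user returns to the originally requested page, while
# redirect_if_authenticated sends already-logged-in users away from the
# login/registration views.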
|
[
"flask.redirect",
"flask.url_for",
"functools.wraps"
] |
[((169, 177), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (174, 177), False, 'from functools import wraps\n'), ((452, 460), 'functools.wraps', 'wraps', (['f'], {}), '(f)\n', (457, 460), False, 'from functools import wraps\n'), ((566, 596), 'flask.redirect', 'redirect', (["request.args['next']"], {}), "(request.args['next'])\n", (574, 596), False, 'from flask import session, url_for, request, redirect\n'), ((323, 357), 'flask.url_for', 'url_for', (['"""auth.display_login_form"""'], {}), "('auth.display_login_form')\n", (330, 357), False, 'from flask import session, url_for, request, redirect\n'), ((658, 699), 'flask.url_for', 'url_for', (['"""home_page.display_problem_list"""'], {}), "('home_page.display_problem_list')\n", (665, 699), False, 'from flask import session, url_for, request, redirect\n')]
|
# #This will allow us to create file paths accross operating systems
import pathlib
# #Path to collect data from Recources folder
election_csvpath =pathlib.Path('PyPoll/Resources/election_data.csv')
#Module for reading CSV files
import csv
with open(election_csvpath, mode='r') as csvfile:
#CSV reader specifies delimiter and variable that holds content
reader = csv.reader(csvfile, delimiter= ',')
header = next(csvfile)
votes = {}
for row in reader:
#complete list of canditates who received votes
#candidates vote count
candidate_name = row[2]
if candidate_name in votes:
votes[candidate_name] += 1
else:
votes[candidate_name] = 1
print (votes)
vote_counts = (list(votes.values()))
# Total number of votes cast
total_count = sum(vote_counts)
print(total_count)
winner = list(votes.keys())[0]
votes_summary = {}
for candidate in votes.keys():
if votes[candidate] >votes[winner]:
winner = candidate
votes_summary[candidate] = {'votes': votes[candidate], 'vote_pct': round((votes[candidate]/total_count)*100,2)}
if candidate== winner:
votes_summary[candidate]["is_winner"] = True
else:
votes_summary[candidate]["is_winner"] = False
print(votes_summary)
election_result = pathlib.Path('PyPoll/Analysis/election_results.txt')
with open(election_result,'w') as outputfile:
csvwriter = csv.writer(outputfile)
election_result = (
f"\n\nElection Results\n"
f"-------------------------\n"
f"Total Votes: {total_count}\n"
f"-------------------------\n"
)
print(election_result, end="")
outputfile.write(election_result)
for candidate in votes_summary.keys():
voter_output = f"{candidate}: {votes_summary[candidate]['vote_pct']}% ({votes_summary[candidate]['votes']})\n"
print(voter_output, end="")
outputfile.write(voter_output)
winning_candidate_summary = (
f"-------------------------\n"
f"Winner: {winner}\n"
f"-------------------------\n"
)
outputfile.write(winning_candidate_summary)
print(winning_candidate_summary)
|
[
"pathlib.Path",
"csv.reader",
"csv.writer"
] |
[((149, 199), 'pathlib.Path', 'pathlib.Path', (['"""PyPoll/Resources/election_data.csv"""'], {}), "('PyPoll/Resources/election_data.csv')\n", (161, 199), False, 'import pathlib\n'), ((1334, 1386), 'pathlib.Path', 'pathlib.Path', (['"""PyPoll/Analysis/election_results.txt"""'], {}), "('PyPoll/Analysis/election_results.txt')\n", (1346, 1386), False, 'import pathlib\n'), ((375, 409), 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (385, 409), False, 'import csv\n'), ((1450, 1472), 'csv.writer', 'csv.writer', (['outputfile'], {}), '(outputfile)\n', (1460, 1472), False, 'import csv\n')]
|
#!/usr/bin/env python3
# goal: of the 6230 objects exported by v5 (vat-mints), how many are Purses vs Payments vs other?
import sys, json, time, hashlib, base64
from collections import defaultdict
exports = {} # kref -> type
double_spent = set()
unspent = set() # kref
died_unspent = {}
def find_interfaces(body):
if isinstance(body, list):
for item in body:
yield from find_interfaces(item)
elif isinstance(body, dict):
if "@qclass" in body:
if body["@qclass"] == "slot":
iface = body.get("iface", None)
index = body["index"]
yield (index, iface)
else:
for item in body.values():
yield from find_interfaces(item)
for line in sys.stdin:
data = json.loads(line)
if data.get("vatID", None) != "v5":
continue
if data["type"] == "syscall":
if data["ksc"][0] == "send":
            raise RuntimeError("vat-mints never exports anything")
if data["ksc"][0] == "resolve":
resolutions = data["ksc"][2]
for (kref, rejection, capdata) in resolutions:
slots = capdata["slots"]
for (index, iface) in find_interfaces(json.loads(capdata["body"])):
kref = slots[index]
#print("export", kref, iface)
exports[kref] = iface
unspent.add(kref)
if data["type"] == "deliver":
if data["kd"][0] == "message" and data["kd"][2]["method"] in ["deposit", "burn"]:
kref = data["kd"][2]["args"]["slots"][0]
if kref not in unspent:
double_spent.add(kref)
unspent.discard(kref)
if data["kd"][0] == "dropExports":
for kref in data["kd"][1]:
#print("delete", kref)
if kref in unspent:
print("died unspent:", kref)
died_unspent[kref] = exports[kref]
unspent.remove(kref)
del exports[kref]
counts = defaultdict(int)
for kref in sorted(exports):
iface = exports[kref].removeprefix("Alleged: ")
counts[iface] += 1
#print(kref, exports[kref])
for iface in sorted(counts):
print("%20s : %4d" % (iface, counts[iface]))
print("total:", sum(counts.values()))
print("live unspent:", len(unspent))
counts = defaultdict(int)
for kref in unspent:
iface = exports[kref].removeprefix("Alleged: ")
counts[iface] += 1
for iface in sorted(counts):
print(" %20s : %4d" % (iface, counts[iface]))
print("died unspent:", len(died_unspent))
counts = defaultdict(int)
for kref,iface in died_unspent.items():
iface = iface.removeprefix("Alleged: ")
counts[iface] += 1
for iface in sorted(counts):
print(" %20s : %4d" % (iface, counts[iface]))
print("double spent:", len(double_spent))
|
[
"collections.defaultdict",
"json.loads"
] |
[((2059, 2075), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2070, 2075), False, 'from collections import defaultdict\n'), ((2375, 2391), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2386, 2391), False, 'from collections import defaultdict\n'), ((2619, 2635), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (2630, 2635), False, 'from collections import defaultdict\n'), ((786, 802), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (796, 802), False, 'import sys, json, time, hashlib, base64\n'), ((1226, 1253), 'json.loads', 'json.loads', (["capdata['body']"], {}), "(capdata['body'])\n", (1236, 1253), False, 'import sys, json, time, hashlib, base64\n')]
|
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import numpy as np
import tensorflow as tf
from parlai.core.agents import Teacher
from . import utils
from .build import build
from ...utils import coreference_utils
class CoreferenceTeacher(Teacher):
"""Teacher for coreference resolution task"""
@staticmethod
def add_cmdline_args(argparser):
"""Parameters of agent and default values"""
group = argparser.add_argument_group('Coreference Teacher')
group.add_argument('--language', type=str, default='ru')
group.add_argument('--predictions_folder', type=str, default='predicts',
help='folder where to dump conll predictions, scorer will use this folder')
group.add_argument('--scorer_path', type=str, default='scorer/reference-coreference-scorers/v8.01/scorer.pl',
help='path to CoNLL scorer perl script')
group.add_argument('--valid_ratio', type=float,
default=0.2, help='valid_set ratio')
group.add_argument('--test_ratio', type=float,
default=0.2, help='test_set ratio')
group.add_argument('--teacher_seed', type=int, default=42, help='seed')
group.add_argument('--raw-dataset-path', type=str, default=None,
help='Path to folder with two subfolders: dataset and scorer. '
'These two folders are extracted rucoref_29.10.2015.zip and '
'reference-coreference-scorers.v8.01.tar.gz')
def __init__(self, opt, shared=None):
"""Initialize the parameters for CoreferenceTeacher"""
super().__init__(opt, shared)
self.last_observation = None
self.id = 'two-step-coref'
self.seed = opt['teacher_seed']
np.random.seed(seed=self.seed)
random.seed(a=self.seed)
tf.set_random_seed(seed=self.seed)
if shared:
raise RuntimeError('Additional batching is not supported')
build(opt)
self.dt = opt['datatype'].split(':')[0]
self.datapath = os.path.join(opt['datapath'], 'coreference_scorer_model', opt['language'])
self.valid_path = None
self.train_path = None
self.predictions_folder = os.path.join(self.datapath, opt['predictions_folder'], self.dt)
self.scorer_path = os.path.join(self.datapath, opt['scorer_path'])
# in train mode we use train dataset to train model
# and valid dataset to adjust threshold
# in valid and test mode we use test dataset
if self.dt == 'train':
self.valid_path = os.path.join(self.datapath, 'valid')
self.train_path = os.path.join(self.datapath, 'train')
elif self.dt in ['test', 'valid']:
self.valid_path = os.path.join(self.datapath, 'test')
else:
raise ValueError('Unknown mode: {}. Available modes: train, test, valid.'.format(self.dt))
self.train_documents = [] if self.train_path is None else list(sorted(os.listdir(self.train_path)))
self.valid_documents = [] if self.valid_path is None else list(sorted(os.listdir(self.valid_path)))
self.len = 1
self.epoch = 0
self._epoch_done = False
def act(self):
"""reads all documents and returns them"""
self._epoch_done = True
train_conll = [open(os.path.join(self.train_path, file), 'r').readlines() for file in self.train_documents]
valid_conll = [open(os.path.join(self.valid_path, file), 'r').readlines() for file in self.valid_documents]
return {'id': self.id, 'conll': train_conll, 'valid_conll': valid_conll}
def observe(self, observation):
"""saves observation"""
self.last_observation = observation
self.epoch += 1
def report(self):
"""calls scorer on last observation and reports result"""
utils.save_observations(self.last_observation['valid_conll'], self.predictions_folder)
res = coreference_utils.score(self.scorer_path, self.valid_path, self.predictions_folder)
return {'f1': res['conll-F-1']}
def reset(self):
self._epoch_done = False
def epoch_done(self):
return self._epoch_done
def __len__(self):
return self.len
|
[
"numpy.random.seed",
"tensorflow.set_random_seed",
"random.seed",
"os.path.join",
"os.listdir"
] |
[((2425, 2455), 'numpy.random.seed', 'np.random.seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (2439, 2455), True, 'import numpy as np\n'), ((2464, 2488), 'random.seed', 'random.seed', ([], {'a': 'self.seed'}), '(a=self.seed)\n', (2475, 2488), False, 'import random\n'), ((2497, 2531), 'tensorflow.set_random_seed', 'tf.set_random_seed', ([], {'seed': 'self.seed'}), '(seed=self.seed)\n', (2515, 2531), True, 'import tensorflow as tf\n'), ((2716, 2790), 'os.path.join', 'os.path.join', (["opt['datapath']", '"""coreference_scorer_model"""', "opt['language']"], {}), "(opt['datapath'], 'coreference_scorer_model', opt['language'])\n", (2728, 2790), False, 'import os\n'), ((2887, 2950), 'os.path.join', 'os.path.join', (['self.datapath', "opt['predictions_folder']", 'self.dt'], {}), "(self.datapath, opt['predictions_folder'], self.dt)\n", (2899, 2950), False, 'import os\n'), ((2978, 3025), 'os.path.join', 'os.path.join', (['self.datapath', "opt['scorer_path']"], {}), "(self.datapath, opt['scorer_path'])\n", (2990, 3025), False, 'import os\n'), ((3249, 3285), 'os.path.join', 'os.path.join', (['self.datapath', '"""valid"""'], {}), "(self.datapath, 'valid')\n", (3261, 3285), False, 'import os\n'), ((3316, 3352), 'os.path.join', 'os.path.join', (['self.datapath', '"""train"""'], {}), "(self.datapath, 'train')\n", (3328, 3352), False, 'import os\n'), ((3426, 3461), 'os.path.join', 'os.path.join', (['self.datapath', '"""test"""'], {}), "(self.datapath, 'test')\n", (3438, 3461), False, 'import os\n'), ((3658, 3685), 'os.listdir', 'os.listdir', (['self.train_path'], {}), '(self.train_path)\n', (3668, 3685), False, 'import os\n'), ((3766, 3793), 'os.listdir', 'os.listdir', (['self.valid_path'], {}), '(self.valid_path)\n', (3776, 3793), False, 'import os\n'), ((4004, 4039), 'os.path.join', 'os.path.join', (['self.train_path', 'file'], {}), '(self.train_path, file)\n', (4016, 4039), False, 'import os\n'), ((4120, 4155), 'os.path.join', 'os.path.join', (['self.valid_path', 'file'], {}), '(self.valid_path, file)\n', (4132, 4155), False, 'import os\n')]
|
#!/usr/bin/env python
# coding: utf-8
# ## Load and process Park et al. data
#
# For each sample, we want to compute:
#
# * (non-silent) binary mutation status in the gene of interest
# * binary copy gain/loss status in the gene of interest
# * what "class" the gene of interest is in (more detail on what this means below)
#
# We'll save this to a file since the preprocessing takes a few minutes, so we can load it quickly in downstream analysis scripts.
# In[1]:
from pathlib import Path
import pickle as pkl
import pandas as pd
import sys; sys.path.append('..')
import config as cfg
get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')
# In[2]:
# park et al. geneset info
park_loss_data = cfg.data_dir / 'park_loss_df.tsv'
park_gain_data = cfg.data_dir / 'park_gain_df.tsv'
# park et al. significant gene info
park_loss_sig_data = cfg.data_dir / 'park_loss_df_sig_only.tsv'
park_gain_sig_data = cfg.data_dir / 'park_gain_df_sig_only.tsv'
# mutation and copy number data
pancancer_pickle = Path('/home/jake/research/mpmp/data/pancancer_data.pkl')
# ### Load data from Park et al. supp. info
# In[3]:
park_loss_df = pd.read_csv(park_loss_data, sep='\t', index_col=0)
park_loss_df.head()
# In[4]:
park_gain_df = pd.read_csv(park_gain_data, sep='\t', index_col=0)
park_gain_df.head()
# ### Load mutation and CNV info
# In[5]:
with open(pancancer_pickle, 'rb') as f:
pancancer_data = pkl.load(f)
# In[6]:
# get (binary) mutation data
# 1 = observed non-silent mutation in this gene for this sample, 0 otherwise
mutation_df = pancancer_data[1]
print(mutation_df.shape)
mutation_df.iloc[:5, :5]
# In[7]:
# we use the data source and preprocessing code from the pancancer repo, here:
# https://github.com/greenelab/pancancer/blob/d1b3de7fa387d0a44d0a4468b0ac30918ed66886/scripts/initialize/process_copynumber.py#L21
copy_thresh_df = (
pd.read_csv(cfg.data_dir / 'pancan_GISTIC_threshold.tsv',
sep='\t', index_col=0)
.drop(columns=['Locus ID', 'Cytoband'])
)
copy_thresh_df.columns = copy_thresh_df.columns.str[0:15]
# thresholded copy number includes 5 values [-2, -1, 0, 1, 2], which
# correspond to "deep loss", "moderate loss", "no change",
# "moderate gain", and "deep gain", respectively.
print(copy_thresh_df.shape)
copy_thresh_df.iloc[:5, :5]
# In[8]:
sample_freeze_df = pancancer_data[0]
copy_samples = list(
set(sample_freeze_df.SAMPLE_BARCODE)
.intersection(set(copy_thresh_df.columns))
)
print(len(copy_samples))
# In[9]:
# make sure we're not losing too many samples, a few is fine
print(sorted(set(sample_freeze_df.SAMPLE_BARCODE) - set(copy_thresh_df.columns)))
# In[10]:
copy_thresh_df = (copy_thresh_df
.T
.loc[sorted(copy_samples)]
.fillna(0)
.astype(int)
)
print(copy_thresh_df.shape)
copy_thresh_df.iloc[:5, :5]
# In[11]:
# here, we want to use "moderate" and "deep" loss/gain to define CNV
# loss/gain (to match Park et al.)
#
# note that this is different to the more conservative approach of using
# "deep loss/gain" only as in our classifiers
copy_loss_df = (copy_thresh_df
.replace(to_replace=[1, 2], value=0)
.replace(to_replace=[-1, -2], value=1)
)
print(copy_loss_df.shape)
copy_loss_df.iloc[:5, :5]
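# (illustrative sketch, not in the original notebook) the more conservative
# "deep only" alternative mentioned above would count only the +/-2 calls,
# e.g. for loss:
# copy_loss_deep_df = copy_thresh_df.replace(to_replace=[1, 2, -1], value=0).replace(to_replace=-2, value=1)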
# In[12]:
copy_gain_df = (copy_thresh_df
.replace(to_replace=[-1, -2], value=0)
.replace(to_replace=[1, 2], value=1)
)
print(copy_gain_df.shape)
copy_gain_df.iloc[:5, :5]
# ### Classify genes/cancer types into "classes"
#
# In [the Park et al. paper](https://www.nature.com/articles/s41467-021-27242-3#Sec4), they describe 4 "classes" of driver genes:
#
# 1. Genes that function exclusively as one-hit drivers, no significant co-occurrence with CNAs
# 2. Genes that interact with CNA loss in at least one cancer type - "two-hit loss" drivers (i.e. classical tumor suppressors)
# 3. Genes that interact with CNA gain in at least one cancer type - "two-hit gain" drivers (for some examples/explanation of "two-hit" oncogenes, see [this paper](https://www.nature.com/articles/s41586-020-2175-2))
# 4. Genes that interact with both CNA loss and CNA gain across multiple cancer types - "two-hit loss and gain" drivers
#
# Here, we label each of the genes from the Park et al. data with their "class", since we want to segment our analyses in this way too.
# In[13]:
park_loss_sig_df = pd.read_csv(park_loss_sig_data, sep='\t', index_col=0)
park_gain_sig_df = pd.read_csv(park_gain_sig_data, sep='\t', index_col=0)
class_4_ids = (
set(park_loss_sig_df.index.unique()).intersection(
set(park_gain_sig_df.index.unique())
))
class_2_ids = set(park_loss_sig_df.index.unique()) - class_4_ids
class_3_ids = set(park_gain_sig_df.index.unique()) - class_4_ids
class_1_ids = (
set(park_loss_df.index.unique()) - (
class_4_ids.union(class_2_ids, class_3_ids)
)
)
print(len(park_loss_df.index.unique()))
print('class 1:', len(class_1_ids))
print('class 2:', len(class_2_ids))
print('class 3:', len(class_3_ids))
print('class 4:', len(class_4_ids))
print(sorted(class_4_ids))
# In[14]:
def id_to_class(i):
if i in class_2_ids:
return 'class 2'
elif i in class_3_ids:
return 'class 3'
elif i in class_4_ids:
return 'class 4'
else:
return 'class 1'
loss_class = {i: id_to_class(i) for i in park_loss_df.index.unique()}
park_loss_df['class'] = park_loss_df.index.map(loss_class)
print(park_loss_df['class'].unique())
park_loss_df.head()
# In[15]:
gain_class = {i: id_to_class(i) for i in park_gain_df.index.unique()}
park_gain_df['class'] = park_gain_df.index.map(gain_class)
print(park_gain_df['class'].unique())
park_gain_df.head()
# In[16]:
sample_freeze_df.head()
# ### Retrieve and format per-sample information
#
# We want to generate a dataframe with the following information:
#
# * Sample ID, gene/tissue
# * Mutation status (binary) for sample in gene
# * CNV status (binary) for sample in gene, gain/loss for oncogene/TSG respectively
# * Park et al. gene "class" (class 1/2/3/4 as defined above)
# * Sample "number of hits" (none/one/both)
# In[17]:
def get_info_for_gene_and_tissue(identifier, all_info_df, copy_change):
"""Given a gene and tissue, load the relevant mutation information.
    'num_hits' is what we will segment our plots by: 'none' == neither a point
    mutation nor a CNV observed for the given sample, 'one' == either a point
    mutation or a CNV but not both, 'both' == both a point mutation and a CNV
    """
info_df = {}
gene, tissue = identifier.split('_')
if tissue == 'COADREAD':
tissue_samples = (
sample_freeze_df[sample_freeze_df.DISEASE.isin(['COAD', 'READ'])]
.SAMPLE_BARCODE
)
else:
tissue_samples = (
sample_freeze_df[sample_freeze_df.DISEASE == tissue]
.SAMPLE_BARCODE
)
# TODO: not sure why these don't match
tissue_samples = (
mutation_df.index.intersection(tissue_samples)
.intersection(copy_loss_df.index)
.intersection(copy_gain_df.index)
)
class_name = (all_info_df
.loc[all_info_df.index == identifier, ['class']]
).values[0]
info_df['class_name'] = class_name
# get mutation status for samples
info_df['mutation_status'] = mutation_df.loc[tissue_samples, gene].values
# get copy status for samples
    if copy_change == 'gain':
        info_df['cnv_status'] = copy_gain_df.loc[tissue_samples, gene].values
    elif copy_change == 'loss':
        info_df['cnv_status'] = copy_loss_df.loc[tissue_samples, gene].values
info_df = pd.DataFrame(info_df, index=tissue_samples)
def hits_from_mut_info(row):
if row['mutation_status'] == 1 and row['cnv_status'] == 1:
return 'both'
elif row['mutation_status'] == 1 or row['cnv_status'] == 1:
return 'one'
else:
return 'none'
info_df['num_hits'] = info_df.apply(hits_from_mut_info, axis=1)
return info_df
get_info_for_gene_and_tissue('TP53_BRCA', park_loss_df, 'loss')
# ### Format and pickle all per-sample info
#
# We'll end up pickling a dict that maps each identifier (gene/cancer type combination) to a dataframe, assigning a "num_hits" class to each sample for that gene.
#
# We'll create two of these, one for copy gains and one for copy losses, to be used downstream in our distance/similarity analyses.
# In[18]:
cfg.distance_data_dir.mkdir(exist_ok=True)
park_gain_num_hits = {}
for identifier in park_gain_df.index:
park_gain_num_hits[identifier] = get_info_for_gene_and_tissue(identifier,
park_gain_df,
'gain')
park_gain_num_hits['TP53_BRCA'].head()
# In[19]:
with open(cfg.distance_gain_info, 'wb') as f:
pkl.dump(park_gain_num_hits, f)
# In[20]:
park_loss_num_hits = {}
for identifier in park_loss_df.index:
park_loss_num_hits[identifier] = get_info_for_gene_and_tissue(identifier,
park_loss_df,
'loss')
park_loss_num_hits['TP53_BRCA'].head()
# In[21]:
with open(cfg.distance_loss_info, 'wb') as f:
pkl.dump(park_loss_num_hits, f)
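# (hedged downstream-usage sketch, not part of the original notebook) the
# pickles written above map 'GENE_CANCERTYPE' identifiers to per-sample
# dataframes, so a downstream analysis script could reload them like:
# with open(cfg.distance_gain_info, 'rb') as f:
#     gain_num_hits = pkl.load(f)
# gain_num_hits['TP53_BRCA'].num_hits.value_counts()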
|
[
"sys.path.append",
"config.distance_data_dir.mkdir",
"pandas.DataFrame",
"pickle.dump",
"pandas.read_csv",
"pathlib.Path",
"pickle.load"
] |
[((553, 574), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (568, 574), False, 'import sys\n'), ((1059, 1115), 'pathlib.Path', 'Path', (['"""/home/jake/research/mpmp/data/pancancer_data.pkl"""'], {}), "('/home/jake/research/mpmp/data/pancancer_data.pkl')\n", (1063, 1115), False, 'from pathlib import Path\n'), ((1189, 1239), 'pandas.read_csv', 'pd.read_csv', (['park_loss_data'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(park_loss_data, sep='\\t', index_col=0)\n", (1200, 1239), True, 'import pandas as pd\n'), ((1288, 1338), 'pandas.read_csv', 'pd.read_csv', (['park_gain_data'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(park_gain_data, sep='\\t', index_col=0)\n", (1299, 1338), True, 'import pandas as pd\n'), ((4393, 4447), 'pandas.read_csv', 'pd.read_csv', (['park_loss_sig_data'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(park_loss_sig_data, sep='\\t', index_col=0)\n", (4404, 4447), True, 'import pandas as pd\n'), ((4467, 4521), 'pandas.read_csv', 'pd.read_csv', (['park_gain_sig_data'], {'sep': '"""\t"""', 'index_col': '(0)'}), "(park_gain_sig_data, sep='\\t', index_col=0)\n", (4478, 4521), True, 'import pandas as pd\n'), ((8532, 8574), 'config.distance_data_dir.mkdir', 'cfg.distance_data_dir.mkdir', ([], {'exist_ok': '(True)'}), '(exist_ok=True)\n', (8559, 8574), True, 'import config as cfg\n'), ((1467, 1478), 'pickle.load', 'pkl.load', (['f'], {}), '(f)\n', (1475, 1478), True, 'import pickle as pkl\n'), ((7694, 7737), 'pandas.DataFrame', 'pd.DataFrame', (['info_df'], {'index': 'tissue_samples'}), '(info_df, index=tissue_samples)\n', (7706, 7737), True, 'import pandas as pd\n'), ((8978, 9009), 'pickle.dump', 'pkl.dump', (['park_gain_num_hits', 'f'], {}), '(park_gain_num_hits, f)\n', (8986, 9009), True, 'import pickle as pkl\n'), ((9426, 9457), 'pickle.dump', 'pkl.dump', (['park_loss_num_hits', 'f'], {}), '(park_loss_num_hits, f)\n', (9434, 9457), True, 'import pickle as pkl\n'), ((1928, 2013), 'pandas.read_csv', 'pd.read_csv', (["(cfg.data_dir / 'pancan_GISTIC_threshold.tsv')"], {'sep': '"""\t"""', 'index_col': '(0)'}), "(cfg.data_dir / 'pancan_GISTIC_threshold.tsv', sep='\\t', index_col=0\n )\n", (1939, 2013), True, 'import pandas as pd\n')]
|
"""
Parse, don't validate. - <NAME>
"""
from munch import Munch
from .functions import TomlFunction
from .shared import OnThrowValue
def parse_on_throw(from_obj, to_obj):
"""
Expects "or_else" to already have been processed on "to_obj"
"""
throw_action = {
"or_else": OnThrowValue.OrElse,
"throw": OnThrowValue.Throw,
"skip": OnThrowValue.Skip,
}.get(from_obj.on_throw, None)
if not throw_action:
raise TypeError(f"Unknown 'on_throw' action given: {from_obj.on_throw}")
if throw_action == OnThrowValue.OrElse and not hasattr(to_obj, "or_else"):
raise TypeError(
"If 'on_throw' action is 'or_else', then 'or_else' must be defined."
)
return throw_action
class ProcessParser:
def process(self, process):
process_obj = Munch()
for action_name, action in process.items():
process_obj[action_name] = self.process_action(action)
return process_obj
def process_action(self, action):
action_obj = Munch()
if isinstance(action.input_paths, list) and all(
isinstance(element, str) for element in action.input_paths
):
action_obj.input_paths = action.input_paths
else:
raise TypeError("input_paths must be a list of strings.")
if isinstance(action.output_path, str):
action_obj.output_path = action.output_path
else:
raise TypeError("output_path must be a string.")
if action.function in TomlFunction._functions:
action_obj.function = TomlFunction._functions[action.function]
else:
raise TypeError(f"unknown function: {action.function}")
many = action.pop("many", False) is True
action_obj.many = many
if action.get("or_else"):
action_obj.or_else = action.or_else
if action.get("on_throw"):
throw_action = parse_on_throw(action, action_obj)
action_obj.on_throw = throw_action
return action_obj
class PreprocessParser(ProcessParser):
def parse(self, preprocess):
return self.process(preprocess)
class PostprocessParser(ProcessParser):
def parse(self, postprocess):
return self.process(postprocess)
class FieldsParser:
reserved_words = {
"input_paths",
"possible_paths",
"path_condition",
"from_type",
"to_type",
"function",
"or_else",
"on_throw",
"mapping",
}
def parse(self, fields):
field_objs = Munch()
many = fields.pop("many", False) is True
field_objs.many = many
if not fields:
raise TypeError("'fields' cannot be empty (what are we mapping?)")
for field_name, field in fields.items():
field_obj = self.parse_field(field)
field_objs[field_name] = self.parse_extra_fields(
field_name, field, field_obj
)
return field_objs
def parse_field(self, field):
field_obj = Munch()
field_obj = self.parse_paths(field, field_obj)
if hasattr(field, "or_else"):
field_obj.or_else = field.or_else
if field.get("on_throw"):
throw_action = parse_on_throw(field, field_obj)
field_obj.on_throw = throw_action
if field.get("from_type"):
# TODO: Is it possible to check valid definitions during parse?
field_obj.from_type = field.from_type
if field.get("mapping"):
# TODO: 'mapping' and 'from_type' should not both be possible
field_obj.mapping = field.mapping
if field.get("function"):
if field.function in TomlFunction._functions:
field_obj.function = TomlFunction._functions[field.function]
else:
raise TypeError(f"unknown function: {field.function}")
return field_obj
def parse_paths(self, field, field_obj):
if not hasattr(field, "input_paths") and not hasattr(field, "possible_paths"):
raise TypeError(
"Either 'input_paths' or 'possible_paths' must be declared. Aborting."
)
if hasattr(field, "input_paths") and hasattr(field, "possible_paths"):
raise TypeError(
"Either 'input_paths' or 'possible_paths' must be declared, but not both."
)
if hasattr(field, "input_paths"):
field_obj.input_paths = self.parse_input_paths(field)
else:
field_obj.possible_paths = self.parse_possible_paths(field)
field_obj.path_condition = field.path_condition
return field_obj
def parse_input_paths(self, field):
if isinstance(field.input_paths, list) and all(
isinstance(element, str) for element in field.input_paths
):
if len(field.input_paths) > 1 and not field.get("function"):
raise TypeError(
"'input_paths' must be of length 1 if 'function' is not defined"
)
return field.input_paths
else:
raise TypeError("input_paths must be a list of strings.")
def parse_possible_paths(self, field):
if isinstance(field.possible_paths, list) and all(
isinstance(element, str) for element in field.possible_paths
):
if not field.get("path_condition"):
raise TypeError(
"'path_condition' must be set if 'possible_paths' is set."
)
return field.possible_paths
else:
raise TypeError("possible_paths must be a list of strings.")
def parse_extra_fields(self, field_name, field, field_obj):
"""
Handle non-reserved keywords on the Field object
For now, the only allowed non-reserved keyword is the parent's field_name
"""
from_type = field.get("from_type")
field_obj["_copy_fields"] = []
for key, value in field.items():
if key in self.reserved_words:
continue
if key != field_name:
raise TypeError(f"Unknown key found on field definition: {field_name}")
if not from_type:
raise TypeError(
"Custom values cannot be set on a definition without declaring a nested object from_type"
)
field_obj[key] = value
for nested_key in value:
field_obj["_copy_fields"].append(nested_key)
return field_obj
class Parser:
def parse(self, toml_obj: Munch):
if not hasattr(toml_obj, "from_type"):
raise TypeError("'from_type' must be declared at the top-level.")
from_type = toml_obj.from_type
if not hasattr(toml_obj, "to_type"):
raise TypeError("'to_type' must be declared at the top-level.")
to_type = toml_obj.to_type
type_ = toml_obj.pop("__type__", "object")
include_type = toml_obj.pop("include_type", True) is True
if type_ not in ("object", "list"):
raise TypeError(
f"Only declared types available for __type__ are: object, list. Found: {type_}"
)
parsed_obj = Munch()
parsed_obj.to_type = to_type
parsed_obj.__type__ = type_
parsed_obj.include_type = include_type
if toml_obj.get("preprocess"):
parser = PreprocessParser()
parsed_obj["preprocess"] = parser.parse(toml_obj.preprocess)
if not hasattr(toml_obj, "fields"):
raise TypeError(
"'fields' is a required field for a Styx definition mapping."
)
fields_parser = FieldsParser()
parsed_obj["fields"] = fields_parser.parse(toml_obj.fields)
if toml_obj.get("postprocess"):
parser = PostprocessParser()
parsed_obj["postprocess"] = parser.parse(toml_obj.postprocess)
return from_type, to_type, parsed_obj
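if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module: a minimal mapping
    # definition accepted by Parser.parse, inferred from the checks above.
    # The type and field names ("SourceRecord", "source_name", ...) are
    # hypothetical.
    from munch import munchify
    example = munchify({
        "from_type": "SourceRecord",
        "to_type": "TargetRecord",
        "fields": {"name": {"input_paths": ["source_name"]}},
    })
    print(Parser().parse(example))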
|
[
"munch.Munch"
] |
[((825, 832), 'munch.Munch', 'Munch', ([], {}), '()\n', (830, 832), False, 'from munch import Munch\n'), ((1040, 1047), 'munch.Munch', 'Munch', ([], {}), '()\n', (1045, 1047), False, 'from munch import Munch\n'), ((2582, 2589), 'munch.Munch', 'Munch', ([], {}), '()\n', (2587, 2589), False, 'from munch import Munch\n'), ((3075, 3082), 'munch.Munch', 'Munch', ([], {}), '()\n', (3080, 3082), False, 'from munch import Munch\n'), ((7320, 7327), 'munch.Munch', 'Munch', ([], {}), '()\n', (7325, 7327), False, 'from munch import Munch\n')]
|
# -*- coding: utf-8 -*-
"""
Project: neurohacking
File: clench.py
Author: wffirilat
"""
import numpy as np
import time
import sys
import plugin_interface as plugintypes
from open_bci_v3 import OpenBCISample
class PluginClench(plugintypes.IPluginExtended):
def __init__(self):
self.release = True
self.packetnum = -1
self.threshold = None
self.uthreshold = None
self.ticknum = None
self.storelength = 1024
self.starttime = None
self.state = 'unstarted'
self.channel = 3
self.restingmax, self.restingmin = 0, 0
self.clenchmax, self.clenchmin = 0, 0
self.unclenchmax, self.unclenchmin = 0, 0
self.rawdata = np.zeros((8, self.storelength))
self.data = np.zeros((8, self.storelength))
def activate(self):
print("clench activated")
# called with each new sample
def __call__(self, sample: OpenBCISample):
if sample.id == 0:
if self.packetnum == -1:
self.starttime = time.time()
self.packetnum += 1
self.ticknum = self.packetnum * 256 + sample.id
self.rawdata[:, (sample.id + 256 * self.packetnum) % self.storelength] = sample.channel_data
self.data[:, (sample.id + 256 * self.packetnum) % self.storelength] = [v - avg for avg, v in zip(
[sum(self.rawdata[i, :]) / self.storelength for i in range(8)],
sample.channel_data
)]
#print(np.median(self.rawdata[3,:])) #The reason this is here is because it might help our basis be better
if self.state != 'calibrated':
self.calibratetick()
else:
self.tick()
def calibratetick(self):
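        # Calibration timeline, derived from the dt thresholds below:
        # 0-4 s get into position, 4-6 s record resting min/max, 6-7 s prompt,
        # 7-9 s record clench min/max, 9-10 s record unclench min/max, then
        # the clench (and, if self.release, unclench) thresholds are set
        # halfway between the resting and clenched extremes.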
# print(self.data)
dt = time.time() - self.starttime
if self.state == "unstarted":
print("Prepare to calibrate")
self.state = "positioning"
elif self.state == "positioning":
if dt > 4:
print('Calibrating')
self.state = 'resting'
elif self.state == 'resting':
if dt > 6:
print("Resting data gathered; Prepare to clench")
self.state = 'clench'
return
if self.current >= self.restingmax:
self.restingmax = self.current
if self.current <= self.restingmin:
self.restingmin = self.current
elif self.state == 'clench':
if dt > 7:
print("Clench NOW!")
self.state = 'clenching'
return
elif self.state == 'clenching':
if dt > 9:
print('Unclench!!')
self.state = 'postclench'
return
if self.current > self.clenchmax:
self.clenchmax = self.current
if self.current < self.clenchmin:
self.clenchmin = self.current
elif self.state == 'postclench':
if dt > 10:
self.threshold = self.restingmax + ((self.clenchmax - self.restingmax) / 2)
if self.release:
self.uthreshold = self.restingmin + ((self.clenchmin - self.restingmin) / 2)
self.state = 'calibrated'
print ("Resting Max", self.restingmax, "Resting Min", self.restingmin, "\n")
print ("Clench Max,", self.clenchmax, "Clench Min",self.clenchmin, "\n")
if self.release:
print ("Unclench Max,", self.unclenchmax, "Unclench Min",self.unclenchmin, "\n")
return
if self.release:
if self.current > self.unclenchmax:
self.unclenchmax = self.current
if self.current < self.unclenchmin:
self.unclenchmin = self.current
@property
def current(self):
return self.data[self.channel, self.ticknum % self.storelength]
def tick(self):
if self.current > self.unclenchmax-((self.current-self.unclenchmax)/5):#watch this work!
print(f" {self.current}: Clenched!!")
...
#if self.release:
# if self.current < self.uthreshold:
# print(f" {self.ticknum}: Unclenched!!")
|
[
"numpy.zeros",
"time.time"
] |
[((719, 750), 'numpy.zeros', 'np.zeros', (['(8, self.storelength)'], {}), '((8, self.storelength))\n', (727, 750), True, 'import numpy as np\n'), ((771, 802), 'numpy.zeros', 'np.zeros', (['(8, self.storelength)'], {}), '((8, self.storelength))\n', (779, 802), True, 'import numpy as np\n'), ((1766, 1777), 'time.time', 'time.time', ([], {}), '()\n', (1775, 1777), False, 'import time\n'), ((1042, 1053), 'time.time', 'time.time', ([], {}), '()\n', (1051, 1053), False, 'import time\n')]
|
import discord, time, os, praw, random, json
from discord.ext import commands, tasks
from discord.ext.commands import has_permissions, MissingPermissions
from discord.utils import get
from itertools import cycle
import datetime as dt
from datetime import datetime
done3 = []
beg_lim_users = []
timers = {}
done = []
steal_lim_users = []
timers2 = {}
done2 = []
embeddata = {}
embeddata["icon"] = "http://luna-development.orgfree.com/data/discord/meme6/logo.jpg"
embeddata["name"] = "Meme6"
embeddata["version"] = "2.0"
class App(commands.Cog):
def __init__(self, client):
self.client = client
print("Loading Cog: MONEY")
@commands.command(aliases=["resetmoney", "moneyreset"])
@has_permissions(administrator=True)
async def moneysetup(self, ctx):
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
usr_num = 0
bot_num = 0
for user in ctx.guild.members:
if user.bot == True:
bot_num += 1
elif user.id in done3:
pass
else:
done3.append(user.id)
f = open(f"__{user.id}__.json", "w+")
f.write("500")
f.close()
usr_num += 1
embed=discord.Embed(title="SETUP", description="Running Setup", color=0x00eeff)
embed.set_author(name=embeddata["name"], icon_url=embeddata["icon"])
embed.add_field(name="Guild id:", value=str(ctx.guild.id), inline=False)
embed.add_field(name="Users", value=str(usr_num), inline=False)
embed.add_field(name="Bots", value=str(bot_num), inline=True)
embed.set_footer(text=embeddata["name"]+" ["+embeddata["version"]+"]")
await ctx.send(embed=embed)
os.chdir("..")
os.chdir("..")
@commands.command(aliases=["bal", "bank"])
async def balance(self, ctx, who=None):
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
if who == None:
who = int(ctx.message.author.id)
else:
who = who.replace("@", "").replace("!", "").replace(">", "").replace("<", "")
who = int(who)
f = open(f"__{who}__.json", "r")
bal = f.read()
f.close()
embed=discord.Embed(title="Balance", color=0x00eeff)
embed.set_author(name=embeddata["name"], icon_url=embeddata["icon"])
embed.add_field(name="Total", value="£"+str(bal), inline=False)
embed.set_footer(text=embeddata["name"]+" ["+embeddata["version"]+"]")
await ctx.send(embed=embed)
os.chdir("..")
os.chdir("..")
@tasks.loop(seconds = 1)
async def begtimer():
for user_id in beg_lim_users:
if user_id in done:
pass
old = timers[user_id]
new = old - 1
timers[user_id] = new
if timers[user_id] == 0:
beg_lim_users.remove(user_id)
timers.pop(user_id)
done.remove(user_id)
else:
done.append(user_id)
timers[user_id] = 50
@tasks.loop(seconds = 1)
async def stealtimer():
for user_id in steal_lim_users:
if user_id in done2:
pass
old = timers2[user_id]
new = old - 1
timers2[user_id] = new
if timers2[user_id] == 0:
steal_lim_users.remove(user_id)
timers2.pop(user_id)
done2.remove(user_id)
else:
done2.append(user_id)
timers2[user_id] = 50
@commands.command()
async def beg(self, ctx):
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
if ctx.message.author.id in beg_lim_users:
left = timers[ctx.message.author.id]
await ctx.send(f"You need to wait {left} seconds before you can use this again!")
os.chdir("..")
os.chdir("..")
return
else:
beg_lim_users.append(ctx.message.author.id)
x = random.randint(0, 100)
if x > 25:
c = True
else:
c = False
await ctx.send("No Coins for you!")
if c == True:
amm = random.randint(50, 300)
if amm > 295:
amm = random.randint(400, 500)
await ctx.send(f"Here have {amm} coins!")
f = open(f"__{ctx.message.author.id}__.json")
c_b = f.read()
f.close()
c_b =int(c_b)+int(amm)
f = open(f"__{ctx.message.author.id}__.json", "w+")
f.write(str(c_b))
f.close()
os.chdir("..")
os.chdir("..")
@commands.command()
async def passive(self, ctx, choice):
if choice == "on":
with open("passive.json") as f:
passive_list = json.load(f)
passive_list[ctx.message.author.id] = True
with open("passive.json", "w+") as f:
json.dump(passive_list, f)
await ctx.send(f"Passive mode now {choice}")
elif choice == "off":
with open("passive.json") as f:
passive_list = json.load(f)
passive_list[ctx.message.author.id] = False
with open("passive.json", "w+") as f:
json.dump(passive_list, f)
await ctx.send(f"Passive mode now {choice}")
else:
await ctx.send(f"{choice} is not a valid option, please choose from on or off")
@commands.command()
async def steal(self, ctx, who=None):
with open("passive.json") as f:
passive_list = json.load(f)
p = passive_list[str(ctx.message.author.id)]
if p == True:
await ctx.send("You can't steal, your in passive mode you can change that using the passive command")
return
if ctx.message.author.id in steal_lim_users:
left = timers2[ctx.message.author.id]
await ctx.send(f"You need to wait {left} seconds before you can use this again!")
os.chdir("..")
os.chdir("..")
return
else:
steal_lim_users.append(ctx.message.author.id)
os.chdir("money/")
folder = str(str(ctx.guild.id)+"/")
try:
os.chdir(folder)
except:
os.mkdir(folder)
os.chdir(folder)
w = who
if who == None:
await ctx.send("You need to tell me who to steal from!")
else:
who = who.replace("@", "").replace("!", "").replace(">", "").replace("<", "")
who = int(who)
f = open(f"__{who}__.json")
ee = f.read()
if int(ee) < 400:
await ctx.send("This person does not have more than 400 in there bank its not worth it!")
f.close()
return
f.close()
chance = random.randint(0, 100)
if chance > 30:
x = True
else:
await ctx.send("Oh no, you have been caught!")
x = False
if x == True:
amm = random.randint(1, 400)
await ctx.send(f"You stole {amm} from {w}")
f = open(f"__{ctx.message.author.id}__.json")
c = f.read()
f.close()
c = int(c)+amm
f = open(f"__{ctx.message.author.id}__.json", "w+")
f.write(str(c))
f.close()
f = open(f"__{who}__.json")
c = f.read()
f.close()
c = int(c)-amm
f = open(f"__{who}__.json", "w+")
f.write(str(c))
f.close()
os.chdir("..")
os.chdir("..")
begtimer.start()
stealtimer.start()
def setup(client):
client.add_cog(App(client))
|
[
"os.mkdir",
"json.dump",
"json.load",
"discord.ext.commands.command",
"random.randint",
"discord.Embed",
"discord.ext.commands.has_permissions",
"discord.ext.tasks.loop",
"os.chdir"
] |
[((652, 706), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['resetmoney', 'moneyreset']"}), "(aliases=['resetmoney', 'moneyreset'])\n", (668, 706), False, 'from discord.ext import commands, tasks\n'), ((712, 747), 'discord.ext.commands.has_permissions', 'has_permissions', ([], {'administrator': '(True)'}), '(administrator=True)\n', (727, 747), False, 'from discord.ext.commands import has_permissions, MissingPermissions\n'), ((1920, 1961), 'discord.ext.commands.command', 'commands.command', ([], {'aliases': "['bal', 'bank']"}), "(aliases=['bal', 'bank'])\n", (1936, 1961), False, 'from discord.ext import commands, tasks\n'), ((2852, 2873), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'seconds': '(1)'}), '(seconds=1)\n', (2862, 2873), False, 'from discord.ext import commands, tasks\n'), ((3369, 3390), 'discord.ext.tasks.loop', 'tasks.loop', ([], {'seconds': '(1)'}), '(seconds=1)\n', (3379, 3390), False, 'from discord.ext import commands, tasks\n'), ((3900, 3918), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (3916, 3918), False, 'from discord.ext import commands, tasks\n'), ((5205, 5223), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (5221, 5223), False, 'from discord.ext import commands, tasks\n'), ((6026, 6044), 'discord.ext.commands.command', 'commands.command', ([], {}), '()\n', (6042, 6044), False, 'from discord.ext import commands, tasks\n'), ((793, 811), 'os.chdir', 'os.chdir', (['"""money/"""'], {}), "('money/')\n", (801, 811), False, 'import discord, time, os, praw, random, json\n'), ((1379, 1449), 'discord.Embed', 'discord.Embed', ([], {'title': '"""SETUP"""', 'description': '"""Running Setup"""', 'color': '(61183)'}), "(title='SETUP', description='Running Setup', color=61183)\n", (1392, 1449), False, 'import discord, time, os, praw, random, json\n'), ((1876, 1890), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (1884, 1890), False, 'import discord, time, os, praw, random, json\n'), ((1899, 1913), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (1907, 1913), False, 'import discord, time, os, praw, random, json\n'), ((2014, 2032), 'os.chdir', 'os.chdir', (['"""money/"""'], {}), "('money/')\n", (2022, 2032), False, 'import discord, time, os, praw, random, json\n'), ((2489, 2532), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Balance"""', 'color': '(61183)'}), "(title='Balance', color=61183)\n", (2502, 2532), False, 'import discord, time, os, praw, random, json\n'), ((2808, 2822), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (2816, 2822), False, 'import discord, time, os, praw, random, json\n'), ((2831, 2845), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (2839, 2845), False, 'import discord, time, os, praw, random, json\n'), ((3957, 3975), 'os.chdir', 'os.chdir', (['"""money/"""'], {}), "('money/')\n", (3965, 3975), False, 'import discord, time, os, praw, random, json\n'), ((5161, 5175), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (5169, 5175), False, 'import discord, time, os, praw, random, json\n'), ((5184, 5198), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (5192, 5198), False, 'import discord, time, os, praw, random, json\n'), ((881, 897), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (889, 897), False, 'import discord, time, os, praw, random, json\n'), ((2102, 2118), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (2110, 2118), False, 'import discord, time, os, praw, random, json\n'), ((4045, 4061), 'os.chdir', 'os.chdir', 
(['folder'], {}), '(folder)\n', (4053, 4061), False, 'import discord, time, os, praw, random, json\n'), ((4342, 4356), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (4350, 4356), False, 'import discord, time, os, praw, random, json\n'), ((4369, 4383), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (4377, 4383), False, 'import discord, time, os, praw, random, json\n'), ((4489, 4511), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (4503, 4511), False, 'import discord, time, os, praw, random, json\n'), ((6154, 6166), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6163, 6166), False, 'import discord, time, os, praw, random, json\n'), ((6585, 6599), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (6593, 6599), False, 'import discord, time, os, praw, random, json\n'), ((6612, 6626), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (6620, 6626), False, 'import discord, time, os, praw, random, json\n'), ((6730, 6748), 'os.chdir', 'os.chdir', (['"""money/"""'], {}), "('money/')\n", (6738, 6748), False, 'import discord, time, os, praw, random, json\n'), ((8381, 8395), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (8389, 8395), False, 'import discord, time, os, praw, random, json\n'), ((8408, 8422), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (8416, 8422), False, 'import discord, time, os, praw, random, json\n'), ((926, 942), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (934, 942), False, 'import discord, time, os, praw, random, json\n'), ((955, 971), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (963, 971), False, 'import discord, time, os, praw, random, json\n'), ((2147, 2163), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (2155, 2163), False, 'import discord, time, os, praw, random, json\n'), ((2176, 2192), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (2184, 2192), False, 'import discord, time, os, praw, random, json\n'), ((4090, 4106), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (4098, 4106), False, 'import discord, time, os, praw, random, json\n'), ((4119, 4135), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (4127, 4135), False, 'import discord, time, os, praw, random, json\n'), ((4704, 4727), 'random.randint', 'random.randint', (['(50)', '(300)'], {}), '(50, 300)\n', (4718, 4727), False, 'import discord, time, os, praw, random, json\n'), ((5368, 5380), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5377, 5380), False, 'import discord, time, os, praw, random, json\n'), ((5504, 5530), 'json.dump', 'json.dump', (['passive_list', 'f'], {}), '(passive_list, f)\n', (5513, 5530), False, 'import discord, time, os, praw, random, json\n'), ((6830, 6846), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (6838, 6846), False, 'import discord, time, os, praw, random, json\n'), ((7523, 7545), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (7537, 7545), False, 'import discord, time, os, praw, random, json\n'), ((7774, 7796), 'random.randint', 'random.randint', (['(1)', '(400)'], {}), '(1, 400)\n', (7788, 7796), False, 'import discord, time, os, praw, random, json\n'), ((4784, 4808), 'random.randint', 'random.randint', (['(400)', '(500)'], {}), '(400, 500)\n', (4798, 4808), False, 'import discord, time, os, praw, random, json\n'), ((5693, 5705), 'json.load', 'json.load', (['f'], {}), '(f)\n', (5702, 5705), False, 'import discord, time, os, praw, random, json\n'), ((5830, 5856), 'json.dump', 'json.dump', (['passive_list', 'f'], 
{}), '(passive_list, f)\n', (5839, 5856), False, 'import discord, time, os, praw, random, json\n'), ((6883, 6899), 'os.mkdir', 'os.mkdir', (['folder'], {}), '(folder)\n', (6891, 6899), False, 'import discord, time, os, praw, random, json\n'), ((6916, 6932), 'os.chdir', 'os.chdir', (['folder'], {}), '(folder)\n', (6924, 6932), False, 'import discord, time, os, praw, random, json\n')]
|
#!/usr/bin/env python
#fn; get_mismatch.py
#ACTGCAGCGTCATAGTTTTTGAG
import os
import copy
def getMismatch(start,seq,name,end):
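    # Recursively emits FASTQ records to mis_test.fastq: at each position
    # after `start` the base is replaced by every alternative base, one record
    # is written with position 21 forced to 'G', another with position 22 also
    # forced to 'A', and the function then recurses on the mutated read so
    # that multi-mismatch combinations are enumerated as well.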
#name = seq
quality = 'IIIIIIIIIIIIIIIIIIIIII'
OUTFILE = open('./mis_test.fastq','a')
ls = list(seq)
ls_1 = copy.deepcopy(ls)
ii = start+1
for i in ls_1[ii:end]:
if i == 'A':
ls_1[ii] = 'T'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'G'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'C'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
if i == 'T':
ls_1[ii] = 'A'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'G'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'C'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
if i == 'G':
ls_1[ii] = 'T'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'A'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'C'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
if i == 'C':
ls_1[ii] = 'T'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'G'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ls_1[ii] = 'A'
ls_1[21] = 'G'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
ls_1[22] = 'A'
OUTFILE.write('@'+name+'\n'+''.join(ls_1)+'\n'+'+'+name+'\n'+quality+'\n')
getMismatch(ii,ls_1,name,end - 1)
ii+=1
seq = 'GCTGCGTCGTCGTAGTTTTTTGG'
getMismatch(-1, seq, seq, 21)
|
[
"copy.deepcopy"
] |
[((259, 276), 'copy.deepcopy', 'copy.deepcopy', (['ls'], {}), '(ls)\n', (272, 276), False, 'import copy\n')]
|
# Generated by Django 2.0.10 on 2020-05-25 19:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('circles', '0003_auto_20200525_1531'),
]
operations = [
migrations.RemoveField(
model_name='membership',
name='is_Active',
),
migrations.AddField(
model_name='membership',
name='is_active',
field=models.BooleanField(default=True, help_text='Only active users are allowed to interact in the circle.', verbose_name='active status'),
),
migrations.AlterField(
model_name='membership',
name='is_admin',
field=models.BooleanField(default=False, help_text="Circle admins can update the circle's data and manage its members.", verbose_name='circle admin'),
),
migrations.AlterField(
model_name='membership',
name='remaining_invitations',
field=models.PositiveSmallIntegerField(default=0),
),
migrations.AlterField(
model_name='membership',
name='used_invitations',
field=models.PositiveSmallIntegerField(default=0),
),
]
|
[
"django.db.migrations.RemoveField",
"django.db.models.BooleanField",
"django.db.models.PositiveSmallIntegerField"
] |
[((236, 301), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""membership"""', 'name': '"""is_Active"""'}), "(model_name='membership', name='is_Active')\n", (258, 301), False, 'from django.db import migrations, models\n'), ((452, 594), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(True)', 'help_text': '"""Only active users are allowed to interact in the circle."""', 'verbose_name': '"""active status"""'}), "(default=True, help_text=\n 'Only active users are allowed to interact in the circle.',\n verbose_name='active status')\n", (471, 594), False, 'from django.db import migrations, models\n'), ((713, 865), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'help_text': '"""Circle admins can update the circle\'s data and manage its members."""', 'verbose_name': '"""circle admin"""'}), '(default=False, help_text=\n "Circle admins can update the circle\'s data and manage its members.",\n verbose_name=\'circle admin\')\n', (732, 865), False, 'from django.db import migrations, models\n'), ((997, 1040), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1029, 1040), False, 'from django.db import migrations, models\n'), ((1176, 1219), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'default': '(0)'}), '(default=0)\n', (1208, 1219), False, 'from django.db import migrations, models\n')]
|
# -*- coding: utf-8 -*-
r"""Run the vacuum coefficients 3nu example shown in README.md.
Runs the three-neutrino example of coefficients for oscillations in
vacuum shown in README.md
References
----------
.. [1] <NAME>, "Exact neutrino oscillation probabilities:
a fast general-purpose computation method for two and three neutrino
flavors", arXiv:1904.XXXXX.
Created: 2019/04/29 23:48
Last modified: 2019/04/29 23:48
"""
from __future__ import print_function
__version__ = "1.0"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import sys
sys.path.append('../src')
import numpy as np
import oscprob3nu
import hamiltonians3nu
from globaldefs import *
energy = 1.e9 # Neutrino energy [eV]
baseline = 1.3e3 # Baseline [km]
h_vacuum_energy_indep = \
hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent( S12_NO_BF,
S23_NO_BF,
S13_NO_BF,
DCP_NO_BF,
D21_NO_BF,
D31_NO_BF)
h_vacuum = np.multiply(1./energy, h_vacuum_energy_indep)
h1, h2, h3, h4, h5, h6, h7, h8 = \
oscprob3nu.hamiltonian_3nu_coefficients(h_vacuum)
print('h1: {:.4e}'.format(h1))
print('h2: {:.4e}'.format(h2))
print('h3: {:.4e}'.format(h3))
print('h4: {:.4e}'.format(h4))
print('h5: {:.4e}'.format(h5))
print('h6: {:.4e}'.format(h6))
print('h7: {:.4e}'.format(h7))
print('h8: {:.4e}'.format(h8))
print()
u0, u1, u2, u3, u4, u5, u6, u7, u8 = \
oscprob3nu.evolution_operator_3nu_u_coefficients( \
h_vacuum,
baseline*CONV_KM_TO_INV_EV)
print('u0: {:.4f}'.format(u0))
print('u1: {:.4f}'.format(u1))
print('u2: {:.4f}'.format(u2))
print('u3: {:.4f}'.format(u3))
print('u4: {:.4f}'.format(u4))
print('u5: {:.4f}'.format(u5))
print('u6: {:.4f}'.format(u6))
print('u7: {:.4f}'.format(u7))
print('u8: {:.4f}'.format(u8))
print()
evol_operator = \
oscprob3nu.evolution_operator_3nu(h_vacuum, baseline*CONV_KM_TO_INV_EV)
print('U3 = ')
with np.printoptions(precision=3, suppress=True):
print(np.array(evol_operator))
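# A quick sanity check (added sketch, not in the original example): the vacuum
# Hamiltonian is Hermitian, so the returned evolution operator should be
# unitary to numerical precision.
U3 = np.array(evol_operator)
print('U3 unitary:', np.allclose(np.matmul(U3.conj().T, U3), np.eye(3)))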
|
[
"sys.path.append",
"oscprob3nu.evolution_operator_3nu",
"numpy.multiply",
"hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent",
"oscprob3nu.hamiltonian_3nu_coefficients",
"numpy.array",
"numpy.printoptions",
"oscprob3nu.evolution_operator_3nu_u_coefficients"
] |
[((549, 574), 'sys.path.append', 'sys.path.append', (['"""../src"""'], {}), "('../src')\n", (564, 574), False, 'import sys\n'), ((769, 896), 'hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent', 'hamiltonians3nu.hamiltonian_3nu_vacuum_energy_independent', (['S12_NO_BF', 'S23_NO_BF', 'S13_NO_BF', 'DCP_NO_BF', 'D21_NO_BF', 'D31_NO_BF'], {}), '(S12_NO_BF,\n S23_NO_BF, S13_NO_BF, DCP_NO_BF, D21_NO_BF, D31_NO_BF)\n', (826, 896), False, 'import hamiltonians3nu\n'), ((1226, 1274), 'numpy.multiply', 'np.multiply', (['(1.0 / energy)', 'h_vacuum_energy_indep'], {}), '(1.0 / energy, h_vacuum_energy_indep)\n', (1237, 1274), True, 'import numpy as np\n'), ((1312, 1361), 'oscprob3nu.hamiltonian_3nu_coefficients', 'oscprob3nu.hamiltonian_3nu_coefficients', (['h_vacuum'], {}), '(h_vacuum)\n', (1351, 1361), False, 'import oscprob3nu\n'), ((1662, 1754), 'oscprob3nu.evolution_operator_3nu_u_coefficients', 'oscprob3nu.evolution_operator_3nu_u_coefficients', (['h_vacuum', '(baseline * CONV_KM_TO_INV_EV)'], {}), '(h_vacuum, baseline *\n CONV_KM_TO_INV_EV)\n', (1710, 1754), False, 'import oscprob3nu\n'), ((2166, 2239), 'oscprob3nu.evolution_operator_3nu', 'oscprob3nu.evolution_operator_3nu', (['h_vacuum', '(baseline * CONV_KM_TO_INV_EV)'], {}), '(h_vacuum, baseline * CONV_KM_TO_INV_EV)\n', (2199, 2239), False, 'import oscprob3nu\n'), ((2258, 2301), 'numpy.printoptions', 'np.printoptions', ([], {'precision': '(3)', 'suppress': '(True)'}), '(precision=3, suppress=True)\n', (2273, 2301), True, 'import numpy as np\n'), ((2313, 2336), 'numpy.array', 'np.array', (['evol_operator'], {}), '(evol_operator)\n', (2321, 2336), True, 'import numpy as np\n')]
|
# coding: utf-8
"""
Hydrogen Atom API
The Hydrogen Atom API # noqa: E501
OpenAPI spec version: 1.7.0
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import nucleus_api
from nucleus_api.api.roundup_api import RoundupApi # noqa: E501
from nucleus_api.rest import ApiException
class TestRoundupApi(unittest.TestCase):
"""RoundupApi unit test stubs"""
def setUp(self):
self.api = nucleus_api.api.roundup_api.RoundupApi() # noqa: E501
def tearDown(self):
pass
def test_create_roundup_settings_using_post(self):
"""Test case for create_roundup_settings_using_post
Create a Roundup Settings # noqa: E501
"""
pass
def test_create_roundup_using_post(self):
"""Test case for create_roundup_using_post
Create a roundup # noqa: E501
"""
pass
def test_delete_roundup_settings_using_delete(self):
"""Test case for delete_roundup_settings_using_delete
Delete a roundup settings # noqa: E501
"""
pass
def test_get_roundup_all_using_get(self):
"""Test case for get_roundup_all_using_get
List all roundups # noqa: E501
"""
pass
def test_get_roundup_settings_all_using_get(self):
"""Test case for get_roundup_settings_all_using_get
List all roundup settings # noqa: E501
"""
pass
def test_get_roundup_settings_using_get(self):
"""Test case for get_roundup_settings_using_get
Retrieve a Roundup Setting # noqa: E501
"""
pass
def test_get_roundup_using_get(self):
"""Test case for get_roundup_using_get
Retrieve a Roundup # noqa: E501
"""
pass
def test_update_roundup_settings_using_put(self):
"""Test case for update_roundup_settings_using_put
Update a roundup settings # noqa: E501
"""
pass
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"nucleus_api.api.roundup_api.RoundupApi"
] |
[((2072, 2087), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2085, 2087), False, 'import unittest\n'), ((517, 557), 'nucleus_api.api.roundup_api.RoundupApi', 'nucleus_api.api.roundup_api.RoundupApi', ([], {}), '()\n', (555, 557), False, 'import nucleus_api\n')]
|
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.http import require_POST
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.db.models import Q
from django.core.urlresolvers import reverse
from django.utils import timezone
from watson import search
from .models import Box, Activity, Category, SubZone, UserBookmark, UserRating, FeaturedActivity, UserReview
from .forms import FeedbackForm
from .decorators import require_user_authenticated, require_activity
import logging
logger = logging.getLogger(__name__)
def get_paginated_list(lst, num_objects_on_page, page):
paginator = Paginator(lst, num_objects_on_page)
try:
paginated_list = paginator.page(page)
except PageNotAnInteger:
paginated_list = paginator.page(1)
except EmptyPage:
paginated_list = paginator.page(paginator.num_pages)
return paginated_list
def handler404(request):
response = render(request, 'outingbox/404.html', {})
response.status_code = 404
return response
def handler500(request):
response = render(request, 'outingbox/500.html', {})
response.status_code = 500
return response
def index_view(request):
boxes = Box.objects.all()
featured_set = FeaturedActivity.objects.all()
featured = []
if featured_set.count() > 0:
featured = featured_set[0]
return render(request, 'outingbox/index.html', {'boxes': boxes, 'featured': featured})
def contact_us_view(request):
if request.method == 'GET':
form = FeedbackForm()
else:
form = FeedbackForm(request.POST)
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('feedback-thanks'))
return render(request, 'outingbox/contact-us.html', {'form': form})
def contact_us_thanks(request):
return render(request, 'outingbox/contact-us.html', {'thanks': True})
def about_us_view(request):
return render(request, 'outingbox/about-us.html')
def box_view(request, id=None, title=None):
box = get_object_or_404(Box, pk=id)
categories = box.category_set.all()
activities = Activity.objects.filter(category__in=categories).distinct()
# Default to page 1
page = request.GET.get('page', 1)
activities = get_paginated_list(activities, 12, page)
url_prev_page_number = None
if activities.has_previous():
url_prev_page_number = add_page_to_request_url(request, 'box', {'page': activities.previous_page_number()}, kwargs={'id':id, 'title':box.title})
url_next_page_number = None
if activities.has_next():
url_next_page_number = add_page_to_request_url(request, 'box', {'page': activities.next_page_number()}, kwargs={'id':id, 'title':box.title})
return render(request, 'box/box.html', {
'box': box,
'activities': activities,
'url_next_page_number': url_next_page_number,
'url_prev_page_number': url_prev_page_number
})
@login_required
def profile_bookmarks_view(request):
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
bookmarks = user_bookmark_inst.bookmarks.all()
except UserBookmark.DoesNotExist:
bookmarks = []
page = request.GET.get('page', 1)
bookmarks = get_paginated_list(bookmarks, 12, page)
url_prev_page_number = None
if bookmarks.has_previous():
url_prev_page_number = add_page_to_request_url(request, 'profile_bookmarks', {'page': bookmarks.previous_page_number()})
url_next_page_number = None
if bookmarks.has_next():
url_next_page_number = add_page_to_request_url(request, 'profile_bookmarks', {'page': bookmarks.next_page_number()})
return render(request, 'account/bookmarks.html', {
'bookmarks': bookmarks,
'url_next_page_number': url_next_page_number,
'url_prev_page_number': url_prev_page_number
})
def activity_view(request, id=None, title=None):
activity = get_object_or_404(Activity, pk=id)
user_bookmarks = None
user_rating = 0
user_review = None
if request.user.is_authenticated():
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
user_bookmarks = user_bookmark_inst.bookmarks.all()
except UserBookmark.DoesNotExist:
pass
try:
user_rating_inst = UserRating.objects.get(user=request.user, activity=activity)
user_rating = user_rating_inst.rating
except UserRating.DoesNotExist:
pass
try:
user_review = UserReview.objects.get(user=request.user, activity=activity)
except UserReview.DoesNotExist:
pass
reviews = UserReview.objects.filter(activity=activity)
context = {
'activity': activity,
'bookmarks': user_bookmarks,
'photos': activity.photos.all(),
'reviews': reviews,
'user_rating': user_rating,
'user_review': user_review
}
return render(request, 'activity/activity.html', context)
@login_required
def profile_view(request):
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
bookmarks = user_bookmark_inst.bookmarks.all()[:3]
except UserBookmark.DoesNotExist:
bookmarks = []
return render(request, 'account/profile.html', {
'bookmarks': bookmarks
    })
@csrf_protect
@require_POST
@require_user_authenticated
@require_activity
def rate_activity(request, activity):
delete_rating = request.POST.get('delete', None)
if delete_rating:
try:
user_rating_inst = UserRating.objects.get(user=request.user, activity=activity)
old_rating = user_rating_inst.rating
if activity.votes == 1:
activity.rating = 0
else:
activity.rating = (activity.rating*activity.votes - old_rating)/(activity.votes-1)
activity.votes = activity.votes - 1
activity.save()
user_rating_inst.delete()
except UserRating.DoesNotExist:
pass
return JsonResponse({'msg': 'ok', 'status': '0'})
rating_str = request.POST.get('new_rating', None)
if not rating_str:
res = JsonResponse({'msg': 'invalid rating', 'status': '1'})
res.status_code = 400
return res
# query string params are always string; coerce to int
try:
rating = int(rating_str)
except ValueError:
res = JsonResponse({'msg': 'invalid rating', 'status': '1'})
res.status_code = 400
return res
if (rating > 5) or (rating <= 0):
res = JsonResponse({'msg': 'invalid rating', 'status': '1'})
res.status_code = 400
return res
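    # Rating bookkeeping below keeps activity.rating as a running mean:
    # adding a new vote uses (avg * n + r) / (n + 1), changing an existing
    # vote uses (avg * n - r_old + r_new) / n, and the delete branch above
    # uses (avg * n - r_old) / (n - 1).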
old_rating = None
try:
user_rating_inst = UserRating.objects.get(user=request.user, activity=activity)
old_rating = user_rating_inst.rating
except UserRating.DoesNotExist:
user_rating_inst = UserRating(user=request.user, activity=activity)
if old_rating is None:
user_rating_inst.rating = rating
user_rating_inst.save()
activity.rating = (activity.rating*activity.votes+rating)/(activity.votes+1)
activity.votes = activity.votes + 1
activity.save()
elif old_rating != rating:
user_rating_inst.rating = rating
user_rating_inst.save()
activity.rating = (activity.rating*activity.votes - old_rating + rating)/(activity.votes)
activity.save()
return JsonResponse({'msg': 'ok', 'status': '0'})
@csrf_protect
@require_POST
@require_user_authenticated
@require_activity
def bookmark_activity(request, activity):
user_bookmark_inst, created = UserBookmark.objects.get_or_create(user=request.user)
delete_bookmark = request.POST.get('delete', None)
if delete_bookmark:
user_bookmark_inst.bookmarks.remove(activity)
else:
user_bookmark_inst.bookmarks.add(activity)
return JsonResponse({'msg': 'ok', 'status': '0'})
@csrf_protect
@require_POST
@require_user_authenticated
@require_activity
def comment_activity(request, activity):
delete_review = request.POST.get('delete', None)
if delete_review:
try:
user_review_inst = UserReview.objects.get(user=request.user, activity=activity)
user_review_inst.delete()
except UserReview.DoesNotExist:
pass
return JsonResponse({
'msg': 'ok',
'status': '0'
})
review = request.POST.get('review', '')
if not review or len(review) > 512:
res = JsonResponse({'msg': 'comment too long/short', 'status': '1'})
return res
try:
user_review_inst = UserReview.objects.get(user=request.user, activity=activity)
except UserReview.DoesNotExist:
user_review_inst = UserReview(user=request.user, activity=activity)
user_review_inst.review = review
user_review_inst.pub_date = timezone.now()
user_review_inst.is_published = True
user_review_inst.save()
date_format = '%b. %d, %Y'
return JsonResponse({
'msg': 'ok',
'status': '0',
'date': user_review_inst.pub_date.strftime(date_format),
'username': request.user.username
})
# Ensure _new_params is a dictionary
def add_page_to_request_url(request, view_name, _new_params, kwargs=None):
_dict = request.GET.copy()
# Django query dict update method appends instead of replacing the value if a key is present in both dicts
# Therefore remove page from original dict
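    # e.g. if the request URL already had ?page=1, _dict.update({'page': 2})
    # would leave both values stored under 'page' (a QueryDict keeps a list of
    # values per key), so the stale value has to be popped first.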
try:
_dict.pop('page')
except KeyError:
pass
_dict.update(_new_params)
return reverse(view_name, kwargs=kwargs)+'?'+_dict.urlencode()
def get_search_filter_urls(request, order_by):
_dict = request.GET.copy()
_dict['page'] = 1
_dict['ob'] = order_by
return reverse('search')+'?'+_dict.urlencode()
def search_view(request):
query = request.GET.get('query', '')
page = request.GET.get('page', 1)
order_by = request.GET.get('ob', '')
sub_zones_selected_list = request.GET.getlist('sz', [])
categories_selected_list = request.GET.getlist('c', [])
sub_zone_list = SubZone.objects.all_name_value_list()
category_list = Category.objects.all_name_value_list()
activities = Activity.objects.all()
int_sub_zones_selected_list = []
if sub_zones_selected_list:
for sub_zone in sub_zones_selected_list:
try:
int_sub_zone = int(sub_zone)
int_sub_zones_selected_list.append(int_sub_zone)
except ValueError:
raise Http404("No results")
activities = activities.filter(address__sub_zone__in=int_sub_zones_selected_list)
int_categories_selected_list = []
if categories_selected_list:
for category in categories_selected_list:
try:
int_category = int(category)
int_categories_selected_list.append(int_category)
except ValueError:
raise Http404("No results")
activities = activities.filter(category__in=int_categories_selected_list)
if query:
activities = search.filter(activities, query)
activities = activities.distinct()
order_dict = {
'raa': 'rating', # Rating ascending
'rad': '-rating', # Rating descending
'pra': 'cost', # Price ascending
'prd': '-cost' # Price descending
}
if order_by:
activities = activities.order_by(order_dict[order_by])
results_paginator = Paginator(activities, 10)
try:
results_page = results_paginator.page(page)
except PageNotAnInteger:
results_page = results_paginator.page(1)
except EmptyPage:
results_page = results_paginator.page(results_paginator.num_pages)
activities = results_page
order_by_relevance_url = get_search_filter_urls(request, '')
order_by_rating_url = get_search_filter_urls(request, 'rad')
order_by_price_url = get_search_filter_urls(request, 'pra')
url_prev_page_number = None
url_next_page_number = None
if activities.has_previous():
url_prev_page_number = add_page_to_request_url(request, 'search', {'page': activities.previous_page_number()})
if activities.has_next():
url_next_page_number = add_page_to_request_url(request, 'search', {'page': activities.next_page_number()})
bookmarks = None
if request.user.is_authenticated():
try:
user_bookmark_inst = UserBookmark.objects.get(user=request.user)
bookmarks = user_bookmark_inst.bookmarks.all()
except UserBookmark.DoesNotExist:
pass
context = {
'activities': activities,
'order_by_relevance_url': order_by_relevance_url,
'order_by_rating_url': order_by_rating_url,
'order_by_price_url': order_by_price_url,
'url_next_page_number': url_next_page_number,
'url_prev_page_number': url_prev_page_number,
'sub_zone_list': sub_zone_list,
'category_list': category_list,
'sub_zones_selected_list': int_sub_zones_selected_list,
'categories_selected_list': int_categories_selected_list,
'query': query,
'page': page,
'bookmarks': bookmarks
}
return render(request, 'search/search.html', context)
|
[
"django.core.urlresolvers.reverse",
"django.utils.timezone.now",
"watson.search.filter",
"django.http.JsonResponse",
"django.shortcuts.get_object_or_404",
"django.core.paginator.Paginator",
"django.http.Http404",
"django.shortcuts.render",
"logging.getLogger"
] |
[((820, 847), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (837, 847), False, 'import logging\n'), ((921, 956), 'django.core.paginator.Paginator', 'Paginator', (['lst', 'num_objects_on_page'], {}), '(lst, num_objects_on_page)\n', (930, 956), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((1236, 1277), 'django.shortcuts.render', 'render', (['request', '"""outingbox/404.html"""', '{}'], {}), "(request, 'outingbox/404.html', {})\n", (1242, 1277), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1370, 1411), 'django.shortcuts.render', 'render', (['request', '"""outingbox/500.html"""', '{}'], {}), "(request, 'outingbox/500.html', {})\n", (1376, 1411), False, 'from django.shortcuts import render, get_object_or_404\n'), ((1668, 1747), 'django.shortcuts.render', 'render', (['request', '"""outingbox/index.html"""', "{'boxes': boxes, 'featured': featured}"], {}), "(request, 'outingbox/index.html', {'boxes': boxes, 'featured': featured})\n", (1674, 1747), False, 'from django.shortcuts import render, get_object_or_404\n'), ((2026, 2086), 'django.shortcuts.render', 'render', (['request', '"""outingbox/contact-us.html"""', "{'form': form}"], {}), "(request, 'outingbox/contact-us.html', {'form': form})\n", (2032, 2086), False, 'from django.shortcuts import render, get_object_or_404\n'), ((2131, 2193), 'django.shortcuts.render', 'render', (['request', '"""outingbox/contact-us.html"""', "{'thanks': True}"], {}), "(request, 'outingbox/contact-us.html', {'thanks': True})\n", (2137, 2193), False, 'from django.shortcuts import render, get_object_or_404\n'), ((2234, 2276), 'django.shortcuts.render', 'render', (['request', '"""outingbox/about-us.html"""'], {}), "(request, 'outingbox/about-us.html')\n", (2240, 2276), False, 'from django.shortcuts import render, get_object_or_404\n'), ((2332, 2361), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Box'], {'pk': 'id'}), '(Box, pk=id)\n', (2349, 2361), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3050, 3221), 'django.shortcuts.render', 'render', (['request', '"""box/box.html"""', "{'box': box, 'activities': activities, 'url_next_page_number':\n url_next_page_number, 'url_prev_page_number': url_prev_page_number}"], {}), "(request, 'box/box.html', {'box': box, 'activities': activities,\n 'url_next_page_number': url_next_page_number, 'url_prev_page_number':\n url_prev_page_number})\n", (3056, 3221), False, 'from django.shortcuts import render, get_object_or_404\n'), ((3997, 4164), 'django.shortcuts.render', 'render', (['request', '"""account/bookmarks.html"""', "{'bookmarks': bookmarks, 'url_next_page_number': url_next_page_number,\n 'url_prev_page_number': url_prev_page_number}"], {}), "(request, 'account/bookmarks.html', {'bookmarks': bookmarks,\n 'url_next_page_number': url_next_page_number, 'url_prev_page_number':\n url_prev_page_number})\n", (4003, 4164), False, 'from django.shortcuts import render, get_object_or_404\n'), ((4252, 4286), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Activity'], {'pk': 'id'}), '(Activity, pk=id)\n', (4269, 4286), False, 'from django.shortcuts import render, get_object_or_404\n'), ((5285, 5335), 'django.shortcuts.render', 'render', (['request', '"""activity/activity.html"""', 'context'], {}), "(request, 'activity/activity.html', context)\n", (5291, 5335), False, 'from django.shortcuts import render, get_object_or_404\n'), ((5594, 5659), 'django.shortcuts.render', 'render', (['request', 
'"""account/profile.html"""', "{'bookmarks': bookmarks}"], {}), "(request, 'account/profile.html', {'bookmarks': bookmarks})\n", (5600, 5659), False, 'from django.shortcuts import render, get_object_or_404\n'), ((7805, 7847), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'ok', 'status': '0'}"], {}), "({'msg': 'ok', 'status': '0'})\n", (7817, 7847), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((8264, 8306), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'ok', 'status': '0'}"], {}), "({'msg': 'ok', 'status': '0'})\n", (8276, 8306), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((9252, 9266), 'django.utils.timezone.now', 'timezone.now', ([], {}), '()\n', (9264, 9266), False, 'from django.utils import timezone\n'), ((11880, 11905), 'django.core.paginator.Paginator', 'Paginator', (['activities', '(10)'], {}), '(activities, 10)\n', (11889, 11905), False, 'from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n'), ((13627, 13673), 'django.shortcuts.render', 'render', (['request', '"""search/search.html"""', 'context'], {}), "(request, 'search/search.html', context)\n", (13633, 13673), False, 'from django.shortcuts import render, get_object_or_404\n'), ((6395, 6437), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'ok', 'status': '0'}"], {}), "({'msg': 'ok', 'status': '0'})\n", (6407, 6437), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((6530, 6584), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'invalid rating', 'status': '1'}"], {}), "({'msg': 'invalid rating', 'status': '1'})\n", (6542, 6584), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((6930, 6984), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'invalid rating', 'status': '1'}"], {}), "({'msg': 'invalid rating', 'status': '1'})\n", (6942, 6984), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((8714, 8756), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'ok', 'status': '0'}"], {}), "({'msg': 'ok', 'status': '0'})\n", (8726, 8756), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((8890, 8952), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'comment too long/short', 'status': '1'}"], {}), "({'msg': 'comment too long/short', 'status': '1'})\n", (8902, 8952), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((11492, 11524), 'watson.search.filter', 'search.filter', (['activities', 'query'], {}), '(activities, query)\n', (11505, 11524), False, 'from watson import search\n'), ((6773, 6827), 'django.http.JsonResponse', 'JsonResponse', (["{'msg': 'invalid rating', 'status': '1'}"], {}), "({'msg': 'invalid rating', 'status': '1'})\n", (6785, 6827), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((9969, 10002), 'django.core.urlresolvers.reverse', 'reverse', (['view_name'], {'kwargs': 'kwargs'}), '(view_name, kwargs=kwargs)\n', (9976, 10002), False, 'from django.core.urlresolvers import reverse\n'), ((10165, 10182), 'django.core.urlresolvers.reverse', 'reverse', (['"""search"""'], {}), "('search')\n", (10172, 10182), False, 'from django.core.urlresolvers import reverse\n'), ((1986, 2012), 'django.core.urlresolvers.reverse', 'reverse', (['"""feedback-thanks"""'], {}), "('feedback-thanks')\n", 
(1993, 2012), False, 'from django.core.urlresolvers import reverse\n'), ((10931, 10952), 'django.http.Http404', 'Http404', (['"""No results"""'], {}), "('No results')\n", (10938, 10952), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n'), ((11351, 11372), 'django.http.Http404', 'Http404', (['"""No results"""'], {}), "('No results')\n", (11358, 11372), False, 'from django.http import HttpResponse, JsonResponse, HttpResponseRedirect, Http404\n')]
|
import time
import logging
import betfairlightweight
from betfairlightweight.filters import streaming_market_filter
from pythonjsonlogger import jsonlogger
from flumine import Flumine, clients, BaseStrategy
from flumine.order.trade import Trade
from flumine.order.ordertype import LimitOrder
from flumine.order.order import OrderStatus
logger = logging.getLogger()
custom_format = "%(asctime) %(levelname) %(message)"
log_handler = logging.StreamHandler()
formatter = jsonlogger.JsonFormatter(custom_format)
formatter.converter = time.gmtime
log_handler.setFormatter(formatter)
logger.addHandler(log_handler)
logger.setLevel(logging.INFO)
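# Example strategy wiring for flumine: check_market_book filters which market books
# are processed, process_market_book places a LAY limit order on a single selection,
# and process_orders cancels part of that order once it has been live for 5 seconds.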
class ExampleStrategy(BaseStrategy):
def start(self):
# subscribe to streams
print("starting strategy 'ExampleStrategy'")
def check_market_book(self, market, market_book):
# process_market_book only executed if this returns True
if market_book.status != "CLOSED":
return True
def process_market_book(self, market, market_book):
# process marketBook object
for runner in market_book.runners:
if (
runner.status == "ACTIVE"
and runner.last_price_traded
and runner.selection_id == 11982403
):
trade = Trade(
market_id=market_book.market_id,
selection_id=runner.selection_id,
handicap=runner.handicap,
strategy=self,
)
order = trade.create_order(
side="LAY", order_type=LimitOrder(price=1.01, size=2.00)
)
self.place_order(market, order)
def process_orders(self, market, orders):
for order in orders:
if order.status == OrderStatus.EXECUTABLE:
if order.elapsed_seconds and order.elapsed_seconds > 5:
# print(order.bet_id, order.average_price_matched, order.size_matched)
if order.size_remaining == 2.00:
self.cancel_order(market, order, size_reduction=1.51)
# self.update_order(market, order, "PERSIST")
# if order.order_type.price == 1.01 and order.size_remaining == 0.49:
# self.replace_order(market, order, 1.02)
# if order.order_type.price == 1.02:
# self.replace_order(market, order, 1.03)
# if order.order_type.price == 1.03:
# self.replace_order(market, order, 1.05)
pass
trading = betfairlightweight.APIClient("username")
client = clients.BetfairClient(trading)
framework = Flumine(client=client)
strategy = ExampleStrategy(
market_filter=streaming_market_filter(market_ids=["1.170378175"]),
streaming_timeout=2,
)
framework.add_strategy(strategy)
framework.run()
|
[
"pythonjsonlogger.jsonlogger.JsonFormatter",
"betfairlightweight.filters.streaming_market_filter",
"betfairlightweight.APIClient",
"flumine.order.ordertype.LimitOrder",
"logging.StreamHandler",
"flumine.Flumine",
"flumine.clients.BetfairClient",
"logging.getLogger",
"flumine.order.trade.Trade"
] |
[((347, 366), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (364, 366), False, 'import logging\n'), ((435, 458), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (456, 458), False, 'import logging\n'), ((471, 510), 'pythonjsonlogger.jsonlogger.JsonFormatter', 'jsonlogger.JsonFormatter', (['custom_format'], {}), '(custom_format)\n', (495, 510), False, 'from pythonjsonlogger import jsonlogger\n'), ((2602, 2642), 'betfairlightweight.APIClient', 'betfairlightweight.APIClient', (['"""username"""'], {}), "('username')\n", (2630, 2642), False, 'import betfairlightweight\n'), ((2652, 2682), 'flumine.clients.BetfairClient', 'clients.BetfairClient', (['trading'], {}), '(trading)\n', (2673, 2682), False, 'from flumine import Flumine, clients, BaseStrategy\n'), ((2696, 2718), 'flumine.Flumine', 'Flumine', ([], {'client': 'client'}), '(client=client)\n', (2703, 2718), False, 'from flumine import Flumine, clients, BaseStrategy\n'), ((2766, 2817), 'betfairlightweight.filters.streaming_market_filter', 'streaming_market_filter', ([], {'market_ids': "['1.170378175']"}), "(market_ids=['1.170378175'])\n", (2789, 2817), False, 'from betfairlightweight.filters import streaming_market_filter\n'), ((1304, 1421), 'flumine.order.trade.Trade', 'Trade', ([], {'market_id': 'market_book.market_id', 'selection_id': 'runner.selection_id', 'handicap': 'runner.handicap', 'strategy': 'self'}), '(market_id=market_book.market_id, selection_id=runner.selection_id,\n handicap=runner.handicap, strategy=self)\n', (1309, 1421), False, 'from flumine.order.trade import Trade\n'), ((1604, 1636), 'flumine.order.ordertype.LimitOrder', 'LimitOrder', ([], {'price': '(1.01)', 'size': '(2.0)'}), '(price=1.01, size=2.0)\n', (1614, 1636), False, 'from flumine.order.ordertype import LimitOrder\n')]
|
from unittest.mock import patch
from urllib.parse import urlencode, quote_plus
from kairon.shared.utils import Utility
import pytest
import os
from mongoengine import connect, ValidationError
from kairon.shared.chat.processor import ChatDataProcessor
from re import escape
import responses
class TestChat:
@pytest.fixture(autouse=True, scope='class')
def setup(self):
os.environ["system_file"] = "./tests/testing_data/system.yaml"
Utility.load_environment()
db_url = Utility.environment['database']["url"]
pytest.db_url = db_url
connect(**Utility.mongoengine_connection(Utility.environment['database']["url"]))
def test_save_channel_config_invalid(self):
with pytest.raises(ValidationError, match="Invalid channel type custom"):
ChatDataProcessor.save_channel_config({"connector_type": "custom",
"config": {
"bot_user_oAuth_token": "<PASSWORD>",
"slack_signing_secret": "<KEY>"}},
"test",
"test")
with pytest.raises(ValidationError,
match=escape("Missing ['bot_user_oAuth_token', 'slack_signing_secret'] all or any in config")):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"slack_signing_secret": "<KEY>"}},
"test",
"test")
with pytest.raises(ValidationError,
match=escape("Missing ['bot_user_oAuth_token', 'slack_signing_secret'] all or any in config")):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"bot_user_oAuth_token": "<PASSWORD>01939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K",
}},
"test",
"test")
def test_save_channel_config(self):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"bot_user_oAuth_token": "<PASSWORD>",
"slack_signing_secret": "<KEY>"}},
"test",
"test")
def test_update_channel_config(self):
ChatDataProcessor.save_channel_config({"connector_type": "slack",
"config": {
"bot_user_oAuth_token": "<KEY>",
"slack_signing_secret": "<KEY>"}},
"test",
"test")
slack = ChatDataProcessor.get_channel_config("slack", "test", mask_characters=False)
assert slack.get("connector_type") == "slack"
assert str(slack["config"].get("bot_user_oAuth_token")).startswith("Test")
assert not str(slack["config"].get("slack_signing_secret")).__contains__("***")
def test_list_channel_config(self):
channels = list(ChatDataProcessor.list_channel_config("test"))
slack = channels[0]
assert channels.__len__() == 1
assert slack.get("connector_type") == "slack"
assert str(slack["config"].get("bot_user_oAuth_token")).__contains__("***")
assert str(slack["config"].get("slack_signing_secret")).__contains__("***")
channels = list(ChatDataProcessor.list_channel_config("test", mask_characters=False))
slack = channels[0]
assert channels.__len__() == 1
assert slack.get("connector_type") == "slack"
assert not str(slack["config"].get("bot_user_oAuth_token")).__contains__("***")
assert not str(slack["config"].get("slack_signing_secret")).__contains__("***")
def test_get_channel_config_slack(self):
slack = ChatDataProcessor.get_channel_config("slack", "test")
assert slack.get("connector_type") == "slack"
assert str(slack["config"].get("bot_user_oAuth_token")).__contains__("***")
assert str(slack["config"].get("slack_signing_secret")).__contains__("***")
slack = ChatDataProcessor.get_channel_config("slack", "test", mask_characters=False)
assert slack.get("connector_type") == "slack"
assert not str(slack["config"].get("bot_user_oAuth_token")).__contains__("***")
assert not str(slack["config"].get("slack_signing_secret")).__contains__("***")
def test_delete_channel_config_slack(self):
ChatDataProcessor.delete_channel_config("slack", "test")
assert list(ChatDataProcessor.list_channel_config("test")).__len__() == 0
@responses.activate
def test_save_channel_config_telegram(self):
access_token = "<PASSWORD>"
webhook = urlencode({'url': "https://[email protected]/api/bot/telegram/tests/test"}, quote_via=quote_plus)
responses.add("GET",
json={'result': True},
url=f"{Utility.system_metadata['channels']['telegram']['api']['url']}/bot{access_token}/setWebhook?{webhook}")
def __mock_endpoint(*args):
return f"https://[email protected]/api/bot/telegram/tests/test"
with patch('kairon.shared.data.utils.DataUtility.get_channel_endpoint', __mock_endpoint):
ChatDataProcessor.save_channel_config({"connector_type": "telegram",
"config": {
"access_token": access_token,
"webhook_url": webhook,
"username_for_bot": "test"}},
"test",
"test")
@responses.activate
def test_save_channel_config_telegram_invalid(self):
access_token = "<PASSWORD>"
webhook = {'url': "https://[email protected]/api/bot/telegram/tests/test"}
webhook = urlencode(webhook, quote_via=quote_plus)
responses.add("GET",
json={'result': False, 'error_code': 400, 'description': "Invalid Webhook!"},
url=f"{Utility.system_metadata['channels']['telegram']['api']['url']}/bot{access_token}/setWebhook?{webhook}")
with pytest.raises(ValidationError, match="Invalid Webhook!"):
def __mock_endpoint(*args):
return f"https://[email protected]/api/bot/telegram/tests/test"
with patch('kairon.shared.data.utils.DataUtility.get_channel_endpoint', __mock_endpoint):
ChatDataProcessor.save_channel_config({"connector_type": "telegram",
"config": {
"access_token": access_token,
"webhook_url": webhook,
"username_for_bot": "test"}},
"test",
"test")
|
[
"kairon.shared.chat.processor.ChatDataProcessor.save_channel_config",
"kairon.shared.chat.processor.ChatDataProcessor.list_channel_config",
"urllib.parse.urlencode",
"kairon.shared.utils.Utility.load_environment",
"pytest.fixture",
"responses.add",
"re.escape",
"unittest.mock.patch",
"pytest.raises",
"kairon.shared.chat.processor.ChatDataProcessor.get_channel_config",
"kairon.shared.utils.Utility.mongoengine_connection",
"kairon.shared.chat.processor.ChatDataProcessor.delete_channel_config"
] |
[((315, 358), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)', 'scope': '"""class"""'}), "(autouse=True, scope='class')\n", (329, 358), False, 'import pytest\n'), ((459, 485), 'kairon.shared.utils.Utility.load_environment', 'Utility.load_environment', ([], {}), '()\n', (483, 485), False, 'from kairon.shared.utils import Utility\n'), ((2422, 2596), 'kairon.shared.chat.processor.ChatDataProcessor.save_channel_config', 'ChatDataProcessor.save_channel_config', (["{'connector_type': 'slack', 'config': {'bot_user_oAuth_token': '<PASSWORD>',\n 'slack_signing_secret': '<KEY>'}}", '"""test"""', '"""test"""'], {}), "({'connector_type': 'slack', 'config':\n {'bot_user_oAuth_token': '<PASSWORD>', 'slack_signing_secret': '<KEY>'}\n }, 'test', 'test')\n", (2459, 2596), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((2881, 3049), 'kairon.shared.chat.processor.ChatDataProcessor.save_channel_config', 'ChatDataProcessor.save_channel_config', (["{'connector_type': 'slack', 'config': {'bot_user_oAuth_token': '<KEY>',\n 'slack_signing_secret': '<KEY>'}}", '"""test"""', '"""test"""'], {}), "({'connector_type': 'slack', 'config':\n {'bot_user_oAuth_token': '<KEY>', 'slack_signing_secret': '<KEY>'}},\n 'test', 'test')\n", (2918, 3049), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((3300, 3376), 'kairon.shared.chat.processor.ChatDataProcessor.get_channel_config', 'ChatDataProcessor.get_channel_config', (['"""slack"""', '"""test"""'], {'mask_characters': '(False)'}), "('slack', 'test', mask_characters=False)\n", (3336, 3376), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((4457, 4510), 'kairon.shared.chat.processor.ChatDataProcessor.get_channel_config', 'ChatDataProcessor.get_channel_config', (['"""slack"""', '"""test"""'], {}), "('slack', 'test')\n", (4493, 4510), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((4750, 4826), 'kairon.shared.chat.processor.ChatDataProcessor.get_channel_config', 'ChatDataProcessor.get_channel_config', (['"""slack"""', '"""test"""'], {'mask_characters': '(False)'}), "('slack', 'test', mask_characters=False)\n", (4786, 4826), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((5114, 5170), 'kairon.shared.chat.processor.ChatDataProcessor.delete_channel_config', 'ChatDataProcessor.delete_channel_config', (['"""slack"""', '"""test"""'], {}), "('slack', 'test')\n", (5153, 5170), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((5381, 5478), 'urllib.parse.urlencode', 'urlencode', (["{'url': 'https://[email protected]/api/bot/telegram/tests/test'}"], {'quote_via': 'quote_plus'}), "({'url': 'https://[email protected]/api/bot/telegram/tests/test'},\n quote_via=quote_plus)\n", (5390, 5478), False, 'from urllib.parse import urlencode, quote_plus\n'), ((5483, 5647), 'responses.add', 'responses.add', (['"""GET"""'], {'json': "{'result': True}", 'url': 'f"""{Utility.system_metadata[\'channels\'][\'telegram\'][\'api\'][\'url\']}/bot{access_token}/setWebhook?{webhook}"""'}), '(\'GET\', json={\'result\': True}, url=\n f"{Utility.system_metadata[\'channels\'][\'telegram\'][\'api\'][\'url\']}/bot{access_token}/setWebhook?{webhook}"\n )\n', (5496, 5647), False, 'import responses\n'), ((6590, 6630), 'urllib.parse.urlencode', 'urlencode', (['webhook'], {'quote_via': 'quote_plus'}), '(webhook, quote_via=quote_plus)\n', (6599, 6630), False, 'from urllib.parse import urlencode, quote_plus\n'), ((6639, 6862), 'responses.add', 'responses.add', 
(['"""GET"""'], {'json': "{'result': False, 'error_code': 400, 'description': 'Invalid Webhook!'}", 'url': 'f"""{Utility.system_metadata[\'channels\'][\'telegram\'][\'api\'][\'url\']}/bot{access_token}/setWebhook?{webhook}"""'}), '(\'GET\', json={\'result\': False, \'error_code\': 400,\n \'description\': \'Invalid Webhook!\'}, url=\n f"{Utility.system_metadata[\'channels\'][\'telegram\'][\'api\'][\'url\']}/bot{access_token}/setWebhook?{webhook}"\n )\n', (6652, 6862), False, 'import responses\n'), ((726, 793), 'pytest.raises', 'pytest.raises', (['ValidationError'], {'match': '"""Invalid channel type custom"""'}), "(ValidationError, match='Invalid channel type custom')\n", (739, 793), False, 'import pytest\n'), ((807, 982), 'kairon.shared.chat.processor.ChatDataProcessor.save_channel_config', 'ChatDataProcessor.save_channel_config', (["{'connector_type': 'custom', 'config': {'bot_user_oAuth_token':\n '<PASSWORD>', 'slack_signing_secret': '<KEY>'}}", '"""test"""', '"""test"""'], {}), "({'connector_type': 'custom', 'config':\n {'bot_user_oAuth_token': '<PASSWORD>', 'slack_signing_secret': '<KEY>'}\n }, 'test', 'test')\n", (844, 982), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((1416, 1547), 'kairon.shared.chat.processor.ChatDataProcessor.save_channel_config', 'ChatDataProcessor.save_channel_config', (["{'connector_type': 'slack', 'config': {'slack_signing_secret': '<KEY>'}}", '"""test"""', '"""test"""'], {}), "({'connector_type': 'slack', 'config':\n {'slack_signing_secret': '<KEY>'}}, 'test', 'test')\n", (1453, 1547), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((1931, 2124), 'kairon.shared.chat.processor.ChatDataProcessor.save_channel_config', 'ChatDataProcessor.save_channel_config', (["{'connector_type': 'slack', 'config': {'bot_user_oAuth_token':\n '<PASSWORD>01939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K'}}", '"""test"""', '"""test"""'], {}), "({'connector_type': 'slack', 'config':\n {'bot_user_oAuth_token':\n '<PASSWORD>01939352912-801478018484-v3zq6MYNu62oSs8vammWOY8K'}}, 'test',\n 'test')\n", (1968, 2124), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((3667, 3712), 'kairon.shared.chat.processor.ChatDataProcessor.list_channel_config', 'ChatDataProcessor.list_channel_config', (['"""test"""'], {}), "('test')\n", (3704, 3712), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((4028, 4096), 'kairon.shared.chat.processor.ChatDataProcessor.list_channel_config', 'ChatDataProcessor.list_channel_config', (['"""test"""'], {'mask_characters': '(False)'}), "('test', mask_characters=False)\n", (4065, 4096), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((5805, 5892), 'unittest.mock.patch', 'patch', (['"""kairon.shared.data.utils.DataUtility.get_channel_endpoint"""', '__mock_endpoint'], {}), "('kairon.shared.data.utils.DataUtility.get_channel_endpoint',\n __mock_endpoint)\n", (5810, 5892), False, 'from unittest.mock import patch\n'), ((5902, 6089), 'kairon.shared.chat.processor.ChatDataProcessor.save_channel_config', 'ChatDataProcessor.save_channel_config', (["{'connector_type': 'telegram', 'config': {'access_token': access_token,\n 'webhook_url': webhook, 'username_for_bot': 'test'}}", '"""test"""', '"""test"""'], {}), "({'connector_type': 'telegram',\n 'config': {'access_token': access_token, 'webhook_url': webhook,\n 'username_for_bot': 'test'}}, 'test', 'test')\n", (5939, 6089), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((6906, 
6962), 'pytest.raises', 'pytest.raises', (['ValidationError'], {'match': '"""Invalid Webhook!"""'}), "(ValidationError, match='Invalid Webhook!')\n", (6919, 6962), False, 'import pytest\n'), ((592, 662), 'kairon.shared.utils.Utility.mongoengine_connection', 'Utility.mongoengine_connection', (["Utility.environment['database']['url']"], {}), "(Utility.environment['database']['url'])\n", (622, 662), False, 'from kairon.shared.utils import Utility\n'), ((7098, 7185), 'unittest.mock.patch', 'patch', (['"""kairon.shared.data.utils.DataUtility.get_channel_endpoint"""', '__mock_endpoint'], {}), "('kairon.shared.data.utils.DataUtility.get_channel_endpoint',\n __mock_endpoint)\n", (7103, 7185), False, 'from unittest.mock import patch\n'), ((7199, 7386), 'kairon.shared.chat.processor.ChatDataProcessor.save_channel_config', 'ChatDataProcessor.save_channel_config', (["{'connector_type': 'telegram', 'config': {'access_token': access_token,\n 'webhook_url': webhook, 'username_for_bot': 'test'}}", '"""test"""', '"""test"""'], {}), "({'connector_type': 'telegram',\n 'config': {'access_token': access_token, 'webhook_url': webhook,\n 'username_for_bot': 'test'}}, 'test', 'test')\n", (7236, 7386), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n'), ((1314, 1411), 're.escape', 'escape', (['"""Missing [\'bot_user_oAuth_token\', \'slack_signing_secret\'] all or any in config"""'], {}), '(\n "Missing [\'bot_user_oAuth_token\', \'slack_signing_secret\'] all or any in config"\n )\n', (1320, 1411), False, 'from re import escape\n'), ((1829, 1926), 're.escape', 'escape', (['"""Missing [\'bot_user_oAuth_token\', \'slack_signing_secret\'] all or any in config"""'], {}), '(\n "Missing [\'bot_user_oAuth_token\', \'slack_signing_secret\'] all or any in config"\n )\n', (1835, 1926), False, 'from re import escape\n'), ((5191, 5236), 'kairon.shared.chat.processor.ChatDataProcessor.list_channel_config', 'ChatDataProcessor.list_channel_config', (['"""test"""'], {}), "('test')\n", (5228, 5236), False, 'from kairon.shared.chat.processor import ChatDataProcessor\n')]
|
from flask import (
Blueprint,
flash,
redirect,
render_template,
request,
url_for)
from flask_login import current_user, login_user, logout_user
from werkzeug.wrappers import Response
from .forms import (
AccountForm,
DeleteForm,
LoginForm,
RegisterForm,
ResetForm,
UpdateForm)
from utils import admin_auth, is_staging, make_api_request
user_blueprint = Blueprint(
'users',
__name__,
template_folder='templates')
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = LoginForm()
login_args = {'title': 'Login', 'form': form}
if form.validate_on_submit():
res = make_api_request('post', 'users/login', data={
'email': form.email.data,
'password': form.password.data})
if res.status_code == 200:
login_user(res.json().get('user'))
flash(res.message, 'message')
next = request.args.get('next')
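            # only follow relative redirect targets; anything else falls back to the index page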
if next is None or not next[0] == '/':
next = url_for('core.index')
return redirect(next)
else:
flash(res.message, 'warning')
return render_template('login.html', **login_args)
return render_template('login.html', **login_args)
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = RegisterForm()
register_args = {'title': 'Register', 'form': form}
if form.validate_on_submit():
res = make_api_request('post', 'users/register', data={
'email': form.email.data,
'password': form.password.data,
'confirm_pw': form.confirm_pw.data})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.login'))
else:
flash(res.message, 'warning')
return render_template('register.html', **register_args)
return render_template('register.html', **register_args)
@user_blueprint.route('/reset', methods=['GET', 'POST'])
def reset() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = ResetForm()
reset_args = {'title': 'Reset', 'form': form}
if form.validate_on_submit():
res = make_api_request('post', 'users/reset', data={
'email': form.email.data})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.update'))
else:
flash(res.message, 'warning')
return render_template('reset.html', **reset_args)
return render_template('reset_pw.html', **reset_args)
@user_blueprint.route('/update', methods=['GET', 'POST'])
def update() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = UpdateForm()
update_args = {'title': 'Update', 'form': form}
if form.validate_on_submit():
res = make_api_request('put', 'users/update', data={
'email': current_user.email,
'password': form.password.data,
'new_pw': form.new_pw.data,
'confirm_pw': form.confirm_pw.data})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.login'))
else:
flash(res.message, 'warning')
return render_template('update.html', **update_args)
return render_template('update.html', **update_args)
@user_blueprint.route('/account', methods=['GET', 'POST'])
def account() -> Response:
if is_staging() and not admin_auth():
return redirect(url_for('admin.login_admin'))
form = AccountForm()
del_form = DeleteForm()
if form.validate_on_submit():
res = make_api_request('put', 'users/account', data={
'email': form.email.data,
'current_pw': form.current_pw.data,
'new_pw': form.new_pw.data,
'confirm_pw': form.confirm_pw.data,
})
if res.status_code == 200:
flash(res.message, 'message')
else:
flash(res.message, 'warning')
if del_form.validate_on_submit():
email = del_form.email.data,
if email != current_user.email:
flash('Invalid email', 'warning')
else:
res = make_api_request('delete', 'users/delete', data={
'email': email})
if res.status_code == 200:
flash(res.message, 'message')
return redirect(url_for('users.login'))
else:
flash(res.message, 'warning')
return render_template('account.html', form=form, del_form=del_form)
@user_blueprint.route('/logout', methods=['POST'])
def logout() -> Response:
logout_user()
flash('Logged out', 'message')
return redirect(url_for('users.login'))
|
[
"flask.flash",
"flask.Blueprint",
"flask.request.args.get",
"flask.redirect",
"utils.admin_auth",
"flask_login.logout_user",
"utils.make_api_request",
"flask.url_for",
"flask.render_template",
"utils.is_staging"
] |
[((404, 461), 'flask.Blueprint', 'Blueprint', (['"""users"""', '__name__'], {'template_folder': '"""templates"""'}), "('users', __name__, template_folder='templates')\n", (413, 461), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1334, 1377), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html', **login_args)\n", (1349, 1377), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2140, 2189), 'flask.render_template', 'render_template', (['"""register.html"""'], {}), "('register.html', **register_args)\n", (2155, 2189), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2837, 2883), 'flask.render_template', 'render_template', (['"""reset_pw.html"""'], {}), "('reset_pw.html', **reset_args)\n", (2852, 2883), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3672, 3717), 'flask.render_template', 'render_template', (['"""update.html"""'], {}), "('update.html', **update_args)\n", (3687, 3717), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4861, 4922), 'flask.render_template', 'render_template', (['"""account.html"""'], {'form': 'form', 'del_form': 'del_form'}), "('account.html', form=form, del_form=del_form)\n", (4876, 4922), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((5006, 5019), 'flask_login.logout_user', 'logout_user', ([], {}), '()\n', (5017, 5019), False, 'from flask_login import current_user, login_user, logout_user\n'), ((5024, 5054), 'flask.flash', 'flash', (['"""Logged out"""', '"""message"""'], {}), "('Logged out', 'message')\n", (5029, 5054), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((566, 578), 'utils.is_staging', 'is_staging', ([], {}), '()\n', (576, 578), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((776, 884), 'utils.make_api_request', 'make_api_request', (['"""post"""', '"""users/login"""'], {'data': "{'email': form.email.data, 'password': form.password.data}"}), "('post', 'users/login', data={'email': form.email.data,\n 'password': form.password.data})\n", (792, 884), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((1475, 1487), 'utils.is_staging', 'is_staging', ([], {}), '()\n', (1485, 1487), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((1694, 1841), 'utils.make_api_request', 'make_api_request', (['"""post"""', '"""users/register"""'], {'data': "{'email': form.email.data, 'password': form.password.data, 'confirm_pw':\n form.confirm_pw.data}"}), "('post', 'users/register', data={'email': form.email.data,\n 'password': form.password.data, 'confirm_pw': form.confirm_pw.data})\n", (1710, 1841), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((2281, 2293), 'utils.is_staging', 'is_staging', ([], {}), '()\n', (2291, 2293), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((2491, 2563), 'utils.make_api_request', 'make_api_request', (['"""post"""', '"""users/reset"""'], {'data': "{'email': form.email.data}"}), "('post', 'users/reset', data={'email': form.email.data})\n", (2507, 2563), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((2977, 2989), 'utils.is_staging', 'is_staging', ([], {}), '()\n', (2987, 2989), False, 'from utils import admin_auth, is_staging, 
make_api_request\n'), ((3190, 3369), 'utils.make_api_request', 'make_api_request', (['"""put"""', '"""users/update"""'], {'data': "{'email': current_user.email, 'password': form.password.data, 'new_pw':\n form.new_pw.data, 'confirm_pw': form.confirm_pw.data}"}), "('put', 'users/update', data={'email': current_user.email,\n 'password': form.password.data, 'new_pw': form.new_pw.data,\n 'confirm_pw': form.confirm_pw.data})\n", (3206, 3369), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((3813, 3825), 'utils.is_staging', 'is_staging', ([], {}), '()\n', (3823, 3825), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((4003, 4184), 'utils.make_api_request', 'make_api_request', (['"""put"""', '"""users/account"""'], {'data': "{'email': form.email.data, 'current_pw': form.current_pw.data, 'new_pw':\n form.new_pw.data, 'confirm_pw': form.confirm_pw.data}"}), "('put', 'users/account', data={'email': form.email.data,\n 'current_pw': form.current_pw.data, 'new_pw': form.new_pw.data,\n 'confirm_pw': form.confirm_pw.data})\n", (4019, 4184), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((5075, 5097), 'flask.url_for', 'url_for', (['"""users.login"""'], {}), "('users.login')\n", (5082, 5097), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((587, 599), 'utils.admin_auth', 'admin_auth', ([], {}), '()\n', (597, 599), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((625, 653), 'flask.url_for', 'url_for', (['"""admin.login_admin"""'], {}), "('admin.login_admin')\n", (632, 653), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1000, 1029), 'flask.flash', 'flash', (['res.message', '"""message"""'], {}), "(res.message, 'message')\n", (1005, 1029), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1049, 1073), 'flask.request.args.get', 'request.args.get', (['"""next"""'], {}), "('next')\n", (1065, 1073), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1189, 1203), 'flask.redirect', 'redirect', (['next'], {}), '(next)\n', (1197, 1203), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1230, 1259), 'flask.flash', 'flash', (['res.message', '"""warning"""'], {}), "(res.message, 'warning')\n", (1235, 1259), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1279, 1322), 'flask.render_template', 'render_template', (['"""login.html"""'], {}), "('login.html', **login_args)\n", (1294, 1322), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1496, 1508), 'utils.admin_auth', 'admin_auth', ([], {}), '()\n', (1506, 1508), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((1534, 1562), 'flask.url_for', 'url_for', (['"""admin.login_admin"""'], {}), "('admin.login_admin')\n", (1541, 1562), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1922, 1951), 'flask.flash', 'flash', (['res.message', '"""message"""'], {}), "(res.message, 'message')\n", (1927, 1951), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2030, 2059), 'flask.flash', 'flash', (['res.message', '"""warning"""'], {}), "(res.message, 'warning')\n", (2035, 2059), False, 'from flask import Blueprint, flash, redirect, render_template, request, 
url_for\n'), ((2079, 2128), 'flask.render_template', 'render_template', (['"""register.html"""'], {}), "('register.html', **register_args)\n", (2094, 2128), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2302, 2314), 'utils.admin_auth', 'admin_auth', ([], {}), '()\n', (2312, 2314), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((2340, 2368), 'flask.url_for', 'url_for', (['"""admin.login_admin"""'], {}), "('admin.login_admin')\n", (2347, 2368), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2624, 2653), 'flask.flash', 'flash', (['res.message', '"""message"""'], {}), "(res.message, 'message')\n", (2629, 2653), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2733, 2762), 'flask.flash', 'flash', (['res.message', '"""warning"""'], {}), "(res.message, 'warning')\n", (2738, 2762), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2782, 2825), 'flask.render_template', 'render_template', (['"""reset.html"""'], {}), "('reset.html', **reset_args)\n", (2797, 2825), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2998, 3010), 'utils.admin_auth', 'admin_auth', ([], {}), '()\n', (3008, 3010), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((3036, 3064), 'flask.url_for', 'url_for', (['"""admin.login_admin"""'], {}), "('admin.login_admin')\n", (3043, 3064), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3458, 3487), 'flask.flash', 'flash', (['res.message', '"""message"""'], {}), "(res.message, 'message')\n", (3463, 3487), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3566, 3595), 'flask.flash', 'flash', (['res.message', '"""warning"""'], {}), "(res.message, 'warning')\n", (3571, 3595), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3615, 3660), 'flask.render_template', 'render_template', (['"""update.html"""'], {}), "('update.html', **update_args)\n", (3630, 3660), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3834, 3846), 'utils.admin_auth', 'admin_auth', ([], {}), '()\n', (3844, 3846), False, 'from utils import admin_auth, is_staging, make_api_request\n'), ((3872, 3900), 'flask.url_for', 'url_for', (['"""admin.login_admin"""'], {}), "('admin.login_admin')\n", (3879, 3900), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4283, 4312), 'flask.flash', 'flash', (['res.message', '"""message"""'], {}), "(res.message, 'message')\n", (4288, 4312), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4339, 4368), 'flask.flash', 'flash', (['res.message', '"""warning"""'], {}), "(res.message, 'warning')\n", (4344, 4368), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4496, 4529), 'flask.flash', 'flash', (['"""Invalid email"""', '"""warning"""'], {}), "('Invalid email', 'warning')\n", (4501, 4529), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4562, 4627), 'utils.make_api_request', 'make_api_request', (['"""delete"""', '"""users/delete"""'], {'data': "{'email': email}"}), "('delete', 'users/delete', data={'email': email})\n", (4578, 4627), False, 'from utils 
import admin_auth, is_staging, make_api_request\n'), ((1148, 1169), 'flask.url_for', 'url_for', (['"""core.index"""'], {}), "('core.index')\n", (1155, 1169), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((1980, 2002), 'flask.url_for', 'url_for', (['"""users.login"""'], {}), "('users.login')\n", (1987, 2002), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((2682, 2705), 'flask.url_for', 'url_for', (['"""users.update"""'], {}), "('users.update')\n", (2689, 2705), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((3516, 3538), 'flask.url_for', 'url_for', (['"""users.login"""'], {}), "('users.login')\n", (3523, 3538), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4700, 4729), 'flask.flash', 'flash', (['res.message', '"""message"""'], {}), "(res.message, 'message')\n", (4705, 4729), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4820, 4849), 'flask.flash', 'flash', (['res.message', '"""warning"""'], {}), "(res.message, 'warning')\n", (4825, 4849), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n'), ((4762, 4784), 'flask.url_for', 'url_for', (['"""users.login"""'], {}), "('users.login')\n", (4769, 4784), False, 'from flask import Blueprint, flash, redirect, render_template, request, url_for\n')]
|
from tkinter import *
from tkinter import messagebox
from tkinter.ttk import *
import re
class TagSettings(Toplevel):
def __init__(self, parent):
super().__init__(parent)
# Class variables
self.tags = dict()
self.changes_made = False
# Window Parameters
self.title('Record Highlight Settings')
self.resizable(width=False, height=False)
# Create and place the widgets
self._init_widgets()
self.populate_tags(parent.current_project.config.get('events', {}).get('colors', {}))
self._place_widgets()
def _init_widgets(self):
"""
Creates the elements of this window and sets configuration values.
:return:
"""
# Master container frame
self.container = Frame(self)
# Treeview for tags
self.listbox_container = Frame(self.container)
self.tag_list = Treeview(self.listbox_container, columns=('source', 'id'), show='headings')
# Set up the tree headings
self.tag_list.heading('source', text='Event Source', command=lambda: self.sort_column('source', False))
self.tag_list.heading('id', text='Event ID', command=lambda: self.sort_column('id', False))
# Set up the tree columns
self.tag_list.column('id', minwidth=0, width=60, stretch=NO)
self.tag_list.column('source', minwidth=0, width=100, stretch=YES)
self.tag_list.bind('<<TreeviewSelect>>', self.callback_update_select_background)
# Scrollbar settings
self.vsb = Scrollbar(self.listbox_container, orient='vertical', command=self.tag_list.yview)
self.hsb = Scrollbar(self.listbox_container, orient='horizontal', command=self.tag_list.xview)
self.tag_list.configure(yscrollcommand=self.vsb.set)
self.tag_list.configure(xscrollcommand=self.hsb.set)
# Color preview
self.color_block = Canvas(self.container, width=300, height=20, relief=SUNKEN)
self.color_block_rect = self.color_block.create_rectangle(0, 0, 301, 21, fill='#FFFFFF')
self.color_block_text = self.color_block.create_text(5, 5, anchor='nw',
text='The quick brown fox jumps over the lazy dog.')
# Sliders
self.slider_container = Frame(self.container)
# Red config
self.red = IntVar()
self.r_label = Label(self.slider_container, text='R: ')
self.r_slider = Scale(self.slider_container, from_=0, to=255, variable=self.red,
command=lambda *args: self.truncate(self.r_slider))
self.r_value_label = Label(self.slider_container, text='0')
self.red.trace('w', lambda *args: self.callback_update_label(self.red, self.r_value_label))
self.r_slider.set(255)
# Green config
self.green = IntVar()
self.g_label = Label(self.slider_container, text='G: ')
self.g_slider = Scale(self.slider_container, from_=0, to=255, variable=self.green,
command=lambda *args: self.truncate(self.g_slider))
self.g_value_label = Label(self.slider_container, text='0')
self.green.trace('w', lambda *args: self.callback_update_label(self.green, self.g_value_label))
self.g_slider.set(255)
# Blue config
self.blue = IntVar()
self.b_label = Label(self.slider_container, text='B: ')
self.b_slider = Scale(self.slider_container, from_=0, to=255, variable=self.blue,
command=lambda *args: self.truncate(self.b_slider))
self.b_value_label = Label(self.slider_container, text='0')
self.blue.trace('w', lambda *args: self.callback_update_label(self.blue, self.b_value_label))
self.b_slider.set(255)
# Buttons for editing tags
self.add_button = Button(self.container, text='Add', command=self.callback_add_tag, underline=0)
self.bind('<Alt-a>', self.callback_add_tag)
self.delete_button = Button(self.container, text='Delete', command=self.callback_remove_tag, underline=0)
self.bind('<Alt-d>', self.callback_remove_tag)
# Finish and cancel buttons
self.finish_button = Button(self.container, text='Finish', command=self.callback_finish, underline=0)
self.cancel_button = Button(self.container, text='Cancel', command=self.callback_cancel, underline=0)
self.bind('<Alt-f>', self.callback_finish)
self.bind('<Return>', self.callback_finish)
self.bind('<Alt-c>', self.callback_cancel)
self.bind('<Escape>', self.callback_cancel)
# Focus on window.
self.focus_set()
def _place_widgets(self):
"""
Lays out the elements in this window.
:return:
"""
padding = 3
# Listbox for tags
self.tag_list.grid(row=0, column=0, columnspan=4, sticky='NESW')
self.vsb.grid(row=0, column=4, sticky='NESW')
self.hsb.grid(row=1, column=0, sticky='NESW')
self.listbox_container.columnconfigure(0, weight=4)
self.listbox_container.grid(row=0, column=0, columnspan=5, padx=padding, pady=padding, sticky='NESW')
# Color box
self.color_block.grid(row=1, column=0, columnspan=5, padx=padding, pady=padding, sticky='NS')
# Red config
self.r_label.grid(row=2, column=0, sticky='EW')
self.r_slider.grid(row=2, column=1, columnspan=3, sticky='EW')
self.r_value_label.grid(row=2, column=4, sticky='EW')
# Green config
self.g_label.grid(row=3, column=0, sticky='EW')
self.g_slider.grid(row=3, column=1, columnspan=3, sticky='EW')
self.g_value_label.grid(row=3, column=4, sticky='EW')
# Blue config
self.b_label.grid(row=4, column=0, sticky='EW')
self.b_slider.grid(row=4, column=1, columnspan=3, sticky='EW')
self.b_value_label.grid(row=4, column=4, sticky='EW')
# Slider container
self.slider_container.columnconfigure(1, weight=4)
self.slider_container.columnconfigure(4, minsize=25)
self.slider_container.grid(row=2, column=0, columnspan=5, padx=padding, sticky='NESW')
# Buttons for editing tags
self.add_button.grid(row=5, column=1, padx=padding, pady=padding, sticky='E')
self.delete_button.grid(row=5, column=2, padx=padding, pady=padding, sticky='EW')
# Finish and cancel buttons
self.finish_button.grid(row=5, column=3, padx=padding, pady=padding, sticky='EW')
self.cancel_button.grid(row=5, column=4, padx=padding, pady=padding, sticky='EW')
# Master container frame
self.container.columnconfigure(1, minsize=100)
self.container.pack(side=LEFT, fill=BOTH)
@staticmethod
def truncate(slider):
"""
Used to truncate slider values since ttk doesn't support the resolution option.
:return:
"""
value = slider.get()
if int(value) != value:
slider.set(int(value))
def sort_column(self, col, reverse):
"""
Sorts the tag list based on a particular column.
:param col: The column to sort.
:param reverse: Whether or not to sort in reverse order.
:return:
"""
column_elements = [(self.tag_list.set(k, col), k) for k in self.tag_list.get_children('')]
if col == 'id':
column_elements = [(int(v), k) for v, k in column_elements]
column_elements.sort(reverse=reverse)
for index, (val, k) in enumerate(column_elements):
self.tag_list.move(k, '', index)
self.tag_list.heading(col, command=lambda _col=col: self.sort_column(_col, not reverse))
def callback_update_label(self, var, label):
"""
Callback used to update the label associated with a slider. Also updates the color associated with the tag.
:param var: The variable bound to the slider.
:param label: The label to update.
:return:
"""
label.config(text=str(int(var.get())))
self.update_tag()
def populate_tags(self, tags):
"""
Iterates over the tag dictionary and inserts each tag.
:param tags: A dictionary containing tag, color pairs. The color should be a hex string.
:return:
"""
tag_config = ((source, event, color) for source, events in tags.items() for event, color in events.items())
for source, event, color in tag_config:
self.insert_tag(source, event, color)
def insert_tag(self, source, event, color):
"""
Inserts a tag into the ui and the tag list.
:param source: The event source.
:param event: The event id as a string.
:param color: The color to associate with the tag as a string in hex format.
:return:
"""
tag = f'{source}::{event}'
self.tag_list.insert('', 'end', values=(source, int(event)), tags=(tag,))
self.tag_list.tag_configure(tag, background=color)
self.tags[source] = self.tags.get(source, dict())
self.tags[source][event] = color
def callback_update_select_background(self, event=None):
"""
Callback used to update the selection background and sliders to match the selection.
:return:
"""
selection = self.tag_list.focus()
if not selection:
return
source, event = (str(v) for v in self.tag_list.item(selection)['values'])
hex_color = self.tags[source][event]
# self.color_block.create_rectangle(0, 0, 301, 21, fill=hex_color)
self.color_block.itemconfigure(self.color_block_rect, fill=hex_color)
hex_color = hex_color.lstrip('#')
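        # split the 'RRGGBB' string into its three two-character channel values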
r, g, b = tuple(int(hex_color[i:i + 2], 16) for i in range(0, 5, 2))
self.r_slider.set(r)
self.g_slider.set(g)
self.b_slider.set(b)
def update_tag(self):
"""
Updates the colors associated with a tag
:return:
"""
selection = self.tag_list.focus()
if not selection:
return
source, event = (str(v) for v in self.tag_list.item(selection)['values'])
r, g, b = tuple(map(int, (self.r_slider.get(), self.g_slider.get(), self.b_slider.get())))
hex_color = f'#{r:02x}{g:02x}{b:02x}'
self.tags[source][event] = hex_color
self.color_block.itemconfigure(self.color_block_rect, fill=hex_color)
self.tag_list.tag_configure('::'.join((source, event)), background=hex_color)
self.changes_made = True
def callback_add_tag(self, event=None):
"""
Creates a dialog window for the user to enter a new tag.
:return:
"""
window = TagPrompt(self)
window.grab_set()
def callback_remove_tag(self, event=None):
selection = self.tag_list.focus()
if not selection:
return
source, event = (str(v) for v in self.tag_list.item(selection)['values'])
self.tags[source].pop(event)
if len(self.tags[source].keys()) == 0:
self.tags.pop(source)
self.tag_list.delete(selection)
self.changes_made = True
def callback_finish(self, event=None):
"""
Callback used to finish making changes to the tags and return to master.
:return:
"""
self.master.current_project.config['events'] = self.master.current_project.config.get('events', {})
self.master.current_project.config['events']['colors'] = self.tags
if self.master.timeline is not None:
self.master.timeline.update_tags(self.master.current_project.config['events']['colors'])
self.master.changes_made |= self.changes_made
self.destroy()
def callback_cancel(self, event=None):
"""
Callback used to discard changes made. Destroys the widget and returns control to the master
without making any changes.
:return:
"""
self.destroy()
def __destroy__(self):
"""
Returns focus and control to the master.
:return:
"""
self.grab_release()
class TagPrompt(Toplevel):
def __init__(self, parent):
super().__init__(parent)
# Window settings
self.title('New Tag')
self.resizable(width=False, height=False)
# Create and place the widgets
self._init_widgets()
self._place_widgets()
def _init_widgets(self):
self.container = Frame(self)
self.source_label = Label(self.container, text='Event Source')
self.source_entry = Entry(self.container)
self.id_label = Label(self.container, text='Event ID')
id_vcmd = (self.container.register(self.validate_command_id), '%d', '%P')
self.id_entry = Entry(self.container, validate='key', validatecommand=id_vcmd)
self.ok_button = Button(self.container, text='Ok', command=self.callback_ok)
def _place_widgets(self):
padding = 3
self.source_label.grid(row=0, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.source_entry.grid(row=1, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.id_label.grid(row=2, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.id_entry.grid(row=3, column=0, columnspan=3, padx=padding, pady=padding, sticky='EW')
self.ok_button.grid(row=4, column=1, padx=padding, sticky='NESW')
self.container.pack()
@staticmethod
def validate_command_id(action, value):
"""
Restricts entry to only allow integers.
:return:
"""
if action != '1':
return True
if re.match(r'^[0-9]+$', value):
return True
return False
def callback_ok(self):
source, event = self.source_entry.get(), str(self.id_entry.get())
if not all((source, event)):
messagebox.showerror('Error', 'You must enter a value.')
return
if event in self.master.tags.get(source, {}):
messagebox.showerror('Error', 'That tag already exists.')
return
self.master.insert_tag(source, event, '#FFFFFF')
self.master.changes_made = True
self.destroy()
def __destroy__(self):
"""
Returns focus and control to the master.
:return:
"""
self.grab_release()
|
[
"tkinter.messagebox.showerror",
"re.match"
] |
[((13733, 13760), 're.match', 're.match', (['"""^[0-9]+$"""', 'value'], {}), "('^[0-9]+$', value)\n", (13741, 13760), False, 'import re\n'), ((13959, 14015), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""You must enter a value."""'], {}), "('Error', 'You must enter a value.')\n", (13979, 14015), False, 'from tkinter import messagebox\n'), ((14101, 14158), 'tkinter.messagebox.showerror', 'messagebox.showerror', (['"""Error"""', '"""That tag already exists."""'], {}), "('Error', 'That tag already exists.')\n", (14121, 14158), False, 'from tkinter import messagebox\n')]
|
"""
Convert ground truth latent classes into binary sensitive attributes
"""
def attr_fn_0(y):
return y[:,0] >= 1
def attr_fn_1(y):
return y[:,1] >= 1
def attr_fn_2(y):
return y[:,2] >= 3
def attr_fn_3(y):
return y[:,3] >= 20
def attr_fn_4(y):
return y[:,4] >= 16
def attr_fn_5(y):
return y[:,5] >= 16
dsprites_attr_fns = [attr_fn_0, attr_fn_1, attr_fn_2, attr_fn_3, attr_fn_4, attr_fn_5]
# celeba stuff
def attr_fn_chubby(a):
return a[:,13] > 0.
def attr_fn_eyeglasses(a):
return a[:,15] > 0.
def attr_fn_male(a):
return a[:,20] > 0.
def attr_fn_heavy_makeup(a):
return a[:,18] > 0.
CELEBA_SUBGROUPS = {
'H': attr_fn_heavy_makeup,
'S': lambda a: a[:,31] > 0., # smiling
'W': lambda a: a[:,36] > 0., # wears lipstick
    'A': lambda a: a[:,2] > 0., # attractive
'C': attr_fn_chubby,
'E': attr_fn_eyeglasses,
'M': attr_fn_male,
'C $\land$ E': lambda a: attr_fn_chubby(a) * attr_fn_eyeglasses(a),
'C $\land$ M': lambda a: attr_fn_chubby(a) * attr_fn_male(a),
'E $\land$ M': lambda a: attr_fn_eyeglasses(a) * attr_fn_male(a),
'C $\land$ $\\neg$ E': lambda a: attr_fn_chubby(a) * (1 - attr_fn_eyeglasses(a)),
'C $\land$ $\\neg$ M': lambda a: attr_fn_chubby(a) * (1 - attr_fn_male(a)),
'E $\land$ $\\neg$ M': lambda a: attr_fn_eyeglasses(a) * (1 - attr_fn_male(a)),
'$\\neg$ C $\land$ E': lambda a: (1 - attr_fn_chubby(a)) * attr_fn_eyeglasses(a),
'$\\neg$ C $\land$ M': lambda a: (1 - attr_fn_chubby(a)) * attr_fn_male(a),
'$\\neg$ E $\land$ M': lambda a: (1 - attr_fn_eyeglasses(a)) * attr_fn_male(a),
'$\\neg$ C $\land$ $\\neg$ E': lambda a: (1 - attr_fn_chubby(a)) * (1 - attr_fn_eyeglasses(a)),
'$\\neg$ C $\land$ $\\neg$ M': lambda a: (1 - attr_fn_chubby(a)) * (1 - attr_fn_male(a)),
'$\\neg$ E $\land$ $\\neg$ M': lambda a: (1 - attr_fn_eyeglasses(a)) * (1 - attr_fn_male(a)),
} # cf. generate_celeba_audit_table.format_subgroups
CELEBA_SENS_IDX = {
'C': [13],
'E': [15],
'M': [20],
'C $\land$ E': [13, 15],
'C $\land$ M': [13, 20],
'E $\land$ M': [15, 20],
'C $\land$ $\\neg$ E': [13, 15],
'C $\land$ $\\neg$ M': [13, 20],
'E $\land$ $\\neg$ M': [15, 20],
'$\\neg$ C $\land$ E': [13, 15],
'$\\neg$ C $\land$ M': [13, 20],
'$\\neg$ E $\land$ M': [15, 20],
'$\\neg$ C $\land$ $\\neg$ E': [13, 15],
'$\\neg$ C $\land$ $\\neg$ M': [13, 20],
'$\\neg$ E $\land$ $\\neg$ M': [15, 20],
} # maps named subgroups to the sensitive indices they depend on
# comcrime stuff
CC_ATTR_STRING = 'cc_attr_fn'
def create_cc_attr_fn(i):
def f(y):
# print('column', i)
return y[:, i] #>= 0.5 - should be already binarized
return f
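# The factory above gives each generated function its own bound value of `i`; a bare
# lambda defined in a loop would late-bind `i` and always read the last column.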
cc_attr_fn_0 = create_cc_attr_fn(0)
cc_attr_fn_1 = create_cc_attr_fn(1)
cc_attr_fn_2 = create_cc_attr_fn(2)
cc_attr_fn_3 = create_cc_attr_fn(3)
cc_attr_fn_4 = create_cc_attr_fn(4)
cc_attr_fn_5 = create_cc_attr_fn(5)
cc_attr_fn_6 = create_cc_attr_fn(6)
cc_attr_fn_7 = create_cc_attr_fn(7)
cc_attr_fn_8 = create_cc_attr_fn(8)
cc_attr_fn_9 = create_cc_attr_fn(9)
cc_attr_fn_10 = create_cc_attr_fn(10)
cc_attr_fn_11 = create_cc_attr_fn(11)
cc_attr_fn_12 = create_cc_attr_fn(12)
cc_attr_fn_13 = create_cc_attr_fn(13)
cc_attr_fn_14 = create_cc_attr_fn(14)
cc_attr_fn_15 = create_cc_attr_fn(15)
cc_attr_fn_16 = create_cc_attr_fn(16)
cc_attr_fn_17 = create_cc_attr_fn(17)
cc_attr_fn_18 = create_cc_attr_fn(18)
if __name__ == '__main__':
import numpy as np
x = np.zeros((10, 10))
print('should print 5')
cc_attr_fn_5(x)
cc_attr_fn_6(x)
cc_attr_fn_7(x)
|
[
"numpy.zeros"
] |
[((3653, 3671), 'numpy.zeros', 'np.zeros', (['(10, 10)'], {}), '((10, 10))\n', (3661, 3671), True, 'import numpy as np\n')]
|
# coding: utf-8
# # Example
# In[3]:
import turicreate as tc
# ## Get the data
# In[22]:
data = 'path-to-data-here'
sf = tc.SFrame(data).dropna(columns=['Age'])
train, test = sf.random_split(fraction=0.8)
test, validations = test.random_split(fraction=0.5)
# ## Modeling
# In[27]:
from turicreate import logistic_classifier
model = logistic_classifier.create(train, target='Survived',validation_set=validations)
# ## Evaluate
# Use turicreate's built-in evaluation to check model performance.
# In[ ]:
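# A minimal evaluation sketch (assumes the `model` and `test` split defined above;
# `evaluate` is Turi Create's standard metrics call for classifiers and returns
# accuracy among other metrics):
results = model.evaluate(test)
print(results['accuracy'])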
|
[
"turicreate.SFrame",
"turicreate.logistic_classifier.create"
] |
[((345, 430), 'turicreate.logistic_classifier.create', 'logistic_classifier.create', (['train'], {'target': '"""Survived"""', 'validation_set': 'validations'}), "(train, target='Survived', validation_set=validations\n )\n", (371, 430), False, 'from turicreate import logistic_classifier\n'), ((129, 144), 'turicreate.SFrame', 'tc.SFrame', (['data'], {}), '(data)\n', (138, 144), True, 'import turicreate as tc\n')]
|
import argparse
import os
import random
import time
import warnings
from math import cos, pi
import cv2
import numpy as np
import torch
import torch.optim as optim
from DLBio.pt_train_printer import Printer
from DLBio.pytorch_helpers import get_lr
class ITrainInterface():
"""
TrainInterfaces handle the prediction of the network, the loss
computation and the computation of additional training metrics.
These steps can quickly change depending on the dataset, the model
architecture, the task and so on. Therefore, it is reasonable to
create separate modules that are passed to the Training class.
You need to implement the constructor and the train_step method,
if the computations in the validation step differ from the train_step
you need to overwrite val_step.
"""
def __init__(self, *args, **kwargs):
"""Constructor. Usually you need to provide and process:
- a model
- a device
- implement a loss function
- implement additional metrics
"""
raise NotImplementedError('Needs model and loss fcn and metrics')
def train_step(self, *args, **kwargs):
"""
        In the Training class, this function is called for each drawn batch
like this:
loss, metrics = self.train_interface.train_step(sample)
(for more information see '_train_step' method)
Accordingly, you should compute the loss based on the prediction of
your model and other metrics.
The loss is used to update the weights of the model
        Returns a list with loss, metrics, counters, functions;
        subsets such as (loss, metrics, counters) or (loss, metrics) are also possible.
"""
raise NotImplementedError('Implement to run training')
def val_step(self, *args, **kwargs):
"""
        By default, the same code as in train_step is executed.
"""
# usually exactly the same as the train step
return self.train_step(*args, **kwargs)
def test_step(self, *args, **kwargs):
"""
        By default, the same code as in val_step is executed.
"""
# usually exactly the same as the train step
return self.val_step(*args, **kwargs)
def after_training_process(self, *args, **kwargs):
"""
Use this if you want to run a specific process after the training that
depends on the model
"""
pass
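# A minimal sketch (an addition, not part of the original module) of a concrete
# ITrainInterface for a plain classification setup; the model, device and metric
# names below are assumptions.
class ExampleClassificationInterface(ITrainInterface):
    def __init__(self, model, device):
        self.model = model.to(device)
        self.device = device
        self.xent = nn.CrossEntropyLoss()
    def train_step(self, sample):
        # assumes the data loader yields (input, label) tuples
        x, y = sample
        x, y = x.to(self.device), y.to(self.device)
        pred = self.model(x)
        loss = self.xent(pred, y)
        acc = (pred.argmax(1) == y).float().mean().item()
        return loss, {'acc': acc}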
class Training():
"""A Class that contains all necessary ingredients to train a pytorch
model. To start training, simply call the instantiated object with the
desired number of epochs, e.g.:
    training = Training(...)
    training(100)  # train for 100 epochs
    TODO: 'add_do_not_update' boolean for SAM optimization
"""
def __init__(
self, optimizer, data_loader, train_interface,
save_steps=-1, save_path=None,
printer=None, scheduler=None, clip=None,
retain_graph=False, val_data_loader=None, early_stopping=None,
validation_only=False, save_state_dict=False,
test_data_loader=None, batch_scheduler=None, start_epoch=0,
time_log_printer=None, stop_conditions=[]
):
"""Constructor
Parameters
----------
optimizer : pytorch optimizer
Controls the weight updates, see get_optimizer for more information
data_loader : pytorch dataloader
When iterated over in a for loop, data are returned in batches.
Note that the for loop is executed as
'for sample in data_loader:'
You need to specify what a sample actually is in the training-
interface.
train_interface : ITrainInterface
Computes the loss of a batch, see method _train_step
save_steps : int, optional
            Every 'save_steps' epochs the model is saved to 'save_path'. If 0, the
            model is only saved at the end of the training. By default -1,
            which means the model is not saved at all (if early_stopping is
            None).
save_path : str, optional
Where to save the model, by default None. Needs to be specified if
            save_steps >= 0. Note that the model is always overwritten, i.e.,
there is only one '[model].pt' file after training at save_path.
printer : Printer (pt_train_printer), optional
Prints current training values to terminal and possibly a
log.json file. By default None, nothing is logged.
scheduler : pytorch scheduler, optional
Updates the learning rate according to some schedule. By default
None, no scheduling is used.
clip : float, optional
Gradient clipping, by default None, no gradient clipping
retain_graph : bool, optional
Needed for special backpropagation functions, see pytorch
documentation for more information. By default False.
val_data_loader : pytorch data_loader, optional
Can be used to validate/test the network performance. These data
are not used for training (but maybe early stopping). The model is
in eval-mode, when those data are processed. The val_step of the
TrainingInterface is applied to these data.
By default None, no validation is done.
early_stopping : EarlyStopping object, optional
Save the model based on a specified metric, each time the best
value of this metric is reached. By default None, no early stopping
validation_only: bool
When called, only the validation steps are computed. Note that, if
the flag is set to true, the model is not trained.
save_state_dict: save the model's state dict instead of the model
test_data_loader : pytorch data_loader, optional
Can be used to test the network performance. The model is
in eval-mode, when those data are processed. The test_step of the
TrainingInterface is applied to these data.
batch_scheduler: BatchScheduler object
For scheduling algorithms that adjust the learning
            rate within an epoch, instead of only at each epoch's end.
start_epoch: int
Set to a value other than 0 if a previous training is resumed.
In this case, start_epoch should be set to the last epoch the
            previous training stopped at.
time_log_printer: Printer (pt_train_printer)
If not none, the time needed for different training steps
is logged and written by this logger.
stop_conditions: List of [IStopCondition]
Similar to early stopping, stops the training based on a
train phase metric (no val- or test metric). Use, for example, to
quickly stop processes where the training does not converge.
Returns
-------
Training object
"""
self.optimizer = optimizer
self.data_loader = data_loader
assert issubclass(train_interface.__class__, ITrainInterface)
self.train_interface = train_interface
self.scheduler = scheduler
self.batch_scheduler = batch_scheduler
self.early_stopping = early_stopping
self.stop_conditions = stop_conditions
if printer is None:
self.printer = Printer(100, None)
else:
self.printer = printer
self.time_log_printer = time_log_printer
self.time_logger = TimeLogger(is_active=(time_log_printer is not None))
assert isinstance(save_steps, int)
if save_steps >= 0:
assert save_path is not None
self.do_save = save_steps >= 0 and save_path is not None
self.save_steps = save_steps
self.save_path = save_path
self.save_state_dict = save_state_dict
print(self.save_state_dict)
self.clip = clip
self.retain_graph = retain_graph
self.phases = ['train']
if val_data_loader is not None:
self.phases.append('validation')
if test_data_loader is not None:
self.phases.append('test')
# there should be no instance with ['train', 'test']. For now ['train', 'val'] should be used instead
# maybe this needs to be changed in the future
if 'test' in self.phases:
assert 'validation' in self.phases, 'No combination train and test allowed.'
self.validation_only = validation_only
if validation_only:
assert 'test' not in self.phases
self.phases = ['validation']
print('Running in validation only mode.')
self.data_loaders_ = {
'train': data_loader,
'validation': val_data_loader,
'test': test_data_loader
}
if start_epoch > 0:
self.start_ep = start_epoch + 1
else:
self.start_ep = 0
if not torch.cuda.is_available():
warnings.warn('No GPU detected. Training can be slow.')
# check for right order of training phases
if 'train' in self.phases and 'validation' in self.phases:
assert self.phases.index('train') == 0
assert self.phases.index('validation') == 1
if 'validation' in self.phases and 'test' in self.phases:
assert self.phases.index('validation') == 1
assert self.phases.index('test') == 2
def __call__(self, epochs_):
"""Train the model for a specified number of epochs
Parameters
----------
epochs_ : int
how many epochs for training
"""
self.printer.restart()
do_stop = False
if self.validation_only:
num_batches = 0
else:
num_batches = len(self.data_loaders_['train'])
if self.start_ep > 0:
if self.batch_scheduler is not None:
self._batch_schedule(
'train', self.start_ep, 0,
self.data_loaders_['train'].batch_size
)
if self.scheduler is not None:
# TODO: if resume, compute the learning rate beforehand
raise NotImplementedError
print('STARTING TRAINING')
for epoch in range(self.start_ep, epochs_):
self.printer.learning_rate = get_lr(self.optimizer)
for current_phase in self.phases:
if current_phase == 'train':
self.train_interface.model.train()
else:
self.train_interface.model.eval()
self.time_logger.start(current_phase + '_load_data')
for idx, sample in enumerate(self.data_loaders_[current_phase]):
self.time_logger.stop(current_phase + '_load_data')
self._batch_schedule(
current_phase, epoch, idx, num_batches
)
loss, metrics, counters, functions = self._iteration_step(
sample, current_phase)
self._update_printer(
epoch, loss, metrics, counters, functions, current_phase
)
if current_phase == 'train':
self._update_weights(loss)
self.time_logger.start(current_phase + '_load_data')
# ----------- end of phase ----------------------------
self.time_logger.stop(
current_phase + '_load_data', do_log=False
)
# do certain actions depending on which phase we are in
if self.early_stopping is not None and current_phase == 'validation':
do_stop = self.early_stopping(
self.printer.get_metrics(),
self.train_interface.model,
self.save_path,
self.save_state_dict
)
if self.stop_conditions and current_phase == 'train':
for sc in self.stop_conditions:
do_stop = sc(epoch, self.printer.get_metrics())
self.printer.on_epoch_end()
self._schedule(current_phase)
self._save(epoch, epochs_, current_phase)
# compute statistics on time values that are collected during
# the upper for-loop
if self.time_log_printer is not None:
self.time_log_printer.update(
torch.tensor([-1]), epoch, metrics=self.time_logger.get_data()
)
self.time_log_printer.on_epoch_end()
self.time_logger.restart()
if do_stop:
return
# -------------------end of epoch -------------------------------
def _iteration_step(self, sample, current_phase):
"""Compute loss and metrics
Parameters
----------
sample : anything provided by the data loader
typically the sample x and the corresponding label
current_phase : str
training, validation, or test
Returns
-------
float, dict
loss value that is used for gradient computation and dictionaries
with metrics, counters, and functions
"""
self.time_logger.start(current_phase + '_iteration_step')
if current_phase == 'validation':
with torch.no_grad():
output = self.train_interface.val_step(sample)
elif current_phase == 'test':
with torch.no_grad():
output = self.train_interface.test_step(sample)
else:
output = self.train_interface.train_step(sample)
functions = None
counters = None
if len(output) == 2:
loss, metrics = output[0], output[1]
elif len(output) == 3:
loss, metrics, counters = output[0], output[1], output[2]
else:
loss, metrics, counters = output[0], output[1], output[2]
functions = output[3]
self.time_logger.stop(current_phase + '_iteration_step')
return loss, metrics, counters, functions
def _update_weights(self, loss):
"""Compute gradient and apply backpropagation
from:
https://discuss.pytorch.org/t/what-step-backward-and-zero-grad-do/33301
Hopefully, you use them in the other order - opt.zero_grad(), loss.backward(), opt.step().
zero_grad clears old gradients from the last step (otherwise you’d just accumulate the gradients from all loss.backward() calls).
loss.backward() computes the derivative of the loss w.r.t. the parameters (or anything requiring gradients) using backpropagation.
opt.step() causes the optimizer to take a step based on the gradients of the parameters.
Parameters
----------
loss : float
error function the weight update is based on
"""
self.time_logger.start('update_weights')
self.optimizer.zero_grad()
self.time_logger.start('loss_backward')
loss.backward(retain_graph=self.retain_graph)
self.time_logger.stop('loss_backward')
if self.clip is not None:
torch.nn.utils.clip_grad_norm_(
self.train_interface.model.parameters(), self.clip
)
self.time_logger.start('opt_step')
self.optimizer.step()
self.time_logger.stop('opt_step')
self.time_logger.stop('update_weights')
def _update_printer(self, epoch, loss, metrics, counters, functions, current_phase):
"""Pass the necessary values to the printer
Parameters
----------
epoch : int
Current epoch
loss : float
Current loss value
metrics : dict
current_phase : str
If the current phase is validation, all metrics/losses/etc. are renamed
            from [name] to val_[name]. If the current phase is test, they are renamed to test_[name].
"""
self.time_logger.start(current_phase + '_update_printer')
if current_phase == 'train':
self.printer.update(loss, epoch, metrics, counters, functions)
else:
prefix = {'validation': 'val_', 'test': 'test_'}[current_phase]
if metrics is not None:
metrics = {prefix + k: v for (k, v) in metrics.items()}
if counters is not None:
counters = {prefix + k: v for (k, v) in counters.items()}
if functions is not None:
functions = {prefix + k: v for (k, v) in functions.items()}
self.printer.update(
loss, epoch, metrics,
counters, functions, loss_key=prefix + 'loss'
)
self.time_logger.stop(current_phase + '_update_printer')
self.printer.print_conditional()
def _schedule(self, current_phase):
"""Update the scheduler after each training epoch.
"""
if self.scheduler is not None and current_phase == 'train':
self.time_logger.start('schedule')
self.scheduler.step()
self.time_logger.stop('schedule')
def _batch_schedule(self, current_phase, epoch, iteration, num_batches):
"""Update the scheduler after each training batch.
"""
if self.batch_scheduler is not None and current_phase == 'train':
self.time_logger.start('batch_schedule')
self.batch_scheduler.step(epoch, iteration, num_batches)
self.time_logger.stop('batch_schedule')
def _save(self, epoch, epochs_, current_phase):
"""save the model to model path every 'save_steps' epochs.
Parameters
----------
epoch : int
current epoch
epochs_ : int
number of epochs for entire training
current_phase: str
is this function called after training, val or testing? Only after
validation, the model is saved.
"""
# only save after validation
if current_phase != 'validation' and 'validation' in self.phases:
return
if self.do_save:
self.time_logger.start('save')
is_last_epoch = (epoch == epochs_ - 1)
if self.save_steps > 0:
is_save_intervall = epoch % self.save_steps == 0
else:
is_save_intervall = False
if is_last_epoch or is_save_intervall:
torch_save_model(
self.train_interface.model,
self.save_path,
self.save_state_dict
)
self.time_logger.stop('save')
def get_optimizer(opt_id, parameters, learning_rate, **kwargs):
""" Simple getter function for a pytorch optimizer
Parameters
----------
opt_id : str
Which optimizer, e.g., SGD or Adam
parameters : model.parameters
pytorch variables that shall be updated, usually model.parameters()
is passed
learning_rate : float
Returns
-------
pytorch optimizer
Raises
------
ValueError
if unknown opt_id
"""
if opt_id == 'SGD':
if 'momentum' not in kwargs.keys():
warnings.warn(f'Using default momentum for SGD: {.9}')
if 'weight_decay' not in kwargs.keys():
warnings.warn(f'Using default weight_decay for SGD {0.}')
optimizer = optim.SGD(parameters,
lr=learning_rate,
momentum=kwargs.get('momentum', .9),
weight_decay=kwargs.get('weight_decay', 0.),
nesterov=kwargs.get('nesterov', False)
)
elif opt_id == 'Adam':
if 'weight_decay' not in kwargs.keys():
warnings.warn(f'Using default weight_decay for SGD {0.}')
optimizer = optim.Adam(
parameters,
lr=learning_rate,
weight_decay=kwargs.get('weight_decay', 0.)
)
elif opt_id == 'lamb':
from pytorch_lamb import Lamb
if 'weight_decay' not in kwargs.keys():
warnings.warn(f'Using default weight_decay for SGD {0.001}')
optimizer = Lamb(
parameters,
lr=learning_rate, weight_decay=kwargs.get('weight_decay', 0.001),
betas=(kwargs.get('beta0', .9), kwargs.get('beta1', .999))
)
elif opt_id == 'AdaDelta':
if 'weight_decay' not in kwargs.keys():
warnings.warn(f'Using default weight_decay for SGD {0.}')
optimizer = optim.Adadelta(
parameters,
lr=learning_rate,
weight_decay=kwargs.get('weight_decay', 0.),
rho=kwargs.get('rho', 0.9),
eps=kwargs.get('eps', 1e-3)
)
elif opt_id == 'RMSProb':
if 'weight_decay' not in kwargs.keys():
warnings.warn(f'Using default weight_decay for RMSprop {0.}')
        optimizer = optim.RMSprop(
            parameters,
            lr=learning_rate,
            alpha=kwargs.get('alpha', 0.99),
            eps=kwargs.get('eps', 1e-08),
            weight_decay=kwargs.get('weight_decay', 0.),
            momentum=kwargs.get('momentum', 0.),
            centered=kwargs.get('centered', False)
        )
else:
raise ValueError(f'Unknown opt value: {opt_id}')
return optimizer
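# Usage sketch (an addition, not from the original module): a typical SGD setup.
# opt = get_optimizer('SGD', model.parameters(), 0.1, momentum=0.9, weight_decay=5e-4, nesterov=True)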
def get_scheduler(lr_steps, epochs, optimizer, gamma=.1, fixed_steps=None):
"""returns a pytorch scheduler
Parameters
----------
lr_steps : int
        the learning rate is altered in 'lr_steps' uniformly spaced steps
epochs : int
number of epochs for the entire training
optimizer : pytorch optimizer
gamma : float, optional
the learning rate is multiplied by gamma, by default .1
Returns
-------
pytorch scheduler
"""
if fixed_steps is not None:
assert lr_steps == 0, 'no lr_steps if fixed steps is used'
# might be filled with strings, when coming from argparse
fixed_steps = [int(x) for x in fixed_steps]
scheduler = optim.lr_scheduler.MultiStepLR(
optimizer, fixed_steps,
gamma=gamma
)
print(f'fixed rate scheduling at: {fixed_steps}')
return scheduler
if lr_steps < 1:
return None
assert lr_steps < epochs, f'Epochs must be greater than lr_steps but e:{epochs} < l:{lr_steps}'
step_size = epochs // lr_steps
print(f'Sched step size: {step_size}')
scheduler = optim.lr_scheduler.StepLR(
optimizer, step_size,
gamma=gamma, last_epoch=-1
)
return scheduler
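# Usage sketch (an addition): with lr_steps=3 and epochs=90, the step size is 90 // 3 = 30,
# so the learning rate is multiplied by gamma after every 30 epochs.
# sched = get_scheduler(lr_steps=3, epochs=90, optimizer=opt, gamma=.1)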
def set_device(device=None, verbose=True):
"""Use if you have multiple GPUs, but you only want to use a subset.
Use the command 'nvidia-smi' in the terminal for more information on your
pc's gpu setup
Parameters
----------
device : int or list of int, optional
masks all devices but 'device'. By default None, all devices are
visible
"""
if device is not None:
if isinstance(device, list):
device = ','.join(str(x) for x in device)
else:
device = str(device)
os.environ['CUDA_VISIBLE_DEVICES'] = device
if verbose:
print(f'using device {device}')
def set_random_seed(seed):
"""Sets a seed for all training related random functions. The seed is only
identical on the same machine.
Parameters
----------
seed : int
"""
print(f'Setting seed: {seed}')
np.random.seed(seed)
torch.manual_seed(seed)
random.seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
cv2.setRNGSeed(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
def _init_fn(worker_id):
np.random.seed(seed + worker_id)
# for debugging purposes some random numbers are generated
# output = {
# 'seed': seed,
# 'torch': torch.randn(1).item(),
# 'cuda': torch.cuda.FloatTensor(1).normal_().item(),
# 'numpy': float(np.random.randn(1)),
# 'python': random.randint(0, 5000)
# }
# with open(os.path.join(options.folder_name, 'rand_num_test.json'), 'w') as file:
# json.dump(output, file)
return _init_fn
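# Usage sketch (an addition): the returned worker-init function can be passed to a DataLoader
# so that worker processes are seeded deterministically as well.
# init_fn = set_random_seed(42)
# loader = torch.utils.data.DataLoader(dataset, batch_size=32, num_workers=4, worker_init_fn=init_fn)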
def loss_verification(train_interface, data_loader, printer):
"""Run through one epoch and print the corresponding loss.
When using cross-entropy, the usual loss should be -ln(num_classes). If
not, there might be something wrong with your code.
Parameters
----------
train_interface : ITrainInterface
data_loader : pytorch data_loader
printer : Printer (pt_train_printer.py)
"""
# verify loss
print('Running loss verification')
with torch.no_grad():
mean_im = 0.0
std_im = 0.0
ctr = 0.0
for sample in data_loader:
mean_im += sample['x'].mean()
std_im += sample['x'].std()
ctr += 1.0
loss, metrics = train_interface.train_step(sample)
printer.update(loss, -1, metrics)
printer.print()
print(f'mean: {mean_im/ctr:.3f} std: {std_im/ctr:.3f}')
class EarlyStopping():
"""Save the best model depending on a specified metric on the validation
set.
Returns
-------
EarlyStopping
"""
def __init__(self, metric_key, get_max=True, epoch_thres=np.inf):
"""Constructor. You need to specify which metric should be observed,
if the value is better when decreased or increased.
For example:
EarlyStopping('val_acc', get_max=True, epoch_thres=10)
keeps track of the validation accuracy. If the current best validation
accuracy (starting from -inf) is exceeded, this value is saved and
the model is saved.
If after 10 epochs the best accuracy is not exceeded, the training
is stopped.
This object is used within the Training class.
Parameters
----------
metric_key : str
Which metric is observed. Needs to be a metric that is present in
the training_interface. val_[name] is also possible.
get_max : bool, optional
Save the model if the new observed metric is above the current best
value (True) or below it (False). By default True.
epoch_thres : int, optional
            if the model has not been saved for 'epoch_thres' epochs,
the training is stopped. By default np.inf, the model is trained
the full number of epochs.
"""
self.key = metric_key
self.get_max = get_max
self.no_update_counter = 0.
self.thres = epoch_thres
if self.thres < np.inf:
warnings.warn(
f'Early stopping: training is stopped after {self.thres} unchanged epochs.')
if get_max:
self.current_val = -np.inf
else:
self.current_val = +np.inf
def __call__(self, metrics, model, save_path, save_state_dict):
value = metrics[self.key]
self.no_update_counter += 1
if self.get_max:
if value > self.current_val:
self._update(value, model, save_path, save_state_dict)
else:
if value < self.current_val:
self._update(value, model, save_path, save_state_dict)
if self.no_update_counter > self.thres:
return True
else:
return False
def _update(self, value, model, save_path, save_state_dict):
self.no_update_counter = 0
self.current_val = value
torch_save_model(model, save_path, save_state_dict)
class IStopCondition():
def __call__(self, epoch, metrics):
raise NotImplementedError
def torch_save_model(model, save_path, save_state_dict):
print(f'saving model: {save_path}')
if save_state_dict:
print('save as state dict')
to_save = model.state_dict()
torch.save(to_save, save_path)
else:
torch.save(model, save_path)
print('model saved.')
def get_printer(print_intervall, log_file=None):
"""Convenience function, to get a printer without import pt_train_printer.
Note that only the basic keywords are passed on here!
Parameters
----------
print_intervall : int
print to terminal after n batches, if -1: no printing
log_file : str, optional
path to a json file, by default None: no log-file is saved.
Returns
-------
Printer
"""
return Printer(print_intervall, log_file=log_file)
# taken from https://github.com/d-li14/mobilenetv2.pytorch/blob/master/imagenet.py
class BatchScheduler():
def __init__(self, decay_type, optimizer, initial_learning_rate, warmup, num_epochs, gamma=.1):
self.optimizer = optimizer
self.lr = initial_learning_rate
self.warmup = warmup
self.num_epochs = num_epochs
self.decay_type = decay_type
self.gamma = gamma
def step(self, epoch, iteration, num_iter):
lr = self.optimizer.param_groups[0]['lr']
warmup_epoch = 5 if self.warmup else 0
warmup_iter = warmup_epoch * num_iter
current_iter = iteration + epoch * num_iter
max_iter = self.num_epochs * num_iter
if self.decay_type == 'step':
lr = self.lr * \
(self.gamma ** ((current_iter - warmup_iter) // (max_iter - warmup_iter)))
elif self.decay_type == 'cos':
lr = self.lr * \
(1 + cos(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))) / 2
elif self.decay_type == 'linear':
lr = self.lr * (1 - (current_iter - warmup_iter) /
(max_iter - warmup_iter))
else:
raise ValueError('Unknown lr mode {}'.format(self.decay_type))
if epoch < warmup_epoch:
lr = self.lr * current_iter / warmup_iter
for param_group in self.optimizer.param_groups:
param_group['lr'] = lr
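# Usage sketch (an addition): cosine decay with a 5-epoch warmup, stepped once per batch by
# the Training loop through its 'batch_scheduler' argument.
# batch_sched = BatchScheduler('cos', optimizer, initial_learning_rate=0.1, warmup=True, num_epochs=100)
# training = Training(optimizer, train_loader, interface, batch_scheduler=batch_sched)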
class TimeLogger():
def __init__(self, is_active):
self.is_active = is_active
self.data = dict()
self.qu = dict()
self.functions = {
'mean': np.mean,
'min': np.min,
'max': np.max,
'std': np.std,
'median': np.median,
'sum': np.sum
}
def restart(self):
if not self.is_active:
return
self.data = dict()
def start(self, key):
if not self.is_active:
return
assert key not in self.qu.keys()
self.qu[key] = time.time()
def stop(self, key, do_log=True):
if not self.is_active:
return
start_time = self.qu.pop(key)
time_needed = time.time() - start_time
if do_log:
self._update(key, time_needed)
def _update(self, key, value):
assert self.is_active
if key not in self.data.keys():
self.data[key] = [value]
else:
self.data[key].append(value)
def get_data(self):
assert self.is_active
out = dict()
for key, values in self.data.items():
values = np.array(values)
for name, fcn in self.functions.items():
tmp = float(fcn(values))
out[key + '_' + name] = tmp
return out
def get_train_arg_parser(config):
# Deprecated function
"""Typical argument parser to train a neural network
Parameters
----------
config : module or object
default values for your project
Returns
-------
argument parser
use like this:
import config_module
...
...
def get_options():
parser = get_train_argparser(config_module)
parser.add_argument(...)
...
return parser.parse_args()
"""
parser = argparse.ArgumentParser()
parser.add_argument('--lr', type=float, default=config.LEARNING_RATE)
parser.add_argument('--wd', type=float, default=config.WEIGHT_DECAY)
parser.add_argument('--mom', type=float, default=config.MOMENTUM)
parser.add_argument('--opt', type=str, default=config.OPTIMIZER)
parser.add_argument('--bs', type=int, default=config.BATCH_SIZE)
parser.add_argument('--epochs', type=int, default=config.EPOCHS)
parser.add_argument('--lr_steps', type=int, default=config.LR_STEPS)
parser.add_argument('--nw', type=int, default=config.NUM_WORKERS)
parser.add_argument('--sv_int', type=int, default=config.SAVE_INTERVALL)
parser.add_argument('--model_type', type=str, default=config.MODEL_TYPE)
parser.add_argument('--seed', type=int, default=config.SEED)
parser.add_argument('--device', type=int, default=config.DEVICE)
parser.add_argument('--folder', type=str, default=config.DEF_FOLDER)
parser.add_argument('--model_name', type=str, default=config.MODEL_NAME)
parser.add_argument('--in_dim', type=int, default=config.INPUT_DIM)
parser.add_argument('--early_stopping', action='store_true')
parser.add_argument('--es_metric', type=str, default=config.ES_METRIC)
parser.add_argument('--num_classes', type=int, default=config.NUM_CLASSES)
# may be unnecessary for your project
parser.add_argument('--ds_len', type=int, default=config.DATASET_LENGTH)
parser.add_argument('--crop_size', type=int, default=config.CROP_SIZE)
return parser
|
[
"numpy.random.seed",
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"DLBio.pytorch_helpers.get_lr",
"torch.no_grad",
"random.seed",
"math.cos",
"torch.manual_seed",
"torch.cuda.manual_seed",
"torch.cuda.is_available",
"DLBio.pt_train_printer.Printer",
"cv2.setRNGSeed",
"time.time",
"torch.save",
"torch.cuda.manual_seed_all",
"numpy.array",
"warnings.warn",
"torch.tensor",
"torch.optim.lr_scheduler.MultiStepLR"
] |
[((22861, 22936), 'torch.optim.lr_scheduler.StepLR', 'optim.lr_scheduler.StepLR', (['optimizer', 'step_size'], {'gamma': 'gamma', 'last_epoch': '(-1)'}), '(optimizer, step_size, gamma=gamma, last_epoch=-1)\n', (22886, 22936), True, 'import torch.optim as optim\n'), ((23885, 23905), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (23899, 23905), True, 'import numpy as np\n'), ((23910, 23933), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (23927, 23933), False, 'import torch\n'), ((23938, 23955), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (23949, 23955), False, 'import random\n'), ((23964, 23989), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23987, 23989), False, 'import torch\n'), ((24171, 24191), 'cv2.setRNGSeed', 'cv2.setRNGSeed', (['seed'], {}), '(seed)\n', (24185, 24191), False, 'import cv2\n'), ((29030, 29073), 'DLBio.pt_train_printer.Printer', 'Printer', (['print_intervall'], {'log_file': 'log_file'}), '(print_intervall, log_file=log_file)\n', (29037, 29073), False, 'from DLBio.pt_train_printer import Printer\n'), ((32423, 32448), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (32446, 32448), False, 'import argparse\n'), ((22438, 22505), 'torch.optim.lr_scheduler.MultiStepLR', 'optim.lr_scheduler.MultiStepLR', (['optimizer', 'fixed_steps'], {'gamma': 'gamma'}), '(optimizer, fixed_steps, gamma=gamma)\n', (22468, 22505), True, 'import torch.optim as optim\n'), ((23999, 24027), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (24021, 24027), False, 'import torch\n'), ((24036, 24068), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (24062, 24068), False, 'import torch\n'), ((24275, 24307), 'numpy.random.seed', 'np.random.seed', (['(seed + worker_id)'], {}), '(seed + worker_id)\n', (24289, 24307), True, 'import numpy as np\n'), ((25235, 25250), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (25248, 25250), False, 'import torch\n'), ((28466, 28496), 'torch.save', 'torch.save', (['to_save', 'save_path'], {}), '(to_save, save_path)\n', (28476, 28496), False, 'import torch\n'), ((28515, 28543), 'torch.save', 'torch.save', (['model', 'save_path'], {}), '(model, save_path)\n', (28525, 28543), False, 'import torch\n'), ((31119, 31130), 'time.time', 'time.time', ([], {}), '()\n', (31128, 31130), False, 'import time\n'), ((7442, 7460), 'DLBio.pt_train_printer.Printer', 'Printer', (['(100)', 'None'], {}), '(100, None)\n', (7449, 7460), False, 'from DLBio.pt_train_printer import Printer\n'), ((9034, 9059), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (9057, 9059), False, 'import torch\n'), ((9073, 9128), 'warnings.warn', 'warnings.warn', (['"""No GPU detected. Training can be slow."""'], {}), "('No GPU detected. 
Training can be slow.')\n", (9086, 9128), False, 'import warnings\n'), ((10454, 10476), 'DLBio.pytorch_helpers.get_lr', 'get_lr', (['self.optimizer'], {}), '(self.optimizer)\n', (10460, 10476), False, 'from DLBio.pytorch_helpers import get_lr\n'), ((19540, 19595), 'warnings.warn', 'warnings.warn', (['f"""Using default momentum for SGD: {0.9}"""'], {}), "(f'Using default momentum for SGD: {0.9}')\n", (19553, 19595), False, 'import warnings\n'), ((19655, 19713), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.0}"""'], {}), "(f'Using default weight_decay for SGD {0.0}')\n", (19668, 19713), False, 'import warnings\n'), ((27232, 27332), 'warnings.warn', 'warnings.warn', (['f"""Early stopping: training is stopped after {self.thres} unchanged epochs."""'], {}), "(\n f'Early stopping: training is stopped after {self.thres} unchanged epochs.'\n )\n", (27245, 27332), False, 'import warnings\n'), ((31280, 31291), 'time.time', 'time.time', ([], {}), '()\n', (31289, 31291), False, 'import time\n'), ((31709, 31725), 'numpy.array', 'np.array', (['values'], {}), '(values)\n', (31717, 31725), True, 'import numpy as np\n'), ((13655, 13670), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13668, 13670), False, 'import torch\n'), ((20134, 20192), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.0}"""'], {}), "(f'Using default weight_decay for SGD {0.0}')\n", (20147, 20192), False, 'import warnings\n'), ((13790, 13805), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13803, 13805), False, 'import torch\n'), ((20470, 20530), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.001}"""'], {}), "(f'Using default weight_decay for SGD {0.001}')\n", (20483, 20530), False, 'import warnings\n'), ((12705, 12723), 'torch.tensor', 'torch.tensor', (['[-1]'], {}), '([-1])\n', (12717, 12723), False, 'import torch\n'), ((20831, 20889), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for SGD {0.0}"""'], {}), "(f'Using default weight_decay for SGD {0.0}')\n", (20844, 20889), False, 'import warnings\n'), ((21216, 21278), 'warnings.warn', 'warnings.warn', (['f"""Using default weight_decay for RMSprop {0.0}"""'], {}), "(f'Using default weight_decay for RMSprop {0.0}')\n", (21229, 21278), False, 'import warnings\n'), ((30028, 30093), 'math.cos', 'cos', (['(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))'], {}), '(pi * (current_iter - warmup_iter) / (max_iter - warmup_iter))\n', (30031, 30093), False, 'from math import cos, pi\n')]
|
import pandas as pd
import numpy as np
import logging
# IF CHOPPINESS INDEX >= 61.8 --> MARKET IS CONSOLIDATING
# IF CHOPPINESS INDEX <= 38.2 --> MARKET IS TRENDING
# https://medium.com/codex/detecting-ranging-and-trending-markets-with-choppiness-index-in-python-1942e6450b58
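# For reference (an added note, not in the original source), get_ci below computes
#   CI = 100 * log10( sum(TrueRange, lookback) / (max(High, lookback) - min(Low, lookback)) ) / log10(lookback)
# where TrueRange = max(high - low, |high - prev_close|, |low - prev_close|).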
class WyckoffAccumlationDistribution:
def __init__(self):
self.lookback = 10
self.barCountDistribution = 3
self.barCountVolClimaxRebound = 2
self.barCountAccumulation = 7
self.minVolumeClimax = 5.0 # minimum volume climax - 600%
self.isConsolidating = 61.8
self.isTrending = 38.2
    # IF CHOPPINESS INDEX >= 61.8 --> MARKET IS CONSOLIDATING
def isAccumulating(self, value):
return value > self.isConsolidating
def isDistributing(self, value):
return value < self.isTrending
# **** Tricky part ****
# Because it uses previous data for choppiness, you cannot take an average of the chopiness.
# The average is already built-in to the calculation. So evaluate any of the data falls
# into consolidating or trending regions.
#
@staticmethod
def get_ci(high, low, close, lookback):
tr1 = pd.DataFrame(high - low).rename(columns={0: 'tr1'})
tr2 = pd.DataFrame(abs(high - close.shift(1))).rename(columns={0: 'tr2'})
tr3 = pd.DataFrame(abs(low - close.shift(1))).rename(columns={0: 'tr3'})
frames = [tr1, tr2, tr3]
tr = pd.concat(frames, axis=1, join='inner').dropna().max(axis=1)
atr = tr.rolling(1).mean()
highh = high.rolling(lookback).max()
lowl = low.rolling(lookback).min()
ci = 100 * np.log10((atr.rolling(lookback).sum()) /
(highh - lowl)) / np.log10(lookback)
return ci
def trimIndexes(self, ci:list, startIndex:int, endIndex:int):
if startIndex < 0:
startIndex = 0
if endIndex > len(ci):
endIndex = len(ci)
if startIndex >= endIndex:
startIndex = endIndex - 1
return startIndex, endIndex
def isDistributionPhase(self, ci: list, volClimaxIndex: int):
startIndex = volClimaxIndex - self.barCountDistribution - 1
endIndex = startIndex + self.barCountDistribution
startIndex, endIndex = self.trimIndexes(ci, startIndex, endIndex)
for i in range(startIndex, endIndex):
if self.isDistributing(ci[i]):
return True
return False
def isAccumulationValid(self, ci:list, volClimaxIndex:int):
endIndex = volClimaxIndex - self.barCountVolClimaxRebound
startIndex = endIndex - self.barCountAccumulation
startIndex, endIndex = self.trimIndexes(ci, startIndex, endIndex)
for value in ci[startIndex:endIndex]:
if self.isAccumulating(value):
return True
return False
def Run(self, symbol:str, df:pd.DataFrame, volClimax:float, volClimaxIndex:int):
try:
if volClimax > self.minVolumeClimax:
data = WyckoffAccumlationDistribution.get_ci(
df['High'], df['Low'], df['Close'], self.lookback)
data = data.dropna()
ci = data.to_numpy()[::-1]
isDistribute = self.isDistributionPhase(ci, volClimaxIndex)
isAccumulate = self.isAccumulationValid(ci, volClimaxIndex)
return isDistribute and isAccumulate
return False
except Exception as e:
logging.error(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')
print(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')
return False
def RunWickoff(self, symbol:str, dataf:pd.DataFrame):
df = dataf[::-1]
df.reset_index()
data = WyckoffAccumlationDistribution.get_ci(
df['High'], df['Low'], df['Close'], self.lookback)
data = data.dropna()
|
[
"pandas.DataFrame",
"numpy.log10",
"logging.error",
"pandas.concat"
] |
[((1742, 1760), 'numpy.log10', 'np.log10', (['lookback'], {}), '(lookback)\n', (1750, 1760), True, 'import numpy as np\n'), ((1191, 1215), 'pandas.DataFrame', 'pd.DataFrame', (['(high - low)'], {}), '(high - low)\n', (1203, 1215), True, 'import pandas as pd\n'), ((3523, 3591), 'logging.error', 'logging.error', (['f"""WyckoffAccumlationDistribution.Run: {symbol} - {e}"""'], {}), "(f'WyckoffAccumlationDistribution.Run: {symbol} - {e}')\n", (3536, 3591), False, 'import logging\n'), ((1452, 1491), 'pandas.concat', 'pd.concat', (['frames'], {'axis': '(1)', 'join': '"""inner"""'}), "(frames, axis=1, join='inner')\n", (1461, 1491), True, 'import pandas as pd\n')]
|
'''
Created on 17 Mar 2018
@author: julianporter
'''
from OSGridConverter.algebra import Vector3
from OSGridConverter.mapping import Datum
from math import radians,degrees,sin,cos,sqrt,atan2,isnan
class Cartesian (Vector3):
def __init__(self,arg):
try:
phi=radians(arg.latitude)
l =radians(arg.longitude)
s=sin(phi)
c=cos(phi)
e=arg.ellipsoid.eccentricity(1)
nu=arg.ellipsoid.a/sqrt(1.0-e*s*s)
super(Cartesian,self).__init__([nu*c*cos(l),nu*c*sin(l),nu*(1-e)*s])
except:
super(Cartesian,self).__init__(arg)
def transform(self,oldTag,newTag):
if newTag==oldTag: return self
t1=Datum.get(oldTag).transform.inverse
t2=Datum.get(newTag).transform
return Cartesian(t2(t1(self)))
def toLatLong(self,ellipsoid):
try:
t=(1+ellipsoid.eb/self.normSquared)
s=t/sqrt(1+t*t)
c=s/t
if isnan(c): raise Exception()
phi=atan2(self.z+ellipsoid.eb*s*s*s,sqrt(self.x*self.x + self.y*self.y)-ellipsoid.ea*c*c*c)
except:
phi=0
l=atan2(self.y,self.x)
return(degrees(phi),degrees(l))
def __str__(self):
return '({},{},{})'.format(self.x,self.y,self.z)
def transformation(oldTag,newTag,vector):
if newTag==oldTag: return vector
t1=Datum.get(oldTag).transform.inverse
t2=Datum.get(newTag).transform
return Cartesian(t2(t1(vector)))
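# Usage sketch (an addition; it assumes 'WGS84' and 'OSGB36' are registered Datum tags in
# OSGridConverter.mapping):
# osgb_vector = transformation('WGS84', 'OSGB36', wgs84_cartesian)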
|
[
"math.isnan",
"math.sqrt",
"math.atan2",
"math.radians",
"math.sin",
"math.cos",
"OSGridConverter.mapping.Datum.get",
"math.degrees"
] |
[((1208, 1229), 'math.atan2', 'atan2', (['self.y', 'self.x'], {}), '(self.y, self.x)\n', (1213, 1229), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((1505, 1522), 'OSGridConverter.mapping.Datum.get', 'Datum.get', (['newTag'], {}), '(newTag)\n', (1514, 1522), False, 'from OSGridConverter.mapping import Datum\n'), ((285, 306), 'math.radians', 'radians', (['arg.latitude'], {}), '(arg.latitude)\n', (292, 306), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((323, 345), 'math.radians', 'radians', (['arg.longitude'], {}), '(arg.longitude)\n', (330, 345), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((361, 369), 'math.sin', 'sin', (['phi'], {}), '(phi)\n', (364, 369), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((384, 392), 'math.cos', 'cos', (['phi'], {}), '(phi)\n', (387, 392), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((800, 817), 'OSGridConverter.mapping.Datum.get', 'Datum.get', (['newTag'], {}), '(newTag)\n', (809, 817), False, 'from OSGridConverter.mapping import Datum\n'), ((1031, 1039), 'math.isnan', 'isnan', (['c'], {}), '(c)\n', (1036, 1039), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((1244, 1256), 'math.degrees', 'degrees', (['phi'], {}), '(phi)\n', (1251, 1256), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((1257, 1267), 'math.degrees', 'degrees', (['l'], {}), '(l)\n', (1264, 1267), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((1462, 1479), 'OSGridConverter.mapping.Datum.get', 'Datum.get', (['oldTag'], {}), '(oldTag)\n', (1471, 1479), False, 'from OSGridConverter.mapping import Datum\n'), ((477, 498), 'math.sqrt', 'sqrt', (['(1.0 - e * s * s)'], {}), '(1.0 - e * s * s)\n', (481, 498), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((753, 770), 'OSGridConverter.mapping.Datum.get', 'Datum.get', (['oldTag'], {}), '(oldTag)\n', (762, 770), False, 'from OSGridConverter.mapping import Datum\n'), ((986, 1001), 'math.sqrt', 'sqrt', (['(1 + t * t)'], {}), '(1 + t * t)\n', (990, 1001), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((1107, 1146), 'math.sqrt', 'sqrt', (['(self.x * self.x + self.y * self.y)'], {}), '(self.x * self.x + self.y * self.y)\n', (1111, 1146), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((559, 565), 'math.cos', 'cos', (['l'], {}), '(l)\n', (562, 565), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n'), ((571, 577), 'math.sin', 'sin', (['l'], {}), '(l)\n', (574, 577), False, 'from math import radians, degrees, sin, cos, sqrt, atan2, isnan\n')]
|
"""
https://www.practicepython.org
Exercise 18: Cows and Bulls
3 chilis
Create a program that will play the “cows and bulls” game with the user.
The game works like this:
Randomly generate a 4-digit number. Ask the user to guess a 4-digit number.
For every digit that the user guessed correctly in the correct place, they
have a “cow”. For every digit the user guessed correctly in the wrong place
is a “bull.” Every time the user makes a guess, tell them how many “cows”
and “bulls” they have. Once the user guesses the correct number, the game
is over. Keep track of the number of guesses the user makes throughout the
game and tell the user at the end.
Say the number generated by the computer is 1038. An example interaction
could look like this:
Welcome to the Cows and Bulls Game!
Enter a number:
>>> 1234
2 cows, 0 bulls
>>> 1256
1 cow, 1 bull
...
Until the user guesses the number.
"""
import random
def generate_target():
return(int(random.triangular() * 10000))
def compare(guess, target):
if guess > 9999:
print("guess must be 4 digits or less, try again.")
return False
cows = bulls = 0
g = [ int((guess % 10000) / 1000),
int((guess % 1000) / 100),
int((guess % 100) / 10),
int((guess % 10) / 1) ]
t = [ int((target % 10000) / 1000),
int((target % 1000) / 100),
int((target % 100) / 10),
int((target % 10) / 1) ]
for i in 3, 2, 1, 0:
if g[i] == t[i]:
g.pop(i)
t.pop(i)
cows += 1
for i in range(len(g)-1, -1, -1):
if g[i] in t:
t.pop(t.index(g[i]))
g.pop(i)
bulls += 1
if cows == 4:
return True
else:
print("cows: %d, bulls: %d " % (cows, bulls))
return False
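# Worked example (an added note): with target 1038 and guess 1234, the digits 1 and 3 sit in
# the correct places (2 cows) and no remaining digit matches elsewhere (0 bulls), which
# matches the sample dialogue in the module docstring.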
if __name__ == "__main__":
target = generate_target()
print("target is %4d" % target)
guess_count = 1
guess = int(input("What's your first guess? "))
while False == compare(guess, target):
guess_count += 1
guess = int(input("What's your next guess? "))
print("Took %d guesses to guess %4d." % (guess_count, target))
|
[
"random.triangular"
] |
[((971, 990), 'random.triangular', 'random.triangular', ([], {}), '()\n', (988, 990), False, 'import random\n')]
|
import time
mainIsOn = True
targetValue = -1
while mainIsOn:
print("Select category\n"
"0 - Close App\n"
"1 - Lists\n"
"2 - While\n")
if targetValue == -1:
try:
targetValue = int(input())
except ValueError as e:
print("Wrong statement. Try again!")
targetValue = -1
if targetValue == 1:
print("List = []\n"
"Initialize empty list\n"
"List = [a, b, c]\n"
"Initialize string List\n"
"List = [1, 2, 3]\n"
"Initialize string list\n")
time.sleep(5)
targetValue = -1
if targetValue == 2:
print("while boolean(True of False)\n"
"Or \n"
"a = 0\n"
"while a < 100:\n"
"print(do something)\n"
"a = a + 1")
print("Operator break\n"
"force stop circle while and if")
time.sleep(5)
targetValue = -1
if targetValue == 0:
print("Close app")
time.sleep(1)
print("5")
time.sleep(1)
print("4")
time.sleep(1)
print("3")
time.sleep(1)
print("2")
time.sleep(1)
print("1")
mainIsOn = False
time.sleep(1)
print("Bye Bye!")
|
[
"time.sleep"
] |
[((610, 623), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (620, 623), False, 'import time\n'), ((954, 967), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (964, 967), False, 'import time\n'), ((1053, 1066), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1063, 1066), False, 'import time\n'), ((1094, 1107), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1104, 1107), False, 'import time\n'), ((1135, 1148), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1145, 1148), False, 'import time\n'), ((1176, 1189), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1186, 1189), False, 'import time\n'), ((1217, 1230), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1227, 1230), False, 'import time\n'), ((1283, 1296), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1293, 1296), False, 'import time\n')]
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import logging
from rest_framework import permissions
from backend.bcs_web.constants import ACCESS_TOKEN_KEY_NAME
from backend.components.paas_auth import get_access_token
from backend.utils import FancyDict, whitelist
logger = logging.getLogger(__name__)
class RemoteAccessPermission(permissions.BasePermission):
def has_permission(self, request, view):
if request.user.is_anonymous:
return False
return True
class AccessTokenPermission(RemoteAccessPermission):
message = "no valid access_token"
def has_permission(self, request, view):
has_perm = super().has_permission(request, view)
if not has_perm:
return False
access_token = request.META.get(ACCESS_TOKEN_KEY_NAME, "")
if access_token:
try:
from backend.components.paas_auth import get_user_by_access_token
except ImportError:
pass
else:
user = get_user_by_access_token(access_token)
if user.get("user_id") != request.user.username:
return False
request.user.token = FancyDict(access_token=access_token)
return True
return False
class ClientAccessTokenPermission(RemoteAccessPermission):
message = "no valid access_token"
def has_permission(self, request, view):
has_perm = super().has_permission(request, view)
if not has_perm:
return False
access_token = request.META.get(ACCESS_TOKEN_KEY_NAME, "")
request.user.token = FancyDict(user_access_token=access_token)
access_token = get_access_token().get("access_token")
request.user.token.access_token = access_token
return True
class BKAppPermission(permissions.BasePermission):
"""调用接口的app是否有项目权限"""
def has_permission(self, request, view):
has_perm = super().has_permission(request, view)
if not has_perm:
return False
project_id_or_code = view.kwargs.get("project_id_or_code")
if not project_id_or_code:
return False
app_code = request.user.client.app.app_code
return whitelist.can_access_webconsole(app_code, project_id_or_code)
|
[
"backend.components.paas_auth.get_access_token",
"backend.utils.FancyDict",
"backend.utils.whitelist.can_access_webconsole",
"backend.components.paas_auth.get_user_by_access_token",
"logging.getLogger"
] |
[((962, 989), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (979, 989), False, 'import logging\n'), ((2314, 2355), 'backend.utils.FancyDict', 'FancyDict', ([], {'user_access_token': 'access_token'}), '(user_access_token=access_token)\n', (2323, 2355), False, 'from backend.utils import FancyDict, whitelist\n'), ((2923, 2984), 'backend.utils.whitelist.can_access_webconsole', 'whitelist.can_access_webconsole', (['app_code', 'project_id_or_code'], {}), '(app_code, project_id_or_code)\n', (2954, 2984), False, 'from backend.utils import FancyDict, whitelist\n'), ((1882, 1918), 'backend.utils.FancyDict', 'FancyDict', ([], {'access_token': 'access_token'}), '(access_token=access_token)\n', (1891, 1918), False, 'from backend.utils import FancyDict, whitelist\n'), ((1711, 1749), 'backend.components.paas_auth.get_user_by_access_token', 'get_user_by_access_token', (['access_token'], {}), '(access_token)\n', (1735, 1749), False, 'from backend.components.paas_auth import get_user_by_access_token\n'), ((2379, 2397), 'backend.components.paas_auth.get_access_token', 'get_access_token', ([], {}), '()\n', (2395, 2397), False, 'from backend.components.paas_auth import get_access_token\n')]
|
#!/user/bin/env python
#_*_ coding=utf-8 *_*
"""
Function: WeChat message auto-reply
Date: 2015/05/26
Author: lvzhang
ChangeLog: v0.1 init
"""
import itchat
@itchat.msg_register('Text')
def text_replay(msg):
    # Implement the question-answering logic yourself
    print("已经自动回复")  # "auto-reply has been sent"
    return "[自动回复]您好,我正忙,一会儿再联系您!!!"  # "[Auto-reply] Hello, I'm busy right now; I'll get back to you later!!!"
if __name__=="__main__":
print("运行成功!!!")
itchat.auto_login(hotReload=True)
itchat.run()
|
[
"itchat.auto_login",
"itchat.run",
"itchat.msg_register"
] |
[((138, 165), 'itchat.msg_register', 'itchat.msg_register', (['"""Text"""'], {}), "('Text')\n", (157, 165), False, 'import itchat\n'), ((310, 343), 'itchat.auto_login', 'itchat.auto_login', ([], {'hotReload': '(True)'}), '(hotReload=True)\n', (327, 343), False, 'import itchat\n'), ((348, 360), 'itchat.run', 'itchat.run', ([], {}), '()\n', (358, 360), False, 'import itchat\n')]
|
import unittest
import random
import numpy as np
from mep.genetics.population import Population
from mep.genetics.chromosome import Chromosome
class TestPopulation(unittest.TestCase):
"""
Test the Population class.
"""
def test_random_tournament_selection(self):
"""
Test the random_tournament_selection(...)
"""
# make it so this repeatable
random.seed(0)
# construct the population
num_examples = 5
num_features = 7
population = Population(np.zeros((num_examples, num_features)), [], 1, 1, 1, 1, 1, 1, 1)
# confirm the number of feature variables (not critical for this test)
self.assertEqual(num_features, population.num_feature_variables)
        # test the tournament selection; note that it randomly chooses the worse chromosome
min_chromosome, max_chromosome = Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
max_chromosome.error = 2
population.chromosomes = [min_chromosome, max_chromosome]
self.assertEqual(max_chromosome, population.random_tournament_selection(1))
def test_larger_random_tournament_selection(self):
"""
Test the random_tournament_selection(...)
"""
# make it so this repeatable
random.seed(0)
# construct the population
num_examples = 5
num_features = 7
population = Population(np.zeros((num_examples, num_features)), [], 1, 1, 1, 1, 1, 1, 1)
        # test the tournament selection; note that with a larger tournament it selects the better chromosome
min_chromosome, max_chromosome = Chromosome([], []), Chromosome([], [])
min_chromosome.error = 1
max_chromosome.error = 2
population.chromosomes = [min_chromosome, max_chromosome]
self.assertEqual(min_chromosome, population.random_tournament_selection(10))
|
[
"numpy.zeros",
"random.seed",
"mep.genetics.chromosome.Chromosome"
] |
[((401, 415), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (412, 415), False, 'import random\n'), ((1321, 1335), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (1332, 1335), False, 'import random\n'), ((534, 572), 'numpy.zeros', 'np.zeros', (['(num_examples, num_features)'], {}), '((num_examples, num_features))\n', (542, 572), True, 'import numpy as np\n'), ((891, 909), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (901, 909), False, 'from mep.genetics.chromosome import Chromosome\n'), ((911, 929), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (921, 929), False, 'from mep.genetics.chromosome import Chromosome\n'), ((1454, 1492), 'numpy.zeros', 'np.zeros', (['(num_examples, num_features)'], {}), '((num_examples, num_features))\n', (1462, 1492), True, 'import numpy as np\n'), ((1658, 1676), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (1668, 1676), False, 'from mep.genetics.chromosome import Chromosome\n'), ((1678, 1696), 'mep.genetics.chromosome.Chromosome', 'Chromosome', (['[]', '[]'], {}), '([], [])\n', (1688, 1696), False, 'from mep.genetics.chromosome import Chromosome\n')]
|
from wtforms import (
StringField, SelectField, HiddenField
)
from webapp.home.forms import EDIForm
class AccessSelectForm(EDIForm):
pass
class AccessForm(EDIForm):
userid = StringField('User ID', validators=[])
permission = SelectField('Permission',
choices=[("all", "all"), ("changePermission", "changePermission"), ("read", "read"), ("write", "write")])
md5 = HiddenField('')
init_str = 'all'
# def field_data(self)->tuple:
# return (self.userid.data, self.permission.data)
|
[
"wtforms.StringField",
"wtforms.HiddenField",
"wtforms.SelectField"
] |
[((191, 228), 'wtforms.StringField', 'StringField', (['"""User ID"""'], {'validators': '[]'}), "('User ID', validators=[])\n", (202, 228), False, 'from wtforms import StringField, SelectField, HiddenField\n'), ((246, 381), 'wtforms.SelectField', 'SelectField', (['"""Permission"""'], {'choices': "[('all', 'all'), ('changePermission', 'changePermission'), ('read', 'read'),\n ('write', 'write')]"}), "('Permission', choices=[('all', 'all'), ('changePermission',\n 'changePermission'), ('read', 'read'), ('write', 'write')])\n", (257, 381), False, 'from wtforms import StringField, SelectField, HiddenField\n'), ((417, 432), 'wtforms.HiddenField', 'HiddenField', (['""""""'], {}), "('')\n", (428, 432), False, 'from wtforms import StringField, SelectField, HiddenField\n')]
|
import time, discord
from Config._functions import grammar_list
class EVENT:
LOADED = False
RUNNING = False
param = { # Define all the parameters necessary
"CHANNEL": "",
"EMOJIS": []
}
# Executes when loaded
def __init__(self):
self.LOADED = True
# Executes when activated
def start(self, TWOW_CENTRAL, PARAMS): # Set the parameters
self.RUNNING = True
# Executes when deactivated
def end(self): # Reset the parameters
self.param = {
"CHANNEL": "",
"EMOJIS": []
}
self.RUNNING = False
# Function that runs on each message
async def on_message(self, message, PERMS):
if message.channel.mention != self.param["CHANNEL"]:
return # Only messages that are in the channel
for emoji in self.param["EMOJIS"]:
try: # Add the reactions
await message.add_reaction(emoji)
except Exception: # If a reaction is invalid, skip it
continue
# Change a parameter of the event
async def edit_event(self, message, new_params):
incorrect = []
correct = []
for parameter in new_params.keys():
try:
self.param[parameter] = new_params[parameter]
correct.append(parameter)
except KeyError:
incorrect.append(parameter)
if len(correct) > 0:
await message.channel.send(f"Successfully changed the parameters: {grammar_list(correct)}")
if len(incorrect) > 0:
await message.channel.send(f"The following parameters are invalid: {grammar_list(incorrect)}")
return
|
[
"Config._functions.grammar_list"
] |
[((1284, 1305), 'Config._functions.grammar_list', 'grammar_list', (['correct'], {}), '(correct)\n', (1296, 1305), False, 'from Config._functions import grammar_list\n'), ((1405, 1428), 'Config._functions.grammar_list', 'grammar_list', (['incorrect'], {}), '(incorrect)\n', (1417, 1428), False, 'from Config._functions import grammar_list\n')]
|
import re
from typing import Any, Dict, List, Optional, Type
from dokklib_db.errors import exceptions as ex
from dokklib_db.errors.client import ClientError
from dokklib_db.op_args import OpArg
CancellationReasons = List[Optional[Type[ClientError]]]
class TransactionCanceledException(ClientError):
"""The entire transaction request was canceled.
Please see DynamoDB docs for details.
https://docs.aws.amazon.com/amazondynamodb/latest/APIReference/API_TransactWriteItems.html
"""
# Example match: "reasons [ConditionalCheckFailed, None]"
_reasons_re = re.compile(r'reasons\W+\[([A-Za-z0-9, ]+)]', re.MULTILINE)
_codes_to_exceptions: Dict[str, Type[ClientError]] = {
'ConditionalCheckFailed': ex.ConditionalCheckFailedException,
'ItemCollectionSizeLimitExceeded': ex.ItemCollectionSizeLimitExceededException, # noqa 501
'TransactionConflict': ex.TransactionConflictException,
'ProvisionedThroughputExceeded': ex.ProvisionedThroughputExceededException, # noqa 501
'ThrottlingError': ex.ThrottlingError,
'ValidationError': ex.ValidationError
}
def __init__(self, op_args: List[OpArg], *args: Any, **kwargs: Any):
"""Initialize a TransactionCanceledException instance.
Args:
op_args: The list of operations that were the inputs to this
transaction.
"""
super().__init__(*args, **kwargs)
self._op_args = list(op_args)
self._reasons: Optional[CancellationReasons] = None
def _extract_reasons(self, message: str) -> List[str]:
match = re.search(self._reasons_re, message)
if not match:
return []
else:
reasons = match.group(1)
split = reasons.split(', ')
if split[0] == reasons:
return reasons.split(',')
else:
return split
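    # Example (an added note): for a message ending in
    # "... reasons [ConditionalCheckFailed, None]", the regex above yields
    # ['ConditionalCheckFailed', 'None'], which _get_reasons maps to
    # [ConditionalCheckFailedException, None].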
def _get_reasons(self) -> CancellationReasons:
db_error = self.response.get('Error', {})
message = db_error.get('Message', '')
reasons = self._extract_reasons(message)
res: CancellationReasons = []
for r in reasons:
if r == 'None':
res.append(None)
else:
exception = self._codes_to_exceptions.get(r, ClientError)
res.append(exception)
if len(res) != len(self.op_args):
msg = f'Transaction cancellation reasons don\'t match ' \
f'transaction arguments in error:\n{message}'
raise ValueError(msg)
return res
@property
def op_args(self) -> List[OpArg]:
"""Get the list of inputs to the transaction."""
return self._op_args
@property
def reasons(self) -> CancellationReasons:
"""List of cancellation reasons for each item in the transaction.
Corresponds to order of `op_args`.
"""
if self._reasons is None:
self._reasons = self._get_reasons()
return self._reasons
def has_error(self, exception: Type[ClientError]) -> bool:
"""Whether the transaction failed due to a particular exception.
Args:
exception: The exception type to check for, eg. `ValidationError`.
Returns:
True if any of the failure reasons match the exception type.
"""
for r in self.reasons:
if r is exception:
return True
else:
return False
|
[
"re.search",
"re.compile"
] |
[((584, 643), 're.compile', 're.compile', (['"""reasons\\\\W+\\\\[([A-Za-z0-9, ]+)]"""', 're.MULTILINE'], {}), "('reasons\\\\W+\\\\[([A-Za-z0-9, ]+)]', re.MULTILINE)\n", (594, 643), False, 'import re\n'), ((1615, 1651), 're.search', 're.search', (['self._reasons_re', 'message'], {}), '(self._reasons_re, message)\n', (1624, 1651), False, 'import re\n')]
|
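The cancellation-reason parsing in TransactionCanceledException above boils down to one regular expression plus a split; a standalone sketch with an invented DynamoDB-style error message:
# Minimal sketch of the reason extraction; the message text is made up for illustration
import re
reasons_re = re.compile(r'reasons\W+\[([A-Za-z0-9, ]+)]', re.MULTILINE)
message = 'Transaction cancelled, see cancellation reasons [ConditionalCheckFailed, None]'
match = re.search(reasons_re, message)
reasons = match.group(1).split(', ') if match else []
print(reasons)  # ['ConditionalCheckFailed', 'None']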
# -*- coding: utf-8 -*-
from __future__ import print_function,division
import os
import time
import argparse
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torch.autograd import Variable
from torchvision import datasets,transforms
from load_text import load_dataset
from rank_loss import ImageSelector,TextSelector
from loss import TripletLoss
from model_rank import Merge_image_text
from test_acc import test
import utils
from utils import getDataset
from loader import ClassUniformlySampler
import random
#os.system("ulimit -n 5000000")
torch.multiprocessing.set_sharing_strategy('file_system')
parser = argparse.ArgumentParser(description='Training arguments')
parser.add_argument('--save_path',type=str,default='./flickr30k-56-stage2/')
parser.add_argument('--datasets',type=str,default='/data/reid/flickr30k/Dual-path/')
parser.add_argument('--batch_size',type=int,default=32,help='batch_size')
parser.add_argument('--learning_rate',type=float,default=0.001,help = 'FC parms learning rate')
parser.add_argument('--epochs',type=int,default=120,help='The number of epochs to train')
parser.add_argument('--stage',type=str,default='II',choices=['I','II'],help='which stage is on')
arg = parser.parse_args()
torch.manual_seed(1)
torch.cuda.manual_seed_all(1)
#Make saving directory
save_dir_path = arg.save_path
os.makedirs(save_dir_path,exist_ok=True)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
# -------------------------------------Train Function--------------------------------------
def train_rank(model,criterion,optimizer,scheduler,dataloder,text_loader,num_epochs,device,stage):
start_time = time.time()
# Logger instance
logger = utils.Logger(save_dir_path)
logger.info('-'*10)
logger.info(vars(arg))
logger.info('Stage: '+stage)
print("############################ Train stage II #############################")
get = list(zip(dataloder,text_loader))
random.shuffle(get)
img,txt = zip(*get)
for epoch in range(num_epochs):
logger.info('Epoch {}/{}'.format(epoch+1,num_epochs))
model.train()
scheduler.step()
##Training
batch_num = 0
loss_avg = []
for (inputs,labels),(text_inputs,text_labels) in zip(img,txt):
batch_num += 1
inputs = inputs.to(device)
labels = labels.to(device)
text_inputs = text_inputs.to(device)
text_labels = text_labels.to(device,dtype=torch.int64)
outputs,text_outs = model(inputs,text_inputs)
# print("output.shape:: ",outputs.shape)
# print("text_out.shape:: ",text_outs.shape)
# print("label.shape: ",labels)
# print("text_label.shape:: ",text_labels)
anc_IT,pos_IT,neg_IT = ImageSelector(outputs,text_outs,labels)
anc_TI,pos_TI,neg_TI = TextSelector(text_outs,outputs,labels)
loss_rank = criterion(anc_IT,pos_IT,neg_IT)+criterion(anc_TI,pos_TI,neg_TI)
optimizer.zero_grad()
loss_rank.backward()
optimizer.step()
loss_avg.append(loss_rank.item())
if batch_num % 10 == 0:
loss_avg = sum(loss_avg) / len(loss_avg)
logger.info('Stage II training: {} [{}]\t Rank_loss:{:.6f}'.format(epoch+1,batch_num*len(inputs),loss_avg))
loss_avg = []
if (epoch+1)%10==0 or epoch+1 == num_epochs:
##Testing / Vlidaing
torch.cuda.empty_cache()
# model.mode = 'test'
CMC,mAP = test(model,arg.datasets,128)
logger.info('Testing: Top1:{:.2f}% Top5:{:.2f}% Top10:{:.2f}% mAP:{:.2f}%'.format(CMC[0],CMC[4],CMC[9],mAP))
print("=======================================================")
logger.info('-'*10)
time_cost = time.time()-start_time
logger.info('Training complete in {:.0f}m {:.0f}s'.format(
time_cost//60,time_cost%60
))
utils.save_network(model,save_dir_path,'final_r')
class IterLoader:
def __init__(self,loader):
self.loader=loader
self.iter = iter(self.loader)
def next_one(self):
try:
return next(self.iter)
except:
self.iter = iter(self.loader)
return next(self.iter)
if __name__ == "__main__":
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Loader image dataset ,PK samples
seeds = random.randint(0,100)
datasets_img = getDataset(arg.datasets,arg.batch_size,'train')
loader = torch.utils.data.DataLoader(datasets_img, batch_size=32, num_workers=0, drop_last=False, # default 16 works
sampler=ClassUniformlySampler(datasets_img, class_position=1, k=4, seeds=seeds))
dataloader_img = IterLoader(loader)
#print('labels_img: ',dataloader_img.next_one()[1])
##Loader txt dataset , PK samples
dataset_text = load_dataset(arg.datasets,'train',arg.batch_size, datasets_img)
loader_txt = torch.utils.data.DataLoader(dataset_text, batch_size=32, num_workers=0, drop_last=False, # 16 works
sampler=ClassUniformlySampler(dataset_text, class_position=1, k=4, seeds=seeds))
dataloader_txt = IterLoader(loader_txt)
#print('dataloader_txt: ',dataloader_txt.next_one()[1])
##############################################
model = Merge_image_text(num_class=len(datasets_img.classes),mode = 'test') #Stage II ,change to 'test',Stage I:'train'
model = model.to(device)
# criterion = nn.CrossEntropyLoss()
criterion = TripletLoss(margin = 1).cuda() #no margin means soft-margin
#delete module parallel
optimizer = optim.SGD([
{'params':model.image_feature.backbone.parameters(),'lr':arg.learning_rate},
# {'params':model.image_feature.fc1.parameters(),'lr':arg.learning_rate},
# {'params':model.image_feature.fc.parameters(),'lr':arg.learning_rate},
{'params':model.text_feature.parameters(),'lr':arg.learning_rate/10}
],lr=0.001,momentum=0.9,weight_decay = 5e-4,nesterov=True)
scheduler = lr_scheduler.StepLR(optimizer,step_size=100,gamma=0.1)
#---------------------Start training----------------------
#Stage I
# train(model,criterion,optimizer,scheduler,train_dataloder,train_dataloder_text,arg.epochs,device,'I')
##Stage II
model.load_state_dict(torch.load('./flickr30k-56-stage1/net_final.pth'),strict=False)
train_rank(model,criterion,optimizer,scheduler,loader,loader_txt,arg.epochs,device,'II')
torch.cuda.empty_cache()
|
[
"torch.optim.lr_scheduler.StepLR",
"argparse.ArgumentParser",
"loss.TripletLoss",
"random.shuffle",
"loader.ClassUniformlySampler",
"load_text.load_dataset",
"random.randint",
"torch.multiprocessing.set_sharing_strategy",
"torch.load",
"utils.getDataset",
"utils.save_network",
"utils.Logger",
"torch.manual_seed",
"torch.cuda.is_available",
"rank_loss.ImageSelector",
"rank_loss.TextSelector",
"os.makedirs",
"test_acc.test",
"time.time",
"torch.cuda.manual_seed_all",
"torch.cuda.empty_cache"
] |
[((628, 685), 'torch.multiprocessing.set_sharing_strategy', 'torch.multiprocessing.set_sharing_strategy', (['"""file_system"""'], {}), "('file_system')\n", (670, 685), False, 'import torch\n'), ((696, 753), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Training arguments"""'}), "(description='Training arguments')\n", (719, 753), False, 'import argparse\n'), ((1312, 1332), 'torch.manual_seed', 'torch.manual_seed', (['(1)'], {}), '(1)\n', (1329, 1332), False, 'import torch\n'), ((1334, 1363), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(1)'], {}), '(1)\n', (1360, 1363), False, 'import torch\n'), ((1422, 1463), 'os.makedirs', 'os.makedirs', (['save_dir_path'], {'exist_ok': '(True)'}), '(save_dir_path, exist_ok=True)\n', (1433, 1463), False, 'import os\n'), ((1718, 1729), 'time.time', 'time.time', ([], {}), '()\n', (1727, 1729), False, 'import time\n'), ((1767, 1794), 'utils.Logger', 'utils.Logger', (['save_dir_path'], {}), '(save_dir_path)\n', (1779, 1794), False, 'import utils\n'), ((2019, 2038), 'random.shuffle', 'random.shuffle', (['get'], {}), '(get)\n', (2033, 2038), False, 'import random\n'), ((4126, 4177), 'utils.save_network', 'utils.save_network', (['model', 'save_dir_path', '"""final_r"""'], {}), "(model, save_dir_path, 'final_r')\n", (4144, 4177), False, 'import utils\n'), ((4639, 4661), 'random.randint', 'random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (4653, 4661), False, 'import random\n'), ((4681, 4730), 'utils.getDataset', 'getDataset', (['arg.datasets', 'arg.batch_size', '"""train"""'], {}), "(arg.datasets, arg.batch_size, 'train')\n", (4691, 4730), False, 'from utils import getDataset\n'), ((5133, 5198), 'load_text.load_dataset', 'load_dataset', (['arg.datasets', '"""train"""', 'arg.batch_size', 'datasets_img'], {}), "(arg.datasets, 'train', arg.batch_size, datasets_img)\n", (5145, 5198), False, 'from load_text import load_dataset\n'), ((6352, 6408), 'torch.optim.lr_scheduler.StepLR', 'lr_scheduler.StepLR', (['optimizer'], {'step_size': '(100)', 'gamma': '(0.1)'}), '(optimizer, step_size=100, gamma=0.1)\n', (6371, 6408), False, 'from torch.optim import lr_scheduler\n'), ((6802, 6826), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6824, 6826), False, 'import torch\n'), ((3990, 4001), 'time.time', 'time.time', ([], {}), '()\n', (3999, 4001), False, 'import time\n'), ((6639, 6688), 'torch.load', 'torch.load', (['"""./flickr30k-56-stage1/net_final.pth"""'], {}), "('./flickr30k-56-stage1/net_final.pth')\n", (6649, 6688), False, 'import torch\n'), ((2914, 2955), 'rank_loss.ImageSelector', 'ImageSelector', (['outputs', 'text_outs', 'labels'], {}), '(outputs, text_outs, labels)\n', (2927, 2955), False, 'from rank_loss import ImageSelector, TextSelector\n'), ((2990, 3030), 'rank_loss.TextSelector', 'TextSelector', (['text_outs', 'outputs', 'labels'], {}), '(text_outs, outputs, labels)\n', (3002, 3030), False, 'from rank_loss import ImageSelector, TextSelector\n'), ((3630, 3654), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3652, 3654), False, 'import torch\n'), ((3713, 3743), 'test_acc.test', 'test', (['model', 'arg.datasets', '(128)'], {}), '(model, arg.datasets, 128)\n', (3717, 3743), False, 'from test_acc import test\n'), ((4548, 4573), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (4571, 4573), False, 'import torch\n'), ((4900, 4971), 'loader.ClassUniformlySampler', 'ClassUniformlySampler', (['datasets_img'], {'class_position': '(1)', 'k': '(4)', 
'seeds': 'seeds'}), '(datasets_img, class_position=1, k=4, seeds=seeds)\n', (4921, 4971), False, 'from loader import ClassUniformlySampler\n'), ((5368, 5439), 'loader.ClassUniformlySampler', 'ClassUniformlySampler', (['dataset_text'], {'class_position': '(1)', 'k': '(4)', 'seeds': 'seeds'}), '(dataset_text, class_position=1, k=4, seeds=seeds)\n', (5389, 5439), False, 'from loader import ClassUniformlySampler\n'), ((5818, 5839), 'loss.TripletLoss', 'TripletLoss', ([], {'margin': '(1)'}), '(margin=1)\n', (5829, 5839), False, 'from loss import TripletLoss\n')]
|
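One detail worth isolating from the training script above is the IterLoader wrapper, which silently restarts its loader once exhausted; the same behaviour can be checked with a plain list standing in for a torch DataLoader:
# List-based stand-in for IterLoader's wrap-around iteration
class IterLoader:
    def __init__(self, loader):
        self.loader = loader
        self.it = iter(self.loader)
    def next_one(self):
        try:
            return next(self.it)
        except StopIteration:
            self.it = iter(self.loader)
            return next(self.it)
batches = IterLoader([1, 2, 3])
print([batches.next_one() for _ in range(5)])  # [1, 2, 3, 1, 2]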
#!/usr/bin/env python3
#-*- coding: utf-8 -*-
#=======================================================================================
# Imports
#=======================================================================================
import sys
import os
from lib.configutils import *
#=======================================================================================
# Library
#=======================================================================================
#==========================================================
class DefaultsConfigSetup(ConfigSetup):
#=============================
"""'ConfigSetup' for general defaults for cappman."""
#=============================
def __init__(self):
super().__init__()
self.addOption(ConfigOption(varName="cappConfigDirPath", configName="cappconfigdir", optionTypes=[ConfigOptionCanonicalizedFilePathType()], enforceAssignment=True))
self.addOption(\
ConfigOption(varName="pluginDirPaths",\
configName="plugindirs",\
defaultValue="[\""+os.path.realpath(os.path.join(os.path.dirname(sys.argv[0]), "plugins")+"\"]"),\
optionTypes=[ConfigOptionListType(merge=True),\
ConfigOptionCanonicalizedFilePathType()]))
#==========================================================
class Defaults(Namespace):
#=============================
"""Capp Manager default config."""
#=============================
def __init__(self):
super().__init__()
# Hard coded defaults & other vital values.
self.execPath = sys.argv[0]
self.execDirPath = os.path.realpath(os.path.dirname(self.execPath))
self.distConfigDirPath = os.path.join(self.execDirPath, "config")
self.distPluginDirPath = os.path.join(self.execDirPath, "plugins")
self.distCappmanConfigPath = os.path.join(self.distConfigDirPath, "cappman.conf")
self.pluginDirNames = {\
"cappExtensions": "cappextensions",\
"callFlavors": "cappflavors",\
"cappLibs": "capplibs",\
"languages": "languages"}
# Configured defaults.
config = DefaultsConfigSetup().getConfig(configFilePaths=[self.distCappmanConfigPath])
self.cappConfigDirPath = config.cappConfigDirPath
#print("[DEBUG] [cappconfig.py.Defaults]"[self.distPluginDirPath]+config.pluginDirPaths)
self.pluginDirPaths = [self.distPluginDirPath]+config.pluginDirPaths
#==========================================================
class BasicCappConfigSetup(ConfigSetup):
#=============================
"""Basic capp information needed to decide which capp flavor we're dealing with, and other
information that's universal across all flavors."""
#=============================
def __init__(self, configFilePaths=[]):
super().__init__(configFilePaths)
self.addOption(ConfigOption(varName="cappFlavorName", configName="cappflavor", category="main", enforceAssignment=True))
self.addOption(ConfigOption(varName="name", configName="name", category="main", enforceAssignment=True))
#==========================================================
class BasicFlavorConfigSetup(ConfigSetup):
def __init__(self):
super().__init__()
self.addOption(ConfigOption(varName="cappLibName", configName="capplib", category="main", enforceAssignment=True))
|
[
"os.path.dirname",
"os.path.join"
] |
[((1605, 1645), 'os.path.join', 'os.path.join', (['self.execDirPath', '"""config"""'], {}), "(self.execDirPath, 'config')\n", (1617, 1645), False, 'import os\n'), ((1673, 1714), 'os.path.join', 'os.path.join', (['self.execDirPath', '"""plugins"""'], {}), "(self.execDirPath, 'plugins')\n", (1685, 1714), False, 'import os\n'), ((1746, 1798), 'os.path.join', 'os.path.join', (['self.distConfigDirPath', '"""cappman.conf"""'], {}), "(self.distConfigDirPath, 'cappman.conf')\n", (1758, 1798), False, 'import os\n'), ((1546, 1576), 'os.path.dirname', 'os.path.dirname', (['self.execPath'], {}), '(self.execPath)\n', (1561, 1576), False, 'import os\n'), ((1052, 1080), 'os.path.dirname', 'os.path.dirname', (['sys.argv[0]'], {}), '(sys.argv[0])\n', (1067, 1080), False, 'import os\n')]
|
#! /usr/bin/python3
import subprocess
import time
import sys
import os
subprocess.Popen(["./marueditor.py","--debug"])
while 1:
time.sleep(1)
|
[
"subprocess.Popen",
"time.sleep"
] |
[((72, 120), 'subprocess.Popen', 'subprocess.Popen', (["['./marueditor.py', '--debug']"], {}), "(['./marueditor.py', '--debug'])\n", (88, 120), False, 'import subprocess\n'), ((133, 146), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (143, 146), False, 'import time\n')]
|
import os, yaml
config = {
'debug': False,
'user': '',
'token': '',
'sql_url': '',
'client_id': '',
'client_secret': '',
'cookie_secret': '',
'redirect_uri': '',
'web_port': 8001,
'irc': {
'host': 'irc.chat.twitch.tv',
'port': 6697,
'use_ssl': True,
},
'pubsub_url': 'wss://pubsub-edge.twitch.tv',
'discord': {
'token': None,
'email': None,
'password': None,
'bot': True,
},
'logging': {
'level': 'warning',
'path': None,
'max_size': 100 * 1000 * 1000,# ~ 95 mb
'num_backups': 10,
},
'mysql': {
'host': '127.0.0.1',
'port': 3306,
'user': 'root',
'password': '',
'database': 'logitch',
},
}
def load(path=None):
default_paths = [
'~/logitch.yaml',
'./logitch.yaml',
'../logitch.yaml',
'../../logitch.yaml',
'/etc/logitch/logitch.yaml',
'/etc/logitch.yaml',
]
if not path:
path = os.environ.get('LOGITCH_CONFIG', None)
if not path:
for p in default_paths:
p = os.path.expanduser(p)
if os.path.isfile(p):
path = p
break
if not path:
raise Exception('No config file specified.')
if not os.path.isfile(path):
raise Exception('Config: "{}" could not be found.'.format(path))
with open(path) as f:
data = yaml.load(f)
for key in data:
if key in config:
if isinstance(config[key], dict):
config[key].update(data[key])
else:
config[key] = data[key]
|
[
"os.environ.get",
"os.path.isfile",
"yaml.load",
"os.path.expanduser"
] |
[((1048, 1086), 'os.environ.get', 'os.environ.get', (['"""LOGITCH_CONFIG"""', 'None'], {}), "('LOGITCH_CONFIG', None)\n", (1062, 1086), False, 'import os, yaml\n'), ((1360, 1380), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (1374, 1380), False, 'import os, yaml\n'), ((1496, 1508), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (1505, 1508), False, 'import os, yaml\n'), ((1164, 1185), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (1182, 1185), False, 'import os, yaml\n'), ((1205, 1222), 'os.path.isfile', 'os.path.isfile', (['p'], {}), '(p)\n', (1219, 1222), False, 'import os, yaml\n')]
|
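The load() helper above merges the parsed YAML into the module-level defaults section by section: nested dict sections are updated key-by-key, scalar settings are replaced outright. A small sketch of just that merge rule, with an invented parsed document:
# Merge rule only; 'data' stands in for the dict yaml.load would return
defaults = {'debug': False, 'mysql': {'host': '127.0.0.1', 'port': 3306}}
data = {'debug': True, 'mysql': {'host': 'db.example.com'}}
for key in data:
    if key in defaults:
        if isinstance(defaults[key], dict):
            defaults[key].update(data[key])
        else:
            defaults[key] = data[key]
print(defaults)  # {'debug': True, 'mysql': {'host': 'db.example.com', 'port': 3306}}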
import config.config as config
# Decoder class for use with a rotary encoder.
class decoder:
"""Class to decode mechanical rotary encoder pulses."""
def __init__(self, pi, rot_gpioA, rot_gpioB, switch_gpio, rotation_callback, switch_callback):
"""
Instantiate the class with the pi and gpios connected to
rotary encoder contacts A and B. The common contact
should be connected to ground. The callback is
called when the rotary encoder is turned. It takes
one parameter which is +1 for clockwise and -1 for
counterclockwise.
EXAMPLE
import time
import pigpio
import rotary_encoder
pos = 0
def callback(way):
global pos
pos += way
print("pos={}".format(pos))
pi = config.pigpio.pi()
decoder = rotary_encoder.decoder(pi, 7, 8, callback)
time.sleep(300)
decoder.cancel()
pi.stop()
"""
self.pi = pi
self.rot_gpioA = rot_gpioA
self.rot_gpioB = rot_gpioB
self.rot_callback = rotation_callback
self.sw_callback = switch_callback
self.levA = 0
self.levB = 0
self.lastGpio = None
# Setting up rotary encoder, including callback.
self.pi.set_mode(rot_gpioA, config.pigpio.INPUT)
self.pi.set_mode(rot_gpioB, config.pigpio.INPUT)
self.pi.set_pull_up_down(rot_gpioA, config.pigpio.PUD_UP)
self.pi.set_pull_up_down(rot_gpioB, config.pigpio.PUD_UP)
self.cbA = self.pi.callback(rot_gpioA, config.pigpio.EITHER_EDGE, self._pulse)
self.cbB = self.pi.callback(rot_gpioB, config.pigpio.EITHER_EDGE, self._pulse)
# Setting up switch of rotary encoder.
self.pi.set_mode(switch_gpio, config.pigpio.INPUT)
self.pi.set_mode(switch_gpio, config.pigpio.INPUT)
self.pi.set_pull_up_down(switch_gpio, config.pigpio.PUD_UP)
self.pi.set_pull_up_down(switch_gpio, config.pigpio.PUD_UP)
self.switch_cb = self.pi.callback(switch_gpio, config.pigpio.RISING_EDGE, self._switch_toggle)
# Handles the switch part of the rotary encoder.
def _switch_toggle(self, gpio, level, tick):
self.sw_callback()
def _pulse(self, gpio, level, tick):
"""
Decode the rotary encoder pulse.
+---------+ +---------+ 0
| | | |
A | | | |
| | | |
+---------+ +---------+ +----- 1
+---------+ +---------+ 0
| | | |
B | | | |
| | | |
----+ +---------+ +---------+ 1
"""
if gpio == self.rot_gpioA:
self.levA = level
else:
self.levB = level
if gpio != self.lastGpio: # debounce
self.lastGpio = gpio
if gpio == self.rot_gpioA and level == 1:
if self.levB == 1:
self.rot_callback(1)
elif gpio == self.rot_gpioB and level == 1:
if self.levA == 1:
self.rot_callback(-1)
def cancel(self):
"""
Cancel the rotary encoder decoder.
"""
self.cbA.cancel()
self.cbB.cancel()
if __name__ == "__main__":
import time
import pigpio
import rotary_encoder
pos = 0
def callback(way):
global pos
pos += way
print("pos={}".format(pos))
pi = pigpio.pi()
decoder = rotary_encoder.decoder(pi, 2, 4, callback)
time.sleep(300)
decoder.cancel()
pi.stop()
|
[
"rotary_encoder.decoder",
"pigpio.pi",
"time.sleep"
] |
[((3833, 3844), 'pigpio.pi', 'pigpio.pi', ([], {}), '()\n', (3842, 3844), False, 'import pigpio\n'), ((3862, 3904), 'rotary_encoder.decoder', 'rotary_encoder.decoder', (['pi', '(2)', '(4)', 'callback'], {}), '(pi, 2, 4, callback)\n', (3884, 3904), False, 'import rotary_encoder\n'), ((3912, 3927), 'time.sleep', 'time.sleep', (['(300)'], {}), '(300)\n', (3922, 3927), False, 'import time\n')]
|
import arcade
from game.title_view import Title
from game.player import Player
from game import constants
class Director():
def __init__(self):
"""Directs the game"""
self.window = arcade.Window(
constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT, constants.SCREEN_TITLE)
self.main()
self.player = Player()
# Track the current state of what key is pressed
def main(self):
""" Main method """
# Class for displaying the screen.
start_view = Title()
self.window.show_view(start_view)
arcade.run()
|
[
"game.title_view.Title",
"game.player.Player",
"arcade.run",
"arcade.Window"
] |
[((203, 294), 'arcade.Window', 'arcade.Window', (['constants.SCREEN_WIDTH', 'constants.SCREEN_HEIGHT', 'constants.SCREEN_TITLE'], {}), '(constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT, constants.\n SCREEN_TITLE)\n', (216, 294), False, 'import arcade\n'), ((345, 353), 'game.player.Player', 'Player', ([], {}), '()\n', (351, 353), False, 'from game.player import Player\n'), ((544, 551), 'game.title_view.Title', 'Title', ([], {}), '()\n', (549, 551), False, 'from game.title_view import Title\n'), ((611, 623), 'arcade.run', 'arcade.run', ([], {}), '()\n', (621, 623), False, 'import arcade\n')]
|
import os
import tornado.httpserver
import tornado.ioloop
import tornado.log
import tornado.web
from tornado.options import define, options, parse_command_line
import config
import handlers.web
import handlers.api
class Application(tornado.web.Application):
def __init__(self, debug):
routes = [
# Web handlers
(r"/", handlers.web.ExampleWebHandler),
# API handlers
(r"/api", handlers.api.ExampleApiHandler),
# Public files: JS, CSS, images and favicon.ico
(r'/public/(.*)', tornado.web.StaticFileHandler, {
'path' : os.path.join(os.path.dirname(__file__), "public")
}),
(r'/(favicon\.ico)', tornado.web.StaticFileHandler, {
"path": os.path.join(os.path.dirname(__file__), "public", "images")
})
]
settings = {
"template_path": os.path.join(os.path.dirname(__file__), "templates"),
"debug": debug,
"cookie_secret": config.cookie_secret,
"xsrf_cookies": True,
"login_url": '/login'
}
tornado.web.Application.__init__(self, routes, **settings)
@property
def logger(self):
return tornado.log.app_log
if __name__=='__main__':
define('port', default = config.port, help = 'port', type = int)
define('debug', default = False, help = 'run in debug mode', type = bool)
parse_command_line()
app = Application(options.debug)
app.logger.info('Starting %s on 0.0.0.0:%s' % ('tornado skeleton', options.port))
http_server = tornado.httpserver.HTTPServer(app)
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
|
[
"os.path.dirname",
"tornado.options.define",
"tornado.options.parse_command_line"
] |
[((1295, 1353), 'tornado.options.define', 'define', (['"""port"""'], {'default': 'config.port', 'help': '"""port"""', 'type': 'int'}), "('port', default=config.port, help='port', type=int)\n", (1301, 1353), False, 'from tornado.options import define, options, parse_command_line\n'), ((1364, 1431), 'tornado.options.define', 'define', (['"""debug"""'], {'default': '(False)', 'help': '"""run in debug mode"""', 'type': 'bool'}), "('debug', default=False, help='run in debug mode', type=bool)\n", (1370, 1431), False, 'from tornado.options import define, options, parse_command_line\n'), ((1442, 1462), 'tornado.options.parse_command_line', 'parse_command_line', ([], {}), '()\n', (1460, 1462), False, 'from tornado.options import define, options, parse_command_line\n'), ((927, 952), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (942, 952), False, 'import os\n'), ((635, 660), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (650, 660), False, 'import os\n'), ((791, 816), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (806, 816), False, 'import os\n')]
|
#!env python
import sys
import json
import csv
json_input = json.load(sys.stdin)
csv_output = csv.writer(sys.stdout)
csv_output.writerow(['Library', 'URL', 'License'])
for package_name, data in json_input.items():
name = package_name.split('@')[0]
url = ''
if 'homepage' in data:
if type(data['homepage']) is str:
url = data['homepage']
elif 'repository' in data:
if type(data['repository']) is str:
url = data['repository']
elif type(data['repository']) is dict:
url = data['repository']['url']
csv_output.writerow([name, url, ';'.join(data['licenses'])])
|
[
"json.load",
"csv.writer"
] |
[((62, 82), 'json.load', 'json.load', (['sys.stdin'], {}), '(sys.stdin)\n', (71, 82), False, 'import json\n'), ((96, 118), 'csv.writer', 'csv.writer', (['sys.stdout'], {}), '(sys.stdout)\n', (106, 118), False, 'import csv\n')]
|
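The stdin-to-CSV script above relies on a homepage/repository fallback to pick a URL for each package; that fallback on its own, fed a made-up license-checker entry:
# URL fallback only; the entry below is invented
data = {'repository': {'url': 'https://github.com/example/pkg'}}
url = ''
if isinstance(data.get('homepage'), str):
    url = data['homepage']
elif isinstance(data.get('repository'), str):
    url = data['repository']
elif isinstance(data.get('repository'), dict):
    url = data['repository']['url']
print(url)  # https://github.com/example/pkg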
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django_datatables_view.base_datatable_view import BaseDatatableView
from apps.Servers.models import TemplateServer, ServerProfile, Parameters
class ServerTemplatesListJson(LoginRequiredMixin, BaseDatatableView):
model = TemplateServer
columns = ['name', 'description', 'pk']
order_columns = ['name', 'description', 'pk']
max_display_length = 200
def filter_queryset(self, qs):
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(
Q(name__icontains=search) |
Q(description__icontains=search)
)
return qs
class ServerProfilesListJson(LoginRequiredMixin, BaseDatatableView):
model = ServerProfile
columns = ['name', 'description', 'pk']
order_columns = ['name', 'description', 'pk']
max_display_length = 100
def filter_queryset(self, qs):
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(name__icontains=search)
return qs
class ParametersListJson(LoginRequiredMixin, BaseDatatableView):
model = Parameters
columns = ['name', 'category', 'pk']
order_columns = ['name', 'category', 'pk']
max_display_length = 200
def filter_queryset(self, qs):
search = self.request.GET.get(u'search[value]', None)
if search:
qs = qs.filter(
Q(name__icontains=search) |
Q(category__icontains=search)
)
return qs
|
[
"django.db.models.Q"
] |
[((620, 645), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'search'}), '(name__icontains=search)\n', (621, 645), False, 'from django.db.models import Q\n'), ((664, 696), 'django.db.models.Q', 'Q', ([], {'description__icontains': 'search'}), '(description__icontains=search)\n', (665, 696), False, 'from django.db.models import Q\n'), ((1503, 1528), 'django.db.models.Q', 'Q', ([], {'name__icontains': 'search'}), '(name__icontains=search)\n', (1504, 1528), False, 'from django.db.models import Q\n'), ((1547, 1576), 'django.db.models.Q', 'Q', ([], {'category__icontains': 'search'}), '(category__icontains=search)\n', (1548, 1576), False, 'from django.db.models import Q\n')]
|
# %% Packages
import json
from dotmap import DotMap
# %% Functions
def get_config_from_json(json_file):
with open(json_file, "r") as config_file:
config_dict = json.load(config_file)
config = DotMap(config_dict)
return config
def process_config(json_file):
config = get_config_from_json(json_file)
return config
|
[
"dotmap.DotMap",
"json.load"
] |
[((213, 232), 'dotmap.DotMap', 'DotMap', (['config_dict'], {}), '(config_dict)\n', (219, 232), False, 'from dotmap import DotMap\n'), ((177, 199), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (186, 199), False, 'import json\n')]
|
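The point of wrapping the parsed JSON in a DotMap above is attribute-style access to nested config values; a tiny check of that behaviour with invented keys:
# DotMap gives recursive attribute access over plain dicts (keys here are made up)
from dotmap import DotMap
config = DotMap({'exp': {'name': 'demo'}, 'trainer': {'epochs': 5}})
print(config.exp.name, config.trainer.epochs)  # demo 5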
from typing import Callable
import numpy as np
import torch
import torch.nn as nn
from util.data import transform_observation
class PommerQEmbeddingRNN(nn.Module):
def __init__(self, embedding_model):
super(PommerQEmbeddingRNN, self).__init__()
self.embedding_model = embedding_model
self.memory = []
self.steps = 10
# Stacked lstm
self.rnn = [nn.LSTM(64, 64) for step in range(self.steps)]
self.linear = nn.Sequential(
nn.Flatten(),
nn.ReLU(),
nn.Linear(in_features=64, out_features=6),
nn.Softmax(dim=-1)
)
def forward(self, obs):
while len(self.memory) >= self.steps:
self.memory.pop(0)
while len(self.memory) != self.steps:
self.memory.append(obs)
# x=obs[0] # Board Embedding
x = None
h = None
for obs_n, rnn_n in zip(self.memory, self.rnn):
x_n = obs_n[0]
x, h = rnn_n(x_n, h)
x = self.linear(x).squeeze()
return x
def get_transformer(self) -> Callable:
"""
Return a callable for input transformation.
The callable should take a ``dict`` containing data of a single
observation from the Pommerman environment and return a ``list``
of individual numpy arrays that can be used later as an input
value in the ``forward()`` function.
"""
def transformer(obs: dict) -> list:
planes = transform_observation(obs, p_obs=True, centralized=True)
planes = np.array(planes, dtype=np.float32)
# Generate embedding
# flattened = planes.flatten()
# flattened = torch.tensor(flattened, device=torch.device('cpu')) # TODO: Make 'cpu' variable
X = torch.tensor(planes, device=torch.device('cpu')).unsqueeze(0)
board_embedding = self.embedding_model.forward(X)
board_embedding = board_embedding.detach().numpy()
return [
board_embedding
]
return transformer
|
[
"torch.nn.ReLU",
"util.data.transform_observation",
"torch.nn.Softmax",
"numpy.array",
"torch.nn.Linear",
"torch.device",
"torch.nn.LSTM",
"torch.nn.Flatten"
] |
[((400, 415), 'torch.nn.LSTM', 'nn.LSTM', (['(64)', '(64)'], {}), '(64, 64)\n', (407, 415), True, 'import torch.nn as nn\n'), ((497, 509), 'torch.nn.Flatten', 'nn.Flatten', ([], {}), '()\n', (507, 509), True, 'import torch.nn as nn\n'), ((523, 532), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (530, 532), True, 'import torch.nn as nn\n'), ((546, 587), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': '(64)', 'out_features': '(6)'}), '(in_features=64, out_features=6)\n', (555, 587), True, 'import torch.nn as nn\n'), ((601, 619), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (611, 619), True, 'import torch.nn as nn\n'), ((1510, 1566), 'util.data.transform_observation', 'transform_observation', (['obs'], {'p_obs': '(True)', 'centralized': '(True)'}), '(obs, p_obs=True, centralized=True)\n', (1531, 1566), False, 'from util.data import transform_observation\n'), ((1588, 1622), 'numpy.array', 'np.array', (['planes'], {'dtype': 'np.float32'}), '(planes, dtype=np.float32)\n', (1596, 1622), True, 'import numpy as np\n'), ((1850, 1869), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (1862, 1869), False, 'import torch\n')]
|
# This script does the following:
# 1) Record H264 Video using PiCam at a maximum bitrate of 300 kbps
# 2) Stream video data to a local BytesIO object
# 3) Send raw data over LTE
# 4) Store raw data to an onboard file
# 5) Clears BytesIO object after network stream and file store
# 6) Interrupts and ends recording after 'record_time' seconds
# Author: <NAME>
# Last Edited: 10/2/19
# Libraries
# -> picamera -> PiCamera: Enables pi cam interfacing and settings manipulation
# -> picamera -> CircularIO: Allows for a circular buffer (if we want one)
# -> threading: enables timer interrupt
# -> io -> BytesIO : local file-like object that camera streams to
# -> socket: allows for UDP socket and message sending
# -> Hologram.HologramCloud -> HologramCloud: LTE API to send data over an LTE network
from picamera import PiCamera
from picamera import CircularIO
from io import BytesIO
from Hologram.HologramCloud import HologramCloud
import threading
import socket
import time
import os
#======================= Global Variables and Objects =================
#Global Variables
lte = True
wifi = not(lte)
record_file = 'buffer_recording.h264' #on-board file video is stored to
bitrate_max = 100000 # bits per second
record_time = 8 # Time in seconds that the recording runs for
record_chunk = 0.1 #chunk size in seconds video object is broken into and sent
frame_rate = 15 #camera frame rate
interrupt_bool = False #global interrupt flag that ends recording/program
store_and_send_bool = False #global interrupt flag that initiates sending and storing of camera data
#ensures chunk size is not smaller than one frame
if record_chunk < 1/frame_rate:
record_chunk = 1/frame_rate
#Camera Settings
camera = PiCamera()
camera.resolution = (320, 240)
camera.framerate = frame_rate
if lte:
#LTE Network Streaming
credentials = {'devicekey': '<KEY>'}
hologram = HologramCloud(credentials, network='cellular')
if wifi:
#Wifi UDP Network Streaming
STREAM_IP = '127.0.0.1'
STREAM_PORT = 4000
send_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#========================= Functions =================================
def interrupt_func():
#Interrupt function that ends camera streaming and program
global interrupt_bool
interrupt_bool = True
print("Program Timer up")
def store_interrupt_func():
#interrupt function that initiates sending and storing camera data
global store_and_send_bool
store_and_send_bool = True
#threading.Timer(record_chunk, store_interrupt_func).start()
def send_network(msg):
if lte:
#Sends data over LTE
msg_err = hologram.sendMessage(msg, timeout = 1)
if wifi:
#Sends data over Wifi UDP
send_sock.sendto(msg, (STREAM_IP, STREAM_PORT))
#======================== Video Streaming and Recording ============
loop_cnt = 0.0
cnt = 0
#camera.start_preview()
#=================== Stores to local BytesIO then sends ========================
# MOST EFFICIENT AND TEST-PROVEN METHOD
if lte:
#Initialize LTE Network Connection
connected = 0
while not(connected == 1):
os.system("sudo hologram network disconnect")
if connected == 0:
print("Not Connected (%d)\n -> Connecting"%(connected))
hologram.network.connect(timeout = 10)
else:
print("Trying to Reconnect (%d)"%(connected))
hologram.network.disconnect()
hologram.network.connect(timeout = 10)
connected = hologram.network.getConnectionStatus()
print("Connected!")
#Initialize local stream object
stream = BytesIO()
#stream = CircularIO(int((10*bitrate_max*record_chunk)/8))
#Open and/or create onboard file to store to
camera_file_handle = open(record_file, 'wb+')
#Begin Pi Cam recording
camera.start_recording(stream, format='h264', bitrate=bitrate_max)
print("Beginning Program")
#Start timer threads
threading.Timer(record_time, interrupt_func).start()
threading.Timer(record_chunk, store_interrupt_func).start()
loop_sum = 0
comms_sum = 0
store_sum = 0
random_cnt = 0
program_start = time.time()
#Main Program Loop
while not(interrupt_bool):
#camera.wait_recording(record_chunk)
if (store_and_send_bool):
threading.Timer(record_chunk, store_interrupt_func).start()
loop_start = time.time()
#executes when record_chunk thread times out
#controls how often data is ported over the network and to file
#change 'record_chunk' to vary time and data size
#Reset global interrupt flag
store_and_send_bool = False
#Send bytes-like data over the Network (UDP)
comms_start = time.time()
send_network(stream.getvalue())
comms_sum += (time.time()-comms_start)
#Store bytes-like data to file
store_start = time.time()
camera_file_handle.write(stream.getvalue())
store_sum += (time.time()-store_start)
#Clear local file-like object
stream.truncate(0)
stream.seek(0)
#[Optional] Print Diagnostic printout
cnt+=1
print("Sent and Saved Chunk #%d | Loop Time: %f"%(cnt, (time.time()-loop_start)))
loop_sum+=(time.time() - loop_start)
#======================================================================================
#End Recording and Tidy Up
total_time = time.time() - program_start
print("Ending Recording")
camera.stop_recording()
print("Closing Video File")
camera_file_handle.close()
print("Program Time: %fs"%(total_time))
print("Process Time: %fs | Process Usage: %f%%"%(loop_sum, (loop_sum*100)/total_time))
print("\tComms: %fs | %f%%\n\tStore: %fs | %f%%"%(comms_sum, (comms_sum*100)/loop_sum, store_sum,(store_sum*100)/loop_sum))
#camera.stop_preview()
|
[
"io.BytesIO",
"threading.Timer",
"Hologram.HologramCloud.HologramCloud",
"socket.socket",
"os.system",
"time.time",
"picamera.PiCamera"
] |
[((1709, 1719), 'picamera.PiCamera', 'PiCamera', ([], {}), '()\n', (1717, 1719), False, 'from picamera import PiCamera\n'), ((3440, 3449), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (3447, 3449), False, 'from io import BytesIO\n'), ((3928, 3939), 'time.time', 'time.time', ([], {}), '()\n', (3937, 3939), False, 'import time\n'), ((1864, 1910), 'Hologram.HologramCloud.HologramCloud', 'HologramCloud', (['credentials'], {'network': '"""cellular"""'}), "(credentials, network='cellular')\n", (1877, 1910), False, 'from Hologram.HologramCloud import HologramCloud\n'), ((2007, 2055), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (2020, 2055), False, 'import socket\n'), ((5051, 5062), 'time.time', 'time.time', ([], {}), '()\n', (5060, 5062), False, 'import time\n'), ((3023, 3068), 'os.system', 'os.system', (['"""sudo hologram network disconnect"""'], {}), "('sudo hologram network disconnect')\n", (3032, 3068), False, 'import os\n'), ((3742, 3786), 'threading.Timer', 'threading.Timer', (['record_time', 'interrupt_func'], {}), '(record_time, interrupt_func)\n', (3757, 3786), False, 'import threading\n'), ((3795, 3846), 'threading.Timer', 'threading.Timer', (['record_chunk', 'store_interrupt_func'], {}), '(record_chunk, store_interrupt_func)\n', (3810, 3846), False, 'import threading\n'), ((4128, 4139), 'time.time', 'time.time', ([], {}), '()\n', (4137, 4139), False, 'import time\n'), ((4434, 4445), 'time.time', 'time.time', ([], {}), '()\n', (4443, 4445), False, 'import time\n'), ((4576, 4587), 'time.time', 'time.time', ([], {}), '()\n', (4585, 4587), False, 'import time\n'), ((4496, 4507), 'time.time', 'time.time', ([], {}), '()\n', (4505, 4507), False, 'import time\n'), ((4650, 4661), 'time.time', 'time.time', ([], {}), '()\n', (4659, 4661), False, 'import time\n'), ((4893, 4904), 'time.time', 'time.time', ([], {}), '()\n', (4902, 4904), False, 'import time\n'), ((4053, 4104), 'threading.Timer', 'threading.Timer', (['record_chunk', 'store_interrupt_func'], {}), '(record_chunk, store_interrupt_func)\n', (4068, 4104), False, 'import threading\n'), ((4854, 4865), 'time.time', 'time.time', ([], {}), '()\n', (4863, 4865), False, 'import time\n')]
|
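The recording loop above is driven by two threading.Timer callbacks: one ends the whole run, the other marks each chunk boundary so the buffered bytes can be sent, written to disk, and cleared. A hardware-free sketch of that flag-and-flush pattern, with fake frame bytes and a made-up output file name:
# Flag-and-flush pattern only; no camera or network involved
import threading, time
from io import BytesIO
stream = BytesIO()          # stand-in for the camera's output stream
flush_flag = False
stop_flag = False
def request_flush():
    global flush_flag
    flush_flag = True
def request_stop():
    global stop_flag
    stop_flag = True
threading.Timer(0.1, request_flush).start()
threading.Timer(1.0, request_stop).start()
with open('chunks.bin', 'wb') as out_file:   # hypothetical file name
    while not stop_flag:
        stream.write(b'frame')            # fake camera data
        if flush_flag:
            flush_flag = False
            threading.Timer(0.1, request_flush).start()
            out_file.write(stream.getvalue())  # the real script also sends this chunk over LTE/UDP
            stream.truncate(0)
            stream.seek(0)
        time.sleep(0.01)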
# coding: utf8
"""
weasyprint.tests.w3_test_suite.web
----------------------------------
A simple web application to run and inspect the results of
the W3C CSS 2.1 Test Suite.
See http://test.csswg.org/suites/css2.1/20110323/
:copyright: Copyright 2011-2012 <NAME> and contributors, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from __future__ import division, unicode_literals
import os.path
import lxml.html
# Don’t try to import Flask on Python 3
from weasyprint import HTML, CSS
def split(something):
return something.split(',') if something else []
def read_testinfo(suite_directory):
with open(os.path.join(suite_directory, '..', 'testinfo.data')) as fd:
lines = iter(fd)
next(lines) # skip labels
for line in lines:
test_id, references, title, flags, links, _, _, assertion = \
line.strip(' \n').split('\t')
yield dict(
test_id=test_id,
assertion=assertion,
title=title,
flags=split(flags),
links=split(links),
references=split(references),
)
def read_chapter(filename, tests_by_link):
url_prefix = 'http://www.w3.org/TR/CSS21/'
for link in lxml.html.parse(filename).xpath(
'//th/a[starts-with(@href, "%s")]' % url_prefix):
url = link.get('href')[len(url_prefix):]
if url in tests_by_link:
yield (
link.text_content().strip(),
link.get('href'),
tests_by_link[url]
)
def read_toc(suite_directory, tests_by_link):
filename = os.path.join(suite_directory, 'toc.html')
for link in lxml.html.parse(filename).xpath('//table//a[@href]'):
filename = os.path.join(suite_directory, link.get('href'))
sections = list(read_chapter(filename, tests_by_link))
if sections:
num = sum(len(tests) for _, _, tests in sections)
yield (link.text_content().strip(), sections, num)
def prepare_test_data(suite_directory):
tests = {}
tests_by_link = {}
for test in read_testinfo(suite_directory):
for link in test['links']:
tests[test['test_id']] = test
tests_by_link.setdefault(link, []).append(test)
return list(read_toc(suite_directory, tests_by_link)), tests
def run(suite_directory):
from flask import (
Flask, render_template, abort, send_from_directory, safe_join)
chapters, tests = prepare_test_data(suite_directory)
app = Flask(__name__)
app.jinja_env.globals['len'] = len
@app.route('/')
def toc():
return render_template('toc.html',
chapters=enumerate(chapters, 1), total=len(tests))
@app.route('/chapter<int:chapter_num>/')
def chapter(chapter_num):
try:
title, sections, _ = chapters[chapter_num - 1]
except IndexError:
abort(404)
return render_template('chapter.html',
chapter_num=chapter_num, chapter=title,
sections=enumerate(sections, 1))
@app.route('/chapter<int:chapter_num>/section<int:section_num>/')
def section(chapter_num, section_num):
try:
chapter, sections, _ = chapters[chapter_num - 1]
title, url, tests = sections[section_num - 1]
except IndexError:
abort(404)
return render_template('section.html', **locals())
default_stylesheet = CSS(string='''
@page { margin: 20px; size: 680px }
body { margin: 0 }
''')
@app.route('/test/<test_id>/')
@app.route('/chapter<int:chapter_num>/section<int:section_num>/test<int:test_index>/')
def run_test(chapter_num=None, section_num=None, test_index=None,
test_id=None):
if test_id is None:
try:
chapter, sections, _ = chapters[chapter_num - 1]
title, url, tests = sections[section_num - 1]
test = tests[test_index - 1]
previous_index = test_index - 1
next_index = test_index + 1 if test_index < len(tests) else None
except IndexError:
abort(404)
else:
test = dict(test_id=test_id)
from pygments import highlight
from pygments.lexers import HtmlLexer
from pygments.formatters import HtmlFormatter
filename = safe_join(suite_directory, test['test_id'] + '.htm')
with open(filename, 'rb') as fd:
source = fd.read().decode('utf8')
formatter = HtmlFormatter(linenos='inline')
source = highlight(source, HtmlLexer(), formatter)
css = formatter.get_style_defs('.highlight')
return render_template('run_test.html', **locals())
@app.route('/render/<path:test_id>')
def render(test_id):
document = HTML(
safe_join(suite_directory, test_id + '.htm'),
encoding='utf8',
).render(stylesheets=[default_stylesheet], enable_hinting=True)
pages = [
'data:image/png;base64,' + document.copy([page]).write_png(
)[0].encode('base64').replace('\n', '')
for page in document.pages]
return render_template('render.html', **locals())
@app.route('/test-data/<path:filename>')
def test_data(filename):
return send_from_directory(suite_directory, filename)
app.run(debug=True)
if __name__ == '__main__':
run(os.path.expanduser('~/css2.1_test_suite/20110323/html4/'))
|
[
"pygments.formatters.HtmlFormatter",
"flask.safe_join",
"weasyprint.CSS",
"flask.Flask",
"flask.abort",
"pygments.lexers.HtmlLexer",
"flask.send_from_directory"
] |
[((2591, 2606), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (2596, 2606), False, 'from flask import Flask, render_template, abort, send_from_directory, safe_join\n'), ((3515, 3619), 'weasyprint.CSS', 'CSS', ([], {'string': '"""\n @page { margin: 20px; size: 680px }\n body { margin: 0 }\n """'}), '(string=\n """\n @page { margin: 20px; size: 680px }\n body { margin: 0 }\n """\n )\n', (3518, 3619), False, 'from weasyprint import HTML, CSS\n'), ((4458, 4510), 'flask.safe_join', 'safe_join', (['suite_directory', "(test['test_id'] + '.htm')"], {}), "(suite_directory, test['test_id'] + '.htm')\n", (4467, 4510), False, 'from flask import Flask, render_template, abort, send_from_directory, safe_join\n'), ((4619, 4650), 'pygments.formatters.HtmlFormatter', 'HtmlFormatter', ([], {'linenos': '"""inline"""'}), "(linenos='inline')\n", (4632, 4650), False, 'from pygments.formatters import HtmlFormatter\n'), ((5410, 5456), 'flask.send_from_directory', 'send_from_directory', (['suite_directory', 'filename'], {}), '(suite_directory, filename)\n', (5429, 5456), False, 'from flask import Flask, render_template, abort, send_from_directory, safe_join\n'), ((4686, 4697), 'pygments.lexers.HtmlLexer', 'HtmlLexer', ([], {}), '()\n', (4695, 4697), False, 'from pygments.lexers import HtmlLexer\n'), ((2977, 2987), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (2982, 2987), False, 'from flask import Flask, render_template, abort, send_from_directory, safe_join\n'), ((3418, 3428), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (3423, 3428), False, 'from flask import Flask, render_template, abort, send_from_directory, safe_join\n'), ((4232, 4242), 'flask.abort', 'abort', (['(404)'], {}), '(404)\n', (4237, 4242), False, 'from flask import Flask, render_template, abort, send_from_directory, safe_join\n'), ((4928, 4972), 'flask.safe_join', 'safe_join', (['suite_directory', "(test_id + '.htm')"], {}), "(suite_directory, test_id + '.htm')\n", (4937, 4972), False, 'from flask import Flask, render_template, abort, send_from_directory, safe_join\n')]
|
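read_testinfo() above expects each testinfo.data line to carry eight tab-separated columns; one invented line, unpacked the same way, shows what the generated dict is built from:
# Sample line is made up but follows the 8-column layout the parser assumes
line = 'abs-pos-001\t\tAbsolute positioning\tahem\tvisuren.html#x1\t\t\tBoxes are positioned'
test_id, references, title, flags, links, _, _, assertion = line.strip(' \n').split('\t')
print(test_id, flags.split(','), links.split(','))  # abs-pos-001 ['ahem'] ['visuren.html#x1']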
# This script parses MLB data from retrosheet and creates a dataframe
# Importing required modules
import pandas as pd
import glob
# Defining username + directory
username = ''
filepath = 'C:/Users/' + username + '/Documents/Data/mlbozone/'
# Create a list of all files in the raw_data subfolder
files = []
for file in glob.glob(filepath + 'raw_data/*'):
files.append(file)
# Declaring some data storage
attendance = []
dates = []
dblh = []
dow = []
stadia = []
dn = []
scorea = []
scoreh = []
team = [] # home team
ateam = [] # away team
gtype = [] # game type
# Main loop
for file in files:
print('Extracting data from ' + file + '.......')
data = pd.read_csv(file, header = None)
attendance = attendance + data[[17]][17].to_list()
dates = dates + data[[0]][0].to_list()
dblh = dblh + data[[1]][1].to_list()
dow = dow + data[[2]][2].to_list()
stadia = stadia + data[[16]][16].to_list()
dn = dn + data[[12]][12].to_list()
team = team + data[[6]][6].to_list()
ateam = ateam + data[[3]][3].to_list()
gt = file[-8:-4]
try:
gt = int(gt)
gt = 'REGS'
except:
pass
gtype = gtype + [gt]*len(data)
if file[-8:] == 'GLAS.TXT':
scorea = scorea + [None]*len(data)
scoreh = scoreh + [None]*len(data)
else:
scorea = scorea + data[[9]][9].to_list()
scoreh = scoreh + data[[10]][10].to_list()
# Compute home winner in non all-star games
winner = [int(scoreh[i] - scorea[i] > 0) if scoreh[i] != None and scorea[i] != None else None for i in range(len(scoreh))]
# Updating FLO to MIA for consistency
team = ['MIA' if t == 'FLO' else t for t in team]
# Creating a dataframe
attendance = pd.Series(attendance, name = 'Attendance')
dates = pd.Series(dates, name = 'Date')
dblh = pd.Series(dblh, name = 'Doubleheader')
dow = pd.Series(dow, name = 'Day')
stadia = pd.Series(stadia, name = 'Stadium')
dn = pd.Series(dn, name = 'Time')
scorea = pd.Series(scorea, name = 'Score_Away')
scoreh = pd.Series(scoreh, name = 'Score_Home')
winner = pd.Series(winner, name = 'Winner')
team = pd.Series(team, name = 'Home_Team')
ateam = pd.Series(ateam, name = 'Away_Team')
gtype = pd.Series(gtype, name = 'Type')
df = pd.concat([attendance, dates, dblh, dow, stadia, dn, scorea, scoreh, winner, team, ateam, gtype], axis = 1)
# Subset to remove non-season data (playoffs, etc.) that are outside of the window
df = df[df.Date > 20100000].reset_index(drop = True)
df = df[df.Date < 20200000].reset_index(drop = True)
# Create a 2010-2016 sample indicator and add to df
subsamp = [1 if d < 20170000 else 0 for d in df.Date]
df = pd.concat([df, pd.Series(subsamp, name = 'SAMPLE')], axis = 1)
# Subset to remove non-standard stadiums
parks = list(df.Stadium.unique())
counts = [len(df[df.Stadium == p]) for p in parks]
keeps = [p for p in parks if counts[parks.index(p)] > 100]
df = df[df.Stadium.isin(keeps)].reset_index(drop = True)
# Save df
df.to_csv(filepath + 'mlb_data.csv', index = False)
|
[
"pandas.read_csv",
"pandas.concat",
"pandas.Series",
"glob.glob"
] |
[((342, 376), 'glob.glob', 'glob.glob', (["(filepath + 'raw_data/*')"], {}), "(filepath + 'raw_data/*')\n", (351, 376), False, 'import glob\n'), ((1882, 1922), 'pandas.Series', 'pd.Series', (['attendance'], {'name': '"""Attendance"""'}), "(attendance, name='Attendance')\n", (1891, 1922), True, 'import pandas as pd\n'), ((1934, 1963), 'pandas.Series', 'pd.Series', (['dates'], {'name': '"""Date"""'}), "(dates, name='Date')\n", (1943, 1963), True, 'import pandas as pd\n'), ((1974, 2010), 'pandas.Series', 'pd.Series', (['dblh'], {'name': '"""Doubleheader"""'}), "(dblh, name='Doubleheader')\n", (1983, 2010), True, 'import pandas as pd\n'), ((2020, 2046), 'pandas.Series', 'pd.Series', (['dow'], {'name': '"""Day"""'}), "(dow, name='Day')\n", (2029, 2046), True, 'import pandas as pd\n'), ((2059, 2092), 'pandas.Series', 'pd.Series', (['stadia'], {'name': '"""Stadium"""'}), "(stadia, name='Stadium')\n", (2068, 2092), True, 'import pandas as pd\n'), ((2101, 2127), 'pandas.Series', 'pd.Series', (['dn'], {'name': '"""Time"""'}), "(dn, name='Time')\n", (2110, 2127), True, 'import pandas as pd\n'), ((2140, 2176), 'pandas.Series', 'pd.Series', (['scorea'], {'name': '"""Score_Away"""'}), "(scorea, name='Score_Away')\n", (2149, 2176), True, 'import pandas as pd\n'), ((2189, 2225), 'pandas.Series', 'pd.Series', (['scoreh'], {'name': '"""Score_Home"""'}), "(scoreh, name='Score_Home')\n", (2198, 2225), True, 'import pandas as pd\n'), ((2238, 2270), 'pandas.Series', 'pd.Series', (['winner'], {'name': '"""Winner"""'}), "(winner, name='Winner')\n", (2247, 2270), True, 'import pandas as pd\n'), ((2281, 2314), 'pandas.Series', 'pd.Series', (['team'], {'name': '"""Home_Team"""'}), "(team, name='Home_Team')\n", (2290, 2314), True, 'import pandas as pd\n'), ((2326, 2360), 'pandas.Series', 'pd.Series', (['ateam'], {'name': '"""Away_Team"""'}), "(ateam, name='Away_Team')\n", (2335, 2360), True, 'import pandas as pd\n'), ((2372, 2401), 'pandas.Series', 'pd.Series', (['gtype'], {'name': '"""Type"""'}), "(gtype, name='Type')\n", (2381, 2401), True, 'import pandas as pd\n'), ((2410, 2519), 'pandas.concat', 'pd.concat', (['[attendance, dates, dblh, dow, stadia, dn, scorea, scoreh, winner, team,\n ateam, gtype]'], {'axis': '(1)'}), '([attendance, dates, dblh, dow, stadia, dn, scorea, scoreh, winner,\n team, ateam, gtype], axis=1)\n', (2419, 2519), True, 'import pandas as pd\n'), ((728, 758), 'pandas.read_csv', 'pd.read_csv', (['file'], {'header': 'None'}), '(file, header=None)\n', (739, 758), True, 'import pandas as pd\n'), ((2847, 2880), 'pandas.Series', 'pd.Series', (['subsamp'], {'name': '"""SAMPLE"""'}), "(subsamp, name='SAMPLE')\n", (2856, 2880), True, 'import pandas as pd\n')]
|
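The home-winner flag above is simply "home score strictly greater than away score", left as None when either score is missing (as it is for all-star games); a quick check with made-up scores:
# Winner rule only; scores are invented
scoreh = [5, 2, None]
scorea = [3, 4, None]
winner = [int(h - a > 0) if h is not None and a is not None else None for h, a in zip(scoreh, scorea)]
print(winner)  # [1, 0, None]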
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
from six.moves.urllib.parse import urlencode, quote
from nltk.tokenize import RegexpTokenizer
from bs4 import BeautifulSoup
import re
import json
import time
tokenizer = RegexpTokenizer(r"[a-zA-Z\s\d]")
options = webdriver.ChromeOptions()
options.add_argument("--incognito")
options.add_argument("--headless")
options.add_argument("--disable-extensions")
options.add_argument("start-maximized")
zone_to_url = {
"US": "https://www.amazon.com/",
"UK": "https://www.amazon.co.uk/",
"IN": "https://www.amazon.co.in/",
}
def _identity_seller1(soup: BeautifulSoup):
search_area = soup.find("div", {"id": "mbc-sold-by-1"})
if search_area is None:
raise Exception("ID1 not found")
try:
search_area = search_area.find("span", {"class": "mbcMerchantName"})
if search_area is not None:
return search_area.text.strip()
except Exception as e:
print(e)
print("Could not retrieve the seller name open the website and debug1")
return ""
raise Exception("Wow1")
def _identity_seller2(soup: BeautifulSoup):
search_area = soup.find("a", {"id": "bylineInfo"})
if search_area is None:
raise Exception("ID2 not found")
try:
if search_area is not None:
return search_area.text.strip() # [10:-6].strip()
except Exception as e:
print(e)
print("Could not retrieve the seller name open the website and debug2")
return ""
raise Exception("Wow2")
def _identity_seller3(soup: BeautifulSoup):
search_area = soup.find("div", {"id": "merchant-info"})
if search_area is None:
raise Exception("ID3 not found")
try:
search_area = soup.find("a", {"id": "sellerProfileTriggerId"})
if search_area is not None:
return search_area.text.strip()
except Exception as e:
print(e)
print("Could not retrieve the seller name open the website and debug3")
return ""
return ""
def identity_seller(soup: BeautifulSoup):
try:
return _identity_seller1(soup)
except Exception as e:
print(e)
try:
return _identity_seller2(soup)
except Exception as e:
print(e)
try:
return _identity_seller3(soup)
except Exception as e:
print(e)
return ""
return ""
def get_stock_info(soup: BeautifulSoup):
try:
stock_info = soup.find("div", {"id": "availability_feature_div"})
if stock_info is None:
raise Exception("availability_feature_div ID not found")
stock_info = "".join(tokenizer.tokenize(stock_info.text.strip().lower()))
stock_info = " ".join(stock_info.split())
except Exception as e:
stock_info = soup.find("div", {"id": "availability"})
return stock_info.text.strip()
try:
if stock_info is not None:
return stock_info
except Exception as e:
pass
return ""
def get_stars_reviews(soup: BeautifulSoup):
try:
search_area = soup.find("div", "a-row")
try:
st = search_area.find("i")["class"][-1]
except Exception as e:
print(str(e) + " Stars could not be retrieved")
st = ""
try:
rev = search_area.find("span", "a-color-link").text.strip()
except Exception as e:
print(str(e) + " Reviews could not be retrieved")
rev = ""
return st, rev
except Exception as e:
print(str(e) + " Stars and reviews could not be retrieved")
return "", ""
return "", ""
def get_price_from_carousel(soup: BeautifulSoup):
try:
return soup.find("div", "a-row a-color-price").text.strip()
except Exception as e:
print(str(e) + " Price from Carousel could not be retrieved")
return ""
return ""
def extract_details(link: str, tracker):
browserdriver = webdriver.Chrome(options=options, executable_path=r"./chromedriver")
browserdriver.get(link)
timeout = 15
try:
wait = WebDriverWait(browserdriver, timeout)
except TimeoutException as e:
print(e)
print("Timeout")
browserdriver.quit()
return {}
try:
content = browserdriver.page_source
soup = BeautifulSoup(content, "html.parser")
browserdriver.quit()
product_title = soup.find("span", {"id": "productTitle"}).text.strip()
seller = str(identity_seller(soup)).strip()
if seller is None:
return dict()
if seller == tracker:
return dict()
stock_info = get_stock_info(soup)
return dict(
product_title=product_title,
seller=seller,
stock_info=stock_info,
)
except Exception as e:
browserdriver.quit()
print(e)
return {}
def parse_carousel(soup: BeautifulSoup, tracker):
skipped = 0
r = []
for data in soup.findChildren("li", recursive=False):
try:
asin = data.find(
"div", "a-section sp_offerVertical p13n-asin sp_ltr_offer"
)["data-asin"]
link = data.find("a", "a-link-normal")["href"]
details = extract_details(link, tracker)
if details is None or details == dict():
print("SKIPPING DUE TO SAME SELLER")
skipped += 1
time.sleep(7)
continue
stars, rev = get_stars_reviews(data)
price = get_price_from_carousel(data)
details["stars"] = stars
details["reviews"] = rev
details["price"] = price
print(asin)
print(link)
print(details)
print()
r.append(dict(asin=asin, linkToProduct=link, details=details))
time.sleep(7)
except Exception as e:
skipped += 1
print(e)
continue
print(f"SKIPPED COMPETITORS - {skipped}")
return r
def lookup_similar_products(soup: BeautifulSoup, tracker):
try:
search_area = soup.find("div", {"id": "sp_detail"})
try:
search_area = search_area.find("ol", "a-carousel")
        except Exception as e:
            print(str(e) + " Carousel1 not found")
    except Exception as e:
        print(str(e) + " sp_detail not found")
try:
search_area = soup.find("div", {"id": "sp_detail2"})
try:
search_area = search_area.find("ol", "a-carousel")
        except Exception as e:
            print(str(e) + " Carousel2 not found")
    except Exception as e:
        print(str(e) + " sp_detail2 not found")
return []
try:
if search_area is not None:
return parse_carousel(search_area, tracker)
    except Exception as e:
        print(str(e) + " sp_detail AND sp_detail2 carousels could not be parsed")
        return []
return []
def track_for_product(soup: BeautifulSoup, asin, zone, search):
try:
seller_name = identity_seller(soup)
if seller_name is None:
return "NA"
print(seller_name)
tracker = seller_name
tracking = lookup_similar_products(soup, tracker)
return dict(
tracker_firm=tracker,
tracking=tracking,
asin=asin,
zone=zone,
search=search,
)
except Exception as e:
print(e)
print("IN track_for_product")
return {}
return {}
def amazon_track_competition(asin: str, zone: str, *args, **kwargs):
browserdriver = webdriver.Chrome(options=options, executable_path=r"./chromedriver")
search = zone_to_url[zone] + "dp/" + asin
browserdriver.get(search)
print(search)
print()
timeout = 15
try:
wait = WebDriverWait(browserdriver, timeout)
except TimeoutException as e:
print(e)
print("Timeout")
browserdriver.quit()
return {}
try:
content = browserdriver.page_source
soup = BeautifulSoup(content, "html.parser")
browserdriver.quit()
return track_for_product(soup, asin, zone, search)
except Exception as e:
browserdriver.quit()
print(e)
print("IN amazon_track_competition")
return {}
# print(amazon_track_competition("B07Y", "US"))
# amazon_track_competition("B07YWS4QTH", "US")
# amazon_track_competition("B07WSHWNH8", "IN")
# amazon_track_competition("B01MTQ5M7B", "IN")
# amazon_track_competition("B081KBXT5N", "US")
# amazon_track_competition("B07N39NDDB", "UK")
|
[
"nltk.tokenize.RegexpTokenizer",
"time.sleep",
"selenium.webdriver.ChromeOptions",
"selenium.webdriver.Chrome",
"bs4.BeautifulSoup",
"selenium.webdriver.support.ui.WebDriverWait"
] |
[((426, 459), 'nltk.tokenize.RegexpTokenizer', 'RegexpTokenizer', (['"""[a-zA-Z\\\\s\\\\d]"""'], {}), "('[a-zA-Z\\\\s\\\\d]')\n", (441, 459), False, 'from nltk.tokenize import RegexpTokenizer\n'), ((470, 495), 'selenium.webdriver.ChromeOptions', 'webdriver.ChromeOptions', ([], {}), '()\n', (493, 495), False, 'from selenium import webdriver\n'), ((4245, 4312), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options', 'executable_path': '"""./chromedriver"""'}), "(options=options, executable_path='./chromedriver')\n", (4261, 4312), False, 'from selenium import webdriver\n'), ((7951, 8018), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {'options': 'options', 'executable_path': '"""./chromedriver"""'}), "(options=options, executable_path='./chromedriver')\n", (7967, 8018), False, 'from selenium import webdriver\n'), ((4385, 4422), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browserdriver', 'timeout'], {}), '(browserdriver, timeout)\n', (4398, 4422), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((4617, 4654), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (4630, 4654), False, 'from bs4 import BeautifulSoup\n'), ((8171, 8208), 'selenium.webdriver.support.ui.WebDriverWait', 'WebDriverWait', (['browserdriver', 'timeout'], {}), '(browserdriver, timeout)\n', (8184, 8208), False, 'from selenium.webdriver.support.ui import WebDriverWait\n'), ((8403, 8440), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content', '"""html.parser"""'], {}), "(content, 'html.parser')\n", (8416, 8440), False, 'from bs4 import BeautifulSoup\n'), ((6176, 6189), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (6186, 6189), False, 'import time\n'), ((5742, 5755), 'time.sleep', 'time.sleep', (['(7)'], {}), '(7)\n', (5752, 5755), False, 'import time\n')]
|
from django.shortcuts import render,redirect
from .models import QuesModel
from django.http import JsonResponse
# Create your views here.
def audioquiz(request):
quiz=QuesModel.objects.all()
if request.method == 'POST':
print(request.POST)
score = 0
wrong = 0
correct = 0
total = 0
for q in quiz:
total += 1
print(request.POST.get(q.question))
print(q.ans)
print()
if q.ans == request.POST.get(q.question):
score += 10
correct += 1
else:
wrong += 1
        percent = score / (total * 10) * 100 if total else 0
context = {
'score': score,
'time': request.POST.get('timer'),
'correct': correct,
'wrong': wrong,
'percent': percent,
'total': total
}
return render(request, 'results.html', context)
return render(request,'aqz.html',{'quiz':quiz})
def getquiz(request):
quiz=QuesModel.objects.all()
quiz=list(quiz.values())
for item in quiz:
del item['ans']
return JsonResponse({"quiz":quiz})
def results(request):
pass
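# Editorial note (inferred from the view code above, not part of the original file):
# aqz.html is expected to POST one field per question, where the field name is the
# question text and the value is the selected answer, plus an optional 'timer' field;
# audioquiz() then awards 10 points per correct answer and reports the percentage score.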
|
[
"django.shortcuts.render",
"django.http.JsonResponse"
] |
[((959, 1002), 'django.shortcuts.render', 'render', (['request', '"""aqz.html"""', "{'quiz': quiz}"], {}), "(request, 'aqz.html', {'quiz': quiz})\n", (965, 1002), False, 'from django.shortcuts import render, redirect\n'), ((1144, 1172), 'django.http.JsonResponse', 'JsonResponse', (["{'quiz': quiz}"], {}), "({'quiz': quiz})\n", (1156, 1172), False, 'from django.http import JsonResponse\n'), ((906, 946), 'django.shortcuts.render', 'render', (['request', '"""results.html"""', 'context'], {}), "(request, 'results.html', context)\n", (912, 946), False, 'from django.shortcuts import render, redirect\n')]
|
# -*- coding: utf-8 -*-
#
from math import pi
import numpy
from .. import helpers
def show(scheme, backend="mpl"):
"""Displays scheme for 3D ball quadrature.
"""
helpers.backend_to_function[backend](
scheme.points,
scheme.weights,
volume=4.0 / 3.0 * pi,
edges=[],
balls=[((0.0, 0.0, 0.0), 1.0)],
)
return
def integrate(f, center, radius, rule, dot=numpy.dot):
center = numpy.array(center)
rr = numpy.multiply.outer(radius, rule.points)
rr = numpy.swapaxes(rr, 0, -2)
ff = numpy.array(f((rr + center).T))
return numpy.array(radius) ** 3 * dot(ff, rule.weights)
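# --- Hedged demo (editorial addition, not part of the original module) ---
# A single-point "rule" whose only weight equals the ball volume 4/3*pi should
# integrate f(x) = 1 over the unit ball exactly. The _MidpointRule name is made
# up for illustration; run via `python -m ...` because of the relative import above.
if __name__ == "__main__":
    class _MidpointRule:
        points = numpy.array([[0.0, 0.0, 0.0]])
        weights = numpy.array([4.0 / 3.0 * pi])

    value = integrate(
        lambda x: numpy.ones(x.shape[1:]), (0.0, 0.0, 0.0), 1.0, _MidpointRule
    )
    print(value)  # expected: ~4.18879 (= 4/3 * pi)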
|
[
"numpy.multiply.outer",
"numpy.array",
"numpy.swapaxes"
] |
[((438, 457), 'numpy.array', 'numpy.array', (['center'], {}), '(center)\n', (449, 457), False, 'import numpy\n'), ((467, 508), 'numpy.multiply.outer', 'numpy.multiply.outer', (['radius', 'rule.points'], {}), '(radius, rule.points)\n', (487, 508), False, 'import numpy\n'), ((518, 543), 'numpy.swapaxes', 'numpy.swapaxes', (['rr', '(0)', '(-2)'], {}), '(rr, 0, -2)\n', (532, 543), False, 'import numpy\n'), ((596, 615), 'numpy.array', 'numpy.array', (['radius'], {}), '(radius)\n', (607, 615), False, 'import numpy\n')]
|
import unittest
from pyjsonassert.matchers import StringMatcher
class TestStringMatcher(unittest.TestCase):
string = "asfasdf"
number_as_string = "12"
number = 12
float = 12.2
boolean = False
def test_should_identify_an_string(self):
assert StringMatcher.match(self.string) is True
def test_should_consider_string_a_number_that_comes_as_string_type(self):
assert StringMatcher.match(self.number_as_string) is True
    def test_should_return_false_if_variable_is_not_a_string(self):
assert StringMatcher.match(self.number) is False
assert StringMatcher.match(self.float) is False
assert StringMatcher.match(self.boolean) is False
|
[
"pyjsonassert.matchers.StringMatcher.match"
] |
[((278, 310), 'pyjsonassert.matchers.StringMatcher.match', 'StringMatcher.match', (['self.string'], {}), '(self.string)\n', (297, 310), False, 'from pyjsonassert.matchers import StringMatcher\n'), ((414, 456), 'pyjsonassert.matchers.StringMatcher.match', 'StringMatcher.match', (['self.number_as_string'], {}), '(self.number_as_string)\n', (433, 456), False, 'from pyjsonassert.matchers import StringMatcher\n'), ((551, 583), 'pyjsonassert.matchers.StringMatcher.match', 'StringMatcher.match', (['self.number'], {}), '(self.number)\n', (570, 583), False, 'from pyjsonassert.matchers import StringMatcher\n'), ((608, 639), 'pyjsonassert.matchers.StringMatcher.match', 'StringMatcher.match', (['self.float'], {}), '(self.float)\n', (627, 639), False, 'from pyjsonassert.matchers import StringMatcher\n'), ((664, 697), 'pyjsonassert.matchers.StringMatcher.match', 'StringMatcher.match', (['self.boolean'], {}), '(self.boolean)\n', (683, 697), False, 'from pyjsonassert.matchers import StringMatcher\n')]
|
from machine import Pin
led1 = Pin(("LED1", 52), Pin.OUT_PP)
led2 = Pin(("LED2", 53), Pin.OUT_PP)
key1 = Pin(("KEY1", 85), Pin.IN, Pin.PULL_UP)
key2 = Pin(("KEY2", 86), Pin.IN, Pin.PULL_UP)
while True:
if key1.value():
led1.value(1)
else:
led1.value(0)
if key2.value():
led2.value(1)
else:
led2.value(0)
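# Editorial note (wiring-dependent assumption, not part of the original script):
# with Pin.PULL_UP each key input typically reads 1 while released and 0 while
# pressed (button pulls the pin to ground), so led.value(1) is applied while the
# key is NOT pressed. Swap the branches if the opposite behaviour is wanted.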
|
[
"machine.Pin"
] |
[((31, 60), 'machine.Pin', 'Pin', (["('LED1', 52)", 'Pin.OUT_PP'], {}), "(('LED1', 52), Pin.OUT_PP)\n", (34, 60), False, 'from machine import Pin\n'), ((68, 97), 'machine.Pin', 'Pin', (["('LED2', 53)", 'Pin.OUT_PP'], {}), "(('LED2', 53), Pin.OUT_PP)\n", (71, 97), False, 'from machine import Pin\n'), ((105, 143), 'machine.Pin', 'Pin', (["('KEY1', 85)", 'Pin.IN', 'Pin.PULL_UP'], {}), "(('KEY1', 85), Pin.IN, Pin.PULL_UP)\n", (108, 143), False, 'from machine import Pin\n'), ((151, 189), 'machine.Pin', 'Pin', (["('KEY2', 86)", 'Pin.IN', 'Pin.PULL_UP'], {}), "(('KEY2', 86), Pin.IN, Pin.PULL_UP)\n", (154, 189), False, 'from machine import Pin\n')]
|
# -*- coding: utf-8 -*-
#
# inventory/suppliers/admin.py
#
"""
Supplier Admin
"""
__docformat__ = "restructuredtext en"
from django.contrib import admin
from django.utils.translation import gettext_lazy as _
from inventory.common.admin_mixins import UserAdminMixin, UpdaterFilter
from .models import Supplier
#
# SupplierAdmin
#
@admin.register(Supplier)
class SupplierAdmin(UserAdminMixin, admin.ModelAdmin):
fieldsets = (
(None, {'fields': ('public_id', 'project', 'name', 'stype',
'address_01', 'address_02', 'city', 'subdivision',
'postal_code', 'country', 'phone', 'fax', 'email',
'url', 'language', 'timezone',)}),
(_('Status'), {'classes': ('collapse',),
'fields': ('active', 'creator', 'created', 'updater',
'updated',)}),
)
readonly_fields = ('public_id', 'creator', 'created', 'updater',
'updated',)
list_display = ('name', 'public_id', 'stype', 'phone', 'email',
'url_producer', 'project', 'updater_producer', 'active',)
list_editable = ('stype', 'active',)
search_fields = ('project__name', 'country__country', 'city',
'region__region', 'region__region_code',)
list_filter = ('stype', 'active', 'project__name', UpdaterFilter,)
ordering = ('name',)
# class Media:
# js = ('vendor/js.cookie-2.0.4.min.js',
# 'js/inheritance.js',
# 'js/regions.js',)
|
[
"django.contrib.admin.register",
"django.utils.translation.gettext_lazy"
] |
[((335, 359), 'django.contrib.admin.register', 'admin.register', (['Supplier'], {}), '(Supplier)\n', (349, 359), False, 'from django.contrib import admin\n'), ((728, 739), 'django.utils.translation.gettext_lazy', '_', (['"""Status"""'], {}), "('Status')\n", (729, 739), True, 'from django.utils.translation import gettext_lazy as _\n')]
|
#----------------------------------------------------------------------------#
# Imports
#----------------------------------------------------------------------------#
from flask import Flask, render_template, request
from flask_basicauth import BasicAuth
# from flask.ext.sqlalchemy import SQLAlchemy
import logging
from logging import Formatter, FileHandler
from forms import *
from pymongo import MongoClient
from alg import *
from language_data import *
#----------------------------------------------------------------------------#
# App Config
#----------------------------------------------------------------------------#
app = Flask(__name__)
app.config.from_object('config')
app.config['BASIC_AUTH_USERNAME'] = 'langadmin'
app.config['BASIC_AUTH_PASSWORD'] = '<PASSWORD>'
basic_auth = BasicAuth(app)
#open mongodb connection
client = MongoClient("mongodb+srv://admin:[email protected]/test?retryWrites=true")
db = client.test
#db = SQLAlchemy(app)
# Automatically tear down SQLAlchemy.
'''
@app.teardown_request
def shutdown_session(exception=None):
db_session.remove()
'''
# Login required decorator.
'''
def login_required(test):
@wraps(test)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return test(*args, **kwargs)
else:
flash('You need to login first.')
return redirect(url_for('login'))
return wrap
'''
#----------------------------------------------------------------------------#
# Controllers.
#----------------------------------------------------------------------------#
@app.route('/')
def home():
return render_template('pages/home.html')
@app.route('/about')
def about():
return render_template('pages/about.html')
@app.route('/signup', methods=['POST', 'GET'])
def signup():
if request.method == 'POST': #successful form post
results = request.get_json()
db.inventory.insert(results) #load form results into mongodb
update()
return render_template('pages/register.html')
@app.route('/learn_data')
@basic_auth.required
def gen_learn_chart():
gen_learn_pie()
return render_template('learn-count-pie.html')
@app.route('/share_data')
@basic_auth.required
def gen_share_chart():
gen_share_pie()
return render_template('share-count-pie.html')
@app.route('/view_students', methods=['GET', 'PUT'])
@basic_auth.required
def students():
if request.method == 'PUT':
results = request.get_json()
header = results.pop(0)
if header['status'] == 'pair':
s1 = results[0]['name']
s2 = results[1]['name']
db.inventory.update(
{"name": s1},
{"$set": {"partner": s2}}
)
db.inventory.update(
{"name": s2},
{"$set": {"partner": s1}}
)
if header['status'] == 'remove':
for r in results:
db.inventory.update(
{"name": r['name']},
{"$set": {"partner": "None"}}
)
if header['status'] == 'correct':
for r in results:
db.inventory.update(
{"name": r['name']},
{"$set": {"placement": "Correct"}}
)
if header['status'] == 'incorrect':
for r in results:
db.inventory.update(
{"name": r['name']},
{"$set": {"placement": "Incorrect"}}
)
if header['status'] == 'delete':
for r in results:
db.inventory.delete_one(
{"name": r['name']}
)
update()
rows = db.inventory.find({})
rowslist = []
for r in rows:
llist = [r['ll1'], r['ll2'], r['ll3']]
llist[:] = [x for x in llist if x != 'None']
llist.sort()
slist = [r['sl1'], r['sl2'], r['sl3']]
slist[:] = [x for x in slist if x != 'None']
slist.sort()
student = {
'name': r['name'],
'year': r['year'],
'ruid': r['ruid'],
'email': r['email'],
'learn_langs': make_string(llist),
'share_langs': make_string(slist),
'partner': r['partner'],
'placement': r['placement']
}
rowslist.append(student)
return render_template('pages/students.html', rows=rowslist)
def make_string(langs):
res = ''
for i in range(len(langs)):
if(i + 1 < len(langs)):
res += langs[i] + ', '
else:
res += langs[i]
return res
@app.route('/make_pairs', methods=['POST', 'GET'])
@basic_auth.required
def make():
if request.method == 'POST':
results = request.get_json()
for r in results:
names = r['names'].split('&')
s1 = names[0][:(len(names[0])-1)]
s2 = names[1][1:]
db.inventory.update(
{"name": s1},
{"$set": {"partner": s2}}
)
db.inventory.update(
{"name": s2},
{"$set": {"partner": s1}}
)
pairlist = []
pairs = make_pairs()
for pair in pairs:
langs = pair.language1 + " & " + pair.language2
names = pair.student1.name + " & " + pair.student2.name
p = {
'languages' : langs,
'names' : names,
'prof1' : pair.prof1,
'prof2' : pair.prof2
}
pairlist.append(p)
return render_template('pages/pairs.html', pairs=pairlist)
@app.route('/register')
def register():
form = RegisterForm(request.form)
return render_template('forms/register.html', form=form)
@app.route('/login', methods=['GET'])
def login():
form = LoginForm(request.form)
return render_template('forms/login.html', form=form)
@app.route('/forgot')
def forgot():
form = ForgotForm(request.form)
return render_template('forms/forgot.html', form=form)
# Error handlers
@app.errorhandler(500)
def internal_error(error):
#db_session.rollback()
return render_template('errors/500.html'), 500
@app.errorhandler(404)
def not_found_error(error):
return render_template('errors/404.html'), 404
if not app.debug:
file_handler = FileHandler('error.log')
file_handler.setFormatter(
Formatter('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]')
)
app.logger.setLevel(logging.INFO)
file_handler.setLevel(logging.INFO)
app.logger.addHandler(file_handler)
app.logger.info('errors')
def update():
db.inventory.update_many(
{"$or" : [
{"ll2": {"$exists": False}},
{"ll2": None},
]},
{"$set": {"ll2": "None", "lp2": "None", "ll3": "None", "lp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"ll3": {"$exists": False}},
{"ll3": None},
]},
{"$set": {"ll3": "None", "lp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"sl2": {"$exists": False}},
{"sl2": None},
]},
{"$set": {"sl2": "None", "sp2": "None", "sl3": "None", "sp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"sl3": {"$exists": False}},
{"sl3": None},
]},
{"$set": {"sl3": "None", "sp3": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"partner": {"$exists": False}},
{"partner": None},
]},
{"$set": {"partner": "None"}}
)
db.inventory.update_many(
{"$or" : [
{"rate1": {"$exists": False}},
{"rate1": None},
]},
{"$set": {"rate1": "3"}}
)
db.inventory.update_many(
{"$or" : [
{"rate2": {"$exists": False}},
{"rate2": None},
]},
{"$set": {"rate2": "3"}}
)
db.inventory.update_many(
{"$or" : [
{"placement": {"$exists": False}},
{"placement": None},
]},
{"$set": {"placement": "Unverified"}}
)
#----------------------------------------------------------------------------#
# Launch.
#----------------------------------------------------------------------------#
# Default port:
if __name__ == '__main__':
app.run()
# Or specify port manually:
'''
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
'''
|
[
"pymongo.MongoClient",
"logging.FileHandler",
"flask.Flask",
"flask_basicauth.BasicAuth",
"logging.Formatter",
"flask.render_template",
"flask.request.get_json"
] |
[((638, 653), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (643, 653), False, 'from flask import Flask, render_template, request\n'), ((798, 812), 'flask_basicauth.BasicAuth', 'BasicAuth', (['app'], {}), '(app)\n', (807, 812), False, 'from flask_basicauth import BasicAuth\n'), ((848, 959), 'pymongo.MongoClient', 'MongoClient', (['"""mongodb+srv://admin:[email protected]/test?retryWrites=true"""'], {}), "(\n 'mongodb+srv://admin:[email protected]/test?retryWrites=true'\n )\n", (859, 959), False, 'from pymongo import MongoClient\n'), ((1643, 1677), 'flask.render_template', 'render_template', (['"""pages/home.html"""'], {}), "('pages/home.html')\n", (1658, 1677), False, 'from flask import Flask, render_template, request\n'), ((1724, 1759), 'flask.render_template', 'render_template', (['"""pages/about.html"""'], {}), "('pages/about.html')\n", (1739, 1759), False, 'from flask import Flask, render_template, request\n'), ((2011, 2049), 'flask.render_template', 'render_template', (['"""pages/register.html"""'], {}), "('pages/register.html')\n", (2026, 2049), False, 'from flask import Flask, render_template, request\n'), ((2152, 2191), 'flask.render_template', 'render_template', (['"""learn-count-pie.html"""'], {}), "('learn-count-pie.html')\n", (2167, 2191), False, 'from flask import Flask, render_template, request\n'), ((2294, 2333), 'flask.render_template', 'render_template', (['"""share-count-pie.html"""'], {}), "('share-count-pie.html')\n", (2309, 2333), False, 'from flask import Flask, render_template, request\n'), ((4367, 4420), 'flask.render_template', 'render_template', (['"""pages/students.html"""'], {'rows': 'rowslist'}), "('pages/students.html', rows=rowslist)\n", (4382, 4420), False, 'from flask import Flask, render_template, request\n'), ((5525, 5576), 'flask.render_template', 'render_template', (['"""pages/pairs.html"""'], {'pairs': 'pairlist'}), "('pages/pairs.html', pairs=pairlist)\n", (5540, 5576), False, 'from flask import Flask, render_template, request\n'), ((5668, 5717), 'flask.render_template', 'render_template', (['"""forms/register.html"""'], {'form': 'form'}), "('forms/register.html', form=form)\n", (5683, 5717), False, 'from flask import Flask, render_template, request\n'), ((5816, 5862), 'flask.render_template', 'render_template', (['"""forms/login.html"""'], {'form': 'form'}), "('forms/login.html', form=form)\n", (5831, 5862), False, 'from flask import Flask, render_template, request\n'), ((5947, 5994), 'flask.render_template', 'render_template', (['"""forms/forgot.html"""'], {'form': 'form'}), "('forms/forgot.html', form=form)\n", (5962, 5994), False, 'from flask import Flask, render_template, request\n'), ((6284, 6308), 'logging.FileHandler', 'FileHandler', (['"""error.log"""'], {}), "('error.log')\n", (6295, 6308), False, 'from logging import Formatter, FileHandler\n'), ((1895, 1913), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (1911, 1913), False, 'from flask import Flask, render_template, request\n'), ((2475, 2493), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (2491, 2493), False, 'from flask import Flask, render_template, request\n'), ((4751, 4769), 'flask.request.get_json', 'request.get_json', ([], {}), '()\n', (4767, 4769), False, 'from flask import Flask, render_template, request\n'), ((6102, 6136), 'flask.render_template', 'render_template', (['"""errors/500.html"""'], {}), "('errors/500.html')\n", (6117, 6136), False, 'from flask import Flask, render_template, request\n'), ((6206, 6240), 
'flask.render_template', 'render_template', (['"""errors/404.html"""'], {}), "('errors/404.html')\n", (6221, 6240), False, 'from flask import Flask, render_template, request\n'), ((6348, 6433), 'logging.Formatter', 'Formatter', (['"""%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]"""'], {}), "('%(asctime)s %(levelname)s: %(message)s [in %(pathname)s:%(lineno)d]'\n )\n", (6357, 6433), False, 'from logging import Formatter, FileHandler\n')]
|
from rest_framework import exceptions, status
from rest_framework.views import Response, exception_handler
def custom_exception_handler(exc, context):
# Call REST framework's default exception handler first to get the standard error response.
response = exception_handler(exc, context)
# if there is an IntegrityError and the error response hasn't already been generated
if isinstance(exc, Exception) and not response:
response = Response({'message': str(exc)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
return response
class UnprocessableEntity(exceptions.APIException):
status_code = 422
default_detail = 'Cannot process with the data'
default_code = 'unprocessable_entity'
class BadRequest(exceptions.APIException):
status_code = 400
default_detail = 'Bad Request'
default_code = 'bad_request'
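# --- Hedged usage note (editorial addition) ---
# DRF only invokes custom_exception_handler if it is registered in settings.py;
# the dotted path below is hypothetical and depends on where this module lives:
#
#   REST_FRAMEWORK = {
#       "EXCEPTION_HANDLER": "myproject.utils.exceptions.custom_exception_handler",
#   }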
|
[
"rest_framework.views.exception_handler"
] |
[((264, 295), 'rest_framework.views.exception_handler', 'exception_handler', (['exc', 'context'], {}), '(exc, context)\n', (281, 295), False, 'from rest_framework.views import Response, exception_handler\n')]
|
"""
Utils for generating random data and comparing performance
"""
import os
import time
import pickle
import random
from kmeans import kmeans, here
here = here(__file__)
try:
range = xrange
except NameError:
pass
def timer():
start = time.clock()
return lambda: time.clock() - start
def random_points(n):
"""Returns n random [(x_1, x_2, x_3), w] tuples.
Constraints:
0 <= abs(x_N) <= 1<<8
0 <= w <= 100
x_N, w are non-negative integers
"""
rx = lambda: random.randrange(0, 1 << 8)
rw = lambda: random.randrange(1, 10)
point = lambda: [(rx(), rx(), rx()), rw()]
filename = os.path.join(here, "_perf.sample")
try:
with open(filename, 'rb') as f:
points = pickle.load(f)
except:
points = []
diff = n - len(points)
if diff > 0:
print("Cache was missing {} points".format(diff))
t = timer()
points.extend(point() for _ in range(diff))
with open(filename, 'wb') as f:
pickle.dump(points, f)
elapsed = t()
print("Added {} points to the cache ({}s)".format(diff, elapsed))
return ListProxy(points)
class ListProxy(list):
"""Fake sizes by setting length"""
def __init__(self, data):
super().__init__(data)
self.max_length = len(data)
self.length = self.max_length
@property
def length(self):
return self._length
@length.setter
def length(self, n):
if n > self.max_length:
raise ValueError(
"Maximum possible length is " + str(self.max_length))
self._length = n
def __len__(self):
return self.length
def __iter__(self):
for i in range(self.length):
yield self[i]
def main():
samples = [
# ~ Common "large" image sizes
(1920 * 1200, 3),
(1920 * 1200, 5),
(1920 * 1200, 15),
# Personal benchmarks
(747116, 5), # Unique pixels in 1920 x 1080 image
(1095169, 5), # Unique pixels in 15530 x 8591 image
# Max unique pixels in rgb 256 image
(16581375, 5)
]
max_sample = max(sample[0] for sample in samples)
print("Generating {} random points".format(max_sample))
t = timer()
points = random_points(max_sample)
elapsed = t()
print("Random points generated ({}s)".format(elapsed))
def run_test(n, k):
points.length = n
t = timer()
kmeans(points, k)
elapsed = t()
return elapsed
for n, k in samples:
print("Running test: {} points, {} centers".format(n, k))
elapsed = run_test(n, k)
print("N {:9} || K {:3} || E {}".format(n, k, elapsed))
if __name__ == "__main__":
main()
|
[
"pickle.dump",
"kmeans.kmeans",
"kmeans.here",
"time.clock",
"pickle.load",
"random.randrange",
"os.path.join"
] |
[((156, 170), 'kmeans.here', 'here', (['__file__'], {}), '(__file__)\n', (160, 170), False, 'from kmeans import kmeans, here\n'), ((250, 262), 'time.clock', 'time.clock', ([], {}), '()\n', (260, 262), False, 'import time\n'), ((651, 685), 'os.path.join', 'os.path.join', (['here', '"""_perf.sample"""'], {}), "(here, '_perf.sample')\n", (663, 685), False, 'import os\n'), ((520, 547), 'random.randrange', 'random.randrange', (['(0)', '(1 << 8)'], {}), '(0, 1 << 8)\n', (536, 547), False, 'import random\n'), ((565, 588), 'random.randrange', 'random.randrange', (['(1)', '(10)'], {}), '(1, 10)\n', (581, 588), False, 'import random\n'), ((2486, 2503), 'kmeans.kmeans', 'kmeans', (['points', 'k'], {}), '(points, k)\n', (2492, 2503), False, 'from kmeans import kmeans, here\n'), ((282, 294), 'time.clock', 'time.clock', ([], {}), '()\n', (292, 294), False, 'import time\n'), ((756, 770), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (767, 770), False, 'import pickle\n'), ((1030, 1052), 'pickle.dump', 'pickle.dump', (['points', 'f'], {}), '(points, f)\n', (1041, 1052), False, 'import pickle\n')]
|
# Examples from the article "Two-stage recursive algorithms in XSLT"
# By <NAME> and <NAME>
# http://www.topxml.com/xsl/articles/recurse/
from Xml.Xslt import test_harness
BOOKS = """ <book>
<title>Angela's Ashes</title>
<author><NAME></author>
<publisher>HarperCollins</publisher>
<isbn>0 00 649840 X</isbn>
<price>6.99</price>
<sales>235</sales>
</book>
<book>
<title>Sword of Honour</title>
<author><NAME></author>
<publisher>Penguin Books</publisher>
<isbn>0 14 018967 X</isbn>
<price>12.99</price>
<sales>12</sales>
</book>"""
BOOKLIST_XML = """<?xml version="1.0" encoding="utf-8"?>
<booklist>
%s
</booklist>"""
BOOKS_TOTAL = 6.99 * 235 + 12.99 * 12
# total-sales/simple.xsl
sheet_1 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="/*/book"/>
</xsl:call-template>
</xsl:template>
<xsl:template name="sumSales1">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="result" select="0"/>
<xsl:choose>
<xsl:when test="$pNodes">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="$pNodes[position()!=1]"/>
<xsl:with-param name="result" select="$result+$pNodes[1]/sales*$pNodes[1]/price"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="$result"/>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
# total-sales/dvc.xsl
sheet_2 = """<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="/*/book"/>
</xsl:call-template>
</xsl:template>
<xsl:template name="sumSales">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="result" select="0"/>
<xsl:variable name="vcntNodes" select="count($pNodes)"/>
<xsl:choose>
<xsl:when test="$vcntNodes = 1">
<xsl:value-of select="$result + $pNodes/sales * $pNodes/price"/>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="vcntHalf" select="floor($vcntNodes div 2)"/>
<xsl:variable name="vValue1">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() <= $vcntHalf]"/>
<xsl:with-param name="result" select="$result"/>
</xsl:call-template>
</xsl:variable>
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() > $vcntHalf]"/>
<xsl:with-param name="result" select="$vValue1"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
# total-sales/two-stage.xsl
# (with $t param added so threshold can be adjusted)
#
# The threshold is the # of elements above which DVC will be used,
# and below which recursion will be used.
#
sheet_3="""<?xml version="1.0" encoding="utf-8"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:param name="t" select="20"/>
<xsl:template match="/">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="/*/book"/>
<xsl:with-param name="threshold" select="$t"/>
</xsl:call-template>
</xsl:template>
<!-- DVC template: -->
<xsl:template name="sumSales">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="threshold" select="10"/>
<xsl:param name="result" select="0"/>
<xsl:variable name="vcntNodes" select="count($pNodes)"/>
<xsl:choose>
<xsl:when test="$vcntNodes <= $threshold">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="$pNodes"/>
<xsl:with-param name="result" select="$result"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="vcntHalf" select="floor($vcntNodes div 2)"/>
<xsl:variable name="vValue1">
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() <= $vcntHalf]"/>
<xsl:with-param name="threshold" select="$threshold"/>
<xsl:with-param name="result" select="$result"/>
</xsl:call-template>
</xsl:variable>
<xsl:call-template name="sumSales">
<xsl:with-param name="pNodes" select="$pNodes[position() > $vcntHalf]"/>
<xsl:with-param name="threshold" select="$threshold"/>
<xsl:with-param name="result" select="$vValue1"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- simple recursive template: -->
<xsl:template name="sumSales1">
<xsl:param name="pNodes" select="/.."/>
<xsl:param name="result" select="0"/>
<xsl:choose>
<xsl:when test="$pNodes">
<xsl:call-template name="sumSales1">
<xsl:with-param name="pNodes" select="$pNodes[position()!=1]"/>
<xsl:with-param name="result" select="$result+$pNodes[1]/sales*$pNodes[1]/price"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise><xsl:value-of select="$result"/></xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
DIGITS = "0123456789"
DIGITS_XML = """<?xml version="1.0" encoding="utf-8"?>
<text>%s</text>"""
REVERSED_DIGITS = "9876543210"
# reverse/lrReverse.xsl
sheet_4 = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:template match="/">
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="/*/text()"/>
</xsl:call-template>
</xsl:template>
<xsl:template name="reverse2">
<xsl:param name="theString"/>
<xsl:variable name="thisLength" select="string-length($theString)"/>
<xsl:choose>
<xsl:when test="$thisLength = 1">
<xsl:value-of select="$theString"/>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="length1" select="floor($thisLength div 2)"/>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString,$length1+1, $thisLength - $length1)"/>
</xsl:call-template>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString, 1, $length1)"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
# reverse/lrReverse2.xsl
# (with $t param added so threshold can be adjusted)
#
# The threshold is the # of chars above which DVC will be used,
# and below which recursion will be used.
#
sheet_5 = """<?xml version="1.0"?>
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:output method="text"/>
<xsl:param name="t" select="75"/>
<xsl:template match="/">
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="/*/text()"/>
<xsl:with-param name="threshold" select="$t"/>
</xsl:call-template>
</xsl:template>
<!-- DVC template: -->
<xsl:template name="reverse2">
<xsl:param name="theString"/>
<xsl:param name="threshold" select="30"/>
<xsl:variable name="thisLength" select="string-length($theString)"/>
<xsl:choose>
<xsl:when test="$thisLength <= $threshold">
<xsl:call-template name="reverse">
<xsl:with-param name="theString" select="$theString"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="length1" select="floor($thisLength div 2)"/>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString,$length1+1, $thisLength - $length1)"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
<xsl:call-template name="reverse2">
<xsl:with-param name="theString" select="substring($theString, 1, $length1)"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- simple recursive template: -->
<xsl:template name="reverse">
<xsl:param name="theString"/>
<xsl:variable name="thisLength" select="string-length($theString)"/>
<xsl:choose>
<xsl:when test="$thisLength = 1">
<xsl:value-of select="$theString"/>
</xsl:when>
<xsl:otherwise>
<xsl:value-of select="substring($theString,$thisLength,1)"/>
<xsl:call-template name="reverse">
<xsl:with-param name="theString" select="substring($theString, 1, $thisLength -1)"/>
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
GOBBLEDY = "dfd dh AAAsrter xcbxb AAAA gghmjk gfghjk ghAAAkghk dgsdfgAAA sdsdg AAA sdsdfg\n"
GOBBLEDY_XML = """<?xml version="1.0" encoding="utf-8"?>
<text>%s</text>"""
GOBBLEDY_OUT = GOBBLEDY.replace('AAA','ZZZ')
sheet_6="""<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common">
<xsl:output method="text" encoding="iso-8859-1" />
<xsl:template match="/">
<xsl:variable name="Result">
<xsl:call-template name="lrReplace">
<xsl:with-param name="theString" select="/*/text()"/>
<xsl:with-param name="target" select="'AAA'" />
<xsl:with-param name="replacement" select="'ZZZ'" />
</xsl:call-template>
</xsl:variable>
<xsl:value-of select="$Result" />
</xsl:template>
<xsl:template name="lrReplace">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement"/>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="resRTF">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="$theString"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="resNode-set" select="exsl:node-set($resRTF)"/>
<xsl:value-of select="$resNode-set/text()"/>
<xsl:value-of select="substring($theString, $lStr - $resNode-set/u+1)" />
</xsl:template>
<xsl:template name="lrReplace2">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement" select="''" />
<xsl:variable name="lStr" select="string-length($theString)" />
<xsl:variable name="lTarget" select="string-length($target)" />
<xsl:choose>
<xsl:when test="$lStr < $lTarget + $lTarget">
<xsl:choose>
<xsl:when
test="contains($theString,$target)">
<xsl:value-of select="substring-before($theString,$target)" />
<xsl:value-of select="$replacement" />
<u>
<xsl:value-of select="string-length(substring-after($theString,$target))" />
</u>
</xsl:when>
<xsl:otherwise>
<xsl:choose>
<xsl:when test="$lStr >= $lTarget">
<xsl:value-of select="substring($theString, 1, $lStr - $lTarget + 1)"/>
<u>
<xsl:value-of select="$lTarget - 1" />
</u>
</xsl:when>
<xsl:otherwise>
<u>
<xsl:value-of select="$lStr" />
</u>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:when>
<!-- Now the general case - theString is not less than twice the replacement -->
<xsl:otherwise>
<xsl:variable name="halfLength" select="floor($lStr div 2)"/>
<xsl:variable name="processedHalf">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="substring($theString, 1, $halfLength)"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="nodePrHalf" select="exsl:node-set($processedHalf)"/>
<xsl:value-of select="$nodePrHalf/text()"/>
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString"
select="substring($theString, $halfLength - $nodePrHalf/u + 1)" />
<xsl:with-param name="target" select="$target" />
<xsl:with-param name="replacement" select="$replacement" />
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
sheet_7="""<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
xmlns:exsl="http://exslt.org/common">
<xsl:output method="text" encoding="iso-8859-1" />
<xsl:template match="/">
<xsl:variable name="Result">
<xsl:call-template name="lrReplace">
<xsl:with-param name="theString" select="/*/text()"/>
<xsl:with-param name="target" select="'AAA'"/>
<xsl:with-param name="replacement" select="'ZZZ'"/>
<xsl:with-param name="threshold" select="2000"/>
</xsl:call-template>
</xsl:variable>
<xsl:value-of select="$Result" />
</xsl:template>
<xsl:template name="lrReplace">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement"/>
<xsl:param name="threshold" select="150"/>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="resRTF">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="$theString"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="resNode-set" select="exsl:node-set($resRTF)"/>
<xsl:value-of select="$resNode-set/text()"/>
<xsl:value-of select="substring($theString, $lStr - $resNode-set/u+1)"/>
</xsl:template>
<!-- DVC template: -->
<xsl:template name="lrReplace2">
<xsl:param name="theString"/>
<xsl:param name="target"/>
<xsl:param name="replacement"/>
<xsl:param name="threshold" select="150"/>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="lTarget" select="string-length($target)"/>
<xsl:choose>
<xsl:when test="$lStr <= $threshold">
<xsl:call-template name="lrReplace3">
<xsl:with-param name="theString" select="$theString"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="halfLength" select="floor($lStr div 2)"/>
<xsl:variable name="processedHalf">
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="substring($theString, 1, $halfLength)" />
<xsl:with-param name="target" select="$target" />
<xsl:with-param name="replacement" select="$replacement"/>
<xsl:with-param name="threshold" select="$threshold"/>
</xsl:call-template>
</xsl:variable>
<xsl:variable name="nodePrHalf" select="exsl:node-set($processedHalf)"/>
<xsl:value-of select="$nodePrHalf/text()"/>
<xsl:call-template name="lrReplace2">
<xsl:with-param name="theString" select="substring($theString, $halfLength - $nodePrHalf/u + 1)"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
<xsl:with-param name="threshold" select="$threshold" />
</xsl:call-template>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- simple recursive template: -->
<xsl:template name="lrReplace3">
<xsl:param name="theString" />
<xsl:param name="target" />
<xsl:param name="replacement" />
<xsl:choose>
<xsl:when test="contains($theString, $target)">
<xsl:value-of select="substring-before($theString, $target)"/>
<xsl:value-of select="$replacement"/>
<xsl:call-template name="lrReplace3">
<xsl:with-param name="theString" select="substring-after($theString, $target)"/>
<xsl:with-param name="target" select="$target"/>
<xsl:with-param name="replacement" select="$replacement"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<xsl:variable name="lStr" select="string-length($theString)"/>
<xsl:variable name="lTarget" select="string-length($target)"/>
<xsl:choose>
<xsl:when test="$lStr >= $lTarget">
<xsl:value-of select="substring($theString, 1, $lStr -$lTarget+1)" />
<u>
<xsl:value-of select="$lTarget -1"/>
</u>
</xsl:when>
<xsl:otherwise>
<u>
<xsl:value-of select="$lStr"/>
</u>
</xsl:otherwise>
</xsl:choose>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
</xsl:stylesheet>"""
def Test(tester):
# how many repetitions of BOOKS for the shortest source doc
MULTIPLIER = 10
# how many binary orders of magnitude to go up to
EXPLIMIT = 1
sheet = test_harness.FileInfo(string=sheet_1)
for i in range(EXPLIMIT):
elements = (2 * MULTIPLIER) * 2 ** i
title = "simple recursion with %d element" % elements + "s" * (elements > 0)
source_xml = BOOKLIST_XML % ((BOOKS * MULTIPLIER) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = str((BOOKS_TOTAL * MULTIPLIER) * 2 ** i)
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_2)
for i in range(EXPLIMIT):
elements = (2 * MULTIPLIER) * 2 ** i
title = "divide and conquer with %d element" % elements + "s" * (elements > 0)
source_xml = BOOKLIST_XML % ((BOOKS * MULTIPLIER) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = str((BOOKS_TOTAL * MULTIPLIER) * 2 ** i)
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_3)
for i in range(EXPLIMIT):
threshold = 8 # seems to be best as of 2003-03-23
elements = (2 * MULTIPLIER) * 2 ** i
title = "2-stage divide and conquer with %d element" % elements + "s" * (elements > 0)
title += " (threshold=%d)" % threshold
source_xml = BOOKLIST_XML % ((BOOKS * MULTIPLIER) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = str((BOOKS_TOTAL * MULTIPLIER) * 2 ** i)
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title,
topLevelParams={'t': threshold})
sheet = test_harness.FileInfo(string=sheet_4)
for i in range(EXPLIMIT):
chars = 1000 * 2 ** i
title = "divide and conquer reversal of %d-char string" % chars
source_xml = DIGITS_XML % ((DIGITS * 100) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (REVERSED_DIGITS * 100) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_5)
for i in range(EXPLIMIT):
threshold = 75
chars = 1000 * 2 ** i
title = "2-stage divide and conquer reversal of %d-char string" % chars
title += " (threshold=%d)" % threshold
source_xml = DIGITS_XML % ((DIGITS * 100) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (REVERSED_DIGITS * 100) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title,
topLevelParams={'t': threshold})
sheet = test_harness.FileInfo(string=sheet_6)
for i in range(EXPLIMIT):
chars = (len(GOBBLEDY) * 20) * 2 ** i
title = "divide and conquer search/replace on %d-char string" % chars
source_xml = GOBBLEDY_XML % ((GOBBLEDY * 20) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (GOBBLEDY_OUT * 20) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
sheet = test_harness.FileInfo(string=sheet_7)
for i in range(EXPLIMIT):
chars = (len(GOBBLEDY) * 20) * 2 ** i
title = "2-stage divide and conquer search/replace on %d-char string" % chars
source_xml = GOBBLEDY_XML % ((GOBBLEDY * 20) * 2 ** i)
source_1 = test_harness.FileInfo(string=source_xml)
expected_1 = (GOBBLEDY_OUT * 20) * 2 ** i
test_harness.XsltTest(tester, source_1, [sheet], expected_1,
title=title)
return
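# Editorial note: the variants above trade recursion depth for work per call.
# Plain recursion (sheet_1) needs O(n) nested template calls, pure divide and
# conquer (sheet_2, sheet_4, sheet_6) needs only O(log n) depth, and the
# two-stage sheets (sheet_3, sheet_5, sheet_7) switch to simple recursion once a
# chunk is at or below the threshold, so their depth is roughly
# log2(n/threshold) plus the threshold itself.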
|
[
"Xml.Xslt.test_harness.FileInfo",
"Xml.Xslt.test_harness.XsltTest"
] |
[((17899, 17936), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'sheet_1'}), '(string=sheet_1)\n', (17920, 17936), False, 'from Xml.Xslt import test_harness\n'), ((18412, 18449), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'sheet_2'}), '(string=sheet_2)\n', (18433, 18449), False, 'from Xml.Xslt import test_harness\n'), ((18927, 18964), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'sheet_3'}), '(string=sheet_3)\n', (18948, 18964), False, 'from Xml.Xslt import test_harness\n'), ((19618, 19655), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'sheet_4'}), '(string=sheet_4)\n', (19639, 19655), False, 'from Xml.Xslt import test_harness\n'), ((20088, 20125), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'sheet_5'}), '(string=sheet_5)\n', (20109, 20125), False, 'from Xml.Xslt import test_harness\n'), ((20698, 20735), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'sheet_6'}), '(string=sheet_6)\n', (20719, 20735), False, 'from Xml.Xslt import test_harness\n'), ((21188, 21225), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'sheet_7'}), '(string=sheet_7)\n', (21209, 21225), False, 'from Xml.Xslt import test_harness\n'), ((18184, 18224), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'source_xml'}), '(string=source_xml)\n', (18205, 18224), False, 'from Xml.Xslt import test_harness\n'), ((18295, 18368), 'Xml.Xslt.test_harness.XsltTest', 'test_harness.XsltTest', (['tester', 'source_1', '[sheet]', 'expected_1'], {'title': 'title'}), '(tester, source_1, [sheet], expected_1, title=title)\n', (18316, 18368), False, 'from Xml.Xslt import test_harness\n'), ((18699, 18739), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'source_xml'}), '(string=source_xml)\n', (18720, 18739), False, 'from Xml.Xslt import test_harness\n'), ((18810, 18883), 'Xml.Xslt.test_harness.XsltTest', 'test_harness.XsltTest', (['tester', 'source_1', '[sheet]', 'expected_1'], {'title': 'title'}), '(tester, source_1, [sheet], expected_1, title=title)\n', (18831, 18883), False, 'from Xml.Xslt import test_harness\n'), ((19327, 19367), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'source_xml'}), '(string=source_xml)\n', (19348, 19367), False, 'from Xml.Xslt import test_harness\n'), ((19438, 19548), 'Xml.Xslt.test_harness.XsltTest', 'test_harness.XsltTest', (['tester', 'source_1', '[sheet]', 'expected_1'], {'title': 'title', 'topLevelParams': "{'t': threshold}"}), "(tester, source_1, [sheet], expected_1, title=title,\n topLevelParams={'t': threshold})\n", (19459, 19548), False, 'from Xml.Xslt import test_harness\n'), ((19867, 19907), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'source_xml'}), '(string=source_xml)\n', (19888, 19907), False, 'from Xml.Xslt import test_harness\n'), ((19970, 20043), 'Xml.Xslt.test_harness.XsltTest', 'test_harness.XsltTest', (['tester', 'source_1', '[sheet]', 'expected_1'], {'title': 'title'}), '(tester, source_1, [sheet], expected_1, title=title)\n', (19991, 20043), False, 'from Xml.Xslt import test_harness\n'), ((20415, 20455), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'source_xml'}), '(string=source_xml)\n', (20436, 20455), False, 'from Xml.Xslt import test_harness\n'), ((20518, 20628), 'Xml.Xslt.test_harness.XsltTest', 'test_harness.XsltTest', (['tester', 
'source_1', '[sheet]', 'expected_1'], {'title': 'title', 'topLevelParams': "{'t': threshold}"}), "(tester, source_1, [sheet], expected_1, title=title,\n topLevelParams={'t': threshold})\n", (20539, 20628), False, 'from Xml.Xslt import test_harness\n'), ((20972, 21012), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'source_xml'}), '(string=source_xml)\n', (20993, 21012), False, 'from Xml.Xslt import test_harness\n'), ((21071, 21144), 'Xml.Xslt.test_harness.XsltTest', 'test_harness.XsltTest', (['tester', 'source_1', '[sheet]', 'expected_1'], {'title': 'title'}), '(tester, source_1, [sheet], expected_1, title=title)\n', (21092, 21144), False, 'from Xml.Xslt import test_harness\n'), ((21470, 21510), 'Xml.Xslt.test_harness.FileInfo', 'test_harness.FileInfo', ([], {'string': 'source_xml'}), '(string=source_xml)\n', (21491, 21510), False, 'from Xml.Xslt import test_harness\n'), ((21569, 21642), 'Xml.Xslt.test_harness.XsltTest', 'test_harness.XsltTest', (['tester', 'source_1', '[sheet]', 'expected_1'], {'title': 'title'}), '(tester, source_1, [sheet], expected_1, title=title)\n', (21590, 21642), False, 'from Xml.Xslt import test_harness\n')]
|
from click.testing import CliRunner
from luna.pathology.cli.infer_tile_labels import cli
def test_cli(tmp_path):
runner = CliRunner()
result = runner.invoke(cli, [
'pyluna-pathology/tests/luna/pathology/cli/testdata/data/test/slides/123/test_generate_tile_ov_labels/TileImages/data/',
'-o', tmp_path,
'-rn', 'msk-mind/luna-ml',
'-tn', 'tissue_tile_net_transform',
'-mn', 'tissue_tile_net_model_5_class',
'-wt', 'main:tissue_net_2021-01-19_21.05.24-e17.pth',
])
    # No longer errors gracefully -- can update tests with proper data and they'll work
assert result.exit_code == 1
|
[
"click.testing.CliRunner"
] |
[((130, 141), 'click.testing.CliRunner', 'CliRunner', ([], {}), '()\n', (139, 141), False, 'from click.testing import CliRunner\n')]
|
import logging
import requests
from injector import inject
import app_config
from microsoft_graph import MicrosoftGraphAuthentication
class MicrosoftGraph:
@inject
def __init__(self, authentication_handler: MicrosoftGraphAuthentication):
self.authentication_handler = authentication_handler
def query(self, url, additional_headers=None):
self.__update_token()
result = None
if self.token:
headers = {
'Authorization': f'{self.token["token_type"]} {self.token["access_token"]}'
}
if additional_headers:
headers.update(additional_headers)
result = requests.get(url, headers=headers)
return result
def __update_token(self):
self.token = None
self.token = self.authentication_handler.get_token_from_cache()
if not self.token:
logging.error('token not updated')
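# --- Hedged usage sketch (editorial addition) ---
# The injector wiring and module layout are app-specific; the endpoint below is
# the standard Microsoft Graph "me" resource. Names are illustrative only.
#
#   graph = injector_instance.get(MicrosoftGraph)
#   response = graph.query("https://graph.microsoft.com/v1.0/me")
#   if response is not None and response.ok:
#       print(response.json())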
|
[
"logging.error",
"requests.get"
] |
[((684, 718), 'requests.get', 'requests.get', (['url'], {'headers': 'headers'}), '(url, headers=headers)\n', (696, 718), False, 'import requests\n'), ((918, 952), 'logging.error', 'logging.error', (['"""token not updated"""'], {}), "('token not updated')\n", (931, 952), False, 'import logging\n')]
|
from semantic_aware_models.models.recommendation.abstract_recommender import AbstractRecommender
from semantic_aware_models.dataset.movielens.movielens_data_model import *
from surprise import NormalPredictor
from surprise.reader import Reader
from surprise.dataset import Dataset
import time
class RandomRecommender(AbstractRecommender):
""" Algorithm predicting a random rating based on the distribution of the training set, which is assumed to be normal. """
def __init__(self, ratings_file_path=None, separator=None):
super(AbstractRecommender, self).__init__()
        # Create the recommendation model and configure its input parameters:
self.model = NormalPredictor()
self.rating_data_model = RatingDataModel(ratings_file_path=ratings_file_path, separator=separator)
self.separator = separator
def recommend(self, user_id, how_many):
"""
Recommends the best items for a specific user.
:param user_id: Id of the user to recommend.
:param how_many: Number of items that we recommend to the specific user.
:return: Id of the items that the recommender returns.
"""
# Items not seen by a specific user.
item_ids_not_seen_from_user = self.rating_data_model.get_item_ids_not_seen_from_user(user_id)
list_recommend = []
for item_id in item_ids_not_seen_from_user:
preference = self.estimate_preference(user_id, item_id)
list_recommend.append([item_id, preference])
print(item_id, ', ', preference)
list_recommend.sort(key=lambda x: x[1], reverse=True)
return list_recommend[:how_many]
def estimate_preference(self, user_id, item_id):
"""
Estimate the preference value by a specific user.
:param user_id: Id of the user to recommend.
:param item_id: Id of the item to recommend.
:return: The estimate preference by the sepecific recommender.
"""
# train file:
df_ratings = self.rating_data_model.df_ratings
        # A reader is still needed, but only the rating_scale param is required.
reader = Reader(rating_scale=(self.rating_data_model.get_min_preference(), self.rating_data_model.get_max_preference()))
train_data = Dataset(reader=reader)
# The columns must correspond to user id, item id and ratings (in that order).
raw_trainset = train_data.load_from_df(df_ratings[['user_id', 'item_id', 'rating']], reader)
trainset = train_data.construct_trainset(raw_trainset.raw_ratings)
        # Train the recommendation model:
self.model.fit(trainset)
return float(self.model.estimate(u=user_id, i=item_id)[0])
def recommend_rival(self, n_folds, train_test_file_path, reader, recommendation_file_path):
"""
Prepare the predictions to take them to RiVaL Toolkit.
:param n_folds: Number of folds.
:param train_test_file_path: Path with train and input_test files.
:param recommendation_file_path: Path where the suitable files to run RiVaL Toolkit are saved.
:return: The suitable files to run RiVaL Toolkit are saved.
"""
for i in range(n_folds):
print('Fold: ', i)
timestart = time.time()
# train file:
train_file_name = train_test_file_path + 'train_bin_verified_sep_' + str(i) + '.csv'
train_data = Dataset(reader=reader)
raw_trainset = train_data.read_ratings(file_name=train_file_name)
trainset = train_data.construct_trainset(raw_trainset)
timeend = time.time()
print('Train file loading time: ', (timeend - timestart), 'seconds')
timestart = time.time()
            # Train the recommendation model:
self.model.fit(trainset)
timeend = time.time()
print('Training time: ', (timeend - timestart), 'seconds')
            # test file:
timestart = time.time()
test_file_name = train_test_file_path + 'test_bin_verified_sep_' + str(i) + '.csv'
test_data = Dataset(reader=reader)
raw_testset = test_data.read_ratings(file_name=test_file_name)
testset = test_data.construct_testset(raw_testset)
timeend = time.time()
            print('Load time of the test file: ', (timeend - timestart), 'seconds')
# Predictions:
timestart = time.time()
predictions = self.model.test(testset)
file_name = open(recommendation_file_path + 'recs_' + str(i) + '.csv', 'w')
for pred in predictions:
user_id = pred[0]
item_id = pred[1]
rating_real = pred[2]
rating_estimated = pred[3]
file_name.write(user_id + "\t" + item_id + "\t" + str(rating_estimated) + '\n')
timeend = time.time()
print('Prediction time: ', (timeend - timestart), 'seconds')
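# --- Hedged usage sketch (editorial addition) ---
# The ratings path and separator are illustrative; RatingDataModel expects a
# MovieLens-style ratings file (user_id, item_id, rating, ...).
#
#   recommender = RandomRecommender(ratings_file_path='ratings.dat', separator='::')
#   top_10 = recommender.recommend(user_id=1, how_many=10)
#   print(top_10)  # [[item_id, estimated_preference], ...] best first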
|
[
"surprise.dataset.Dataset",
"surprise.NormalPredictor",
"time.time"
] |
[((692, 709), 'surprise.NormalPredictor', 'NormalPredictor', ([], {}), '()\n', (707, 709), False, 'from surprise import NormalPredictor\n'), ((2301, 2323), 'surprise.dataset.Dataset', 'Dataset', ([], {'reader': 'reader'}), '(reader=reader)\n', (2308, 2323), False, 'from surprise.dataset import Dataset\n'), ((3295, 3306), 'time.time', 'time.time', ([], {}), '()\n', (3304, 3306), False, 'import time\n'), ((3455, 3477), 'surprise.dataset.Dataset', 'Dataset', ([], {'reader': 'reader'}), '(reader=reader)\n', (3462, 3477), False, 'from surprise.dataset import Dataset\n'), ((3645, 3656), 'time.time', 'time.time', ([], {}), '()\n', (3654, 3656), False, 'import time\n'), ((3763, 3774), 'time.time', 'time.time', ([], {}), '()\n', (3772, 3774), False, 'import time\n'), ((3882, 3893), 'time.time', 'time.time', ([], {}), '()\n', (3891, 3893), False, 'import time\n'), ((4021, 4032), 'time.time', 'time.time', ([], {}), '()\n', (4030, 4032), False, 'import time\n'), ((4152, 4174), 'surprise.dataset.Dataset', 'Dataset', ([], {'reader': 'reader'}), '(reader=reader)\n', (4159, 4174), False, 'from surprise.dataset import Dataset\n'), ((4335, 4346), 'time.time', 'time.time', ([], {}), '()\n', (4344, 4346), False, 'import time\n'), ((4489, 4500), 'time.time', 'time.time', ([], {}), '()\n', (4498, 4500), False, 'import time\n'), ((4944, 4955), 'time.time', 'time.time', ([], {}), '()\n', (4953, 4955), False, 'import time\n')]
|
import torch, sys
import torch.nn as nn
sys.path.append('..')
from MPLayers.lib_stereo import TRWP_hard_soft as TRWP_stereo
from MPLayers.lib_seg import TRWP_hard_soft as TRWP_seg
from utils.label_context import create_label_context
# references:
# http://www.benjack.io/2017/06/12/python-cpp-tests.html
# https://pytorch.org/tutorials/advanced/cpp_extension.html
class TRWPFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, unary, label_context, edge_weights, args):
    # unary: (batch,cv,h,w,disp); message: (dir,batch,cv,h,w,disp); label_context: (disp,disp) for seg and (disp) for stereo
# edge_weights:(dir,batch,cv,h,w)
batch, cv, h, w, n_disp = unary.size()
rho, n_iter, n_dir, is_training = args.rho, args.mpnet_max_iter, args.mpnet_n_dirs, args.training
TRWP = TRWP_seg if (n_disp == 21) else TRWP_stereo
message = unary.new_zeros(n_dir, batch, cv, h, w, n_disp)
cost_final = unary.new_zeros(batch, cv, h, w, n_disp)
unary_update = unary.new_zeros(batch, cv, h, w, n_disp)
if edge_weights is None:
edge_weights = unary.new_ones(n_dir, batch, cv, h, w)
enable_edge_weights = False
else:
enable_edge_weights = True
if args.enable_saving_label:
label_all = unary.new_zeros(n_iter, batch, cv, h, w, dtype=torch.uint8)
else:
label_all = torch.empty(0, dtype=torch.uint8)
if args.mpnet_enable_soft:
if is_training:
message_edge_label = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, n_disp, n_disp)
cost_index = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, dtype=torch.uint8)
else:
message_edge_label = torch.empty(0, dtype=torch.float32)
cost_index = torch.empty(0, dtype=torch.uint8)
TRWP.forward_soft(rho, int(n_iter), unary, label_context, edge_weights,
message, message_edge_label, cost_index, cost_final,
unary_update, label_all)
message_index = torch.empty(0, dtype=torch.float32)
else:
if is_training:
message_index = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, n_disp, dtype=torch.uint8)
cost_index = unary.new_zeros(n_iter, n_dir, batch, cv, h, w, dtype=torch.uint8)
else:
message_index = torch.empty(0, dtype=torch.uint8)
cost_index = torch.empty(0, dtype=torch.uint8)
TRWP.forward(rho, int(n_iter), 0, unary, label_context,
edge_weights, message, cost_final, message_index, cost_index,
unary_update, label_all)
message_edge_label = torch.empty(0, dtype=torch.float32)
ctx.intermediate_results = rho, args, message_edge_label, message_index, \
cost_index, label_context, edge_weights, enable_edge_weights
del message, message_index, unary_update, label_context, edge_weights
return cost_final, label_all, message_edge_label, cost_index
@staticmethod
def backward(ctx, dcost_final, dlabel_all, dmessage_edge_label, dcost_index):
dcost_final = dcost_final.contiguous()
rho, args, message_edge_label, message_index, cost_index, \
label_context, edge_weights, enable_edge_weights = ctx.intermediate_results
del ctx.intermediate_results
cost_index = args.msg_norm_index if (args.msg_norm_index is not None) else cost_index
n_iter, n_dir, batch, cv, h, w = cost_index.size()
n_disp = args.n_classes
TRWP = TRWP_seg if (n_disp == 21) else TRWP_stereo
dunary = dcost_final.new_zeros(batch, cv, h, w, n_disp)
dmessage = dcost_final.new_zeros(n_dir, batch, cv, h, w, n_disp)
dunary_update = dcost_final.new_zeros(batch, cv, h, w, n_disp)
dedge_weights = dcost_final.new_zeros(n_dir, batch, cv, h, w)
if args.enable_seg:
dlabel_context = dcost_final.new_zeros(n_disp, n_disp)
else:
dlabel_context = dcost_final.new_zeros(n_disp)
if args.mpnet_enable_soft:
TRWP.backward_soft(rho, dcost_final, label_context, edge_weights, message_edge_label,
cost_index, dunary, dlabel_context, dedge_weights,
dmessage, dunary_update)
else:
TRWP.backward(rho, label_context, edge_weights, dcost_final, message_index,
cost_index, dunary, dlabel_context, dedge_weights,
dmessage, dunary_update)
del message_edge_label, message_index, cost_index, label_context, \
edge_weights, dcost_final, dmessage, dunary_update
dedge_weights = None if (not enable_edge_weights) else dedge_weights
return dunary, dlabel_context, dedge_weights, None, None, None
class MPModule(torch.nn.Module):
def __init__(self, args, enable_create_label_context=False, enable_saving_label=False):
super(MPModule, self).__init__()
self.args = args
self.args.enable_saving_label = enable_saving_label
self.args.rho = 0.5 if (args.mpnet_mrf_mode == 'TRWP') else 1
self.args.enable_seg = True if (args.n_classes == 21) else False
self.smoothness_train = args.mpnet_smoothness_train if args.mpnet_smoothness_train else None
self.smoothness_mode = args.mpnet_smoothness_mode if args.mpnet_smoothness_mode else None
self.smoothness_trunct_value = args.mpnet_smoothness_trunct_value
self.smoothness_trunct_loc = args.mpnet_smoothness_trunct_loc
if enable_create_label_context:
self.create_label_context()
def get_label_context(self):
return self.label_context, self.label_context_loc, self.label_context_diag_loc
def set_label_context(self, label_context, label_context_loc, label_context_diag_loc):
self.label_context = label_context
self.label_context_loc = label_context_loc
self.label_context_diag_loc = label_context_diag_loc
def create_label_context(self):
self.label_context, self.label_context_loc, self.label_context_diag_loc = \
create_label_context(self.args, enable_seg=self.args.enable_seg,
enable_symmetric=self.args.enable_symmetric)
def forward(self, unary, edge_weights=None, msg_norm_index=None, pairwise_terms=None):
# unary:(batch,cv,n_disp,h,w); label_context:(n_disp,n_disp) for seg and (n_disp) for stereo
# edge_weights:(batch,n_dir,h,w) unsqueeze(1) and permute to be (n_dir,batch,cv,h,w)
unary = unary.permute(0, 1, 3, 4, 2).contiguous()
if True:
edge_weights = edge_weights.unsqueeze(1).permute(2, 0, 1, 3, 4).contiguous() \
if (edge_weights is not None) else edge_weights
else:
# TODO : switch on for debugging when n_cv > 1 in test_parallel_grad.py
edge_weights = edge_weights.unsqueeze(0).permute(2, 0, 1, 3, 4).contiguous() \
if (edge_weights is not None) else edge_weights
label_context = self.label_context * self.args.mpnet_term_weight
if self.args.mpnet_smoothness_train == 'sigmoid':
label_context_valid = label_context[self.label_context_loc].flatten()
label_context[self.label_context_loc] = 2 * torch.sigmoid(label_context_valid)
elif self.args.mpnet_smoothness_train == 'softmax':
label_context_valid = label_context[self.label_context_loc].flatten()
label_context_max = label_context_valid.max()
label_context_norm = nn.Softmax(dim=0)(label_context_valid)
label_context_norm_max = label_context_norm.max()
label_context[self.label_context_loc] = label_context_norm * label_context_max / label_context_norm_max
if self.args.mpnet_smoothness_train in {'sigmoid', 'softmax'}:
label_context[self.label_context_diag_loc] = self.args.mpnet_diag_value
if edge_weights is not None:
assert unary.size()[-3:-1] == edge_weights.size()[-2:]
if unary.is_cuda and (msg_norm_index is not None):
msg_norm_index = msg_norm_index.cuda()
self.args.msg_norm_index = msg_norm_index
self.args.training = self.training
if self.args.mpnet_mrf_mode == 'TRWP':
cost_final, cost_all, message_vector, message_index = \
TRWPFunction.apply(unary, label_context, edge_weights, self.args)
else:
assert False
cost_final = cost_final.permute(0, 1, 4, 2, 3).contiguous()
label_context = label_context.unsqueeze(0) # Create a batch
return cost_final, label_context, cost_all, message_vector, message_index
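# Example usage (sketch): `args` is assumed to be a namespace carrying the mpnet_*
# options read above (mpnet_mrf_mode='TRWP', mpnet_max_iter, mpnet_n_dirs,
# mpnet_enable_soft, mpnet_term_weight, mpnet_diag_value, mpnet_smoothness_train,
# n_classes, enable_symmetric, ...), and the compiled TRWP_hard_soft extensions
# imported at the top of this file must be available.
#
#   mp_module = MPModule(args, enable_create_label_context=True)
#   # unary scores shaped (batch, n_cv, n_disp, height, width)
#   cost_final, label_context, cost_all, msg_vec, msg_idx = mp_module(unary, edge_weights)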
|
[
"sys.path.append",
"utils.label_context.create_label_context",
"torch.empty",
"torch.sigmoid",
"torch.nn.Softmax"
] |
[((40, 61), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (55, 61), False, 'import torch, sys\n'), ((5845, 5958), 'utils.label_context.create_label_context', 'create_label_context', (['self.args'], {'enable_seg': 'self.args.enable_seg', 'enable_symmetric': 'self.args.enable_symmetric'}), '(self.args, enable_seg=self.args.enable_seg,\n enable_symmetric=self.args.enable_symmetric)\n', (5865, 5958), False, 'from utils.label_context import create_label_context\n'), ((1340, 1373), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'torch.uint8'}), '(0, dtype=torch.uint8)\n', (1351, 1373), False, 'import torch, sys\n'), ((1968, 2003), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'torch.float32'}), '(0, dtype=torch.float32)\n', (1979, 2003), False, 'import torch, sys\n'), ((2563, 2598), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'torch.float32'}), '(0, dtype=torch.float32)\n', (2574, 2598), False, 'import torch, sys\n'), ((1650, 1685), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'torch.float32'}), '(0, dtype=torch.float32)\n', (1661, 1685), False, 'import torch, sys\n'), ((1707, 1740), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'torch.uint8'}), '(0, dtype=torch.uint8)\n', (1718, 1740), False, 'import torch, sys\n'), ((2259, 2292), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'torch.uint8'}), '(0, dtype=torch.uint8)\n', (2270, 2292), False, 'import torch, sys\n'), ((2314, 2347), 'torch.empty', 'torch.empty', (['(0)'], {'dtype': 'torch.uint8'}), '(0, dtype=torch.uint8)\n', (2325, 2347), False, 'import torch, sys\n'), ((6947, 6981), 'torch.sigmoid', 'torch.sigmoid', (['label_context_valid'], {}), '(label_context_valid)\n', (6960, 6981), False, 'import torch, sys\n'), ((7193, 7210), 'torch.nn.Softmax', 'nn.Softmax', ([], {'dim': '(0)'}), '(dim=0)\n', (7203, 7210), True, 'import torch.nn as nn\n')]
|
from __future__ import print_function
import keras.backend as K
import keras.losses as losses
import keras.optimizers as optimizers
import numpy as np
from keras.callbacks import ModelCheckpoint
from keras.layers.advanced_activations import LeakyReLU
from keras.layers import Input, RepeatVector, Reshape
from keras.layers.embeddings import Embedding
from keras.layers.merge import Concatenate, Multiply
from keras.losses import binary_crossentropy
from keras.models import Model, Sequential
from .multi_sampler import *
class PretrainImageAutoencoder(RobotMultiPredictionSampler):
def __init__(self, taskdef, *args, **kwargs):
'''
As in the other models, we call super() to parse arguments from the
command line and set things like our optimizer and learning rate.
'''
super(PretrainImageAutoencoder, self).__init__(taskdef, *args, **kwargs)
self.PredictorCb = ImageCb
self.save_encoder_decoder = True
def _makePredictor(self, features):
'''
Create model to predict possible manipulation goals.
'''
(images, arm, gripper) = features
img_shape, image_size, arm_size, gripper_size = self._sizes(
images,
arm,
gripper)
img_in = Input(img_shape,name="predictor_img_in")
img0_in = Input(img_shape,name="predictor_img0_in")
option_in = Input((1,), name="predictor_option_in")
encoder = self._makeImageEncoder(img_shape)
ins = [img0_in, img_in]
# Create the encoder
enc = encoder(img_in)
#enc = Dropout(self.dropout_rate)(enc)
decoder = self._makeImageDecoder(
self.hidden_shape,
self.skip_shape,)
out = decoder(enc)
if not self.no_disc:
# Create the discriminator to make sure this is a good image
image_discriminator = MakeImageClassifier(self, img_shape)
image_discriminator.load_weights(
self.makeName("discriminator", "classifier"))
image_discriminator.trainable = False
o2 = image_discriminator([img0_in, out])
if self.no_disc:
ae = Model(ins, [out])
ae.compile(
loss=["mae"],
loss_weights=[1.],
optimizer=self.getOptimizer())
else:
ae = Model(ins, [out, o2])
ae.compile(
loss=["mae"] + ["categorical_crossentropy"],
loss_weights=[1.,1e-3],
optimizer=self.getOptimizer())
encoder.summary()
decoder.summary()
ae.summary()
return ae, ae, None, [img_in], enc
def _getData(self, *args, **kwargs):
features, targets = GetAllMultiData(self.num_options, *args, **kwargs)
[I, q, g, oin, label, q_target, g_target,] = features
o1 = targets[1]
I0 = I[0,:,:,:]
length = I.shape[0]
I0 = np.tile(np.expand_dims(I0,axis=0),[length,1,1,1])
if self.no_disc:
return [I0, I], [I]
else:
o1_1h = np.squeeze(ToOneHot2D(o1, self.num_options))
return [I0, I], [I, o1_1h]
|
[
"keras.layers.Input",
"numpy.expand_dims",
"keras.models.Model"
] |
[((1293, 1334), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""predictor_img_in"""'}), "(img_shape, name='predictor_img_in')\n", (1298, 1334), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((1352, 1394), 'keras.layers.Input', 'Input', (['img_shape'], {'name': '"""predictor_img0_in"""'}), "(img_shape, name='predictor_img0_in')\n", (1357, 1394), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((1414, 1453), 'keras.layers.Input', 'Input', (['(1,)'], {'name': '"""predictor_option_in"""'}), "((1,), name='predictor_option_in')\n", (1419, 1453), False, 'from keras.layers import Input, RepeatVector, Reshape\n'), ((2223, 2240), 'keras.models.Model', 'Model', (['ins', '[out]'], {}), '(ins, [out])\n', (2228, 2240), False, 'from keras.models import Model, Sequential\n'), ((2420, 2441), 'keras.models.Model', 'Model', (['ins', '[out, o2]'], {}), '(ins, [out, o2])\n', (2425, 2441), False, 'from keras.models import Model, Sequential\n'), ((3023, 3049), 'numpy.expand_dims', 'np.expand_dims', (['I0'], {'axis': '(0)'}), '(I0, axis=0)\n', (3037, 3049), True, 'import numpy as np\n')]
|
import random
from collections import OrderedDict
from urllib.parse import quote
from rest_framework import filters
from rest_framework.pagination import PageNumberPagination
from rest_framework.response import Response
DEFAULT_PAGE_SIZE = 15
DEFAULT_SEED = 1234
class OptionalPageNumberPagination(PageNumberPagination):
"""
Optional pagination that does not paginate the response
if the user does not specify it.
"""
page_size = DEFAULT_PAGE_SIZE
page_size_query_param = "page_size"
def paginate_queryset(self, queryset, request, view=None):
if self.page_query_param not in request.query_params:
return None
return super().paginate_queryset(queryset, request, view)
class RandomPageNumberPagination(OptionalPageNumberPagination):
"""
Custom pagination that supports randomly sorting objects with pagination.
Must be used with the associated ordering filter.
"""
def paginate_queryset(self, queryset, request, view=None):
if "random" in request.query_params.get("ordering", "").split(","):
rng = random.Random(request.GET.get("seed", DEFAULT_SEED))
results = list(queryset)
rng.shuffle(results)
self._random_count = getattr(request, "_original_item_count", None)
if self._random_count is None:
self._random_count = queryset.model.objects.count()
else:
del request._original_item_count
page = int(request.GET.get("page", 1))
page_size = int(request.GET.get("page_size", DEFAULT_PAGE_SIZE))
if (page - 1) * page_size >= self._random_count:
self._random_next_page = None
else:
new_params = request.GET.dict()
new_params["page"] = str(page + 1)
self._random_next_page = "{}?{}".format(
request.build_absolute_uri(request.path),
"&".join(
["{}={}".format(k, quote(v)) for k, v in new_params.items()]
),
)
return results
return super().paginate_queryset(queryset, request, view)
def get_paginated_response(self, data):
if hasattr(self, "_random_next_page"):
return Response(
OrderedDict(
[
("count", self._random_count),
("next", self._random_next_page),
("results", data),
]
)
)
return super().get_paginated_response(data)
class RandomOrderingFilter(filters.OrderingFilter):
"""
Custom ordering filter that supports random pagination.
Must be used with the associated pagination class.
"""
def filter_queryset(self, request, queryset, view):
new_queryset = super().filter_queryset(request, queryset, view)
ordering = request.GET.get("ordering", "").split(",")
# handle random ordering
if "random" in ordering:
page = int(request.GET.get("page", 1)) - 1
page_size = int(request.GET.get("page_size", DEFAULT_PAGE_SIZE))
rng = random.Random(request.GET.get("seed", DEFAULT_SEED))
all_ids = list(new_queryset.order_by("id").values_list("id", flat=True))
rng.shuffle(all_ids)
start_index = page * page_size
end_index = (page + 1) * page_size
page_ids = all_ids[start_index:end_index]
request._original_item_count = new_queryset.count()
return new_queryset.filter(id__in=page_ids)
return new_queryset
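# Example wiring (sketch): the pagination and ordering classes above are designed to
# be used together on a view; the model and serializer names below are placeholders.
#
#   class BookViewSet(viewsets.ReadOnlyModelViewSet):
#       queryset = Book.objects.all()
#       serializer_class = BookSerializer
#       pagination_class = RandomPageNumberPagination
#       filter_backends = [RandomOrderingFilter]
#       ordering_fields = ["title"]
#
# A request such as GET /books/?ordering=random&seed=42&page=2&page_size=15 then
# returns a reproducibly shuffled page ("random" is handled by the filter above,
# so it does not need to appear in ordering_fields).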
|
[
"collections.OrderedDict",
"urllib.parse.quote"
] |
[((2345, 2447), 'collections.OrderedDict', 'OrderedDict', (["[('count', self._random_count), ('next', self._random_next_page), (\n 'results', data)]"], {}), "([('count', self._random_count), ('next', self._random_next_page\n ), ('results', data)])\n", (2356, 2447), False, 'from collections import OrderedDict\n'), ((2031, 2039), 'urllib.parse.quote', 'quote', (['v'], {}), '(v)\n', (2036, 2039), False, 'from urllib.parse import quote\n')]
|
import inspect
import os
import sys
import time
from datetime import datetime
from uuid import uuid4
import pkg_resources
import pyfiglet
from ZathuraProject.bugtracker import (send_data_to_bugtracker,
send_verbose_log_to_bugtracker)
CURRENT_VERSION = "v0.0.6 beta"
def create_app():
if sys.version_info < (3, 0, 0):
print("Zathura needs python3.x to perform normally!")
sys.exit(255)
pyfiglet_ascii() # spits out zathura in speed font
print("*#$" * 20)
print("Current version: {}".format(CURRENT_VERSION))
print("*#$" * 20)
return
def pyfiglet_ascii():
"""
Prints out Zathura using pyfiglet package, speed font.
"""
print(pyfiglet.figlet_format("Zathura", font="speed"))
class Zathura:
def __init__(self, bugtracker_url: str = None,
project_token: str = None):
"""
Initiates zathura using bugtracker url and project token.
        :param bugtracker_url: str
        :param project_token: str
"""
self.verbose_url = None
self.error_url = None
self.project_token = project_token
if bugtracker_url is not None:
if bugtracker_url[-1:] != '/':
bugtracker_url += '/'
self.error_url = bugtracker_url + "project/error/log/"
self.verbose_url = bugtracker_url + "project/verbose/log/"
def log_error(self, error_name, error_description, user=None):
"""
logs error in bugtracker server.
        :param error_name: str name of the error.
        :param error_description: str This should include all the necessary details of the exception.
        :param user: str Optional field that helps to uniquely identify a user.
        :returns: bool whether the log has been recorded successfully
"""
point_of_origin = (inspect.stack()[1].function).lower()
if self.error_url is not None:
return send_data_to_bugtracker(
name=error_name,
description=error_description,
origin=point_of_origin,
token=self.project_token,
url=self.error_url,
user=user
)
return False
def log_verbose(self, description=None, user=None):
"""
logs verbose (debug) in bugtracker server.
        :param description: str A long description of any debug message you want to record.
        :param user: str Optional field that helps to uniquely identify a user.
        :returns: bool whether the log has been recorded successfully
"""
point_of_origin = (inspect.stack()[1].function).lower()
if self.verbose_url is not None:
return send_verbose_log_to_bugtracker(
origin=point_of_origin,
description=description,
project_token=self.project_token,
bugtracker_url=self.verbose_url,
user=user
)
return False
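# Example usage (sketch): the URL and token below are placeholders for a real
# bugtracker deployment and project token.
#
#   zathura = Zathura(bugtracker_url="https://bugtracker.example.com",
#                     project_token="<project-token>")
#   try:
#       risky_operation()
#   except Exception as exc:
#       zathura.log_error("risky_operation failed", str(exc), user="user-42")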
|
[
"ZathuraProject.bugtracker.send_data_to_bugtracker",
"ZathuraProject.bugtracker.send_verbose_log_to_bugtracker",
"pyfiglet.figlet_format",
"inspect.stack",
"sys.exit"
] |
[((435, 448), 'sys.exit', 'sys.exit', (['(255)'], {}), '(255)\n', (443, 448), False, 'import sys\n'), ((728, 775), 'pyfiglet.figlet_format', 'pyfiglet.figlet_format', (['"""Zathura"""'], {'font': '"""speed"""'}), "('Zathura', font='speed')\n", (750, 775), False, 'import pyfiglet\n'), ((1990, 2150), 'ZathuraProject.bugtracker.send_data_to_bugtracker', 'send_data_to_bugtracker', ([], {'name': 'error_name', 'description': 'error_description', 'origin': 'point_of_origin', 'token': 'self.project_token', 'url': 'self.error_url', 'user': 'user'}), '(name=error_name, description=error_description,\n origin=point_of_origin, token=self.project_token, url=self.error_url,\n user=user)\n', (2013, 2150), False, 'from ZathuraProject.bugtracker import send_data_to_bugtracker, send_verbose_log_to_bugtracker\n'), ((2802, 2969), 'ZathuraProject.bugtracker.send_verbose_log_to_bugtracker', 'send_verbose_log_to_bugtracker', ([], {'origin': 'point_of_origin', 'description': 'description', 'project_token': 'self.project_token', 'bugtracker_url': 'self.verbose_url', 'user': 'user'}), '(origin=point_of_origin, description=\n description, project_token=self.project_token, bugtracker_url=self.\n verbose_url, user=user)\n', (2832, 2969), False, 'from ZathuraProject.bugtracker import send_data_to_bugtracker, send_verbose_log_to_bugtracker\n'), ((1895, 1910), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (1908, 1910), False, 'import inspect\n'), ((2705, 2720), 'inspect.stack', 'inspect.stack', ([], {}), '()\n', (2718, 2720), False, 'import inspect\n')]
|
import torch
import numpy as np
import pickle
def filterit(s,W2ID):
s=s.lower()
S=''
for c in s:
if c in ' abcdefghijklmnopqrstuvwxyz0123456789':
S+=c
S = " ".join([x if x and x in W2ID else "<unk>" for x in S.split()])
return S
def Sentence2Embeddings(sentence,W2ID,EMB):
if type(sentence)==str:
sentence = filterit(sentence, W2ID)
#print(sentence)
IDS = torch.tensor([W2ID[i] for i in sentence.split(" ")])
return EMB(IDS)
if type(sentence)==list:
sembs = []
for sent in sentence:
sent = filterit(sent,W2ID)
IDS = torch.tensor([W2ID[i] for i in sent.split(" ")])
sembs.append(EMB(IDS))
sembs = torch.nn.utils.rnn.pad_sequence(sembs,batch_first=True)
return sembs
def GetEmbeddings(path='./student_code/supportfiles/GloVe300.d'):
GloVe = pickle.load(open(path,'rb'))
W2ID = {w:i for i,w in enumerate(sorted(list(GloVe.keys())))}
EMB = torch.nn.Embedding(len(W2ID),300)
EMB.weight.requires_grad=False
GloVeW = np.vstack([GloVe[w] for w in W2ID])
EMB.weight.data.copy_(torch.from_numpy(GloVeW))
return W2ID, EMB
def getAnsWords(path='./student_code/supportfiles/CoAttAns.d'):
with open(path,'rb') as file:
data = pickle.load(file)
return data
def Answer2OneHot1(answers,AW):
A=[]
for answer in answers:
Aembs = torch.zeros(len(AW))
for w in answer.split(" "):
if w in AW:
Aembs[AW[w]]=1
break
else:
Aembs[0]=1
break
A.append(Aembs)
A = torch.stack(A)
return A
def Answer2OneHot(answers,AW):
A=[]
for answer in answers:
Aembs = torch.zeros(len(AW))
w = answer.split(" ")[0]
if w in AW:Aembs[AW[w]]=1
else:Aembs[-1]=1
A.append(Aembs)
A = torch.stack(A)
return A
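# Example usage (sketch): assumes the pickled GloVe dictionary and the answer-word
# map exist at the default paths used by GetEmbeddings() and getAnsWords().
#
#   W2ID, EMB = GetEmbeddings()
#   AW = getAnsWords()
#   # padded question embeddings: (batch, max_len, 300)
#   q_embs = Sentence2Embeddings(["what color is the sky", "how many dogs"], W2ID, EMB)
#   # one-hot answer targets: (batch, len(AW))
#   a_onehot = Answer2OneHot(["blue", "two"], AW)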
|
[
"torch.stack",
"pickle.load",
"torch.nn.utils.rnn.pad_sequence",
"numpy.vstack",
"torch.from_numpy"
] |
[((1080, 1115), 'numpy.vstack', 'np.vstack', (['[GloVe[w] for w in W2ID]'], {}), '([GloVe[w] for w in W2ID])\n', (1089, 1115), True, 'import numpy as np\n'), ((1651, 1665), 'torch.stack', 'torch.stack', (['A'], {}), '(A)\n', (1662, 1665), False, 'import torch\n'), ((1908, 1922), 'torch.stack', 'torch.stack', (['A'], {}), '(A)\n', (1919, 1922), False, 'import torch\n'), ((737, 793), 'torch.nn.utils.rnn.pad_sequence', 'torch.nn.utils.rnn.pad_sequence', (['sembs'], {'batch_first': '(True)'}), '(sembs, batch_first=True)\n', (768, 793), False, 'import torch\n'), ((1142, 1166), 'torch.from_numpy', 'torch.from_numpy', (['GloVeW'], {}), '(GloVeW)\n', (1158, 1166), False, 'import torch\n'), ((1303, 1320), 'pickle.load', 'pickle.load', (['file'], {}), '(file)\n', (1314, 1320), False, 'import pickle\n')]
|
#!/usr/bin/env python
# coding:utf-8
"""
Name : test_mod_group.py
Author : <NAME>
Date : 6/21/2021
Desc:
"""
from model.group import Group
from random import randrange
def test_modification_some_group(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(name="111", header="222", footer="333")
group.id = old_groups[index].id
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_modification_some_group_name(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(name="New group")
group.id = old_groups[index].id
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_modification_some_group_header(app, db, check_ui):
if len(db.get_group_list()) == 0:
app.group.create(Group(name="test"))
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(header="New header")
group.id = old_groups[index].id
group.name = old_groups[index].name
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
def test_modification_some_group_footer(app, db, check_ui):
app.group.create_group_if_missing()
old_groups = db.get_group_list()
index = randrange(len(old_groups))
group = Group(footer="New footer")
group.id = old_groups[index].id
group.name = old_groups[index].name
app.group.modification_group_by_id(group.id, group)
new_groups = db.get_group_list()
old_groups[index] = group
assert old_groups == new_groups
if check_ui:
assert sorted(old_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
|
[
"model.group.Group"
] |
[((405, 450), 'model.group.Group', 'Group', ([], {'name': '"""111"""', 'header': '"""222"""', 'footer': '"""333"""'}), "(name='111', header='222', footer='333')\n", (410, 450), False, 'from model.group import Group\n'), ((1008, 1031), 'model.group.Group', 'Group', ([], {'name': '"""New group"""'}), "(name='New group')\n", (1013, 1031), False, 'from model.group import Group\n'), ((1591, 1617), 'model.group.Group', 'Group', ([], {'header': '"""New header"""'}), "(header='New header')\n", (1596, 1617), False, 'from model.group import Group\n'), ((2174, 2200), 'model.group.Group', 'Group', ([], {'footer': '"""New footer"""'}), "(footer='New footer')\n", (2179, 2200), False, 'from model.group import Group\n'), ((297, 315), 'model.group.Group', 'Group', ([], {'name': '"""test"""'}), "(name='test')\n", (302, 315), False, 'from model.group import Group\n'), ((900, 918), 'model.group.Group', 'Group', ([], {'name': '"""test"""'}), "(name='test')\n", (905, 918), False, 'from model.group import Group\n'), ((1483, 1501), 'model.group.Group', 'Group', ([], {'name': '"""test"""'}), "(name='test')\n", (1488, 1501), False, 'from model.group import Group\n')]
|
#! /usr/bin/env python
import tensorflow as tf
import numpy as np
import os
import data_helpers
import csv
import pickle
import data_helpers as dp
import json
# Parameters
# ==================================================
# Data Parameters
tf.flags.DEFINE_string("positive_data_file", "./data/rt-polaritydata/rt-polarity.pos", "Data source for the positive data.")
tf.flags.DEFINE_string("negative_data_file", "./data/rt-polaritydata/rt-polarity.neg", "Data source for the positive data.")
# Eval Parameters
tf.flags.DEFINE_integer("batch_size", 64, "Batch Size (default: 64)")
tf.flags.DEFINE_string("checkpoint_dir", "../runs/1548399951/checkpoints/", "Checkpoint directory from training run")
tf.flags.DEFINE_boolean("eval_train", False, "Evaluate on all training data")
# Misc Parameters
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow device soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log placement of ops on devices")
FLAGS = tf.flags.FLAGS
#FLAGS._parse_flags()
FLAGS.flag_values_dict()
print("\nParameters:")
for attr, value in sorted(FLAGS.__flags.items()):
print("{}={}".format(attr.upper(), value))
print("")
runs_path = os.path.abspath(os.path.join(FLAGS.checkpoint_dir, os.path.pardir))
vacabulary_path = os.path.join(runs_path, "vocab")
print("vacabulary path:"+vacabulary_path)
vacabulary_file = open(vacabulary_path, "rb")
vacabulary = pickle.load(vacabulary_file)
vacabulary_file.close()
#load sequence length
sequence_path = os.path.join(runs_path, "sequence_lenth")
sequence_file = open(sequence_path, "rb")
sequence_length = pickle.load(sequence_file)
sequence_file.close()
print("sequence is {0}",sequence_length)
label_list = []
label_json_path = os.path.join(runs_path, "lable.json")
with open(label_json_path,'r') as load_f:
label_list = json.load(load_f)
def classify(text):
x_text = [list(text.strip())]
sentences_padded = dp.pad_sentences(x_text, sequence_length=sequence_length)
x = np.array([[vacabulary.get(word,0) for word in sentence] for sentence in sentences_padded])
print("\npredict...\n")
# Evaluation
# ==================================================
checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
graph = tf.Graph()
with graph.as_default():
session_conf = tf.ConfigProto(
allow_soft_placement=FLAGS.allow_soft_placement,
log_device_placement=FLAGS.log_device_placement)
sess = tf.Session(config=session_conf)
with sess.as_default():
# Load the saved meta graph and restore variables
saver = tf.train.import_meta_graph("{}.meta".format(checkpoint_file))
saver.restore(sess, checkpoint_file)
# Get the placeholders from the graph by name
input_x = graph.get_operation_by_name("input_x").outputs[0]
# input_y = graph.get_operation_by_name("input_y").outputs[0]
dropout_keep_prob = graph.get_operation_by_name("dropout_keep_prob").outputs[0]
# Tensors we want to evaluate
predictions = graph.get_operation_by_name("output/predictions").outputs[0]
single_predictions = sess.run(predictions, {input_x: x, dropout_keep_prob: 1.0})
predict_label = label_list[int(single_predictions)]
print(predict_label)
return predict_label
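# Example usage (sketch): assumes the checkpoint directory, vocabulary pickle and
# lable.json written by the training run exist at the paths configured above.
#
#   predicted_label = classify("text to classify")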
|
[
"json.load",
"tensorflow.Session",
"data_helpers.pad_sentences",
"tensorflow.ConfigProto",
"pickle.load",
"tensorflow.train.latest_checkpoint",
"tensorflow.Graph",
"tensorflow.flags.DEFINE_integer",
"os.path.join",
"tensorflow.flags.DEFINE_boolean",
"tensorflow.flags.DEFINE_string"
] |
[((246, 378), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""positive_data_file"""', '"""./data/rt-polaritydata/rt-polarity.pos"""', '"""Data source for the positive data."""'], {}), "('positive_data_file',\n './data/rt-polaritydata/rt-polarity.pos',\n 'Data source for the positive data.')\n", (268, 378), True, 'import tensorflow as tf\n'), ((371, 503), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""negative_data_file"""', '"""./data/rt-polaritydata/rt-polarity.neg"""', '"""Data source for the positive data."""'], {}), "('negative_data_file',\n './data/rt-polaritydata/rt-polarity.neg',\n 'Data source for the positive data.')\n", (393, 503), True, 'import tensorflow as tf\n'), ((515, 584), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""Batch Size (default: 64)"""'], {}), "('batch_size', 64, 'Batch Size (default: 64)')\n", (538, 584), True, 'import tensorflow as tf\n'), ((585, 706), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""checkpoint_dir"""', '"""../runs/1548399951/checkpoints/"""', '"""Checkpoint directory from training run"""'], {}), "('checkpoint_dir', '../runs/1548399951/checkpoints/',\n 'Checkpoint directory from training run')\n", (607, 706), True, 'import tensorflow as tf\n'), ((703, 780), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""eval_train"""', '(False)', '"""Evaluate on all training data"""'], {}), "('eval_train', False, 'Evaluate on all training data')\n", (726, 780), True, 'import tensorflow as tf\n'), ((800, 895), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""allow_soft_placement"""', '(True)', '"""Allow device soft device placement"""'], {}), "('allow_soft_placement', True,\n 'Allow device soft device placement')\n", (823, 895), True, 'import tensorflow as tf\n'), ((892, 985), 'tensorflow.flags.DEFINE_boolean', 'tf.flags.DEFINE_boolean', (['"""log_device_placement"""', '(False)', '"""Log placement of ops on devices"""'], {}), "('log_device_placement', False,\n 'Log placement of ops on devices')\n", (915, 985), True, 'import tensorflow as tf\n'), ((1283, 1315), 'os.path.join', 'os.path.join', (['runs_path', '"""vocab"""'], {}), "(runs_path, 'vocab')\n", (1295, 1315), False, 'import os\n'), ((1417, 1445), 'pickle.load', 'pickle.load', (['vacabulary_file'], {}), '(vacabulary_file)\n', (1428, 1445), False, 'import pickle\n'), ((1509, 1550), 'os.path.join', 'os.path.join', (['runs_path', '"""sequence_lenth"""'], {}), "(runs_path, 'sequence_lenth')\n", (1521, 1550), False, 'import os\n'), ((1611, 1637), 'pickle.load', 'pickle.load', (['sequence_file'], {}), '(sequence_file)\n', (1622, 1637), False, 'import pickle\n'), ((1737, 1774), 'os.path.join', 'os.path.join', (['runs_path', '"""lable.json"""'], {}), "(runs_path, 'lable.json')\n", (1749, 1774), False, 'import os\n'), ((1213, 1263), 'os.path.join', 'os.path.join', (['FLAGS.checkpoint_dir', 'os.path.pardir'], {}), '(FLAGS.checkpoint_dir, os.path.pardir)\n', (1225, 1263), False, 'import os\n'), ((1834, 1851), 'json.load', 'json.load', (['load_f'], {}), '(load_f)\n', (1843, 1851), False, 'import json\n'), ((1931, 1988), 'data_helpers.pad_sentences', 'dp.pad_sentences', (['x_text'], {'sequence_length': 'sequence_length'}), '(x_text, sequence_length=sequence_length)\n', (1947, 1988), True, 'import data_helpers as dp\n'), ((2212, 2260), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.checkpoint_dir'], {}), '(FLAGS.checkpoint_dir)\n', (2238, 2260), 
True, 'import tensorflow as tf\n'), ((2273, 2283), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2281, 2283), True, 'import tensorflow as tf\n'), ((2336, 2452), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'allow_soft_placement': 'FLAGS.allow_soft_placement', 'log_device_placement': 'FLAGS.log_device_placement'}), '(allow_soft_placement=FLAGS.allow_soft_placement,\n log_device_placement=FLAGS.log_device_placement)\n', (2350, 2452), True, 'import tensorflow as tf\n'), ((2489, 2520), 'tensorflow.Session', 'tf.Session', ([], {'config': 'session_conf'}), '(config=session_conf)\n', (2499, 2520), True, 'import tensorflow as tf\n')]
|
#! /usr/bin/env python3
import argparse
import yaml
def merge_two_dict(d1, d2):
result = {}
for key in set(d1) | set(d2):
if isinstance(d1.get(key), dict) or isinstance(d2.get(key), dict):
result[key] = merge_two_dict(d1.get(key, dict()), d2.get(key, dict()))
else:
result[key] = d1.get(key, 0) + d2.get(key, 0)
return result
def merge_yaml_files(input_yamls, output_yaml):
output = {}
for input_yaml in input_yamls:
with open(input_yaml) as open_input:
data = yaml.safe_load(open_input) or {}
output = merge_two_dict(output, data)
with open(output_yaml, 'w') as open_output:
yaml.safe_dump(output, open_output)
def main():
description = ('Merge multiple yaml file containing stats by summing the overlapping fields')
parser = argparse.ArgumentParser(description=description)
parser.add_argument('--inputs', type=str, required=True, nargs='+',
help='YAML files containing the input summary metrics')
parser.add_argument('--output', type=str, required=True,
help='YAML files containing the output summary metrics')
args = parser.parse_args()
merge_yaml_files(args.inputs, args.output)
if __name__ == '__main__':
main()
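# Worked example for merge_two_dict (sketch): overlapping numeric leaves are summed
# and nested dictionaries are merged recursively.
#
#   merge_two_dict({'a': {'x': 1}, 'b': 2}, {'a': {'x': 2, 'y': 3}})
#   # -> {'a': {'x': 3, 'y': 3}, 'b': 2}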
|
[
"yaml.safe_load",
"argparse.ArgumentParser",
"yaml.safe_dump"
] |
[((848, 896), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'description'}), '(description=description)\n', (871, 896), False, 'import argparse\n'), ((686, 721), 'yaml.safe_dump', 'yaml.safe_dump', (['output', 'open_output'], {}), '(output, open_output)\n', (700, 721), False, 'import yaml\n'), ((546, 572), 'yaml.safe_load', 'yaml.safe_load', (['open_input'], {}), '(open_input)\n', (560, 572), False, 'import yaml\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''Problem 0015: The plain-text file city.txt holds city information; its content (including the braces) is as follows:
{
"1" : "上海",
"2" : "北京",
"3" : "成都"
}
Write the above content into the city.xls file.'''
__author__ = 'Drake-Z'
import json
from collections import OrderedDict
from openpyxl import Workbook
def txt_to_xlsx(filename):
file = open(filename, 'r', encoding = 'UTF-8')
file_cintent = json.load(file, encoding = 'UTF-8')
print(file_cintent)
workbook = Workbook()
worksheet = workbook.worksheets[0]
for i in range(1, len(file_cintent)+1):
worksheet.cell(row = i, column = 1).value = i
worksheet.cell(row = i, column = 2).value = file_cintent[str(i)]
workbook.save(filename = 'city.xls')
if __name__ == '__main__':
txt_to_xlsx('city.txt')
|
[
"json.load",
"openpyxl.Workbook"
] |
[((378, 411), 'json.load', 'json.load', (['file'], {'encoding': '"""UTF-8"""'}), "(file, encoding='UTF-8')\n", (387, 411), False, 'import json\n'), ((453, 463), 'openpyxl.Workbook', 'Workbook', ([], {}), '()\n', (461, 463), False, 'from openpyxl import Workbook\n')]
|
import jax
import jax.numpy as np
import time
import skimage.io
num_iter = 10
key = jax.random.PRNGKey(1234)
Mask = np.array(skimage.io.imread('../data/Mask0.png')) > 0
Mask = np.reshape(Mask, [Mask.shape[0], Mask.shape[1], 1])
Offsets = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
Angle = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1]], dtype=np.float32)
Offsets_d = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
Angle_d = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1]], dtype=np.float32)
UrShape = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
Constraints = jax.random.uniform(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.float32)
C_valid = np.ones(Mask.shape, dtype=Mask.dtype)
def f(Offsets, Angle):
Offsets_left = np.roll(Offsets, shift=-1, axis=0)
Offsets_right = np.roll(Offsets, shift=1, axis=0)
Offsets_up = np.roll(Offsets, shift=-1, axis=1)
Offsets_down = np.roll(Offsets, shift=1, axis=1)
UrShape_left = np.roll(UrShape, shift=-1, axis=0)
UrShape_right = np.roll(UrShape, shift=1, axis=0)
UrShape_up = np.roll(UrShape, shift=-1, axis=1)
UrShape_down = np.roll(UrShape, shift=1, axis=1)
Mask_left = np.roll(Mask, shift=-1, axis=0)
Mask_right = np.roll(Mask, shift=1, axis=0)
Mask_up = np.roll(Mask, shift=-1, axis=1)
Mask_down = np.roll(Mask, shift=1, axis=1)
d_off_left = Offsets - Offsets_left
d_off_right = Offsets - Offsets_right
d_off_up = Offsets - Offsets_up
d_off_down = Offsets - Offsets_down
d_ur_left = UrShape - UrShape_left
d_ur_right = UrShape - UrShape_right
d_ur_up = UrShape - UrShape_up
d_ur_down = UrShape - UrShape_down
cos_angle = np.cos(Angle)
sin_angle = np.sin(Angle)
Rot2D_left = np.stack(\
[cos_angle * d_ur_left[:, :, 0] - sin_angle * d_ur_left[:, :, 1],
sin_angle * d_ur_left[:, :, 0] - cos_angle * d_ur_left[:, :, 1]], -1)
Rot2D_right = np.stack(\
[cos_angle * d_ur_right[:, :, 0] - sin_angle * d_ur_right[:, :, 1],
sin_angle * d_ur_right[:, :, 0] - cos_angle * d_ur_right[:, :, 1]], -1)
Rot2D_up = np.stack(\
[cos_angle * d_ur_up[:, :, 0] - sin_angle * d_ur_up[:, :, 1],
sin_angle * d_ur_up[:, :, 0] - cos_angle * d_ur_up[:, :, 1]], -1)
Rot2D_down = np.stack(\
[cos_angle * d_ur_down[:, :, 0] - sin_angle * d_ur_down[:, :, 1],
sin_angle * d_ur_down[:, :, 0] - cos_angle * d_ur_down[:, :, 1]], -1)
d_diff_left = d_off_left - Rot2D_left
d_diff_right = d_off_right - Rot2D_right
d_diff_up = d_off_up - Rot2D_up
d_diff_down = d_off_down - Rot2D_down
reg_left = np.logical_and(Mask, Mask_left) * d_diff_left * d_diff_left
reg_right = np.logical_and(Mask, Mask_right) * d_diff_right * d_diff_right
reg_up = np.logical_and(Mask, Mask_up) * d_diff_up * d_diff_up
reg_down = np.logical_and(Mask, Mask_down) * d_diff_down * d_diff_down
E_fit = (Offsets - Constraints) * (Offsets - Constraints)
return np.stack([C_valid * 0.5 * E_fit,
0.5 * reg_left,
0.5 * reg_right,
0.5 * reg_up,
0.5 * reg_down], -1)
def JTJx(Offsets, Angle, Offsets_d, Angle_d):
_, Jx = jax.jvp(f, [Offsets, Angle], [Offsets_d, Angle_d])
_, f_vjp = jax.vjp(f, Offsets, Angle)
return f_vjp(Jx)
def JTFx(Offsets, Angle):
Fx, f_vjp = jax.vjp(f, Offsets, Angle)
return f_vjp(Fx)
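# Note (sketch): JTJx applies the Gauss-Newton product J^T (J v) without ever
# materialising the Jacobian J of the residual function f -- jax.jvp supplies the
# forward-mode product J v, and jax.vjp supplies the reverse-mode product J^T (.).
# JTFx computes the gradient-style term J^T F(x) from a single vjp of f.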
jf = jax.jit(f)
jJTJx = jax.jit(JTJx)
jJTFx = jax.jit(JTFx)
# jf = f
# jJTJx = JTJx
# jJTFx = JTFx
min_fwd_time = 1e20
min_JTJ_time = 1e20
min_JTFx_time = 1e20
avg_fwd_time = 0
avg_JTJ_time = 0
avg_JTFx_time = 0
for i in range(num_iter + 1):
start = time.time()
y = jf(Offsets, Angle).block_until_ready()
int0 = time.time()
jtjx = jJTJx(Offsets, Angle, Offsets_d, Angle_d)
jtjx[0].block_until_ready()
jtjx[1].block_until_ready()
int1 = time.time()
jtfx = jJTFx(Offsets, Angle)
jtfx[0].block_until_ready()
jtfx[1].block_until_ready()
end = time.time()
if i > 0:
avg_fwd_time += int0 - start
avg_JTJ_time += int1 - int0
avg_JTFx_time += end - int1
if int0 - start < min_fwd_time:
min_fwd_time = int0 - start
if int1 - int0 < min_JTJ_time:
min_JTJ_time = int1 - int0
if end - int1 < min_JTFx_time:
min_JTFx_time = end - int1
print('Minimum forward time:', min_fwd_time)
print('Minimum JTJ time:', min_JTJ_time)
print('Minimum JTFx time:', min_JTFx_time)
print('Ratio minimum JTJ:', min_JTJ_time / min_fwd_time)
print('Ratio minimum JTFx:', min_JTFx_time / min_fwd_time)
print('Average forward time:', avg_fwd_time / num_iter)
print('Average JTJ time:', avg_JTJ_time / num_iter)
print('Average JTFx time:', avg_JTFx_time / num_iter)
print('Ratio average JTJ:', avg_JTJ_time / avg_fwd_time)
print('Ratio average JTFx:', avg_JTFx_time / avg_fwd_time)
|
[
"jax.jvp",
"jax.random.uniform",
"jax.jit",
"jax.numpy.roll",
"jax.numpy.stack",
"jax.numpy.logical_and",
"time.time",
"jax.vjp",
"jax.random.PRNGKey",
"jax.numpy.cos",
"jax.numpy.ones",
"jax.numpy.sin",
"jax.numpy.reshape"
] |
[((86, 110), 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['(1234)'], {}), '(1234)\n', (104, 110), False, 'import jax\n'), ((178, 229), 'jax.numpy.reshape', 'np.reshape', (['Mask', '[Mask.shape[0], Mask.shape[1], 1]'], {}), '(Mask, [Mask.shape[0], Mask.shape[1], 1])\n', (188, 229), True, 'import jax.numpy as np\n'), ((240, 327), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '[Mask.shape[0], Mask.shape[1], 2]', 'dtype': 'np.float32'}), '(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.\n float32)\n', (258, 327), False, 'import jax\n'), ((331, 410), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '[Mask.shape[0], Mask.shape[1]]', 'dtype': 'np.float32'}), '(key, shape=[Mask.shape[0], Mask.shape[1]], dtype=np.float32)\n', (349, 410), False, 'import jax\n'), ((423, 510), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '[Mask.shape[0], Mask.shape[1], 2]', 'dtype': 'np.float32'}), '(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.\n float32)\n', (441, 510), False, 'import jax\n'), ((516, 595), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '[Mask.shape[0], Mask.shape[1]]', 'dtype': 'np.float32'}), '(key, shape=[Mask.shape[0], Mask.shape[1]], dtype=np.float32)\n', (534, 595), False, 'import jax\n'), ((606, 693), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '[Mask.shape[0], Mask.shape[1], 2]', 'dtype': 'np.float32'}), '(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.\n float32)\n', (624, 693), False, 'import jax\n'), ((703, 790), 'jax.random.uniform', 'jax.random.uniform', (['key'], {'shape': '[Mask.shape[0], Mask.shape[1], 2]', 'dtype': 'np.float32'}), '(key, shape=[Mask.shape[0], Mask.shape[1], 2], dtype=np.\n float32)\n', (721, 790), False, 'import jax\n'), ((796, 833), 'jax.numpy.ones', 'np.ones', (['Mask.shape'], {'dtype': 'Mask.dtype'}), '(Mask.shape, dtype=Mask.dtype)\n', (803, 833), True, 'import jax.numpy as np\n'), ((3562, 3572), 'jax.jit', 'jax.jit', (['f'], {}), '(f)\n', (3569, 3572), False, 'import jax\n'), ((3581, 3594), 'jax.jit', 'jax.jit', (['JTJx'], {}), '(JTJx)\n', (3588, 3594), False, 'import jax\n'), ((3603, 3616), 'jax.jit', 'jax.jit', (['JTFx'], {}), '(JTFx)\n', (3610, 3616), False, 'import jax\n'), ((877, 911), 'jax.numpy.roll', 'np.roll', (['Offsets'], {'shift': '(-1)', 'axis': '(0)'}), '(Offsets, shift=-1, axis=0)\n', (884, 911), True, 'import jax.numpy as np\n'), ((932, 965), 'jax.numpy.roll', 'np.roll', (['Offsets'], {'shift': '(1)', 'axis': '(0)'}), '(Offsets, shift=1, axis=0)\n', (939, 965), True, 'import jax.numpy as np\n'), ((983, 1017), 'jax.numpy.roll', 'np.roll', (['Offsets'], {'shift': '(-1)', 'axis': '(1)'}), '(Offsets, shift=-1, axis=1)\n', (990, 1017), True, 'import jax.numpy as np\n'), ((1037, 1070), 'jax.numpy.roll', 'np.roll', (['Offsets'], {'shift': '(1)', 'axis': '(1)'}), '(Offsets, shift=1, axis=1)\n', (1044, 1070), True, 'import jax.numpy as np\n'), ((1091, 1125), 'jax.numpy.roll', 'np.roll', (['UrShape'], {'shift': '(-1)', 'axis': '(0)'}), '(UrShape, shift=-1, axis=0)\n', (1098, 1125), True, 'import jax.numpy as np\n'), ((1146, 1179), 'jax.numpy.roll', 'np.roll', (['UrShape'], {'shift': '(1)', 'axis': '(0)'}), '(UrShape, shift=1, axis=0)\n', (1153, 1179), True, 'import jax.numpy as np\n'), ((1197, 1231), 'jax.numpy.roll', 'np.roll', (['UrShape'], {'shift': '(-1)', 'axis': '(1)'}), '(UrShape, shift=-1, axis=1)\n', (1204, 1231), True, 'import jax.numpy as np\n'), ((1251, 1284), 'jax.numpy.roll', 'np.roll', (['UrShape'], {'shift': '(1)', 'axis': 
'(1)'}), '(UrShape, shift=1, axis=1)\n', (1258, 1284), True, 'import jax.numpy as np\n'), ((1302, 1333), 'jax.numpy.roll', 'np.roll', (['Mask'], {'shift': '(-1)', 'axis': '(0)'}), '(Mask, shift=-1, axis=0)\n', (1309, 1333), True, 'import jax.numpy as np\n'), ((1351, 1381), 'jax.numpy.roll', 'np.roll', (['Mask'], {'shift': '(1)', 'axis': '(0)'}), '(Mask, shift=1, axis=0)\n', (1358, 1381), True, 'import jax.numpy as np\n'), ((1396, 1427), 'jax.numpy.roll', 'np.roll', (['Mask'], {'shift': '(-1)', 'axis': '(1)'}), '(Mask, shift=-1, axis=1)\n', (1403, 1427), True, 'import jax.numpy as np\n'), ((1444, 1474), 'jax.numpy.roll', 'np.roll', (['Mask'], {'shift': '(1)', 'axis': '(1)'}), '(Mask, shift=1, axis=1)\n', (1451, 1474), True, 'import jax.numpy as np\n'), ((1806, 1819), 'jax.numpy.cos', 'np.cos', (['Angle'], {}), '(Angle)\n', (1812, 1819), True, 'import jax.numpy as np\n'), ((1836, 1849), 'jax.numpy.sin', 'np.sin', (['Angle'], {}), '(Angle)\n', (1842, 1849), True, 'import jax.numpy as np\n'), ((1868, 2017), 'jax.numpy.stack', 'np.stack', (['[cos_angle * d_ur_left[:, :, 0] - sin_angle * d_ur_left[:, :, 1], sin_angle *\n d_ur_left[:, :, 0] - cos_angle * d_ur_left[:, :, 1]]', '(-1)'], {}), '([cos_angle * d_ur_left[:, :, 0] - sin_angle * d_ur_left[:, :, 1], \n sin_angle * d_ur_left[:, :, 0] - cos_angle * d_ur_left[:, :, 1]], -1)\n', (1876, 2017), True, 'import jax.numpy as np\n'), ((2050, 2202), 'jax.numpy.stack', 'np.stack', (['[cos_angle * d_ur_right[:, :, 0] - sin_angle * d_ur_right[:, :, 1], \n sin_angle * d_ur_right[:, :, 0] - cos_angle * d_ur_right[:, :, 1]]', '(-1)'], {}), '([cos_angle * d_ur_right[:, :, 0] - sin_angle * d_ur_right[:, :, 1],\n sin_angle * d_ur_right[:, :, 0] - cos_angle * d_ur_right[:, :, 1]], -1)\n', (2058, 2202), True, 'import jax.numpy as np\n'), ((2233, 2374), 'jax.numpy.stack', 'np.stack', (['[cos_angle * d_ur_up[:, :, 0] - sin_angle * d_ur_up[:, :, 1], sin_angle *\n d_ur_up[:, :, 0] - cos_angle * d_ur_up[:, :, 1]]', '(-1)'], {}), '([cos_angle * d_ur_up[:, :, 0] - sin_angle * d_ur_up[:, :, 1], \n sin_angle * d_ur_up[:, :, 0] - cos_angle * d_ur_up[:, :, 1]], -1)\n', (2241, 2374), True, 'import jax.numpy as np\n'), ((2406, 2555), 'jax.numpy.stack', 'np.stack', (['[cos_angle * d_ur_down[:, :, 0] - sin_angle * d_ur_down[:, :, 1], sin_angle *\n d_ur_down[:, :, 0] - cos_angle * d_ur_down[:, :, 1]]', '(-1)'], {}), '([cos_angle * d_ur_down[:, :, 0] - sin_angle * d_ur_down[:, :, 1], \n sin_angle * d_ur_down[:, :, 0] - cos_angle * d_ur_down[:, :, 1]], -1)\n', (2414, 2555), True, 'import jax.numpy as np\n'), ((3107, 3211), 'jax.numpy.stack', 'np.stack', (['[C_valid * 0.5 * E_fit, 0.5 * reg_left, 0.5 * reg_right, 0.5 * reg_up, 0.5 *\n reg_down]', '(-1)'], {}), '([C_valid * 0.5 * E_fit, 0.5 * reg_left, 0.5 * reg_right, 0.5 *\n reg_up, 0.5 * reg_down], -1)\n', (3115, 3211), True, 'import jax.numpy as np\n'), ((3351, 3401), 'jax.jvp', 'jax.jvp', (['f', '[Offsets, Angle]', '[Offsets_d, Angle_d]'], {}), '(f, [Offsets, Angle], [Offsets_d, Angle_d])\n', (3358, 3401), False, 'import jax\n'), ((3417, 3443), 'jax.vjp', 'jax.vjp', (['f', 'Offsets', 'Angle'], {}), '(f, Offsets, Angle)\n', (3424, 3443), False, 'import jax\n'), ((3508, 3534), 'jax.vjp', 'jax.vjp', (['f', 'Offsets', 'Angle'], {}), '(f, Offsets, Angle)\n', (3515, 3534), False, 'import jax\n'), ((3815, 3826), 'time.time', 'time.time', ([], {}), '()\n', (3824, 3826), False, 'import time\n'), ((3885, 3896), 'time.time', 'time.time', ([], {}), '()\n', (3894, 3896), False, 'import time\n'), ((4025, 4036), 'time.time', 'time.time', 
([], {}), '()\n', (4034, 4036), False, 'import time\n'), ((4144, 4155), 'time.time', 'time.time', ([], {}), '()\n', (4153, 4155), False, 'import time\n'), ((2752, 2783), 'jax.numpy.logical_and', 'np.logical_and', (['Mask', 'Mask_left'], {}), '(Mask, Mask_left)\n', (2766, 2783), True, 'import jax.numpy as np\n'), ((2828, 2860), 'jax.numpy.logical_and', 'np.logical_and', (['Mask', 'Mask_right'], {}), '(Mask, Mask_right)\n', (2842, 2860), True, 'import jax.numpy as np\n'), ((2904, 2933), 'jax.numpy.logical_and', 'np.logical_and', (['Mask', 'Mask_up'], {}), '(Mask, Mask_up)\n', (2918, 2933), True, 'import jax.numpy as np\n'), ((2973, 3004), 'jax.numpy.logical_and', 'np.logical_and', (['Mask', 'Mask_down'], {}), '(Mask, Mask_down)\n', (2987, 3004), True, 'import jax.numpy as np\n')]
|
"""Methods for unzipping files."""
import os
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
def unzip_tar(tar_file_name, target_directory_name=None,
file_and_dir_names_to_unzip=None):
"""Unzips tar file.
:param tar_file_name: Path to input file.
:param target_directory_name: Path to output directory.
:param file_and_dir_names_to_unzip: List of files and directories to extract
from the tar file. Each list element should be a relative path inside
the tar file. After unzipping, the same relative path will exist inside
`target_directory_name`.
:raises: ValueError: if the Unix command fails.
"""
error_checking.assert_is_string(tar_file_name)
error_checking.assert_is_string_list(file_and_dir_names_to_unzip)
file_system_utils.mkdir_recursive_if_necessary(
directory_name=target_directory_name)
unix_command_string = 'tar -C "{0:s}" -xvf "{1:s}"'.format(
target_directory_name, tar_file_name)
for this_relative_path in file_and_dir_names_to_unzip:
unix_command_string += ' "' + this_relative_path + '"'
exit_code = os.system(unix_command_string)
if exit_code != 0:
raise ValueError('\nUnix command failed (log messages shown above '
'should explain why).')
def unzip_gzip(gzip_file_name, extracted_file_name):
"""Unzips gzip archive.
    Keep in mind that a gzip archive contains only one file.
:param gzip_file_name: Path to gzip archive.
:param extracted_file_name: The one file in the gzip archive will be saved
here.
:raises: ValueError: if the Unix command fails.
"""
error_checking.assert_is_string(gzip_file_name)
file_system_utils.mkdir_recursive_if_necessary(
file_name=extracted_file_name)
unix_command_string = 'gunzip -v -c "{0:s}" > "{1:s}"'.format(
gzip_file_name, extracted_file_name)
exit_code = os.system(unix_command_string)
if exit_code != 0:
raise ValueError('\nUnix command failed (log messages shown above '
'should explain why).')
def gzip_file(input_file_name, output_file_name=None, delete_input_file=True):
"""Creates gzip archive with one file.
:param input_file_name: Path to input file (will be gzipped).
:param output_file_name: Path to output file (extension must be ".gz"). If
`output_file_name is None`, will simply append ".gz" to name of input
file.
:param delete_input_file: Boolean flag. If True, will delete input file
after gzipping.
:raises: ValueError: if `output_file_name` does not end with ".gz".
:raises: ValueError: if the Unix command fails.
"""
error_checking.assert_file_exists(input_file_name)
error_checking.assert_is_boolean(delete_input_file)
if output_file_name is None:
output_file_name = '{0:s}.gz'.format(input_file_name)
if not output_file_name.endswith('.gz'):
error_string = (
'Output file ("{0:s}") should have extension ".gz".'
).format(output_file_name)
raise ValueError(error_string)
unix_command_string = 'gzip -v -c "{0:s}" > "{1:s}"'.format(
input_file_name, output_file_name)
exit_code = os.system(unix_command_string)
if exit_code != 0:
raise ValueError('\nUnix command failed (log messages shown above '
'should explain why).')
if delete_input_file:
os.remove(input_file_name)
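# Example usage (sketch): round-trips a file through gzip_file and unzip_gzip; the
# paths are placeholders.
#
#   gzip_file('/tmp/example.nc', delete_input_file=False)     # writes /tmp/example.nc.gz
#   unzip_gzip('/tmp/example.nc.gz', '/tmp/example_copy.nc')  # restores the original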
|
[
"gewittergefahr.gg_utils.error_checking.assert_is_string_list",
"os.remove",
"gewittergefahr.gg_utils.error_checking.assert_is_boolean",
"os.system",
"gewittergefahr.gg_utils.error_checking.assert_is_string",
"gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary",
"gewittergefahr.gg_utils.error_checking.assert_file_exists"
] |
[((729, 775), 'gewittergefahr.gg_utils.error_checking.assert_is_string', 'error_checking.assert_is_string', (['tar_file_name'], {}), '(tar_file_name)\n', (760, 775), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((780, 845), 'gewittergefahr.gg_utils.error_checking.assert_is_string_list', 'error_checking.assert_is_string_list', (['file_and_dir_names_to_unzip'], {}), '(file_and_dir_names_to_unzip)\n', (816, 845), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((850, 939), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'directory_name': 'target_directory_name'}), '(directory_name=\n target_directory_name)\n', (896, 939), False, 'from gewittergefahr.gg_utils import file_system_utils\n'), ((1195, 1225), 'os.system', 'os.system', (['unix_command_string'], {}), '(unix_command_string)\n', (1204, 1225), False, 'import os\n'), ((1728, 1775), 'gewittergefahr.gg_utils.error_checking.assert_is_string', 'error_checking.assert_is_string', (['gzip_file_name'], {}), '(gzip_file_name)\n', (1759, 1775), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((1780, 1857), 'gewittergefahr.gg_utils.file_system_utils.mkdir_recursive_if_necessary', 'file_system_utils.mkdir_recursive_if_necessary', ([], {'file_name': 'extracted_file_name'}), '(file_name=extracted_file_name)\n', (1826, 1857), False, 'from gewittergefahr.gg_utils import file_system_utils\n'), ((1997, 2027), 'os.system', 'os.system', (['unix_command_string'], {}), '(unix_command_string)\n', (2006, 2027), False, 'import os\n'), ((2778, 2828), 'gewittergefahr.gg_utils.error_checking.assert_file_exists', 'error_checking.assert_file_exists', (['input_file_name'], {}), '(input_file_name)\n', (2811, 2828), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((2833, 2884), 'gewittergefahr.gg_utils.error_checking.assert_is_boolean', 'error_checking.assert_is_boolean', (['delete_input_file'], {}), '(delete_input_file)\n', (2865, 2884), False, 'from gewittergefahr.gg_utils import error_checking\n'), ((3316, 3346), 'os.system', 'os.system', (['unix_command_string'], {}), '(unix_command_string)\n', (3325, 3346), False, 'import os\n'), ((3531, 3557), 'os.remove', 'os.remove', (['input_file_name'], {}), '(input_file_name)\n', (3540, 3557), False, 'import os\n')]
|
from rest_framework import generics, authentication, permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.exceptions import ValidationError, AuthenticationFailed
from rest_framework.authtoken.models import Token
from rest_framework.response import Response
from rest_framework import status
from django.contrib.auth import get_user_model
from django.shortcuts import get_object_or_404
from author.serializers import AuthorSerializer, AuthAuthorSerializer, AuthorProfileSerializer
class CreateAuthorView(generics.CreateAPIView):
"""Create a new author in the system"""
serializer_class = AuthorSerializer
class AuthAuthorView(ObtainAuthToken):
"""Authenticate author in the system"""
serializer_class = AuthAuthorSerializer
def post(self, request, *args, **kwargs):
serializer = self.serializer_class(data=request.data,
context={'request': request})
serializer.is_valid(raise_exception=True)
author = serializer.validated_data['user']
token, created = Token.objects.get_or_create(user=author)
if not author.adminApproval:
return Response({"error": ["User has not been approved by admin"]}, status=status.HTTP_401_UNAUTHORIZED)
return Response({
'token': token.key,
})
class AuthorProfileView(generics.RetrieveUpdateAPIView):
"""Get author in the system"""
serializer_class = AuthorProfileSerializer
    authentication_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ["get", "put"]
def get_object(self):
id = self.kwargs['pk']
try:
return get_object_or_404(get_user_model().objects, id=id)
except:
raise ValidationError({"error": ["User not found"]})
class MyProfileView(generics.RetrieveAPIView):
"""Get authenticated author profile in the system"""
serializer_class = AuthorProfileSerializer
authenticate_classes = (authentication.TokenAuthentication,)
permission_classes = (permissions.IsAuthenticated,)
http_method_names = ["get"]
def get_object(self):
if not self.request.user.adminApproval:
raise AuthenticationFailed(detail ={"error": ["User has not been approved by admin"]})
return self.request.user
|
[
"rest_framework.exceptions.AuthenticationFailed",
"rest_framework.authtoken.models.Token.objects.get_or_create",
"django.contrib.auth.get_user_model",
"rest_framework.response.Response",
"rest_framework.exceptions.ValidationError"
] |
[((1093, 1133), 'rest_framework.authtoken.models.Token.objects.get_or_create', 'Token.objects.get_or_create', ([], {'user': 'author'}), '(user=author)\n', (1120, 1133), False, 'from rest_framework.authtoken.models import Token\n'), ((1305, 1335), 'rest_framework.response.Response', 'Response', (["{'token': token.key}"], {}), "({'token': token.key})\n", (1313, 1335), False, 'from rest_framework.response import Response\n'), ((1191, 1293), 'rest_framework.response.Response', 'Response', (["{'error': ['User has not been approved by admin']}"], {'status': 'status.HTTP_401_UNAUTHORIZED'}), "({'error': ['User has not been approved by admin']}, status=status.\n HTTP_401_UNAUTHORIZED)\n", (1199, 1293), False, 'from rest_framework.response import Response\n'), ((2279, 2358), 'rest_framework.exceptions.AuthenticationFailed', 'AuthenticationFailed', ([], {'detail': "{'error': ['User has not been approved by admin']}"}), "(detail={'error': ['User has not been approved by admin']})\n", (2299, 2358), False, 'from rest_framework.exceptions import ValidationError, AuthenticationFailed\n'), ((1834, 1880), 'rest_framework.exceptions.ValidationError', 'ValidationError', (["{'error': ['User not found']}"], {}), "({'error': ['User not found']})\n", (1849, 1880), False, 'from rest_framework.exceptions import ValidationError, AuthenticationFailed\n'), ((1767, 1783), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (1781, 1783), False, 'from django.contrib.auth import get_user_model\n')]
|
#Write a program that reads the lengths of the opposite leg and the adjacent leg of a right triangle.
#Compute and display the length of the hypotenuse.
"""co = float(input('Valor cateto oposto: '))
ca = float(input('valor cateto adjacente: '))
hi = (co ** 2 + ca ** 2) ** (1/2)
print('O valor da hipotenusa é {:.2f}'.format(hi))"""
'''import math
co = float(input('Informe o valor Cateto Oposto: '))
ca = float(input('Informe o valor Cateto Adjacente: '))
hi = math.hypot(co, ca)
print('O valor da hipotenusa é {:.2f}'.format(hi))'''
from math import hypot
co = float(input('Valor cateto oposto: '))
ca = float(input('Valor cateto adjacente: '))
hi = hypot(co, ca)
print('O valor da hipotenusa será {:.2f}'.format(hi))
|
[
"math.hypot"
] |
[((656, 669), 'math.hypot', 'hypot', (['co', 'ca'], {}), '(co, ca)\n', (661, 669), False, 'from math import hypot\n')]
|
import numpy as np
import torch
import torch.nn as nn
from two_thinning.average_based.RL.basic_neuralnet_RL.neural_network import AverageTwoThinningNet
n = 10
m = n
epsilon = 0.1
train_episodes = 3000
eval_runs = 300
patience = 20
print_progress = True
print_behaviour = False
def reward(x):
return -np.max(x)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def greedy(model, ball_number):
action_values = model(torch.DoubleTensor([ball_number]))
a = torch.argmax(action_values)
return a
def epsilon_greedy(model, ball_number, epsilon=epsilon):
action_values = model(torch.DoubleTensor([ball_number]))
r = torch.rand(1)
if r < epsilon:
a = torch.randint(len(action_values), (1,))[0]
else:
a = torch.argmax(action_values)
return a, action_values[a]
def evaluate_q_values(model, n=n, m=m, reward=reward, eval_runs=eval_runs, print_behaviour=print_behaviour):
with torch.no_grad():
sum_loads = 0
for _ in range(eval_runs):
loads = np.zeros(n)
for i in range(m):
a = greedy(model, i)
if print_behaviour:
print(f"With loads {loads} the trained model chose {a}")
randomly_selected = np.random.randint(n)
if loads[randomly_selected] <= a:
loads[randomly_selected] += 1
else:
loads[np.random.randint(n)] += 1
sum_loads += reward(loads)
avg_score = sum_loads / eval_runs
return avg_score
def train(n=n, m=m, epsilon=epsilon, reward=reward, episodes=train_episodes, eval_runs=eval_runs, patience=patience,
print_progress=print_progress, print_behaviour=print_behaviour, device=device):
curr_model = AverageTwoThinningNet(m, device)
best_model = AverageTwoThinningNet(m, device)
optimizer = torch.optim.Adam(curr_model.parameters())
mse_loss = nn.MSELoss()
best_eval_score = None
not_improved = 0
for ep in range(episodes):
loads = np.zeros(n)
for i in range(m):
a, old_val = epsilon_greedy(curr_model, i, epsilon)
randomly_selected = np.random.randint(n)
if loads[randomly_selected] <= a:
loads[randomly_selected] += 1
else:
loads[np.random.randint(n)] += 1
if i == m - 1:
new_val = torch.as_tensor(reward(loads)).to(device)
else:
_, new_val = epsilon_greedy(curr_model, i + 1, epsilon)
new_val = new_val.detach()
loss = mse_loss(old_val, new_val)
optimizer.zero_grad()
loss.backward()
optimizer.step()
curr_eval_score = evaluate_q_values(curr_model, n=n, m=m, reward=reward, eval_runs=eval_runs,
print_behaviour=print_behaviour)
if best_eval_score is None or curr_eval_score > best_eval_score:
best_eval_score = curr_eval_score
best_model.load_state_dict(curr_model.state_dict())
not_improved = 0
if print_progress:
print(f"At episode {ep} the best eval score has improved to {curr_eval_score}.")
elif not_improved < patience:
not_improved += 1
if print_progress:
print(f"At episode {ep} no improvement happened.")
else:
if print_progress:
print(f"Training has stopped after episode {ep} as the eval score didn't improve anymore.")
break
return best_model
if __name__ == "__main__":
train()
|
[
"torch.nn.MSELoss",
"torch.argmax",
"torch.DoubleTensor",
"numpy.zeros",
"two_thinning.average_based.RL.basic_neuralnet_RL.neural_network.AverageTwoThinningNet",
"numpy.max",
"numpy.random.randint",
"torch.cuda.is_available",
"torch.rand",
"torch.no_grad"
] |
[((494, 521), 'torch.argmax', 'torch.argmax', (['action_values'], {}), '(action_values)\n', (506, 521), False, 'import torch\n'), ((663, 676), 'torch.rand', 'torch.rand', (['(1)'], {}), '(1)\n', (673, 676), False, 'import torch\n'), ((1804, 1836), 'two_thinning.average_based.RL.basic_neuralnet_RL.neural_network.AverageTwoThinningNet', 'AverageTwoThinningNet', (['m', 'device'], {}), '(m, device)\n', (1825, 1836), False, 'from two_thinning.average_based.RL.basic_neuralnet_RL.neural_network import AverageTwoThinningNet\n'), ((1854, 1886), 'two_thinning.average_based.RL.basic_neuralnet_RL.neural_network.AverageTwoThinningNet', 'AverageTwoThinningNet', (['m', 'device'], {}), '(m, device)\n', (1875, 1886), False, 'from two_thinning.average_based.RL.basic_neuralnet_RL.neural_network import AverageTwoThinningNet\n'), ((1960, 1972), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1970, 1972), True, 'import torch.nn as nn\n'), ((309, 318), 'numpy.max', 'np.max', (['x'], {}), '(x)\n', (315, 318), True, 'import numpy as np\n'), ((353, 378), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (376, 378), False, 'import torch\n'), ((451, 484), 'torch.DoubleTensor', 'torch.DoubleTensor', (['[ball_number]'], {}), '([ball_number])\n', (469, 484), False, 'import torch\n'), ((620, 653), 'torch.DoubleTensor', 'torch.DoubleTensor', (['[ball_number]'], {}), '([ball_number])\n', (638, 653), False, 'import torch\n'), ((774, 801), 'torch.argmax', 'torch.argmax', (['action_values'], {}), '(action_values)\n', (786, 801), False, 'import torch\n'), ((953, 968), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (966, 968), False, 'import torch\n'), ((2070, 2081), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (2078, 2081), True, 'import numpy as np\n'), ((1047, 1058), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1055, 1058), True, 'import numpy as np\n'), ((2205, 2225), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (2222, 2225), True, 'import numpy as np\n'), ((1276, 1296), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1293, 1296), True, 'import numpy as np\n'), ((2358, 2378), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (2375, 2378), True, 'import numpy as np\n'), ((1445, 1465), 'numpy.random.randint', 'np.random.randint', (['n'], {}), '(n)\n', (1462, 1465), True, 'import numpy as np\n')]
|
from pathlib import Path
from echopype.convert import Convert
def test_2in1_ek80_conversion():
file = Path("./echopype/test_data/ek80/Green2.Survey2.FM.short.slow.-D20191004-T211557.raw").resolve()
nc_path = file.parent.joinpath(file.stem+".nc")
tmp = Convert(str(file), model="EK80")
tmp.raw2nc()
del tmp
nc_path.unlink()
|
[
"pathlib.Path"
] |
[((108, 203), 'pathlib.Path', 'Path', (['"""./echopype/test_data/ek80/Green2.Survey2.FM.short.slow.-D20191004-T211557.raw"""'], {}), "(\n './echopype/test_data/ek80/Green2.Survey2.FM.short.slow.-D20191004-T211557.raw'\n )\n", (112, 203), False, 'from pathlib import Path\n')]
|
import random
import string
import unittest
from find_the_difference import Solution
from hypothesis import given
from hypothesis.strategies import text
class Test(unittest.TestCase):
def test_1(self):
solution = Solution()
self.assertEqual(solution.findTheDifference("abcd", "abcde"), "e")
@given(text())
def test_random(self, s):
solution = Solution()
random_letter = random.choice(string.ascii_letters)
t = list(s + random_letter)
random.shuffle(t)
t = "".join(t)
self.assertEqual(solution.findTheDifference(s, t), random_letter)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"find_the_difference.Solution",
"random.shuffle",
"random.choice",
"hypothesis.strategies.text"
] |
[((646, 661), 'unittest.main', 'unittest.main', ([], {}), '()\n', (659, 661), False, 'import unittest\n'), ((228, 238), 'find_the_difference.Solution', 'Solution', ([], {}), '()\n', (236, 238), False, 'from find_the_difference import Solution\n'), ((383, 393), 'find_the_difference.Solution', 'Solution', ([], {}), '()\n', (391, 393), False, 'from find_the_difference import Solution\n'), ((418, 453), 'random.choice', 'random.choice', (['string.ascii_letters'], {}), '(string.ascii_letters)\n', (431, 453), False, 'import random\n'), ((498, 515), 'random.shuffle', 'random.shuffle', (['t'], {}), '(t)\n', (512, 515), False, 'import random\n'), ((326, 332), 'hypothesis.strategies.text', 'text', ([], {}), '()\n', (330, 332), False, 'from hypothesis.strategies import text\n')]
|
import os
import logging
import json
import pandas as pd
def data_paths_from_periodicity(periodicity):
if periodicity == 'hourly':
return ['../datasets/bitstamp_data_hourly.csv']
elif periodicity == 'daily':
return ['../datasets/bitstamp_data_daily.csv']
return ['../datasets/bitstamp_data.csv.part1',
'../datasets/bitstamp_data.csv.part2',
'../datasets/bitstamp_data.csv.part3',
'../datasets/bitstamp_data.csv.part4',
'../datasets/bitstamp_data.csv.part5']
def load_btc_data(periodicity):
file_paths = data_paths_from_periodicity(periodicity)
    # Helper function that converts dates given as unix timestamps
    # into date objects.
def unix_time_to_date(x): return pd.to_datetime(x, unit='s')
li = []
for filename in file_paths:
df = pd.read_csv(filename, parse_dates=[
'Timestamp'], date_parser=unix_time_to_date, index_col='Timestamp')
li.append(df)
return pd.concat(li, axis=0)
def load_btc_csv(filepath):
    # Helper function that converts dates given as unix timestamps
    # into date objects.
def unix_time_to_date(x): return pd.to_datetime(x, unit='s')
return pd.read_csv(filepath, parse_dates=['Timestamp'], date_parser=unix_time_to_date, index_col='Timestamp')
def load_glassnode_json():
glassnode_json_directory = '../datasets/glassnode/json/'
df = pd.DataFrame()
for f in os.listdir(glassnode_json_directory):
if f.endswith('.json'):
col_name = f[:-len('.json')]
df0 = pd.read_json(os.path.join(glassnode_json_directory, f),
orient='records', precise_float=True,
convert_dates=['t'])
# Sets the index
df0.rename(columns={'t': 'Timestamp'}, inplace=True)
df0.set_index('Timestamp', inplace=True)
# Change column name
if 'v' in df0.columns:
df0.rename(columns={'v': col_name}, inplace=True)
else:
columns = df0['o'][0].keys()
# TODO: stock-to-flow.json requires a special treatment.
if 'ratio' in columns:
df0['ratio'] = df0['o'].apply(lambda x: x['ratio'])
df0['daysTillHalving'] = df0['o'].apply(
lambda x: x['daysTillHalving'])
else:
for c in columns:
df0[[c]] = df0['o'].map(lambda d: d[c])
df0.drop(['o'], axis=1, inplace=True)
# Merge it
if df.empty:
df = df0
else:
df = pd.merge(df, df0, how='inner', left_index=True,
right_index=True)
return df
def load_glassnode_csv():
return load_btc_csv('../datasets/glassnode/csv/dataset.csv')
def load_gtrends_csv():
# Correctly parses the date.
def date_to_pandas_datetime(x): return pd.to_datetime(x, format='%Y-%m-%d')
df = pd.read_csv('../datasets/google_trends/gtrends.csv', parse_dates=[
'Timestamp'], date_parser=date_to_pandas_datetime, index_col='Timestamp')
df.sort_index(inplace=True)
return df
def load_alternative_me_csv():
# Correctly parses the date.
def date_to_pandas_datetime(x): return pd.to_datetime(x, format='%d-%m-%Y')
df = pd.read_csv('../datasets/alternative_me/alternative_me.csv', parse_dates=[
'Timestamp'], date_parser=date_to_pandas_datetime, index_col='Timestamp')
# Convert SentimentClassification into a factor
df['SentimentClassificationFactor'], _ = pd.factorize(
df.SentimentClassification)
# Removes the used column
df.drop('SentimentClassification', inplace=True, axis=1)
df.sort_index(inplace=True)
return df
|
[
"pandas.DataFrame",
"pandas.concat",
"pandas.read_csv",
"pandas.merge",
"pandas.to_datetime",
"pandas.factorize",
"os.path.join",
"os.listdir"
] |
[((1017, 1038), 'pandas.concat', 'pd.concat', (['li'], {'axis': '(0)'}), '(li, axis=0)\n', (1026, 1038), True, 'import pandas as pd\n'), ((1250, 1357), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'parse_dates': "['Timestamp']", 'date_parser': 'unix_time_to_date', 'index_col': '"""Timestamp"""'}), "(filepath, parse_dates=['Timestamp'], date_parser=\n unix_time_to_date, index_col='Timestamp')\n", (1261, 1357), True, 'import pandas as pd\n'), ((1453, 1467), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1465, 1467), True, 'import pandas as pd\n'), ((1481, 1517), 'os.listdir', 'os.listdir', (['glassnode_json_directory'], {}), '(glassnode_json_directory)\n', (1491, 1517), False, 'import os\n'), ((3073, 3217), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/google_trends/gtrends.csv"""'], {'parse_dates': "['Timestamp']", 'date_parser': 'date_to_pandas_datetime', 'index_col': '"""Timestamp"""'}), "('../datasets/google_trends/gtrends.csv', parse_dates=[\n 'Timestamp'], date_parser=date_to_pandas_datetime, index_col='Timestamp')\n", (3084, 3217), True, 'import pandas as pd\n'), ((3436, 3588), 'pandas.read_csv', 'pd.read_csv', (['"""../datasets/alternative_me/alternative_me.csv"""'], {'parse_dates': "['Timestamp']", 'date_parser': 'date_to_pandas_datetime', 'index_col': '"""Timestamp"""'}), "('../datasets/alternative_me/alternative_me.csv', parse_dates=[\n 'Timestamp'], date_parser=date_to_pandas_datetime, index_col='Timestamp')\n", (3447, 3588), True, 'import pandas as pd\n'), ((3703, 3743), 'pandas.factorize', 'pd.factorize', (['df.SentimentClassification'], {}), '(df.SentimentClassification)\n', (3715, 3743), True, 'import pandas as pd\n'), ((770, 797), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'unit': '"""s"""'}), "(x, unit='s')\n", (784, 797), True, 'import pandas as pd\n'), ((855, 962), 'pandas.read_csv', 'pd.read_csv', (['filename'], {'parse_dates': "['Timestamp']", 'date_parser': 'unix_time_to_date', 'index_col': '"""Timestamp"""'}), "(filename, parse_dates=['Timestamp'], date_parser=\n unix_time_to_date, index_col='Timestamp')\n", (866, 962), True, 'import pandas as pd\n'), ((1211, 1238), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'unit': '"""s"""'}), "(x, unit='s')\n", (1225, 1238), True, 'import pandas as pd\n'), ((3027, 3063), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'format': '"""%Y-%m-%d"""'}), "(x, format='%Y-%m-%d')\n", (3041, 3063), True, 'import pandas as pd\n'), ((3390, 3426), 'pandas.to_datetime', 'pd.to_datetime', (['x'], {'format': '"""%d-%m-%Y"""'}), "(x, format='%d-%m-%Y')\n", (3404, 3426), True, 'import pandas as pd\n'), ((1623, 1664), 'os.path.join', 'os.path.join', (['glassnode_json_directory', 'f'], {}), '(glassnode_json_directory, f)\n', (1635, 1664), False, 'import os\n'), ((2722, 2787), 'pandas.merge', 'pd.merge', (['df', 'df0'], {'how': '"""inner"""', 'left_index': '(True)', 'right_index': '(True)'}), "(df, df0, how='inner', left_index=True, right_index=True)\n", (2730, 2787), True, 'import pandas as pd\n')]
|
from http import HTTPStatus
from random import sample
from unittest import mock
from urllib.parse import quote
from pytest import fixture
import jwt
from api.mappings import Sighting, Indicator, Relationship
from .utils import headers
def implemented_routes():
yield '/observe/observables'
yield '/refer/observables'
@fixture(scope='module',
params=implemented_routes(),
ids=lambda route: f'POST {route}')
def implemented_route(request):
return request.param
@fixture(scope='module')
def invalid_json():
return [{'type': 'unknown', 'value': ''}]
def test_enrich_call_with_invalid_json_failure(implemented_route,
client,
invalid_json):
response = client.post(implemented_route, json=invalid_json)
# The actual error message is quite unwieldy, so let's just ignore it.
expected_payload = {
'errors': [
{
'code': 'invalid payload received',
'message': mock.ANY,
'type': 'fatal',
}
]
}
assert response.status_code == HTTPStatus.OK
assert response.get_json() == expected_payload
def avotx_api_routes():
yield '/observe/observables'
@fixture(scope='module',
params=avotx_api_routes(),
ids=lambda route: f'POST {route}')
def avotx_api_route(request):
return request.param
def all_routes():
yield '/deliberate/observables'
yield '/observe/observables'
yield '/refer/observables'
@fixture(scope='module',
params=all_routes(),
ids=lambda route: f'POST {route}')
def any_route(request):
return request.param
def avotx_api_response(status_code):
mock_response = mock.MagicMock()
mock_response.status_code = status_code
if status_code == HTTPStatus.OK:
indicator_types = [
'domain',
'FileHash-MD5',
'FileHash-SHA1',
'FileHash-SHA256',
'IPv4',
'IPv6',
'URL',
]
pulse = {
'author': {
'username': 'JoriQ',
},
'description': (
'This is simply the best pulse in the history of humankind!'
),
'id': 'q1w2e3r4t5y6',
'indicator_type_counts': {
indicator_type: 1
for indicator_type in indicator_types
},
'name': 'Best Pulse Ever',
'tags': ['open', 'threat', 'exchange'],
'TLP': 'white',
}
indicators = [
{
'indicator': 'jsebnawkndwandawd.sh',
'created': '1970-01-01T00:00:00',
'expiration': None,
},
{
'indicator': 'd8414d743778cae103c15461200ec64d',
'created': '1970-01-02T00:00:00',
'expiration': None,
},
{
'indicator': '4f79d1a01b9b5cb3cb65a9911db2a02ea3bb7c45',
'created': '1970-01-03T00:00:00',
'expiration': None,
},
{
'indicator': 'efdd3ee0f816eba8ab1cba3643e42b40aaa16654d5120c67169d1b002e7f714d', # noqa: E501
'created': '1970-01-04T00:00:00',
'expiration': None,
},
{
'indicator': '172.16.58.3',
'created': '1970-01-05T00:00:00',
'expiration': '1970-01-06T00:00:00',
},
{
'indicator': '2001:14ba:1f00:0:1117:e76e:843d:f803',
'created': '1970-01-06T00:00:00',
'expiration': '1970-01-07T00:00:00',
},
{
'indicator': 'http://blockchains.pk/nw_NIHbAj35.bin',
'created': '1970-01-07T00:00:00',
'expiration': None,
},
]
for indicator in indicators:
indicator['pulse_key'] = pulse['id']
payload_list = []
for indicator_type in indicator_types:
payload_list.append({
'base_indicator': {
'type': indicator_type,
},
'pulse_info': {
'pulses': [pulse],
},
})
payload_list.append({
'next': None,
'results': indicators,
})
payload_list_iter = iter(payload_list)
mock_response.json = lambda: next(payload_list_iter)
return mock_response
@fixture(scope='module')
def expected_payload(any_route, client, valid_json):
app = client.application
payload = None
if any_route.startswith('/deliberate'):
payload = {}
if any_route.startswith('/observe'):
observable_types = {
'domain',
'md5',
'sha1',
'sha256',
'ip',
'ipv6',
'url',
}
observables = [
observable
for observable in valid_json
if observable['type'] in observable_types
]
count = len(observables)
start_times = [
f'1970-01-0{day}T00:00:00Z'
for day in range(1, count + 1)
]
observed_times = [
{'start_time': start_time}
for start_time in start_times
]
for observed_time in observed_times:
observed_time['end_time'] = observed_time['start_time']
valid_times = [
{'start_time': start_time}
for start_time in start_times
]
for index in range(count):
if observables[index]['type'] in ('ip', 'ipv6'):
day = index + 1
valid_times[index]['end_time'] = (
f'1970-01-0{day + 1}T00:00:00Z'
)
description = (
'This is simply the best pulse in the history of humankind!'
)
external_ids = ['q1w2e3r4t5y6']
producer = 'JoriQ'
short_description = description
source_uri = (
f"{app.config['AVOTX_URL'].rstrip('/')}/pulse/{external_ids[0]}"
)
tags = ['open', 'threat', 'exchange']
title = 'Best Pulse Ever'
tlp = 'white'
# Implement a dummy class initializing its instances
# only after the first comparison with any other object.
class LazyEqualizer:
NONE = object()
def __init__(self):
self.value = self.NONE
def __eq__(self, other):
if self.value is self.NONE:
self.value = other
return self.value == other
sighting_refs = [LazyEqualizer() for _ in range(count)]
indicator_refs = [LazyEqualizer() for _ in range(count)]
payload = {
'sightings': {
'count': count,
'docs': [
{
'description': description,
'external_ids': external_ids,
'id': sighting_ref,
'observables': [observable],
'observed_time': observed_time,
'source_uri': source_uri,
'title': title,
'tlp': tlp,
**Sighting.DEFAULTS
}
for sighting_ref, observable, observed_time
in zip(sighting_refs, observables, observed_times)
],
},
'indicators': {
'count': count,
'docs': [
{
'id': indicator_ref,
'external_ids': external_ids,
'producer': producer,
'short_description': short_description,
'source_uri': source_uri,
'tags': tags,
'title': title,
'tlp': tlp,
'valid_time': valid_time,
**Indicator.DEFAULTS
}
for indicator_ref, observable, valid_time
in zip(indicator_refs, observables, valid_times)
],
},
'relationships': {
'count': count,
'docs': [
{
'id': mock.ANY,
'source_ref': sighting_ref,
'target_ref': indicator_ref,
**Relationship.DEFAULTS
}
for sighting_ref, indicator_ref
in zip(sighting_refs, indicator_refs)
],
},
}
if any_route.startswith('/refer'):
observable_types = {
'domain': {'name': 'domain', 'category': 'domain'},
'email': {'name': 'email', 'category': 'email'},
'md5': {'name': 'MD5', 'category': 'file'},
'sha1': {'name': 'SHA1', 'category': 'file'},
'sha256': {'name': 'SHA256', 'category': 'file'},
'ip': {'name': 'IP', 'category': 'ip'},
'ipv6': {'name': 'IPv6', 'category': 'ip'},
'url': {'name': 'URL', 'category': 'url'},
}
payload = []
for observable in valid_json:
if observable['type'] not in observable_types:
continue
observable = {**observable, **observable_types[observable['type']]}
reference = {
'id': (
f"ref-avotx-search-{observable['type']}-"
f"{quote(observable['value'], safe='')}"
),
'title': f"Search for this {observable['name']}",
'description': (
f"Lookup this {observable['name']} on AlienVault OTX"
),
'url': (
f"{app.config['AVOTX_URL']}/indicator/"
f"{observable['category']}/"
f"{quote(observable['value'], safe='@:')}"
),
'categories': ['Search', 'AlienVault OTX'],
}
payload.append(reference)
assert payload is not None, f'Unknown route: {any_route}.'
return {'data': payload}
def test_enrich_call_success(any_route,
client,
valid_json,
mock_request,
valid_jwt,
expected_payload,
get_public_key):
app = client.application
response = None
if any_route.startswith('/deliberate'):
response = client.post(any_route)
if any_route.startswith('/observe'):
target = 'api.observables.ThreadPoolExecutor.map'
side_effect = map
with mock.patch(target, side_effect=side_effect):
mock_request.side_effect = (
[get_public_key] + [avotx_api_response(HTTPStatus.OK)] * 14
)
response = client.post(any_route,
json=valid_json,
headers=headers(valid_jwt()))
observable_types = {
'domain': 'domain',
'md5': 'file',
'sha1': 'file',
'sha256': 'file',
'ip': 'IPv4',
'ipv6': 'IPv6',
'url': 'url',
}
expected_urls = []
expected_headers = {
'User-Agent': app.config['CTR_USER_AGENT'],
'X-OTX-API-KEY': (
jwt.decode(
valid_jwt(), options={'verify_signature': False}
)['key']
),
}
expected_params_list = []
pulse_id = 'q1w2e3r4t5y6'
for observable in valid_json:
if observable['type'] not in observable_types:
continue
category = observable_types[observable['type']]
expected_urls.append(
f"{app.config['AVOTX_URL']}/api/v1/indicators/{category}/"
f"{quote(observable['value'], safe='@:')}/general"
)
expected_params_list.append({})
expected_urls.append(
f"{app.config['AVOTX_URL']}/api/v1/pulses/{pulse_id}/"
'indicators'
)
expected_params_list.append({
'sort': '-created',
'limit': mock.ANY,
'page': 1,
})
mock_request.assert_has_calls([
mock.call(expected_url,
headers=expected_headers,
params=expected_params)
for expected_url, expected_params
in zip(expected_urls, expected_params_list)
])
if any_route.startswith('/refer'):
response = client.post(any_route, json=valid_json)
assert response is not None, f'Unknown route: {any_route}.'
assert response.status_code == HTTPStatus.OK
assert response.get_json() == expected_payload
def test_enrich_call_with_external_error_from_avotx_failure(
avotx_api_route, client, valid_json, mock_request, valid_jwt,
get_public_key):
for status_code, error_code, error_message in [
(
HTTPStatus.FORBIDDEN,
'authorization error',
('Authorization failed: '
'Authorization failed on AlienVault OTX side'),
),
(
HTTPStatus.INTERNAL_SERVER_ERROR,
'oops',
'Something went wrong. Reason: '
f'{HTTPStatus.INTERNAL_SERVER_ERROR.value} '
f'{HTTPStatus.INTERNAL_SERVER_ERROR.phrase}.',
),
]:
app = client.application
mock_request.side_effect = [
get_public_key, avotx_api_response(status_code)
]
def shuffle(sequence):
return sample(sequence, len(sequence))
observables = shuffle(valid_json)
response = client.post(avotx_api_route,
json=observables,
headers=headers(valid_jwt()))
observable_types = {
'domain': 'domain',
'md5': 'file',
'sha1': 'file',
'sha256': 'file',
'ip': 'IPv4',
'ipv6': 'IPv6',
'url': 'url',
}
expected_headers = {
'User-Agent': app.config['CTR_USER_AGENT'],
'X-OTX-API-KEY': (
jwt.decode(
valid_jwt(), options={'verify_signature': False}
)['key']
),
}
expected_params = {}
expected_urls = []
for observable in valid_json:
if observable['type'] not in observable_types:
continue
category = observable_types[observable['type']]
expected_urls.append(
f"{app.config['AVOTX_URL']}/api/v1/indicators/{category}/"
f"{quote(observable['value'], safe='@:')}/general"
)
mock_request.assert_has_calls([
mock.call(expected_url,
headers=expected_headers,
params=expected_params)
for expected_url in expected_urls
], any_order=True)
mock_request.reset_mock()
expected_payload = {
'errors': [
{
'code': error_code,
'message': error_message,
'type': 'fatal',
}
]
}
assert response.status_code == HTTPStatus.OK
assert response.get_json() == expected_payload
|
[
"unittest.mock.MagicMock",
"pytest.fixture",
"unittest.mock.patch",
"urllib.parse.quote",
"unittest.mock.call"
] |
[((498, 521), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (505, 521), False, 'from pytest import fixture\n'), ((4616, 4639), 'pytest.fixture', 'fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (4623, 4639), False, 'from pytest import fixture\n'), ((1776, 1792), 'unittest.mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (1790, 1792), False, 'from unittest import mock\n'), ((11052, 11095), 'unittest.mock.patch', 'mock.patch', (['target'], {'side_effect': 'side_effect'}), '(target, side_effect=side_effect)\n', (11062, 11095), False, 'from unittest import mock\n'), ((15514, 15587), 'unittest.mock.call', 'mock.call', (['expected_url'], {'headers': 'expected_headers', 'params': 'expected_params'}), '(expected_url, headers=expected_headers, params=expected_params)\n', (15523, 15587), False, 'from unittest import mock\n'), ((12931, 13004), 'unittest.mock.call', 'mock.call', (['expected_url'], {'headers': 'expected_headers', 'params': 'expected_params'}), '(expected_url, headers=expected_headers, params=expected_params)\n', (12940, 13004), False, 'from unittest import mock\n'), ((9807, 9842), 'urllib.parse.quote', 'quote', (["observable['value']"], {'safe': '""""""'}), "(observable['value'], safe='')\n", (9812, 9842), False, 'from urllib.parse import quote\n'), ((10213, 10250), 'urllib.parse.quote', 'quote', (["observable['value']"], {'safe': '"""@:"""'}), "(observable['value'], safe='@:')\n", (10218, 10250), False, 'from urllib.parse import quote\n'), ((15399, 15436), 'urllib.parse.quote', 'quote', (["observable['value']"], {'safe': '"""@:"""'}), "(observable['value'], safe='@:')\n", (15404, 15436), False, 'from urllib.parse import quote\n'), ((12416, 12453), 'urllib.parse.quote', 'quote', (["observable['value']"], {'safe': '"""@:"""'}), "(observable['value'], safe='@:')\n", (12421, 12453), False, 'from urllib.parse import quote\n')]
|
# -*- coding: utf-8 -*-
"""
.. invisible:
_ _ _____ _ _____ _____
| | | | ___| | | ___/ ___|
| | | | |__ | | | |__ \ `--.
| | | | __|| | | __| `--. \
\ \_/ / |___| |___| |___/\__/ /
\___/\____/\_____|____/\____/
Created on Apr 13, 2015
BLAS class to use with ocl backend.
███████████████████████████████████████████████████████████████████████████████
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
███████████████████████████████████████████████████████████████████████████████
"""
from cuda4py.blas import CUBLAS_OP_N, CUBLAS_OP_T
import numpy
import opencl4py.blas as clblas
import os
import threading
import weakref
from zope.interface import implementer
from veles.accelerated_units import AcceleratedUnit, IOpenCLUnit
from veles.config import root
from veles.dummy import DummyWorkflow
from veles.logger import Logger
from veles.numpy_ext import roundup
@implementer(IOpenCLUnit)
class Builder(AcceleratedUnit):
"""Dummy unit for building OpenCL kernels.
"""
def __init__(self, workflow, **kwargs):
super(Builder, self).__init__(workflow, **kwargs)
self.source = kwargs["source"]
self.defines = kwargs["defines"]
self.kernel_name = kwargs["kernel_name"]
self.cache_file_name = kwargs["cache_file_name"]
self.dtype = kwargs["dtype"]
@property
def kernel(self):
return self._kernel_
def ocl_init(self):
self.sources_[self.source] = {}
self.build_program(self.defines, self.cache_file_name, self.dtype)
self.assign_kernel(self.kernel_name)
def ocl_run(self):
pass
class OCLBLAS(Logger):
"""Class with BLAS functionality similar to CUBLAS.
It uses CLBLAS when available or custom kernels otherwise.
"""
@staticmethod
def attach_to_device(device):
if device.blas is None:
device.blas = OCLBLAS(device)
def __init__(self, device):
super(OCLBLAS, self).__init__()
self._lock_ = threading.Lock()
self._device = weakref.ref(device)
self.kernels = {}
self._const_i = numpy.zeros(3, dtype=numpy.uint64)
try:
if (root.common.engine.ocl.clBLAS is not True or
root.common.engine.precision_level > 0):
raise ValueError()
if "CLBLAS_STORAGE_PATH" not in os.environ:
found = False
for dirnme in root.common.engine.device_dirs:
for path, _, files in os.walk(dirnme):
for f in files:
if f.endswith(".kdb"):
found = True
os.environ["CLBLAS_STORAGE_PATH"] = path
break
if found:
break
if found:
break
self.blas = clblas.CLBLAS()
self._sgemm = self.clblas_sgemm
self._dgemm = self.clblas_dgemm
self.debug("Using clBLAS for matrix multiplication")
except (OSError, RuntimeError, ValueError):
self._sgemm = self.veles_gemm
self._dgemm = self.veles_gemm
self.debug("Using Veles OpenCL kernels for matrix multiplication")
@property
def device(self):
return self._device()
@staticmethod
def gemm(dtype):
if dtype == numpy.float32:
return OCLBLAS.sgemm
if dtype == numpy.float64:
return OCLBLAS.dgemm
raise ValueError("Invalid dtype %s" % dtype)
def sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._sgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
return self._dgemm(
transA, transB, rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_sgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
        Matrices are assumed to be tightly packed and stored like in CUBLAS.
Single precision (float) version.
"""
self.blas.sgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def clblas_dgemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using clBLAS.
        Matrices are assumed to be tightly packed and stored like in CUBLAS.
Double precision (double) version.
"""
self.blas.dgemm((self.device.queue_,), clblas.clblasColumnMajor,
transA, transB, rowsCountA, columnCountB,
commonSideLength, alpha, A, B, beta, C,
offsetA=offsetA, offsetB=offsetB, offsetC=offsetC)
def veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA=0, offsetB=0, offsetC=0):
"""Does a matrix multiplication like in CUBLAS using custom kernel.
        Matrices are assumed to be tightly packed and stored like in CUBLAS.
"""
with self._lock_:
self._veles_gemm(transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC)
def _veles_gemm(self, transA, transB,
rowsCountA, columnCountB, commonSideLength,
alpha, A, B, beta, C, offsetA, offsetB, offsetC):
dtype = alpha.dtype
key = (transA, transB, rowsCountA, columnCountB, commonSideLength,
dtype)
krn_info = self.kernels.get(key)
if krn_info is None:
block_size, vector_opt = self.device.device_info.get_kernel_bs_vo(
kernel="matrix_multiplication", dtype=dtype)
defines = {
"BLOCK_SIZE": block_size,
"VECTOR_OPT": int(bool(vector_opt)),
"B_WIDTH": rowsCountA,
"A_WIDTH": columnCountB,
"AB_COMMON": commonSideLength
}
if transB == CUBLAS_OP_T:
defines["A_COL"] = 1
else:
assert transB == CUBLAS_OP_N
if transA == CUBLAS_OP_N:
defines["B_COL"] = 1
else:
assert transA == CUBLAS_OP_T
global_size = (roundup(rowsCountA, block_size),
roundup(columnCountB, block_size))
local_size = (block_size, block_size)
w = DummyWorkflow()
builder = Builder(
w, source="gemm", defines=defines, kernel_name="gemm",
cache_file_name=(
"veles_gemm_%s" % "_".join(str(x) for x in key)),
dtype=dtype)
builder.initialize(self.device)
krn_info = (builder.kernel, global_size, local_size)
self.kernels[key] = krn_info
del builder
del w
# Set the constants and execute the kernel
krn = krn_info[0]
self._const_i[0:3] = offsetA, offsetB, offsetC
# Our kernel stores output in row-major order, so swap A and B
krn.set_args(B, A, C, alpha, beta, self._const_i[1:2],
self._const_i[0:1], self._const_i[2:3])
global_size = krn_info[1]
local_size = krn_info[2]
self.device.queue_.execute_kernel(krn, global_size, local_size,
need_event=False)
|
[
"opencl4py.blas.CLBLAS",
"zope.interface.implementer",
"os.walk",
"numpy.zeros",
"threading.Lock",
"veles.dummy.DummyWorkflow",
"veles.numpy_ext.roundup",
"weakref.ref"
] |
[((1621, 1645), 'zope.interface.implementer', 'implementer', (['IOpenCLUnit'], {}), '(IOpenCLUnit)\n', (1632, 1645), False, 'from zope.interface import implementer\n'), ((2720, 2736), 'threading.Lock', 'threading.Lock', ([], {}), '()\n', (2734, 2736), False, 'import threading\n'), ((2760, 2779), 'weakref.ref', 'weakref.ref', (['device'], {}), '(device)\n', (2771, 2779), False, 'import weakref\n'), ((2830, 2864), 'numpy.zeros', 'numpy.zeros', (['(3)'], {'dtype': 'numpy.uint64'}), '(3, dtype=numpy.uint64)\n', (2841, 2864), False, 'import numpy\n'), ((3641, 3656), 'opencl4py.blas.CLBLAS', 'clblas.CLBLAS', ([], {}), '()\n', (3654, 3656), True, 'import opencl4py.blas as clblas\n'), ((8184, 8199), 'veles.dummy.DummyWorkflow', 'DummyWorkflow', ([], {}), '()\n', (8197, 8199), False, 'from veles.dummy import DummyWorkflow\n'), ((8023, 8054), 'veles.numpy_ext.roundup', 'roundup', (['rowsCountA', 'block_size'], {}), '(rowsCountA, block_size)\n', (8030, 8054), False, 'from veles.numpy_ext import roundup\n'), ((8083, 8116), 'veles.numpy_ext.roundup', 'roundup', (['columnCountB', 'block_size'], {}), '(columnCountB, block_size)\n', (8090, 8116), False, 'from veles.numpy_ext import roundup\n'), ((3225, 3240), 'os.walk', 'os.walk', (['dirnme'], {}), '(dirnme)\n', (3232, 3240), False, 'import os\n')]
|
# Copyright (c) Trainline Limited, 2016-2017. All rights reserved. See LICENSE.txt in the project root for license information.
import base64, json, logging, requests
from retrying import retry
class ConsulError(RuntimeError):
pass
def handle_connection_error(func):
def handle_error(*args, **kwargs):
try:
return func(*args, **kwargs)
except requests.exceptions.ConnectionError as e:
logging.exception(e)
raise ConsulError('Failed to establish connection with Consul HTTP API. Check that Consul agent is running.')
return handle_error
def retry_if_connection_error(exception):
return isinstance(exception, requests.exceptions.ConnectionError)
class ConsulApi(object):
def __init__(self, consul_config):
self._config = consul_config
self._base_url = '{0}://{1}:{2}/{3}'.format(self._config['scheme'], self._config['host'], self._config['port'], self._config['version'])
self._last_known_modify_index = 0
@handle_connection_error
@retry(retry_on_exception=retry_if_connection_error, wait_exponential_multiplier=1000, wait_exponential_max=60000)
def _api_get(self, relative_url):
url = '{0}/{1}'.format(self._base_url, relative_url)
logging.debug('Consul HTTP API request: {0}'.format(url))
response = requests.get(url, headers={'X-Consul-Token': self._config['acl_token']})
logging.debug('Response status code: {0}'.format(response.status_code))
logging.debug('Response content: {0}'.format(response.text))
if response.status_code == 500:
raise ConsulError('Consul HTTP API internal error. Response content: {0}'.format(response.text))
return response
@handle_connection_error
@retry(retry_on_exception=retry_if_connection_error, wait_exponential_multiplier=1000, wait_exponential_max=60000)
def _api_put(self, relative_url, content):
url = '{0}/{1}'.format(self._base_url, relative_url)
logging.debug('Consul HTTP API PUT request URL: {0}'.format(url))
logging.debug('Consul HTTP API PUT request content: {0}'.format(content))
response = requests.put(url, data=content, headers={'X-Consul-Token': self._config['acl_token']})
logging.debug('Response status code: {0}'.format(response.status_code))
logging.debug('Response content: {0}'.format(response.text))
if response.status_code == 500:
raise ConsulError('Consul HTTP API internal error. Response content: {0}'.format(response.text))
return response
@retry(wait_fixed=5000, stop_max_attempt_number=12)
def _get_modify_index(self, key, for_write_operation):
logging.debug('Retrieving Consul key-value store modify index for key: {0}'.format(key))
response = self._api_get('kv/{0}?index'.format(key))
# For new values modify_index == 0
if response.status_code == 404 and for_write_operation == True:
modify_index = 0
else:
modify_index = response.headers.get('X-Consul-Index')
logging.debug('Consul key-value store modify index for key \'{0}\': {1}'.format(key, modify_index))
return modify_index
def check_connectivity(self):
logging.info('Checking Consul HTTP API connectivity')
self._api_get('agent/self')
logging.info('Consul HTTP API connectivity OK ')
def get_keys(self, key_prefix):
def decode():
return response.json()
def not_found():
logging.warning('Consul key-value store does not contain key prefix \'{0}\''.format(key_prefix))
return []
response = self._api_get('kv/{0}?keys'.format(key_prefix))
cases = {200: decode, 404: not_found}
return cases[response.status_code]()
def get_service_catalogue(self):
response = self._api_get('agent/services')
return response.json()
def get_value(self, key):
def decode():
values = response.json()
for value in values:
value['Value'] = json.loads(base64.b64decode(value['Value']))
return values[0].get('Value')
def not_found():
logging.warning('Consul key-value store does not contain a value for key \'{0}\''.format(key))
return None
response = self._api_get('kv/{0}'.format(key))
cases = {200: decode, 404: not_found}
return cases[response.status_code]()
def key_exists(self, key):
return self.get_value(key) is not None
def deregister_check(self, id):
response = self._api_put('agent/check/deregister/{0}'.format(id), {})
return response.status_code == 200
def register_http_check(self, service_id, id, name, url, interval, tls_skip_verify=False):
response = self._api_put('agent/check/register', json.dumps({'ServiceID': service_id, 'ID': id, 'Name': name, 'HTTP': url, 'TLSSkipVerify': tls_skip_verify, 'Interval': interval}))
return response.status_code == 200
def register_script_check(self, service_id, id, name, script_path, interval):
response = self._api_put('agent/check/register', json.dumps({'ServiceID': service_id, 'ID': id, 'Name': name, 'Script': script_path, 'Interval': interval}))
return response.status_code == 200
def register_service(self, id, name, address, port, tags):
response = self._api_put('agent/service/register', json.dumps({'ID': id, 'Name': name, 'Address': address, 'Port': port, 'Tags': tags}))
return response.status_code == 200
def wait_for_change(self, key_prefix):
modify_index = self._get_modify_index(key_prefix, False)
if modify_index is None:
self._last_known_modify_index = modify_index
#raise ConsulError('Modify index is invalid.')
if self._last_known_modify_index is None:
logging.info('There may be changes that have not been processed yet, skipping blocking query.')
self._last_known_modify_index = modify_index
return
self._last_known_modify_index = modify_index
logging.debug('Blocking query to Consul HTTP API to wait for changes in the \'{0}\' key space...'.format(key_prefix))
# TODO: Timeout by default is 5 minutes. This can be changed by adding wait=10s or wait=10m to the query string
self._api_get('kv/{0}?index={1}'.format(key_prefix, self._last_known_modify_index))
def write_value(self, key, value):
modify_index = self._get_modify_index(key, True)
response = self._api_put('kv/{0}?cas={1}'.format(key, modify_index), json.dumps(value))
return response.text == 'true'
|
[
"logging.exception",
"json.dumps",
"base64.b64decode",
"logging.info",
"requests.get",
"requests.put",
"retrying.retry"
] |
[((1040, 1157), 'retrying.retry', 'retry', ([], {'retry_on_exception': 'retry_if_connection_error', 'wait_exponential_multiplier': '(1000)', 'wait_exponential_max': '(60000)'}), '(retry_on_exception=retry_if_connection_error,\n wait_exponential_multiplier=1000, wait_exponential_max=60000)\n', (1045, 1157), False, 'from retrying import retry\n'), ((1768, 1885), 'retrying.retry', 'retry', ([], {'retry_on_exception': 'retry_if_connection_error', 'wait_exponential_multiplier': '(1000)', 'wait_exponential_max': '(60000)'}), '(retry_on_exception=retry_if_connection_error,\n wait_exponential_multiplier=1000, wait_exponential_max=60000)\n', (1773, 1885), False, 'from retrying import retry\n'), ((2580, 2630), 'retrying.retry', 'retry', ([], {'wait_fixed': '(5000)', 'stop_max_attempt_number': '(12)'}), '(wait_fixed=5000, stop_max_attempt_number=12)\n', (2585, 2630), False, 'from retrying import retry\n'), ((1338, 1410), 'requests.get', 'requests.get', (['url'], {'headers': "{'X-Consul-Token': self._config['acl_token']}"}), "(url, headers={'X-Consul-Token': self._config['acl_token']})\n", (1350, 1410), False, 'import base64, json, logging, requests\n'), ((2165, 2256), 'requests.put', 'requests.put', (['url'], {'data': 'content', 'headers': "{'X-Consul-Token': self._config['acl_token']}"}), "(url, data=content, headers={'X-Consul-Token': self._config[\n 'acl_token']})\n", (2177, 2256), False, 'import base64, json, logging, requests\n'), ((3251, 3304), 'logging.info', 'logging.info', (['"""Checking Consul HTTP API connectivity"""'], {}), "('Checking Consul HTTP API connectivity')\n", (3263, 3304), False, 'import base64, json, logging, requests\n'), ((3349, 3397), 'logging.info', 'logging.info', (['"""Consul HTTP API connectivity OK """'], {}), "('Consul HTTP API connectivity OK ')\n", (3361, 3397), False, 'import base64, json, logging, requests\n'), ((4861, 4995), 'json.dumps', 'json.dumps', (["{'ServiceID': service_id, 'ID': id, 'Name': name, 'HTTP': url,\n 'TLSSkipVerify': tls_skip_verify, 'Interval': interval}"], {}), "({'ServiceID': service_id, 'ID': id, 'Name': name, 'HTTP': url,\n 'TLSSkipVerify': tls_skip_verify, 'Interval': interval})\n", (4871, 4995), False, 'import base64, json, logging, requests\n'), ((5176, 5286), 'json.dumps', 'json.dumps', (["{'ServiceID': service_id, 'ID': id, 'Name': name, 'Script': script_path,\n 'Interval': interval}"], {}), "({'ServiceID': service_id, 'ID': id, 'Name': name, 'Script':\n script_path, 'Interval': interval})\n", (5186, 5286), False, 'import base64, json, logging, requests\n'), ((5450, 5538), 'json.dumps', 'json.dumps', (["{'ID': id, 'Name': name, 'Address': address, 'Port': port, 'Tags': tags}"], {}), "({'ID': id, 'Name': name, 'Address': address, 'Port': port,\n 'Tags': tags})\n", (5460, 5538), False, 'import base64, json, logging, requests\n'), ((5899, 6004), 'logging.info', 'logging.info', (['"""There may be changes that have not been processed yet, skipping blocking query."""'], {}), "(\n 'There may be changes that have not been processed yet, skipping blocking query.'\n )\n", (5911, 6004), False, 'import base64, json, logging, requests\n'), ((6636, 6653), 'json.dumps', 'json.dumps', (['value'], {}), '(value)\n', (6646, 6653), False, 'import base64, json, logging, requests\n'), ((436, 456), 'logging.exception', 'logging.exception', (['e'], {}), '(e)\n', (453, 456), False, 'import base64, json, logging, requests\n'), ((4093, 4125), 'base64.b64decode', 'base64.b64decode', (["value['Value']"], {}), "(value['Value'])\n", (4109, 4125), False, 'import base64, json, logging, requests\n')]
|
import argparse, socket
from time import sleep, time, localtime, strftime
import time
import logging
import sys
import trace
fhand = logging.FileHandler('new20180321.log', mode='a', encoding='GBK')
logging.basicConfig(level=logging.DEBUG, # log level printed to the console
handlers=[fhand],
format=
'%(asctime)s - %(levelname)s: %(message)s'
                    # log format
)
def recvall(sock, length):
data = b''
while len(data) < length:
more = sock.recv(length - len(data))
if not more:
raise EOFError('was expecting %d bytes but only received %d bytes before the socket closed'
% (length, len(data)))
data += more
return data
header = '''
GET / HTTP/1.1\r\n
Host: 192.168.1.157:8000\r\n
Connection: keep-alive\r\n
Upgrade-Insecure-Requests: 1\r\n
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36\r\n
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\r\n
DNT: 1\r\n
Accept-Encoding: gzip, deflate\r\n
Accept-Language: zh-CN,zh;q=0.9\r\n
If-None-Match: "15e84b11ce57dec1b9483884f4e5587e71d5c201"\r\n
\r\n
'''
CRLF = "\r\n\r\n"
def client(host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
print('Client has been assigned socket name', sock.getsockname())
sock.send((header).encode())
print(header.encode())
reply = recvall(sock, 160)
print('reply', repr(reply))
sock.close()
def main():
print(strftime("%Y-%m-%d %H:%M:%S", localtime(time.time())))
try:
# client('172.16.17.32', 80)
client('192.168.1.157', 8000)
except Exception as e:
print(e)
logging.info(e)
if __name__ == '__main__':
print(strftime("%Y-%m-%d %H:%M:%S", localtime(time.time())))
client('https://www.baidu.com', 443)
|
[
"logging.FileHandler",
"logging.basicConfig",
"socket.socket",
"time.time",
"logging.info"
] |
[((134, 198), 'logging.FileHandler', 'logging.FileHandler', (['"""new20180321.log"""'], {'mode': '"""a"""', 'encoding': '"""GBK"""'}), "('new20180321.log', mode='a', encoding='GBK')\n", (153, 198), False, 'import logging\n'), ((200, 315), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'handlers': '[fhand]', 'format': '"""%(asctime)s - %(levelname)s: %(message)s"""'}), "(level=logging.DEBUG, handlers=[fhand], format=\n '%(asctime)s - %(levelname)s: %(message)s')\n", (219, 315), False, 'import logging\n'), ((1366, 1415), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1379, 1415), False, 'import argparse, socket\n'), ((1872, 1887), 'logging.info', 'logging.info', (['e'], {}), '(e)\n', (1884, 1887), False, 'import logging\n'), ((1721, 1732), 'time.time', 'time.time', ([], {}), '()\n', (1730, 1732), False, 'import time\n'), ((1983, 1994), 'time.time', 'time.time', ([], {}), '()\n', (1992, 1994), False, 'import time\n')]
|
from utils import parseSource, nodesToString, nodesToLines, dumpNodes, dumpTree
from converters import DecoratorConverter
def test_DecoratorGather_01():
src = """
@require_call_auth( "view" )
def bim():
pass
"""
matches = DecoratorConverter().gather( parseSource( src ) )
match = matches[ 0 ]
assert nodesToString( match.at_sym ) == '@'
assert nodesToString( match.decorated ) == 'require_call_auth( "view" )'
assert str( match.newl ) == '\n'
def test_DecoratorProcess_01():
src = """
@require_call_auth( "view" )
def bim():
pass
"""
nodes = parseSource( src )
cvtr = DecoratorConverter()
matches = cvtr.gather( nodes )
cvtr.processAll( matches )
assert nodesToLines( nodes )[ 0 ] == """/* @require_call_auth( "view" ) DECORATOR */"""
|
[
"utils.parseSource",
"utils.nodesToLines",
"converters.DecoratorConverter",
"utils.nodesToString"
] |
[((640, 656), 'utils.parseSource', 'parseSource', (['src'], {}), '(src)\n', (651, 656), False, 'from utils import parseSource, nodesToString, nodesToLines, dumpNodes, dumpTree\n'), ((670, 690), 'converters.DecoratorConverter', 'DecoratorConverter', ([], {}), '()\n', (688, 690), False, 'from converters import DecoratorConverter\n'), ((292, 308), 'utils.parseSource', 'parseSource', (['src'], {}), '(src)\n', (303, 308), False, 'from utils import parseSource, nodesToString, nodesToLines, dumpNodes, dumpTree\n'), ((349, 376), 'utils.nodesToString', 'nodesToString', (['match.at_sym'], {}), '(match.at_sym)\n', (362, 376), False, 'from utils import parseSource, nodesToString, nodesToLines, dumpNodes, dumpTree\n'), ((397, 427), 'utils.nodesToString', 'nodesToString', (['match.decorated'], {}), '(match.decorated)\n', (410, 427), False, 'from utils import parseSource, nodesToString, nodesToLines, dumpNodes, dumpTree\n'), ((263, 283), 'converters.DecoratorConverter', 'DecoratorConverter', ([], {}), '()\n', (281, 283), False, 'from converters import DecoratorConverter\n'), ((768, 787), 'utils.nodesToLines', 'nodesToLines', (['nodes'], {}), '(nodes)\n', (780, 787), False, 'from utils import parseSource, nodesToString, nodesToLines, dumpNodes, dumpTree\n')]
|
import json
from mobilebdd.reports.base import BaseReporter
class JsonReporter(BaseReporter):
"""
    Outputs the test run results in the form of JSON.
    One example use case is to plug this into a BDD API that returns the results
    in JSON format.
"""
def __init__(self, config):
super(JsonReporter, self).__init__(config)
def get_json(self):
"""
:return: json payload of the test results
:rtype: str
"""
return json.dumps({u'features': self.features})
|
[
"json.dumps"
] |
[((488, 528), 'json.dumps', 'json.dumps', (["{u'features': self.features}"], {}), "({u'features': self.features})\n", (498, 528), False, 'import json\n')]
|
import json
import logging
from django.http import HttpResponse, HttpResponseBadRequest
from django.views.decorators.csrf import csrf_exempt
logger = logging.getLogger('pretix.security.csp')
@csrf_exempt
def csp_report(request):
try:
body = json.loads(request.body.decode())
logger.warning(
'CSP violation at {r[document-uri]}\n'
'Referer: {r[referrer]}\n'
'Blocked: {r[blocked-uri]}\n'
'Violated: {r[violated-directive]}\n'
            'Original policy: {r[original-policy]}'.format(r=body['csp-report'])
)
except (ValueError, KeyError) as e:
logger.exception('CSP report failed ' + str(e))
return HttpResponseBadRequest()
return HttpResponse()
|
[
"django.http.HttpResponseBadRequest",
"django.http.HttpResponse",
"logging.getLogger"
] |
[((152, 192), 'logging.getLogger', 'logging.getLogger', (['"""pretix.security.csp"""'], {}), "('pretix.security.csp')\n", (169, 192), False, 'import logging\n'), ((735, 749), 'django.http.HttpResponse', 'HttpResponse', ([], {}), '()\n', (747, 749), False, 'from django.http import HttpResponse, HttpResponseBadRequest\n'), ((699, 723), 'django.http.HttpResponseBadRequest', 'HttpResponseBadRequest', ([], {}), '()\n', (721, 723), False, 'from django.http import HttpResponse, HttpResponseBadRequest\n')]
|
# Generated by Django 2.0 on 2017-12-20 16:32
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ctf', '0002_auto_20171220_1128'),
]
operations = [
migrations.AlterField(
model_name='category',
name='requires',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='categories_required_by', to='ctf.Question'),
),
migrations.AlterField(
model_name='question',
name='category',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='questions', to='ctf.Category'),
),
migrations.AlterField(
model_name='question',
name='hint',
field=models.TextField(blank=True, null=True),
),
migrations.AlterField(
model_name='question',
name='requires',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='questions_required_by', to='ctf.Question'),
),
]
|
[
"django.db.models.ForeignKey",
"django.db.models.TextField"
] |
[((367, 521), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""categories_required_by"""', 'to': '"""ctf.Question"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='categories_required_by', to='ctf.Question'\n )\n", (384, 521), False, 'from django.db import migrations, models\n'), ((637, 760), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""questions"""', 'to': '"""ctf.Category"""'}), "(null=True, on_delete=django.db.models.deletion.SET_NULL,\n related_name='questions', to='ctf.Category')\n", (654, 760), False, 'from django.db import migrations, models\n'), ((878, 917), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (894, 917), False, 'from django.db import migrations, models\n'), ((1043, 1191), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""questions_required_by"""', 'to': '"""ctf.Question"""'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='questions_required_by', to='ctf.Question')\n", (1060, 1191), False, 'from django.db import migrations, models\n')]
|
# Copyright 2020 HuaWei Technologies. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from ironicclient.common.apiclient import exceptions as ironic_exc
from neutron.db import provisioning_blocks
from neutron_lib import constants as n_const
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from oslo_log import log as logging
from networking_mlnx_baremetal import constants as const, exceptions
from networking_mlnx_baremetal import ironic_client
from networking_mlnx_baremetal import ufm_client
from networking_mlnx_baremetal._i18n import _
from networking_mlnx_baremetal.plugins.ml2 import config
from networking_mlnx_baremetal.ufmclient import exceptions as ufm_exec
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
config.register_opts(CONF)
MLNX_IB_BAREMETAL_ENTITY = 'MLNX-IB-Baremetal'
class InfiniBandBaremetalMechanismDriver(api.MechanismDriver):
"""OpenStack neutron ml2 mechanism driver for mellanox infini-band PKey
configuration when provisioning baremetal using Ironic.
"""
def initialize(self):
"""Perform driver initialization.
Called after all drivers have been loaded and the database has
been initialized. No abstract methods defined below will be
called prior to this method being called.
"""
self.ironic_client = ironic_client.get_client()
self.ufm_client = ufm_client.get_client()
self.conf = CONF[const.MLNX_BAREMETAL_DRIVER_GROUP_NAME]
self.allowed_network_types = const.SUPPORTED_NETWORK_TYPES
self.allowed_physical_networks = self.conf.physical_networks
def create_network_precommit(self, context):
"""Allocate resources for a new network.
:param context: NetworkContext instance describing the new
network.
Create a new network, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_network_postcommit(self, context):
"""Create a network.
:param context: NetworkContext instance describing the new
network.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_network_precommit(self, context):
"""Update resources of a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Update values of a network, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_network_precommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_network_postcommit(self, context):
"""Update a network.
:param context: NetworkContext instance describing the new
state of the network, as well as the original state prior
to the update_network call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_network_postcommit is called for all changes to the
network state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_network_precommit(self, context):
"""Delete resources for a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Delete network resources previously allocated by this
mechanism driver for a network. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_network_postcommit(self, context):
"""Delete a network.
:param context: NetworkContext instance describing the current
state of the network, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
# TODO(qianbiao.ng): if an UFM partition has no guid, it will be auto
# deleted. So, if port unbound logic is stable (remove guid when
# unbound), we may ignore delete_network_postcommit callback?
for segment in context.network_segments:
if self._is_segment_supported(segment):
segmentation_id = segment.get(api.SEGMENTATION_ID)
pkey = hex(segmentation_id)
try:
self.ufm_client.pkey.delete(pkey)
except ufm_exec.ResourceNotFoundError:
# NOTE(turnbig): ignore 404 exception, because of that the
# UFM partition key may have not been setup at this point.
LOG.info(_("UFM partition key %(pkey)s does not exists, "
"could not be deleted."),
{'pkey': pkey})
except ufm_exec.UfmClientError as e:
LOG.error(_("Failed to delete UFM partition key %(pkey)s, "
"reason is %(reason)s."),
{'pkey': pkey, 'reason': e})
raise
def create_subnet_precommit(self, context):
"""Allocate resources for a new subnet.
:param context: SubnetContext instance describing the new
subnet.
Create a new subnet, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_subnet_postcommit(self, context):
"""Create a subnet.
:param context: SubnetContext instance describing the new
subnet.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
"""
pass
def update_subnet_precommit(self, context):
"""Update resources of a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Update values of a subnet, updating the associated resources
in the database. Called inside transaction context on session.
Raising an exception will result in rollback of the
transaction.
update_subnet_precommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def update_subnet_postcommit(self, context):
"""Update a subnet.
:param context: SubnetContext instance describing the new
state of the subnet, as well as the original state prior
to the update_subnet call.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
cause the deletion of the resource.
update_subnet_postcommit is called for all changes to the
subnet state. It is up to the mechanism driver to ignore
state or state changes that it does not know or care about.
"""
pass
def delete_subnet_precommit(self, context):
"""Delete resources for a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Delete subnet resources previously allocated by this
mechanism driver for a subnet. Called inside transaction
context on session. Runtime errors are not expected, but
raising an exception will result in rollback of the
transaction.
"""
pass
def delete_subnet_postcommit(self, context):
"""Delete a subnet.
:param context: SubnetContext instance describing the current
state of the subnet, prior to the call to delete it.
Called after the transaction commits. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
pass
def create_port_precommit(self, context):
"""Allocate resources for a new port.
:param context: PortContext instance describing the port.
Create a new port, allocating resources as necessary in the
database. Called inside transaction context on session. Call
cannot block. Raising an exception will result in a rollback
of the current transaction.
"""
pass
def create_port_postcommit(self, context):
"""Create a port.
:param context: PortContext instance describing the port.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
"""
pass
def update_port_precommit(self, context):
"""Update resources of a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called inside transaction context on session to complete a
port update as defined by this mechanism driver. Raising an
exception will result in rollback of the transaction.
update_port_precommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
pass
def update_port_postcommit(self, context):
# type: (api.PortContext) -> None
"""Update a port.
:param context: PortContext instance describing the new
state of the port, as well as the original state prior
to the update_port call.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Raising an exception will
result in the deletion of the resource.
update_port_postcommit is called for all changes to the port
state. It is up to the mechanism driver to ignore state or
state changes that it does not know or care about.
"""
port = context.current
original_port = context.original
if not self._is_baremetal_port(port):
LOG.info(_('Port is not a baremetal port, '
'skip update_port_postcommit callback.'))
return
if not self._is_port_bound(context):
LOG.info(_('Port is not bound by current driver, '
'skip update_port_postcommit callback.'))
return
binding_level = self._get_binding_level(context)
LOG.info(_('Port is bound by current driver with binding '
'level %(binding_level)s.'),
{'binding_level': binding_level})
current_vif_type = context.vif_type
original_vif_type = context.original_vif_type
# when port is unbound, unbind relevant guids from IB partition.
if (current_vif_type == portbindings.VIF_TYPE_UNBOUND
and original_vif_type not in const.UNBOUND_VIF_TYPES):
LOG.info(_("Port's VIF type changed from bound to unbound"))
LOG.info(_("Remove infiniband guids from partition key now."))
# binding:host_id has been clear in current port
node_uuid = original_port.get(portbindings.HOST_ID)
node_ib_guids = self._get_ironic_ib_guids(node_uuid)
if len(node_ib_guids) == 0:
LOG.error(_(
'For current port(%(port)s), could not find any '
'infiniband port presents in the same ironic '
'node(%(node_uuid)s), could not remove guids from '
'partition key.'),
{port: port, 'node_uuid': node_uuid})
return
LOG.info(_('To be removed infiniband port guids: %s.')
% node_ib_guids)
segmentation_id = binding_level.get(api.SEGMENTATION_ID)
self.ufm_client.pkey.remove_guids(hex(segmentation_id),
node_ib_guids)
LOG.info(_('Infiniband port guids %(guids)s has been removed '
'from partition key %(pkey)s.'),
{'guids': node_ib_guids,
'pkey': hex(segmentation_id)})
# when port is bound, mark port as provision completed.
if (current_vif_type not in const.UNBOUND_VIF_TYPES
and original_vif_type in const.UNBOUND_VIF_TYPES):
LOG.info(_("Port's VIF type changed from unbound to bound."))
# NOTE(qianbiao.ng): this provisioning_complete action maps to
# provisioning_blocks.add_provisioning_component called in
# bind_port process.
# provisioning_blocks.provisioning_complete(
# context._plugin_context, port['id'], resources.PORT,
# MLNX_IB_BAREMETAL_ENTITY)
pass
# when port binding fails, raise exception
if (port.get('status') == n_const.PORT_STATUS_ERROR
and current_vif_type == portbindings.VIF_TYPE_BINDING_FAILED):
LOG.info(_("Port binding failed, Port's VIF details: "
"%(vif_details)s."),
{'vif_details': context.vif_details})
if context.vif_details.get('driver') == const.DRIVE_NAME:
LOG.info(_("Port binding failure is caused by current driver. "
"Raise an exception to abort port update "
"process."))
raise exceptions.PortBindingException(**context.vif_details)
def delete_port_precommit(self, context):
"""Delete resources of a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called inside transaction context on session. Runtime errors
are not expected, but raising an exception will result in
rollback of the transaction.
"""
pass
def delete_port_postcommit(self, context):
"""Delete a port.
:param context: PortContext instance describing the current
state of the port, prior to the call to delete it.
Called after the transaction completes. Call can block, though
will block the entire process so care should be taken to not
drastically affect performance. Runtime errors are not
expected, and will not prevent the resource from being
deleted.
"""
        # NOTE(turnbig): it's impossible to get the relevant infiniband ports
        # here, because the relevant Ironic node (binding:host_id) has been
        # cleared before the port is deleted.
pass
def bind_port(self, context):
"""Attempt to bind a port.
:param context: PortContext instance describing the port
This method is called outside any transaction to attempt to
establish a port binding using this mechanism driver. Bindings
may be created at each of multiple levels of a hierarchical
network, and are established from the top level downward. At
each level, the mechanism driver determines whether it can
bind to any of the network segments in the
context.segments_to_bind property, based on the value of the
context.host property, any relevant port or network
attributes, and its own knowledge of the network topology. At
the top level, context.segments_to_bind contains the static
segments of the port's network. At each lower level of
binding, it contains static or dynamic segments supplied by
the driver that bound at the level above. If the driver is
able to complete the binding of the port to any segment in
context.segments_to_bind, it must call context.set_binding
with the binding details. If it can partially bind the port,
it must call context.continue_binding with the network
segments to be used to bind at the next lower level.
If the binding results are committed after bind_port returns,
they will be seen by all mechanism drivers as
update_port_precommit and update_port_postcommit calls. But if
some other thread or process concurrently binds or updates the
port, these binding results will not be committed, and
update_port_precommit and update_port_postcommit will not be
called on the mechanism drivers with these results. Because
binding results can be discarded rather than committed,
drivers should avoid making persistent state changes in
bind_port, or else must ensure that such state changes are
eventually cleaned up.
Implementing this method explicitly declares the mechanism
driver as having the intention to bind ports. This is inspected
by the QoS service to identify the available QoS rules you
can use with ports.
"""
port = context.current
is_baremetal_port = self._is_baremetal_port(port)
if not is_baremetal_port:
LOG.info(_('Port is not a baremetal port, skip binding.'))
return
# NOTE(turnbig): it seems ml2 driver will auto check whether a
# driver has been bound by a driver through binding_levels
# has_port_bound = self._is_port_bound(port)
# if has_port_bound:
# LOG.info(_('Port has been bound by this driver, skip binding.'))
# return
# try to bind segment now
LOG.info(_('Port is supported, will try binding IB partition now.'))
for segment in context.segments_to_bind:
if self._is_segment_supported(segment):
node_uuid = port.get(portbindings.HOST_ID)
node_ib_guids = self._get_ironic_ib_guids(node_uuid)
if len(node_ib_guids) == 0:
LOG.warning(_(
'For current port(%(port)s), could not find any IB '
'port presents in the same ironic '
'node(%(node_uuid)s), break bind port process now.'),
{port: port, 'node_uuid': node_uuid})
return
LOG.info(_('Load infiniband ports guids: %s.')
% node_ib_guids)
LOG.debug(_('Try to bind IB ports using segment: %s'), segment)
# update partition key for relevant guids
segment_id = segment[api.ID]
segmentation_id = segment[api.SEGMENTATION_ID]
try:
provisioning_blocks.add_provisioning_component(
context._plugin_context, port['id'], resources.PORT,
MLNX_IB_BAREMETAL_ENTITY)
self.ufm_client.pkey.add_guids(hex(segmentation_id),
guids=node_ib_guids)
LOG.info(_('Successfully bound IB ports %(ports)s to '
'partition %(pkey)s.'),
{'ports': node_ib_guids,
'pkey': hex(segmentation_id)})
# NOTE(turnbig): setting VIF details has no effect here.
# details = {
# const.MLNX_EXTRA_NS: {
# 'guids': node_ib_guids,
# 'pkey': segmentation_id,
# }
# }
# LOG.info(_('Update bound IB port vif info: '
# '%(vif_details)s.'),
# {'vif_details': details})
# context._binding.vif_details = jsonutils.dumps(details)
# NOTE(turnbig): chain current segment again to next driver
new_segment = copy.deepcopy(segment)
context.continue_binding(segment_id, [new_segment])
return
except ufm_exec.UfmClientError as e:
LOG.error(_("Failed to add guids %(guids)s to UFM "
"partition key %(pkey)s, "
"reason is %(reason)s."),
{'guids': node_ib_guids,
'pkey': hex(segmentation_id),
'reason': str(e)})
# TODO(qianbiao.ng): if IB partition binding fails,
# we should abort the bind_port process and exit.
vif_details = {'guids': node_ib_guids,
'pkey': hex(segmentation_id),
'driver': const.DRIVE_NAME,
'reason': str(e)}
context.set_binding(segment[api.ID],
portbindings.VIF_TYPE_BINDING_FAILED,
vif_details,
status=n_const.PORT_STATUS_ERROR)
@staticmethod
def _is_baremetal_port(port):
"""Return whether a port's VNIC_TYPE is baremetal.
Ports supported by this driver must have VNIC type 'baremetal'.
:param port: The port to check
:returns: true if the port's VNIC_TYPE is baremetal
"""
vnic_type = port[portbindings.VNIC_TYPE]
return vnic_type == portbindings.VNIC_BAREMETAL
    def _is_network_supported(self, network):
"""Return whether a network is supported by this driver.
:param network: The network(
:class: openstack.network.v2.network.Network) instance to check
:returns: true if network is supported else false
"""
_this = InfiniBandBaremetalMechanismDriver
LOG.debug("Checking whether network is supported: %(network)s.",
{'network': network})
network_id = network.get('id')
network_type = network.get('provider_network_type')
segmentation_id = network.get('provider_segmentation_id')
physical_network = network.get('provider_physical_network')
if network_type not in self.allowed_network_types:
LOG.debug(_(
'Network %(network_id)s with segmentation-id '
'%(segmentation_id)s has network type %(network_type)s '
'but mlnx_ib_bm mechanism driver only '
'support %(allowed_network_types)s.'),
{'network_id': network_id,
'segmentation_id': segmentation_id,
'network_type': network_type,
'allowed_network_types': self.allowed_network_types})
return False
if not segmentation_id:
LOG.debug(_(
'Network %(network_id)s with segment %(id)s does not has a '
'segmentation id, mlnx_ib_bm requires a segmentation id to '
'create UFM partition.'),
{'network_id': network_id, 'id': segmentation_id})
return False
if not self._is_physical_network_matches(physical_network):
LOG.debug(_(
'Network %(network_id)s with segment %(id)s is connected '
'to physical network %(physnet)s, but mlnx_ib_bm mechanism '
'driver was pre-configured to watch on physical networks '
'%(allowed_physical_networks)s.'),
{'network_id': network_id,
'id': segmentation_id,
'physnet': physical_network,
'allowed_physical_networks': self.allowed_physical_networks})
return False
return True
def _is_segment_supported(self, segment):
"""Return whether a network segment is supported by this driver. A
segment dictionary looks like:
{
"network_id": "9425b757-339d-4954-a17b-dbb3f7061006",
"segmentation_id": 15998,
"physical_network": null,
"id": "3a0946cc-1f61-4211-8a33-b8e2b0b7a2a0",
"network_type": "vxlan"
},
Segment supported by this driver must:
- have network type 'vxlan' or 'vlan'.
- have physical networks in pre-configured physical-networks
- have a segmentation_id
:param segment: indicates the segment to check
:returns: true if segment is supported else false
"""
LOG.debug("Checking whether segment is supported: %(segment)s ",
{'segment': segment})
segment_id = segment[api.ID]
network_id = segment[api.NETWORK_ID]
network_type = segment[api.NETWORK_TYPE]
segmentation_id = segment[api.SEGMENTATION_ID]
physical_network = segment[api.PHYSICAL_NETWORK]
if network_type not in self.allowed_network_types:
LOG.debug(_(
'Network %(network_id)s with segment %(id)s has '
'network type %(network_type)s but mlnx_ib_bm mechanism '
'driver only support %(allowed_network_types)s.'),
{'network_id': network_id,
'id': segment_id,
'network_type': network_type,
'allowed_network_types': self.allowed_network_types})
return False
if not segmentation_id:
LOG.debug(_(
'Network %(network_id)s with segment %(id)s does not has a '
'segmentation id, mlnx_ib_bm requires a segmentation id to '
'create UFM partition.'),
{'network_id': network_id, 'id': segment_id})
return False
if not self._is_physical_network_matches(physical_network):
LOG.debug(_(
'Network %(network_id)s with segment %(id)s is connected '
'to physical network %(physnet)s, but mlnx_ib_bm mechanism '
'driver was pre-configured to watch on physical networks '
'%(allowed_physical_networks)s.'),
{'network_id': network_id,
'id': segment_id,
'physnet': physical_network,
'allowed_physical_networks': self.allowed_physical_networks})
return False
return True
def _is_physical_network_matches(self, physical_network):
"""Return whether the physical network matches the pre-configured
physical-networks of this driver. pre-configured physical-network '*'
means matches anything include none.
:param physical_network: the physical network to check
:return: true if match else false
"""
if (const.PHYSICAL_NETWORK_ANY in self.allowed_physical_networks
or physical_network in self.allowed_physical_networks):
return True
return False
@staticmethod
def _is_port_supported(port_context):
# type: (api.PortContext) -> bool
"""NOTE(turnbig): deprecated, Return whether a port binding is
supported by this driver
Ports supported by this driver must:
- have VNIC type 'baremetal'.
- have physical networks in pre-configured physical-networks
- have
- others maybe? (like Huawei-ml2-driver use prefix)
:param port_context: The port-context to check
:returns: true if supported else false
"""
# TODO(qianbiao.ng): add same strategy like huawei ml2 driver do later.
network = port_context.network.current
physical_network = network.provider_physical_network
this = InfiniBandBaremetalMechanismDriver
return (this._is_baremetal_port(port_context)
and this._is_network_type_supported(network)
and this._is_physical_network_matches(physical_network))
@staticmethod
def _is_port_bound(port_context):
# type: (api.PortContext) -> bool
"""Return whether a port has been bound by this driver.
        Ports bound by this driver have a binding:levels entry containing a
        level generated by this driver, and the segment of that level is in
        the network segments of this port.
        NOTE(turnbig): this driver does not have a real neutron port
        connected to an infiniband port; the port here is the baremetal PXE
        ethernet port of the same Ironic node that owns the real
        infiniband ports.
:param port_context: The PortContext to check
:returns: true if port has been bound by this driver else false
"""
this = InfiniBandBaremetalMechanismDriver
port_id = port_context.current.get('id')
binding_level = this._get_binding_level(port_context)
if binding_level:
segmentation_id = binding_level.get(api.SEGMENTATION_ID)
LOG.info("Port %(port_id)s has been bound to segmentation "
"%(segmentation_id)s by driver %(driver)s",
{"port_id": port_id,
"segmentation_id": segmentation_id,
"driver": const.DRIVE_NAME})
return True
LOG.info("Port %(port_id)s is not bound to any known segmentation "
"of its network by driver %(driver)s",
{"port_id": port_id,
"driver": const.DRIVE_NAME})
return False
@staticmethod
def _get_binding_level(port_context):
# type: (api.PortContext) -> dict
"""Return the binding level relevant to this driver.
        Ports bound by this driver have a binding:levels entry containing a
        level generated by this driver, and the segment of that level is in
        the network segments of this port.
        NOTE(turnbig): this driver does not have a real neutron port
        connected to an infiniband port; the port here is the baremetal PXE
        ethernet port of the same Ironic node that owns the real
        infiniband ports.
:param port_context: The PortContext to check
:returns: binding level if port has been bound by this driver else None
"""
network_segments = port_context.network.network_segments
network_segment_id_list = {s.get(api.SEGMENTATION_ID)
for s in network_segments}
# NOTE(qianbiao.ng): It's impossible to get binding_levels from
# PortContext.binding_levels in this place (only in bind_port
# callback). But, binding_levels is passed as a property in port
# dictionary. Remember this binding_levels property has different
# data structure from PortContext.binding_levels.
"""binding_levels property examples:
[
{
"physical_network": "",
"driver": "mlnx_ib_bm",
"network_type": "vxlan",
"segmentation_id": 15998,
"level": 0
},
....
]
"""
binding_levels = port_context.current.get('binding_levels', [])
LOG.info("Get binding_level of current driver from "
"network segments: %(segments)s, "
"binding levels: %(binding_levels)s.",
{'segments': network_segments,
'binding_levels': binding_levels})
for level in binding_levels:
bound_driver = level.get('driver')
segmentation_id = level.get(api.SEGMENTATION_ID)
if (bound_driver == const.DRIVE_NAME and
segmentation_id in network_segment_id_list):
return level
return None
def _get_ironic_ib_guids(self, node):
"""Get all ib guid list of an Ironic node.
:param node: indicates the uuid of ironic node
:return: infiniband guid list for all present IB ports
"""
try:
node_ports = self.ironic_client.port.list(node=node, detail=True)
node_ib_guids = [node_port.extra.get('client-id')
for node_port in node_ports
if node_port.extra.get('client-id')]
return node_ib_guids
except ironic_exc.UnsupportedVersion:
LOG.exception(
"Failed to get ironic port list, Ironic Client is "
"using unsupported version of the API.")
raise
except (ironic_exc.AuthPluginOptionsMissing,
ironic_exc.AuthSystemNotFound):
LOG.exception("Failed to get ironic port list due to Ironic Client"
" authentication failure.")
raise
except Exception:
LOG.exception("Failed to get ironic port list.")
raise
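
# --- Illustrative only, not part of the original driver module ---
# A mechanism driver like this is normally enabled through neutron's ml2
# configuration. The entry-point name "mlnx_ib_bm" is inferred from the log
# messages above; the exact group and option values are assumptions.
#
#     [ml2]
#     mechanism_drivers = openvswitch,mlnx_ib_bm
#
#     [mlnx_baremetal]
#     physical_networks = *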
|
[
"networking_mlnx_baremetal.ufm_client.get_client",
"oslo_log.log.getLogger",
"copy.deepcopy",
"neutron.db.provisioning_blocks.add_provisioning_component",
"networking_mlnx_baremetal.exceptions.PortBindingException",
"networking_mlnx_baremetal.ironic_client.get_client",
"networking_mlnx_baremetal.plugins.ml2.config.register_opts",
"networking_mlnx_baremetal._i18n._"
] |
[((1369, 1396), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1386, 1396), True, 'from oslo_log import log as logging\n'), ((1397, 1423), 'networking_mlnx_baremetal.plugins.ml2.config.register_opts', 'config.register_opts', (['CONF'], {}), '(CONF)\n', (1417, 1423), False, 'from networking_mlnx_baremetal.plugins.ml2 import config\n'), ((1981, 2007), 'networking_mlnx_baremetal.ironic_client.get_client', 'ironic_client.get_client', ([], {}), '()\n', (2005, 2007), False, 'from networking_mlnx_baremetal import ironic_client\n'), ((2034, 2057), 'networking_mlnx_baremetal.ufm_client.get_client', 'ufm_client.get_client', ([], {}), '()\n', (2055, 2057), False, 'from networking_mlnx_baremetal import ufm_client\n'), ((13131, 13205), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port is bound by current driver with binding level %(binding_level)s."""'], {}), "('Port is bound by current driver with binding level %(binding_level)s.')\n", (13132, 13205), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((20150, 20208), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port is supported, will try binding IB partition now."""'], {}), "('Port is supported, will try binding IB partition now.')\n", (20151, 20208), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((12744, 12816), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port is not a baremetal port, skip update_port_postcommit callback."""'], {}), "('Port is not a baremetal port, skip update_port_postcommit callback.')\n", (12745, 12816), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((12930, 13009), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port is not bound by current driver, skip update_port_postcommit callback."""'], {}), "('Port is not bound by current driver, skip update_port_postcommit callback.')\n", (12931, 13009), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((13607, 13657), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port\'s VIF type changed from bound to unbound"""'], {}), '("Port\'s VIF type changed from bound to unbound")\n', (13608, 13657), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((13680, 13732), 'networking_mlnx_baremetal._i18n._', '_', (['"""Remove infiniband guids from partition key now."""'], {}), "('Remove infiniband guids from partition key now.')\n", (13681, 13732), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((14649, 14736), 'networking_mlnx_baremetal._i18n._', '_', (['"""Infiniband port guids %(guids)s has been removed from partition key %(pkey)s."""'], {}), "('Infiniband port guids %(guids)s has been removed from partition key %(pkey)s.'\n )\n", (14650, 14736), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((15071, 15122), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port\'s VIF type changed from unbound to bound."""'], {}), '("Port\'s VIF type changed from unbound to bound.")\n', (15072, 15122), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((15706, 15768), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port binding failed, Port\'s VIF details: %(vif_details)s."""'], {}), '("Port binding failed, Port\'s VIF details: %(vif_details)s.")\n', (15707, 15768), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((16137, 16191), 'networking_mlnx_baremetal.exceptions.PortBindingException', 'exceptions.PortBindingException', ([], {}), '(**context.vif_details)\n', (16168, 16191), False, 'from networking_mlnx_baremetal import constants as const, exceptions\n'), ((19707, 
19755), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port is not a baremetal port, skip binding."""'], {}), "('Port is not a baremetal port, skip binding.')\n", (19708, 19755), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((24849, 25028), 'networking_mlnx_baremetal._i18n._', '_', (['"""Network %(network_id)s with segmentation-id %(segmentation_id)s has network type %(network_type)s but mlnx_ib_bm mechanism driver only support %(allowed_network_types)s."""'], {}), "('Network %(network_id)s with segmentation-id %(segmentation_id)s has network type %(network_type)s but mlnx_ib_bm mechanism driver only support %(allowed_network_types)s.'\n )\n", (24850, 25028), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((25393, 25540), 'networking_mlnx_baremetal._i18n._', '_', (['"""Network %(network_id)s with segment %(id)s does not has a segmentation id, mlnx_ib_bm requires a segmentation id to create UFM partition."""'], {}), "('Network %(network_id)s with segment %(id)s does not has a segmentation id, mlnx_ib_bm requires a segmentation id to create UFM partition.'\n )\n", (25394, 25540), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((25775, 25985), 'networking_mlnx_baremetal._i18n._', '_', (['"""Network %(network_id)s with segment %(id)s is connected to physical network %(physnet)s, but mlnx_ib_bm mechanism driver was pre-configured to watch on physical networks %(allowed_physical_networks)s."""'], {}), "('Network %(network_id)s with segment %(id)s is connected to physical network %(physnet)s, but mlnx_ib_bm mechanism driver was pre-configured to watch on physical networks %(allowed_physical_networks)s.'\n )\n", (25776, 25985), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((27494, 27652), 'networking_mlnx_baremetal._i18n._', '_', (['"""Network %(network_id)s with segment %(id)s has network type %(network_type)s but mlnx_ib_bm mechanism driver only support %(allowed_network_types)s."""'], {}), "('Network %(network_id)s with segment %(id)s has network type %(network_type)s but mlnx_ib_bm mechanism driver only support %(allowed_network_types)s.'\n )\n", (27495, 27652), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((27980, 28127), 'networking_mlnx_baremetal._i18n._', '_', (['"""Network %(network_id)s with segment %(id)s does not has a segmentation id, mlnx_ib_bm requires a segmentation id to create UFM partition."""'], {}), "('Network %(network_id)s with segment %(id)s does not has a segmentation id, mlnx_ib_bm requires a segmentation id to create UFM partition.'\n )\n", (27981, 28127), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((28357, 28567), 'networking_mlnx_baremetal._i18n._', '_', (['"""Network %(network_id)s with segment %(id)s is connected to physical network %(physnet)s, but mlnx_ib_bm mechanism driver was pre-configured to watch on physical networks %(allowed_physical_networks)s."""'], {}), "('Network %(network_id)s with segment %(id)s is connected to physical network %(physnet)s, but mlnx_ib_bm mechanism driver was pre-configured to watch on physical networks %(allowed_physical_networks)s.'\n )\n", (28358, 28567), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((13991, 14155), 'networking_mlnx_baremetal._i18n._', '_', (['"""For current port(%(port)s), could not find any infiniband port presents in the same ironic node(%(node_uuid)s), could not remove guids from partition key."""'], {}), "('For current port(%(port)s), could not find any infiniband port presents in the same ironic 
node(%(node_uuid)s), could not remove guids from partition key.'\n )\n", (13992, 14155), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((14345, 14390), 'networking_mlnx_baremetal._i18n._', '_', (['"""To be removed infiniband port guids: %s."""'], {}), "('To be removed infiniband port guids: %s.')\n", (14346, 14390), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((15950, 16058), 'networking_mlnx_baremetal._i18n._', '_', (['"""Port binding failure is caused by current driver. Raise an exception to abort port update process."""'], {}), "('Port binding failure is caused by current driver. Raise an exception to abort port update process.'\n )\n", (15951, 16058), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((20955, 20998), 'networking_mlnx_baremetal._i18n._', '_', (['"""Try to bind IB ports using segment: %s"""'], {}), "('Try to bind IB ports using segment: %s')\n", (20956, 20998), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((21217, 21346), 'neutron.db.provisioning_blocks.add_provisioning_component', 'provisioning_blocks.add_provisioning_component', (['context._plugin_context', "port['id']", 'resources.PORT', 'MLNX_IB_BAREMETAL_ENTITY'], {}), "(context._plugin_context,\n port['id'], resources.PORT, MLNX_IB_BAREMETAL_ENTITY)\n", (21263, 21346), False, 'from neutron.db import provisioning_blocks\n'), ((22476, 22498), 'copy.deepcopy', 'copy.deepcopy', (['segment'], {}), '(segment)\n', (22489, 22498), False, 'import copy\n'), ((20515, 20657), 'networking_mlnx_baremetal._i18n._', '_', (['"""For current port(%(port)s), could not find any IB port presents in the same ironic node(%(node_uuid)s), break bind port process now."""'], {}), "('For current port(%(port)s), could not find any IB port presents in the same ironic node(%(node_uuid)s), break bind port process now.'\n )\n", (20516, 20657), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((20848, 20885), 'networking_mlnx_baremetal._i18n._', '_', (['"""Load infiniband ports guids: %s."""'], {}), "('Load infiniband ports guids: %s.')\n", (20849, 20885), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((21567, 21632), 'networking_mlnx_baremetal._i18n._', '_', (['"""Successfully bound IB ports %(ports)s to partition %(pkey)s."""'], {}), "('Successfully bound IB ports %(ports)s to partition %(pkey)s.')\n", (21568, 21632), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((6414, 6484), 'networking_mlnx_baremetal._i18n._', '_', (['"""UFM partition key %(pkey)s does not exists, could not be deleted."""'], {}), "('UFM partition key %(pkey)s does not exists, could not be deleted.')\n", (6415, 6484), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((6648, 6719), 'networking_mlnx_baremetal._i18n._', '_', (['"""Failed to delete UFM partition key %(pkey)s, reason is %(reason)s."""'], {}), "('Failed to delete UFM partition key %(pkey)s, reason is %(reason)s.')\n", (6649, 6719), False, 'from networking_mlnx_baremetal._i18n import _\n'), ((22681, 22773), 'networking_mlnx_baremetal._i18n._', '_', (['"""Failed to add guids %(guids)s to UFM partition key %(pkey)s, reason is %(reason)s."""'], {}), "('Failed to add guids %(guids)s to UFM partition key %(pkey)s, reason is %(reason)s.'\n )\n", (22682, 22773), False, 'from networking_mlnx_baremetal._i18n import _\n')]
|
import numpy as np
from lipkin.model import LipkinModel
class HartreeFock(LipkinModel):
name = 'Hartree-Fock'
def __init__(self, epsilon, V, Omega):
if Omega%2 == 1:
raise ValueError('This HF implementation assumes N = Omega = even.')
LipkinModel.__init__(self, epsilon, V, Omega, Omega)
self.r_gs = (-1)**(0.5*self.Omega)
self.err = 1E-8
def solve_equations(self, num_iter=100, theta0=0.0, phi0=0.0):
# set initial tau
tau = np.array([theta0, phi0])
# construct HF hamiltonian
h = self.get_self_consistent_hamiltonian(tau)
# construct kinetic energy
T = np.zeros((2,2), dtype=np.complex128)
T[0,0] = -0.5*self.epsilon*self.Omega
T[1,1] = 0.5*self.epsilon*self.Omega
# container for single particle potential
Gamma = np.zeros((2,2), dtype=np.complex128)
for i in range(num_iter):
# solve eigenvalue problem
eigvals, eigvecs = np.linalg.eig(h)
# construct new density matrix
rho = np.outer(eigvecs[:,0], np.conjugate(eigvecs[:,0]))
# construct new potential
Gamma[0,1] = -self.V*self.Omega*(self.Omega-1)*rho[1,0]
Gamma[1,0] = -self.V*self.Omega*(self.Omega-1)*rho[0,1]
# construct new hamiltonian
h = T + Gamma
# calculate energy
E = 0.5*np.trace(np.dot(T+h, rho)).real
return E
def get_self_consistent_hamiltonian(self, tau):
theta, phi = tau[0], tau[1]
h = np.empty((2,2), dtype=np.complex128)
h[0,0] = 1
h[1,1] = -1
h[0,1] = self.chi*np.sin(theta)*np.exp(1j*phi)
h[1,0] = self.chi*np.sin(theta)*np.exp(-1j*phi)
return -0.5*self.epsilon*self.Omega*h
def minimize_energy(self, num_iter=10000):
# pick small initial tau = (theta, phi)
tau = np.random.normal(0.0, 0.1, 2)
# initialize adam optimizer
self.m = np.zeros(2)
self.v = np.zeros(2)
# start minimizing
for self.t in range(1, num_iter+1):
E = self.get_energy(tau)
grad = self.get_gradient_energy(tau)
tau = self.update_tau(tau, grad)
return tau
def minimize_signature_projected_energy(self, r, num_iter=10000):
# pick small initial tau = (theta, phi)
tau = np.random.normal(0.0, 0.1, 2)
# initialize adam optimizer
self.m = np.zeros(2)
self.v = np.zeros(2)
# start minimizing
for self.t in range(1, num_iter+1):
Er = self.get_signature_projected_energy(r, tau)
grad = self.get_gradient_projected_energy(r, tau)
tau = self.update_tau(tau, grad)
return tau
def get_energy(self, tau):
theta, phi = tau[0], tau[1]
        E = np.cos(theta) + 0.5*self.chi*(np.sin(theta)**2)*np.cos(2*phi)
return -0.5*self.epsilon*self.Omega*E
def get_gradient_energy(self, tau):
theta, phi = tau[0], tau[1]
factor = 0.5*self.epsilon*self.Omega*np.sin(theta)
dE_dtheta = factor*(1-self.chi*np.cos(theta)*np.cos(2*phi))
dE_dphi = factor*self.chi*np.sin(theta)*np.sin(2*phi)
return np.array([dE_dtheta, dE_dphi])
def get_weight(self, r, tau):
theta = tau[0]
a = 1.0+r*self.r_gs*(np.cos(theta))**(self.Omega-2)
b = 1.0+r*self.r_gs*(np.cos(theta))**self.Omega
if a < self.err and b < self.err:
return float((self.Omega-2))/float(self.Omega)
else:
return (a+self.err)/(b+self.err)
def get_gradient_weight(self, r, tau):
theta = tau[0]
a = 2*(1+r*self.r_gs*(np.cos(theta))**self.Omega)-self.Omega*(np.sin(theta))**2
a *= r*self.r_gs*np.sin(theta)*(np.cos(theta))**(self.Omega-3)
b = (1+r*self.r_gs*(np.cos(theta))**self.Omega)**2
if a < self.err and b < self.err:
return np.array([theta*float((self.Omega-2))/float(self.Omega), 0])
return np.array([(a+self.err)/(b+self.err), 0])
def get_signature_projected_energy(self, r, tau):
return self.get_energy(tau)*self.get_weight(r, tau)
def get_gradient_projected_energy(self, r, tau):
E = self.get_energy(tau)
W = self.get_weight(r, tau)
gradE = self.get_gradient_energy(tau)
gradW = self.get_gradient_weight(r, tau)
return E*gradW + W*gradE
def update_tau(self, tau, gradient, eta0=0.001, beta1=0.9, beta2=0.999, epsilon=1.0E-8):
eta = eta0*np.sqrt(1.0-beta2**self.t)/(1.0-beta1**self.t)
        self.m = beta1*self.m + (1.0-beta1)*gradient
        self.v = beta2*self.v + (1.0-beta2)*np.square(gradient)
tau -= eta*np.divide(self.m, np.sqrt(self.v)+epsilon)
self.t += 1
return tau
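
# Illustrative usage sketch, not part of the original module; the parameter
# values below are arbitrary assumptions (Omega must be even for this class).
if __name__ == '__main__':
    hf = HartreeFock(epsilon=1.0, V=0.2, Omega=4)
    print('HF energy from the self-consistent loop:', hf.solve_equations())
    print('tau minimizing the HF energy surface:', hf.minimize_energy())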
|
[
"numpy.empty",
"numpy.square",
"numpy.zeros",
"numpy.linalg.eig",
"lipkin.model.LipkinModel.__init__",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.random.normal",
"numpy.cos",
"numpy.dot",
"numpy.conjugate",
"numpy.sqrt"
] |
[((292, 344), 'lipkin.model.LipkinModel.__init__', 'LipkinModel.__init__', (['self', 'epsilon', 'V', 'Omega', 'Omega'], {}), '(self, epsilon, V, Omega, Omega)\n', (312, 344), False, 'from lipkin.model import LipkinModel\n'), ((538, 562), 'numpy.array', 'np.array', (['[theta0, phi0]'], {}), '([theta0, phi0])\n', (546, 562), True, 'import numpy as np\n'), ((717, 754), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (725, 754), True, 'import numpy as np\n'), ((920, 957), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (928, 957), True, 'import numpy as np\n'), ((1740, 1777), 'numpy.empty', 'np.empty', (['(2, 2)'], {'dtype': 'np.complex128'}), '((2, 2), dtype=np.complex128)\n', (1748, 1777), True, 'import numpy as np\n'), ((2123, 2152), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)', '(2)'], {}), '(0.0, 0.1, 2)\n', (2139, 2152), True, 'import numpy as np\n'), ((2215, 2226), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2223, 2226), True, 'import numpy as np\n'), ((2244, 2255), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2252, 2255), True, 'import numpy as np\n'), ((2655, 2684), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(0.1)', '(2)'], {}), '(0.0, 0.1, 2)\n', (2671, 2684), True, 'import numpy as np\n'), ((2747, 2758), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2755, 2758), True, 'import numpy as np\n'), ((2776, 2787), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (2784, 2787), True, 'import numpy as np\n'), ((3574, 3604), 'numpy.array', 'np.array', (['[dE_dtheta, dE_dphi]'], {}), '([dE_dtheta, dE_dphi])\n', (3582, 3604), True, 'import numpy as np\n'), ((4419, 4465), 'numpy.array', 'np.array', (['[(a + self.err) / (b + self.err), 0]'], {}), '([(a + self.err) / (b + self.err), 0])\n', (4427, 4465), True, 'import numpy as np\n'), ((1079, 1095), 'numpy.linalg.eig', 'np.linalg.eig', (['h'], {}), '(h)\n', (1092, 1095), True, 'import numpy as np\n'), ((1865, 1883), 'numpy.exp', 'np.exp', (['(1.0j * phi)'], {}), '(1.0j * phi)\n', (1871, 1883), True, 'import numpy as np\n'), ((1920, 1939), 'numpy.exp', 'np.exp', (['(-1.0j * phi)'], {}), '(-1.0j * phi)\n', (1926, 1939), True, 'import numpy as np\n'), ((3154, 3167), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3160, 3167), True, 'import numpy as np\n'), ((3414, 3427), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3420, 3427), True, 'import numpy as np\n'), ((3544, 3559), 'numpy.sin', 'np.sin', (['(2 * phi)'], {}), '(2 * phi)\n', (3550, 3559), True, 'import numpy as np\n'), ((1193, 1220), 'numpy.conjugate', 'np.conjugate', (['eigvecs[:, 0]'], {}), '(eigvecs[:, 0])\n', (1205, 1220), True, 'import numpy as np\n'), ((1851, 1864), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1857, 1864), True, 'import numpy as np\n'), ((1906, 1919), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (1912, 1919), True, 'import numpy as np\n'), ((3200, 3215), 'numpy.cos', 'np.cos', (['(2 * phi)'], {}), '(2 * phi)\n', (3206, 3215), True, 'import numpy as np\n'), ((3530, 3543), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3536, 3543), True, 'import numpy as np\n'), ((4159, 4172), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4165, 4172), True, 'import numpy as np\n'), ((4174, 4187), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4180, 4187), True, 'import numpy as np\n'), ((4994, 5024), 'numpy.sqrt', 'np.sqrt', (['(1.0 - beta2 ** self.t)'], {}), '(1.0 - beta2 ** 
self.t)\n', (5001, 5024), True, 'import numpy as np\n'), ((5135, 5154), 'numpy.square', 'np.square', (['gradient'], {}), '(gradient)\n', (5144, 5154), True, 'import numpy as np\n'), ((3481, 3496), 'numpy.cos', 'np.cos', (['(2 * phi)'], {}), '(2 * phi)\n', (3487, 3496), True, 'import numpy as np\n'), ((3705, 3718), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3711, 3718), True, 'import numpy as np\n'), ((3765, 3778), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3771, 3778), True, 'import numpy as np\n'), ((4116, 4129), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (4122, 4129), True, 'import numpy as np\n'), ((5193, 5208), 'numpy.sqrt', 'np.sqrt', (['self.v'], {}), '(self.v)\n', (5200, 5208), True, 'import numpy as np\n'), ((1560, 1578), 'numpy.dot', 'np.dot', (['(T + h)', 'rho'], {}), '(T + h, rho)\n', (1566, 1578), True, 'import numpy as np\n'), ((3182, 3195), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (3188, 3195), True, 'import numpy as np\n'), ((3467, 3480), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (3473, 3480), True, 'import numpy as np\n'), ((4233, 4246), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4239, 4246), True, 'import numpy as np\n'), ((4076, 4089), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (4082, 4089), True, 'import numpy as np\n')]
|
from typing import List, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import robust_loss_pytorch
class AdaptiveRobustLoss(nn.Module):
"""
This class implements the adaptive robust loss function proposed by <NAME> for image tensors
"""
def __init__(self, device: str = 'cuda:0', num_of_dimension: int = 3 * 6 * 1024 * 768) -> None:
"""
Constructor method
"""
super(AdaptiveRobustLoss, self).__init__()
# Save parameter
self.num_of_dimension = num_of_dimension
# Init adaptive loss module
self.loss_function = robust_loss_pytorch.AdaptiveLossFunction(num_dims=num_of_dimension, device=device,
float_dtype=torch.float)
def forward(self, prediction: torch.Tensor, label: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the loss module
:param prediction: (torch.Tensor) Prediction
:param label: (torch.Tensor) Corresponding label
:return: (torch.Tensor) Scalar loss value
"""
# Calc difference of the prediction an the label
loss = prediction - label
# Reshape loss to use adaptive loss module
loss = loss.view(-1, self.num_of_dimension)
# Perform adaptive loss
loss = self.loss_function.lossfun(loss)
# Perform mean reduction
loss = loss.mean()
return loss
class WassersteinDiscriminatorLoss(nn.Module):
"""
This class implements the wasserstein loss for a discriminator network
"""
def __init__(self) -> None:
"""
Constructor method
"""
# Call super constructor
super(WassersteinDiscriminatorLoss, self).__init__()
def forward(self, prediction_real: torch.Tensor, prediction_fake: torch.Tensor) -> Tuple[
torch.Tensor, torch.Tensor]:
"""
Forward pass of the loss module
:param prediction_real: (torch.Tensor) Prediction for real samples
        :param prediction_fake: (torch.Tensor) Prediction for fake samples
:return: (torch.Tensor) Scalar loss value
"""
# Compute loss
loss_real = - torch.mean(prediction_real)
loss_fake = torch.mean(prediction_fake)
return loss_real, loss_fake
class WassersteinGeneratorLoss(nn.Module):
"""
This class implements the wasserstein loss for a generator network
"""
def __init__(self) -> None:
"""
Constructor method
"""
# Call super constructor
super(WassersteinGeneratorLoss, self).__init__()
def forward(self, prediction_fake: torch.Tensor) -> torch.Tensor:
"""
Forward pass of the loss module
        :param prediction_fake: (torch.Tensor) Prediction for fake samples
:return: (torch.Tensor) Scalar loss value
"""
# Compute loss
loss = - torch.mean(prediction_fake)
return loss
class NonSaturatingLogisticGeneratorLoss(nn.Module):
'''
Implementation of the non saturating GAN loss for the generator network
Source: https://github.com/ChristophReich1996/BCS_Deep_Learning/blob/master/Semantic_Pyramid_Style_Gan_2/lossfunction.py
'''
def __init__(self) -> None:
'''
Constructor method
'''
# Call super constructor
super(NonSaturatingLogisticGeneratorLoss, self).__init__()
def __repr__(self):
'''
Get representation of the loss module
:return: (str) String including information
'''
return '{}'.format(self.__class__.__name__)
def forward(self, prediction_fake: torch.Tensor) -> torch.Tensor:
'''
Forward pass to compute the generator loss
:param prediction_fake: (torch.Tensor) Prediction of the discriminator for fake samples
:return: (torch.Tensor) Loss value
'''
# Calc loss
loss = torch.mean(F.softplus(-prediction_fake))
return loss
class NonSaturatingLogisticDiscriminatorLoss(nn.Module):
'''
Implementation of the non saturating GAN loss for the discriminator network
Source: https://github.com/ChristophReich1996/BCS_Deep_Learning/blob/master/Semantic_Pyramid_Style_Gan_2/lossfunction.py
'''
def __init__(self) -> None:
'''
Constructor
'''
# Call super constructor
super(NonSaturatingLogisticDiscriminatorLoss, self).__init__()
def forward(self, prediction_real: torch.Tensor, prediction_fake: torch.Tensor) -> Tuple[
torch.Tensor, torch.Tensor]:
'''
Forward pass. Loss parts are not summed up to not retain the whole backward graph later.
:param prediction_real: (torch.Tensor) Prediction of the discriminator for real images
:param prediction_fake: (torch.Tensor) Prediction of the discriminator for fake images
:return: (torch.Tensor) Loss values for real and fake part
'''
# Calc real loss part
loss_real = torch.mean(F.softplus(-prediction_real))
# Calc fake loss part
loss_fake = torch.mean(F.softplus(prediction_fake))
return loss_real, loss_fake
class PerceptualLoss(nn.Module):
"""
This class implements perceptual loss
"""
def __init__(self, loss_function: nn.Module = nn.L1Loss(reduction='mean')) -> None:
"""
Constructor method
:param loss_function: (nn.Module) Loss function to be utilized to construct the perceptual loss
"""
# Call super constructor
super(PerceptualLoss, self).__init__()
# Save loss function
self.loss_function = loss_function
def forward(self, features_prediction: List[torch.Tensor], features_label: List[torch.Tensor]) -> torch.Tensor:
"""
Forward pass of the loss module
:param features_prediction: (List[torch.Tensor]) List of VGG-19 features of the prediction
:param features_label: (List[torch.Tensor]) List of VGG-19 features of the label
:return: (torch.Tensor) Scalar loss value
"""
# Init loss value
loss = torch.tensor(0.0, dtype=torch.float, device=features_prediction[0].device)
# Loop over all features
for feature_prediction, feature_label in zip(features_prediction, features_label):
# Calc loss and sum up
loss = loss + self.loss_function(feature_prediction, feature_label)
# Average loss with number of features
loss = loss / len(features_prediction)
return loss
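
# Illustrative usage sketch, not part of the original module; the tensor
# shapes below are arbitrary assumptions.
if __name__ == '__main__':
    d_loss = WassersteinDiscriminatorLoss()
    g_loss = WassersteinGeneratorLoss()
    prediction_real, prediction_fake = torch.randn(8, 1), torch.randn(8, 1)
    loss_real, loss_fake = d_loss(prediction_real, prediction_fake)
    print(loss_real.item(), loss_fake.item(), g_loss(prediction_fake).item())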
|
[
"torch.mean",
"robust_loss_pytorch.AdaptiveLossFunction",
"torch.nn.L1Loss",
"torch.nn.functional.softplus",
"torch.tensor"
] |
[((620, 732), 'robust_loss_pytorch.AdaptiveLossFunction', 'robust_loss_pytorch.AdaptiveLossFunction', ([], {'num_dims': 'num_of_dimension', 'device': 'device', 'float_dtype': 'torch.float'}), '(num_dims=num_of_dimension, device=\n device, float_dtype=torch.float)\n', (660, 732), False, 'import robust_loss_pytorch\n'), ((2270, 2297), 'torch.mean', 'torch.mean', (['prediction_fake'], {}), '(prediction_fake)\n', (2280, 2297), False, 'import torch\n'), ((5356, 5383), 'torch.nn.L1Loss', 'nn.L1Loss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (5365, 5383), True, 'import torch.nn as nn\n'), ((6161, 6235), 'torch.tensor', 'torch.tensor', (['(0.0)'], {'dtype': 'torch.float', 'device': 'features_prediction[0].device'}), '(0.0, dtype=torch.float, device=features_prediction[0].device)\n', (6173, 6235), False, 'import torch\n'), ((2222, 2249), 'torch.mean', 'torch.mean', (['prediction_real'], {}), '(prediction_real)\n', (2232, 2249), False, 'import torch\n'), ((2940, 2967), 'torch.mean', 'torch.mean', (['prediction_fake'], {}), '(prediction_fake)\n', (2950, 2967), False, 'import torch\n'), ((3974, 4002), 'torch.nn.functional.softplus', 'F.softplus', (['(-prediction_fake)'], {}), '(-prediction_fake)\n', (3984, 4002), True, 'import torch.nn.functional as F\n'), ((5056, 5084), 'torch.nn.functional.softplus', 'F.softplus', (['(-prediction_real)'], {}), '(-prediction_real)\n', (5066, 5084), True, 'import torch.nn.functional as F\n'), ((5147, 5174), 'torch.nn.functional.softplus', 'F.softplus', (['prediction_fake'], {}), '(prediction_fake)\n', (5157, 5174), True, 'import torch.nn.functional as F\n')]
|
"""
Only needed to install the tool in editable mode. See:
https://setuptools.readthedocs.io/en/latest/userguide/quickstart.html#development-mode
"""
import setuptools
setuptools.setup()
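
# Illustrative only: with this file in place the package can be installed in
# editable mode via `pip install -e .`; the actual package metadata
# presumably lives in setup.cfg or pyproject.toml.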
|
[
"setuptools.setup"
] |
[((169, 187), 'setuptools.setup', 'setuptools.setup', ([], {}), '()\n', (185, 187), False, 'import setuptools\n')]
|
from markdown.preprocessors import Preprocessor
import re
class CommentPreprocessor(Preprocessor):
''' Searches a Document for comments (e.g. {comment example text here})
and removes them from the document.
'''
def __init__(self, ext, *args, **kwargs):
'''
Args:
ext: An instance of the Markdown parser class.
'''
super().__init__(*args, **kwargs)
self.processor = 'comment'
self.pattern = re.compile(ext.processor_info[self.processor]['pattern'])
def test(self, lines):
'''Return whether the provided document contains comments needing removal.
Args:
lines: A string of Markdown text.
Returns:
True if the document needs to be processed.
'''
return self.pattern.search(lines) is not None
def run(self, lines):
''' Removes all instances of text that match the following
example {comment example text here}. Inherited from
Preprocessor class.
Args:
lines: A list of lines of the Markdown document to be converted.
Returns:
Markdown document with comments removed.
'''
for i, line in enumerate(lines):
lines[i] = re.sub(self.pattern, '', line)
return lines
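
# Illustrative usage sketch, not part of the original module; the extension
# stub and the regex pattern below are assumptions made up for the example.
if __name__ == '__main__':
    class _StubExtension:
        processor_info = {'comment': {'pattern': r'\{comment [^}]*\}'}}

    preprocessor = CommentPreprocessor(_StubExtension())
    print(preprocessor.run(['Keep this {comment but drop this} text.']))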
|
[
"re.sub",
"re.compile"
] |
[((469, 526), 're.compile', 're.compile', (["ext.processor_info[self.processor]['pattern']"], {}), "(ext.processor_info[self.processor]['pattern'])\n", (479, 526), False, 'import re\n'), ((1259, 1289), 're.sub', 're.sub', (['self.pattern', '""""""', 'line'], {}), "(self.pattern, '', line)\n", (1265, 1289), False, 'import re\n')]
|
# coding: utf-8
# In[1]:
get_ipython().run_cell_magic('javascript', '', '<!-- Ignore this block -->\nIPython.OutputArea.prototype._should_scroll = function(lines) {\n return false;\n}')
# ## Use housing data
# I have loaded the required modules: Pandas and NumPy. I have also included the sqrt function from the math library.<br>
# I have imported division from the __future__ module. Remove this if the code is executed on Python 3; the import mimics the behaviour of the Python 3 division operator on Python 2.
# In[2]:
import pandas as pd
import numpy as np
from __future__ import division
from math import sqrt
""" File path change accordingly"""
inputFilepath = "data/house.csv"
"""Using default seperator"""
housingData = pd.read_csv(inputFilepath)
housingData.head(10)
# ### TextEncoder
#
# Here the data is a mix of numbers and text. Text values cannot be used directly and should be converted to numeric data.<br>
# For this I have created a function textEncoder which accepts a pandas series. textEncoder returns a lookup dictionary for recreating the numeric value from a text value, together with the encoded text vector.
# For encoding I have applied a lambda function that returns the value from the dictionary.
# In[3]:
""" Converts the text features into numeric values so that they can be used by
the downstream algorithms.
Accepts pandas series and returns lookup dictionary and encoded vector"""
def textEncoder(textVector):
if type(textVector) == pd.core.series.Series:
lookUpDictionary = {}
lookupValue = 1
for key in textVector.unique():
lookUpDictionary[key] = lookupValue
lookupValue +=1
textVector = textVector.apply(lambda a: lookUpDictionary[a])
return lookUpDictionary,textVector
else:
raise TypeError("Expected a pandas series as an input")
# I have encoded the nbhd and brick columns using textEncoder. The lookup dictionary is not used in downstream code. However, any future predictions will require text data to be encoded, and hence I have provided the lookup dictionary.
# In[4]:
nbhdFeatureLookup, housingData['nbhd'] = textEncoder(housingData['nbhd'])
brickFeatureLookup, housingData['brick'] = textEncoder(housingData['brick'])
housingData.head(10)
# ### SplitDataSet Procedure
# This method splits the dataset into a train set and a test set based upon the trainSetSize value. For splitting the dataset, I am using pandas.sample. This gives me the train set. For the test set I am calculating the complement of the train set, which I do by dropping the indices present in the training set.
# In[5]:
"""Splits the provided pandas dataframe into training and test dataset"""
def splitDataSet(inputDataframe, trainSetSize):
trainSet = inputDataframe.sample(frac=trainSetSize)
testSet = inputDataframe.drop(trainSet.index)
return trainSet,testSet
# ## 2. Choose those columns, which can help you in prediction i.e. contain some useful information. You can drop irrelevant columns. Give reason for choosing or dropping any column.
# ### generatePearsonCoefficient Procedure
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/f76ccfa7c2ed7f5b085115086107bbe25d329cec" />
# For sample:-
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bd1ccc2979b0fd1c1aec96e386f686ae874f9ec0" />
# For selecting some features and for dropping others I am using Pearson's Coefficient. The value of Pearson's coefficient lies between [-1, 1] and tells how two features are related<br>
# <table>
# <tr><td>Strength of Association</td><td>Positive</td><td>Negative</td></tr><tr><td>Small</td><td>.1 to .3 </td><td>-0.1 to -0.3 </td></tr><tr><td>Medium</td><td>.3 to .5 </td><td>-0.3 to -0.5 </td></tr><tr><td>Large</td><td>.5 to 1.0 </td><td>-0.5 to -1.0 </td></tr></table>
#
# In[6]:
"""Generate pearson's coefficient"""
def generatePearsonCoefficient(A, B):
A_meanDiff = A - A.mean()
B_meanDiff = B - B.mean()
return ((A_meanDiff * B_meanDiff).sum())/(sqrt((
A_meanDiff * A_meanDiff).sum()) * sqrt((B_meanDiff * B_meanDiff).sum()))
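# A quick illustrative check on assumed toy vectors (not the housing data): the value
# should match the off-diagonal entry of numpy's corrcoef for the same two vectors.
_check_a = pd.Series([1.0, 2.0, 3.0, 4.0, 5.0])
_check_b = pd.Series([2.0, 4.1, 5.9, 8.0, 10.2])
print(generatePearsonCoefficient(_check_a, _check_b), np.corrcoef(_check_a, _check_b)[0, 1])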
# In[7]:
"""Generate the value of pearson constant for all the features"""
print("Pearson's coefficient of corelation for "+
"nbhd and price is "+ str(generatePearsonCoefficient(housingData.nbhd,housingData.price)))
print("Pearson's coefficient of corelation for "+
"offers and price is "+ str(generatePearsonCoefficient(housingData.offers,housingData.price)))
print("Pearson's coefficient of corelation for "+
"sqft and price is "+ str(generatePearsonCoefficient(housingData.sqft,housingData.price)))
print("Pearson's coefficient of corelation for "+
"bedrooms and price is "+ str(generatePearsonCoefficient(housingData.bedrooms,housingData.price)))
print("Pearson's coefficient of corelation for "+
"bathrooms and price is "+ str(generatePearsonCoefficient(housingData.bathrooms,housingData.price)))
print("Pearson's coefficient of corelation for "+
"brick and price is "+ str(generatePearsonCoefficient(housingData.brick,housingData.price)))
# The value of Pearson's coefficient suggests that sqft, bedrooms and bathrooms have strong correlation with price. Offers has a weak negative correlation, and nbhd and brick have medium correlation with price. I am keeping all the features as they all have some correlation with price.
# # Visualizing the relation between the X and Y
# Here I have used subplots to plot different X features and their relation with Y.
# In[8]:
import matplotlib.pyplot as plt
"""Set global rcParams for pyplotlib"""
plt.rcParams["figure.figsize"] = "18,15"
figure, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3,2,sharey='none')
ax1.plot(housingData.nbhd,housingData.price,"ro")
ax1.grid()
ax1.set_title("nbhd vs price")
ax2.plot(housingData.offers,housingData.price,"ro")
ax2.grid()
ax2.set_title("no of offers vs price")
ax3.plot(housingData.sqft,housingData.price,"ro")
ax3.grid()
ax3.set_title("sqft vs price")
ax4.plot(housingData.brick,housingData.price,"ro")
ax4.grid()
ax4.set_title("brick vs price")
ax5.plot(housingData.bedrooms,housingData.price,"ro")
ax5.grid()
ax5.set_title("no of bedrooms vs price")
ax6.plot(housingData.bathrooms,housingData.price,"ro")
ax6.grid()
ax6.set_title("bathrooms vs price")
plt.show()
# ### gaussianSolverProcedure
# <b>Algorithm:-</b><br>
# <b>Step-1</b><br>
# Generate an augmented matrix.<br>
# <b>Step-2</b><br>
# Calculate the pivot for the current column. The pivot is defined as the largest (absolute) value in the column at or below the current row.<br>
# <b>Step-3</b><br>
# Place the pivot in the current row and column (swap the rows).<br>
# <b>Step-4</b><br>
# Make the values of the other elements below the pivot zero. Use only row operations for this. Repeat this until we get an upper triangular matrix.<br>
# <b>Step-5</b><br>
# Solve the upper triangular matrix using backward substitution.<br><br>
#
# The gaussian solver accepts two matrices A and B and tries to solve for x such that Ax = B
#
# In[9]:
"""Method for solving system of linear equations using gaussian elimination method"""
def gaussianSolver(A,B):
augmentedMatrix = np.hstack((A,B)) * 1.0
n = augmentedMatrix.shape[0]
for i in range(0, n):
"""Set default pivot value as diagonal matrix """
pivot = augmentedMatrix[i][i]
pivotRow = i
"""Check for a bigger pivot value"""
for j in range(i+1, n):
if abs(augmentedMatrix[j][i]) > abs(pivot):
pivot = augmentedMatrix[j][i]
pivotRow = j
"""If pivot has changed. Swap the rows"""
if pivotRow != i:
for j in range(0, n+1):
augmentedMatrix[pivotRow][j], augmentedMatrix[i][j] = augmentedMatrix[i][j], augmentedMatrix[pivotRow][j]
"""Make all the column values below pivot as zero by performing matrix row operations"""
for j in range(i+1, n):
op = -1 * (augmentedMatrix[j][i]/augmentedMatrix[i][i])
for k in range(0, n+1):
augmentedMatrix[j][k] = augmentedMatrix[j][k] + ( op * augmentedMatrix[i][k] )
""" Backward substitution to get values for B"""
beta = np.zeros(n)
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * augmentedMatrix[i][k])
beta[i] = (augmentedMatrix[i][n] - diff)/augmentedMatrix[i][i]
return beta
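# A quick sanity check on an assumed toy 3x3 system (not from the dataset): the
# hand-rolled solver should agree with numpy's reference np.linalg.solve up to round-off.
_A_check = np.array([[4.0, 1.0, 2.0], [1.0, 3.0, 0.0], [2.0, 0.0, 5.0]])
_B_check = np.array([[1.0], [2.0], [3.0]])
print(gaussianSolver(_A_check, _B_check), np.linalg.solve(_A_check, _B_check).flatten())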
# ### choleskyDecompositionSolver Procedure
# As per Cholesky decomposition a positive definite matrix A can be represented as L.L<sup>T</sup> where L is a lower triangular matrix and L<sup>T</sup> is its transpose.<br>
# Here L is called the Cholesky factor<br>
# The problem comes down to L.L<sup>T</sup>x = B<br>
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/abf826b0ffb86e190d432828d7485f52f618eaed" />
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/bb5adc5916e0762b2eca921de3e70ccae9bd2999" />
#
# <b>Algorithm:-</b><br>
# <b>Step-1</b><br>
# Initialize a zero matrix<br>
# <b>Step-2</b><br>
# Calculate L using the above formula. When a diagonal element has been calculated, stop and move on to the next row. This will generate a lower triangular matrix<br/>
# <b>Step-3</b><br>
# Calculate vector Y using forward substitution. LY = b<br>
# <b>Step-4</b><br>
# Calculate vector X using backward substitution: L<sup>T</sup>X = Y<br>
# In[10]:
"""Method for solving the system of linear equations using cholesky decomposition"""
def choleskyDecompositionSolver(A, B):
"""Converting the matrix values to float"""
A = A * 1.0
B = B * 1.0
n = A.shape[0]
if A.shape[0] == A.shape[1]:
"""Generate cholesky factor"""
L = np.zeros(shape = A.shape)
for i in range(0, n):
for j in range (0, n):
L[i][j] = A[i][j]
"""Calculating diagonal elements"""
if i == j:
for k in range(0, j):
L[i][j] = L[i][j] - (L[i][k] * L[i][k])
L[i][j] = sqrt(L[i][j])
break;
"""Calculating non diagonal elements"""
product = 0
for k in range (0, j):
product = product + (L[i][k] * L[j][k])
L[i][j] = (L[i][j] - product)/L[j][j]
"""Solving the system of linear equation
Ax=b
A can be decomposed into LU such that
Ly=b
Ux=y """
"""Forward substitution"""
Y = np.zeros(n)
for i in range(0, n):
diff = 0
for k in range (i -1, -1, -1):
diff = diff + (Y[k] * L[i][k])
Y[i] = (B[i] - diff)/L[i][i]
"""Backward substitution"""
beta = np.zeros(n)
U = L.T
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * U[i][k])
beta[i] = (Y[i] - diff)/U[i][i]
return beta
else:
raise ValueError("Matrix A is not symmetric")
# ### qrDecompositionSolver Procedure
# A matrix A can be represented as product of Q and R where Q is orthogonal matrix (Q<sup>T</sup>Q = QQ<sup>T</sup> = I) and R is upper triangular matrix.
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/4b845398dd7df51edc31561a612423b20a83eb04" />
# <img src="https://wikimedia.org/api/rest_v1/media/math/render/svg/74356955f03f5c1171e9e812174a715eb112aef8" />
# <br>QR decomposition can be done in four steps
# <ul><li>Calculation of orthogonal basis</li><li>Calculation of orthonormal</li><li>QR factor calculation</li><li>Solving system of linear equation</li></ul>
# <br>
# <b>Algorithm:-</b><br>
# <b>Step-1</b><br>
# Calculate the orthogonal basis using the Gram-Schmidt method. For the first vector the value is itself. For subsequent vectors the orthogonal basis is the vector minus its projections onto the previously calculated orthogonal basis vectors
# <br><b>Step-2</b><br>
# For calculating the orthonormal basis we divide each orthogonal basis vector by its magnitude
# <br><b>Step-3</b><br>
# Q = [orthonormalBasis]<br>
# R = Q<sup>T</sup>A
# <br><b>Step-4</b><br>
# For calculating the value of X in AX = B,<br>
# We calculate Y = Q<sup>T</sup>B<br>
# We solve RX = Y using backward substitution
#
# In[11]:
"""QR decomposition can be done in three steps
1) Calculation orthogonal basis
2) Calculation orthonormal
3) QR factor calculation"""
def qrDecompositionSolver(A, B):
A = A * 1.0
B = B * 1.0
"""Calculating the orthogonal basis"""
n = A.shape[1]
# Store deepcopy of A for processing
orthoBasis = np.array(A, copy = True)
for i in range(1, n):
"""Calculate the projections"""
diff = 0
for j in range(i-1, -1, -1):
diff = diff + (np.dot(orthoBasis[:,i],
orthoBasis[:,j])/np.dot(orthoBasis[:,j],orthoBasis[:,j]))*orthoBasis[:,j]
orthoBasis[:,i] = orthoBasis[:,i] - diff
"""Calculating orthonormal"""
for i in range(0, n):
orthoBasis[:,i] = orthoBasis[:,i]/np.sqrt(np.sum(np.square(orthoBasis[:,i])))
"""QR factorization"""
Q = orthoBasis
R = np.dot(orthoBasis.T,A)
"""Solving system of linear equation"""
B = np.dot(Q.T,B)
"""Backward substitution"""
beta = np.zeros(n)
for i in range(n - 1, -1,-1):
diff = 0
for k in range (i + 1, n):
diff = diff + (beta[k] * R[i][k])
beta[i] = (B[i] - diff)/R[i][i]
return beta
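# Illustrative check on the same assumed toy system (columns of the matrix are linearly
# independent, as Gram-Schmidt requires): the QR-based solution should also match np.linalg.solve.
_A_qr = np.array([[4.0, 1.0, 2.0], [1.0, 3.0, 0.0], [2.0, 0.0, 5.0]])
_B_qr = np.array([1.0, 2.0, 3.0])
print(qrDecompositionSolver(_A_qr, _B_qr), np.linalg.solve(_A_qr, _B_qr))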
# ### learnLinregNormEq
# Solves system of linear equation in form of <br>
# X<sup>T</sup>XB = X<sup>T</sup>Y<br>
# Accepts three arguments X, Y and solver. Default value for solver is gaussianSolver
# In[12]:
"""Method to learn linear regression using normal equations. Default solver is
gaussian solver"""
def learnLinregNormEq(X, Y, solver = gaussianSolver):
if isinstance(X,np.ndarray) and isinstance(Y,np.ndarray):
if X.shape[0] != Y.shape[0]:
raise ValueError("The shape of X and Y is inconsistant")
X = np.insert(X, 0, 1, axis=1)
Xtranspose = X.T
XtX = np.dot(Xtranspose,X)
XtY = np.dot(Xtranspose,Y)
return solver(XtX, XtY)
else:
raise TypeError("Expected X and Y as numpy.ndarray")
# ### predictLinearRegression Procedure
# This method predicts the value of Y given X and the model parameters. It adds a bias column to X.
# In[13]:
"""Method to make prediction for yTest"""
def predictionLinearRegression(X, modelParameters):
X = np.insert(X, 0, 1, axis=1)
yPrediction = np.dot(X,modelParameters.T)
return yPrediction
# ### RMSE procedure
# Will calculate root mean squared error for given Ytrue values and YPrediction
#
# In[14]:
"""Model accuracy estimator RMSE"""
def RMSE(yTrue, yPrediction):
n = yTrue.shape[0]
    return sqrt((1.0 / n) * np.sum(np.square(yTrue - yPrediction)))
# # Solving the linear equations using gaussianSolver
# Here I am splitting the dataset into training and test set. For this I am using splitDataSet procedure with 80-20 split.<br>
# I have taken all the features.
# In[15]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# I am learning linear regression using default (Gaussian) solver. I am making predictions using predictionLinearRegression procedure. I am calculating the RMSE using RMSE procedure and average of residuals using mean.
# In[16]:
"""Learn model parameters using gaussian solver"""
modelParamsGaussian = learnLinregNormEq(Xtrain, Ytrain)
"""Make prediction using modelParams"""
yPredictionGaussian = predictionLinearRegression(Xtest, modelParamsGaussian)
"""Calulate RMSE"""
print("RMSE for gaussian solver is "+str(RMSE(Ytest.flatten(),yPredictionGaussian)))
print("Average residual for gaussian solver is "+str((Ytest.flatten() - yPredictionGaussian).mean()))
# In[17]:
plt.plot(yPredictionGaussian - Ytest.flatten(), Ytest,"ro",label="ytest - ybar vs ytest")
plt.title("Plot for gaussian solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
# # Solving the system of equations using Cholesky method
# In[18]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# In[19]:
"""Learn model parameters using Cholesky solver"""
modelParamsCholesky = learnLinregNormEq(Xtrain, Ytrain,choleskyDecompositionSolver)
"""Make prediction using modelParams"""
yPredictionCholesky = predictionLinearRegression(Xtest, modelParamsCholesky)
"""Calulate RMSE"""
print("RMSE for Cholesky solver is "+str(RMSE(Ytest.flatten(),yPredictionCholesky)))
print("Average residual for Cholesky solver is "+str((Ytest.flatten() - yPredictionCholesky).mean()))
# In[20]:
plt.plot(yPredictionCholesky - Ytest.flatten(), Ytest,"bo",label="ytest - ybar vs ytest")
plt.title("Plot for Cholesky solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
# # Solving the system of equations using QR decomposition method
# In[21]:
trainSet,testSet = splitDataSet(housingData,0.8)
Ytrain = trainSet.as_matrix(columns=['price'])
Ytest = testSet.as_matrix(columns=['price'])
print("Total items in training set "+str(Ytrain.shape))
print("Total items in test set "+str(Ytest.shape))
Xtrain = trainSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
Xtest = testSet.as_matrix(columns = ['sqft','bedrooms','bathrooms','brick','nbhd','offers'])
# In[22]:
"""Learn model parameters using QR Decomposition solver"""
modelParamsQR = learnLinregNormEq(Xtrain, Ytrain,qrDecompositionSolver)
"""Make prediction using modelParams"""
yPredictionQR = predictionLinearRegression(Xtest, modelParamsQR)
"""Calulate RMSE"""
print("RMSE for QR Decomposition solver is "+str(RMSE(Ytest.flatten(),yPredictionQR)))
print("Average residual for QR Decomposition solver is "+str((Ytest.flatten() - yPredictionQR).mean()))
# In[23]:
plt.plot(yPredictionQR - Ytest.flatten(), Ytest,"go",label="ytest - ybar vs ytest")
plt.title("Plot for QR decomposition solver")
plt.xlabel("ytest - ybar")
plt.ylabel("ytest")
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"math.sqrt",
"pandas.read_csv",
"numpy.square",
"numpy.zeros",
"numpy.insert",
"numpy.hstack",
"numpy.array",
"numpy.dot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.subplots"
] |
[((722, 748), 'pandas.read_csv', 'pd.read_csv', (['inputFilepath'], {}), '(inputFilepath)\n', (733, 748), True, 'import pandas as pd\n'), ((5725, 5758), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {'sharey': '"""none"""'}), "(3, 2, sharey='none')\n", (5737, 5758), True, 'import matplotlib.pyplot as plt\n'), ((6354, 6364), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6362, 6364), True, 'import matplotlib.pyplot as plt\n'), ((16720, 16757), 'matplotlib.pyplot.title', 'plt.title', (["('Plot for gaussian solver')"], {}), "('Plot for gaussian solver')\n", (16729, 16757), True, 'import matplotlib.pyplot as plt\n'), ((16758, 16784), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ytest - ybar"""'], {}), "('ytest - ybar')\n", (16768, 16784), True, 'import matplotlib.pyplot as plt\n'), ((16785, 16804), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ytest"""'], {}), "('ytest')\n", (16795, 16804), True, 'import matplotlib.pyplot as plt\n'), ((16805, 16815), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16813, 16815), True, 'import matplotlib.pyplot as plt\n'), ((17906, 17943), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot for Cholesky solver"""'], {}), "('Plot for Cholesky solver')\n", (17915, 17943), True, 'import matplotlib.pyplot as plt\n'), ((17944, 17970), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ytest - ybar"""'], {}), "('ytest - ybar')\n", (17954, 17970), True, 'import matplotlib.pyplot as plt\n'), ((17971, 17990), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ytest"""'], {}), "('ytest')\n", (17981, 17990), True, 'import matplotlib.pyplot as plt\n'), ((17991, 18001), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (17999, 18001), True, 'import matplotlib.pyplot as plt\n'), ((19084, 19129), 'matplotlib.pyplot.title', 'plt.title', (['"""Plot for QR decomposition solver"""'], {}), "('Plot for QR decomposition solver')\n", (19093, 19129), True, 'import matplotlib.pyplot as plt\n'), ((19130, 19156), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ytest - ybar"""'], {}), "('ytest - ybar')\n", (19140, 19156), True, 'import matplotlib.pyplot as plt\n'), ((19157, 19176), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""ytest"""'], {}), "('ytest')\n", (19167, 19176), True, 'import matplotlib.pyplot as plt\n'), ((19177, 19187), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (19185, 19187), True, 'import matplotlib.pyplot as plt\n'), ((8306, 8317), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (8314, 8317), True, 'import numpy as np\n'), ((12902, 12924), 'numpy.array', 'np.array', (['A'], {'copy': '(True)'}), '(A, copy=True)\n', (12910, 12924), True, 'import numpy as np\n'), ((13474, 13497), 'numpy.dot', 'np.dot', (['orthoBasis.T', 'A'], {}), '(orthoBasis.T, A)\n', (13480, 13497), True, 'import numpy as np\n'), ((13554, 13568), 'numpy.dot', 'np.dot', (['Q.T', 'B'], {}), '(Q.T, B)\n', (13560, 13568), True, 'import numpy as np\n'), ((13616, 13627), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (13624, 13627), True, 'import numpy as np\n'), ((14908, 14934), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (14917, 14934), True, 'import numpy as np\n'), ((14953, 14981), 'numpy.dot', 'np.dot', (['X', 'modelParameters.T'], {}), '(X, modelParameters.T)\n', (14959, 14981), True, 'import numpy as np\n'), ((7195, 7212), 'numpy.hstack', 'np.hstack', (['(A, B)'], {}), '((A, B))\n', (7204, 7212), True, 'import numpy as np\n'), ((9891, 9914), 'numpy.zeros', 'np.zeros', ([], {'shape': 'A.shape'}), '(shape=A.shape)\n', (9899, 9914), True, 'import numpy as np\n'), ((10755, 10766), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (10763, 10766), True, 'import numpy as np\n'), ((11009, 11020), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (11017, 11020), True, 'import numpy as np\n'), ((14389, 14415), 'numpy.insert', 'np.insert', (['X', '(0)', '(1)'], {'axis': '(1)'}), '(X, 0, 1, axis=1)\n', (14398, 14415), True, 'import numpy as np\n'), ((14464, 14485), 'numpy.dot', 'np.dot', (['Xtranspose', 'X'], {}), '(Xtranspose, X)\n', (14470, 14485), True, 'import numpy as np\n'), ((14499, 14520), 'numpy.dot', 'np.dot', (['Xtranspose', 'Y'], {}), '(Xtranspose, Y)\n', (14505, 14520), True, 'import numpy as np\n'), ((10252, 10265), 'math.sqrt', 'sqrt', (['L[i][j]'], {}), '(L[i][j])\n', (10256, 10265), False, 'from math import sqrt\n'), ((13382, 13409), 'numpy.square', 'np.square', (['orthoBasis[:, i]'], {}), '(orthoBasis[:, i])\n', (13391, 13409), True, 'import numpy as np\n'), ((15245, 15275), 'numpy.square', 'np.square', (['(yTrue - yPrediction)'], {}), '(yTrue - yPrediction)\n', (15254, 15275), True, 'import numpy as np\n'), ((13079, 13121), 'numpy.dot', 'np.dot', (['orthoBasis[:, i]', 'orthoBasis[:, j]'], {}), '(orthoBasis[:, i], orthoBasis[:, j])\n', (13085, 13121), True, 'import numpy as np\n'), ((13154, 13196), 'numpy.dot', 'np.dot', (['orthoBasis[:, j]', 'orthoBasis[:, j]'], {}), '(orthoBasis[:, j], orthoBasis[:, j])\n', (13160, 13196), True, 'import numpy as np\n')]
|
from django.conf import settings
from django.conf.urls.static import static
from django.conf.urls import url
from . import views
urlpatterns=[
url('^$',views.index,name='index'),
url(r'^new/post$',views.new_project, name='new-project'),
url(r'votes/$',views.vote_project, name='vote_project'),
url(r'^user/(\d+)$',views.detail, name='detail'),
url(r'^detail/edit/$', views.edit_detail, name='edit-detail'),
url(r'^viewproject/(\d+)$',views.view_project, name = 'viewproject') ,
url(r'^search/$', views.search_results, name='search-project'),
url(r'^comment/(?P<project_id>\d+)', views.add_comment, name='comment'),
url(r'^vote/(?P<project_id>\d+)', views.vote, name='vote'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
|
[
"django.conf.urls.static.static",
"django.conf.urls.url"
] |
[((148, 184), 'django.conf.urls.url', 'url', (['"""^$"""', 'views.index'], {'name': '"""index"""'}), "('^$', views.index, name='index')\n", (151, 184), False, 'from django.conf.urls import url\n'), ((188, 244), 'django.conf.urls.url', 'url', (['"""^new/post$"""', 'views.new_project'], {'name': '"""new-project"""'}), "('^new/post$', views.new_project, name='new-project')\n", (191, 244), False, 'from django.conf.urls import url\n'), ((250, 305), 'django.conf.urls.url', 'url', (['"""votes/$"""', 'views.vote_project'], {'name': '"""vote_project"""'}), "('votes/$', views.vote_project, name='vote_project')\n", (253, 305), False, 'from django.conf.urls import url\n'), ((311, 360), 'django.conf.urls.url', 'url', (['"""^user/(\\\\d+)$"""', 'views.detail'], {'name': '"""detail"""'}), "('^user/(\\\\d+)$', views.detail, name='detail')\n", (314, 360), False, 'from django.conf.urls import url\n'), ((365, 425), 'django.conf.urls.url', 'url', (['"""^detail/edit/$"""', 'views.edit_detail'], {'name': '"""edit-detail"""'}), "('^detail/edit/$', views.edit_detail, name='edit-detail')\n", (368, 425), False, 'from django.conf.urls import url\n'), ((432, 499), 'django.conf.urls.url', 'url', (['"""^viewproject/(\\\\d+)$"""', 'views.view_project'], {'name': '"""viewproject"""'}), "('^viewproject/(\\\\d+)$', views.view_project, name='viewproject')\n", (435, 499), False, 'from django.conf.urls import url\n'), ((508, 569), 'django.conf.urls.url', 'url', (['"""^search/$"""', 'views.search_results'], {'name': '"""search-project"""'}), "('^search/$', views.search_results, name='search-project')\n", (511, 569), False, 'from django.conf.urls import url\n'), ((576, 647), 'django.conf.urls.url', 'url', (['"""^comment/(?P<project_id>\\\\d+)"""', 'views.add_comment'], {'name': '"""comment"""'}), "('^comment/(?P<project_id>\\\\d+)', views.add_comment, name='comment')\n", (579, 647), False, 'from django.conf.urls import url\n'), ((653, 711), 'django.conf.urls.url', 'url', (['"""^vote/(?P<project_id>\\\\d+)"""', 'views.vote'], {'name': '"""vote"""'}), "('^vote/(?P<project_id>\\\\d+)', views.vote, name='vote')\n", (656, 711), False, 'from django.conf.urls import url\n'), ((753, 814), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (759, 814), False, 'from django.conf.urls.static import static\n')]
|
"""Convexified Belief Propagation Class"""
import numpy as np
from .MatrixBeliefPropagator import MatrixBeliefPropagator, logsumexp, sparse_dot
class ConvexBeliefPropagator(MatrixBeliefPropagator):
"""
Class to perform convexified belief propagation based on counting numbers. The class allows for non-Bethe
counting numbers for the different factors in the MRF. If the factors are all non-negative, then the adjusted
Bethe free energy is convex, providing better guarantees about the convergence and bounds of the primal
and dual objective values.
"""
def __init__(self, markov_net, counting_numbers=None):
"""
Initialize a convexified belief propagator.
:param markov_net: MarkovNet object encoding the probability distribution
:type markov_net: MarkovNet
:param counting_numbers: a dictionary with an entry for each variable and edge such that the value is a float
representing the counting number to use in computing the convexified Bethe formulas
and corresponding message passing updates.
:type counting_numbers: dict
"""
super(ConvexBeliefPropagator, self).__init__(markov_net)
self.unary_counting_numbers = np.ones(len(self.mn.variables))
self.edge_counting_numbers = np.ones(2 * self.mn.num_edges)
default_counting_numbers = dict()
for var in markov_net.variables:
default_counting_numbers[var] = 1
for neighbor in markov_net.neighbors[var]:
if var < neighbor:
default_counting_numbers[(var, neighbor)] = 1
if counting_numbers:
self._set_counting_numbers(counting_numbers)
else:
self._set_counting_numbers(default_counting_numbers)
def _set_counting_numbers(self, counting_numbers):
"""
Store the provided counting numbers and set up the associated vectors for the ordered variable representation.
:param counting_numbers: a dictionary with an entry for each variable and edge with counting number values
:type counting_numbers: dict
:return: None
"""
self.edge_counting_numbers = np.zeros(2 * self.mn.num_edges)
for edge, i in self.mn.message_index.items():
reversed_edge = edge[::-1]
if edge in counting_numbers:
self.edge_counting_numbers[i] = counting_numbers[edge]
self.edge_counting_numbers[i + self.mn.num_edges] = counting_numbers[edge]
elif reversed_edge in counting_numbers:
self.edge_counting_numbers[i] = counting_numbers[reversed_edge]
self.edge_counting_numbers[i + self.mn.num_edges] = counting_numbers[reversed_edge]
else:
raise KeyError('Edge %s was not assigned a counting number.' % repr(edge))
self.unary_counting_numbers = np.zeros((len(self.mn.variables), 1))
for var, i in self.mn.var_index.items():
self.unary_counting_numbers[i] = counting_numbers[var]
self.unary_coefficients = self.unary_counting_numbers.copy()
for edge, i in self.mn.message_index.items():
self.unary_coefficients[self.mn.var_index[edge[0]]] += self.edge_counting_numbers[i]
self.unary_coefficients[self.mn.var_index[edge[1]]] += self.edge_counting_numbers[i]
def compute_bethe_entropy(self):
if self.fully_conditioned:
entropy = 0
else:
entropy = - np.sum(self.edge_counting_numbers[:self.mn.num_edges] *
(np.nan_to_num(self.pair_belief_tensor) * np.exp(self.pair_belief_tensor))) \
- np.sum(self.unary_counting_numbers.T *
(np.nan_to_num(self.belief_mat) * np.exp(self.belief_mat)))
return entropy
def update_messages(self):
self.compute_beliefs()
adjusted_message_prod = self.mn.edge_pot_tensor - np.hstack((self.message_mat[:, self.mn.num_edges:],
self.message_mat[:, :self.mn.num_edges]))
adjusted_message_prod /= self.edge_counting_numbers
adjusted_message_prod += self.belief_mat[:, self.mn.message_from]
messages = np.squeeze(logsumexp(adjusted_message_prod, 1)) * self.edge_counting_numbers
messages = np.nan_to_num(messages - messages.max(0))
change = np.sum(np.abs(messages - self.message_mat))
self.message_mat = messages
return change
def compute_beliefs(self):
if not self.fully_conditioned:
self.belief_mat = self.mn.unary_mat + self.augmented_mat
self.belief_mat += sparse_dot(self.message_mat, self.mn.message_to_map)
self.belief_mat /= self.unary_coefficients.T
log_z = logsumexp(self.belief_mat, 0)
self.belief_mat = self.belief_mat - log_z
def compute_pairwise_beliefs(self):
if not self.fully_conditioned:
adjusted_message_prod = self.belief_mat[:, self.mn.message_from] \
- np.nan_to_num(np.hstack((self.message_mat[:, self.mn.num_edges:],
self.message_mat[:, :self.mn.num_edges])) /
self.edge_counting_numbers)
to_messages = adjusted_message_prod[:, :self.mn.num_edges].reshape(
(self.mn.max_states, 1, self.mn.num_edges))
from_messages = adjusted_message_prod[:, self.mn.num_edges:].reshape(
(1, self.mn.max_states, self.mn.num_edges))
beliefs = self.mn.edge_pot_tensor[:, :, self.mn.num_edges:] / \
self.edge_counting_numbers[self.mn.num_edges:] + to_messages + from_messages
beliefs -= logsumexp(beliefs, (0, 1))
self.pair_belief_tensor = beliefs
|
[
"numpy.abs",
"numpy.nan_to_num",
"numpy.zeros",
"numpy.ones",
"numpy.hstack",
"numpy.exp"
] |
[((1362, 1392), 'numpy.ones', 'np.ones', (['(2 * self.mn.num_edges)'], {}), '(2 * self.mn.num_edges)\n', (1369, 1392), True, 'import numpy as np\n'), ((2257, 2288), 'numpy.zeros', 'np.zeros', (['(2 * self.mn.num_edges)'], {}), '(2 * self.mn.num_edges)\n', (2265, 2288), True, 'import numpy as np\n'), ((4039, 4137), 'numpy.hstack', 'np.hstack', (['(self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :self.mn.\n num_edges])'], {}), '((self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :\n self.mn.num_edges]))\n', (4048, 4137), True, 'import numpy as np\n'), ((4519, 4554), 'numpy.abs', 'np.abs', (['(messages - self.message_mat)'], {}), '(messages - self.message_mat)\n', (4525, 4554), True, 'import numpy as np\n'), ((5214, 5312), 'numpy.hstack', 'np.hstack', (['(self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :self.mn.\n num_edges])'], {}), '((self.message_mat[:, self.mn.num_edges:], self.message_mat[:, :\n self.mn.num_edges]))\n', (5223, 5312), True, 'import numpy as np\n'), ((3835, 3865), 'numpy.nan_to_num', 'np.nan_to_num', (['self.belief_mat'], {}), '(self.belief_mat)\n', (3848, 3865), True, 'import numpy as np\n'), ((3868, 3891), 'numpy.exp', 'np.exp', (['self.belief_mat'], {}), '(self.belief_mat)\n', (3874, 3891), True, 'import numpy as np\n'), ((3663, 3701), 'numpy.nan_to_num', 'np.nan_to_num', (['self.pair_belief_tensor'], {}), '(self.pair_belief_tensor)\n', (3676, 3701), True, 'import numpy as np\n'), ((3704, 3735), 'numpy.exp', 'np.exp', (['self.pair_belief_tensor'], {}), '(self.pair_belief_tensor)\n', (3710, 3735), True, 'import numpy as np\n')]
|
# general plotting functions
import matplotlib.pyplot as plt
# plot the given hourly profile
def hourly_profile(profile):
hourly_profile_building('SFH',profile)
hourly_profile_building('MFH',profile)
hourly_profile_building('COM',profile)
def hourly_profile_building(building,profile):
for(name,data) in profile[building].iteritems():
data.plot(label=name, use_index=False)
plt.title('Hourly Profiles for ' + building)
plt.xlabel('Hour of the day')
plt.ylabel('Normalised Demand')
plt.legend(loc='upper right')
plt.show()
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] |
[((405, 449), 'matplotlib.pyplot.title', 'plt.title', (["('Hourly Profiles for ' + building)"], {}), "('Hourly Profiles for ' + building)\n", (414, 449), True, 'import matplotlib.pyplot as plt\n'), ((454, 483), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Hour of the day"""'], {}), "('Hour of the day')\n", (464, 483), True, 'import matplotlib.pyplot as plt\n'), ((488, 519), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Normalised Demand"""'], {}), "('Normalised Demand')\n", (498, 519), True, 'import matplotlib.pyplot as plt\n'), ((524, 553), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (534, 553), True, 'import matplotlib.pyplot as plt\n'), ((558, 568), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (566, 568), True, 'import matplotlib.pyplot as plt\n')]
|
#! /usr/bin/python3
# -*- coding: utf-8 -*-
from cuadrado import Cuadrado
def run():
cuad = Cuadrado(1,2,3)
print(cuad.show())
if __name__ == '__main__':
run()
|
[
"cuadrado.Cuadrado"
] |
[((97, 114), 'cuadrado.Cuadrado', 'Cuadrado', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (105, 114), False, 'from cuadrado import Cuadrado\n')]
|
from __future__ import unicode_literals
from django.shortcuts import render
from datetime import date, timedelta
# django:
from django.views.generic import ListView, DetailView
from django.conf import settings
from django.shortcuts import get_object_or_404
from django.utils.dates import MONTHS_ALT
# thirdparties:
import six
# utils
from .models import Event
from events.utils.displays import month_display, day_display
from events.utils.mixins import JSONResponseMixin
from events.utils import common as c
CALENDAR_LOCALE = getattr(settings, 'CALENDAR_LOCALE', 'en_US.utf8')
class GenericEventView(JSONResponseMixin, ListView):
model = Event
def render_to_response(self, context, **kwargs):
if self.request.is_ajax():
return self.render_to_json_response(context, **kwargs)
return super(GenericEventView, self).render_to_response(
context, **kwargs
)
def get_context_data(self, **kwargs):
context = super(GenericEventView, self).get_context_data(**kwargs)
self.net, self.category, self.tag = c.get_net_category_tag(
self.request
)
if self.category is not None:
context['cal_category'] = self.category
if self.tag is not None:
context['cal_tag'] = self.tag
return context
class EventMonthView(GenericEventView):
template_name = 'event_month_list.html'
def get_year_and_month(self, net, qs, **kwargs):
"""
Get the year and month. First tries from kwargs, then from
querystrings. If none, or if cal_ignore qs is specified,
sets year and month to this year and this month.
"""
now = c.get_now()
year = now.year
month = now.month + net
month_orig = None
if 'cal_ignore=true' not in qs:
if 'year' and 'month' in self.kwargs: # try kwargs
year, month_orig = map(
int, (self.kwargs['year'], self.kwargs['month'])
)
month = month_orig + net
else:
try: # try querystring
year = int(self.request.GET['cal_year'])
month_orig = int(self.request.GET['cal_month'])
month = month_orig + net
except Exception:
pass
# return the year and month, and any errors that may have occurred do
# to an invalid month/year being given.
return c.clean_year_month(year, month, month_orig)
def get_month_events(self, *args, **kwargs):
return Event.objects.all_month_events(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EventMonthView, self).get_context_data(**kwargs)
qs = self.request.META['QUERY_STRING']
year, month, error = self.get_year_and_month(self.net, qs)
mini = True if 'cal_mini=true' in qs else False
# get any querystrings that are not next/prev/year/month
if qs:
qs = c.get_qs(qs)
# add a dict containing the year, month, and month name to the context
current = dict(
year=year, month_num=month, month=MONTHS_ALT[month][:3]
)
context['current'] = current
display_month = MONTHS_ALT[month]
if isinstance(display_month, six.binary_type):
display_month = display_month.decode('utf-8')
context['month_and_year'] = u"%(month)s, %(year)d" % (
{'month': display_month, 'year': year}
)
if error: # send any year/month errors
context['cal_error'] = error
# List enables sorting. As far as I can tell, .order_by() can't be used
# here because we need it ordered by l_start_date.hour (simply ordering
# by start_date won't work). The only alternative I've found is to use
# extra(), but this would likely require different statements for
# different databases...
all_month_events = list(self.get_month_events(
year, month, self.category, loc=True
))
all_month_events.sort(key=lambda x: x.l_start_date.hour)
start_day = getattr(settings, "CALENDAR_START_DAY", 0)
context['calendar'] = month_display(
year, month, all_month_events, start_day, self.net, qs, mini,
request=self.request,
)
context['show_events'] = False
if getattr(settings, "CALENDAR_SHOW_LIST", False):
context['show_events'] = True
context['events'] = c.order_events(all_month_events, d=True) \
if self.request.is_ajax() else c.order_events(all_month_events)
return context
class EventDayView(GenericEventView):
template_name = 'event_day_list.html'
def get_month_events(self, *args, **kwargs):
return Event.objects.all_month_events(*args, **kwargs)
def get_context_data(self, **kwargs):
context = super(EventDayView, self).get_context_data(**kwargs)
kw = self.kwargs
y, m, d = map(int, (kw['year'], kw['month'], kw['day']))
year, month, day, error = c.clean_year_month_day(y, m, d, self.net)
if error:
context['cal_error'] = error
# Note that we don't prefetch 'cancellations' because they will be
# prefetched later (in day_display in displays.py)
all_month_events = self.get_month_events(
year, month, self.category, self.tag
)
self.events = day_display(
year, month, all_month_events, day
)
context['events'] = self.events
display_month = MONTHS_ALT[month]
if isinstance(display_month, six.binary_type):
display_month = display_month.decode('utf-8')
context['month'] = display_month
context['month_num'] = month
context['year'] = year
context['day'] = day
context['month_day_year'] = u"%(month)s %(day)d, %(year)d" % (
{'month': display_month, 'day': day, 'year': year}
)
# for use in the template to build next & prev querystrings
context['next'], context['prev'] = c.get_next_and_prev(self.net)
return context
class EventDetailView(DetailView):
model = Event
context_object_name = 'event'
def get_object(self):
return get_object_or_404(
Event.objects.prefetch_related(
'location', 'categories'
),
pk=self.kwargs['pk']
)
|
[
"events.utils.common.clean_year_month_day",
"events.utils.common.get_qs",
"events.utils.common.get_now",
"events.utils.displays.day_display",
"events.utils.common.order_events",
"events.utils.common.get_net_category_tag",
"events.utils.displays.month_display",
"events.utils.common.get_next_and_prev",
"events.utils.common.clean_year_month"
] |
[((1079, 1115), 'events.utils.common.get_net_category_tag', 'c.get_net_category_tag', (['self.request'], {}), '(self.request)\n', (1101, 1115), True, 'from events.utils import common as c\n'), ((1694, 1705), 'events.utils.common.get_now', 'c.get_now', ([], {}), '()\n', (1703, 1705), True, 'from events.utils import common as c\n'), ((2493, 2536), 'events.utils.common.clean_year_month', 'c.clean_year_month', (['year', 'month', 'month_orig'], {}), '(year, month, month_orig)\n', (2511, 2536), True, 'from events.utils import common as c\n'), ((4263, 4364), 'events.utils.displays.month_display', 'month_display', (['year', 'month', 'all_month_events', 'start_day', 'self.net', 'qs', 'mini'], {'request': 'self.request'}), '(year, month, all_month_events, start_day, self.net, qs, mini,\n request=self.request)\n', (4276, 4364), False, 'from events.utils.displays import month_display, day_display\n'), ((5150, 5191), 'events.utils.common.clean_year_month_day', 'c.clean_year_month_day', (['y', 'm', 'd', 'self.net'], {}), '(y, m, d, self.net)\n', (5172, 5191), True, 'from events.utils import common as c\n'), ((5519, 5566), 'events.utils.displays.day_display', 'day_display', (['year', 'month', 'all_month_events', 'day'], {}), '(year, month, all_month_events, day)\n', (5530, 5566), False, 'from events.utils.displays import month_display, day_display\n'), ((6180, 6209), 'events.utils.common.get_next_and_prev', 'c.get_next_and_prev', (['self.net'], {}), '(self.net)\n', (6199, 6209), True, 'from events.utils import common as c\n'), ((3037, 3049), 'events.utils.common.get_qs', 'c.get_qs', (['qs'], {}), '(qs)\n', (3045, 3049), True, 'from events.utils import common as c\n'), ((4569, 4609), 'events.utils.common.order_events', 'c.order_events', (['all_month_events'], {'d': '(True)'}), '(all_month_events, d=True)\n', (4583, 4609), True, 'from events.utils import common as c\n'), ((4659, 4691), 'events.utils.common.order_events', 'c.order_events', (['all_month_events'], {}), '(all_month_events)\n', (4673, 4691), True, 'from events.utils import common as c\n')]
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Optional
from azure.mgmt.core import AsyncARMPipelineClient
from msrest import Deserializer, Serializer
from ._configuration_async import KustoManagementClientConfiguration
from .operations_async import ClusterOperations
from .operations_async import ClusterPrincipalAssignmentOperations
from .operations_async import DatabaseOperations
from .operations_async import DatabasePrincipalAssignmentOperations
from .operations_async import AttachedDatabaseConfigurationOperations
from .operations_async import DataConnectionOperations
from .operations_async import OperationOperations
from .. import models
class KustoManagementClient(object):
"""The Azure Kusto management API provides a RESTful set of web services that interact with Azure Kusto services to manage your clusters and databases. The API enables you to create, update, and delete clusters and databases.
:ivar cluster: ClusterOperations operations
:vartype cluster: azure.mgmt.kusto.aio.operations_async.ClusterOperations
:ivar cluster_principal_assignment: ClusterPrincipalAssignmentOperations operations
:vartype cluster_principal_assignment: azure.mgmt.kusto.aio.operations_async.ClusterPrincipalAssignmentOperations
:ivar database: DatabaseOperations operations
:vartype database: azure.mgmt.kusto.aio.operations_async.DatabaseOperations
:ivar database_principal_assignment: DatabasePrincipalAssignmentOperations operations
:vartype database_principal_assignment: azure.mgmt.kusto.aio.operations_async.DatabasePrincipalAssignmentOperations
:ivar attached_database_configuration: AttachedDatabaseConfigurationOperations operations
:vartype attached_database_configuration: azure.mgmt.kusto.aio.operations_async.AttachedDatabaseConfigurationOperations
:ivar data_connection: DataConnectionOperations operations
:vartype data_connection: azure.mgmt.kusto.aio.operations_async.DataConnectionOperations
:ivar operation: OperationOperations operations
:vartype operation: azure.mgmt.kusto.aio.operations_async.OperationOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Gets subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms part of the URI for every service call.
:type subscription_id: str
:param str base_url: Service URL
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
base_url: Optional[str] = None,
**kwargs: Any
) -> None:
if not base_url:
base_url = 'https://management.azure.com'
self._config = KustoManagementClientConfiguration(credential, subscription_id, **kwargs)
self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self.cluster = ClusterOperations(
self._client, self._config, self._serialize, self._deserialize)
self.cluster_principal_assignment = ClusterPrincipalAssignmentOperations(
self._client, self._config, self._serialize, self._deserialize)
self.database = DatabaseOperations(
self._client, self._config, self._serialize, self._deserialize)
self.database_principal_assignment = DatabasePrincipalAssignmentOperations(
self._client, self._config, self._serialize, self._deserialize)
self.attached_database_configuration = AttachedDatabaseConfigurationOperations(
self._client, self._config, self._serialize, self._deserialize)
self.data_connection = DataConnectionOperations(
self._client, self._config, self._serialize, self._deserialize)
self.operation = OperationOperations(
self._client, self._config, self._serialize, self._deserialize)
async def close(self) -> None:
await self._client.close()
async def __aenter__(self) -> "KustoManagementClient":
await self._client.__aenter__()
return self
async def __aexit__(self, *exc_details) -> None:
await self._client.__aexit__(*exc_details)
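# Hedged usage sketch (the subscription id is a placeholder and azure-identity is an
# assumed dependency for the async credential); it only exercises the async context
# manager defined above.
async def _example_usage() -> None:
    from azure.identity.aio import DefaultAzureCredential
    credential = DefaultAzureCredential()
    async with KustoManagementClient(credential, "<subscription-id>") as client:
        print(type(client).__name__)
    await credential.close()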
|
[
"msrest.Serializer",
"azure.mgmt.core.AsyncARMPipelineClient",
"msrest.Deserializer"
] |
[((3333, 3405), 'azure.mgmt.core.AsyncARMPipelineClient', 'AsyncARMPipelineClient', ([], {'base_url': 'base_url', 'config': 'self._config'}), '(base_url=base_url, config=self._config, **kwargs)\n', (3355, 3405), False, 'from azure.mgmt.core import AsyncARMPipelineClient\n'), ((3523, 3548), 'msrest.Serializer', 'Serializer', (['client_models'], {}), '(client_models)\n', (3533, 3548), False, 'from msrest import Deserializer, Serializer\n'), ((3577, 3604), 'msrest.Deserializer', 'Deserializer', (['client_models'], {}), '(client_models)\n', (3589, 3604), False, 'from msrest import Deserializer, Serializer\n')]
|