// Repository: icoder33/dart_native (file: ios/Classes/DOPointerWrapper.h)
//
// DOPointerWrapper.h
// dart_objc
//
// Created by 杨萧玉 on 2019/11/5.
//
#import <Foundation/Foundation.h>
NS_ASSUME_NONNULL_BEGIN
@interface DOPointerWrapper : NSObject
@property (nonatomic) void *pointer;
@end
NS_ASSUME_NONNULL_END
|
// Given a closed boundary polyline B and a list of vertex indices P with the
// same orientation, finds the index into P of the first vertex that lies
// strictly after the queried point q in circular order, excluding the point
// itself. Returns -1 if no such vertex exists.
Eigen::Index find_closest_corner_after_strict(
    const mat2x& B,
    const vecXi& P,
    const Eigen::Index q
) {
    Eigen::Index min_d = std::numeric_limits<Eigen::Index>::max();
    Eigen::Index min_v = -1;
    for (Eigen::Index i = 0; i < P.size(); ++i) {
        // Forward circular distance from q to vertex P(i) along B.
        const Eigen::Index d = CircularDist(B, q, P(i));
        if (d > 0 && d < min_d) {
            min_d = d;
            min_v = i;
        }
    }
    return min_v;
}
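A minimal Python sketch of the same circular search follows; circular_dist below is a hypothetical stand-in for CircularDist, assumed to return the forward step count from q to p along a closed polyline of n samples.

def find_closest_corner_after_strict(n, P, q):
    """Index into P of the first vertex strictly after q in circular order, or -1."""
    def circular_dist(a, b):
        # Forward steps from a to b on a closed loop of n samples (assumed semantics).
        return (b - a) % n

    min_d, min_v = float("inf"), -1
    for i, p in enumerate(P):
        d = circular_dist(q, p)
        if 0 < d < min_d:
            min_d, min_v = d, i
    return min_v
|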
# -*- coding: utf-8 -*-
# Copyright 2021 Cohesity Inc.
import cohesity_management_sdk.models.aws_credentials
import cohesity_management_sdk.models.a_w_s_fleet_params
import cohesity_management_sdk.models.azure_credentials
import cohesity_management_sdk.models.exchange_dag_protection_preference
import cohesity_management_sdk.models.fleet_network_params
import cohesity_management_sdk.models.gcp_credentials
import cohesity_management_sdk.models.registered_protection_source_isilon_params
import cohesity_management_sdk.models.kubernetes_credentials
import cohesity_management_sdk.models.nas_mount_credential_params
import cohesity_management_sdk.models.office_365_credentials
import cohesity_management_sdk.models.physical_params
import cohesity_management_sdk.models.ssl_verification
import cohesity_management_sdk.models.subnet
import cohesity_management_sdk.models.throttling_policy_parameters
import cohesity_management_sdk.models.throttling_policy_override
import cohesity_management_sdk.models.vmware_params
import cohesity_management_sdk.models.vlan_parameters
class RegisterProtectionSourceParameters(object):
"""Implementation of the 'RegisterProtectionSourceParameters' model.
Specifies the parameters required to register a Protection Source.
Attributes:
acropolis_type (AcropolisTypeEnum): Specifies the entity type if the
environment is kAcropolis. overrideDescription: true
agent_endpoint (string): Specifies the agent endpoint if it is
different from the source endpoint.
allowed_ip_addresses (list of string): Specifies the list of IP
Addresses on the registered source to be exclusively allowed for
doing any type of IO operations.
aws_credentials (AwsCredentials): Specifies the credentials to
authenticate with AWS Cloud Platform.
aws_fleet_params (AwsFleetParams): Specifies information related to
AWS fleets launched for various purposes. This will only be set
for kIAMUser entity.
azure_credentials (AzureCredentials): Specifies the credentials to
authenticate with Azure Cloud Platform.
blacklisted_ip_addresses (list of string): This field is deprecated.
Use DeniedIpAddresses instead.
deprecated: true
cluster_network_info (FleetNetworkParams): Specifies information
related to cluster. This is only valid for CE clusters. This is
only populated for kIAMUser entity.
denied_ip_addresses (list of string): Specifies the list of IP
Addresses on the registered source to be denied for doing any
type of IO operations.
        encryption_key (string): If set, the user has encrypted the credentials
            with 'user_encryption_key'. It is assumed that credentials are first
            encrypted using the internal magneto key and then encrypted using
            the user encryption key.
        endpoint (string): Specifies the network endpoint of the Protection
            Source where it is reachable. It could be a URL, hostname, or an
            IP address of the Protection Source.
environment (EnvironmentRegisterProtectionSourceParametersEnum):
Specifies the environment such as 'kPhysical' or 'kVMware' of the
Protection Source. overrideDescription: true Supported environment
types such as 'kView', 'kSQL', 'kVMware', etc. NOTE: 'kPuppeteer'
refers to Cohesity's Remote Adapter. 'kVMware' indicates the
VMware Protection Source environment. 'kHyperV' indicates the
HyperV Protection Source environment. 'kSQL' indicates the SQL
Protection Source environment. 'kView' indicates the View
Protection Source environment. 'kPuppeteer' indicates the
Cohesity's Remote Adapter. 'kPhysical' indicates the physical
Protection Source environment. 'kPure' indicates the Pure Storage
            Protection Source environment. 'kNimble' indicates the Nimble
Storage Protection Source environment. 'kAzure' indicates the
Microsoft's Azure Protection Source environment. 'kNetapp'
indicates the Netapp Protection Source environment. 'kAgent'
indicates the Agent Protection Source environment. 'kGenericNas'
indicates the Generic Network Attached Storage Protection Source
environment. 'kAcropolis' indicates the Acropolis Protection
            Source environment. 'kPhysicalFiles' indicates the Physical Files
Protection Source environment. 'kIsilon' indicates the Dell EMC's
Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS
Protection Source environment. 'kKVM' indicates the KVM Protection
Source environment. 'kAWS' indicates the AWS Protection Source
environment. 'kExchange' indicates the Exchange Protection Source
environment. 'kHyperVVSS' indicates the HyperV VSS Protection
Source environment. 'kOracle' indicates the Oracle Protection
Source environment. 'kGCP' indicates the Google Cloud Platform
Protection Source environment. 'kFlashBlade' indicates the Flash
Blade Protection Source environment. 'kAWSNative' indicates the
AWS Native Protection Source environment. 'kO365' indicates the
Office 365 Protection Source environment. 'kO365Outlook'
indicates Office 365 outlook Protection Source environment.
'kHyperFlex' indicates the Hyper Flex Protection Source
environment. 'kGCPNative' indicates the GCP Native Protection
Source environment. 'kAzureNative' indicates the Azure Native
Protection Source environment. 'kKubernetes' indicates a
Kubernetes Protection Source environment. 'kElastifile'
indicates Elastifile Protection Source environment. 'kAD'
indicates Active Directory Protection Source environment.
'kRDSSnapshotManager' indicates AWS RDS Protection Source
environment. 'kCassandra' indicates Cassandra Protection Source
environment. 'kMongoDB' indicates MongoDB Protection Source
environment. 'kCouchbase' indicates Couchbase Protection Source
environment. 'kHdfs' indicates Hdfs Protection Source environment.
'kHive' indicates Hive Protection Source environment. 'kHBase'
indicates HBase Protection Source environment. 'kUDA' indicates
Universal Data Adapter Protection Source environment.
exchange_dag_protection_preference (ExchangeDAGProtectionPreference):
Specifies information about the preference order while choosing
between which database copy of the exchange database which is part
of DAG should be protected.
        force_register (bool): ForceRegister is applicable to the Physical
            Environment. By default, the agent running on a physical host will
            fail the registration if it is already registered as part of
            another cluster. By setting this option to true, the agent can be
            forced to register with the current cluster. This is a hidden
            parameter and should not be documented externally.
gcp_credentials (GcpCredentials): Specifies the credentials to
authenticate with Google Cloud Platform.
host_type (HostTypeRegisterProtectionSourceParametersEnum): Specifies
the optional OS type of the Protection Source (such as kWindows or
kLinux). overrideDescription: true 'kLinux' indicates the Linux
operating system. 'kWindows' indicates the Microsoft Windows
operating system. 'kAix' indicates the IBM AIX operating system.
'kSolaris' indicates the Oracle Solaris operating system.
            'kSapHana' indicates the SAP HANA database system developed by SAP
SE. 'kOther' indicates the other types of operating system.
hyperv_type (HypervTypeEnum): Specifies the entity type if the
environment is kHyperV. overrideDescription: true
is_internal_encrypted (bool): Set to true if credentials are encrypted
by internal magneto key.
is_proxy_host (bool): Specifies if the physical host has to be
registered as a proxy host.
isilon_params (RegisteredProtectionSourceIsilonParams): Specifies the
registered protection source params for Isilon Source
kubernetes_credentials (KubernetesCredentials): Specifies the
credentials to authenticate with a Kubernetes Cluster.
kubernetes_type (KubernetesTypeEnum): Specifies the entity type if the
environment is kKubernetes. overrideDescription: true
kvm_type (KvmTypeEnum): Specifies the entity type if the environment
is kKVM. overrideDescription: true
nas_mount_credentials (NasMountCredentialParams): Specifies the server
credentials to connect to a NetApp server. This field is required
for mounting SMB volumes on NetApp servers.
netapp_type (NetappTypeEnum): Specifies the entity type such as
'kCluster,' if the environment is kNetapp.
nimble_type (NimbleTypeEnum): Specifies the entity type such as
'kStorageArray' if the environment is kNimble.
        office365_credentials_list (list of Office365Credentials): Office365
            Source Credentials. Specifies the credentials needed to
            authenticate and authorize the user for Office365 using MS Graph
            APIs.
office_365_region (string): Specifies the region for Office365.
office_365_type (Office365TypeEnum): Specifies the entity type such as
'kDomain', 'kOutlook', 'kMailbox', if the environment is kO365.
password (string): Specifies password of the username to access the
target source.
physical_params (PhysicalParams): Contains all params specified by
the user while registering a physical entity.
physical_type (PhysicalTypeEnum): Specifies the entity type such as
'kPhysicalHost' if the environment is kPhysical.
overrideDescription: true
        proxy_host_source_id_list (list of long|int): Specifies the list of
            protection source ids of the Windows physical hosts which will be
            used during the protection and recovery of the sites that belong
            to an Office365 domain.
pure_type (PureTypeEnum): Specifies the entity type such as
'kStorageArray' if the environment is kPure.
source_side_dedup_enabled (bool): This controls whether to use source
side dedup on the source or not. This is only applicable to
sources which support source side dedup (e.g., Linux physical
servers).
        ssl_verification (SslVerification): The SSL verification parameter is
            applicable to the VMware environment. It can be populated with the
            server's CA certificate or certificate chain, and the vCenter
            certificate will be validated against it.
        subnets (list of Subnet): Specifies the list of subnet IP addresses
            and CIDR prefixes for enabling network data transfer. Currently,
            only Subnet IP and NetmaskBits are valid input fields. All other
            fields provided as input will be ignored.
throttling_policy (ThrottlingPolicyParameters): Specifies the
throttling policy that should be applied to this Source.
throttling_policy_overrides (list of ThrottlingPolicyOverride): Array
of Throttling Policy Overrides for Datastores. Specifies a list
of Throttling Policy for datastores that override the common
throttling policy specified for the registered Protection Source.
For datastores not in this list, common policy will still apply.
use_o_auth_for_exchange_online (bool): Specifies whether OAuth should
be used for authentication in case of Exchange Online.
username (string): Specifies username to access the target source.
        vlan_params (VlanParameters): Specifies the VLAN parameters to be used
            while taking the backup of this entity; they are also the
            preferred selection for restoring it. For restores, the VLAN
            parameters specified here can be overridden. Currently, this is
            only applicable for Physical hosts running Oracle.
        vmware_params (VmwareParams): Contains all params specified by the
            user while registering a VMware entity.
vmware_type (VmwareTypeEnum): Specifies the entity type such as
'kVCenter' if the environment is kVMware.
overrideDescription: true
"""
# Create a mapping from Model property names to API property names
_names = {
"acropolis_type":'acropolisType',
"agent_endpoint":'agentEndpoint',
"allowed_ip_addresses":'allowedIpAddresses',
"aws_credentials":'awsCredentials',
"aws_fleet_params":'awsFleetParams',
"azure_credentials":'azureCredentials',
"blacklisted_ip_addresses":'blacklistedIpAddresses',
"cluster_network_info":'clusterNetworkInfo',
"denied_ip_addresses":'deniedIpAddresses',
"endpoint":'endpoint',
"encryption_key":'encryptionKey',
"environment":'environment',
"exchange_dag_protection_preference":'exchangeDAGProtectionPreference',
"force_register":'forceRegister',
"gcp_credentials":'gcpCredentials',
"host_type":'hostType',
"hyperv_type":'hyperVType',
"is_internal_encrypted":'isInternalEncrypted',
"is_proxy_host":'isProxyHost',
"isilon_params":'isilonParams',
"kubernetes_credentials":'kubernetesCredentials',
"kubernetes_type":'kubernetesType',
"kvm_type":'kvmType',
"nas_mount_credentials":'nasMountCredentials',
"netapp_type":'netappType',
"nimble_type":'nimbleType',
"office365_credentials_list":"office365CredentialsList",
"office_365_region":'office365Region',
"office_365_type":'office365Type',
"password":'password',
"physical_params":'physicalParams',
"physical_type":'physicalType',
"proxy_host_source_id_list":'proxyHostSourceIdList',
"pure_type":'pureType',
"source_side_dedup_enabled":'sourceSideDedupEnabled',
"ssl_verification":'sslVerification',
"subnets":'subnets',
"throttling_policy":'throttlingPolicy',
"throttling_policy_overrides":'throttlingPolicyOverrides',
"use_o_auth_for_exchange_online":'useOAuthForExchangeOnline',
"username":'username',
"vlan_params":'vlanParams',
"vmware_params":'vmwareParams',
"vmware_type":'vmwareType'
}
def __init__(self,
acropolis_type=None,
agent_endpoint=None,
allowed_ip_addresses=None,
aws_credentials=None,
aws_fleet_params=None,
azure_credentials=None,
blacklisted_ip_addresses=None,
cluster_network_info=None,
denied_ip_addresses=None,
encryption_key=None,
endpoint=None,
environment=None,
exchange_dag_protection_preference=None,
force_register=None,
gcp_credentials=None,
host_type=None,
hyperv_type=None,
is_internal_encrypted=None,
is_proxy_host=None,
isilon_params=None,
kubernetes_credentials=None,
kubernetes_type=None,
kvm_type=None,
nas_mount_credentials=None,
netapp_type=None,
nimble_type=None,
office365_credentials_list=None,
office_365_region=None,
office_365_type=None,
                 password=None,
physical_params=None,
physical_type=None,
proxy_host_source_id_list=None,
pure_type=None,
source_side_dedup_enabled=None,
ssl_verification=None,
subnets=None,
throttling_policy=None,
throttling_policy_overrides=None,
use_o_auth_for_exchange_online=None,
username=None,
vlan_params=None,
vmware_params=None,
vmware_type=None):
"""Constructor for the RegisterProtectionSourceParameters class"""
# Initialize members of the class
self.acropolis_type = acropolis_type
self.agent_endpoint = agent_endpoint
self.allowed_ip_addresses = allowed_ip_addresses
self.aws_credentials = aws_credentials
self.aws_fleet_params = aws_fleet_params
self.azure_credentials = azure_credentials
self.blacklisted_ip_addresses = blacklisted_ip_addresses
self.cluster_network_info = cluster_network_info
self.denied_ip_addresses = denied_ip_addresses
self.encryption_key = encryption_key
self.endpoint = endpoint
self.environment = environment
self.force_register = force_register
self.exchange_dag_protection_preference = exchange_dag_protection_preference
self.gcp_credentials = gcp_credentials
self.host_type = host_type
self.hyperv_type = hyperv_type
self.is_internal_encrypted = is_internal_encrypted
self.is_proxy_host = is_proxy_host
self.isilon_params = isilon_params
self.kubernetes_credentials = kubernetes_credentials
self.kubernetes_type = kubernetes_type
self.kvm_type = kvm_type
self.nas_mount_credentials = nas_mount_credentials
self.netapp_type = netapp_type
self.nimble_type = nimble_type
self.office365_credentials_list = office365_credentials_list
self.office_365_region = office_365_region
self.office_365_type = office_365_type
self.password = password
self.physical_params = physical_params
self.physical_type = physical_type
self.proxy_host_source_id_list = proxy_host_source_id_list
self.pure_type = pure_type
self.source_side_dedup_enabled = source_side_dedup_enabled
self.ssl_verification = ssl_verification
self.subnets = subnets
self.throttling_policy = throttling_policy
self.throttling_policy_overrides = throttling_policy_overrides
self.use_o_auth_for_exchange_online = use_o_auth_for_exchange_online
self.username = username
self.vlan_params = vlan_params
self.vmware_params = vmware_params
self.vmware_type = vmware_type
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
acropolis_type = dictionary.get('acropolisType')
agent_endpoint = dictionary.get('agentEndpoint')
allowed_ip_addresses = dictionary.get('allowedIpAddresses')
aws_credentials = cohesity_management_sdk.models.aws_credentials.AwsCredentials.from_dictionary(dictionary.get('awsCredentials')) if dictionary.get('awsCredentials') else None
aws_fleet_params = cohesity_management_sdk.models.a_w_s_fleet_params.AwsFleetParams.from_dictionary(dictionary.get('awsFleetParams')) if dictionary.get('awsFleetParams') else None
azure_credentials = cohesity_management_sdk.models.azure_credentials.AzureCredentials.from_dictionary(dictionary.get('azureCredentials')) if dictionary.get('azureCredentials') else None
blacklisted_ip_addresses = dictionary.get('blacklistedIpAddresses')
cluster_network_info = cohesity_management_sdk.models.fleet_network_params.FleetNetworkParams.from_dictionary(dictionary.get('clusterNetworkInfo')) if dictionary.get('clusterNetworkInfo') else None
denied_ip_addresses = dictionary.get('deniedIpAddresses')
encryption_key = dictionary.get('encryptionKey')
endpoint = dictionary.get('endpoint')
environment = dictionary.get('environment')
exchange_dag_protection_preference = cohesity_management_sdk.models.exchange_dag_protection_preference.ExchangeDAGProtectionPreference.from_dictionary(dictionary.get('exchangeDAGProtectionPreference')) if dictionary.get('exchangeDAGProtectionPreference') else None
force_register = dictionary.get('forceRegister')
gcp_credentials = cohesity_management_sdk.models.gcp_credentials.GcpCredentials.from_dictionary(dictionary.get('gcpCredentials')) if dictionary.get('gcpCredentials') else None
host_type = dictionary.get('hostType')
hyperv_type = dictionary.get('hyperVType')
is_internal_encrypted = dictionary.get('isInternalEncrypted')
is_proxy_host = dictionary.get('isProxyHost')
isilon_params = cohesity_management_sdk.models.registered_protection_source_isilon_params.RegisteredProtectionSourceIsilonParams.from_dictionary(dictionary.get('isilonParams')) if dictionary.get('isilonParams') else None
kubernetes_credentials = cohesity_management_sdk.models.kubernetes_credentials.KubernetesCredentials.from_dictionary(dictionary.get('kubernetesCredentials')) if dictionary.get('kubernetesCredentials') else None
kubernetes_type = dictionary.get('kubernetesType')
kvm_type = dictionary.get('kvmType')
nas_mount_credentials = cohesity_management_sdk.models.nas_mount_credential_params.NasMountCredentialParams.from_dictionary(dictionary.get('nasMountCredentials')) if dictionary.get('nasMountCredentials') else None
netapp_type = dictionary.get('netappType')
nimble_type = dictionary.get('nimbleType')
office_365_type = dictionary.get('office365Type')
office365_credentials_list = None
        if dictionary.get('office365CredentialsList') is not None:
office365_credentials_list = list()
for structure in dictionary.get('office365CredentialsList'):
office365_credentials_list.append(cohesity_management_sdk.models.office_365_credentials.Office365Credentials.from_dictionary(structure))
office_365_region = dictionary.get('office365Region')
password = dictionary.get('password')
physical_params = cohesity_management_sdk.models.physical_params.PhysicalParams.from_dictionary(dictionary.get('physicalParams')) if dictionary.get('physicalParams') else None
physical_type = dictionary.get('physicalType')
proxy_host_source_id_list = dictionary.get('proxyHostSourceIdList')
pure_type = dictionary.get('pureType')
source_side_dedup_enabled = dictionary.get('sourceSideDedupEnabled')
ssl_verification = cohesity_management_sdk.models.ssl_verification.SslVerification.from_dictionary(dictionary.get('sslVerification')) if dictionary.get('sslVerification') else None
subnets = None
        if dictionary.get('subnets') is not None:
subnets = list()
for structure in dictionary.get('subnets'):
subnets.append(cohesity_management_sdk.models.subnet.Subnet.from_dictionary(structure))
throttling_policy = cohesity_management_sdk.models.throttling_policy_parameters.ThrottlingPolicyParameters.from_dictionary(dictionary.get('throttlingPolicy')) if dictionary.get('throttlingPolicy') else None
throttling_policy_overrides = None
        if dictionary.get('throttlingPolicyOverrides') is not None:
throttling_policy_overrides = list()
for structure in dictionary.get('throttlingPolicyOverrides'):
throttling_policy_overrides.append(cohesity_management_sdk.models.throttling_policy_override.ThrottlingPolicyOverride.from_dictionary(structure))
use_o_auth_for_exchange_online = dictionary.get('useOAuthForExchangeOnline')
username = dictionary.get('username')
vlan_params = cohesity_management_sdk.models.vlan_parameters.VlanParameters.from_dictionary(dictionary.get('vlanParams')) if dictionary.get('vlanParams') else None
vmware_params = cohesity_management_sdk.models.vmware_params.VmwareParams.from_dictionary(dictionary.get('vmwareParams')) if dictionary.get('vmwareParams') else None
vmware_type = dictionary.get('vmwareType')
# Return an object of this model
return cls(acropolis_type,
agent_endpoint,
allowed_ip_addresses,
aws_credentials,
aws_fleet_params,
azure_credentials,
blacklisted_ip_addresses,
cluster_network_info,
denied_ip_addresses,
encryption_key,
endpoint,
environment,
exchange_dag_protection_preference,
force_register,
gcp_credentials,
host_type,
hyperv_type,
is_internal_encrypted,
is_proxy_host,
isilon_params,
kubernetes_credentials,
kubernetes_type,
kvm_type,
nas_mount_credentials,
netapp_type,
nimble_type,
office365_credentials_list,
office_365_region,
office_365_type,
password,
physical_params,
physical_type,
proxy_host_source_id_list,
pure_type,
source_side_dedup_enabled,
ssl_verification,
subnets,
throttling_policy,
throttling_policy_overrides,
use_o_auth_for_exchange_online,
username,
vlan_params,
vmware_params,
vmware_type)
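# A minimal usage sketch (hedged: the payload values below are hypothetical;
# keys must match the API property names in the _names mapping above).
if __name__ == '__main__':
    payload = {
        'endpoint': '10.0.0.5',
        'environment': 'kPhysical',
        'physicalType': 'kPhysicalHost',
        'username': 'admin',
    }
    params = RegisterProtectionSourceParameters.from_dictionary(payload)
    # API (camelCase) keys are exposed as snake_case model attributes.
    assert params.physical_type == 'kPhysicalHost'
    assert params.endpoint == '10.0.0.5'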
|
Kinetics of solvolysis of intrazole.
The kinetics of degradation of intrazole in solution was investigated at 65 ± 0.1° at a constant ionic strength of 0.5 over a wide pH range. The observed rates, followed by measuring intact intrazole, obeyed first-order kinetics. The catalytic effect of phosphate buffer was found to be greater than that of acetic acid. The apparent rate of hydrolysis of intrazole increased with increasing concentration of hydrochloric acid. Primary salt effects were observed in both acidic and basic solutions. The rate of hydrolysis decreased in acidic and alkaline solutions with increasing concentrations of ethanol in the solvent system. The apparent heats of activation for intrazole degradation in solution were determined to be 19.87 kcal./mole in 0.1 N HCl, 21.40 kcal./mole in pH 4.10 acetate buffer, 20.30 kcal./mole in pH 8.0 phosphate buffer, and 7.25 kcal./mole in pH 9.10 and 10.10 borate buffers. From the rate-pH profile, the pH of minimum degradation or maximum stability of the compound under buffer-free conditions was found to be 3.20. The products of hydrolysis formed in acid- and alkali-catalyzed degradation of intrazole were identified by TLC. A mechanism consistent with these observations is proposed.
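A minimal sketch of the standard relations assumed here: first-order loss of intact drug, and an Arrhenius-type temperature dependence from which the apparent heats of activation are estimated ($C$ is the concentration of intact intrazole, $k_{\mathrm{obs}}$ the observed rate constant).

$$-\frac{dC}{dt} = k_{\mathrm{obs}}\,C \quad\Longrightarrow\quad \ln\frac{C_t}{C_0} = -k_{\mathrm{obs}}\,t$$

$$\ln k_{\mathrm{obs}} = \ln A - \frac{E_a}{RT}, \qquad \Delta H_a \approx E_a - RT$$
|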
Distribution of intrathecal catheter positions in rats
Background: Although drug administration through an intrathecal catheter is widely used in the study of spinal pharmacology, the catheter positions in the transverse plane that may cause limited spread of a solution remain unclear. As a first step to clarify this issue, the distribution of intrathecal catheter positions was investigated in rats. |
# Repository: hanebarla/CrowdCounting-using-PedestrianFlow
import os
from random import shuffle
from lib.model import CANNet2s, SimpleCNN
from lib.utils import save_checkpoint, fix_model_state_dict
import torch
from torch import nn
from torch.autograd import Variable
from torchvision import datasets, models, transforms
import torch.nn.functional as F
import numpy as np
import argparse
import json
import cv2
from lib import dataset
import time
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='PyTorch CANNet2s')
parser.add_argument('train_json', metavar='TRAIN',
help='path to train json')
parser.add_argument('val_json', metavar='VAL',
help='path to val json')
parser.add_argument('--dataset', default="FDST")
parser.add_argument('--exp', default='.')
parser.add_argument('--myloss', default='0.01')
parser.add_argument('--start_epoch', default=0, type=int)
parser.add_argument('--trainmodel', default="CAN")
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--opt', default="adam")
parser.add_argument('--activate', default="leaky")
parser.add_argument('--bn', default=0, type=int)
parser.add_argument('--do_rate', default=0.0, type=float)
parser.add_argument('--pretrained', default=0, type=int)
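# Example invocation (hypothetical script name and paths, shown for
# orientation; the two positional arguments are the train/val json files):
#   python train.py train.json val.json --dataset FDST --myloss 0.01 --lr 1e-4 --opt adam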
dloss_on = False
def dataset_factory(dlist, arguments, mode="train"):
if arguments.dataset == "FDST":
if mode == "train":
return dataset.listDataset(
dlist,
shuffle=True,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
]),
train=True,
            batch_size=arguments.batch_size,
            num_workers=arguments.workers
)
else:
return dataset.listDataset(
dlist,
shuffle=False,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
]), train=False
)
elif arguments.dataset == "CrowdFlow":
return dataset.CrowdDatasets(
dlist,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
)
elif arguments.dataset == "venice":
return dataset.VeniceDataset(
dlist,
transform=transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
])
)
else:
raise ValueError
def main():
global args, best_prec1, dloss_on
best_prec1 = 200
args = parser.parse_args()
# args.lr = 1e-4
args.batch_size = 1
args.momentum = 0.95
args.decay = 5*1e-4
# args.decay = 1e-3
# args.start_epoch = 0
args.epochs = 50
args.workers = 8
args.seed = int(time.time())
dloss_on = not (float(args.myloss) == 0)
if args.dataset == "FDST":
args.print_freq = 400
with open(args.train_json, 'r') as outfile:
train_list = json.load(outfile)
with open(args.val_json, 'r') as outfile:
val_list = json.load(outfile)
elif args.dataset == "CrowdFlow":
args.print_freq = 200
train_list = args.train_json
val_list = args.val_json
elif args.dataset == "venice":
args.print_freq = 10
train_list = args.train_json
val_list = args.val_json
else:
raise ValueError
if args.lr != 1e-4:
args.savefolder = os.path.join(args.exp, args.dataset, args.myloss, 'lr-' + str(args.lr))
elif args.opt != "adam":
args.savefolder = os.path.join(args.exp, args.dataset, args.myloss, 'opt-' + args.opt)
elif args.activate != "leaky":
args.savefolder = os.path.join(args.exp, args.dataset, args.myloss, 'activate-' + args.activate)
elif args.do_rate != 0.0:
args.savefolder = os.path.join(args.exp, args.dataset, args.myloss, 'do_rate-' + str(args.do_rate))
elif args.bn != 0:
args.savefolder = os.path.join(args.exp, args.dataset, args.myloss, 'bn-' + str(args.bn))
else:
args.savefolder = os.path.join(args.exp, args.dataset, args.myloss, 'no_change')
if not os.path.exists(args.savefolder):
os.makedirs(args.savefolder)
# logging.basicConfig(filename=os.path.join(args.savefolder, 'train.log'), level=logging.DEBUG)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.manual_seed(args.seed)
if os.path.exists(os.path.join(args.savefolder, 'log.txt')) and args.start_epoch == 0:
os.remove(os.path.join(args.savefolder, 'log.txt'))
if args.bn != 0 or args.do_rate > 0.0:
load_weight = True
else:
load_weight = False
if args.trainmodel == "CAN":
model = CANNet2s(load_weights=load_weight, activate=args.activate, bn=args.bn, do_rate=args.do_rate)
elif args.trainmodel == "SimpleCNN":
model = SimpleCNN()
best_prec1 = 100
# pretrained
if os.path.isfile(os.path.join(args.savefolder, 'checkpoint.pth.tar')):
checkpoint = torch.load(os.path.join(args.savefolder, 'checkpoint.pth.tar'))
modelbest = torch.load(os.path.join(args.savefolder, 'model_best.pth.tar'))
model.load_state_dict(fix_model_state_dict(checkpoint['state_dict']))
args.start_epoch = checkpoint['epoch']
print("Train resumed: {} epoch".format(args.start_epoch))
best_prec1 = modelbest['val']
print("best val: {}".format(best_prec1))
if torch.cuda.device_count() > 1:
print("You can use {} GPUs!".format(torch.cuda.device_count()))
model = torch.nn.DataParallel(model)
model.to(device)
# criterion = nn.MSELoss(size_average=False)
criterion = nn.MSELoss(reduction='sum')
if args.opt == "adam":
optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.decay)
elif args.opt == "amsgrad":
optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.decay, amsgrad=True)
elif args.opt == "sgd":
optimizer = torch.optim.SGD(model.parameters(), args.lr)
torch.backends.cudnn.benchmark = True
for epoch in range(args.start_epoch, args.epochs):
train(train_list, model, criterion, optimizer, epoch, device)
prec1 = validate(val_list, model, criterion, device)
is_best = prec1 < best_prec1
best_prec1 = min(prec1, best_prec1)
print(' * best MAE {mae:.3f} '
.format(mae=best_prec1))
save_checkpoint({
'state_dict': model.state_dict(),
'val': prec1.item(),
'epoch': epoch
}, is_best,
filename=os.path.join(args.savefolder, 'checkpoint.pth.tar'),
bestname=os.path.join(args.savefolder, 'model_best.pth.tar'))
def train(train_list, model, criterion, optimizer, epoch, device):
global args
losses = AverageMeter()
batch_time = AverageMeter()
data_time = AverageMeter()
train_dataset = dataset_factory(train_list, args, mode="train")
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=True)
print('epoch %d, processed %d samples, lr %.10f' % (epoch, epoch * len(train_loader.dataset), args.lr))
mae = 0
model.train()
end = time.time()
for i, (prev_img, img, post_img, target) in enumerate(train_loader):
data_time.update(time.time() - end)
prev_img = prev_img.to(device, dtype=torch.float)
prev_img = Variable(prev_img)
img = img.to(device, dtype=torch.float)
img = Variable(img)
post_img = post_img.to(device, dtype=torch.float)
post_img = Variable(post_img)
prev_flow = model(prev_img, img)
post_flow = model(img, post_img)
prev_flow_inverse = model(img, prev_img)
post_flow_inverse = model(post_img, img)
        target = target.type(torch.FloatTensor)[0].to(device)
target = Variable(target)
# mask the boundary locations where people can move in/out between regions outside image plane
mask_boundry = torch.zeros(prev_flow.shape[2:])
mask_boundry[0, :] = 1.0
mask_boundry[-1, :] = 1.0
mask_boundry[:, 0] = 1.0
mask_boundry[:, -1] = 1.0
        mask_boundry = Variable(mask_boundry.to(device))
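        # Reconstruct the current-frame density map from the 10-channel grid
        # flow: channels 0-8 carry people moving in from each of the 9
        # neighboring cells (shifted into place with F.pad), and channel 9
        # models people entering from outside the frame, so it contributes
        # only on the boundary mask. (This reading of the channel layout
        # follows CANNet2s and is an assumption of this annotation.)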
reconstruction_from_prev = F.pad(prev_flow[0,0,1:,1:],(0,1,0,1))+F.pad(prev_flow[0,1,1:,:],(0,0,0,1))+F.pad(prev_flow[0,2,1:,:-1],(1,0,0,1))+F.pad(prev_flow[0,3,:,1:],(0,1,0,0))+prev_flow[0,4,:,:]+F.pad(prev_flow[0,5,:,:-1],(1,0,0,0))+F.pad(prev_flow[0,6,:-1,1:],(0,1,1,0))+F.pad(prev_flow[0,7,:-1,:],(0,0,1,0))+F.pad(prev_flow[0,8,:-1,:-1],(1,0,1,0))+prev_flow[0,9,:,:]*mask_boundry
reconstruction_from_post = torch.sum(post_flow[0,:9,:,:],dim=0)+post_flow[0,9,:,:]*mask_boundry
reconstruction_from_prev_inverse = torch.sum(prev_flow_inverse[0,:9,:,:],dim=0)+prev_flow_inverse[0,9,:,:]*mask_boundry
reconstruction_from_post_inverse = F.pad(post_flow_inverse[0,0,1:,1:],(0,1,0,1))+F.pad(post_flow_inverse[0,1,1:,:],(0,0,0,1))+F.pad(post_flow_inverse[0,2,1:,:-1],(1,0,0,1))+F.pad(post_flow_inverse[0,3,:,1:],(0,1,0,0))+post_flow_inverse[0,4,:,:]+F.pad(post_flow_inverse[0,5,:,:-1],(1,0,0,0))+F.pad(post_flow_inverse[0,6,:-1,1:],(0,1,1,0))+F.pad(post_flow_inverse[0,7,:-1,:],(0,0,1,0))+F.pad(post_flow_inverse[0,8,:-1,:-1],(1,0,1,0))+post_flow_inverse[0,9,:,:]*mask_boundry
# prev_density_reconstruction = torch.sum(prev_flow[0,:9,:,:],dim=0)+prev_flow[0,9,:,:]*mask_boundry
# prev_density_reconstruction_inverse = F.pad(prev_flow_inverse[0,0,1:,1:],(0,1,0,1))+F.pad(prev_flow_inverse[0,1,1:,:],(0,0,0,1))+F.pad(prev_flow_inverse[0,2,1:,:-1],(1,0,0,1))+F.pad(prev_flow_inverse[0,3,:,1:],(0,1,0,0))+prev_flow_inverse[0,4,:,:]+F.pad(prev_flow_inverse[0,5,:,:-1],(1,0,0,0))+F.pad(prev_flow_inverse[0,6,:-1,1:],(0,1,1,0))+F.pad(prev_flow_inverse[0,7,:-1,:],(0,0,1,0))+F.pad(prev_flow_inverse[0,8,:-1,:-1],(1,0,1,0))+prev_flow_inverse[0,9,:,:]*mask_boundry
# post_density_reconstruction_inverse = torch.sum(post_flow_inverse[0,:9,:,:],dim=0)+post_flow_inverse[0,9,:,:]*mask_boundry
# post_density_reconstruction = F.pad(post_flow[0,0,1:,1:],(0,1,0,1))+F.pad(post_flow[0,1,1:,:],(0,0,0,1))+F.pad(post_flow[0,2,1:,:-1],(1,0,0,1))+F.pad(post_flow[0,3,:,1:],(0,1,0,0))+post_flow[0,4,:,:]+F.pad(post_flow[0,5,:,:-1],(1,0,0,0))+F.pad(post_flow[0,6,:-1,1:],(0,1,1,0))+F.pad(post_flow[0,7,:-1,:],(0,0,1,0))+F.pad(post_flow[0,8,:-1,:-1],(1,0,1,0))+post_flow[0,9,:,:]*mask_boundry
loss_prev_flow = criterion(reconstruction_from_prev, target)
loss_post_flow = criterion(reconstruction_from_post, target)
loss_prev_flow_inverse = criterion(reconstruction_from_prev_inverse, target)
loss_post_flow_inverse = criterion(reconstruction_from_post_inverse, target)
# cycle consistency
loss_prev_consistency = criterion(prev_flow[0,0,1:,1:], prev_flow_inverse[0,8,:-1,:-1])+criterion(prev_flow[0,1,1:,:], prev_flow_inverse[0,7,:-1,:])+criterion(prev_flow[0,2,1:,:-1], prev_flow_inverse[0,6,:-1,1:])+criterion(prev_flow[0,3,:,1:], prev_flow_inverse[0,5,:,:-1])+criterion(prev_flow[0,4,:,:], prev_flow_inverse[0,4,:,:])+criterion(prev_flow[0,5,:,:-1], prev_flow_inverse[0,3,:,1:])+criterion(prev_flow[0,6,:-1,1:], prev_flow_inverse[0,2,1:,:-1])+criterion(prev_flow[0,7,:-1,:], prev_flow_inverse[0,1,1:,:])+criterion(prev_flow[0,8,:-1,:-1], prev_flow_inverse[0,0,1:,1:])
loss_post_consistency = criterion(post_flow[0,0,1:,1:], post_flow_inverse[0,8,:-1,:-1])+criterion(post_flow[0,1,1:,:], post_flow_inverse[0,7,:-1,:])+criterion(post_flow[0,2,1:,:-1], post_flow_inverse[0,6,:-1,1:])+criterion(post_flow[0,3,:,1:], post_flow_inverse[0,5,:,:-1])+criterion(post_flow[0,4,:,:], post_flow_inverse[0,4,:,:])+criterion(post_flow[0,5,:,:-1], post_flow_inverse[0,3,:,1:])+criterion(post_flow[0,6,:-1,1:], post_flow_inverse[0,2,1:,:-1])+criterion(post_flow[0,7,:-1,:], post_flow_inverse[0,1,1:,:])+criterion(post_flow[0,8,:-1,:-1], post_flow_inverse[0,0,1:,1:])
loss = loss_prev_flow+loss_post_flow+loss_prev_flow_inverse+loss_post_flow_inverse+loss_prev_consistency+loss_post_consistency
# direct loss
if dloss_on:
loss_prev_direct = criterion(prev_flow[0,0,1:,1:], prev_flow[0,0,1:,1:])+criterion(prev_flow[0,1,1:,:], prev_flow[0,1,:-1,:])+criterion(prev_flow[0,2,1:,:-1], prev_flow[0,2,:-1,1:])+criterion(prev_flow[0,3,:,1:], prev_flow[0,3,:,:-1])+criterion(prev_flow[0,4,:,:], prev_flow[0,4,:,:])+criterion(prev_flow[0,5,:,:-1], prev_flow[0,5,:,1:])+criterion(prev_flow[0,6,:-1,1:], prev_flow[0,6,1:,:-1])+criterion(prev_flow[0,7,:-1,:], prev_flow[0,7,1:,:])+criterion(prev_flow[0,8,:-1,:-1], prev_flow[0,8,1:,1:])
loss_post_direct = criterion(post_flow[0,0,1:,1:], post_flow[0,0,1:,1:])+criterion(post_flow[0,1,1:,:], post_flow[0,1,:-1,:])+criterion(post_flow[0,2,1:,:-1], post_flow[0,2,:-1,1:])+criterion(post_flow[0,3,:,1:], post_flow[0,3,:,:-1])+criterion(post_flow[0,4,:,:], post_flow[0,4,:,:])+criterion(post_flow[0,5,:,:-1], post_flow[0,5,:,1:])+criterion(post_flow[0,6,:-1,1:], post_flow[0,6,1:,:-1])+criterion(post_flow[0,7,:-1,:], post_flow[0,7,1:,:])+criterion(post_flow[0,8,:-1,:-1], post_flow[0,8,1:,1:])
loss_prev_inv_direct = criterion(prev_flow_inverse[0,0,1:,1:], prev_flow_inverse[0,0,1:,1:])+criterion(prev_flow_inverse[0,1,1:,:], prev_flow_inverse[0,1,:-1,:])+criterion(prev_flow_inverse[0,2,1:,:-1], prev_flow_inverse[0,2,:-1,1:])+criterion(prev_flow_inverse[0,3,:,1:], prev_flow_inverse[0,3,:,:-1])+criterion(prev_flow_inverse[0,4,:,:], prev_flow_inverse[0,4,:,:])+criterion(prev_flow_inverse[0,5,:,:-1], prev_flow_inverse[0,5,:,1:])+criterion(prev_flow_inverse[0,6,:-1,1:], prev_flow_inverse[0,6,1:,:-1])+criterion(prev_flow_inverse[0,7,:-1,:], prev_flow_inverse[0,7,1:,:])+criterion(prev_flow_inverse[0,8,:-1,:-1], prev_flow_inverse[0,8,1:,1:])
loss_post_inv_direct = criterion(post_flow_inverse[0,0,1:,1:], post_flow_inverse[0,0,1:,1:])+criterion(post_flow_inverse[0,1,1:,:], post_flow_inverse[0,1,:-1,:])+criterion(post_flow_inverse[0,2,1:,:-1], post_flow_inverse[0,2,:-1,1:])+criterion(post_flow_inverse[0,3,:,1:], post_flow_inverse[0,3,:,:-1])+criterion(post_flow_inverse[0,4,:,:], post_flow_inverse[0,4,:,:])+criterion(post_flow_inverse[0,5,:,:-1], post_flow_inverse[0,5,:,1:])+criterion(post_flow_inverse[0,6,:-1,1:], post_flow_inverse[0,6,1:,:-1])+criterion(post_flow_inverse[0,7,:-1,:], post_flow_inverse[0,7,1:,:])+criterion(post_flow_inverse[0,8,:-1,:-1], post_flow_inverse[0,8,1:,1:])
loss += float(args.myloss) *(loss_prev_direct + loss_post_direct + loss_prev_inv_direct + loss_post_inv_direct)
# MAE
overall = ((reconstruction_from_prev+reconstruction_from_prev_inverse)/2.0).type(torch.FloatTensor)
        mae += abs(overall.data.sum() - target.detach().cpu().sum())  # move target to CPU to match overall
losses.update(loss.item(), img.size(0))
optimizer.zero_grad()
loss.backward()
optimizer.step()
batch_time.update(time.time() - end)
end = time.time()
del prev_img
del img
del post_img
del target
if i % args.print_freq == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
with open(os.path.join(args.savefolder, 'log.txt'), mode='a') as f:
f.write('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t\n'
.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses))
mae = mae/len(train_loader)
print(' * Train MAE {mae:.3f} '
.format(mae=mae))
print(' * Train Loss {loss:.3f} '
.format(loss=losses.avg))
with open(os.path.join(args.savefolder, 'log.txt'), mode='a') as f:
f.write('Train MAE:{mae:.3f} \nTrain Loss:{loss:.3f} \n\n'
.format(mae=mae, loss=losses.avg))
def validate(val_list, model, criterion, device):
global args
    print('begin val')
with open(os.path.join(args.savefolder, 'log.txt'), mode='a') as f:
f.write('begin val\n')
val_dataset = dataset_factory(val_list, args, mode="val")
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=1)
model.eval()
losses = AverageMeter()
mae = 0
    for i, (prev_img, img, post_img, target) in enumerate(val_loader):
# only use previous frame in inference time, as in real-time application scenario, future frame is not available
with torch.no_grad():
prev_img = prev_img.to(device, dtype=torch.float)
prev_img = Variable(prev_img)
img = img.to(device, dtype=torch.float)
img = Variable(img)
post_img = post_img.to(device, dtype=torch.float)
post_img = Variable(post_img)
prev_flow = model(prev_img,img)
prev_flow_inverse = model(img,prev_img)
post_flow = model(img, post_img)
post_flow_inverse = model(post_img, img)
target = target.type(torch.FloatTensor)[0].to(device, dtype=torch.float)
target = Variable(target)
mask_boundry = torch.zeros(prev_flow.shape[2:])
mask_boundry[0,:] = 1.0
mask_boundry[-1,:] = 1.0
mask_boundry[:,0] = 1.0
mask_boundry[:,-1] = 1.0
            mask_boundry = Variable(mask_boundry.to(device))
reconstruction_from_prev = F.pad(prev_flow[0,0,1:,1:],(0,1,0,1))+F.pad(prev_flow[0,1,1:,:],(0,0,0,1))+F.pad(prev_flow[0,2,1:,:-1],(1,0,0,1))+F.pad(prev_flow[0,3,:,1:],(0,1,0,0))+prev_flow[0,4,:,:]+F.pad(prev_flow[0,5,:,:-1],(1,0,0,0))+F.pad(prev_flow[0,6,:-1,1:],(0,1,1,0))+F.pad(prev_flow[0,7,:-1,:],(0,0,1,0))+F.pad(prev_flow[0,8,:-1,:-1],(1,0,1,0))+prev_flow[0,9,:,:]*mask_boundry
reconstruction_from_post = torch.sum(post_flow[0,:9,:,:],dim=0)+post_flow[0,9,:,:]*mask_boundry
reconstruction_from_prev_inverse = torch.sum(prev_flow_inverse[0,:9,:,:],dim=0)+prev_flow_inverse[0,9,:,:]*mask_boundry
reconstruction_from_post_inverse = F.pad(post_flow_inverse[0,0,1:,1:],(0,1,0,1))+F.pad(post_flow_inverse[0,1,1:,:],(0,0,0,1))+F.pad(post_flow_inverse[0,2,1:,:-1],(1,0,0,1))+F.pad(post_flow_inverse[0,3,:,1:],(0,1,0,0))+post_flow_inverse[0,4,:,:]+F.pad(post_flow_inverse[0,5,:,:-1],(1,0,0,0))+F.pad(post_flow_inverse[0,6,:-1,1:],(0,1,1,0))+F.pad(post_flow_inverse[0,7,:-1,:],(0,0,1,0))+F.pad(post_flow_inverse[0,8,:-1,:-1],(1,0,1,0))+post_flow_inverse[0,9,:,:]*mask_boundry
overall = ((reconstruction_from_prev+reconstruction_from_prev_inverse)/2.0).type(torch.FloatTensor)
loss_prev_flow = criterion(reconstruction_from_prev, target)
loss_post_flow = criterion(reconstruction_from_post, target)
loss_prev_flow_inverse = criterion(reconstruction_from_prev_inverse, target)
loss_post_flow_inverse = criterion(reconstruction_from_post_inverse, target)
# cycle consistency
loss_prev_consistency = criterion(prev_flow[0,0,1:,1:], prev_flow_inverse[0,8,:-1,:-1])+criterion(prev_flow[0,1,1:,:], prev_flow_inverse[0,7,:-1,:])+criterion(prev_flow[0,2,1:,:-1], prev_flow_inverse[0,6,:-1,1:])+criterion(prev_flow[0,3,:,1:], prev_flow_inverse[0,5,:,:-1])+criterion(prev_flow[0,4,:,:], prev_flow_inverse[0,4,:,:])+criterion(prev_flow[0,5,:,:-1], prev_flow_inverse[0,3,:,1:])+criterion(prev_flow[0,6,:-1,1:], prev_flow_inverse[0,2,1:,:-1])+criterion(prev_flow[0,7,:-1,:], prev_flow_inverse[0,1,1:,:])+criterion(prev_flow[0,8,:-1,:-1], prev_flow_inverse[0,0,1:,1:])
loss_post_consistency = criterion(post_flow[0,0,1:,1:], post_flow_inverse[0,8,:-1,:-1])+criterion(post_flow[0,1,1:,:], post_flow_inverse[0,7,:-1,:])+criterion(post_flow[0,2,1:,:-1], post_flow_inverse[0,6,:-1,1:])+criterion(post_flow[0,3,:,1:], post_flow_inverse[0,5,:,:-1])+criterion(post_flow[0,4,:,:], post_flow_inverse[0,4,:,:])+criterion(post_flow[0,5,:,:-1], post_flow_inverse[0,3,:,1:])+criterion(post_flow[0,6,:-1,1:], post_flow_inverse[0,2,1:,:-1])+criterion(post_flow[0,7,:-1,:], post_flow_inverse[0,1,1:,:])+criterion(post_flow[0,8,:-1,:-1], post_flow_inverse[0,0,1:,1:])
loss = loss_prev_flow+loss_post_flow+loss_prev_flow_inverse+loss_post_flow_inverse+loss_prev_consistency+loss_post_consistency
if dloss_on:
loss_prev_direct = criterion(prev_flow[0,0,1:,1:], prev_flow[0,0,1:,1:])+criterion(prev_flow[0,1,1:,:], prev_flow[0,1,:-1,:])+criterion(prev_flow[0,2,1:,:-1], prev_flow[0,2,:-1,1:])+criterion(prev_flow[0,3,:,1:], prev_flow[0,3,:,:-1])+criterion(prev_flow[0,4,:,:], prev_flow[0,4,:,:])+criterion(prev_flow[0,5,:,:-1], prev_flow[0,5,:,1:])+criterion(prev_flow[0,6,:-1,1:], prev_flow[0,6,1:,:-1])+criterion(prev_flow[0,7,:-1,:], prev_flow[0,7,1:,:])+criterion(prev_flow[0,8,:-1,:-1], prev_flow[0,8,1:,1:])
loss_post_direct = criterion(post_flow[0,0,1:,1:], post_flow[0,0,1:,1:])+criterion(post_flow[0,1,1:,:], post_flow[0,1,:-1,:])+criterion(post_flow[0,2,1:,:-1], post_flow[0,2,:-1,1:])+criterion(post_flow[0,3,:,1:], post_flow[0,3,:,:-1])+criterion(post_flow[0,4,:,:], post_flow[0,4,:,:])+criterion(post_flow[0,5,:,:-1], post_flow[0,5,:,1:])+criterion(post_flow[0,6,:-1,1:], post_flow[0,6,1:,:-1])+criterion(post_flow[0,7,:-1,:], post_flow[0,7,1:,:])+criterion(post_flow[0,8,:-1,:-1], post_flow[0,8,1:,1:])
loss_prev_inv_direct = criterion(prev_flow_inverse[0,0,1:,1:], prev_flow_inverse[0,0,1:,1:])+criterion(prev_flow_inverse[0,1,1:,:], prev_flow_inverse[0,1,:-1,:])+criterion(prev_flow_inverse[0,2,1:,:-1], prev_flow_inverse[0,2,:-1,1:])+criterion(prev_flow_inverse[0,3,:,1:], prev_flow_inverse[0,3,:,:-1])+criterion(prev_flow_inverse[0,4,:,:], prev_flow_inverse[0,4,:,:])+criterion(prev_flow_inverse[0,5,:,:-1], prev_flow_inverse[0,5,:,1:])+criterion(prev_flow_inverse[0,6,:-1,1:], prev_flow_inverse[0,6,1:,:-1])+criterion(prev_flow_inverse[0,7,:-1,:], prev_flow_inverse[0,7,1:,:])+criterion(prev_flow_inverse[0,8,:-1,:-1], prev_flow_inverse[0,8,1:,1:])
loss_post_inv_direct = criterion(post_flow_inverse[0,0,1:,1:], post_flow_inverse[0,0,1:,1:])+criterion(post_flow_inverse[0,1,1:,:], post_flow_inverse[0,1,:-1,:])+criterion(post_flow_inverse[0,2,1:,:-1], post_flow_inverse[0,2,:-1,1:])+criterion(post_flow_inverse[0,3,:,1:], post_flow_inverse[0,3,:,:-1])+criterion(post_flow_inverse[0,4,:,:], post_flow_inverse[0,4,:,:])+criterion(post_flow_inverse[0,5,:,:-1], post_flow_inverse[0,5,:,1:])+criterion(post_flow_inverse[0,6,:-1,1:], post_flow_inverse[0,6,1:,:-1])+criterion(post_flow_inverse[0,7,:-1,:], post_flow_inverse[0,7,1:,:])+criterion(post_flow_inverse[0,8,:-1,:-1], post_flow_inverse[0,8,1:,1:])
loss += float(args.myloss) *(loss_prev_direct + loss_post_direct + loss_prev_inv_direct + loss_post_inv_direct)
target = target.type(torch.FloatTensor)
losses.update(loss.item(), img.size(0))
mae += abs(overall.data.sum()-target.sum())
del prev_img
del img
del target
mae = mae/len(val_loader)
print(' * Val MAE {mae:.3f} '
.format(mae=mae))
print(' * Val Loss {loss:.3f} '
.format(loss=losses.avg))
with open(os.path.join(args.savefolder, 'log.txt'), mode='a') as f:
f.write('Val MAE:{mae:.3f} \nVal Loss:{loss:.3f} \n\n'
.format(mae=mae, loss=losses.avg))
return mae
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
if __name__ == '__main__':
main()
|
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import com.google.common.base.Predicate;
import com.google.common.collect.Lists;

/**
 * Generic tree structure. Only concrete subclasses of this can be instantiated.
 */
public class TreeNode<NodeType extends TreeNode<NodeType>> {
protected ArrayList<NodeType> children = Lists.newArrayList();
public NodeType getChild(int i) {
return hasChild(i) ? children.get(i) : null;
}
public void addChild(NodeType n) {
children.add(n);
}
  public void addChildren(List<? extends NodeType> n) {
children.addAll(n);
}
public boolean hasChild(int i) { return children.size() > i; }
public void setChild(int index, NodeType n) { children.set(index, n); }
public ArrayList<NodeType> getChildren() { return children; }
public void clearChildren() { children.clear(); }
  public void removeNode(int i) {
    if (children != null && i >= 0 && i < children.size()) {
children.remove(i);
}
}
/**
* Count the total number of nodes in this tree. Leaf node will return 1.
* Non-leaf node will include all its children.
*/
public int numNodes() {
int numNodes = 1;
for (NodeType child: children) {
numNodes += child.numNodes();
}
return numNodes;
}
  /**
   * Add all nodes in the tree that satisfy 'predicate' to the collection 'matches'.
   * This node is checked first, followed by its children in order. If the node
   * itself matches, the children are skipped.
   */
public <C extends TreeNode<NodeType>, D extends C> void collect(
Predicate<? super C> predicate, Collection<D> matches) {
// TODO: the semantics of this function are very strange. contains()
// checks using .equals() on the nodes. In the case of literals, slotrefs
// and maybe others, two different tree node objects can be equal and
// this function would only return one of them. This is not intuitive.
// We rely on these semantics to not have duplicate nodes. Investigate this.
if (predicate.apply((C) this) && !matches.contains(this)) {
matches.add((D) this);
return;
}
for (NodeType child: children) {
child.collect(predicate, matches);
}
}
/**
* Add all nodes in the tree that are of class 'cl' to the list 'matches'.
* This node is checked first, followed by its children in order. If the node
* itself is of class 'cl', the children are skipped.
*/
public <C extends TreeNode<NodeType>, D extends C> void collect(
Class cl, Collection<D> matches) {
if (cl.equals(getClass())) {
matches.add((D) this);
return;
}
for (NodeType child: children) {
child.collect(cl, matches);
}
}
  /**
   * Add all nodes in the tree that satisfy 'predicate' to the list 'matches'.
   * This node is checked first, followed by its children in order. All nodes
   * that match in the subtree are added.
   */
public <C extends TreeNode<NodeType>, D extends C> void collectAll(
Predicate<? super C> predicate, List<D> matches) {
if (predicate.apply((C) this)) {
matches.add((D) this);
}
for (NodeType child: children) {
child.collectAll(predicate, matches);
}
}
/**
* For each expression in 'nodeList', collect all subexpressions satisfying 'predicate'
* into 'matches'
*/
public static <C extends TreeNode<C>, D extends C> void collect(
Collection<C> nodeList, Predicate<? super C> predicate, Collection<D> matches) {
for (C node: nodeList) {
node.collect(predicate, matches);
}
}
/**
* For each expression in 'nodeList', collect all subexpressions of class 'cl'
* into 'matches'
*/
public static <C extends TreeNode<C>, D extends C> void collect(
Collection<C> nodeList, Class cl, Collection<D> matches) {
for (C node: nodeList) {
node.collect(cl, matches);
}
}
public boolean contains(Class cl) {
if (cl.isAssignableFrom(this.getClass()) && this.getClass().isAssignableFrom(cl)) {
return true;
}
for (NodeType child : children) {
if (child.contains(cl)) {
return true;
}
}
return false;
}
/**
* Return true if this node or any of its children satisfy 'predicate'.
*/
public <C extends TreeNode<NodeType>> boolean contains(
Predicate<? super C> predicate) {
if (predicate.apply((C) this)) {
return true;
}
for (NodeType child: children) {
if (child.contains(predicate)) {
return true;
}
}
return false;
}
/**
* For each node in nodeList, return true if any subexpression satisfies
* contains('predicate').
*/
public static <C extends TreeNode<C>, D extends C> boolean contains(
Collection<C> nodeList, Predicate<? super C> predicate) {
for (C node: nodeList) {
if (node.contains(predicate)) {
return true;
}
}
return false;
}
/**
* Return true if any node in nodeList contains children of class cl.
*/
public static <C extends TreeNode<C>> boolean contains(
List<C> nodeList, Class cl) {
for (C node: nodeList) {
if (node.contains(cl)) {
return true;
}
}
return false;
}
public boolean containsSubclass(Class cl) {
if (cl.isAssignableFrom(this.getClass())) {
return true;
}
for (NodeType child : children) {
if (child.containsSubclass(cl)) {
return true;
}
}
return false;
}
  /**
   * Return 'this' or the first descendant that is exactly of class 'cl'.
   * Descendants are searched via depth-first, left-to-right traversal.
   */
public <C extends NodeType> C findFirstOf(Class<C> cl) {
if (this.getClass().equals(cl)) {
return (C) this;
}
for (NodeType child : children) {
NodeType result = child.findFirstOf(cl);
if (result != null) {
return (C) result;
}
}
return null;
}
} |
More fun from the self-loathing society: This American Life had a show about how young female undercover cops infiltrated a high school and flirted with boys to entrap them into selling pot, so they could charge them with felonies and destroy their lives at an early age.
Last year in three high schools in Florida, several undercover police officers posed as students. The undercover cops went to classes, became Facebook friends and flirted with the other students. One 18-year-old honor student named Justin fell in love with an attractive 25-year-old undercover cop after spending weeks sharing stories about their lives, texting and flirting with each other.
One day she asked Justin if he smoked pot. Even though he didn't smoke marijuana, the love-struck teen promised to help find some for her. Every couple of days she would text him asking if he had the marijuana. Finally, Justin was able to get it to her. She tried to give him $25 for the marijuana and he said he didn't want the money -- he got it for her as a present.
A short while later, the police did a big sweep and arrested 31 students -- including Justin. Almost all were charged with selling a small amount of marijuana to the undercover cops. Now Justin has a felony hanging over his head. |
import com.fasterxml.jackson.annotation.JsonCreator;
import com.fasterxml.jackson.annotation.JsonProperty;

/**
 * Jackson-friendly version of {@link Loan}
 */
public class JsonAdaptedLoan extends JsonAdaptedTransaction {
/**
* Constructs a {@code JsonAdaptedLoan} with the given loan details.
*/
@JsonCreator
public JsonAdaptedLoan(@JsonProperty("description") String description,
@JsonProperty("amount") String amount,
@JsonProperty("date") String date) {
super(description, amount, date, "Loan");
}
/**
* Converts a given {@code Loan} into this class for Jackson use.
*/
public JsonAdaptedLoan(Loan loan) {
super(loan);
}
/**
* Converts this Jackson-friendly adapted loan object into the model's Loan.
*/
public Loan toModelType() throws IllegalValueException {
TransactionDescriptor descriptor = toDescriptor();
return new Loan(descriptor.description, descriptor.amount, descriptor.date);
}
} |
/**
 * This is the temporary game loop: a fixed-timestep update with a
 * once-per-second tick counter.
 */
public void run(){
this.init();
int fps = 60;
    double timePerTick = 1000000000.0 / fps; // double literal avoids integer division
double delta = 0;
long now;
long lastTime = System.nanoTime();
long timer = 0;
int ticks = 0;
while(running){
now = System.nanoTime();
delta += (now - lastTime) / timePerTick;
timer += now - lastTime;
lastTime = now;
if(delta >= 1){
update();
render();
ticks++;
delta--;
}
if(timer >= 1000000000){
System.out.println("Ticks and Frames: " + ticks);
ticks = 0;
timer = 0;
}
}
stop();
} |
/*!
@file
Defines `boost::hana::pair`.
@copyright Louis Dionne 2013-2017
Distributed under the Boost Software License, Version 1.0.
(See accompanying file LICENSE.md or copy at http://boost.org/LICENSE_1_0.txt)
*/
#ifndef BOOST_HANA_PAIR_HPP
#define BOOST_HANA_PAIR_HPP
#include <boost/hana/fwd/pair.hpp>
#include <boost/hana/config.hpp>
#include <boost/hana/detail/decay.hpp>
#include <boost/hana/detail/ebo.hpp>
#include <boost/hana/detail/intrinsics.hpp>
#include <boost/hana/detail/operators/adl.hpp>
#include <boost/hana/detail/operators/comparable.hpp>
#include <boost/hana/detail/operators/orderable.hpp>
#include <boost/hana/fwd/core/make.hpp>
#include <boost/hana/fwd/first.hpp>
#include <boost/hana/fwd/second.hpp>
#include <type_traits>
#include <utility>
BOOST_HANA_NAMESPACE_BEGIN
namespace detail {
template <int> struct pix; // pair index
}
//////////////////////////////////////////////////////////////////////////
// pair
//////////////////////////////////////////////////////////////////////////
//! @cond
template <typename First, typename Second>
struct pair : detail::operators::adl<pair<First, Second>>
, private detail::ebo<detail::pix<0>, First>
, private detail::ebo<detail::pix<1>, Second>
{
// Default constructor
template <typename ...dummy, typename = typename std::enable_if<
BOOST_HANA_TT_IS_CONSTRUCTIBLE(First, dummy...) &&
BOOST_HANA_TT_IS_CONSTRUCTIBLE(Second, dummy...)
>::type>
constexpr pair()
: detail::ebo<detail::pix<0>, First>()
, detail::ebo<detail::pix<1>, Second>()
{ }
// Variadic constructors
template <typename ...dummy, typename = typename std::enable_if<
BOOST_HANA_TT_IS_CONSTRUCTIBLE(First, First const&, dummy...) &&
BOOST_HANA_TT_IS_CONSTRUCTIBLE(Second, Second const&, dummy...)
>::type>
constexpr pair(First const& fst, Second const& snd)
: detail::ebo<detail::pix<0>, First>(fst)
, detail::ebo<detail::pix<1>, Second>(snd)
{ }
template <typename T, typename U, typename = typename std::enable_if<
BOOST_HANA_TT_IS_CONVERTIBLE(T&&, First) &&
BOOST_HANA_TT_IS_CONVERTIBLE(U&&, Second)
>::type>
constexpr pair(T&& t, U&& u)
: detail::ebo<detail::pix<0>, First>(static_cast<T&&>(t))
, detail::ebo<detail::pix<1>, Second>(static_cast<U&&>(u))
{ }
// Possibly converting copy and move constructors
template <typename T, typename U, typename = typename std::enable_if<
BOOST_HANA_TT_IS_CONSTRUCTIBLE(First, T const&) &&
BOOST_HANA_TT_IS_CONSTRUCTIBLE(Second, U const&) &&
BOOST_HANA_TT_IS_CONVERTIBLE(T const&, First) &&
BOOST_HANA_TT_IS_CONVERTIBLE(U const&, Second)
>::type>
constexpr pair(pair<T, U> const& other)
: detail::ebo<detail::pix<0>, First>(hana::first(other))
, detail::ebo<detail::pix<1>, Second>(hana::second(other))
{ }
template <typename T, typename U, typename = typename std::enable_if<
BOOST_HANA_TT_IS_CONSTRUCTIBLE(First, T&&) &&
BOOST_HANA_TT_IS_CONSTRUCTIBLE(Second, U&&) &&
BOOST_HANA_TT_IS_CONVERTIBLE(T&&, First) &&
BOOST_HANA_TT_IS_CONVERTIBLE(U&&, Second)
>::type>
constexpr pair(pair<T, U>&& other)
: detail::ebo<detail::pix<0>, First>(hana::first(static_cast<pair<T, U>&&>(other)))
, detail::ebo<detail::pix<1>, Second>(hana::second(static_cast<pair<T, U>&&>(other)))
{ }
// Copy and move assignment
template <typename T, typename U, typename = typename std::enable_if<
BOOST_HANA_TT_IS_ASSIGNABLE(First&, T const&) &&
BOOST_HANA_TT_IS_ASSIGNABLE(Second&, U const&)
>::type>
constexpr pair& operator=(pair<T, U> const& other) {
hana::first(*this) = hana::first(other);
hana::second(*this) = hana::second(other);
return *this;
}
template <typename T, typename U, typename = typename std::enable_if<
BOOST_HANA_TT_IS_ASSIGNABLE(First&, T&&) &&
BOOST_HANA_TT_IS_ASSIGNABLE(Second&, U&&)
>::type>
constexpr pair& operator=(pair<T, U>&& other) {
hana::first(*this) = hana::first(static_cast<pair<T, U>&&>(other));
hana::second(*this) = hana::second(static_cast<pair<T, U>&&>(other));
return *this;
}
// Prevent the compiler from defining the default copy and move
// constructors, which interfere with the SFINAE above.
~pair() = default;
friend struct first_impl<pair_tag>;
friend struct second_impl<pair_tag>;
template <typename F, typename S> friend struct pair;
};
//! @endcond
template <typename First, typename Second>
struct tag_of<pair<First, Second>> {
using type = pair_tag;
};
//////////////////////////////////////////////////////////////////////////
// Operators
//////////////////////////////////////////////////////////////////////////
namespace detail {
template <>
struct comparable_operators<pair_tag> {
static constexpr bool value = true;
};
template <>
struct orderable_operators<pair_tag> {
static constexpr bool value = true;
};
}
//////////////////////////////////////////////////////////////////////////
// Product
//////////////////////////////////////////////////////////////////////////
template <>
struct make_impl<pair_tag> {
template <typename F, typename S>
static constexpr pair<
typename detail::decay<F>::type,
typename detail::decay<S>::type
> apply(F&& f, S&& s) {
return {static_cast<F&&>(f), static_cast<S&&>(s)};
}
};
template <>
struct first_impl<pair_tag> {
template <typename First, typename Second>
static constexpr decltype(auto) apply(hana::pair<First, Second>& p) {
return detail::ebo_get<detail::pix<0>>(
static_cast<detail::ebo<detail::pix<0>, First>&>(p)
);
}
template <typename First, typename Second>
static constexpr decltype(auto) apply(hana::pair<First, Second> const& p) {
return detail::ebo_get<detail::pix<0>>(
static_cast<detail::ebo<detail::pix<0>, First> const&>(p)
);
}
template <typename First, typename Second>
static constexpr decltype(auto) apply(hana::pair<First, Second>&& p) {
return detail::ebo_get<detail::pix<0>>(
static_cast<detail::ebo<detail::pix<0>, First>&&>(p)
);
}
};
template <>
struct second_impl<pair_tag> {
template <typename First, typename Second>
static constexpr decltype(auto) apply(hana::pair<First, Second>& p) {
return detail::ebo_get<detail::pix<1>>(
static_cast<detail::ebo<detail::pix<1>, Second>&>(p)
);
}
template <typename First, typename Second>
static constexpr decltype(auto) apply(hana::pair<First, Second> const& p) {
return detail::ebo_get<detail::pix<1>>(
static_cast<detail::ebo<detail::pix<1>, Second> const&>(p)
);
}
template <typename First, typename Second>
static constexpr decltype(auto) apply(hana::pair<First, Second>&& p) {
return detail::ebo_get<detail::pix<1>>(
static_cast<detail::ebo<detail::pix<1>, Second>&&>(p)
);
}
};
BOOST_HANA_NAMESPACE_END
#endif // !BOOST_HANA_PAIR_HPP
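// ---------------------------------------------------------------------------
// Usage sketch (added for illustration, not part of the original header);
// assumes a Boost.Hana installation is available:
//
//     #include <boost/hana/pair.hpp>
//     #include <boost/hana/first.hpp>
//     #include <boost/hana/second.hpp>
//     namespace hana = boost::hana;
//
//     int main() {
//         constexpr auto p = hana::make_pair(1, 'x');
//         static_assert(hana::first(p) == 1, "");
//         static_assert(hana::second(p) == 'x', "");
//     }
// ---------------------------------------------------------------------------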
|
The Galactic Center region viewed by H.E.S.S.
The Galactic center region is the most active region in the Milky Way, harboring a wealth of photon sources at all wavelengths. H.E.S.S. observations of the Galactic Center (GC) region revealed for the first time in very high energy (VHE, E>100 GeV) gamma-rays a detailed view of the innermost 100 pc of the Milky Way and provided a valuable probe for the acceleration processes and propagation of energetic particles near the GC. H.E.S.S. has taken more than 180 hours of good-quality observations toward the GC region since the experiment started in 2003. A strong and steady gamma-ray source has been detected coincident in position with the supermassive black hole Sgr A*. Besides the central pointlike source, a diffuse emission extended along the Galactic Plane has been detected within about 1$^{\circ}$ around the GC. An accurate analysis of the Galactic center region suggests that the diffuse emission may dominate the highest-energy end of the overall GC source spectrum. I will review the current VHE view by H.E.S.S. of the GC region and briefly discuss the theoretical models which explain the VHE gamma-ray emission of the central source and the diffuse emission.
Introduction
The Galactic Center (GC) region harbours a variety of potential sources of high-energy radiation including the supermassive black hole Sagittarius (Sgr) A* of 2.6 × 10⁶ M⊙ (Schodel et al. 2002), and a number of supernova remnants, among them the Sgr A East remnant of a giant supernova explosion which happened about 10000 years ago. The Galactic Center was therefore a prime target for observations with Imaging Atmospheric Cherenkov telescopes (IACTs), and detection of very high energy (VHE, E > 100 GeV) gamma rays was reported by the CANGAROO (Tsuchiya et al. 2004), VERITAS (Kosack et al. 2004), H.E.S.S. (Aharonian et al. 2004) and MAGIC (Albert et al. 2006) collaborations from the direction of the GC. The nature of this source is still unknown. The H.E.S.S. observations of the GC region led to the detection of a point-like source of VHE gamma-rays at the gravitational center of the Galaxy (HESS J1745-290), compatible with the positions of the supermassive black hole Sgr A*, the supernova remnant (SNR) Sgr A East, and the plerion G359.95-0.04. A larger exposure of the region in 2004 revealed a second source: the supernova remnant G0.9+0.1. The subtraction of these two sources revealed a ridge of diffuse emission extending along the Galactic plane for roughly 2° (Fig. 1).
The H.E.S.S. instrument
The H.E.S.S. (High Energy Stereoscopic System) experiment is an array of four identical imaging atmospheric Cherenkov telescopes located in the Khomas Highland of Namibia (23°16′18″ South, 16°30′00″ East) at an altitude of 1800 m above sea level. Each telescope has an optical reflector consisting of 382 round facets of 60 cm diameter each, yielding a total mirror area of 107 m². The Cherenkov light, emitted by charged particles in the electromagnetic showers initiated by primary gamma rays, is focused on cameras equipped with 960 photomultiplier tubes, each one subtending a field-of-view of 0.16°. The large field-of-view (∼5°) permits survey coverage in a single pointing. The direction and the energy of the primary gamma rays are reconstructed by the stereoscopic technique.
HESS J1745-290: counterparts and spectrum
In December 2004, H.E.S.S. reported the detection of VHE gamma rays from the center of our Galaxy, at the time based on data obtained with the first two H.E.S.S. telescopes during 16 h of observations in 2003. Within the - at the time unprecedented - precision of 30" in RA and Dec, the location of the source HESS J1745-290 was consistent with the Galactic gravitational center, and the spectrum of gamma rays was consistent with a power law up to 10 TeV. Towards identifying the origin of the gamma rays, a multi-year effort was invested aimed at improving the pointing position of the H.E.S.S. telescopes. After a careful investigation of the pointing systematics of the H.E.S.S. telescopes, the systematic error on the centroid position was reduced from 30" to 6" per axis, with a comparable statistical error - by far the best source location achieved in gamma rays so far (Acero et al. 2010). The thus determined source position is within 8" ± 9"stat ± 9"sys from Sgr A*, well consistent with the location of the black hole and the pulsar wind nebula (PWN) G359.95-0.04, but it excludes the Sgr A East remnant as the main counterpart of the VHE emission at the level of 5-7σ, depending on the assumed position of the VHE emission in Sgr A East (Fig. 2, left; see Acero et al. 2010, for more details). Using 93 h of data on the central source accumulated in the years 2004, 2005 and 2006, the energy spectrum of the gamma rays was measured with high precision, revealing an energy break or cutoff in the spectrum around 15 TeV (Fig. 2, right). No signs of variability have been found (Aharonian et al. 2009). Different mechanisms have been suggested to explain the broadband spectrum of the GC. Firstly, the stochastic acceleration of electrons interacting with the turbulent magnetic field in the vicinity of Sgr A*, as discussed by Liu et al. (2006), has been advocated to explain the millimeter and sub-millimeter emission. This model would also reproduce the IR and X-ray flaring. In addition, it assumes that charged particles are accreted onto the black hole, and predicts the escape of protons from the accretion disk and their acceleration (Liu et al. 2006). These protons produce π0 mesons by inelastic collisions with the interstellar medium in the central star cluster of the Galaxy. The cut-off energy found in the gamma-ray spectrum could reflect a cut-off E_cut,p in the primary proton spectrum. In that case, one would expect a cut-off in the gamma-ray spectral shape at E_cut ≃ E_cut,p/30. The measured value of ∼15 TeV would correspond in this scenario to a cut-off energy in the primary proton spectrum between 100-400 TeV, depending on the strength of the exponential cut-off. Energy-dependent diffusion models of protons to the outside of the central few parsecs of the Milky Way (Aharonian & Neronov 2005) are alternative plausible mechanisms to explain the TeV emission observed with the H.E.S.S. instrument. They would lead to a spectral break as in the measured spectrum due to competition between injection and escape of protons outside the vicinity of the GC.
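As a rough order-of-magnitude check of the numbers above (the relation E_cut ≃ E_cut,p/30 is quoted in the text; the exact inferred value depends on the assumed cut-off shape):

\[ E_{\mathrm{cut},p} \simeq 30\,E_{\mathrm{cut},\gamma} \simeq 30 \times 15~\mathrm{TeV} \approx 450~\mathrm{TeV}, \]

an upper limit reached for a sharp cut-off; a gradual exponential cut-off in the proton spectrum produces a smoother turnover in the gamma-ray spectrum, pulling the inferred value down into the quoted 100-400 TeV range.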
The diffuse emission from the Galactic Center Ridge
In order to search for much fainter emission, an analysis of the GC region was made (Aharonian et al. 2006) subtracting the best fit model for point-like emission at the position of HESS J1745-290 and the SNR G0.9+0.1. Two significant features are apparent after subtraction: extended emission spatially coincident with the unidentified EGRET source 3EG J1744-3011 and emission extending along the Galactic plane for roughly 2°. The latter emission is not only clearly extended in longitude l, but also significantly extended in latitude b (beyond the angular resolution of H.E.S.S.) with a characteristic root mean square (rms) width of 0.2°, as can be seen in Fig. 1. The reconstructed gamma-ray spectrum for the region -0.8° < l < 0.8°, |b| < 0.3° (with point-source emission subtracted) is well described by a power law with photon index Γ = 2.29 ± 0.07stat ± 0.20sys (Fig. 3). Given the plausible assumption that the gamma-ray emission takes place near the center of the Galaxy, at a distance of about 8.5 kpc, the observed rms extension in latitude of 0.2° corresponds to a scale of ≈ 30 pc. This value is similar to that of interstellar material in giant molecular clouds in this region, as traced by their CO emission and in particular by their CS emission (Tsuboi et al. 1999). At least for |l| < 1°, a close match between the distribution of the VHE gamma-ray emission and the density of dense interstellar gas is found (see Aharonian et al. 2006, for more details). The close correlation between gamma-ray emission and available target material in the central 200 pc of our galaxy is a strong indication for an origin of this emission in the interactions of CRs. The hardness of the gamma-ray spectrum and the conditions in those molecular clouds indicate that the cosmic rays giving rise to the gamma-rays are likely to be protons and nuclei rather than electrons. Since in the case of a power-law energy distribution the spectral index of the gamma-rays closely traces the spectral index of the CRs themselves, the measured gamma-ray spectrum implies a CR spectrum near the GC with a spectral index close to 2.3, significantly harder than in the solar neighbourhood (where an index of 2.75 is measured). Given the probable proximity and young age of particle accelerators, propagation effects are likely to be less pronounced than in the Galaxy as a whole, providing a natural explanation for the harder spectrum which is closer to the intrinsic CR-source spectra. In addition, the key experimental facts of a harder than expected spectrum, and a higher than expected TeV flux, imply that there is an additional young component to the GC cosmic-ray population above the CR 'sea' which fills the Galaxy. This is the first time that such direct evidence for recently accelerated (hadronic) CRs in any part of our galaxy has been found.
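The angular-to-linear conversion used above is just the small-angle relation, with the GC distance d ≈ 8.5 kpc taken from the text:

\[ s \simeq \theta d = \left(0.2^{\circ} \times \tfrac{\pi}{180^{\circ}}\right) \times 8500~\mathrm{pc} \approx 3.5\times10^{-3} \times 8500~\mathrm{pc} \approx 30~\mathrm{pc}. \]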
The observation of a deficit in VHE emission at l = 1.3° relative to the available target material (see Fig. 2, bottom) suggests that CRs, which were recently accelerated in a source or sources in the GC region, have not yet diffused out beyond |l| = 1°. Therefore the central CR accelerators would only have been active in the GC for the past 10,000 years. The fact that the diffuse emission exhibits a photon index which is the same - within errors - as that of HESS J1745-290 suggests that the underlying astrophysical emitter of HESS J1745-290 could be the source in question. Within the 1' error box of HESS J1745-290 are two compelling candidates for such a CR accelerator. The first is the SNR Sgr A East with its estimated age around 10 kyr. The second is the supermassive black hole Sgr A* which may have been more active in the past (Aharonian & Neronov 2005).
Conclusions
Observations with H.E.S.S. provide a very sensitive view of this interesting region. With the recent data from the H.E.S.S. instrument, a rich VHE gamma-ray morphology becomes evident, giving strong indication for the existence of a cosmic ray accelerator within the central 10 pc of the Milky Way. Future observations with more sensitive instruments such as CTA will significantly improve our knowledge about the GC region at VHE energies. |
import java.util.*;
public class Main1
{
public static void main(String args[])
{
Scanner sc = new Scanner(System.in);
int n=0;
n = sc.nextInt();
int arr [] = new int[n];
for(int i=0;i<n;i++)
{
arr[i] = sc.nextInt();
}
int smaller = arr[0];
int bigger = arr[0];
int smallindex=0;
int bigindex=0;
for(int i=0;i<n;i++)
{
if(arr[i]<=smaller)
{
smaller = arr[i];
smallindex=i;
}
if(arr[i]>bigger)
{
bigger = arr[i];
bigindex=i;
}
}
// Swaps needed: bigindex to bring the maximum to the front, plus
// (arr.length - 1 - smallindex) to bring the minimum to the end.
int cnt = 0;
cnt = bigindex+(arr.length-1-smallindex);
if(bigindex>smallindex)
{
    // The two paths cross, so one swap is shared and counted twice.
    cnt--;
}
System.out.println(cnt);
}
} |
#!/usr/bin/env python
"""
File name: test_CTCI_Ch4_Ex4.py
Author: <NAME>
Date created: 2/19/2019
Date last modified: 2/19/2019
Python Version: 3.7
Description: CTCI 4.4 Check Balanced
Implement a function to check if a binary tree is balanced. For the purposes of
this question, a balanced tree is defined to be a tree such that the heights of
the two subtrees of any node never differ by more than one.
Classes:
BalancedBST
New Methods:
is_balanced_first
is_balanced
_is_balanced_helper
"""
from CTCI.Ch4_Trees_and_Graphs.common.BinaryTree import BinarySearchTree
class BalancedBST(BinarySearchTree):
def is_balanced(self):
"""Return True if BST is balanced
:return BST balanced condition
"""
        if self.root is None:
            return True  # an empty tree is balanced by definition
return False if self._is_balanced_helper(self.root) is False else True
def _is_balanced_helper(self, node):
"""BST Balance Helper
:param node Root of BST
:return Height if balanced, False if unbalanced
"""
if node is None:
return -1
left_height = self._is_balanced_helper(node.left)
right_height = self._is_balanced_helper(node.right)
if left_height is False or right_height is False:
return False
if abs(left_height - right_height) > 1:
return False
else:
return max(left_height, right_height) + 1
def is_balanced_first(self):
"""Determine if BST is balanced
:returns If BST is balanced, return True
"""
        if self.is_empty():
            return True  # an empty tree is balanced by definition
if abs(self.get_height(self.root.left) - self.get_height(self.root.right)) > 1:
return False
else:
return True
|
import React from 'react';
// @material-ui/core components
import { fade, makeStyles } from '@material-ui/core/styles';
import { Card } from '@material-ui/core';
const useStyles = makeStyles((theme) => ({
card: {
border: '0',
marginBottom: '30px',
marginTop: '30px',
borderRadius: '6px',
    color: fade(theme.palette.common.black, 0.87), // 'rgba(#000, ...)' is invalid CSS; fade() builds a proper rgba()
background: theme.palette.common.white,
width: '100%',
    boxShadow: '0 1px 4px 0 ' + fade(theme.palette.common.black, 0.14),
position: 'relative',
display: 'flex',
flexDirection: 'column',
minWidth: '0',
wordWrap: 'break-word',
fontSize: '.875rem',
overflow: 'visible',
},
}));
const CardComponent: React.FC<{
children?: React.ReactNode;
}> = ({ children }) => {
const classes = useStyles();
return <Card className={classes.card}>{children}</Card>;
};
export default CardComponent;
|
<reponame>recp/AssetK<gh_stars>100-1000
/*
* Copyright (C) 2020 <NAME>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../common.h"
#include "common.h"
#include <cglm/cglm.h>
void
ak_coordAxisOri(AkCoordSys * __restrict coordSys,
AkAxisOrientation axis,
int ori[3]) {
int axisOri[3];
int coord[3];
int i, j;
ak_coordAxisToiVec3(axis, axisOri);
ak_coordToiVec3(coordSys, coord);
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++)
if (axisOri[i] == coord[j])
ori[i] = (j + 1) * glm_sign(axisOri[j]);
else if (abs(axisOri[i]) == abs(coord[j]))
ori[i] = -(j + 1) * glm_sign(axisOri[j]);
}
}
void
ak_coordAxisOriAbs(AkCoordSys * __restrict coordSys,
AkAxisOrientation axis,
int newAxisOri[3]) {
ak_coordAxisOri(coordSys, axis, newAxisOri);
newAxisOri[0] = abs(newAxisOri[0]) - 1;
newAxisOri[1] = abs(newAxisOri[1]) - 1;
newAxisOri[2] = abs(newAxisOri[2]) - 1;
}
|
Trends in overweight by educational level in 33 low‐ and middle‐income countries: the role of parity, age at first birth and breastfeeding
This study examined trends in overweight among women of reproductive age by educational level in 33 low- and middle-income countries, and estimated the contribution of parity, age at first birth and breastfeeding to these trends. We used repeated cross-sectional Demographic Health Surveys of 255,828 women aged 25-49 years interviewed between 1992 and 2009. We applied logistic regression to model overweight (BMI > 25 kg m^-2) as a function of education, reproductive variables and time period by country and region. The prevalence of overweight ranged from 3.4% in South and Southeast Asia to 73.7% in North Africa West/Central Asia during the study period. The association between education and overweight differed across regions. In North Africa West/Central Asia and Latin America, lower education was associated with higher overweight prevalence, while the inverse was true in South/Southeast Asia and Sub-Saharan Africa. In all regions, there was a consistent pattern of increasing overweight trends across all educational groups. Older age at first birth, longer breastfeeding and lower parity were associated with less overweight, with implications for differences by educational level in overweight prevalence and trends. |
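As a schematic illustration of the modelling step described above (not the study's actual code; every column name and the synthetic data are assumptions):

import numpy as np
import pandas as pd
import statsmodels.formula.api as smf

# Synthetic stand-in for a prepared per-woman DHS extract (ages 25-49).
rng = np.random.default_rng(0)
n = 2000
dhs_women = pd.DataFrame({
    "overweight": rng.integers(0, 2, n),  # BMI > 25 indicator
    "education": rng.choice(["none", "primary", "secondary+"], n),
    "period": rng.choice(["1992-1999", "2000-2009"], n),
    "parity": rng.integers(0, 8, n),
    "age_first_birth": rng.integers(15, 35, n),
    "breastfeeding_months": rng.integers(0, 36, n),
})

# Overweight as a function of education, period and reproductive variables,
# with an education-by-period interaction so trends can differ by group.
model = smf.logit(
    "overweight ~ C(education) * C(period) + parity"
    " + age_first_birth + breastfeeding_months",
    data=dhs_women,
).fit(disp=0)
print(model.summary())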
<filename>src/interfaces/sort-options.ts
import { SortOrder } from "enums/sort-order";
interface SortOptions<T = any> {
column?: keyof T;
order?: SortOrder;
}
export type { SortOptions };
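// Usage sketch (added for illustration): the `User` shape and the
// `SortOrder.Asc` member name are assumptions, since enums/sort-order
// is not shown here.
//
//   interface User { name: string; age: number; }
//   const byAge: SortOptions<User> = { column: "age", order: SortOrder.Asc };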
|
/**
* Fill this node with a deep copy of the given node.
*/
public void copyFrom(QueryTreeNode node) throws StandardException {
super.copyFrom(node);
ResultSetNode other = (ResultSetNode)node;
this.resultColumns = (ResultColumnList)getNodeFactory().copyNode(other.resultColumns,
getParserContext());
this.insertSource = other.insertSource;
} |
import * as Transport from 'winston-transport'
interface DatadogTcpTransportOptions extends Transport.TransportStreamOptions {
host: string,
port: number,
apiKey: string,
level?: string,
reconnectInterval?: number,
reconnectAttempts?: number,
bufferLength?: number,
tags?: {}
}
declare class DatadogTcpTransport extends Transport {
constructor(options: DatadogTcpTransportOptions);
}
export = DatadogTcpTransport
|
/**
* Method to register a client by calling the dynamic client registration endpoint in API Manager.
*
* @param applicationRequestDTO OAuth Application Request DTO
* @param username tenant username
* @param password tenant password
* @return OAuthApplicationInfoDTO
*/
public static OAuthApplicationInfoDTO registerClient(OAuthApplicationRequestDTO applicationRequestDTO,
String username, char[] password)
throws OnPremiseGatewayException {
String restApiVersion = ConfigManager.getConfigManager()
.getProperty(OnPremiseGatewayConstants.API_VERSION_PROPERTY);
if (restApiVersion == null) {
restApiVersion = OnPremiseGatewayConstants.API_DEFAULT_VERSION;
if (log.isDebugEnabled()) {
log.debug("Using default API version: " + restApiVersion);
}
} else if (OnPremiseGatewayConstants.CLOUD_API.equals(restApiVersion)) {
restApiVersion = OnPremiseGatewayConstants.EMPTY_STRING;
if (log.isDebugEnabled()) {
log.debug("Cloud API doesn't have an version. Therefore, removing the version");
}
}
String apiPublisherUrl = ConfigManager.getConfigManager()
.getProperty(OnPremiseGatewayConstants.API_PUBLISHER_URL_PROPERTY_KEY);
if (apiPublisherUrl == null) {
apiPublisherUrl = OnPremiseGatewayConstants.DEFAULT_API_PUBLISHER_URL;
if (log.isDebugEnabled()) {
log.debug("Using default API publisher URL: " + apiPublisherUrl);
}
}
String clientRegistrationUrl =
apiPublisherUrl + OnPremiseGatewayConstants.DYNAMIC_CLIENT_REGISTRATION_URL_SUFFIX
.replace(OnPremiseGatewayConstants.API_VERSION_PARAM, restApiVersion).replace("//",
OnPremiseGatewayConstants.URL_PATH_SEPARATOR);
URL apiPublisherUrlValue = MicroGatewayCommonUtil.getURLFromStringUrlValue(apiPublisherUrl);
HttpClient httpClient = APIUtil.getHttpClient(apiPublisherUrlValue.getPort(), apiPublisherUrlValue
.getProtocol());
String authHeader = getBasicAuthHeaderValue(username, password);
HttpPost httpPost = new HttpPost(clientRegistrationUrl);
httpPost.addHeader(OnPremiseGatewayConstants.AUTHORIZATION_HEADER, authHeader);
httpPost.addHeader(OnPremiseGatewayConstants.CONTENT_TYPE_HEADER,
OnPremiseGatewayConstants.CONTENT_TYPE_APPLICATION_JSON);
try {
StringEntity requestEntity = new StringEntity(applicationRequestDTO.toString());
requestEntity.setContentType(OnPremiseGatewayConstants.CONTENT_TYPE_APPLICATION_JSON);
httpPost.setEntity(requestEntity);
} catch (UnsupportedEncodingException e) {
throw new OnPremiseGatewayException("Failed to assign configured payload to client registration " +
"request.", e);
}
String response = HttpRequestUtil.executeHTTPMethodWithRetry(httpClient, httpPost,
OnPremiseGatewayConstants.DEFAULT_RETRY_COUNT);
if (log.isDebugEnabled()) {
log.debug("Received Client Registration OAuthApplicationInfoDTO");
}
InputStream is = new ByteArrayInputStream(response.getBytes(
Charset.forName(OnPremiseGatewayConstants.DEFAULT_CHARSET)));
ObjectMapper mapper = new ObjectMapper();
try {
return mapper.readValue(is, OAuthApplicationInfoDTO.class);
} catch (IOException e) {
throw new OnPremiseGatewayException("Failed to convert Client Registration response into " +
"OAuthApplicationInfoDTO.", e);
}
} |
/**
* Creates response with status 'OK'.
*
* @param body for the response
* @return HTTP response instance
*/
protected HttpResponse okResponse(InputStream body) {
BasicStatusLine statusLine = new BasicStatusLine(HttpVersion.HTTP_1_1, 200, "OK");
BasicHttpEntity entity = new BasicHttpEntity();
entity.setContent(body);
BasicHttpResponse response = new BasicHttpResponse(statusLine);
response.setEntity(entity);
return response;
} |
#include <bits/stdc++.h>
#define ll long long
using namespace std;
ll a[1000000];
// string lltoString(ll timestamp)
// {
// string result;
// ostringstream ss;
// ss<<timestamp;
// istringstream is(ss.str());
// is>>result;
// return result;
// }
// Converts a non-negative long long to its decimal string representation.
string lltoString(ll t) {
    if (t == 0) return "0"; // the digit loop below would return "" for 0
    string tmp = "";
    while(t) {
        int y = t % 10;
        tmp += y + '0';
        t /= 10;
    }
    string ans = "";
    int len = tmp.length();
    for(int i = len-1; i >= 0 ; i--) {
        ans += tmp[i];
    }
    return ans;
}
// Cost to turn x into y: digits of y are matched greedily as a subsequence
// of x; unmatched digits of x are deleted and unmatched digits of y appended.
int getAns(ll x, ll y)
{
string sx = lltoString(x);
string sy = lltoString(y);
int lenx = sx.length();
int leny = sy.length();
int pos = 0, tmp = 0;
for (int i = 0; i < lenx; i++)
{
if (sy[pos] == sx[i])
{
tmp++;
pos++;
}
if (pos == leny)
break;
}
int ans = lenx + leny - 2 * tmp;
return ans;
}
int main()
{
ios::sync_with_stdio(false);
cin.tie(0);
a[0] = 1;
for (int i = 1; i <= 60; i++)
{
a[i] = a[i - 1] * 2;
// cout << a[i] << endl;
}
int t;
cin >> t;
while (t--)
{
ll x;
cin >> x;
int ans = 0x7fffffff;
for (int i = 0; i <= 60; i++)
{
ans = min(ans, getAns(x, a[i]));
}
cout << ans << "\n";
}
cout.flush();
getchar();
return 0;
} |
// jClientID returns a package-qualified "Client" identifier when pkgPath is
// set, and a bare one otherwise.
func jClientID(pkgPath string) *j.Statement {
if pkgPath != "" {
return j.Qual(pkgPath, srcClient)
}
return j.Id(srcClient)
} |
Dell Computer Inc. (NASDAQ:DELL), a stalwart pioneer in the direct marketing of computer equipment, has turned to distributors Ingram Micro Inc. and Tech Data Corp. to win a bigger share of sales to small to medium-sized businesses.
Under terms of the deal, Dell will provide Ingram Micro (NYSE:IM) and Tech Data (NASDAQ:TECD) with 11 preconfigured desktops and three preconfigured notebooks from its Vostro line.
The agreement also includes some of the company's monitors and extended limited hardware warranties. The two distributors each have long-standing relationships with Dell for sales of third-party software and peripherals, and those arrangements will not be disturbed by the new one.
Dell officials attributed the distribution agreements to a desire to get product into customers' hands faster.
"Our partners often have cases where they need a product and need it quickly," said Greg Davis, Dell vice president and general manager, global commercial channels. "Our agreements with Tech Data and Ingram Micro meet that need and provide an opportunity to grow and expand the reach of our partners."
Resellers interested in the standard configuration units available through distribution will retain the option of buying directly from Dell, Davis said.
Both Ingram Micro and Tech Data viewed Dell's move as a wholesale endorsement of the channel and its ability to get to SMB customers, and each praised the vendor for making it.
"Two-tier distribution is the most effective and efficient way to get product to the SMB marketplace," said Keith Bradley, president of Ingram Micro North America. "It's great to see a vendor such as Dell finally realize that."
"To see that Dell is recognizing the value of distribution and the efficiencies and access to product in our model benefits us greatly," said Joe Quaglia, Tech Data senior vice president of U.S. Marketing.
Product list could expand
For now, sales are restricted to the United States, but all involved parties said that the agreement will extend to Canada in the near future. The distributors acknowledged that if the arrangements succeed, their expectation is that the list of available products will expand to include Dell's servers, printers, projectors and accessories.
"As our server and storage division continues to scale for us, [adding Dell servers] would be a natural extension," Bradley said.
Both Tech Data and Ingram Micro quickly pointed out that the agreements are intended to uncover new SMB customers for Dell and not designed to snatch market share from competitors already selling through the distributors. Dell believes it can up its SMB share by leveraging the extended reseller and customer lists, rapid delivery times, logistics, support and resources afforded it by distribution.
"We view this as an incremental revenue opportunity," said Bradley. "We'll identify VARs that have targeted end users with less than 25 seats and have our telesales organization call to educate them on customers in their area they can sell to."
"We know there is a large SMB community that currently buys Dell product," said Quaglia. "Dell definitely doesn't want us to lay back, they want us to hit that market," he said.
Dell's step into distribution fleshes out a channel sales strategy the vendor initially crafted two years ago and positions it on footing similar to that of key rivals Hewlett Packard Co. and IBM Corp., both of whom distribute computer equipment through Ingram Micro and Tech Data. IBM declined to comment on the deal, citing company policy.
Prior to the agreement, Dell's two-tier distribution efforts had been confined to countries and markets where it believed it could not reach channel partners on its own. The company had steadfastly refrained from using distributors in segments such as SMB where it believed it could tend to partners directly.
But a growing number of its 15,000 U.S.-based solution providers, many of whom home in on the SMB market, voiced interest in obtaining systems without waiting the typical five-to-10 days for delivery when purchasing directly from Dell. Both Ingram Micro and Tech Data have pledged to provide same-day shipping of Dell's products to resellers. |
<reponame>moonrhythm/parapet<filename>pkg/headers/intercept.go
package headers
import (
"bufio"
"net"
"net/http"
)
// InterceptRequest creates new request interceptor
func InterceptRequest(f func(http.Header)) *RequestInterceptor {
return &RequestInterceptor{Intercept: f}
}
// RequestInterceptor intercepts request's headers
type RequestInterceptor struct {
Intercept func(http.Header)
}
// ServeHandler implements middleware interface
func (m RequestInterceptor) ServeHandler(h http.Handler) http.Handler {
if m.Intercept == nil {
return h
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
m.Intercept(r.Header)
h.ServeHTTP(w, r)
})
}
// ResponseInterceptFunc is the function for response's interceptor
type ResponseInterceptFunc func(w ResponseHeaderWriter)
// ResponseHeaderWriter type
type ResponseHeaderWriter interface {
StatusCode() int
Header() http.Header
WriteHeader(statusCode int)
}
// InterceptResponse creates new response interceptor
func InterceptResponse(f ResponseInterceptFunc) *ResponseInterceptor {
return &ResponseInterceptor{Intercept: f}
}
// ResponseInterceptor intercepts response's headers
type ResponseInterceptor struct {
Intercept ResponseInterceptFunc
}
// ServeHandler implements middleware interface
func (m ResponseInterceptor) ServeHandler(h http.Handler) http.Handler {
if m.Intercept == nil {
return h
}
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
nw := interceptRW{
ResponseWriter: w,
f: m.Intercept,
status: http.StatusOK,
}
defer nw.intercept()
h.ServeHTTP(&nw, r)
})
}
type interceptRW struct {
http.ResponseWriter
wroteHeader bool
intercepted bool
status int
f ResponseInterceptFunc
}
func (w *interceptRW) intercept() {
if w.intercepted {
return
}
w.intercepted = true
w.f(w)
}
func (w *interceptRW) WriteHeader(statusCode int) {
if !w.intercepted {
w.status = statusCode
w.intercept()
}
if w.wroteHeader {
return
}
w.wroteHeader = true
w.ResponseWriter.WriteHeader(statusCode)
}
func (w *interceptRW) Write(p []byte) (int, error) {
if !w.wroteHeader {
w.WriteHeader(http.StatusOK)
}
return w.ResponseWriter.Write(p)
}
// StatusCode returns status code
func (w *interceptRW) StatusCode() int {
return w.status
}
// Push implements Pusher interface
func (w *interceptRW) Push(target string, opts *http.PushOptions) error {
if w, ok := w.ResponseWriter.(http.Pusher); ok {
return w.Push(target, opts)
}
return http.ErrNotSupported
}
// Flush implements Flusher interface
func (w *interceptRW) Flush() {
if w, ok := w.ResponseWriter.(http.Flusher); ok {
w.Flush()
}
}
// Hijack implements Hijacker interface
func (w *interceptRW) Hijack() (net.Conn, *bufio.ReadWriter, error) {
if w, ok := w.ResponseWriter.(http.Hijacker); ok {
return w.Hijack()
}
return nil, nil, http.ErrNotSupported
}
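
// Usage sketch (added for illustration): wiring the interceptors into a
// parapet server. The server construction below is an assumption; only
// InterceptRequest/InterceptResponse come from this package.
//
//	s := parapet.New()
//	s.Use(InterceptRequest(func(h http.Header) {
//		h.Del("X-Internal-Token") // drop an internal header before proxying
//	}))
//	s.Use(InterceptResponse(func(w ResponseHeaderWriter) {
//		w.Header().Set("X-Served-By", "parapet")
//	}))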
|
async def create_container(self, container_name: str, container_configuration: ContainerConfiguration) \
-> Optional[Union[DockerContainer, Container]]:
LOGGER.debug("Creating container: {:s}".format(container_name))
if container_configuration.networks:
first_network_name = container_configuration.networks[0]
first_network = {first_network_name: {}}
else:
first_network = {}
try:
container = await self.__docker_client.containers.create(
name=container_name,
config={
"Image": container_configuration.image,
"Env": container_configuration.environment,
"HostConfig": {
"Binds": container_configuration.volumes,
"AutoRemove": True
},
"NetworkingConfig": {
"EndpointsConfig": first_network
}
}
)
if not isinstance(container, DockerContainer):
LOGGER.warning("Failed to create container: {:s}".format(
container_configuration.container_name))
return None
for other_network_name in container_configuration.networks[1:]:
other_network = await self.__docker_client.networks.get(net_specs=other_network_name)
await other_network.connect(
config={
"Container": container_name,
"EndpointConfig": {}
}
)
return container
except ClientError as client_error:
LOGGER.warning("Received {}: {}".format(type(client_error).__name__, client_error))
LOGGER.info("Trying the 'docker' library instead of 'aiodocker'")
return await self._create_container_backup(container_name, container_configuration)
except DockerError as docker_error:
LOGGER.warning("Received {}: {}".format(type(docker_error).__name__, docker_error))
return None |
Two ambitious, imaginative, complex novels, both loaded with drama and improbable incidents and crazy people. One of them gently asking you, by means of a series of absorbing textual and graphic puzzles, to be mindful of human fallibility repeating itself, and of the fragility and beauty of the earth; the other, an interesting but ultimately narcissistic tale of art and marriage.
I feel kind of bad because I am clearly the short straw in the “Lotto” of this tournament, so far as Groff is concerned; even the name “Lancelot Satterwhite” caused me to snort right on contact. But people love this book, really love it—the president, even, loves this book, apparently.
So the excitingly depraved revelations contained in its denouement recall the wild antics of the late Jackie Collins, but they are dressed up in this very solemn Henry Jamesian manner, and the disconnect is fatal. This book takes itself beyond seriously, and while that is not a crime, it is not to my taste.
The real problem with Fates and Furies, though, is that the story collapses under the weight of its own melodrama. Now, I love melodrama, and genre fiction in general; I have collected vintage Harlequin romances for years on end, so I should really love this book, and I am sad I didn’t. But even a decent Harlequin romance will have a certain slight self-mocking awareness that the whole business of sex and romance is kind of silly, and comical, whereas Fates and Furies doesn’t have a comical bone in its whole bod; there is a little “dry wit,” but in general the world of the novel is presented as important and true, as deserving of attention and as full of significance as the real world, the one we inhabit.
With respect to the style, for example to the moustache that “gave a leap like a goosed mouse” (wtf), I know a lot of people like this kind of prose, but it is not for me. Groff goes straight for the purple, page after page. “This gorgeous girl he’d magicked into wife,” for instance, a phrase repeatedly invoked by other critics as an example of the writer’s skill. I on the other hand gave a leap like a… hrm. Like a French waiter’s moustache. “The hand clutching the curtain was red, ragged, as if it had spent a lifetime among the cold guts of fish.” “The clouds like blackberry jam in the sky, faint double-boiler thunder from the north.” For this reader, at least, clouds are not and can never be even remotely like blackberry jam. It’s distracting, like being attacked by curlicues.
The story presents a critique of Lotto’s effortless privilege, how obstacles just seem to melt out of the way of a “golden boy,” when in reality, someone else is always breaking a sweat shoving them aside for him. This critique, however, is offered from a place of equally oblivious privilege. To some extent, that’s deliberate: Groff has sympathy and censure for both her protagonists, though she is definitely more on Mathilde’s side. But it’s all very self-serious, full of portent, dressed up in bits of Greek mythology, and baldly materialist; taste-worshipping, you might say. This book truly loves, for example—especially, its heroine loves—fancy paintings and books, fancy restaurants and clothes.
Fates and Furies is the torridly sensual story of a grown-up Mean Girl named Mathilde and the doomed and clueless but radiantly beautiful and magnetic love of her life, the fabulous playwright Lancelot (“Lotto”) Satterwhite. First we see their marriage through his eyes; then through hers. Surprise! They do not look at this thing the same way at all.
Ordinarily I prefer my fiction fairly straight, so I figured that Lauren Groff’s Fates and Furies , a hefty novel consisting largely of the minutely detailed dissection of a long marriage, would be right up my street. Romance, duplicity, intellectual sparring, sex, drama, talk of art, elegant interiors, bits of French. Bring it on!
There’s one huge problem with this novel, and that is that it is too damn hard to understand. It asks an enormous amount of the reader, perhaps unreasonably so. You don’t read it so much as mount an investigation that may take weeks.
It was loping toward me. Before I could take a breath, a beast was on me with fangs and fur and claws. Kicking out with my one good leg and spinning wildly in the dust, I struck out with my broken foot … I have never been fearful of hounds, but the attack was mounted with such speed and ferocity, I began to think I would know what it was to be eaten alive by a growling living thing.
Dodson’s prose, broken up into all these epistolary and scholarly and bureaucratic fragments, is for the most part spare and workmanlike. The few passages of startling beauty that have slipped into his pages, arising generally from the pen of the 19th-century naturalist Zadock Thomas, are all the more striking for their scarcity:
While you are having fun sorting all the pieces of the story out, Dodson is exploring many other things, such as Texas and its history, and natural history; how tyranny takes hold in a frightened population; our lack of respect for the natural world; ambition, betrayal, and lust; the helpless protectiveness of parents for their children; archiving, handwriting, and all sorts of things to do with paper and recordkeeping; and love, death, and what may come after or between. Not only in these imagined worlds, but in our own.
The hidden things in this book are so subtly designed. For example, I realized with a shock that the verso of the faux-tissue-guard page doesn’t exactly match the recto, as a casual reader would assume it must. You can’t possibly look too closely at each page. Pay particular attention to the dustjacket!
Mention has been made of the book-within-a-book structure of Bats of the Republic, but there are at least four books in there, by my reckoning: the one we’re reading, plus Back on the Nightway, The Sisters Gray, and The City-State, each written by a different author (kind of). Plus another one that is maybe destroyed. In every bit of the text and images there are clues and details that are pertinent to the stories. There are unreliable narrators, too. (That is the part I am kind of stuck in—trying to figure out exactly whom to believe, and when.)
The story is set in alternative versions of Texas that are semi-recognizable, with protagonists who read as various alternative versions of the author. In 1843, Zadock Thomas has been sent on a super-dangerous mission by his frail beloved’s father to deliver a secret letter to General Irion in the middle of the war over Texas. In 2143, his descendant, Zeke Thomas, inhabits an equally dangerous Republic of Texas: Civilization has collapsed and humanity has withdrawn into a walled territory consisting of seven city-states. Zeke stands to inherit the senate seat of his recently deceased grandfather, though his enemies are scheming to prevent him from taking power.
Author/designer/illustrator Zachary Thomas Dodson designed this book within an inch of its life, is what I am saying. It’s amazing, just as a made thing. But to read? I went into Bats of the Republic expecting to be manifestly annoyed by all the steampunk gewgaws and furbelows. And found instead an exquisitely complex and exciting book that I will be not only recommending here, but returning to as soon as I am done writing, because I haven’t worked it all out yet and I MUST. With this weirdly enchanting artifact, Dodson has vaulted right into the top rank of speculative fictionalists.
Maria Bustillos is a journalist and critic based in Los Angeles. She has written on culture, politics, technology, and business for the New Yorker, Harper’s, the New York Times, The Awl, the Guardian, Bloomberg, etc. Her first published fiction appeared in the Paris Review earlier this year. Known connections to this year’s contenders: “I had an essay published in that cat video collection at Coffee House Press, which published The Story of My Teeth.”
Maria Bustillos: The dystopian novel Bats of the Republic announces itself from the first instant as being gimmicky as hell, though very handsomely and intricately produced: Its slick, heavy pages contain epistolary fragments written hundreds of years apart, books within books, codes and symbols, inserts and envelopes and trompe l’oeil tissue guards, different colored inks to signify different timelines, mirror writing, handwritten notes, a telegram, maps and drawings, charts, certificates, and forms of all kinds.
Match Commentary By Kevin Guilfoile & John Warner
John: Whelp, this seals it. I am officially the worst prognosticator in the history of the ToB. Prior to the tourney I put A Little Life and Fates and Furies as the 1 and 1a favorites to win the whole thing and here it is, day one and Fates goes down.
Kevin: Fates and Furies was my favorite novel of 2015, John. It’s also true that I might be practically its perfect reader.
Lauren and I grew up on the same street, on the same lake, in the same small town, maybe a hundred yards from each other. She was a decade younger than me, so we didn’t know each other then, but I know exactly the house she lived in, and the route she walked to school (where we sat in the same classrooms and had nearly all the same teachers). I know the kind of pizza she ate, and even the guy who made it (What’s up Johnny!). When a character in her novels goes to Stewart’s for ice cream, I know exactly the Stewart’s she means. When that same character drives to a nearby village to go for a swim in the public gym, I know exactly the gym she means. You and I know a lot of writers and it is always a smile when you come across a passage in one of their books that reveals something about the author that you already knew. I don’t know Lauren nearly as well as I know other novelists, but I have that experience more with her fiction than I do with anyone else’s. There is something unique about sharing the place of your childhood that creates an almost telepathic bond. It’s a shorthand you only share with people who understand the map of your youth.
But that isn’t why I loved this book so much.
I was captivated by it from the start, but the way the story flips over and the narrative starts tumbling rapidly downhill at the midway point is a marvel of novelistic architecture. It begins as the sum of a number of literary clichés (whirlwind romance + husband’s family doesn’t like the new wife + tortured writer) and, in fact, the first half of this story would be a pretty conventional literary novella on its own. Then Lauren spends the rest of the book—the other side of the equation—deconstructing those clichés pretty masterfully.
Judge Bustillos pulls a few lines from the novel as evidence of Groff’s purplish prose (although she acknowledges the degree to which Lauren’s writing has been praised in most quarters). I am generally against cherry-picking lines to showcase bad (or good) writing, even though I catch myself doing it, as well—it’s an irresistible rabbit in the critic’s bag of tricks. But I think this is an example that illustrates why that practice can be misleading. Throughout the novel, Lauren makes some curious choices with regard to word choice and syntax, to be sure, but it is all in the service of a voice I found entirely hypnotic. If you don’t fall under its spell, you may react the way Judge Bustillos did, but I was completely in its thrall. If the last words on the last page of this book were, “Kevin, you are now a chicken,” I would be writing this commentary in scratches on the floor of an overcrowded Tyson killing floor.
Lauren also does something with point of view that I was really taken with. Most of this story is written in a third-person POV attached to one or other of the characters. Occasionally, however, she will slip into omniscience, only for a few words, usually to correct something that one of the characters is wrongly (or perhaps deceptively) saying or thinking. (Forget that crap I said before, I’m cherry-picking an example):
“I mean, of course, wife in the genderless sense of helpmeet,” Lancelot said. “There are male wives. When I was an actor, I was so underemployed that I basically did all the housework myself while Mathilde earned the dough. [He did the dishes; that part was true.] … ”
The proximate function of this device is to keep reminding us of the unreliability of our narrators, and there are no doubt precedents for it going back to the Greek chorus (which folds neatly into the dramaturgical motifs throughout the book), but playing loose with POV is a dangerous game for a writer and I can’t think of many novelists who have pulled it off as originally and effectively as Groff has here.
John: I read Fates and Furies on a cross-country flight. There are no higher stakes for a book than being my airplane reading. If the book is no good, I no longer even have SkyMall for a distraction. It is whatever I’ve chosen to read or nothing, and I enjoyed Fates so thoroughly that I would’ve been happy to bypass San Francisco and go on to Hawaii just so I had enough time to get through the last 80 pages, after which I would sit back and have a good think about the book for the rest of the flight.
I agree that the purple prose charge seems unfair. Of course, I’m saying that because I love the book and the writing, so I’m inclined to defend it from the standpoint of my conclusion, rather than as a judicious weighing of the evidence. But for all of Groff’s verbal filigree, it never felt like she had anything less than total control, including the use of the “Greek Chorus.” There is another very popular book in our tournament to which the purple prose charge could stick, but I disagree with it here.
Kevin: I read Bats of the Republic in ARC form about a year ago. The advance copy was a paperback, printed in black and white. Unlike the finished hardcover, there was no MacGuffin envelope tucked into the binding at the end—its contents were presented as just more pages in the book. (I won’t say more about this, but those who have read the novel will understand why that is problematic.) When I finished I thought it was very enjoyable, and also that I didn’t really understand it entirely. But the way the book was described in accompanying material made it clear that the final version was going to be ambitious in thrilling ways. (As Judge Bustillos describes, you will get more pleasure from simply holding this book in your hands than from any other published this year.)
Months later, a friend of mine who is a firefighter was looking for something to read during down time at the station. I thought Bats might be something he’d dig, and so I recommended it. He came over my house a few days later, hardcover in hand, and wanted to talk about it. As he spoke I realized how much I had missed reading it in ARC form as I did. Just from that discussion (and a subsequent rereading of several sections and especially the ending) my estimation of the book increased tremendously.
Like Judge Bustillos, I think there is still a great deal I have missed, but the parts I don’t entirely understand have been fun and interesting to contemplate (and discuss) in the months since. Bats of the Republic is a book that would almost certainly be better digested on a subsequent reading, and there is plenty of textual evidence that Dodson actually wants you to go back and start the book over again. I will definitely do that someday (I now have a hardcover and will likely keep it forever, it’s so beautiful), but for now I will enjoy following the reactions of smart people like Judge Bustillos as they try to unpack it.
John: Bats is a beautiful artifact and I took a real pleasure in using the ribbon to mark my place every time I left off from the text. I have to believe that reading it in ARC form without the benefit of the full extent of Dodson’s design and packaging would have been a greatly diminished experience. As I read, I really, really, really wanted to know what was in that envelope, simply for the fact that it was sealed in an envelope and I was commanded not to look until the right time.
Despite my admiration for the design and packaging, and that tantalizing envelope, for me, Bats was meh. Not for a second did I feel emotionally invested in any of the characters or their fates. It’s a fun adventure yarn, and some of the nested metatextual stories within stories are intriguing, but I’m going to suggest there’s a lot less there, there, than Judge Bustillos perceives after her initial reading. The Möbius design theme that runs through the book is likely to be the experience if one goes looking for deeper connections: an endless loop over the same ground. I think the interesting novel inside the amazing packaging is more illusory than real. This aura of mystery plays to the novel’s advantage in that it can be different things to different people, but when I reflect back on the actual reading experience, I grieve for the loss of Fates from our tournament.
I can only hope it returns as a Zombie.
Kevin: Losing Fates is a crushing, personal blow for me on Day One (or Day 1A), but my consolation prize (as I hopefully wait for Lauren’s potential return in the Zombies) will be further discussion about Bats of the Republic. I understand what you are saying about it. I, too, find myself rolling my eyes at people who think a book must be good because they don’t understand it. To be clear, Bats isn’t some obtuse, impenetrable read, and even on the surface, I found myself engaged in the story. I just don’t feel like I have fully synthesized all the elements here. It’s thrilling the way a half-finished puzzle is. Of course a puzzle only thrills if you have confidence in the puzzle constructor. Your read of the novel didn’t provide you with that faith, but Judge Bustillos and I felt more reassured. When I get to my re-read I will report back to you and let you know if I still have the same religion.
Brad Listi judges tomorrow’s matchup, which pits one of the most celebrated debuts of 2015, Viet Thanh Nguyen’s The Sympathizer, against an almost forgotten debut originally published some 40 years ago, Fran Ross’s Oreo.
The official 2016 Tournament of Books T-Shirt by book designer Janet Hansen. Order yours! |
<gh_stars>10-100
import CircuitBreaker from 'circuit-breaker-js';
/** We give more weight to ratings with reviews than those without. */
export const dtrTextReviewMultiplier = 10;
export const dtrD2ReviewsEndpoint = 'https://api.tracker.gg/api/v1/destiny-2/db/reviews';
const TIMEOUT = 3000;
const HTTP_503_TIMEOUT = 10 * 60 * 1000;
const circuitBreaker = new CircuitBreaker({
timeoutDuration: TIMEOUT,
windowDuration: 1 * 60 * 1000, // 1 minute
volumeThreshold: 2
});
let lastFiveOhThreeCaught: Date | null;
function fiveOhThreeCaughtRecently(): boolean {
if (!lastFiveOhThreeCaught) {
return false;
}
return Date.now() - lastFiveOhThreeCaught.getTime() <= HTTP_503_TIMEOUT;
}
export function dtrFetch(url: string, body: object) {
const controller = typeof AbortController === 'function' ? new AbortController() : null;
const signal = controller && controller.signal;
const request = new Request(url, {
method: 'POST',
body: JSON.stringify(body),
headers: {
'Content-Type': 'application/json'
}
});
let timer;
if (controller) {
timer = setTimeout(() => controller.abort(), TIMEOUT);
}
return new Promise((resolve, reject) => {
if (fiveOhThreeCaughtRecently()) {
reject(new Error('Waiting due to HTTP 503'));
} else {
circuitBreaker.run(
(success, failure) => {
Promise.resolve(fetch(request, { signal }))
.finally(() => {
if (controller) {
clearTimeout(timer);
}
})
.then((r) => {
if (r.status === 503) {
lastFiveOhThreeCaught = new Date();
failure();
reject(new Error('HTTP 503 returned'));
}
return r;
})
.then(
(v) => {
success();
resolve(v);
},
(e) => {
failure();
reject(e);
}
);
},
() => reject(new Error('Circuit breaker open'))
);
}
});
}
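
// Usage sketch (added for illustration): the endpoint path and payload shape
// below are assumptions, not the real DTR API contract.
//
//   dtrFetch(`${dtrD2ReviewsEndpoint}/fetch`, { referenceIds: ['12345'] })
//     .then((reviews) => console.log('got reviews', reviews))
//     .catch((e) => console.warn('DTR unavailable, skipping ratings:', e.message));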
|
/*
*-----------------------------------------------------------------------------
*
* TransferPostgresError --
*
* Obtains the connection related error message from the Postgres
* client library and transfers them into the Tcl interpreter.
* Unfortunately we cannot get error number or SQL state in
* connection context.
*
* Results:
* None.
*
* Side effects:
*
* Sets the interpreter result and error code to describe the SQL
* connection error.
*
*-----------------------------------------------------------------------------
*/
static void
TransferPostgresError(
Tcl_Interp* interp,
PGconn* pgPtr
) {
Tcl_Obj* errorCode = Tcl_NewObj();
Tcl_ListObjAppendElement(NULL, errorCode, Tcl_NewStringObj("TDBC", -1));
Tcl_ListObjAppendElement(NULL, errorCode,
Tcl_NewStringObj("GENERAL_ERROR", -1));
Tcl_ListObjAppendElement(NULL, errorCode,
Tcl_NewStringObj("HY000", -1));
Tcl_ListObjAppendElement(NULL, errorCode, Tcl_NewStringObj("POSTGRES", -1));
Tcl_ListObjAppendElement(NULL, errorCode,
Tcl_NewWideIntObj(-1));
Tcl_SetObjErrorCode(interp, errorCode);
Tcl_SetObjResult(interp, Tcl_NewStringObj(PQerrorMessage(pgPtr), -1));
} |
/**
* @TODO Implement main method to serve as test client for the EvacuationQueue
*/
public static void main(String args[])
{
} |
/**
* Represents the result of a call to <i>getUpdates</i> REST service.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public class UpdateResult implements Serializable {
private static final long serialVersionUID = -4560342931918215225L;
private boolean ok;
@JsonProperty("result")
private List<Update> updates;
public UpdateResult() {
}
public boolean isOk() {
return ok;
}
public void setOk(boolean ok) {
this.ok = ok;
}
public List<Update> getUpdates() {
return updates;
}
public void setUpdates(List<Update> updates) {
this.updates = updates;
}
@Override
public String toString() {
final StringBuilder sb = new StringBuilder("UpdateResult{");
sb.append("ok=").append(ok);
sb.append(", updates=").append(updates);
sb.append('}');
return sb.toString();
}
} |
def sum(self, left_index, right_index):
    """Inclusive-range sum query on an iterative segment tree
    (leaves stored at tree[n .. 2n-1])."""
    if right_index < left_index:
        raise ValueError("Right index must be higher than left index")
    n = len(self.items)
    left_index += n        # shift both bounds into the leaf layer
    right_index += n + 1   # +1 makes the right bound exclusive
    result = 0
    while left_index < right_index:
        if left_index & 1:             # left is a right child: take it, step right
            result += self.tree[left_index]
            left_index += 1
        if right_index & 1:            # right bound is a right child: step left, take it
            right_index -= 1
            result += self.tree[right_index]
        left_index >>= 1               # climb one level
        right_index >>= 1
    return result |
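
# ---------------------------------------------------------------------------
# Minimal harness for the query above (added for illustration): the
# `SegmentTree` class name and constructor are assumptions; only the `sum`
# method above comes from the original.
# ---------------------------------------------------------------------------
# class SegmentTree:
#     def __init__(self, items):
#         n = len(items)
#         self.items = list(items)
#         self.tree = [0] * n + list(items)   # leaves at tree[n .. 2n-1]
#         for i in range(n - 1, 0, -1):       # each parent holds its children's sum
#             self.tree[i] = self.tree[2 * i] + self.tree[2 * i + 1]
#
#     # ... the sum() method above, unchanged ...
#
# st = SegmentTree([2, 1, 5, 3, 4])
# st.sum(1, 3)  # -> 9 == 1 + 5 + 3 (bounds are inclusive)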
/**
* Display Events on third panel.
*/
@FXML
private void handleEvent() {
eventListPanel = new EventListPanel(logic.getFilteredEventList());
personListPanelPlaceholder2.getChildren().clear();
personListPanelPlaceholder2.getChildren().add(eventListPanel.getRoot());
} |
def _make_rest_call(self, url, action_result):
config = self.get_config()
resp_json = None
params = {'url': url}
headers = {'Authorization': 'Token {0}'.format(config[PHISINIT_JSON_API_KEY])}
try:
r = requests.get(PHISINIT_LOOKUP_URL, params=params, headers=headers)
except Exception as e:
return action_result.set_status(phantom.APP_ERROR, PHISINIT_ERR_SERVER_CONNECTION, e), resp_json
action_result.add_debug_data({'r_text': r.text if r else 'r is None'})
try:
resp_json = r.json()
except Exception as e:
msg_string = r.text.replace('{', '').replace('}', '')
return action_result.set_status(phantom.APP_ERROR, msg_string, e), resp_json
if r.status_code == 200:
return phantom.APP_SUCCESS, resp_json
action_result.add_data(resp_json)
details = json.dumps(resp_json).replace('{', '').replace('}', '')
return (action_result.set_status(phantom.APP_ERROR,
PHISINIT_ERR_FROM_SERVER.format(status=r.status_code, detail=details)), resp_json) |
/****************************************************************************************************/
/**
* NetIF_Tx()
*
* @brief Transmit data packets to network interface(s)/device(s).
*
* @param p_buf_list Pointer to network buffer data packet(s) to transmit via network interface(s)/
* device(s) [see Note #1a].
*
* @param p_err Pointer to variable that will receive the return error code from this function.
*
* @note (1) On any error(s), the current transmit packet may be discarded by handler functions;
* but any remaining transmit packet(s) are still transmitted.
* However, while IP transmit fragmentation is NOT currently supported,
* transmit data packet lists are limited to a single transmit data packet.
*******************************************************************************************************/
void NetIF_Tx(NET_BUF *p_buf_list,
RTOS_ERR *p_err)
{
NET_BUF *p_buf;
NET_BUF *p_buf_next;
NET_BUF_HDR *p_buf_hdr;
p_buf = p_buf_list;
while (p_buf != DEF_NULL) {
p_buf_hdr = &p_buf->Hdr;
p_buf_next = p_buf_hdr->NextBufPtr;
NetIF_TxHandler(p_buf,
p_err);
if (RTOS_ERR_CODE_GET(*p_err) != RTOS_ERR_NONE) {
goto exit;
}
if (p_buf_next != DEF_NULL) {
p_buf_hdr->NextBufPtr = DEF_NULL;
p_buf_hdr = &p_buf_next->Hdr;
p_buf_hdr->PrevBufPtr = DEF_NULL;
}
p_buf = p_buf_next;
}
exit:
return;
} |
def prepare_as_panel(df, stock_name):
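    """Tag ``df`` with ``stock_name`` and reshape to a (stock, Date)-indexed Panel.
    Note: ``DataFrame.to_panel`` and ``Panel`` were removed in pandas 0.25,
    so this requires an older pandas.
    """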
df['stock'] = stock_name
return df.reset_index().set_index(['stock', 'Date']).to_panel() |
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"os/exec"
"runtime"
"strings"
)
type data struct {
ImportPath string
Name string
}
func main() {
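	// Presumably this tries a versioned Go binary first: runtime.Version()
	// returns e.g. "go1.16", which golang.org/dl installs under that name.
	// The fallback below retries with the plain "go" command.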
cmd := exec.Command(runtime.Version(), "list", "-json")
cmd.Stderr = os.Stderr
b, err := cmd.Output()
if err != nil {
cmd = exec.Command("go", "list", "-json")
cmd.Stderr = os.Stderr
b, err = cmd.Output()
}
checkErr(err)
d := data{}
checkErr(json.Unmarshal(b, &d))
goSrcFile := os.Getenv("GOFILE")
var goDestFile string
{
split := strings.Split(goSrcFile, ".")
split = split[:len(split)-1]
goDestFile = strings.Join(split, ".") + ".mock.go"
}
runCommand("mockgen",
"-write_package_comment=false",
fmt.Sprintf("-package=%s", d.Name),
fmt.Sprintf("-self_package=%s", d.ImportPath),
fmt.Sprintf("-source=%s", goSrcFile),
fmt.Sprintf("-destination=%s", goDestFile))
runCommand("debugflag", goDestFile)
}
func runCommand(name string, arg ...string) {
cmd := exec.Command(name, arg...)
cmd.Stderr = os.Stderr
cmd.Stdin = os.Stdin
cmd.Stdout = os.Stdout
checkErr(cmd.Run())
}
func checkErr(err error) {
if err != nil {
log.Fatal(err)
}
}
|
// Macros are created using the macro_rules! macro.
macro_rules! say_hello {
() => {
println!("Hello!");
};
}
fn main() {
say_hello!();
}
// So why are macros useful?
// 1. Don't repeat yourself.
// There are many cases where you may need similar functionality in multiple places but with different types.
// Often, writing a macro is a useful way to avoid repeating code.
// 2. Domain-specific languages.
// Macros allow you to define special syntax for a specific purpose.
// 3. Variadic interfaces.
// Sometimes you want to define an interface that takes a variable number of arguments.
// An example is println!, which can take any number of arguments,
// depending on the format string. (More on this later.)
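// As a sketch of point 3, here is a variadic macro of our own (the name
// `sum` is illustrative, not from any library): it accepts any number of
// comma-separated expressions and adds them up recursively.
macro_rules! sum {
    ($x:expr) => { $x };
    ($x:expr, $($rest:expr),+) => { $x + sum!($($rest),+) };
}
// sum!(1, 2, 3) expands to 1 + (2 + 3). |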
<reponame>andrewhickman/calegon
extern crate bytecount;
#[macro_use]
extern crate lalrpop_util;
extern crate int_hash;
extern crate lazy_static;
extern crate memchr;
#[cfg(any(test, feature = "arbitrary"))]
#[macro_use]
extern crate proptest;
extern crate linked_hash_map;
#[cfg(any(test, feature = "arbitrary"))]
extern crate proptest_recurse;
extern crate regex;
extern crate seahash;
pub mod ast;
#[cfg(any(test, feature = "arbitrary"))]
pub mod arbitrary;
mod error;
mod symbol;
lalrpop_mod!(parser);
#[cfg(test)]
mod tests;
pub use self::error::{Error, Location};
pub use self::symbol::{Symbol, SymbolMap};
use std::str::FromStr;
impl FromStr for ast::File {
type Err = Error;
fn from_str(input: &str) -> Result<ast::File, Self::Err> {
use lazy_static::lazy_static;
use parser::FileParser;
use symbol::Interner;
lazy_static! {
static ref PARSER: FileParser = FileParser::new();
}
PARSER
.parse(&mut Interner::write(), input)
.map_err(|err| Error::new(input, err))
}
}
|
<reponame>manuth/WoltLabCompiler
import { INode } from "../../NodeSystem/INode";
import { Category } from "../Category";
import { ICategoryOptions } from "../ICategoryOptions";
import { GroupOption } from "./GroupOption";
import { IGroupOptionOptions } from "./IGroupOptionOptions";
/**
* Represents an option-category for groups.
*/
export class GroupCategory extends Category<GroupOption, IGroupOptionOptions>
{
/**
* Initializes a new instance of the {@link GroupCategory `GroupCategory`} class.
*
* @param node
* The node of the category.
*
* @param options
* The options of the category.
*/
public constructor(node: INode, options: ICategoryOptions<IGroupOptionOptions>)
{
super(
node,
options,
(category: Category<GroupOption, IGroupOptionOptions>, opts: IGroupOptionOptions) =>
{
return new GroupOption(category, opts);
});
}
}
|
def lowering(self):
loops, idxs = LoopNode.create_loops(self.in_var.dim)
in_var_idx = IndexedVariable(self.in_var)
out_var_idx = IndexedVariable(self.out_var)
in_var_idx.set_indices(idxs)
out_var_idx.set_indices(idxs)
condition = Expression('{t_var_idx} < 0', t_var_idx=in_var_idx)
if self.alpha == 0:
false_exp = Constant(0)
else:
false_exp = Expression('{alpha} * {t_var_idx}', t_var_idx=in_var_idx)
cond_node = ConditionalNode(out_var_idx, condition, false_exp, in_var_idx)
loops[-1].add_edge('content', cond_node)
self.var_decls.append(self.out_var)
self.add_edge('content', loops[0]) |
use crate::tetris::{Point, Size};
use std::slice::Iter;
use crate::gaming_screen::data::bricks::Brick;
pub(crate) mod bricks;
pub(crate) mod game_panel;
pub struct BlocksData {
pub blocks: Vec<Vec<bool>>,
block_size: Size,
}
fn create_empty_line(width: u16) -> Vec<bool> {
    vec![false; width as usize]
}
pub struct TryCleanLinesResult {
pub clear_lines: Vec<u16>,
}
impl BlocksData {
pub fn new(block_size: Size) -> BlocksData {
let mut blocks = Vec::new();
for _line in 0..block_size.height {
blocks.push(create_empty_line(block_size.width))
}
BlocksData {
blocks,
block_size,
}
}
pub fn test_points(&self, points: Iter<Point>, flag: bool) -> Result<(), ()> {
for point in points {
let line = self.blocks.get(point.y as usize);
if line.is_none() {
return Err(());
}
let new_position = line.unwrap().get(point.x as usize);
if new_position.is_none() {
return Err(());
}
if *new_position.unwrap() != flag {
return Err(());
}
}
Ok(())
}
pub fn set_points(&mut self, points: Iter<Point>, flag: bool) {
for point in points {
self.blocks[point.y as usize][point.x as usize] = flag;
}
}
pub fn set_brick(&mut self, brick: &Brick, flag: bool) {
self.set_points(brick.points.iter(), flag)
}
pub fn set_region(&mut self, point: Point, size: Size, flag: bool) {
let vec = &mut self.blocks;
for y in point.y..point.y + size.height {
let line = &mut vec[y as usize];
for x in point.x..point.x + size.width {
line[x as usize] = flag;
}
}
}
pub fn try_clean_lines(&mut self) -> Result<TryCleanLinesResult, ()> {
let mut result = Vec::new();
for y in 0..self.blocks.len() {
let line = &self.blocks[y];
if line.iter().all(|x| { *x }) {
result.push(y);
}
}
return if result.len() > 0 {
result.reverse();
for index in result.iter() {
self.blocks.remove(*index);
}
for _index in result.iter() {
self.blocks.insert(0, create_empty_line(self.block_size.width));
}
Ok(TryCleanLinesResult {
clear_lines: result.iter().map(|i| { *i as u16 }).collect()
})
} else {
Err(())
};
}
} |
package filter
import (
"bytes"
"github.com/hidal-go/hidalgo/values"
)
type ValueFilter interface {
FilterValue(v values.Value) bool
}
type SortableFilter interface {
ValueFilter
FilterSortable(v values.Sortable) bool
	// ValuesRange returns an optional range of values that matches the filter.
	// It is used as an optimization for complex filters, letting the backend limit the range of keys that will be considered.
ValuesRange() *Range
}
var _ SortableFilter = Any{}
type Any struct{}
func (Any) FilterValue(v values.Value) bool {
return v != nil
}
func (Any) FilterSortable(v values.Sortable) bool {
return v != nil
}
func (Any) ValuesRange() *Range {
return nil
}
// EQ is a shorthand for Equal.
func EQ(v values.Value) SortableFilter {
return Equal{Value: v}
}
var _ SortableFilter = Equal{}
type Equal struct {
Value values.Value
}
func (f Equal) FilterValue(a values.Value) bool {
switch a := a.(type) {
case values.Bytes:
b, ok := f.Value.(values.Bytes)
if !ok {
return false
}
return bytes.Equal(a, b)
}
return f.Value == a
}
func (f Equal) FilterSortable(a values.Sortable) bool {
b, ok := f.Value.(values.Sortable)
if !ok {
return a == nil && f.Value == nil
}
switch a := a.(type) {
case values.Bytes:
b, ok := b.(values.Bytes)
if !ok {
return false
}
return bytes.Equal(a, b)
}
return f.Value == a
}
func (f Equal) ValuesRange() *Range {
b, ok := f.Value.(values.Sortable)
if !ok {
return nil
}
return &Range{
Start: GTE(b),
End: LTE(b),
}
}
// LT is a "less than" filter. Shorthand for Less.
func LT(v values.Sortable) *Less {
return &Less{Value: v}
}
// LTE is a "less than or equal" filter. Shorthand for Less.
func LTE(v values.Sortable) *Less {
return &Less{Value: v, Equal: true}
}
var _ SortableFilter = Less{}
type Less struct {
Value values.Sortable
Equal bool
}
func (f Less) FilterValue(v values.Value) bool {
a, ok := v.(values.Sortable)
if !ok && v != nil {
return false
}
return f.FilterSortable(a)
}
func (f Less) FilterSortable(v values.Sortable) bool {
if v == nil {
return true
}
c := values.Compare(v, f.Value)
return c == -1 || (f.Equal && c == 0)
}
func (f Less) ValuesRange() *Range {
return &Range{End: &f}
}
// GT is a "greater than" filter. Shorthand for Greater.
func GT(v values.Sortable) *Greater {
return &Greater{Value: v}
}
// GTE is a "greater than or equal" filter. Shorthand for Greater.
func GTE(v values.Sortable) *Greater {
return &Greater{Value: v, Equal: true}
}
var _ SortableFilter = Greater{}
type Greater struct {
Value values.Sortable
Equal bool
}
func (f Greater) FilterValue(v values.Value) bool {
a, ok := v.(values.Sortable)
if !ok && v != nil {
return false
}
return f.FilterSortable(a)
}
func (f Greater) FilterSortable(v values.Sortable) bool {
if v == nil {
return true
}
c := values.Compare(v, f.Value)
return c == +1 || (f.Equal && c == 0)
}
func (f Greater) ValuesRange() *Range {
return &Range{Start: &f}
}
var _ SortableFilter = Range{}
// Range represents a range of sortable values.
// Each bound is inclusive when its Equal flag is set, and exclusive otherwise.
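// For example, Range{Start: GTE(x), End: LT(y)} matches values v with x <= v < y.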
type Range struct {
Start *Greater
End *Less
}
// isPrefix checks if the range describes a prefix. In this case Start.Value describes the prefix.
func (f Range) isPrefix() bool {
if f.Start == nil || !f.Start.Equal {
return false
}
s, ok := f.Start.Value.(values.BinaryString)
if !ok {
return false
}
end := s.PrefixEnd()
if end == nil {
return f.End == nil
}
if f.End == nil || f.End.Equal {
return false
}
return values.Compare(end, f.End.Value) == 0
}
// Prefix returns a common prefix of the range. Boolean flag indicates if prefix fully describes the range.
func (f Range) Prefix() (values.BinaryString, bool) {
if !f.isPrefix() {
// TODO: calculate common prefix
return nil, false
}
p, ok := f.Start.Value.(values.BinaryString)
return p, ok
}
func (f Range) FilterValue(v values.Value) bool {
a, ok := v.(values.Sortable)
if !ok && v != nil {
return false
}
return f.FilterSortable(a)
}
func (f Range) FilterSortable(v values.Sortable) bool {
if v == nil {
return f.Start != nil
}
if f.Start != nil && !f.Start.FilterSortable(v) {
return false
}
if f.End != nil && !f.End.FilterSortable(v) {
return false
}
return true
}
func (f Range) ValuesRange() *Range {
return &f
}
type And []ValueFilter
func (arr And) FilterValue(v values.Value) bool {
for _, f := range arr {
if !f.FilterValue(v) {
return false
}
}
return true
}
type Or []ValueFilter
func (arr Or) FilterValue(v values.Value) bool {
for _, f := range arr {
if f.FilterValue(v) {
return true
}
}
return false
}
type Not struct {
Filter ValueFilter
}
func (f Not) FilterValue(v values.Value) bool {
return !f.Filter.FilterValue(v)
}
func Prefix(pref values.BinaryString) SortableFilter {
gt := GTE(pref)
end := pref.PrefixEnd()
if end == nil {
return *gt
}
return Range{
Start: gt,
End: LT(end),
}
}
|
package utils
import (
"encoding/binary"
"math"
"strconv"
"strings"
"unsafe"
)
// Substring returns the substring of source (operating on runes, not bytes).
// It returns "" if start < 0 or start > end.
// start: starting index, 0-based, inclusive
// end: ending index, exclusive; values past the end of source are clamped to its length
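// For example, Substring("hello", 1, 3) == "el".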
func Substring(source string, start int, end int) string {
var r = []rune(source)
length := len(r)
if start < 0 || start > end {
return ""
}
if end > length {
end = length
}
if start == 0 && end >= length {
return source
}
return string(r[start:end])
}
func SubstrAfter(s, substr string) string {
a := strings.Index(s, substr)
return Substring(s, a+len(substr), len(s))
}
func SubstrBefore(s, substr string) string {
b := strings.Index(s, substr)
return Substring(s, 0, b)
}
func SubstrBetween(s, afterStr, beforeStr string) string {
a := strings.Index(s, afterStr)
b := strings.Index(s, beforeStr)
return Substring(s, a+len(afterStr), b)
}
func ArrayContains(arr []string, s string) bool {
for _, t := range arr {
if t == s {
return true
}
}
return false
}
// Float32ToString converts a float32 to its shortest exact decimal representation.
func Float32ToString(input_num float32) string {
	// bitSize 32 tells FormatFloat the value started as a float32, avoiding
	// float64-widening artifacts such as "0.10000000149011612".
	return strconv.FormatFloat(float64(input_num), 'f', -1, 32)
}
func Float64ToString(input_num float64) string {
return Float64ToStr(input_num, -1)
}
func Float64ToStr(input_num float64, prec int) string {
return strconv.FormatFloat(input_num, 'f', prec, 64)
}
func Float64sToStrings(input_nums []float64) []string {
var res []string
for _, f := range input_nums {
res = append(res, Float64ToString(f))
}
return res
}
func StrArr2ToFloat(arr *[][]interface{}) {
	for i := range *arr {
		for j := range (*arr)[i] {
			(*arr)[i][j] = StrToFloat64((*arr)[i][j].(string))
		}
	}
}
func StrArr2ToFloatArr2(arr [][]string) [][]float64 {
	res := [][]float64{}
	for i := range arr {
		res = append(res, []float64{})
		for j := range arr[i] {
			res[i] = append(res[i], StrToFloat64(arr[i][j]))
		}
	}
	return res
}
func StrToFloat64(input_num string) float64 {
value, _ := strconv.ParseFloat(input_num, 64)
return value
}
func StrToFloat64Def(input_num string, defValue float64) float64 {
if input_num == "" {
return defValue
}
return StrToFloat64(input_num)
}
func StrToInt(input_num string) int {
res, _ := strconv.Atoi(input_num)
return res
}
func StrToIntDef(input_num string, defValue int) int {
if input_num == "" {
return defValue
}
return StrToInt(input_num)
}
func StrToBool(inputValue string) bool {
res, _ := strconv.ParseBool(inputValue)
return res
}
func StrToBoolDef(inputValue string, defValue bool) bool {
if inputValue == "" {
return defValue
}
return StrToBool(inputValue)
}
// string to int64
func StrToInt64(input_num string) int64 {
i, _ := strconv.ParseInt(input_num, 10, 64)
return i
}
func StrToInt64Def(inputValue string, defValue int64) int64 {
if inputValue == "" {
return defValue
}
return StrToInt64(inputValue)
}
// string to uint64
func StrToUint64(input_num string) uint64 {
i, _ := strconv.ParseUint(input_num, 10, 64)
return i
}
func StrToUint64Def(inputValue string, defValue uint64) uint64 {
if inputValue == "" {
return defValue
}
return StrToUint64(inputValue)
}
// int64 to string
func Int64ToStr(num int64) string {
return strconv.FormatInt(num, 10)
}
// uint64 to string
func Uint64ToStr(num uint64) string {
return strconv.FormatUint(num, 10)
}
func Int2Byte(data int) (ret []byte) {
	size := unsafe.Sizeof(data)
	ret = make([]byte, size)
	tmp := 0xff
	for index := uintptr(0); index < size; index++ {
		ret[index] = byte((tmp << (index * 8) & data) >> (index * 8))
	}
	return ret
}
func Byte2Int(data []byte) int {
	ret := 0
	for i, b := range data {
		ret |= int(b) << (uint(i) * 8)
	}
	return ret
}
func ByteToUint64(data []byte) uint64 {
return binary.BigEndian.Uint64(data)
}
func Uint64ToByte(i uint64) []byte {
b := make([]byte, 8)
binary.BigEndian.PutUint64(b, i)
return b[:]
}
func Int64ToBytes(i int64) []byte {
var buf = make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(i))
return buf
}
func BytesToInt64(buf []byte) int64 {
return int64(binary.BigEndian.Uint64(buf))
}
func Float32ToByte(float float32) []byte {
bits := math.Float32bits(float)
bytes := make([]byte, 4)
binary.LittleEndian.PutUint32(bytes, bits)
return bytes
}
func ByteToFloat32(bytes []byte) float32 {
bits := binary.LittleEndian.Uint32(bytes)
return math.Float32frombits(bits)
}
func Float64ToByte(float float64) []byte {
bits := math.Float64bits(float)
bytes := make([]byte, 8)
binary.LittleEndian.PutUint64(bytes, bits)
return bytes
}
func ByteToFloat64(bytes []byte) float64 {
bits := binary.LittleEndian.Uint64(bytes)
return math.Float64frombits(bits)
}
|
Nonlinear adaptive image filtering based on inhomogeneous diffusion and differential geometry
The inadequacy of the classic linear approach to edge detection and scale-space filtering lies in the spatial averaging of the Laplacian. The Laplacian is the divergence of the gradient and thus is the divergence of both magnitude and direction. The divergence in magnitude characterizes edges, and this divergence must not be averaged if the image structure is to be preserved. We introduce a new nonlinear filtering theory that only averages the divergence of direction. This averaging keeps edges and lines intact, as their direction is nondivergent. Noise does not have this nondivergent consistency, and its divergent direction is averaged. Higher-order structures such as corners are singular points or inflection points in the divergence of direction and are also averaged. Corners are intersection points of edges of nondivergent direction (or smooth curves of small divergence in direction), and their averaging is limited. This approach provides a better compromise between noise removal and preservation of image structure. Experiments that verify and demonstrate the adequacy of this new theory are presented.
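A sketch of the decomposition the abstract alludes to, in our own notation (this is our reading, not the paper's exact formulation): writing the gradient as a magnitude times a unit direction, the Laplacian splits into a magnitude term and a direction term,
$$
\Delta I = \operatorname{div}(\nabla I)
= \underbrace{\nabla\lVert\nabla I\rVert \cdot \mathbf{n}}_{\text{divergence of magnitude}}
+ \underbrace{\lVert\nabla I\rVert \, \operatorname{div}(\mathbf{n})}_{\text{divergence of direction}},
\qquad \mathbf{n} = \frac{\nabla I}{\lVert\nabla I\rVert}.
$$
Averaging only the direction term gives a flow of the form $I_t = \lVert\nabla I\rVert \, \operatorname{div}(\nabla I / \lVert\nabla I\rVert)$, which smooths along level lines (their curvature) while leaving the edge-defining magnitude term untouched. |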
// This test only covers the case when Run() stops.
func TestRunner_Run(t *testing.T) {
ctx := context.Background()
b := []byte(`
test_pathway:
historical_data:
- result:
order_profile: UREA AND ELECTROLYTES
results:
- test_name: Creatinine
value: 126.00
unit: UMOLL
abnormal_flag: HIGH
parameters:
time_from_now: -48h
pathway:
- admission:
loc: Renal
- result:
order_profile: UREA AND ELECTROLYTES
results:
- test_name: Creatinine
value: 153.00
unit: UMOLL
abnormal_flag: HIGH
- discharge: {}`)
mainDir := testwrite.BytesToDir(t, b, "pathway.yml")
hl7.TimezoneAndLocation("Europe/London")
now := time.Date(2020, 2, 12, 0, 0, 0, 0, time.UTC)
args := testhospital.Arguments
args.PathwayArguments.Dir = mainDir
args.PathwayArguments.Names = []string{"test_pathway"}
tests := []struct {
maxPathways int
wantMessages int
}{
{maxPathways: 0, wantMessages: 0},
{maxPathways: 1, wantMessages: 4},
{maxPathways: 2, wantMessages: 8},
}
for _, tc := range tests {
t.Run(fmt.Sprintf("%d", tc.maxPathways), func(t *testing.T) {
clock := testclock.WithTick(now, time.Second)
h := testhospital.New(ctx, t, testhospital.Config{
Config: hospital.Config{Clock: clock},
Arguments: args,
})
defer h.Close()
config := Config{
DashboardURI: nonEmptyString,
DashboardAddress: ":0000",
DashboardStaticDir: nonEmptyString,
MaxPathways: tc.maxPathways,
PathwaysPerHour: 3600,
Clock: clock,
}
runner, err := New(h.Hospital, config)
if err != nil {
t.Fatalf("New(%+v) failed with %v", config, err)
}
runner.Run(context.Background())
messages := h.Sender.GetSentMessages()
if got, want := len(messages), tc.wantMessages; got != want {
t.Errorf("h.Sender.GetSentMessages() got %d messages, want %v", got, want)
}
})
}
} |
<reponame>denisbider/Atomic<gh_stars>1-10
#include "AtIncludes.h"
#include "AtCsv.h"
#include "AtNumCvt.h"
namespace At
{
void CsvReader::Init(char fieldDelim, char commentDelim)
{
m_fieldDelim = fieldDelim;
m_commentDelim = commentDelim;
sizet i {};
m_delims [i++] = m_fieldDelim;
if (commentDelim != 0)
m_delims [i++] = m_commentDelim;
m_delims [i++] = '"';
m_delims [i++] = 0;
}
void CsvReader::SkipLines(sizet nrLines)
{
Str discarded;
while (nrLines-- > 0)
m_lineReader.ReadLine(discarded);
}
bool CsvReader::ReadRecord(Vec<Str>& record, sizet& nrFields)
{
nrFields = 0;
do
{
Str line;
if (!m_lineReader.ReadLine(line))
return false;
Seq remaining { line };
while (true)
{
if (!remaining.n)
break;
if (record.Len() <= nrFields)
record.Add();
bool resume = false;
while (true)
{
Str& field { record[nrFields] };
if (!ParseField(resume, field, remaining))
{
if (m_lineReader.ReadLine(line))
{
remaining = line;
resume = true;
continue;
}
}
break;
}
++nrFields;
}
}
while (nrFields == 0);
// Clear any columns not present in this record
for (sizet i=nrFields; i!=record.Len(); ++i)
record[i].Clear();
return true;
}
bool CsvReader::ParseField(bool resume, Str& outField, Seq& remaining)
{
if (!resume)
{
// Begin reading field
outField.Clear();
Seq chunk { remaining.ReadToFirstByteOf(m_delims) };
if (!remaining.n)
{
// Field not quoted and has no following field
outField.Add(chunk);
return true;
}
if (remaining.p[0] == m_fieldDelim)
{
// Field not quoted, but does have a following field
outField.Add(chunk);
remaining.DropByte();
return true;
}
if (m_commentDelim != 0 && remaining.p[0] == m_commentDelim)
{
// Field not quoted and followed by comment
outField.Add(chunk);
remaining.DropBytes(remaining.n);
return true;
}
// Field is quoted
remaining.DropByte();
}
// Finish reading quoted field
while (true)
{
Seq chunk { remaining.ReadToByte('"') };
outField.Add(chunk);
if (!remaining.n)
{
// Field continues on the next line
if (m_lineReader.LastLineEndCr()) outField.Ch('\r');
if (m_lineReader.LastLineEndLf()) outField.Ch('\n');
return false;
}
remaining.DropByte();
if (!remaining.n)
{
// Quoted field ended, and has no following field
return true;
}
if (remaining.p[0] == '"')
{
// Quote char is escaped, is part of field
outField.Ch('"');
remaining.DropByte();
continue;
}
// Quote char not escaped, expect next field
remaining.DropToFirstByteNotOf(" \t");
if (!remaining.n)
{
// Quoted field ended, and has no following field
return true;
}
if (remaining.p[0] == m_fieldDelim)
{
// Quoted field ended, and does have a following field
remaining.DropByte();
return true;
}
if (m_commentDelim != 0 && remaining.p[0] == m_commentDelim)
{
// Quoted field ended and followed by comment
remaining.DropBytes(remaining.n);
return true;
}
Str msg;
msg.Set("Expecting either field delimiter or new line at line ").UInt(m_lineReader.LineNr());
if (Path().Any())
msg.Add(" in CSV file ").Add(Path());
throw InputErr(msg);
}
}
}
|
// Returns the union of two AABBs (i.e. the smallest AABB containing bbox1 and bbox2)
inline AxesAlignedBoundingBox UnionOfAABBs(AxesAlignedBoundingBox const& bbox1,
AxesAlignedBoundingBox const& bbox2)
{
Vec3 minima;
Vec3 maxima;
std::transform(std::begin(bbox1.Minima()),
std::end(bbox1.Minima()),
std::begin(bbox2.Minima()),
std::begin(minima),
[](RealNum a, RealNum b) { return std::min(a, b); });
std::transform(std::begin(bbox1.Maxima()),
std::end(bbox1.Maxima()),
std::begin(bbox2.Maxima()),
std::begin(maxima),
[](RealNum a, RealNum b) { return std::max(a, b); });
return AxesAlignedBoundingBox(minima, maxima);
} |
/*******************************************************************************
function     :this is the AT receiver engine
parameters   :
instruction  :this task reads the device continuously and blocks in the read
              function; in pass-through mode the received data is handed to
              the pass function, otherwise it is matched against the pending
              AT command and, failing that, against the out-of-band handlers
*******************************************************************************/
static u32_t __rcv_task_entry(void *args)
{
bool_t matchret;
s32_t rcvlen;
while(1)
{
memset(g_at_cb.rcvbuf,0,cn_at_resp_maxlen);
rcvlen = __resp_rcv(g_at_cb.rcvbuf,cn_at_resp_maxlen,LOS_WAIT_FOREVER);
if(rcvlen > 0)
{
if(0 == g_at_cb.passmode)
{
matchret = __cmd_match(g_at_cb.rcvbuf,rcvlen);
if(false == matchret)
{
__oob_match(g_at_cb.rcvbuf,rcvlen);
}
}
else
{
if(NULL != g_at_cb.funcpass)
{
g_at_cb.funcpass(g_at_cb.rcvbuf,rcvlen);
}
}
}
}
} |
<reponame>jkomoros/sudoku
package sudoku
import (
"crypto/sha1"
"encoding/hex"
"fmt"
"log"
"sort"
"strconv"
"strings"
)
//DifficultySignals is a collection of names to float64 values, representing
//the various signals extracted from a SolveDirections, and used for the
//Difficulty calculation. Generally not useful to package users.
type DifficultySignals map[string]float64
//A difficulty signal generator can return more than one difficulty signal, so
//it doesn't just return a single float64. Each signal generator should always
//return a map with the SAME keys--so if you've called it once you know what
//the next calls will have as keys.
type difficultySignalGenerator func(directions SolveDirections) DifficultySignals
const _DIFFICULTY_WEIGHT_FILENAME = "difficulties.csv"
var difficultySignalGenerators []difficultySignalGenerator
//These are the weights that will be used to turn a list of signals into a
//difficulty. starting weights are set in hs_difficulty_weights.go, which is
//auto-generated. Generate those now:
//go:generate cmd/dokugen-analysis/internal/gendifficulties/gendifficulties
var difficultySignalWeights map[string]float64
//difficultyModelHashValue stashes the value of the hash of the model, so we
//don't have to calculate it too often.
var difficultyModelHashValue string
func init() {
difficultySignalGenerators = []difficultySignalGenerator{
signalTechnique,
signalNumberOfSteps,
signalTechniquePercentage,
signalPercentageFilledSteps,
signalNumberUnfilled,
signalStepsUntilNonFill,
signalPrecursorStepsLength,
}
}
//LoadDifficultyModel loads in a new difficulty model to use to score puzzles'
//difficulties. This will automatically change what DifficultyModelHash will
//return.
func LoadDifficultyModel(model map[string]float64) {
difficultySignalWeights = model
//Reset the stored model hash value so the next call to
	//DifficultyModelHash will recalculate it.
difficultyModelHashValue = ""
}
//DifficultyModelHash is a unique string representing the exact difficulty
//model in use. Every time a new model is trained or loaded, this value will change.
//Therefore, if the value is different than last time you checked, the model has changed.
//This is useful for throwing out caches that assume the same difficulty model is in use.
func DifficultyModelHash() string {
if difficultyModelHashValue == "" {
//Generate a string with keys, vals in a known sequence, then hash.
//first, generate a list of all keys
var keys []string
for k := range difficultySignalWeights {
keys = append(keys, k)
}
sort.Strings(keys)
hash := sha1.New()
for _, k := range keys {
hash.Write([]byte(k + ":" + strconv.FormatFloat(difficultySignalWeights[k], 'f', -1, 64) + "\n"))
}
hashBytes := hash.Sum(nil)
		hexStr := strings.ToUpper(hex.EncodeToString(hashBytes))
		difficultyModelHashValue = hexStr
}
return difficultyModelHashValue
}
//Stats returns a printout of interesting statistics about the
//SolveDirections, including number of steps, difficulty (based on this solve
//description alone), how unrelated the cells in subsequent steps are, and the
//values of all of the signals used to generate the difficulty.
func (self SolveDirections) Stats() []string {
//TODO: test this. dokugen has a method that effectively tests this; just use that.
techniqueCount := make(map[string]int)
var lastStep *SolveStep
similarityAccum := 0.0
steps := self.Steps()
for _, step := range steps {
if lastStep != nil {
similarityAccum += step.TargetCells.chainSimilarity(lastStep.TargetCells)
}
techniqueCount[step.TechniqueVariant()] += 1
lastStep = step
}
similarityAccum /= float64(len(steps))
var result []string
//TODO: use a standard divider across the codebase
divider := "-------------------------"
result = append(result, divider)
//TODO: we shouldn't even include this... it's not meaningful to report the difficulty of a single solve.
result = append(result, fmt.Sprintf("Difficulty: %f", self.Signals().difficulty()))
result = append(result, divider)
result = append(result, fmt.Sprintf("Step count: %d", len(steps)))
result = append(result, divider)
result = append(result, fmt.Sprintf("Avg Similarity: %f", similarityAccum))
result = append(result, divider)
//We want a stable ordering for technique counts.
for _, technique := range AllTechniqueVariants {
//TODO: pad the technique name with enough spaces so the colon lines up.
result = append(result, fmt.Sprintf("%s: %d", technique, techniqueCount[technique]))
}
result = append(result, divider)
return result
}
//Description returns a comprehensive prose description of the
//SolveDirections, including reasoning for each step, that if followed would
//lead to the grid being solved. Unlike Walkthrough, Description() does not
//include diagrams for each step.
func (self SolveDirections) Description() []string {
//TODO: the IsHint directions don't sound as good as the old ones in OnlineSudoku did.
if len(self.CompoundSteps) == 0 {
return []string{"The puzzle is already solved."}
}
var descriptions []string
for i, compound := range self.CompoundSteps {
intro := ""
description := compound.Description()
if len(self.CompoundSteps) > 1 {
description = strings.ToLower(string(description[0])) + description[1:]
switch i {
case 0:
intro = "First, "
case len(self.CompoundSteps) - 1:
intro = "Finally, "
default:
//TODO: switch between "then" and "next" randomly.
intro = "Next, "
}
}
descriptions = append(descriptions, intro+description)
}
return descriptions
}
//Walkthrough prints an exhaustive set of human-readable directions that
//includes diagrams at each step to make it easier to follow.
func (self SolveDirections) Walkthrough() string {
//TODO: test this.
if len(self.CompoundSteps) == 0 {
return "The puzzle could not be solved with any of the techniques we're aware of."
}
steps := self.Steps()
clone := self.Grid().MutableCopy()
DIVIDER := "\n\n--------------------------------------------\n\n"
intro := fmt.Sprintf("This will take %d steps to solve.", len(steps))
intro += "\nWhen you start, your grid looks like this:\n"
intro += clone.Diagram(false)
intro += "\n"
intro += DIVIDER
descriptions := self.Description()
results := make([]string, len(steps))
for i, description := range descriptions {
result := description + "\n"
result += "After doing that, your grid will look like: \n\n"
steps[i].Apply(clone)
result += clone.Diagram(false)
results[i] = result
}
return intro + strings.Join(results, DIVIDER) + DIVIDER + "Now the puzzle is solved."
}
//Signals returns the DifficultySignals for this set of SolveDirections.
func (self SolveDirections) Signals() DifficultySignals {
//Because of the contract of a DifficultySignalGenerator (that it always
//returns the same keys), as long as DifficultySignalGenerators stays
//constant it's reasonable for callers to assume that one call to
//Signals() will return all of the string keys you'll see any time you
//call Signals()
result := DifficultySignals{}
for _, generator := range difficultySignalGenerators {
result.add(generator(self))
}
return result
}
//This will overwrite colliding values
//TODO: this is confusingly named
func (self DifficultySignals) add(other DifficultySignals) {
for key, val := range other {
self[key] = val
}
}
//For keys in both, will sum them together.
//TODO: this is confusingly named (compared to Add) Do we really need both Sum and Add?
func (self DifficultySignals) sum(other DifficultySignals) {
for key, val := range other {
self[key] += val
}
}
func (self DifficultySignals) difficulty() float64 {
accum := 0.0
if constant, ok := difficultySignalWeights["Constant"]; ok {
accum = constant
} else {
log.Println("Didn't have the constant term loaded.")
}
for signal, val := range self {
//We can discard the OK because 0 is a reasonable thing to do with weights we aren't aware of.
weight, _ := difficultySignalWeights[signal]
accum += val * weight
}
if accum < 0.0 {
log.Println("Accumuldated difficulty snapped to 0.0:", accum)
accum = 0.0
}
if accum > 1.0 {
log.Println("Accumulated difficulty snapped to 1.0:", accum)
accum = 1.0
}
return accum
}
//Rest of file is different Signals
//TODO: now that SolveDirections includes gridSnapshot, think if there are any
//additional Signals we can generate.
//This technique returns a count of how many each type of technique is seen.
//Different techniques are different "difficulties", so seeing more of a hard
//technique will lead to a higher overall difficulty.
func signalTechnique(directions SolveDirections) DifficultySignals {
//Our contract is to always return every signal name, even if it's 0.0.
result := DifficultySignals{}
for _, techniqueName := range AllTechniqueVariants {
result[techniqueName+" Count"] = 0.0
}
for _, step := range directions.Steps() {
result[step.TechniqueVariant()+" Count"]++
}
return result
}
//This signal is just number of steps. More steps is PROBABLY a harder puzzle.
func signalNumberOfSteps(directions SolveDirections) DifficultySignals {
return DifficultySignals{
"Number of Steps": float64(len(directions.Steps())),
}
}
//This signal is like signalTechnique, except it returns the count divided by
//the TOTAL number of steps.
func signalTechniquePercentage(directions SolveDirections) DifficultySignals {
//Our contract is to always return every signal name, even if it's 0.0.
result := DifficultySignals{}
for _, techniqueName := range AllTechniqueVariants {
result[techniqueName+" Percentage"] = 0.0
}
count := len(directions.Steps())
if count == 0 {
return result
}
for _, step := range directions.Steps() {
result[step.TechniqueVariant()+" Percentage"]++
}
//Now normalize all of them
for name := range result {
result[name] /= float64(count)
}
return result
}
//This signal is how many steps are filled out of all steps. Presumably harder
//puzzles will have more non-fill steps.
func signalPercentageFilledSteps(directions SolveDirections) DifficultySignals {
numerator := 0.0
denominator := float64(len(directions.Steps()))
for _, step := range directions.Steps() {
if step.Technique.IsFill() {
numerator += 1.0
}
}
return DifficultySignals{
"Percentage Fill Steps": numerator / denominator,
}
}
//This signal is how many cells are unfilled at the beginning. Presumably
//harder puzzles will have fewer cells filled (although obviously this isn't
//necessarily true)
func signalNumberUnfilled(directions SolveDirections) DifficultySignals {
//We don't have access to the underlying grid, so we'll just count how
//many fill steps (since each can only add one number, and no numbers are
//ever unfilled)
count := 0.0
for _, step := range directions.Steps() {
if step.Technique.IsFill() {
count++
}
}
return DifficultySignals{
"Number Unfilled Cells": count,
}
}
//signalPrecursorStepsLength returns the length of the longest run of
//non-fill steps, and the average length of any run.
func signalPrecursorStepsLength(directions SolveDirections) DifficultySignals {
longestRun := 0
averageRunAccum := 0
for _, compoundStep := range directions.CompoundSteps {
length := len(compoundStep.PrecursorSteps)
if length > longestRun {
longestRun = length
}
averageRunAccum += length
}
averageRun := float64(averageRunAccum) / float64(len(directions.CompoundSteps))
return DifficultySignals{
"Average PrecursorSteps Length": averageRun,
"Longest PrecursorSteps Length": float64(longestRun),
}
}
//This signal is how many steps into the solve directions before you encounter
//your first non-fill step. Non-fill steps are harder, so this signal captures
//how easy the start of the puzzle is.
func signalStepsUntilNonFill(directions SolveDirections) DifficultySignals {
//TODO: should we get rid of this now that we have
//signalPrecursorStepsLength?
count := 0.0
for _, step := range directions.Steps() {
if !step.Technique.IsFill() {
break
}
count++
}
return DifficultySignals{
"Steps Until Nonfill": count,
}
}
|
/// Figure 10.31 How to handle SIGTSTP
///
/// Added a println to sig_tstp to show that the signal
/// is really caught
#[macro_use(as_void)]
extern crate apue;
extern crate libc;
use std::mem::uninitialized;
use std::ptr::null_mut;
use libc::{c_int, SIGTSTP, SIG_UNBLOCK, SIG_DFL, SIG_IGN, STDIN_FILENO, STDOUT_FILENO};
use libc::{sigemptyset, sigaddset, signal, kill, getpid, read, write};
use apue::my_libc::sigprocmask;
use apue::{LibcResult, err_sys};
const BUFFSIZE: usize = 1024;
unsafe fn sig_tstp(_: c_int) {
// move cursor to lower left corner, reset tty mode
// unblock SIGTSTP, since it's blocked while we're reading it
println!("last cleanup before SIGTSTP");
let mut mask = uninitialized();
sigemptyset(&mut mask);
sigaddset(&mut mask, SIGTSTP);
sigprocmask(SIG_UNBLOCK, &mask, null_mut());
signal(SIGTSTP, SIG_DFL); // reset disposition to default
kill(getpid(), SIGTSTP); // and send the signal to ourself
// we won't return from the kill until we're continued
signal(SIGTSTP, sig_tstp as usize); // reestablish signal handler
// ... reset tty mode, redraw screen ...
}
fn main() {
unsafe {
if signal(SIGTSTP, SIG_IGN) == SIG_DFL {
signal(SIGTSTP, sig_tstp as usize);
}
let buf = vec![0; BUFFSIZE];
while let Ok(n) = read(STDIN_FILENO, as_void!(buf), BUFFSIZE).check_positive() {
if write(STDOUT_FILENO, as_void!(buf), n as _) != n {
err_sys("write error");
}
}
}
}
|
<reponame>SpeedcuberM/KhSM<filename>Android/app/src/main/java/com/khsm/app/data/entities/Gender.java
package com.khsm.app.data.entities;
import com.google.gson.annotations.SerializedName;
public enum Gender {
@SerializedName("male")
MALE,
@SerializedName("female")
FEMALE
}
|
<reponame>stephanwilliams/rust386
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum AddressingMethod {
A, C, D, E, F, G, I, J, M, O, R, S, T, X, Y
}
impl AddressingMethod {
pub fn has_modrm(&self) -> bool {
match *self {
AddressingMethod::C |
AddressingMethod::D |
AddressingMethod::E |
AddressingMethod::G |
AddressingMethod::M |
AddressingMethod::R |
AddressingMethod::S |
AddressingMethod::T
=> true,
_ => false
}
}
}
#[allow(non_camel_case_types)]
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum OperantType {
a, b, c, d, p, s, v, w
}
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub struct UnresolvedOperand {
pub addr_method: AddressingMethod,
pub op_type: OperantType
}
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum UnresolvedRegister {
eAX,
eCX,
eDX,
eBX,
eSP,
eBP,
eSI,
eDI,
DX,
AL,
CL,
DL,
BL,
AH,
CH,
DH,
BH,
ES,
SS,
CS,
DS,
}
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum UnresolvedOp {
Operand(UnresolvedOperand),
Register(UnresolvedRegister),
Constant(u32)
}
impl UnresolvedOp {
pub fn has_modrm(&self) -> bool {
match *self {
UnresolvedOp::Operand(operand) => operand.addr_method.has_modrm(),
_ => false
}
}
}
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
pub enum UnresolvedOperands {
NotImplemented,
Invalid,
None,
Single(UnresolvedOp),
Double(UnresolvedOp, UnresolvedOp),
Triple(UnresolvedOp, UnresolvedOp, UnresolvedOp),
Group(usize),
GroupSingle(usize, UnresolvedOp),
GroupDouble(usize, UnresolvedOp, UnresolvedOp)
}
impl UnresolvedOperands {
pub fn has_modrm(&self) -> bool {
match *self {
UnresolvedOperands::Single(op1) => op1.has_modrm(),
UnresolvedOperands::Double(op1, op2) => op1.has_modrm() || op2.has_modrm(),
UnresolvedOperands::Triple(op1, op2, op3) => op1.has_modrm() || op2.has_modrm() || op3.has_modrm(),
UnresolvedOperands::Group(_) => true,
UnresolvedOperands::GroupSingle(_, _) => true,
UnresolvedOperands::GroupDouble(_, _, _) => true,
_ => false,
}
}
}
// Register: fixed or variable size
// Op: Operand OR Register
// opcode has one of:
// * None
// * Op
// * Op Op
// * Op Op Op
// * Group
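// For example (expansion written out by hand as a sketch):
//   op!(E b, G b) => UnresolvedOperands::Double(
//       UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::E, op_type: OperantType::b }),
//       UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::G, op_type: OperantType::b }))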
macro_rules! op {
(grp $grp:expr) =>
(UnresolvedOperands::Group($grp));
(grp $grp:expr, reg $reg1:ident) =>
(UnresolvedOperands::GroupSingle(
$grp,
UnresolvedOp::Register(UnresolvedRegister::$reg1)
));
(grp $grp:expr, $addr1:ident $opty1:ident) =>
(UnresolvedOperands::GroupSingle(
$grp,
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 })
));
(grp $grp:expr, reg $reg1:ident, reg $reg2:ident) =>
(UnresolvedOperands::GroupDouble(
$grp,
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Register(UnresolvedRegister::$reg2)
));
(grp $grp:expr, reg $reg1:ident, $addr2:ident $opty2:ident) =>
(UnresolvedOperands::GroupDouble(
$grp,
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 })
));
(grp $grp:expr, $addr1:ident $opty1:ident, reg $reg2:ident) =>
(UnresolvedOperands::GroupDouble(
$grp,
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Register(UnresolvedRegister::$reg2)
));
(grp $grp:expr, $addr1:ident $opty1:ident, const $const3:expr) =>
(UnresolvedOperands::GroupDouble(
$grp,
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Constant($const3)
));
(grp $grp:expr, $addr1:ident $opty1:ident, $addr2:ident $opty2:ident) =>
(UnresolvedOperands::GroupDouble(
$grp,
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 })
));
(const $const1:expr) =>
(UnresolvedOperands::Single(
UnresolvedOp::Constant($const1)
));
(reg $reg1:ident) =>
(UnresolvedOperands::Single(
UnresolvedOp::Register(UnresolvedRegister::$reg1)
));
($addr1:ident $opty1:ident) =>
(UnresolvedOperands::Single(
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 })
));
(reg $reg1:ident, reg $reg2:ident) =>
(UnresolvedOperands::Double(
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Register(UnresolvedRegister::$reg2)
));
(reg $reg1:ident, $addr2:ident $opty2:ident) =>
(UnresolvedOperands::Double(
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 })
));
($addr1:ident $opty1:ident, reg $reg2:ident) =>
(UnresolvedOperands::Double(
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Register(UnresolvedRegister::$reg2)
));
($addr1:ident $opty1:ident, $addr2:ident $opty2:ident) =>
(UnresolvedOperands::Double(
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 })
));
(reg $reg1:ident, reg $reg2:ident, reg $reg3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Register(UnresolvedRegister::$reg2),
UnresolvedOp::Register(UnresolvedRegister::$reg3)
));
(reg $reg1:ident, reg $reg2:ident, $addr3:ident $opty3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Register(UnresolvedRegister::$reg2),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr3, op_type: OperantType::$opty3 })
));
(reg $reg1:ident, $addr2:ident $opty2:ident, reg $reg3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 }),
UnresolvedOp::Register(UnresolvedRegister::$reg3)
));
(reg $reg1:ident, $addr2:ident $opty2:ident, $addr3:ident $opty3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Register(UnresolvedRegister::$reg1),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 }),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr3, op_type: OperantType::$opty3 })
));
($addr1:ident $opty1:ident, reg $reg2:ident, reg $reg3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Register(UnresolvedRegister::$reg2),
UnresolvedOp::Register(UnresolvedRegister::$reg3)
));
($addr1:ident $opty1:ident, reg $reg2:ident, $addr3:ident $opty3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Register(UnresolvedRegister::$reg2),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr3, op_type: OperantType::$opty3 })
));
($addr1:ident $opty1:ident, $addr2:ident $opty2:ident, reg $reg3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 }),
UnresolvedOp::Register(UnresolvedRegister::$reg3)
));
($addr1:ident $opty1:ident, $addr2:ident $opty2:ident, $addr3:ident $opty3:ident) =>
(UnresolvedOperands::Triple(
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr1, op_type: OperantType::$opty1 }),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr2, op_type: OperantType::$opty2 }),
UnresolvedOp::Operand(UnresolvedOperand { addr_method: AddressingMethod::$addr3, op_type: OperantType::$opty3 })
));
}
macro_rules! grp {
($num:expr) => (UnresolvedOperands::Group($num));
}
macro_rules! no_impl {
() => (UnresolvedOperands::NotImplemented)
}
macro_rules! inval {
() => (UnresolvedOperands::Invalid)
}
macro_rules! none {
() => (UnresolvedOperands::None)
}
pub static SINGLE_OPCODE_MAP: [UnresolvedOperands; 256] = [
/* 0x00 */ op!(E b, G b),
/* 0x01 */ op!(E v, G v),
/* 0x02 */ op!(G b, E b),
/* 0x03 */ op!(G v, E v),
/* 0x04 */ op!(reg AL, I b),
/* 0x05 */ op!(reg eAX, I v),
/* 0x06 */ op!(reg ES),
/* 0x07 */ op!(reg ES),
/* 0x08 */ op!(E b, G b),
/* 0x09 */ op!(E v, G v),
/* 0x0A */ op!(G b, E b),
/* 0x0B */ op!(G v, E v),
/* 0x0C */ op!(reg AL, I b),
/* 0x0D */ op!(reg eAX, I v),
/* 0x0E */ no_impl!(),
/* 0x0F */ inval!(),
/* 0x10 */ no_impl!(),
/* 0x11 */ no_impl!(),
/* 0x12 */ no_impl!(),
/* 0x13 */ no_impl!(),
/* 0x14 */ no_impl!(),
/* 0x15 */ no_impl!(),
/* 0x16 */ no_impl!(),
/* 0x17 */ no_impl!(),
/* 0x18 */ no_impl!(),
/* 0x19 */ no_impl!(),
/* 0x1A */ no_impl!(),
/* 0x1B */ no_impl!(),
/* 0x1C */ no_impl!(),
/* 0x1D */ no_impl!(),
/* 0x1E */ no_impl!(),
/* 0x1F */ no_impl!(),
/* 0x20 */ op!(E b, G b),
/* 0x21 */ op!(E v, G v),
/* 0x22 */ op!(G b, E b),
/* 0x23 */ op!(G v, E v),
/* 0x24 */ op!(reg AL, I b),
/* 0x25 */ op!(reg eAX, I v),
/* 0x26 */ inval!(),
/* 0x27 */ no_impl!(),
/* 0x28 */ op!(E b, G b),
/* 0x29 */ op!(E v, G v),
/* 0x2A */ op!(G b, E b),
/* 0x2B */ op!(G v, E v),
/* 0x2C */ op!(reg AL, I b),
/* 0x2D */ op!(reg eAX, I v),
/* 0x2E */ inval!(),
/* 0x2F */ no_impl!(),
/* 0x30 */ op!(E b, G b),
/* 0x31 */ op!(E v, G v),
/* 0x32 */ op!(G b, E b),
/* 0x33 */ op!(G v, E v),
/* 0x34 */ op!(reg AL, I b),
/* 0x35 */ op!(reg eAX, I v),
/* 0x36 */ inval!(),
/* 0x37 */ no_impl!(),
/* 0x38 */ op!(E b, G b),
/* 0x39 */ op!(E v, G v),
/* 0x3A */ op!(G b, E b),
/* 0x3B */ op!(G v, E v),
/* 0x3C */ op!(reg AL, I b),
/* 0x3D */ op!(reg eAX, I v),
/* 0x3E */ inval!(),
/* 0x3F */ no_impl!(),
/* 0x40 */ op!(reg eAX),
/* 0x41 */ op!(reg eCX),
/* 0x42 */ op!(reg eDX),
/* 0x43 */ op!(reg eBX),
/* 0x44 */ op!(reg eSP),
/* 0x45 */ op!(reg eBP),
/* 0x46 */ op!(reg eSI),
/* 0x47 */ op!(reg eDI),
/* 0x48 */ op!(reg eAX),
/* 0x49 */ op!(reg eCX),
/* 0x4A */ op!(reg eDX),
/* 0x4B */ op!(reg eBX),
/* 0x4C */ op!(reg eSP),
/* 0x4D */ op!(reg eBP),
/* 0x4E */ op!(reg eSI),
/* 0x4F */ op!(reg eDI),
/* 0x50 */ op!(reg eAX),
/* 0x51 */ op!(reg eCX),
/* 0x52 */ op!(reg eDX),
/* 0x53 */ op!(reg eBX),
/* 0x54 */ op!(reg eSP),
/* 0x55 */ op!(reg eBP),
/* 0x56 */ op!(reg eSI),
/* 0x57 */ op!(reg eDI),
/* 0x58 */ op!(reg eAX),
/* 0x59 */ op!(reg eCX),
/* 0x5A */ op!(reg eDX),
/* 0x5B */ op!(reg eBX),
/* 0x5C */ op!(reg eSP),
/* 0x5D */ op!(reg eBP),
/* 0x5E */ op!(reg eSI),
/* 0x5F */ op!(reg eDI),
/* 0x60 */ no_impl!(),
/* 0x61 */ no_impl!(),
/* 0x62 */ no_impl!(),
/* 0x63 */ no_impl!(),
/* 0x64 */ inval!(),
/* 0x65 */ inval!(),
/* 0x66 */ inval!(),
/* 0x67 */ inval!(),
/* 0x68 */ op!(I v),
/* 0x69 */ op!(G v, E v, I v),
/* 0x6A */ op!(I b),
/* 0x6B */ op!(G v, E v, I v),
/* 0x6C */ op!(Y b, reg DX),
/* 0x6D */ op!(Y v, reg DX),
/* 0x6E */ no_impl!(),
/* 0x6F */ no_impl!(),
/* 0x70 */ op!(J b),
/* 0x71 */ op!(J b),
/* 0x72 */ op!(J b),
/* 0x73 */ op!(J b),
/* 0x74 */ op!(J b),
/* 0x75 */ op!(J b),
/* 0x76 */ op!(J b),
/* 0x77 */ op!(J b),
/* 0x78 */ op!(J b),
/* 0x79 */ op!(J b),
/* 0x7A */ op!(J b),
/* 0x7B */ op!(J b),
/* 0x7C */ op!(J b),
/* 0x7D */ op!(J b),
/* 0x7E */ op!(J b),
/* 0x7F */ op!(J b),
/* 0x80 */ op!(grp 1, E b, I b),
/* 0x81 */ op!(grp 1, E v, I v),
/* 0x82 */ inval!(),
/* 0x83 */ op!(grp 1, E v, I b),
/* 0x84 */ op!(E b, G b),
/* 0x85 */ op!(E v, G v),
/* 0x86 */ no_impl!(),
/* 0x87 */ no_impl!(),
/* 0x88 */ op!(E b, G b),
/* 0x89 */ op!(E v, G v),
/* 0x8A */ op!(G b, E b),
/* 0x8B */ op!(G v, E v),
/* 0x8C */ op!(E w, S w),
/* 0x8D */ op!(G v, M v),
/* 0x8E */ op!(S w, E w),
/* 0x8F */ no_impl!(),
/* 0x90 */ none!(),
/* 0x91 */ no_impl!(),
/* 0x92 */ no_impl!(),
/* 0x93 */ no_impl!(),
/* 0x94 */ no_impl!(),
/* 0x95 */ no_impl!(),
/* 0x96 */ no_impl!(),
/* 0x97 */ no_impl!(),
/* 0x98 */ no_impl!(),
/* 0x99 */ no_impl!(),
/* 0x9A */ no_impl!(),
/* 0x9B */ no_impl!(),
/* 0x9C */ no_impl!(),
/* 0x9D */ no_impl!(),
/* 0x9E */ no_impl!(),
/* 0x9F */ no_impl!(),
/* 0xA0 */ op!(reg AL, O b),
/* 0xA1 */ op!(reg eAX, O v),
/* 0xA2 */ op!(O b, reg AL),
/* 0xA3 */ op!(O v, reg eAX),
/* 0xA4 */ op!(X b, Y b),
/* 0xA5 */ op!(X v, Y v),
/* 0xA6 */ no_impl!(),
/* 0xA7 */ no_impl!(),
/* 0xA8 */ op!(reg AL, I b),
/* 0xA9 */ op!(reg eAX, I v),
/* 0xAA */ op!(Y b, reg AL),
/* 0xAB */ op!(Y v, reg eAX),
/* 0xAC */ no_impl!(),
/* 0xAD */ no_impl!(),
/* 0xAE */ no_impl!(),
/* 0xAF */ no_impl!(),
/* 0xB0 */ op!(reg AL, I b),
/* 0xB1 */ op!(reg CL, I b),
/* 0xB2 */ op!(reg DL, I b),
/* 0xB3 */ op!(reg BL, I b),
/* 0xB4 */ op!(reg AH, I b),
/* 0xB5 */ op!(reg CH, I b),
/* 0xB6 */ op!(reg DH, I b),
/* 0xB7 */ op!(reg BH, I b),
/* 0xB8 */ op!(reg eAX, I v),
/* 0xB9 */ op!(reg eCX, I v),
/* 0xBA */ op!(reg eDX, I v),
/* 0xBB */ op!(reg eBX, I v),
/* 0xBC */ op!(reg eSP, I v),
/* 0xBD */ op!(reg eBP, I v),
/* 0xBE */ op!(reg eSI, I v),
/* 0xBF */ op!(reg eDI, I v),
/* 0xC0 */ op!(grp 2, E b, I b),
/* 0xC1 */ op!(grp 2, E v, I b),
/* 0xC2 */ no_impl!(),
/* 0xC3 */ none!(),
/* 0xC4 */ no_impl!(),
/* 0xC5 */ no_impl!(),
/* 0xC6 */ op!(E b, I b),
/* 0xC7 */ op!(E v, I v),
/* 0xC8 */ no_impl!(),
/* 0xC9 */ none!(),
/* 0xCA */ no_impl!(),
/* 0xCB */ no_impl!(),
/* 0xCC */ no_impl!(),
/* 0xCD */ no_impl!(),
/* 0xCE */ no_impl!(),
/* 0xCF */ no_impl!(),
/* 0xD0 */ op!(grp 2, E b, const 1),
/* 0xD1 */ op!(grp 2, E v, const 1),
/* 0xD2 */ op!(grp 2, E b, reg CL),
/* 0xD3 */ op!(grp 2, E v, reg CL),
/* 0xD4 */ no_impl!(),
/* 0xD5 */ no_impl!(),
/* 0xD6 */ inval!(),
/* 0xD7 */ no_impl!(),
/* 0xD8 */ no_impl!(),
/* 0xD9 */ no_impl!(),
/* 0xDA */ no_impl!(),
/* 0xDB */ no_impl!(),
/* 0xDC */ no_impl!(),
/* 0xDD */ no_impl!(),
/* 0xDE */ no_impl!(),
/* 0xDF */ no_impl!(),
/* 0xE0 */ no_impl!(),
/* 0xE1 */ no_impl!(),
/* 0xE2 */ no_impl!(),
/* 0xE3 */ no_impl!(),
/* 0xE4 */ op!(reg AL, I b),
/* 0xE5 */ op!(reg eAX, I b),
/* 0xE6 */ op!(I b, reg AL),
/* 0xE7 */ op!(I b, reg eAX),
/* 0xE8 */ op!(A v),
/* 0xE9 */ op!(J v),
/* 0xEA */ op!(A p),
/* 0xEB */ op!(J b),
/* 0xEC */ op!(reg AL, reg DX),
/* 0xED */ op!(reg eAX, reg DX),
/* 0xEE */ op!(reg DX, reg AL),
/* 0xEF */ op!(reg DX, reg eAX),
/* 0xF0 */ inval!(),
/* 0xF1 */ inval!(),
/* 0xF2 */ no_impl!(),
/* 0xF3 */ no_impl!(),
/* 0xF4 */ no_impl!(),
/* 0xF5 */ no_impl!(),
/* 0xF6 */ op!(grp 3, E b),
/* 0xF7 */ op!(grp 3, E v),
/* 0xF8 */ none!(),
/* 0xF9 */ none!(),
/* 0xFA */ none!(),
/* 0xFB */ none!(),
/* 0xFC */ none!(),
/* 0xFD */ none!(),
/* 0xFE */ no_impl!(),
/* 0xFF */ op!(grp 5),
];
pub static DOUBLE_OPCODE_MAP: [UnresolvedOperands; 256] = [
/* 0x00 */ no_impl!(),
/* 0x01 */ op!(grp 7),
/* 0x02 */ no_impl!(),
/* 0x03 */ no_impl!(),
/* 0x04 */ inval!(),
/* 0x05 */ inval!(),
/* 0x06 */ no_impl!(),
/* 0x07 */ inval!(),
/* 0x08 */ inval!(),
/* 0x09 */ inval!(),
/* 0x0A */ inval!(),
/* 0x0B */ inval!(),
/* 0x0C */ inval!(),
/* 0x0D */ inval!(),
/* 0x0E */ inval!(),
/* 0x0F */ inval!(),
/* 0x10 */ inval!(),
/* 0x11 */ inval!(),
/* 0x12 */ inval!(),
/* 0x13 */ inval!(),
/* 0x14 */ inval!(),
/* 0x15 */ inval!(),
/* 0x16 */ inval!(),
/* 0x17 */ inval!(),
/* 0x18 */ inval!(),
/* 0x19 */ inval!(),
/* 0x1A */ inval!(),
/* 0x1B */ inval!(),
/* 0x1C */ inval!(),
/* 0x1D */ inval!(),
/* 0x1E */ inval!(),
/* 0x1F */ inval!(),
/* 0x20 */ op!(R d, C d),
/* 0x21 */ op!(R d, D d),
/* 0x22 */ op!(C d, R d),
/* 0x23 */ op!(D d, R d),
/* 0x24 */ op!(R d, T d),
/* 0x25 */ inval!(),
/* 0x26 */ op!(T d, R d),
/* 0x27 */ inval!(),
/* 0x28 */ inval!(),
/* 0x29 */ inval!(),
/* 0x2A */ inval!(),
/* 0x2B */ inval!(),
/* 0x2C */ inval!(),
/* 0x2D */ inval!(),
/* 0x2E */ inval!(),
/* 0x2F */ inval!(),
/* 0x30 */ inval!(),
/* 0x31 */ inval!(),
/* 0x32 */ inval!(),
/* 0x33 */ inval!(),
/* 0x34 */ inval!(),
/* 0x35 */ inval!(),
/* 0x36 */ inval!(),
/* 0x37 */ inval!(),
/* 0x38 */ inval!(),
/* 0x39 */ inval!(),
/* 0x3A */ inval!(),
/* 0x3B */ inval!(),
/* 0x3C */ inval!(),
/* 0x3D */ inval!(),
/* 0x3E */ inval!(),
/* 0x3F */ inval!(),
/* 0x40 */ op!(G v, E v),
/* 0x41 */ op!(G v, E v),
/* 0x42 */ op!(G v, E v),
/* 0x43 */ op!(G v, E v),
/* 0x44 */ op!(G v, E v),
/* 0x45 */ op!(G v, E v),
/* 0x46 */ op!(G v, E v),
/* 0x47 */ op!(G v, E v),
/* 0x48 */ op!(G v, E v),
/* 0x49 */ op!(G v, E v),
/* 0x4A */ op!(G v, E v),
/* 0x4B */ op!(G v, E v),
/* 0x4C */ op!(G v, E v),
/* 0x4D */ op!(G v, E v),
/* 0x4E */ op!(G v, E v),
/* 0x4F */ op!(G v, E v),
/* 0x50 */ inval!(),
/* 0x51 */ inval!(),
/* 0x52 */ inval!(),
/* 0x53 */ inval!(),
/* 0x54 */ inval!(),
/* 0x55 */ inval!(),
/* 0x56 */ inval!(),
/* 0x57 */ inval!(),
/* 0x58 */ inval!(),
/* 0x59 */ inval!(),
/* 0x5A */ inval!(),
/* 0x5B */ inval!(),
/* 0x5C */ inval!(),
/* 0x5D */ inval!(),
/* 0x5E */ inval!(),
/* 0x5F */ inval!(),
/* 0x60 */ inval!(),
/* 0x61 */ inval!(),
/* 0x62 */ inval!(),
/* 0x63 */ inval!(),
/* 0x64 */ inval!(),
/* 0x65 */ inval!(),
/* 0x66 */ inval!(),
/* 0x67 */ inval!(),
/* 0x68 */ inval!(),
/* 0x69 */ inval!(),
/* 0x6A */ inval!(),
/* 0x6B */ inval!(),
/* 0x6C */ inval!(),
/* 0x6D */ inval!(),
/* 0x6E */ inval!(),
/* 0x6F */ inval!(),
/* 0x70 */ inval!(),
/* 0x71 */ inval!(),
/* 0x72 */ inval!(),
/* 0x73 */ inval!(),
/* 0x74 */ inval!(),
/* 0x75 */ inval!(),
/* 0x76 */ inval!(),
/* 0x77 */ inval!(),
/* 0x78 */ inval!(),
/* 0x79 */ inval!(),
/* 0x7A */ inval!(),
/* 0x7B */ inval!(),
/* 0x7C */ inval!(),
/* 0x7D */ inval!(),
/* 0x7E */ inval!(),
/* 0x7F */ inval!(),
/* 0x80 */ op!(J v),
/* 0x81 */ op!(J v),
/* 0x82 */ op!(J v),
/* 0x83 */ op!(J v),
/* 0x84 */ op!(J v),
/* 0x85 */ op!(J v),
/* 0x86 */ op!(J v),
/* 0x87 */ op!(J v),
/* 0x88 */ op!(J v),
/* 0x89 */ op!(J v),
/* 0x8A */ op!(J v),
/* 0x8B */ op!(J v),
/* 0x8C */ op!(J v),
/* 0x8D */ op!(J v),
/* 0x8E */ op!(J v),
/* 0x8F */ op!(J v),
/* 0x90 */ op!(E b),
/* 0x91 */ op!(E b),
/* 0x92 */ op!(E b),
/* 0x93 */ op!(E b),
/* 0x94 */ op!(E b),
/* 0x95 */ op!(E b),
/* 0x96 */ op!(E b),
/* 0x97 */ op!(E b),
/* 0x98 */ op!(E b),
/* 0x99 */ op!(E b),
/* 0x9A */ op!(E b),
/* 0x9B */ op!(E b),
/* 0x9C */ op!(E b),
/* 0x9D */ op!(E b),
/* 0x9E */ op!(E b),
/* 0x9F */ op!(E b),
/* 0xA0 */ no_impl!(),
/* 0xA1 */ no_impl!(),
/* 0xA2 */ inval!(),
/* 0xA3 */ no_impl!(),
/* 0xA4 */ no_impl!(),
/* 0xA5 */ no_impl!(),
/* 0xA6 */ inval!(),
/* 0xA7 */ inval!(),
/* 0xA8 */ no_impl!(),
/* 0xA9 */ no_impl!(),
/* 0xAA */ inval!(),
/* 0xAB */ no_impl!(),
/* 0xAC */ no_impl!(),
/* 0xAD */ no_impl!(),
/* 0xAE */ inval!(),
/* 0xAF */ no_impl!(),
/* 0xB0 */ inval!(),
/* 0xB1 */ inval!(),
/* 0xB2 */ no_impl!(),
/* 0xB3 */ no_impl!(),
/* 0xB4 */ no_impl!(),
/* 0xB5 */ no_impl!(),
/* 0xB6 */ op!(G v, E b),
/* 0xB7 */ op!(G v, E w),
/* 0xB8 */ inval!(),
/* 0xB9 */ inval!(),
/* 0xBA */ no_impl!(),
/* 0xBB */ no_impl!(),
/* 0xBC */ no_impl!(),
/* 0xBD */ no_impl!(),
/* 0xBE */ no_impl!(),
/* 0xBF */ no_impl!(),
/* 0xC0 */ inval!(),
/* 0xC1 */ inval!(),
/* 0xC2 */ inval!(),
/* 0xC3 */ inval!(),
/* 0xC4 */ inval!(),
/* 0xC5 */ inval!(),
/* 0xC6 */ inval!(),
/* 0xC7 */ inval!(),
/* 0xC8 */ inval!(),
/* 0xC9 */ inval!(),
/* 0xCA */ inval!(),
/* 0xCB */ inval!(),
/* 0xCC */ inval!(),
/* 0xCD */ inval!(),
/* 0xCE */ inval!(),
/* 0xCF */ inval!(),
/* 0xD0 */ inval!(),
/* 0xD1 */ inval!(),
/* 0xD2 */ inval!(),
/* 0xD3 */ inval!(),
/* 0xD4 */ inval!(),
/* 0xD5 */ inval!(),
/* 0xD6 */ inval!(),
/* 0xD7 */ inval!(),
/* 0xD8 */ inval!(),
/* 0xD9 */ inval!(),
/* 0xDA */ inval!(),
/* 0xDB */ inval!(),
/* 0xDC */ inval!(),
/* 0xDD */ inval!(),
/* 0xDE */ inval!(),
/* 0xDF */ inval!(),
/* 0xE0 */ inval!(),
/* 0xE1 */ inval!(),
/* 0xE2 */ inval!(),
/* 0xE3 */ inval!(),
/* 0xE4 */ inval!(),
/* 0xE5 */ inval!(),
/* 0xE6 */ inval!(),
/* 0xE7 */ inval!(),
/* 0xE8 */ inval!(),
/* 0xE9 */ inval!(),
/* 0xEA */ inval!(),
/* 0xEB */ inval!(),
/* 0xEC */ inval!(),
/* 0xED */ inval!(),
/* 0xEE */ inval!(),
/* 0xEF */ inval!(),
/* 0xF0 */ inval!(),
/* 0xF1 */ inval!(),
/* 0xF2 */ inval!(),
/* 0xF3 */ inval!(),
/* 0xF4 */ inval!(),
/* 0xF5 */ inval!(),
/* 0xF6 */ inval!(),
/* 0xF7 */ inval!(),
/* 0xF8 */ inval!(),
/* 0xF9 */ inval!(),
/* 0xFA */ inval!(),
/* 0xFB */ inval!(),
/* 0xFC */ inval!(),
/* 0xFD */ inval!(),
/* 0xFE */ inval!(),
/* 0xFF */ inval!(),
];
pub static GROUP_MAP: [[UnresolvedOperands; 8]; 9] = [
// Group 1
[
/* 0b000 */ none!(),
/* 0b001 */ none!(),
/* 0b010 */ none!(),
/* 0b011 */ none!(),
/* 0b100 */ none!(),
/* 0b101 */ none!(),
/* 0b110 */ none!(),
/* 0b111 */ none!(),
],
// Group 2
[
/* 0b000 */ none!(),
/* 0b001 */ none!(),
/* 0b010 */ none!(),
/* 0b011 */ none!(),
/* 0b100 */ none!(),
/* 0b101 */ none!(),
/* 0b110 */ inval!(),
/* 0b111 */ none!(),
],
// Group 3
[
/* 0b000 */ op!(I b),
/* 0b001 */ inval!(),
/* 0b010 */ none!(),
/* 0b011 */ none!(),
/* 0b100 */ op!(reg AL),
/* 0b101 */ op!(reg AL),
/* 0b110 */ op!(reg AL),
/* 0b111 */ op!(reg AL),
],
// Group 4
[
/* 0b000 */ no_impl!(),
/* 0b001 */ no_impl!(),
/* 0b010 */ inval!(),
/* 0b011 */ inval!(),
/* 0b100 */ inval!(),
/* 0b101 */ inval!(),
/* 0b110 */ inval!(),
/* 0b111 */ inval!(),
],
// Group 5
[
/* 0b000 */ op!(E v),
/* 0b001 */ op!(E v),
/* 0b010 */ op!(E v),
/* 0b011 */ no_impl!(),
/* 0b100 */ op!(E v),
/* 0b101 */ op!(E p),
/* 0b110 */ op!(E v),
/* 0b111 */ inval!(),
],
// Group 6
[
/* 0b000 */ no_impl!(),
/* 0b001 */ no_impl!(),
/* 0b010 */ no_impl!(),
/* 0b011 */ no_impl!(),
/* 0b100 */ no_impl!(),
/* 0b101 */ no_impl!(),
/* 0b110 */ inval!(),
/* 0b111 */ inval!(),
],
// Group 7
[
/* 0b000 */ op!(M s),
/* 0b001 */ op!(M s),
/* 0b010 */ op!(M s),
/* 0b011 */ op!(M s),
/* 0b100 */ op!(E w),
/* 0b101 */ inval!(),
/* 0b110 */ op!(E w),
/* 0b111 */ inval!(),
],
// Group 8
[
/* 0b000 */ inval!(),
/* 0b001 */ inval!(),
/* 0b010 */ inval!(),
/* 0b011 */ inval!(),
/* 0b100 */ none!(),
/* 0b101 */ none!(),
/* 0b110 */ none!(),
/* 0b111 */ none!(),
],
// Group 3 Alternate
[
/* 0b000 */ op!(I v),
/* 0b001 */ inval!(),
/* 0b010 */ none!(),
/* 0b011 */ none!(),
/* 0b100 */ op!(reg eAX),
/* 0b101 */ op!(reg eAX),
/* 0b110 */ op!(reg eAX),
/* 0b111 */ op!(reg eAX),
],
];
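// A minimal lookup sketch (assumptions: `group` is the 1-based group number
// from the comments above, with "Group 3 Alternate" addressed as entry 9,
// and `modrm` is the raw ModRM byte whose reg field selects the row):
fn group_operands(group: usize, modrm: u8) -> &'static UnresolvedOperands {
    let reg = ((modrm >> 3) & 0b111) as usize; // 3-bit reg/opcode-extension field
    &GROUP_MAP[group - 1][reg]
}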
|
/**
* PropertyDescriptor may lose the references to the write and read methods during
* garbage collection. If the methods can't be found, we should retry once to
* ensure that our PropertyDescriptor hasn't gone bad and the method really
* isn't there.
*
* @param writeMethod {@code true} to look for the write method for a property,
* {@code false} to look for the read method
* @return the method or {@code null}
* @throws NoSuchMethodException if we've already retried finding the method once
* @see <a href="https://github.com/DozerMapper/dozer/issues/118">Dozer mapping stops working</a>
*/
private Method retryMissingMethod(boolean writeMethod) throws NoSuchMethodException {
if (propertyDescriptorsRefreshed) {
throw new NoSuchMethodException(
"Unable to determine " + (writeMethod ? "write" : "read") +
" method for Field: '" + fieldName + "' in Class: " + clazz);
} else {
refreshPropertyDescriptors();
return writeMethod ? getWriteMethod() : getReadMethod();
}
}
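// Hypothetical caller sketch (assumptions: a cached `propertyDescriptor`
// field, and that the getter helpers consult it before falling back to the
// retry path above):
protected Method getWriteMethod() throws NoSuchMethodException {
    Method method = propertyDescriptor.getWriteMethod();
    return method != null ? method : retryMissingMethod(true);
}
|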
from numpy import ndarray, concatenate


def allstack(vals, depth=0):
    # Recursively concatenate a nested list of arrays: nesting level i is
    # joined along axis i (outermost level along axis 0).
    if type(vals[0]) is ndarray:
        return concatenate(vals, axis=depth)
    else:
        return concatenate([allstack(x, depth + 1) for x in vals], axis=depth)
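# Usage sketch (assumes `ones` is also imported from numpy): a 2x2 nested
# list of 2x2 blocks is joined along axis 1 at the inner level, then axis 0.
from numpy import ones

block = ones((2, 2))
print(allstack([[block, block], [block, block]]).shape)  # -> (4, 4)
|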
package gorden
import (
"net/http"
"github.com/gorilla/sessions"
)
type Manager struct {
strategy Strategy
sessionConfig SessionConfig
cookieStore *sessions.CookieStore
}
func (manager *Manager) Authenticate(arguments interface{}) bool {
return manager.strategy.Authenticate(arguments)
}
func (manager *Manager) IsAuthenticated() bool {
return manager.strategy.IsAuthenticated()
}
func (manager *Manager) SetUser(r *http.Request, w http.ResponseWriter, user_id int) {
session, _ := manager.cookieStore.Get(r, manager.sessionConfig.CookieName)
session.Values["user_id"] = user_id
sessions.Save(r, w)
}
func (manager *Manager) GetUser(r *http.Request) interface{} {
session, _ := manager.cookieStore.Get(r, manager.sessionConfig.CookieName)
return session.Values["user_id"]
}
func NewManager(strategyName string, sessionConfig SessionConfig) *Manager {
store := sessions.NewCookieStore(sessionConfig.CookieKey)
	// assumes a strategy with this name was registered beforehand in the
	// package-level strategies map
	strategyi, _ := strategies[strategyName]
config := &Manager{
strategy: strategyi,
sessionConfig: sessionConfig,
cookieStore: store,
}
return config
}
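// Usage sketch (assumptions: a "database" strategy has been registered, and
// SessionConfig exposes the CookieName and CookieKey fields used above):
func loginHandler(w http.ResponseWriter, r *http.Request) {
	_ = r.ParseForm() // populate r.PostForm before authenticating
	manager := NewManager("database", SessionConfig{
		CookieName: "session",
		CookieKey:  []byte("32-byte-long-secret-signing-key!"),
	})
	if manager.Authenticate(r.PostForm) {
		manager.SetUser(r, w, 42) // persist the authenticated user id
	}
}
|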
Cheers and beers: Guardiola toasts his Bayern team of champions as they celebrate Oktoberfest in Munich
Pep Guardiola is thriving on and off the pitch at Bayern Munich, merging his and their winning ways and embracing the local Bavarian culture.
The former Barcelona boss led his team on a family day out at Oktoberfest as the annual German festival marked its last official day of celebrations for the year.
Traditional lederhosen, hats and pints the size of their head were the order of the day in Munich.
New local: Bayern Munich manager Pep Guardiola has taken beer to a stein in Munich Quality time: Guardiola was joined by his wife, Christina, and his team at Oktoberfest
Bayern couldn't have had a much smoother start to Guardiola's first season, currently topping the Bundesliga, one point ahead of Borussia Dortmund, and they have two wins from two matches in the group stages of the Champions League as they look to defend their title.
Add to that victory over Chelsea in the Super Cup and you have a happy team indeed.
Wives, girlfriends and kids were all welcome as they gathered for the 180th Oktoberfest celebrations at Kaefers Wiesenschaenke.
Beerfest: Midfielder Bastian Schweinsteiger with girlfriend Sarah Brander
Cheers: Thomas Muller got some training in on his left bicep alongside his wife Lisa
Down in one: Franck Ribery's little one makes short work of her bottle - he is a devout Muslim and doesn't touch alcohol - while Spanish midfielder Javi Martinez gives Oktoberfest the thumbs up
As Europe enters an international break, Bayern's next match won't be until October 19 when they host Mainz in the Bundesliga.
Few Bayern players will be getting a rest during the period, however, with no less than 15 players called up for international duties, including seven for the German national team.
Arjen Robben believes despite his side's top form, the break comes at a good time for Bayern.
'It will do the Bayern players a world of good to be away from the club for a few days and breathe some fresh air and be surrounded by other people for a change,' he said.
Family day: Belgium's Daniel van Buyten was happy to celebrate German beer with his wife Celine
Dress-ups: Also getting in on the act were Arjen Robben with wife Bernadien and Dante and his wife Jocelina
Sweethearts: Striker Claudio Pizarro with his childhood love Karla Salcedo
Young guns: (From left) Mitchell Weiser, Jerome Boateng and Xherdan Shaqiri
This is of course not the first time Guardiola has donned lederhosen in his time at Bayern - the new manager looked perfectly at home in the shorts and braces when he posed for the traditional team photo at the start of the season.
The European champions are also partial to a brew, having tipped beer over each other as they celebrated their 23rd Bundesliga title last year at Allianz Arena.
|
/**
* This class represents the {@link CustomField} for the <code>materials</code>
* property of {@link RegistrationUnit} class. Component consist of
* {@link Table} and {@link Button} for creating new and deleting selected
* {@link Material} items from a table.
*/
class MaterialsCustomField extends CustomField {
private static final long serialVersionUID = 1L;
private final Logger logger = LoggerFactory.getLogger(MaterialsCustomField.class.getName());
private MaterialsCustomFieldController controller = new MaterialsCustomFieldController(this);
private BeanItemContainer<Material> materialContainer = new BeanItemContainer<Material>(Material.class);
private static final int NUMBER_OF_VISIBLE_ROWS = 4;
private static final String SUBTYPEID_PROPERTY = "subtypeId";
private Table materialsList;
public MaterialsCustomField(String caption) {
logger.trace("Entering MaterialsCustomField()");
setCaption(caption);
HorizontalLayout layout = new HorizontalLayout();
layout.setSizeFull();
layout.setSpacing(true);
materialsList = new Table();
materialsList.setSelectable(true);
materialsList.setEditable(true);
materialsList.setTableFieldFactory(new MaterialsTableFieldFactory());
materialsList.setMultiSelect(true);
materialsList.setSizeFull();
materialsList.setImmediate(true);
materialsList.setColumnHeaderMode(Table.COLUMN_HEADER_MODE_HIDDEN);
materialsList.setPageLength(NUMBER_OF_VISIBLE_ROWS);
materialsList.setContainerDataSource(materialContainer);
materialsList.setVisibleColumns(new Object[] {SUBTYPEID_PROPERTY});
materialsList.addActionHandler(controller);
materialsList.addListener((ItemClickListener) controller);
layout.addComponent(materialsList);
layout.setExpandRatio(materialsList, 1f);
setCompositionRoot(layout);
logger.trace("Exiting MaterialsCustomField()");
}
/**
* This method is automatically called when there is a need to read and set
* the value to a table.
* <p>
* Basically, it is overriden because our {@link Table} is in a multiselect
* mode. This again means that the table will return the {@link Set} of the
* selected items. And this is the sole reason that <code>getType()</code>
* is always returning <code>ArrayList.class</code>.
* <p>
* If we do not override this method and return <code>ArrayList.class</code>
* , framework data binding will not be able to set the value for the
* <code>materials</code> property of {@link RegistrationUnit} class. Our
* <code>materials</code> property is a {@link List} of {@link Material}
* objects. The {@link List} interface is not compatible to {@link Set}
* interface and the framework will throw exception.
*/
@Override
public Class<?> getType() {
logger.trace("Entering getType()");
logger.trace("Exiting getType()");
return ArrayList.class;
}
/**
* Method handles the selection of items.
* <p>
* This is modeled against the usual way that various operating systems
* behave. For example, if user makes a multiple selection and triggers
* context menu on one of the selected item, this needs to show a context
* menu but without deselection of items.
* <p>
* But if the user has selected more than one item and triggers the context
* menu on item which is not in the selection list, application will show
* the context menu but it will also deselect every item and instead select
* the one that user tried to trigger context menu on.
*
* @param item
* - {@link Material} which needs to be added to selection.
*/
void selectTableItem(Object item) {
logger.trace("Entering selectTableItem()");
@SuppressWarnings("unchecked")
Set<Material> selectedItems = ((Set<Material>) materialsList.getValue());
        if (!selectedItems.contains(item)) {
materialsList.setValue(null);
}
materialsList.select(item);
logger.trace("Exiting selectTableItem()");
}
/**
* Removes selected items in a table from the material container.
*/
void removeSelectedItems() {
logger.trace("Entering removeSelectedItems()");
@SuppressWarnings("unchecked")
Set<Material> selectedItems = ((Set<Material>) materialsList.getValue());
for (Material item : selectedItems) {
materialsList.removeItem(item);
}
logger.trace("Exiting removeSelectedItems()");
}
/**
* Adds new item of {@link Material} type to the material container.
*/
public void addNewItem() {
logger.trace("Entering addNewItem()");
Material material = ObjectFactory.createMaterial();
materialsList.addItem(material);
materialsList.setValue(null);
materialsList.setCurrentPageFirstItemId(material);
materialsList.select(material);
logger.trace("Entering addNewItem()");
}
/**
* This method is automatically called by the framework when
* <code>materials</code> property of the {@link RegistrationUnit} object is
* bounded to {@link Field}. This happens in
* {@link RegistrationUnitFieldFactory}.
* <p>
* This method is used to populate the container which holds
* {@link Material} objects.
*/
@Override
public void setPropertyDataSource(Property propertyDataSource) {
logger.trace("Entering setPropertyDataSource()");
super.setPropertyDataSource(propertyDataSource);
@SuppressWarnings("unchecked")
List<Material> materials = (List<Material>) propertyDataSource.getValue();
materialContainer.removeAllItems();
materialContainer.addAll(materials);
logger.trace("Exiting setPropertyDataSource()");
}
/**
* This method is automatically called by the framework when the
* {@link Form} is commited or when there is a need to read the value from
* the field (user selected another item from the table).
* <p>
* Method will return every {@link Material} from the container.
*/
@Override
public Object getValue() {
logger.trace("Entering getValue()");
ArrayList<Material> materials = new ArrayList<Material>();
for (Object itemId : materialContainer.getItemIds()) {
materials.add(materialContainer.getItem(itemId).getBean());
}
logger.trace("Exiting getValue()");
return materials;
}
/**
* This method is automatically called by the framework when there is a need
* to discard the value from a field. For example, user clicked on
* "Discard changes" button located in the form.
* <p>
* This will simply overwrite the {@link Material} container with the
* original list of materials available from the time when the form
* initially bound the <code>materials</code> property of
* {@link RegistrationUnit} object.
*/
@Override
public void discard() throws SourceException {
logger.trace("Entering discard()");
super.discard();
Property propertyDataSource = getPropertyDataSource();
if (propertyDataSource != null) {
@SuppressWarnings("unchecked")
List<Material> materials = (List<Material>) propertyDataSource.getValue();
materialContainer.removeAllItems();
materialContainer.addAll(materials);
}
logger.trace("Exiting discard()");
}
}
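// Usage sketch (assumption: wired up from the RegistrationUnitFieldFactory
// mentioned in the javadoc when the form binds the "materials" property):
//   public Field createField(Item item, Object propertyId, Component uiContext) {
//       if ("materials".equals(propertyId)) {
//           return new MaterialsCustomField("Materials");
//       }
//       ...
//   }
|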
#include<bits/stdc++.h>
using namespace std;
char c[1000001];
int n;
char a[1000001];
int m;
int main()
{
scanf("%s",c+1);
n=strlen(c+1);
if (n==3) { printf("%c",c[1]); return 0; }
    // Two-pointer scan from both ends: try the four end-pair combinations,
    // append one character to the first half of the answer, and shrink the
    // window, skipping at most one extra character on each side per step.
    int i=1,j=n;
for (;i+3<=j;i++,j--)
if (c[i]==c[j]) a[++m]=c[i];
else if (c[i]==c[j-1]) a[++m]=c[i],j--;
else if (c[i+1]==c[j]) a[++m]=c[i+1],i++;
else a[++m]=c[i+1],i++,j--;
    // Emit the first half, an optional middle character, then the mirror half.
    for (int k=1;k<=m;k++)
printf("%c",a[k]);
if (j>=i) printf("%c",c[i]);
for (int k=m;k>=1;k--)
printf("%c",a[k]);
return 0;
}
|
module Preprocess.Expression where
import qualified Interpreter.AST as I
import qualified Parse.AST as P
import Preprocess.Constant
preprocessExpression :: P.Expression -> I.Expression
preprocessExpression (P.ConstE c) = I.Constant (preprocessConst c)
preprocessExpression (P.ConsE a b) = I.Cons (preprocessExpression a) (preprocessExpression b)
preprocessExpression (P.IfElseE a b c) = I.IfElse (preprocessExpression a) (preprocessExpression b) (preprocessExpression c)
preprocessExpression (P.LetE (P.FunctionP i a) val) = lambdaDesugar i a val
preprocessExpression (P.LetE pattern val) = I.Bind (preprocessPattern pattern) (preprocessExpression val)
preprocessExpression (P.BlockE body) = I.Block (preprocessExpression <$> body)
preprocessExpression (P.FuncApplicationE f val) = I.FunctionApplication (preprocessExpression f) (preprocessExpression val)
preprocessExpression (P.IdentifierE i) = I.Reference (preprocessIdent i)
preprocessExpression (P.ListE elems) = I.List (preprocessExpression <$> elems)
preprocessExpression (P.InfixApplicationE op l r) = I.FunctionApplication (I.FunctionApplication (I.Reference $ preprocessIdent op) (preprocessExpression l)) (preprocessExpression r)
preprocessExpression (P.MatchE e cases) = I.Match (preprocessExpression e) (preprocessCase <$> cases)
where
preprocessCase (P.MatchLine i a) = I.MatchCase (preprocessPattern i) (preprocessExpression a)
preprocessExpression s = error $ "Cannot preprocess expression: " ++ show s
lambdaDesugar :: P.Identifier -> [P.Pattern] -> P.Expression -> I.Expression
lambdaDesugar fName args body = I.Bind (I.IdentifierPattern $ preprocessIdent fName) (desugarWithoutName args body)
desugarWithoutName :: [P.Pattern] -> P.Expression -> I.Expression
desugarWithoutName [] body = preprocessExpression body
desugarWithoutName [arg] body = I.Lambda (preprocessPattern arg) (preprocessExpression body)
desugarWithoutName (arg : args) body = I.Lambda (preprocessPattern arg) (desugarWithoutName args body)
preprocessPattern :: P.Pattern -> I.Pattern
preprocessPattern (P.IdentifierP i) = I.IdentifierPattern (preprocessIdent i)
preprocessPattern (P.ConsP a b) = I.ConsPattern (preprocessPattern a) (preprocessPattern b)
preprocessPattern (P.ConstantP c) = I.ConstantPattern (preprocessConst c)
preprocessPattern P.WildP = I.WildcardPattern
preprocessPattern (P.ListP elems) = I.ListPattern (preprocessPattern <$> elems)
preprocessPattern (P.FunctionP _ _) = error "Function pattern should not exist anymore"
preprocessIdent :: P.Identifier -> I.Identifier
preprocessIdent (P.NormalIdentifier i) = I.SimpleIdentifier i
preprocessIdent (P.OpIdentifier i) = I.OperatorIdentifier i
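-- Desugaring sketch (illustrative): a parsed `let add x y = body` becomes a
-- Bind of nested single-argument lambdas,
--   lambdaDesugar add [x, y] body
--     == I.Bind (I.IdentifierPattern add')
--               (I.Lambda x' (I.Lambda y' body'))
-- where the primed values are the preprocessed forms of the parse-tree nodes.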
|
package ch.iec._61400.ews._1;
import javax.xml.bind.annotation.XmlRegistry;
/**
* This object contains factory methods for each
* Java content interface and Java element interface
* generated in the ch.iec._61400.ews._1 package.
 * <p>An ObjectFactory allows you to programmatically
* construct new instances of the Java representation
* for XML content. The Java representation of XML
* content can consist of schema derived interfaces
* and classes representing the binding of schema
* type definitions, element declarations and model
* groups. Factory methods for each of these are
* provided in this class.
*
*/
@XmlRegistry
public class ObjectFactory {
/**
* Create a new ObjectFactory that can be used to create new instances of schema derived classes for package: ch.iec._61400.ews._1
*
*/
public ObjectFactory() {
}
/**
* Create an instance of {@link SetDataSetValuesResponse }
*
*/
public SetDataSetValuesResponse createSetDataSetValuesResponse() {
return new SetDataSetValuesResponse();
}
/**
* Create an instance of {@link AssociateRequest }
*
*/
public AssociateRequest createAssociateRequest() {
return new AssociateRequest();
}
/**
* Create an instance of {@link SetLCBValuesResponse }
*
*/
public SetLCBValuesResponse createSetLCBValuesResponse() {
return new SetLCBValuesResponse();
}
/**
* Create an instance of {@link TOptFldsBRCB }
*
*/
public TOptFldsBRCB createTOptFldsBRCB() {
return new TOptFldsBRCB();
}
/**
* Create an instance of {@link GetURCBValuesResponse }
*
*/
public GetURCBValuesResponse createGetURCBValuesResponse() {
return new GetURCBValuesResponse();
}
/**
* Create an instance of {@link GetDataDirectoryRequest }
*
*/
public GetDataDirectoryRequest createGetDataDirectoryRequest() {
return new GetDataDirectoryRequest();
}
/**
* Create an instance of {@link CommandTerminationResponse }
*
*/
public CommandTerminationResponse createCommandTerminationResponse() {
return new CommandTerminationResponse();
}
/**
* Create an instance of {@link SetDataValuesRequest }
*
*/
public SetDataValuesRequest createSetDataValuesRequest() {
return new SetDataValuesRequest();
}
/**
* Create an instance of {@link TCodedEnum }
*
*/
public TCodedEnum createTCodedEnum() {
return new TCodedEnum();
}
/**
* Create an instance of {@link RemoveSubscriptionRequest }
*
*/
public RemoveSubscriptionRequest createRemoveSubscriptionRequest() {
return new RemoveSubscriptionRequest();
}
/**
* Create an instance of {@link CommandTerminationRequest }
*
*/
public CommandTerminationRequest createCommandTerminationRequest() {
return new CommandTerminationRequest();
}
/**
* Create an instance of {@link DeleteDataSetRequest }
*
*/
public DeleteDataSetRequest createDeleteDataSetRequest() {
return new DeleteDataSetRequest();
}
/**
* Create an instance of {@link TDataAttribute }
*
*/
public TDataAttribute createTDataAttribute() {
return new TDataAttribute();
}
/**
* Create an instance of {@link CancelRequest }
*
*/
public CancelRequest createCancelRequest() {
return new CancelRequest();
}
/**
* Create an instance of {@link TLOGEntry }
*
*/
public TLOGEntry createTLOGEntry() {
return new TLOGEntry();
}
/**
* Create an instance of {@link SetBRCBValuesRequest }
*
*/
public SetBRCBValuesRequest createSetBRCBValuesRequest() {
return new SetBRCBValuesRequest();
}
/**
* Create an instance of {@link TControlValue }
*
*/
public TControlValue createTControlValue() {
return new TControlValue();
}
/**
* Create an instance of {@link SelectWithValueResponse }
*
*/
public SelectWithValueResponse createSelectWithValueResponse() {
return new SelectWithValueResponse();
}
/**
* Create an instance of {@link GetLogicalDeviceDirectoryRequest }
*
*/
public GetLogicalDeviceDirectoryRequest createGetLogicalDeviceDirectoryRequest() {
return new GetLogicalDeviceDirectoryRequest();
}
/**
* Create an instance of {@link QueryLogAfterResponse }
*
*/
public QueryLogAfterResponse createQueryLogAfterResponse() {
return new QueryLogAfterResponse();
}
/**
* Create an instance of {@link GetDataSetValuesResponse }
*
*/
public GetDataSetValuesResponse createGetDataSetValuesResponse() {
return new GetDataSetValuesResponse();
}
/**
* Create an instance of {@link GetBRCBValuesResponse }
*
*/
public GetBRCBValuesResponse createGetBRCBValuesResponse() {
return new GetBRCBValuesResponse();
}
/**
* Create an instance of {@link QueryLogAfterRequest }
*
*/
public QueryLogAfterRequest createQueryLogAfterRequest() {
return new QueryLogAfterRequest();
}
/**
* Create an instance of {@link CreateDataSetResponse }
*
*/
public CreateDataSetResponse createCreateDataSetResponse() {
return new CreateDataSetResponse();
}
/**
* Create an instance of {@link TReportFormat }
*
*/
public TReportFormat createTReportFormat() {
return new TReportFormat();
}
/**
* Create an instance of {@link CreateDataSetRequest }
*
*/
public CreateDataSetRequest createCreateDataSetRequest() {
return new CreateDataSetRequest();
}
/**
* Create an instance of {@link GetServerDirectoryResponse }
*
*/
public GetServerDirectoryResponse createGetServerDirectoryResponse() {
return new GetServerDirectoryResponse();
}
/**
* Create an instance of {@link SelectRequest }
*
*/
public SelectRequest createSelectRequest() {
return new SelectRequest();
}
/**
* Create an instance of {@link TDataAttributeDefinition }
*
*/
public TDataAttributeDefinition createTDataAttributeDefinition() {
return new TDataAttributeDefinition();
}
/**
* Create an instance of {@link GetBRCBValuesRequest }
*
*/
public GetBRCBValuesRequest createGetBRCBValuesRequest() {
return new GetBRCBValuesRequest();
}
/**
* Create an instance of {@link TArray }
*
*/
public TArray createTArray() {
return new TArray();
}
/**
* Create an instance of {@link SetBRCBValuesResponse }
*
*/
public SetBRCBValuesResponse createSetBRCBValuesResponse() {
return new SetBRCBValuesResponse();
}
/**
* Create an instance of {@link AddSubscriptionResponse }
*
*/
public AddSubscriptionResponse createAddSubscriptionResponse() {
return new AddSubscriptionResponse();
}
/**
* Create an instance of {@link GetServerDirectoryRequest }
*
*/
public GetServerDirectoryRequest createGetServerDirectoryRequest() {
return new GetServerDirectoryRequest();
}
/**
* Create an instance of {@link TEnumerated }
*
*/
public TEnumerated createTEnumerated() {
return new TEnumerated();
}
/**
* Create an instance of {@link SelectResponse }
*
*/
public SelectResponse createSelectResponse() {
return new SelectResponse();
}
/**
* Create an instance of {@link GetLCBValuesRequest }
*
*/
public GetLCBValuesRequest createGetLCBValuesRequest() {
return new GetLCBValuesRequest();
}
/**
* Create an instance of {@link AbortResponse }
*
*/
public AbortResponse createAbortResponse() {
return new AbortResponse();
}
/**
* Create an instance of {@link SetDataSetValuesRequest }
*
*/
public SetDataSetValuesRequest createSetDataSetValuesRequest() {
return new SetDataSetValuesRequest();
}
/**
* Create an instance of {@link SetURCBValuesRequest }
*
*/
public SetURCBValuesRequest createSetURCBValuesRequest() {
return new SetURCBValuesRequest();
}
/**
* Create an instance of {@link AbortRequest }
*
*/
public AbortRequest createAbortRequest() {
return new AbortRequest();
}
/**
* Create an instance of {@link TDataAttrType }
*
*/
public TDataAttrType createTDataAttrType() {
return new TDataAttrType();
}
/**
* Create an instance of {@link GetDataDefinitionResponse }
*
*/
public GetDataDefinitionResponse createGetDataDefinitionResponse() {
return new GetDataDefinitionResponse();
}
/**
* Create an instance of {@link OperateResponse }
*
*/
public OperateResponse createOperateResponse() {
return new OperateResponse();
}
/**
* Create an instance of {@link TTimeStamp }
*
*/
public TTimeStamp createTTimeStamp() {
return new TTimeStamp();
}
/**
* Create an instance of {@link GetDataSetDirectoryResponse }
*
*/
public GetDataSetDirectoryResponse createGetDataSetDirectoryResponse() {
return new GetDataSetDirectoryResponse();
}
/**
* Create an instance of {@link TOptFldsURCB }
*
*/
public TOptFldsURCB createTOptFldsURCB() {
return new TOptFldsURCB();
}
/**
* Create an instance of {@link GetLogicalDeviceDirectoryResponse }
*
*/
public GetLogicalDeviceDirectoryResponse createGetLogicalDeviceDirectoryResponse() {
return new GetLogicalDeviceDirectoryResponse();
}
/**
* Create an instance of {@link GetLogicalNodeDirectoryRequest }
*
*/
public GetLogicalNodeDirectoryRequest createGetLogicalNodeDirectoryRequest() {
return new GetLogicalNodeDirectoryRequest();
}
/**
* Create an instance of {@link TQuality }
*
*/
public TQuality createTQuality() {
return new TQuality();
}
/**
* Create an instance of {@link TDetailQual }
*
*/
public TDetailQual createTDetailQual() {
return new TDetailQual();
}
/**
* Create an instance of {@link QueryLogByTimeRequest }
*
*/
public QueryLogByTimeRequest createQueryLogByTimeRequest() {
return new QueryLogByTimeRequest();
}
/**
* Create an instance of {@link TCheck }
*
*/
public TCheck createTCheck() {
return new TCheck();
}
/**
* Create an instance of {@link ReleaseResponse }
*
*/
public ReleaseResponse createReleaseResponse() {
return new ReleaseResponse();
}
/**
* Create an instance of {@link TimeActivatedOperateResponse }
*
*/
public TimeActivatedOperateResponse createTimeActivatedOperateResponse() {
return new TimeActivatedOperateResponse();
}
/**
* Create an instance of {@link GetDataDefinitionRequest }
*
*/
public GetDataDefinitionRequest createGetDataDefinitionRequest() {
return new GetDataDefinitionRequest();
}
/**
* Create an instance of {@link GetLCBValuesResponse }
*
*/
public GetLCBValuesResponse createGetLCBValuesResponse() {
return new GetLCBValuesResponse();
}
/**
* Create an instance of {@link QueryLogByTimeResponse }
*
*/
public QueryLogByTimeResponse createQueryLogByTimeResponse() {
return new QueryLogByTimeResponse();
}
/**
* Create an instance of {@link GetLogicalNodeDirectoryResponse }
*
*/
public GetLogicalNodeDirectoryResponse createGetLogicalNodeDirectoryResponse() {
return new GetLogicalNodeDirectoryResponse();
}
/**
* Create an instance of {@link TDataAttributeValue }
*
*/
public TDataAttributeValue createTDataAttributeValue() {
return new TDataAttributeValue();
}
/**
* Create an instance of {@link TFcdFcdaType }
*
*/
public TFcdFcdaType createTFcdFcdaType() {
return new TFcdFcdaType();
}
/**
* Create an instance of {@link GetURCBValuesRequest }
*
*/
public GetURCBValuesRequest createGetURCBValuesRequest() {
return new GetURCBValuesRequest();
}
/**
* Create an instance of {@link GetDataValuesResponse }
*
*/
public GetDataValuesResponse createGetDataValuesResponse() {
return new GetDataValuesResponse();
}
/**
* Create an instance of {@link TOrigin }
*
*/
public TOrigin createTOrigin() {
return new TOrigin();
}
/**
* Create an instance of {@link DeleteDataSetResponse }
*
*/
public DeleteDataSetResponse createDeleteDataSetResponse() {
return new DeleteDataSetResponse();
}
/**
* Create an instance of {@link OperateRequest }
*
*/
public OperateRequest createOperateRequest() {
return new OperateRequest();
}
/**
* Create an instance of {@link GetDataSetValuesRequest }
*
*/
public GetDataSetValuesRequest createGetDataSetValuesRequest() {
return new GetDataSetValuesRequest();
}
/**
* Create an instance of {@link GetLogStatusValuesRequest }
*
*/
public GetLogStatusValuesRequest createGetLogStatusValuesRequest() {
return new GetLogStatusValuesRequest();
}
/**
* Create an instance of {@link TDATASet }
*
*/
public TDATASet createTDATASet() {
return new TDATASet();
}
/**
* Create an instance of {@link TOptFldsLCB }
*
*/
public TOptFldsLCB createTOptFldsLCB() {
return new TOptFldsLCB();
}
/**
* Create an instance of {@link GetLogStatusValuesResponse }
*
*/
public GetLogStatusValuesResponse createGetLogStatusValuesResponse() {
return new GetLogStatusValuesResponse();
}
/**
* Create an instance of {@link CancelResponse }
*
*/
public CancelResponse createCancelResponse() {
return new CancelResponse();
}
/**
* Create an instance of {@link GetDataValuesRequest }
*
*/
public GetDataValuesRequest createGetDataValuesRequest() {
return new GetDataValuesRequest();
}
/**
* Create an instance of {@link ReleaseRequest }
*
*/
public ReleaseRequest createReleaseRequest() {
return new ReleaseRequest();
}
/**
* Create an instance of {@link ReportResponse }
*
*/
public ReportResponse createReportResponse() {
return new ReportResponse();
}
/**
* Create an instance of {@link TBasicType }
*
*/
public TBasicType createTBasicType() {
return new TBasicType();
}
/**
* Create an instance of {@link ReportRequest }
*
*/
public ReportRequest createReportRequest() {
return new ReportRequest();
}
/**
* Create an instance of {@link GetDataDirectoryResponse }
*
*/
public GetDataDirectoryResponse createGetDataDirectoryResponse() {
return new GetDataDirectoryResponse();
}
/**
* Create an instance of {@link RemoveSubscriptionResponse }
*
*/
public RemoveSubscriptionResponse createRemoveSubscriptionResponse() {
return new RemoveSubscriptionResponse();
}
/**
* Create an instance of {@link TOrcat }
*
*/
public TOrcat createTOrcat() {
return new TOrcat();
}
/**
* Create an instance of {@link TTrgCond }
*
*/
public TTrgCond createTTrgCond() {
return new TTrgCond();
}
/**
* Create an instance of {@link TEntryData }
*
*/
public TEntryData createTEntryData() {
return new TEntryData();
}
/**
* Create an instance of {@link GetDataSetDirectoryRequest }
*
*/
public GetDataSetDirectoryRequest createGetDataSetDirectoryRequest() {
return new GetDataSetDirectoryRequest();
}
/**
* Create an instance of {@link SetURCBValuesResponse }
*
*/
public SetURCBValuesResponse createSetURCBValuesResponse() {
return new SetURCBValuesResponse();
}
/**
* Create an instance of {@link SetLCBValuesRequest }
*
*/
public SetLCBValuesRequest createSetLCBValuesRequest() {
return new SetLCBValuesRequest();
}
/**
* Create an instance of {@link TDAType }
*
*/
public TDAType createTDAType() {
return new TDAType();
}
/**
* Create an instance of {@link TimeActivatedOperateRequest }
*
*/
public TimeActivatedOperateRequest createTimeActivatedOperateRequest() {
return new TimeActivatedOperateRequest();
}
/**
* Create an instance of {@link SelectWithValueRequest }
*
*/
public SelectWithValueRequest createSelectWithValueRequest() {
return new SelectWithValueRequest();
}
/**
* Create an instance of {@link AssociateResponse }
*
*/
public AssociateResponse createAssociateResponse() {
return new AssociateResponse();
}
/**
* Create an instance of {@link SetDataValuesResponse }
*
*/
public SetDataValuesResponse createSetDataValuesResponse() {
return new SetDataValuesResponse();
}
/**
* Create an instance of {@link AddSubscriptionRequest }
*
*/
public AddSubscriptionRequest createAddSubscriptionRequest() {
return new AddSubscriptionRequest();
}
}
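// Usage sketch (assumptions: the JAXB runtime is on the classpath; types
// without @XmlRootElement may additionally need a JAXBElement wrapper):
//   ObjectFactory factory = new ObjectFactory();
//   GetDataValuesRequest request = factory.createGetDataValuesRequest();
//   JAXBContext ctx = JAXBContext.newInstance("ch.iec._61400.ews._1");
//   ctx.createMarshaller().marshal(request, System.out);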
|
def Output_ReachOutput(self, riv, rch, prof, nVar):
    # Thin wrapper over the HEC-RAS controller COM call: output placeholders
    # are passed as None and come back filled in by the call.
    rc = self._rc
    nRS = None
    rs = None
    ChannelDist = None
    value = None
    res = rc.Output_ReachOutput(riv, rch, prof, nVar, nRS, rs, ChannelDist,
                                value)
    riv, rch, prof, nVar, nRS, rs, ChannelDist, value = res
    return nRS, rs, ChannelDist, value
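# Usage sketch (assumptions: `controller` is the wrapper object exposing the
# method above with a HEC-RAS project already open; variable code 2 is
# illustrative):
def print_reach_profile(controller, riv=1, rch=1, prof=1, var=2):
    n_rs, stations, distances, values = controller.Output_ReachOutput(riv, rch, prof, var)
    for station, dist, val in zip(stations, distances, values):
        print(station, dist, val)
|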
Deputy Rowan County Clerk Brian Mason tells the media Monday morning that he will continue issuing marriage licenses. (September 14, 2015) (Photo: Tim Webb, Special to the CJ)
MOREHEAD, Ky. — Although she returned to her office Monday, Rowan County Clerk Kim Davis remained out of sight as one of her deputies issued a heavily altered marriage license to a lesbian couple, likely shielding Davis from more jail time but not the disappointment of her fiery religious supporters.
Davis resumed work for the first time following her high-profile release from jail last week. Before opening, she told reporters that she still refuses to authorize any marriage licenses, but will not block a deputy clerk who began providing them to couples more than a week ago.
Shannon and Carmen Wampler-Collins, the only applicants Monday, slogged through a crowd of protesters and media outside the courthouse, where religiously fervent demonstrators were howling anti-gay messages and waving signs. Supporters formed a human chain to help escort them inside, but two hecklers continued to taunt them as they filled out paperwork at the counter.
Wording on the license was modified to remove any mention of Davis or her office, and the form now states that it was issued pursuant to a federal court order rather than the county clerk. A space normally signed by the deputy clerk is now signed by a "notary public."
Still, the couple said they don't have any concerns over validity since Gov. Steve Beshear, Attorney General Jack Conway and many others argue that the forms are legitimate.
“It’s a temporary patch," said Shannon Wampler-Collins. "It will work for right now, but I would like to see her resign if she is not going to do the job.”
The couple has been together for 23 years and already held a commitment ceremony in 1995. They are raising two children together, and in the worst case scenario, "we'll come back," Carmen said. "We are going forward until someone tells us otherwise.”
A tense scene inside the clerk's office was prolonged by a printer error as one man shouted that sin would separate the women from God and "there's no such thing as homosexual marriage."
Elizabeth Johnston, a protester who traveled from Ohio, accused the couple of persecuting Davis and called the deputy who issued the license a coward.
"It is a compromise to let homosexual marriages come out of this office," she said in an interview. "This is not about just Kim's name. This is about ... whether we are going to defy God who defined marriage as between one man and one woman."
Shannon Wampler-Collins said she was so happy to get the paperwork that she didn't even hear the detractors, and nearly all of Davis' supporters had left the courthouse by early afternoon.
Earlier in the day, Davis said she faced a choice between her conscience and freedom and prayed hard about her decision. She called the altered forms a remedy to reconcile her conscience with the federal injunction against her, and she bemoaned that her deputy clerks have been caught in the middle.
"If any of them feels that they must issue an unauthorized license to avoid being thrown in jail, I understand their tough choice and I will take no action against them," she said.
Davis also reiterated her plea for Beshear and legislators to rewrite state marriage laws to accommodate her religious objections. She argued that millions of others in the public and private sector face similar conflicts.
"I don't want to have this conflict. I don't want to be in the spotlight. And I certainly don't want to be a whipping post," she said. "I am no hero. I'm just a person who has been transformed by the grace of God."
One of her attorneys, Harry Mihet, chief litigation counsel at Liberty Counsel, pledged Monday to vigorously defend her in the multiple lawsuits that resulted from her policy. He said the group will file an additional lawsuit against Beshear, who has declined to call a special session on the matter.
Revising the licenses was a "good-faith" attempt to satisfy the court and couples in the case, and U.S. District Judge David Bunning knew that the forms were being changed when he released Davis from jail, Mihet argued. He said Liberty Counsel has not discussed any challenges to the licenses issued so far.
“They will have to remain in legal limbo for now until hopefully the governor and the legislature intervenes to change the law,” he said.
But Beshear said Monday that, even with the latest changes, he was "confident and satisfied" that the licenses comply with Kentucky statute and will be recognized as valid.
Since Bunning jailed Davis for contempt of court, Deputy Clerk Brian Mason has issued around 11 licenses, mostly for same-sex couples.
When Bunning released Davis from jail on Tuesday, he warned her not to interfere with deputies and called on deputy clerks to file regular status updates.
Shannon and Carmen Wampler-Collins, who live in Lexington, Ky., said they decided to obtain a license in Rowan County because Carmen grew up in the community and wanted to show locals that it's OK to be gay. Aside from the circus outside, finally getting the form was exciting, Shannon said.
But Mary Hargis, an organizer with the Rowan County Rights Coalition, which has protested Davis' policy for weeks, called attempts to equate Davis with civil rights icons a "travesty," and said no couple should be told they are going to hell or have to endure being called sodomites just to get a license. She said legislators need to pass a law to address public officials who refuse to perform their duties.
"When (Davis) willfully decided that she was going to disobey the law, she became a criminal," Hargis said. "For whatever reason, all criminals have an excuse for their actions."
Ante Pavkovic, a pastor from North Carolina who has protested same-sex marriage outside the courthouse about two weeks, appeared conflicted over the announcement Monday. He said Davis' attorney is probably trying to protect her but failing to address the larger principles at stake. He said it's hard to know if Davis backed down or received bad legal advice.
"On appearances, it is not enough," he said. "It's yielding our republic to a rogue Supreme Court ... and that's a big mistake if that's what it is."
Reporter Mike Wynn can be reached at (502) 875-5136. Follow him on Twitter at @MikeWynn_CJ.
|
//-----------------------------------------------------------------------------
// Purpose: Removes all outputs from the outputs combo box that are NOT present
// in the given entity's output list. Used when multiple entities are
// selected into the Entity Properties dialog.
// Input : pEntity - Entity to use for filter.
//-----------------------------------------------------------------------------
void COP_Output::FilterEntityOutputs(CMapEntity *pEntity) {
GDclass *pClass = pEntity->GetClass();
if (pClass == NULL) {
return;
}
char szText[MAX_PATH];
int nCount = m_ComboOutput.GetCount();
if (nCount > 0) {
for (int i = nCount - 1; i >= 0; i--) {
if (m_ComboOutput.GetLBText(i, szText) != CB_ERR) {
if (pClass->FindOutput(szText) == NULL) {
m_ComboOutput.DeleteString(i);
}
}
}
}
}
|
East Lansing voters are deciding Tuesday night whether to become Michigan’s 23rd city that imposes an income tax on its residents.
East Lansing voters defeated a plan to make their city the state's 23rd to impose an income tax on its residents, according to unofficial results late Tuesday night.
A majority of voters -- 53 percent -- rejected the ballot question to impose a 1 percent income tax on residents and 0.5 percent income tax on nonresidents. It would have covered Michigan State University’s 2,500 workers and most of its 50,300 students, as well as the city's 48,000 other residents.
There would have been an exemption from the income tax for those making $5,000 or less a year.
A separate proposal to reduce property taxes from 20 mills to 13 won with 63 percent in favor of the tax reduction.
The proposed twin moves would have boosted city coffers an estimated $5 million a year, according to a city study. That could have helped address the city’s $200 million in debt caused by rising pension and retiree health care costs. East Lansing has cut its workforce by a fourth, reduced health care benefits and entered cost-sharing partnerships with other municipalities.
MSU, which is the city’s biggest employer, opposed the plan as a blow to low-salaried workers and most part-time working students. University leaders argue the proposals would have shifted the tax burden from residents to MSU, which has more workers, 14,000, than all other city employers combined.
MSU proposed giving the city $2 million a year for 10 years in lieu of the tax, an offer rejected by East Lansing Mayor Mark Meadows.
The income tax-related ballot questions have caused tensions between city leaders and university officials.
"Some (MSU regents) remain adamantly opposed to paying the City to remedy its past financial mismanagement,” MSU President Lou Anna Simon wrote on July 21.
Meadows said other factors had caused the fiscal mess. “This statement is offensive and uninformed,” he wrote July 25.
The only major Michigan college whose hometown has a local income tax is Wayne State University in Detroit. Nearby Lansing also has a city income tax.
[email protected]
|
import { RouteDefinition } from '../../types';
import { createRedirection, createRoute, includeRoutes } from '../routes';
describe.skip('utils', () => {
describe('createRoute', () => {
const RootView = () => null;
const LoginView = () => null;
const SignUpView = () => null;
beforeAll(() => {
Reflect.defineMetadata('self:controller', {}, RootView);
Reflect.defineMetadata('self:controller', {}, LoginView);
Reflect.defineMetadata('self:controller', {}, SignUpView);
});
it('creates a valid route definition', () => {
const route = createRoute('/', RootView);
expect(route).toEqual<RouteDefinition>({
path: '/',
Component: RootView,
children: [],
});
});
it('creates children routes', () => {
const route = createRoute('/', RootView, [createRoute('/login', LoginView)]);
expect(route).toEqual<RouteDefinition>({
path: '/',
Component: RootView,
children: [{ path: '/login', Component: LoginView, children: [] }],
});
});
it('flattens an array of arrays as children', () => {
const result = createRoute('/home', RootView, [
[createRoute('/login', LoginView)],
[createRoute('/sign-up', SignUpView)],
]);
      expect(result).toEqual({
        path: '/home',
        Component: RootView,
        children: [
          { path: '/login', Component: LoginView, children: [] },
          { path: '/sign-up', Component: SignUpView, children: [] },
        ],
      });
});
it('creates an empty controller when a controller-less view is passed instead', () => {
const route = createRoute('/', () => null);
expect(route.Component).toBeDefined();
// @ts-expect-error
expect(route.Component.displayName).toBe('EmptyController');
});
});
describe('includeRoutes', () => {
class LoginView {}
class SignUpView {}
class TestModule {}
const loginRoute = {
path: '/login',
controller: LoginView,
children: [],
};
    const signUpRoute = {
      path: '/sign-up',
      controller: SignUpView,
      children: [],
    };
beforeAll(() => {
Reflect.defineMetadata('self:controller', {}, LoginView);
Reflect.defineMetadata('self:controller', {}, SignUpView);
Reflect.defineMetadata('self:subapp', { routing: [loginRoute, signUpRoute] }, TestModule);
});
it('should generate a prefixed array of routes with isIncluded as true', () => {
const result = includeRoutes('/test', TestModule);
expect(result).toHaveLength(2);
expect(result[0]).toEqual({ ...loginRoute, path: '/test/login' });
expect(result[1]).toEqual({ ...signUpRoute, path: '/test/sign-up' });
});
});
describe('createRedirection', () => {
it('creates a valid route definition', () => {
const route = createRedirection('/from', '/to');
expect(route.path).toEqual('/from');
expect(route.children).toEqual([]);
expect(route.Component).toBeDefined();
});
    it('creates a controller that will redirect when mounted', () => {
      const { Component } = createRedirection('/from', '/to');
      const replaceMock = jest.fn();
      const fakeHistory = { replace: replaceMock };
      // NOTE: Component is never mounted with fakeHistory here, so these
      // assertions cannot pass as written -- one reason this suite is skipped.
      expect(fakeHistory.replace).toHaveBeenCalledTimes(1);
      expect(fakeHistory.replace).toHaveBeenCalledWith('/to');
    });
});
});
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2022/1/10 13:11
# @Author : WeiHua
import cv2
import numpy as np
import torch
from mmdet.models import StandardRoIHead
from mmdet.models.builder import HEADS
from mmdet.models.roi_heads.mask_heads.fcn_mask_head import _do_paste_mask
from mmdet.core import bbox2roi
import os
@HEADS.register_module()
class ReI_StandardRoIHead(StandardRoIHead):
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
ret_det=False,
**kwargs):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
proposals (list[Tensors]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
ret_det (Bool): if returning detection results
Returns:
dict[str, Tensor]: a dictionary of loss components
list[Tensor]: a list of Tensors with shape (N, 4), N means
num of boxes, 4 means (tl_x, tl_y, br_x, br_y).
list[Tensor]: a list of Tensors with shape (N, img_h, img_w),
N means num of boxes.
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
# type : class SamplingResult
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results['loss_bbox'])
if ret_det: # to be checked
assert len(img_metas) == len(sampling_results)
det_boxes = []
with torch.no_grad():
# bbox_results['bbox_pred']: (samples of all imgs, 4) -> delta (X, Y, W, H)
bbox_pred_delta = bbox_results['bbox_pred']
st_cnt = 0
for i, img_meta in enumerate(img_metas):
num_samples = len(sampling_results[i].bboxes)
# (N_sample, 4), 4 represents (tl_x, tl_y, br_x, br_y)
cur_bboxes = self.bbox_head.bbox_coder.decode(sampling_results[i].bboxes,
bbox_pred_delta[st_cnt:st_cnt+num_samples],
max_shape=img_meta['img_shape'])
# only keep positive boxes
det_boxes.append(cur_bboxes[:len(sampling_results[i].pos_inds)])
st_cnt += num_samples
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
if ret_det:
total_pos = sum([len(x) for x in det_boxes])
                assert mask_results['mask_pred'].shape[0] == total_pos, "Positive sample counts don't match"
det_masks = []
with torch.no_grad():
st_cnt = 0
for i, img_meta in enumerate(img_metas):
img_shape = img_meta['img_shape']
# N_pos_samples, h, w
cur_masks, _ = _do_paste_mask(mask_results['mask_pred'][st_cnt:st_cnt+len(det_boxes[i]), ...],
det_boxes[i], img_shape[0], img_shape[1],
skip_empty=False)
st_cnt += len(det_boxes[i])
det_masks.append((cur_masks >= self.test_cfg.mask_thr_binary).to(dtype=torch.bool))
if ret_det:
# with torch.no_grad():
# self.vis_det_train(det_boxes, det_masks, img_metas)
return losses, det_boxes, det_masks
return losses
def vis_det_train(self, det_boxes, det_masks, img_metas,
save_dir='/usr/local/dataset/whua/ie_e2e_log/det_train_vis'):
"""
Visualize detection results during train
Args:
det_boxes (list[Tensor]): Tensor with shape N, 4 -> 4 indicates
(tl_x, tl_y, br_x, br_y)
det_masks (list[Tensor]): Tensor with shape N, h, w -> h, w indicates
the height and width of image
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
Returns:
"""
assert len(det_boxes) == len(det_masks) == len(img_metas)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
# 1. mask src with mask
# 2. draw box on mask
for det_boxes_, det_masks_, img_metas_ in zip(det_boxes, det_masks, img_metas):
            boxes = det_boxes_.detach().cpu().numpy()
            masks = det_masks_.detach().cpu().numpy()
img_name = img_metas_['filename'].split('/')[-1]
img_name_meta = img_name.split('.')[0]
out_dir = os.path.join(save_dir, img_name_meta)
if not os.path.exists(out_dir):
os.mkdir(out_dir)
for idx, (box, mask_) in enumerate(zip(boxes, masks)):
mask = (255*mask_).astype(np.uint8)
full_box = np.array([
[box[0], box[1]],
[box[2], box[1]],
[box[2], box[3]],
[box[0], box[3]]
])
cv2.polylines(mask, [full_box.astype(np.int32)], isClosed=True, color=(255, 255, 255), thickness=2)
cv2.imwrite(os.path.join(out_dir, f"{idx}.jpg"), mask)
# def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
# img_metas):
# """Run forward function and calculate loss for box head in training."""
# rois = bbox2roi([res.bboxes for res in sampling_results])
# bbox_results = self._bbox_forward(x, rois)
#
# bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
# gt_labels, self.train_cfg)
# loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
# bbox_results['bbox_pred'], rois,
# *bbox_targets)
# if 'nan' in str(loss_bbox['loss_bbox'].item()):
# import ipdb
# ipdb.set_trace()
#
# bbox_results.update(loss_bbox=loss_bbox)
    #     return bbox_results
|
Lots of non-answers here. I will offer some of my experiences as a former Mormon, but keep in mind that (true to the question) I believe these are reasons to criticize/condemn the Mormon church’s structure, and NOT its individual members. I don’t mean to ridicule others for their beliefs, but I do mean to share why many view the church as a whole in a negative light. A final caveat, there are many subsects of Mormonism. I am just discussing the largest branch, aka “Brighamite” or The Church of Jesus Christ of Latter Day Saints.
In my experience the primary criticism is because the LDS church has a very active hand in politics. They have been one of (if not THE) biggest opponent of marriage equality for the last couple of decades. In the 90s, they were involved in Hawaii against gay marriage, going so far as issuing new doctrine (Family: A Proclamation to the World) to justify to the court that they had a vested interest/standing and could file amicus curiae briefs. More recently they encouraged members to participate in politicking for Proposition 8 in California, and were even charged for illegally misreporting political contributions. In areas such as Utah where Mormonism is the dominant religion, it often is criticized by non (and former) members for essentially controlling politics. Particularly with social issues, the LDS church is viewed as imposing their beliefs on others and using government to do it. Some simple examples are that Utah beer is required to have a lower alcohol percentage to be sold in grocery stores. Liquor and other adult beverages can only be purchased in state-run liquor stores. Restaurants cannot serve or pour alcohol in view of the public (called the “zion curtain” law). These are not life or death impositions, but they are obvious attempts to legislate morality, and that is annoying at best and oppressive at worst.
Those more familiar with the faith may criticize it for being somewhat two-faced. As I already mentioned, the LDS church has led the charge against marriage equality, yet 150 years ago, the church was *itself* persecuted, largely for its beliefs about marriage (polygamy). Many people see it as hypocritical for the LDS church to adamantly fight for defining marriage as “between one man and one woman”, when the church’s own teachings defined it differently until relatively recently (this is actually debatable, and it could be argued that the doctrine of polygamy exists, though it is not actively practiced).
Another issue that I would say is mostly only heard from former members, is the LDS church’s financial arrangements are viewed as dishonest. The church does not publish information on how much tithing money is collected, but they expect members to pay a full 10% of their earnings. While the church should certainly be commended for using a good amount of money on charity work and disaster relief, the church also has major holdings in real estate, including ranches and farms, and larger projects such as the City Creek Mall (Salt Lake City) and a luxury apartment complex in Philadelphia. Again, the church does not fully disclose financial information in the way that many other churches do, but critics of the church will argue that the LDS church operates more like a major corporation or a political action committee than what is traditionally thought of as a “church”.
A final issue I will bring up is the church’s own stances on historical wrong-doings. A leading apostle of the church, Dallin Oaks, stated that the church does not “seek apologies or [..] give them”. There are many issues in the church’s past that some would like to see recognized by LDS leadership. In addition to the recent involvement against gay rights, the LDS church also fought against the equal rights amendment; the LDS church did not allow black people to enter temples to receive saving ordinances and did not allow them to hold priesthood until 1978; early LDS leaders did many questionable things (to put it lightly) along with polygamy, including secret marriages, marrying children as young as 14, and marrying other men’s wives. Of course, these things on their own also bring criticism to the church, though most members of the church would disavow these things (to varying degrees).
|
// package and import header restored for completeness; paths assume the
// Alibaba Sentinel source tree this class appears to come from.
package com.alibaba.csp.sentinel;

import com.alibaba.csp.sentinel.context.Context;
import com.alibaba.csp.sentinel.context.ContextUtil;
import com.alibaba.csp.sentinel.metric.extension.MetricExtension;
import com.alibaba.csp.sentinel.metric.extension.MetricExtensionProvider;
import com.alibaba.csp.sentinel.node.ClusterNode;
import com.alibaba.csp.sentinel.node.DefaultNode;
import com.alibaba.csp.sentinel.slots.block.BlockException;

/**
* This class is used to record other exceptions except block exception.
*
* @author jialiang.linjl
* @author Eric Zhao
*/
public final class Tracer {
/**
* Trace provided {@link Throwable} and increment exception count to entry in current context.
*
* @param e exception to record
*/
public static void trace(Throwable e) {
trace(e, 1);
}
/**
* Trace provided {@link Throwable} and add exception count to entry in current context.
*
* @param e exception to record
* @param count exception count to add
*/
public static void trace(Throwable e, int count) {
if (e == null || e instanceof BlockException) {
return;
}
Context context = ContextUtil.getContext();
if (context == null) {
return;
}
DefaultNode curNode = (DefaultNode)context.getCurNode();
traceExceptionToNode(e, count, context.getCurEntry(), curNode);
}
    /**
     * Trace provided {@link Throwable} and add exception count to current entry in provided context.
     *
     * @param e exception to record
     * @param count exception count to add
     * @param context context whose current entry the exception is recorded against
     * @since 1.4.2
     */
public static void traceContext(Throwable e, int count, Context context) {
if (e == null || e instanceof BlockException) {
return;
}
if (context == null) {
return;
}
DefaultNode curNode = (DefaultNode)context.getCurNode();
traceExceptionToNode(e, count, context.getCurEntry(), curNode);
}
    /**
     * Trace provided {@link Throwable} and increment exception count to provided entry.
     *
     * @param e exception to record
     * @param entry entry to record the exception against
     * @since 1.4.2
     */
public static void traceEntry(Throwable e, Entry entry) {
traceEntry(e, 1, entry);
}
    /**
     * Trace provided {@link Throwable} and add exception count to provided entry.
     *
     * @param e exception to record
     * @param count exception count to add
     * @param entry entry to record the exception against
     * @since 1.4.2
     */
public static void traceEntry(Throwable e, int count, Entry entry) {
if (e == null || e instanceof BlockException) {
return;
}
if (entry == null || entry.getCurNode() == null) {
return;
}
DefaultNode curNode = (DefaultNode)entry.getCurNode();
traceExceptionToNode(e, count, entry, curNode);
}
private static void traceExceptionToNode(Throwable t, int count, Entry entry, DefaultNode curNode) {
if (curNode == null) {
return;
}
for (MetricExtension m : MetricExtensionProvider.getMetricExtensions()) {
m.addException(entry.getResourceWrapper().getName(), count, t);
}
// clusterNode can be null when Constants.ON is false.
ClusterNode clusterNode = curNode.getClusterNode();
if (clusterNode == null) {
return;
}
clusterNode.trace(t, count);
}
private Tracer() {}
} |
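// Example usage (a sketch, assuming the usual Sentinel SphU/Entry
// flow-control idiom around a protected resource):
//
//     Entry entry = null;
//     try {
//         entry = SphU.entry("someResource");
//         doBusinessLogic();
//     } catch (BlockException ex) {
//         // request was rejected by flow control; deliberately not traced
//     } catch (Throwable t) {
//         Tracer.trace(t); // record the business exception on the current entry
//     } finally {
//         if (entry != null) {
//             entry.exit();
//         }
//     }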
import colorama


def ansi_code(name):
    """Resolve a dotted attribute path (e.g. "Fore.RED") on the colorama
    module and return the ANSI escape string, or "" if it does not exist."""
    try:
        obj = colorama
        for part in name.split("."):
            obj = getattr(obj, part)
        return obj
    except AttributeError:
        return ""
package main
import (
"context"
"fmt"
"io/ioutil"
"log/syslog"
"os"
"os/signal"
"path/filepath"
"syscall"
"text/tabwriter"
"time"
"github.com/gravitational/rigging"
goyaml "github.com/ghodss/yaml"
yaml "github.com/ghodss/yaml"
"github.com/gravitational/trace"
log "github.com/sirupsen/logrus"
logrusSyslog "github.com/sirupsen/logrus/hooks/syslog"
"gopkg.in/alecthomas/kingpin.v2"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
func main() {
var quiet bool
if err := run(&quiet); err != nil {
log.Error(trace.DebugReport(err))
if !quiet {
fmt.Printf("ERROR: %v\n", err.Error())
}
os.Exit(255)
}
}
func run(quiet *bool) error {
var (
app = kingpin.New("rig", "CLI utility to simplify K8s updates")
debug = app.Flag("debug", "turn on debug logging").Bool()
kubeConfig = app.Flag("kubeconfig", "path to kubeconfig").Default(filepath.Join(os.Getenv("HOME"), ".kube", "config")).String()
namespace = app.Flag("namespace", "Namespace of the changesets").Default(rigging.DefaultNamespace).String()
cupsert = app.Command("upsert", "Upsert resources in the context of a changeset")
cupsertChangeset = Ref(cupsert.Flag("changeset", "name of the changeset").Short('c').Envar(changesetEnvVar).Required())
cupsertFile = cupsert.Flag("file", "file with new resource spec").Short('f').Required().String()
cupsertConfigMap = app.Command("configmap", "Upsert configmap in the context of a changeset")
cupsertConfigMapChangeset = Ref(cupsertConfigMap.Flag("changeset", "name of the changeset").Short('c').Envar(changesetEnvVar).Required())
cupsertConfigMapName = cupsertConfigMap.Arg("name", "ConfigMap name").Required().String()
cupsertConfigMapNamespace = cupsertConfigMap.Flag("resource-namespace", "ConfigMap namespace").Default(rigging.DefaultNamespace).String()
cupsertConfigMapFiles = cupsertConfigMap.Flag("from-file", "files or directories with contents").Strings()
cupsertConfigMapLiterals = cupsertConfigMap.Flag("from-literal", "literals in form of key=val").Strings()
cstatus = app.Command("status", "Check status of all operations in a changeset")
cstatusResource = Ref(cstatus.Arg("resource", "resource to check, e.g. tx/tx1").Required())
		cstatusAttempts = cstatus.Flag("retry-attempts", "number of times to retry the status check").Default("1").Int()
		cstatusPeriod = cstatus.Flag("retry-period", "time to wait between status check retries").Default(fmt.Sprintf("%v", rigging.DefaultRetryPeriod)).Duration()
cget = app.Command("get", "Display one or many changesets")
cgetChangeset = Ref(cget.Flag("changeset", "Changeset name").Short('c').Envar(changesetEnvVar))
cgetOut = cget.Flag("output", "output type, one of 'yaml' or 'text'").Short('o').Default("").String()
ctr = app.Command("cs", "low level operations on changesets")
ctrDelete = ctr.Command("delete", "Delete a changeset by name")
ctrDeleteForce = ctrDelete.Flag("force", "Ignore error if resource is not found").Bool()
ctrDeleteChangeset = Ref(ctrDelete.Flag("changeset", "Changeset name").Short('c').Envar(changesetEnvVar).Required())
crevert = app.Command("revert", "Revert the changeset")
crevertChangeset = Ref(crevert.Flag("changeset", "name of the changeset").Short('c').Envar(changesetEnvVar).Required())
cfreeze = app.Command("freeze", "Freeze the changeset")
cfreezeChangeset = Ref(cfreeze.Flag("changeset", "name of the changeset").Short('c').Envar(changesetEnvVar).Required())
cdelete = app.Command("delete", "Delete a resource in a context of a changeset")
cdeleteForce = cdelete.Flag("force", "Ignore error if resource is not found").Bool()
		cdeleteCascade = cdelete.Flag("cascade", "Delete sub-resources, e.g. Pods for a DaemonSet").Default("true").Bool()
cdeleteChangeset = Ref(cdelete.Flag("changeset", "Changeset name").Short('c').Envar(changesetEnvVar).Required())
cdeleteResource = Ref(cdelete.Arg("resource", "Resource name to delete").Required())
cdeleteResourceNamespace = cdelete.Flag("resource-namespace", "Resource namespace").Default(rigging.DefaultNamespace).String()
)
app.Flag("quiet", "Suppress program output").Short('q').BoolVar(quiet)
cmd, err := app.Parse(os.Args[1:])
if err != nil {
return trace.Wrap(err)
}
switch {
case *quiet:
TurnOffLogging()
case *debug:
InitLoggerDebug()
default:
InitLoggerCLI()
}
client, config, err := getClient(*kubeConfig)
if err != nil {
return trace.Wrap(err)
}
ctx, cancel := context.WithCancel(context.Background())
go func() {
exitSignals := make(chan os.Signal, 1)
signal.Notify(exitSignals, syscall.SIGTERM, syscall.SIGINT)
		sig := <-exitSignals
		log.Infof("signal: %v", sig)
		cancel()
}()
switch cmd {
case cupsert.FullCommand():
return upsert(ctx, client, config, *namespace, *cupsertChangeset, *cupsertFile)
case cstatus.FullCommand():
return status(ctx, client, config, *namespace, *cstatusResource, *cstatusAttempts, *cstatusPeriod)
case cget.FullCommand():
return get(ctx, client, config, *namespace, *cgetChangeset, *cgetOut)
case cdelete.FullCommand():
return deleteResource(ctx, client, config, *namespace, *cdeleteChangeset, *cdeleteResourceNamespace, *cdeleteResource, *cdeleteCascade, *cdeleteForce)
case ctrDelete.FullCommand():
return csDelete(ctx, client, config, *namespace, *ctrDeleteChangeset, *ctrDeleteForce)
case crevert.FullCommand():
return revert(ctx, client, config, *namespace, *crevertChangeset)
case cfreeze.FullCommand():
return freeze(ctx, client, config, *namespace, *cfreezeChangeset)
case cupsertConfigMap.FullCommand():
return upsertConfigMap(ctx, client, config, *namespace, *cupsertConfigMapChangeset, *cupsertConfigMapName, *cupsertConfigMapNamespace, *cupsertConfigMapFiles, *cupsertConfigMapLiterals)
}
return trace.BadParameter("unsupported command: %v", cmd)
}
func getClient(configPath string) (*kubernetes.Clientset, *rest.Config, error) {
	// try the in-cluster (service account) config first, then fall back to
	// the kubeconfig at configPath
config, err := rest.InClusterConfig()
if err == nil {
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, nil, trace.Wrap(err)
}
return client, config, nil
}
config, err = clientcmd.BuildConfigFromFlags("", configPath)
if err != nil {
return nil, nil, trace.Wrap(err)
}
client, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, nil, trace.Wrap(err)
}
return client, config, nil
}
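// Ref adapts a kingpin flag or argument so it parses directly into a
// rigging.Ref; rigging.Ref is expected to implement kingpin's Value
// interface, which is what SetValue requires.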
func Ref(s kingpin.Settings) *rigging.Ref {
r := new(rigging.Ref)
s.SetValue(r)
return r
}
func revert(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, namespace string, changeset rigging.Ref) error {
if changeset.Kind != rigging.KindChangeset {
return trace.BadParameter("expected %v, got %v", rigging.KindChangeset, changeset.Kind)
}
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
err = cs.Revert(ctx, namespace, changeset.Name)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("changeset %v reverted \n", changeset.Name)
return nil
}
func freeze(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, namespace string, changeset rigging.Ref) error {
if changeset.Kind != rigging.KindChangeset {
return trace.BadParameter("expected %v, got %v", rigging.KindChangeset, changeset.Kind)
}
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
err = cs.Freeze(ctx, namespace, changeset.Name)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("changeset %v frozen, no further modifications are allowed\n", changeset.Name)
return nil
}
func deleteResource(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, namespace string, changeset rigging.Ref, resourceNamespace string, resource rigging.Ref, cascade, force bool) error {
if changeset.Kind != rigging.KindChangeset {
return trace.BadParameter("expected %v, got %v", rigging.KindChangeset, changeset.Kind)
}
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
err = cs.DeleteResource(ctx, namespace, changeset.Name, resourceNamespace, resource, cascade)
if err != nil {
if force && trace.IsNotFound(err) {
fmt.Printf("%v is not found, force flag is set, %v not updated, ignoring \n", resource.String(), changeset.Name)
return nil
}
return trace.Wrap(err)
}
fmt.Printf("changeset %v updated \n", changeset.Name)
return nil
}
func upsertConfigMap(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, changesetNamespace string, changeset rigging.Ref, configMapName, configMapNamespace string, files []string, literals []string) error {
if changeset.Kind != rigging.KindChangeset {
return trace.BadParameter("expected %v, got %v", rigging.KindChangeset, changeset.Kind)
}
configMap, err := rigging.GenerateConfigMap(configMapName, configMapNamespace, files, literals)
if err != nil {
return trace.Wrap(err)
}
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
	data, err := yaml.Marshal(configMap)
if err != nil {
return trace.Wrap(err)
}
err = cs.Upsert(ctx, changesetNamespace, changeset.Name, data)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("changeset %v updated \n", changeset.Name)
return nil
}
func upsert(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, namespace string, changeset rigging.Ref, filePath string) error {
if changeset.Kind != rigging.KindChangeset {
return trace.BadParameter("expected %v, got %v", rigging.KindChangeset, changeset.Kind)
}
data, err := ReadPath(filePath)
if err != nil {
return trace.Wrap(err)
}
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
err = cs.Upsert(ctx, namespace, changeset.Name, data)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("changeset %v updated \n", changeset.Name)
return nil
}
func status(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, namespace string, resource rigging.Ref,
retryAttempts int, retryPeriod time.Duration) error {
switch resource.Kind {
case rigging.KindChangeset:
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
err = cs.Status(ctx, namespace, resource.Name, retryAttempts, retryPeriod)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("no errors detected for %v\n", resource.Name)
return nil
case rigging.KindDaemonSet:
daemonSet, err := client.AppsV1().DaemonSets(namespace).Get(resource.Name, metav1.GetOptions{})
if err != nil {
return trace.Wrap(err)
}
updater, err := rigging.NewDaemonSetControl(rigging.DSConfig{
DaemonSet: daemonSet,
Client: client,
})
if err != nil {
return trace.Wrap(err)
}
return rigging.PollStatus(ctx, retryAttempts, retryPeriod, updater)
case rigging.KindDeployment:
deployment, err := client.AppsV1().Deployments(namespace).Get(resource.Name, metav1.GetOptions{})
if err != nil {
return trace.Wrap(err)
}
updater, err := rigging.NewDeploymentControl(rigging.DeploymentConfig{
Deployment: deployment,
Client: client,
})
if err != nil {
return trace.Wrap(err)
}
return rigging.PollStatus(ctx, retryAttempts, retryPeriod, updater)
}
return trace.BadParameter("don't know how to check status of %v", resource.Kind)
}
const (
outputYAML = "yaml"
// humanDateFormat is a human readable date formatting
humanDateFormat = "Mon Jan _2 15:04 UTC"
changesetEnvVar = "RIG_CHANGESET"
)
func get(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, namespace string, ref rigging.Ref, output string) error {
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
if ref.Name == "" {
changesets, err := cs.List(ctx, namespace)
if err != nil {
return trace.Wrap(err)
}
switch output {
case outputYAML:
data, err := yaml.Marshal(changesets)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("%v\n", string(data))
return nil
default:
if len(changesets.Items) == 0 {
fmt.Printf("No changesets found\n")
return nil
}
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
defer w.Flush()
fmt.Fprintf(w, "Name\tCreated\tStatus\tOperations\n")
for _, tr := range changesets.Items {
fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", tr.Name, tr.CreationTimestamp.Format(humanDateFormat), tr.Spec.Status, len(tr.Spec.Items))
}
return nil
}
}
tr, err := cs.Get(ctx, namespace, ref.Name)
if err != nil {
return trace.Wrap(err)
}
switch output {
case outputYAML:
data, err := yaml.Marshal(tr)
if err != nil {
return trace.Wrap(err)
}
fmt.Printf("%v\n", string(data))
return nil
default:
fmt.Printf("Changeset %v in namespace %v\n\n", tr.Name, tr.Namespace)
w := new(tabwriter.Writer)
w.Init(os.Stdout, 0, 8, 1, '\t', 0)
defer w.Flush()
fmt.Fprintf(w, "Operation\tTime\tStatus\tDescription\n")
for i, op := range tr.Spec.Items {
var info string
opInfo, err := rigging.GetOperationInfo(op)
if err != nil {
info = err.Error()
} else {
info = opInfo.String()
}
fmt.Fprintf(w, "%v\t%v\t%v\t%v\n", i, op.CreationTimestamp.Format(humanDateFormat), op.Status, info)
}
return nil
}
}
func csDelete(ctx context.Context, client *kubernetes.Clientset, config *rest.Config, namespace string, tr rigging.Ref, force bool) error {
cs, err := rigging.NewChangeset(ctx, rigging.ChangesetConfig{
Client: client,
Config: config,
})
if err != nil {
return trace.Wrap(err)
}
err = cs.Delete(ctx, namespace, tr.Name)
if err != nil {
if trace.IsNotFound(err) && force {
fmt.Printf("%v is not found and force is set\n", tr.Name)
return nil
}
return trace.Wrap(err)
}
fmt.Printf("%v has been deleted\n", tr.Name)
return nil
}
// InitLoggerCLI configures logging for CLI use: tools log to syslog by default, not stderr
func InitLoggerCLI() {
log.SetLevel(log.InfoLevel)
// clear existing hooks:
log.StandardLogger().Hooks = make(log.LevelHooks)
log.SetFormatter(&trace.TextFormatter{})
hook, err := logrusSyslog.NewSyslogHook("", "", syslog.LOG_WARNING, "")
if err != nil {
// Bail out if syslog is not available
return
}
log.AddHook(hook)
log.SetOutput(ioutil.Discard)
}
// InitLoggerDebug configures the logger to dump everything to stderr
func InitLoggerDebug() {
// clear existing hooks:
log.StandardLogger().Hooks = make(log.LevelHooks)
log.SetFormatter(&trace.TextFormatter{})
log.SetOutput(os.Stderr)
log.SetLevel(log.DebugLevel)
}
// TurnOffLogging disables logging
func TurnOffLogging() {
log.StandardLogger().Hooks = make(log.LevelHooks)
log.SetOutput(ioutil.Discard)
log.SetLevel(log.FatalLevel)
}
// NormalizePath normalises a path, evaluating symlinks and converting
// relative paths to absolute
func NormalizePath(path string) (string, error) {
s, err := filepath.Abs(path)
if err != nil {
return "", trace.ConvertSystemError(err)
}
abs, err := filepath.EvalSymlinks(s)
if err != nil {
return "", trace.ConvertSystemError(err)
}
return abs, nil
}
// ReadPath reads file at given path
func ReadPath(path string) ([]byte, error) {
abs, err := NormalizePath(path)
if err != nil {
return nil, trace.Wrap(err)
}
bytes, err := ioutil.ReadFile(abs)
if err != nil {
return nil, trace.ConvertSystemError(err)
}
return bytes, nil
}
|
from django.contrib import admin
from django.utils.translation import ugettext as _
class CustomerTypeFilter(admin.SimpleListFilter):
""" Admin filter which allows us to only show in company / physical personal customer. """
title = _("Type de client")
parameter_name = 'customer_type'
COMPANY = 'company'
PERSONAL = 'personal'
def lookups(self, request, model_admin):
return [
(self.COMPANY, _("Société")),
(self.PERSONAL, _("Particulier")),
]
def queryset(self, request, queryset):
if self.value() == self.COMPANY:
return queryset.filter(company_vat__isnull=False)
if self.value() == self.PERSONAL:
return queryset.filter(company_vat__isnull=True)
return queryset |
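# Example usage (a sketch; assumes a Customer model with a nullable
# company_vat field and a corresponding ModelAdmin):
#
#     @admin.register(Customer)
#     class CustomerAdmin(admin.ModelAdmin):
#         list_filter = (CustomerTypeFilter,)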
/*!
@file MemberSerializerEditor.hpp
@author <NAME>
  @brief Contains the interface of MemberSerializerEditor
*/
#pragma once
#include <unordered_map>
#include "Core/Container/MurmurHash2.hpp"
#include <string>
namespace NightEngine
{
namespace Reflection
{
class Variable;
class Member;
class MetaType;
}
}
namespace Editor
{
//! @brief Class for serialize member to editor
class MemberSerializerEditor
{
public:
//! @brief Constructor
MemberSerializerEditor(void);
//! @brief Mark as new frame, update texture list property
void NewFrame(void);
//! @brief Draw the editor for member
bool DrawMemberEditor(NightEngine::Reflection::Member& member
, void* dataObject, const std::string& nameMingle = "");
//! @brief Draw the editor for member
bool DrawMetaTypeEditor(NightEngine::Reflection::MetaType* metaType, void* dataObject
, const std::string& nameMingle);
private:
struct StringHash
{
std::size_t operator()(const std::string& key) const
{
return NightEngine::Container::ConvertToHash(key.c_str(), key.size());
}
};
using EditorFunc = void(*)(NightEngine::Reflection::Variable&, const char*);
using TypeEditorFuncMap = std::unordered_map<std::string, EditorFunc>;
TypeEditorFuncMap m_typeEditorMap;
};
} |
/**
* <p><code>Registry</code> is a factory which gets implementations of
* the BioJava <code>SequenceDBLite</code> interface. This is the
* point of entry for OBDA access.</p>
*
* @author Brian Gilman
* @author Thomas Down
* @author Keith James
*
* @version $Revision: 3220 $
*/
public class Registry {
/**
* Registry Configuration instance
*/
private RegistryConfiguration regConfig = null;
/**
* Creates a new OBDA <code>Registry</code> with the specified
* configuration.
*
* @param regConfig a <code>RegistryConfiguration</code>.
*/
public Registry(RegistryConfiguration regConfig) {
this.regConfig = regConfig;
}
/**
* <code>getDatabase</code> retrieves a database instance known by
* a name <code>String</code>.
*
* @param dbName a <code>String</code> database name.
*
* @return a <code>SequenceDBLite</code>.
*
* @exception RegistryException if the registry does not contain a
* configuration for the specified name.
* @exception BioException if the provider fails.
*/
public SequenceDBLite getDatabase(String dbName)
throws RegistryException, BioException {
String providerName = "";
List dbConfigs =
(List) getRegistryConfiguration().getConfiguration().get(dbName);
if (dbConfigs == null) {
throw new RegistryException("Failed to find a configuration"
+ " for database: "
+ dbName);
}
for (Iterator ci = dbConfigs.iterator(); ci.hasNext();) {
Map dbConfig = (Map) ci.next();
providerName = (String) dbConfig.get("protocol");
SequenceDBLite db = null;
try {
db = getProvider(providerName).getSequenceDB(dbConfig);
} catch (RegistryException re) {
// We allow RegistryExceptions to cause a fallback to
// an alternative provider in the same config
continue;
}
            catch (Exception e) {
                // But more serious exceptions cause a failure
                throw new RegistryException("Failed to configure database "
                                            + dbName, e);
            }
if (db != null)
return db;
}
throw new RegistryException("Failed to find a configuration"
+ " for database: "
+ dbName);
}
private SequenceDBProvider getProvider(String providerName)
throws RegistryException {
try {
ClassLoader loader = ClassTools.getClassLoader(this);
Iterator implNames =
Services.getImplementationNames(SequenceDBProvider.class, loader).iterator();
while (implNames.hasNext()) {
String className = (String) implNames.next();
try {
Class clazz = loader.loadClass(className);
SequenceDBProvider seqDB =
(SequenceDBProvider) clazz.newInstance();
if (seqDB.getName().equals(providerName)) {
return seqDB;
}
} catch (ClassNotFoundException ce) {
throw new RegistryException(
"Could not find class: " + className +
" for service provider " + providerName, ce
);
}
}
throw new ProviderNotFoundException("No such provider exists: "
+ providerName);
} catch (Exception e) {
throw new RegistryException("Error accessing"
+ " SequenceDBProvider services",e);
}
}
/**
* <code>getRegistryConfiguration</code> returns the configuration
* of the registry.
*
* @return a <code>RegistryConfiguration</code>.
*/
public RegistryConfiguration getRegistryConfiguration() {
return this.regConfig;
}
} |
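// Example usage (a sketch; the database name comes from the OBDA registry
// configuration, "embl" here being purely illustrative):
//
//     Registry registry = new Registry(registryConfiguration);
//     SequenceDBLite db = registry.getDatabase("embl");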
/*
Copyright (c) 2014, <NAME>
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are those
of the authors and should not be interpreted as representing official policies,
either expressed or implied, of the FreeBSD Project.
Copyright (c) 2018 by The Mayo Clinic, through its Special Purpose
Processor Development Group (SPPDG). All Rights Reserved Worldwide.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at http://www.apache.org/licenses/LICENSE-2.0.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including conditions of title, non-infringement, merchantability,
or fitness for a particular purpose
See the License for the specific language governing permissions and
limitations under the License.
This file is a snapshot of a work in progress, originated by Mayo
Clinic SPPDG.
*/
#include "avx_ecm.h"
#include <immintrin.h>
void vec_bignum_mask_rshift_n(bignum* u, bignum* v, int n, uint32_t wmask);
// ---------------------------------------------------------------------
// emulated instructions
// ---------------------------------------------------------------------
__m512i __inline _mm512_mulhi_epu32(__m512i a, __m512i b)
{
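    // AVX-512F has no 32-bit "multiply, keep high half" instruction, so we
    // emulate one: _mm512_mul_epu32 multiplies only the even 32-bit lanes
    // into 64-bit products, so run it twice (once with the odd lanes shuffled
    // into even position) and recombine the high halves of the two results.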
__m512i t1 = _mm512_shuffle_epi32(a, 0xB1);
__m512i t2 = _mm512_shuffle_epi32(b, 0xB1);
__m512i evens = _mm512_mul_epu32(a, b);
__m512i odds = _mm512_mul_epu32(t1, t2);
//return _mm512_mask_mov_epi32(_mm512_shuffle_epi32(evens, 0xB1), 0xaaaa, odds);
return _mm512_mask_mov_epi32(odds, 0x5555, _mm512_shuffle_epi32(evens, 0xB1));
}
__m512i __inline _mm512_mask_adc_epi32(__m512i a, __mmask16 m, __mmask16 c, __m512i b, __mmask16 *cout)
{
__m512i t = _mm512_add_epi32(a, b);
*cout = _mm512_cmplt_epu32_mask(t, a);
__m512i t2 = _mm512_mask_add_epi32(a, m, t, _mm512_maskz_set1_epi32(c, 1));
*cout = _mm512_kor(*cout, _mm512_mask_cmplt_epu32_mask(m, t2, t));
return t2;
}
__m512i __inline _mm512_adc_epi32_test1(__m512i a, __mmask16 c, __m512i b, __mmask16 *cout)
{
__m512i t = _mm512_add_epi32(a, b);
*cout = _mm512_cmplt_epu32_mask(t, a);
__m512i t2 = _mm512_add_epi32(t, _mm512_maskz_set1_epi32(c, 1));
*cout = _mm512_kor(*cout, _mm512_cmplt_epu32_mask(t2, t));
return t2;
}
__m512i __inline _mm512_adc_epi32_test2(__m512i a, __mmask16 c, __m512i b, __mmask16 *cout)
{
// looks like a slightly improved data dependency chain...
// but it tested slower for 1024-b inputs...
__m512i t = _mm512_add_epi32(a, b);
__mmask16 gt0 = _mm512_kor(_mm512_test_epi32_mask(b, b), c);
t = _mm512_add_epi32(t, _mm512_maskz_set1_epi32(c, 1));
*cout = _mm512_kand(_mm512_cmple_epu32_mask(t, a), gt0);
return t;
}
__m512i __inline _mm512_adc_epi32(__m512i a, __mmask16 c, __m512i b, __mmask16 *cout)
{
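    // lane-wise add-with-carry: t = a + b + carry-in. The carry-out is set
    // where the sum wrapped below a, or where it landed exactly on a while a
    // carry-in was consumed (i.e. b + 1 wrapped to zero).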
__m512i t = _mm512_add_epi32(a, b);
t = _mm512_add_epi32(t, _mm512_maskz_set1_epi32(c, 1));
*cout = _mm512_cmplt_epu32_mask(t, a) | (_mm512_cmpeq_epu32_mask(t, a) & c);
return t;
}
__m512i __inline _mm512_addcarry_epi32(__m512i a, __mmask16 c, __mmask16 *cout)
{
__m512i t = _mm512_add_epi32(a, _mm512_maskz_set1_epi32(c, 1));
*cout = _mm512_cmplt_epu32_mask(t, a);
return t;
}
__m512i __inline _mm512_subborrow_epi32(__m512i a, __mmask16 c, __mmask16 *cout)
{
    __m512i t = _mm512_sub_epi32(a, _mm512_maskz_set1_epi32(c, 1));
    // a borrow only occurs where a borrow-in was actually consumed and a == 0
    *cout = _mm512_kand(c, _mm512_cmpeq_epu32_mask(a, _mm512_setzero_epi32()));
    return t;
}
__m512i __inline _mm512_mask_sbb_epi32(__m512i a, __mmask16 m, __mmask16 c, __m512i b, __mmask16 *cout)
{
__m512i t = _mm512_sub_epi32(a, b);
*cout = _mm512_cmpgt_epu32_mask(t, a);
__m512i t2 = _mm512_mask_sub_epi32(a, m, t, _mm512_maskz_set1_epi32(c, 1));
*cout = _mm512_kor(*cout, _mm512_cmpgt_epu32_mask(t2, t));
return t2;
}
__m512i __inline _mm512_sbb_epi32(__m512i a, __mmask16 c, __m512i b, __mmask16 *cout)
{
__m512i t = _mm512_sub_epi32(a, b);
*cout = _mm512_cmpgt_epu32_mask(t, a);
__m512i t2 = _mm512_sub_epi32(t, _mm512_maskz_set1_epi32(c, 1));
*cout = _mm512_kor(*cout, _mm512_cmpgt_epu32_mask(t2, t));
return t2;
}
__m512i __inline _mm512_sbb_epi64(__m512i a, __mmask8 c, __m512i b, __mmask8 *cout)
{
__m512i t = _mm512_sub_epi64(a, b);
*cout = _mm512_cmpgt_epu64_mask(t, a);
__m512i t2 = _mm512_sub_epi64(t, _mm512_maskz_set1_epi64(c, 1));
*cout = _mm512_kor(*cout, _mm512_cmpgt_epu64_mask(t2, t));
return t2;
}
__m512i __inline _mm512_addsetc_epi32(__m512i a, __m512i b, __mmask16 *cout)
{
__m512i t = _mm512_add_epi32(a, b);
*cout = _mm512_cmplt_epu32_mask(t, a);
return t;
}
__m512i __inline _mm512_subsetc_epi32(__m512i a, __m512i b, __mmask16 *cout)
{
__m512i t = _mm512_sub_epi32(a, b);
*cout = _mm512_cmpgt_epu32_mask(b, a);
return t;
}
__inline void _mm512_epi32_to_eo64(__m512i a, __m512i *e64, __m512i *o64)
{
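    // split 16 32-bit lanes into two 8x64-bit vectors: e64 holds the
    // even-indexed elements zero-extended to 64 bits, o64 the odd-indexed
    // ones (shuffled down into even position first).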
*e64 = _mm512_maskz_mov_epi32(0x5555, a);
*o64 = _mm512_maskz_mov_epi32(0x5555, _mm512_shuffle_epi32(a, 0xB1));
return;
}
__inline __m512i _mm512_eo64lo_to_epi32(__m512i e64, __m512i o64)
{
return _mm512_mask_blend_epi32(0xAAAA, e64, _mm512_shuffle_epi32(o64, 0xB1));
}
__inline __m512i _mm512_eo64hi_to_epi32(__m512i e64, __m512i o64)
{
return _mm512_mask_blend_epi32(0xAAAA, _mm512_shuffle_epi32(e64, 0xB1), o64);
}
__inline void _mm512_mul_eo64_epi32(__m512i a, __m512i b, __m512i *e64, __m512i *o64)
{
// multiply the 16-element 32-bit vectors a and b to produce two 8-element
// 64-bit vector products e64 and o64, where e64 is the even elements
// of a*b and o64 is the odd elements of a*b
//__m512i t1 = _mm512_shuffle_epi32(a, 0xB1);
//__m512i t2 = _mm512_shuffle_epi32(b, 0xB1);
//_mm512_shuffle_epi32(a, 0xB1);
//_mm512_shuffle_epi32(b, 0xB1);
*e64 = _mm512_mul_epu32(a, b);
*o64 = _mm512_mul_epu32(_mm512_shuffle_epi32(a, 0xB1), _mm512_shuffle_epi32(b, 0xB1));
return;
}
#define _mm512_iseven_epi32(x) \
_mm512_cmp_epi32_mask(_mm512_setzero_epi32(), _mm512_and_epi32((x), _mm512_set1_epi32(1)), _MM_CMPINT_EQ)
#define _mm512_isodd_epi32(x) \
_mm512_cmp_epi32_mask(_mm512_set1_epi32(1), _mm512_and_epi32((x), _mm512_set1_epi32(1)), _MM_CMPINT_EQ)
#define ACCUM_EO_PROD2(sum_e, sum_o, carry_e, carry_o, in_e, in_o) \
sum_e = _mm512_add_epi64(sum_e, in_e); \
sum_o = _mm512_add_epi64(sum_o, in_o); \
scarry_e1 = _mm512_cmplt_epu64_mask(sum_e, in_e); \
scarry_o1 = _mm512_cmplt_epu64_mask(sum_o, in_o); \
carry_e = _mm512_mask_add_epi64(carry_e, scarry_e1, hiword, carry_e); \
carry_o = _mm512_mask_add_epi64(carry_o, scarry_o1, hiword, carry_o);
#define ACCUM_EO_PROD(sum_e, sum_o, carry_e, carry_o) \
sum_e = _mm512_add_epi64(sum_e, prod1_e); \
sum_o = _mm512_add_epi64(sum_o, prod1_o); \
scarry_e1 = _mm512_cmplt_epu64_mask(sum_e, prod1_e); \
scarry_o1 = _mm512_cmplt_epu64_mask(sum_o, prod1_o); \
carry_e = _mm512_mask_add_epi64(carry_e, scarry_e1, hiword, carry_e); \
carry_o = _mm512_mask_add_epi64(carry_o, scarry_o1, hiword, carry_o);
#define ACCUM_DOUBLED_EO_PROD(sum_e, sum_o, carry_e, carry_o) \
sum_e = _mm512_add_epi64(sum_e, prod1_e); \
sum_o = _mm512_add_epi64(sum_o, prod1_o); \
scarry_e1 = _mm512_cmplt_epu64_mask(sum_e, prod1_e); \
scarry_o1 = _mm512_cmplt_epu64_mask(sum_o, prod1_o); \
carry_e = _mm512_mask_add_epi64(carry_e, scarry_e1, hiword2, carry_e); \
carry_o = _mm512_mask_add_epi64(carry_o, scarry_o1, hiword2, carry_o);
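// The ACCUM_* macros add an even/odd pair of 64-bit partial products into a
// column accumulator; wherever the unsigned add wrapped, a carry of 2^32
// (hiword), or 2^33 (hiword2) for implicitly doubled terms, is added to the
// paired high-word accumulator, whose words are offset by 32 bits for easier
// combining later on.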
void vecmulmod(bignum* a, bignum* b, bignum* c, bignum* n, bignum* s, monty* mdata)
{
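    // Block Montgomery multiplication across VECLEN independent lanes (a
    // sketch of the structure, assuming mdata->vrho holds -n^{-1} mod 2^32
    // per lane): the a*b partial products are interleaved with the s*n
    // reduction terms column by column, with the quotient words stored in s,
    // followed by a conditional subtraction of n, leaving
    // c = a * b * R^{-1} mod n where R = 2^(32*NWORDS).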
int i, j, k;
__m512i a0, a1, a2, a3;
__m512i b0, b1, b2, b3, b4, b5, b6;
__m512i te0, te1, te2, te3, te4, te5, te6, te7;
__m512i to0, to1, to2, to3, to4, to5, to6, to7;
__m512i acc_e0;
__m512i acc_o0;
__m512i acc_e1;
__m512i acc_o1;
// 31
__m512i nhatvec_e = _mm512_load_epi32(mdata->vrho);
    __m512i nhatvec_o = _mm512_shuffle_epi32(nhatvec_e, 0xB1);
__m512i prod1_e;
__m512i prod1_o;
__m512i hiword = _mm512_set1_epi64(0x000000100000000);
__m512i zero = _mm512_set1_epi64(0);
// 37
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
__mmask16 scarry2;
__mmask16 scarry;
// zero the accumulator
acc_e0 = acc_o0 = acc_e1 = acc_o1 = zero;
// first half mul
for (i = 0; i < NBLOCKS; i++)
{
te0 = te1 = te2 = te3 = te4 = te5 = te6 = te7 = zero;
to0 = to1 = to2 = to3 = to4 = to5 = to6 = to7 = zero;
for (j = i; j > 0; j--)
{
a0 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 3) * VECLEN);
a1 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 2) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 1) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 0) * VECLEN);
b0 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 7) * VECLEN);
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
}
// finish each triangular shaped column sum
a0 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 0) * VECLEN);
a1 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 1) * VECLEN);
a2 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 2) * VECLEN);
a3 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 3) * VECLEN);
b0 = _mm512_load_epi32(b->data + 0 * VECLEN);
b1 = _mm512_load_epi32(b->data + 1 * VECLEN);
b2 = _mm512_load_epi32(b->data + 2 * VECLEN);
b3 = _mm512_load_epi32(b->data + 3 * VECLEN);
// ======
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
// ======
_mm512_mul_eo64_epi32(a1, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
// ======
_mm512_mul_eo64_epi32(a2, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
// ======
_mm512_mul_eo64_epi32(a3, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
// for those 's' we have already accumulated, compute the
// block s*n accumulations
for (j = i; j > 0; j--)
{
a0 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 3) * VECLEN);
a1 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 2) * VECLEN);
a2 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 1) * VECLEN);
a3 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 0) * VECLEN);
b0 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 7) * VECLEN);
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
}
// now, column by column, add in the s*n contribution and reduce to
// a single 64+x bit accumulator while storing the intermediate product
// 's' as we go.
j = 0;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te0, to0);
acc_e1 = _mm512_add_epi64(acc_e1, te1);
acc_o1 = _mm512_add_epi64(acc_o1, to1);
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 1;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te2, to2);
acc_e1 = _mm512_add_epi64(acc_e1, te3);
acc_o1 = _mm512_add_epi64(acc_o1, to3);
for (k = 0; k < j; k++)
{
a0 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + k) * VECLEN);
b0 = _mm512_load_epi32(n->data + (j - k) * VECLEN);
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
}
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 2;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te4, to4);
acc_e1 = _mm512_add_epi64(acc_e1, te5);
acc_o1 = _mm512_add_epi64(acc_o1, to5);
for (k = 0; k < j; k++)
{
a0 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + k) * VECLEN);
b0 = _mm512_load_epi32(n->data + (j - k) * VECLEN);
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
}
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 3;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te6, to6);
acc_e1 = _mm512_add_epi64(acc_e1, te7);
acc_o1 = _mm512_add_epi64(acc_o1, to7);
for (k = 0; k < j; k++)
{
a0 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + k) * VECLEN);
b0 = _mm512_load_epi32(n->data + (j - k) * VECLEN);
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
}
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
}
// second half mul
for (i = NBLOCKS; i < 2 * NBLOCKS; i++)
{
te0 = te1 = te2 = te3 = te4 = te5 = te6 = te7 = zero;
to0 = to1 = to2 = to3 = to4 = to5 = to6 = to7 = zero;
for (j = i - NBLOCKS + 1; j < NBLOCKS; j++)
{
a0 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 3) * VECLEN);
a1 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 2) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 1) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 0) * VECLEN);
b0 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 7) * VECLEN);
// accumulate a * b
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
// accumulate s * n
a0 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 3) * VECLEN);
a1 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 2) * VECLEN);
a2 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 1) * VECLEN);
a3 = _mm512_load_epi32(s->data + ((i - j) * BLOCKWORDS + 0) * VECLEN);
b0 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(n->data + ((j - 1) * BLOCKWORDS + 7) * VECLEN);
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
}
// finish each triangular shaped column sum (a * b)
a1 = _mm512_load_epi32(a->data + ((i - NBLOCKS) * BLOCKWORDS + 1) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((i - NBLOCKS) * BLOCKWORDS + 2) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((i - NBLOCKS) * BLOCKWORDS + 3) * VECLEN);
b0 = _mm512_load_epi32(b->data + (NWORDS - 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + (NWORDS - 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + (NWORDS - 3) * VECLEN);
// ======
_mm512_mul_eo64_epi32(a1, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
// ======
_mm512_mul_eo64_epi32(a2, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
// ======
_mm512_mul_eo64_epi32(a3, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
// finish each triangular shaped column sum (s * n)
a1 = _mm512_load_epi32(s->data + ((i - NBLOCKS) * BLOCKWORDS + 1) * VECLEN);
a2 = _mm512_load_epi32(s->data + ((i - NBLOCKS) * BLOCKWORDS + 2) * VECLEN);
a3 = _mm512_load_epi32(s->data + ((i - NBLOCKS) * BLOCKWORDS + 3) * VECLEN);
b0 = _mm512_load_epi32(n->data + (NWORDS - 1) * VECLEN);
b1 = _mm512_load_epi32(n->data + (NWORDS - 2) * VECLEN);
b2 = _mm512_load_epi32(n->data + (NWORDS - 3) * VECLEN);
// ======
_mm512_mul_eo64_epi32(a1, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
// ======
_mm512_mul_eo64_epi32(a2, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
// ======
_mm512_mul_eo64_epi32(a3, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
j = 0;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te0, to0);
acc_e1 = _mm512_add_epi64(acc_e1, te1);
acc_o1 = _mm512_add_epi64(acc_o1, to1);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + ((i - NBLOCKS) * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 1;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te2, to2);
acc_e1 = _mm512_add_epi64(acc_e1, te3);
acc_o1 = _mm512_add_epi64(acc_o1, to3);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + ((i - NBLOCKS) * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 2;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te4, to4);
acc_e1 = _mm512_add_epi64(acc_e1, te5);
acc_o1 = _mm512_add_epi64(acc_o1, to5);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + ((i - NBLOCKS) * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 3;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te6, to6);
acc_e1 = _mm512_add_epi64(acc_e1, te7);
acc_o1 = _mm512_add_epi64(acc_o1, to7);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + ((i - NBLOCKS) * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
}
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
scarry2 = _mm512_cmp_epu32_mask(a0, zero, _MM_CMPINT_EQ);
// subtract n from tmp
scarry = 0;
for (i = 0; i < NWORDS; i++)
{
a1 = _mm512_load_epi32(s->data + i * VECLEN);
b0 = _mm512_load_epi32(n->data + i * VECLEN);
a0 = _mm512_sbb_epi32(a1, scarry, b0, &scarry);
_mm512_store_epi32(c->data + i * VECLEN, a0);
}
// negate any final borrows if there was also a final carry.
scarry &= scarry2;
// if there was a final borrow, we didn't need to do the subtraction after all.
// replace with original results based on final borrow mask.
for (i = NWORDS - 1; i >= 0; i--)
{
b0 = _mm512_load_epi32(s->data + i * VECLEN);
_mm512_mask_store_epi32(c->data + i * VECLEN, scarry, b0);
}
c->size = NWORDS;
return;
}
void vecsqrmod(bignum* a, bignum* c, bignum* n, bignum* s, monty* mdata)
{
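    // Montgomery squaring: same block structure as vecmulmod, but each cross
    // product a_i * a_j (i != j) is computed once and implicitly doubled,
    // using hiword2 carries plus the post-loop left shifts below, which
    // roughly halves the multiplication work.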
int i, j, k;
bignum* b = a;
__m512i a0, a1, a2, a3;
__m512i b0, b1, b2, b3, b4, b5, b6;
__m512i te0, te1, te2, te3, te4, te5, te6, te7;
__m512i to0, to1, to2, to3, to4, to5, to6, to7;
__m512i acc_e0;
__m512i acc_o0;
__m512i acc_e1;
__m512i acc_o1;
// 31
__m512i nhatvec_e = _mm512_load_epi32(mdata->vrho); // _mm512_set1_epi32(nhat);
    __m512i nhatvec_o = _mm512_shuffle_epi32(nhatvec_e, 0xB1);
__m512i prod1_e;
__m512i prod1_o;
__m512i hiword = _mm512_set1_epi64(0x000000100000000);
__m512i hiword2 = _mm512_set1_epi64(0x000000200000000);
__m512i zero = _mm512_set1_epi64(0);
// 37
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
__mmask16 scarry2;
__mmask16 scarry;
// zero the accumulator
acc_e0 = acc_o0 = acc_e1 = acc_o1 = zero;
// first half sqr
for (i = 0; i < NBLOCKS; i++)
{
te0 = te1 = te2 = te3 = te4 = te5 = te6 = te7 = zero;
to0 = to1 = to2 = to3 = to4 = to5 = to6 = to7 = zero;
if (i & 1)
{
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
// i odd
for (j = 0; j < (i - 1) / 2; j++)
{
// for 384-bit inputs NBLOCKS=3 and this loop doesn't run at all.
// i=0 no, even
// i=1 no
// i=2 no, even
// for 1024-bit inputs NBLOCKS=8 and this loop runs 6 times over all i.
// i=0 no, even
// i=1 no
// i=2 no, even
// i=3 once
// i=4 no, even
// i=5 twice
// i=6 no, even
// i=7 thrice
// with the doubling trick we trade 96 instructions for each j-loop iteration
// and the 72 instructions for each odd i, after the j-loop,
// for the 32 instructions for each odd i. That saves 6*96+4*72-4*32=736 instructions.
//hips_block_mul_type3(a->data + ((j + 1) * BLOCKWORDS) * VECLEN,
// a->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS) * VECLEN, t_e, t_o);
a0 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 1) * VECLEN);
a1 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 2) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 3) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 4) * VECLEN);
b0 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS + 7) * VECLEN);
// save independent sum/carry words for each product-column in the block.
// uses 11 register inputs, 16 register outputs, and 3 aux vectors.
// since all terms in this loop are doubled, we do the doubling
// after the loop with a left shift.
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o); // a-1, b+1
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o); // a-2, b+2
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o); // a-3, b+3
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o); // a-4, b+4
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o); // a-1, b+2
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o); // a-2, b+3
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o); // a-3, b+4
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o); // a-4, b+5
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o); // a-1, b+3
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o); // a-2, b+4
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o); // a-3, b+5
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o); // a-4, b+6
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o); // a-1, b+4
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o); // a-2, b+5
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o); // a-3, b+6
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o); // a-4, b+7
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
}
// for 384-bit inputs when i == 1, j = 0, a = {3,2,1,0} and b = {2,3,4,5,6,7}
// for 512-bit inputs when i == 3, j = 1, a = {7,6,5,4} and b = {6,7,8,9,a,b}
// for 512-bit inputs when i == 1, j = 0, a = {3,2,1,0} and b = {2,3,4,5,6,7}
a0 = _mm512_load_epi32(a->data + (i * BLOCKWORDS - j * BLOCKWORDS - 1) * VECLEN);
a1 = _mm512_load_epi32(a->data + (i * BLOCKWORDS - j * BLOCKWORDS - 2) * VECLEN);
a2 = _mm512_load_epi32(a->data + (i * BLOCKWORDS - j * BLOCKWORDS - 3) * VECLEN);
a3 = _mm512_load_epi32(a->data + (i * BLOCKWORDS - j * BLOCKWORDS - 4) * VECLEN);
b1 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + 7) * VECLEN);
// save independent sum/carry words for each product-column in the block.
// uses 11 register inputs, 16 register outputs, and 3 aux vectors.
//k == 0;
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
// (expansion of ACCUM_DOUBLED_EO_PROD, kept once here for reference:)
//te0 = _mm512_addsetc_epi64(te0, prod1_e, &scarry_e);
//to0 = _mm512_addsetc_epi64(to0, prod1_o, &scarry_o);
//te1 = _mm512_mask_add_epi64(te1, scarry_e, hiword2, te1);
//to1 = _mm512_mask_add_epi64(to1, scarry_o, hiword2, to1);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
// all terms so far need to be doubled. Do that all at once with these
// left shifts. We need the high-bit of the low word to end up in the
// 32nd bit position (because the high words are offset by that much
// for easier combining later on). _mm512_maskz_srli_epi32 allows us
// to do the right shift simultaneously with clearing out the low bits.
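// sketch of one 64-bit lane: the masked 32-bit shift keeps only the odd
// (upper) 32-bit half of each lane, shifted right by 31, so bit 63 of
// teN -- the bit slli_epi64(teN, 1) would discard -- lands at bit 32 of
// the lane and is OR-ed into teN+1. Since the teN+1 accumulators are
// kept offset by 32 bits, that is exactly where the doubled carry bit
// belongs.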
te1 = _mm512_or_epi64(te1, _mm512_maskz_srli_epi32(0xaaaa, te0, 31));
to1 = _mm512_or_epi64(to1, _mm512_maskz_srli_epi32(0xaaaa, to0, 31));
te0 = _mm512_slli_epi64(te0, 1);
to0 = _mm512_slli_epi64(to0, 1);
te3 = _mm512_or_epi64(te3, _mm512_maskz_srli_epi32(0xaaaa, te2, 31));
to3 = _mm512_or_epi64(to3, _mm512_maskz_srli_epi32(0xaaaa, to2, 31));
te2 = _mm512_slli_epi64(te2, 1);
to2 = _mm512_slli_epi64(to2, 1);
te5 = _mm512_or_epi64(te5, _mm512_maskz_srli_epi32(0xaaaa, te4, 31));
to5 = _mm512_or_epi64(to5, _mm512_maskz_srli_epi32(0xaaaa, to4, 31));
te4 = _mm512_slli_epi64(te4, 1);
to4 = _mm512_slli_epi64(to4, 1);
te7 = _mm512_or_epi64(te7, _mm512_maskz_srli_epi32(0xaaaa, te6, 31));
to7 = _mm512_or_epi64(to7, _mm512_maskz_srli_epi32(0xaaaa, to6, 31));
te6 = _mm512_slli_epi64(te6, 1);
to6 = _mm512_slli_epi64(to6, 1);
// finally the two non-doubled terms.
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
// (expansion of ACCUM_EO_PROD -- note hiword rather than hiword2 --
// kept once here for reference:)
//te0 = _mm512_addsetc_epi64(te0, prod1_e, &scarry_e);
//to0 = _mm512_addsetc_epi64(to0, prod1_o, &scarry_o);
//te1 = _mm512_mask_add_epi64(te1, scarry_e, hiword, te1);
//to1 = _mm512_mask_add_epi64(to1, scarry_o, hiword, to1);
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
}
else
{
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
// i even
for (j = 0; j < i / 2; j++)
{
// for 384-bit inputs NBLOCKS=3 and this loop runs once.
// i=0: 0 times
// i=1: no, odd
// i=2: 1 time
// for 1024-bit inputs NBLOCKS=8 and this loop runs 6 times over all i.
// i=0 0 times
// i=1 no, odd
// i=2 1 time
// i=3 no, odd
// i=4 2 times
// i=5 no, odd
// i=6 3 times
// i=7 no, odd
// with the doubling trick we trade 96 instructions for each j-loop iteration
// and the 24 instructions for each even i, after the j-loop,
// for the 40 instructions once per even i. That saves 6*96+4*24-4*40=512 instructions.
//hips_block_mul_type3(a->data + ((j + 1) * BLOCKWORDS) * VECLEN,
// a->data + ((i - 1) * BLOCKWORDS - j * BLOCKWORDS) * VECLEN, t_e, t_o);
// when i = 2, j = 0, a = {3,2,1,0}, b = {5,6,7,8,9,a,b}
a0 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 1) * VECLEN);
a1 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 2) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 3) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((j + 1) * BLOCKWORDS - 4) * VECLEN);
b0 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j
* BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j
* BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j
* BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j
* BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j
* BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j
* BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + ((i - 1) * BLOCKWORDS - j
* BLOCKWORDS + 7) * VECLEN);
// save independent sum/carry words for each product-column in the block.
// uses 11 register inputs, 16 register outputs, and 3 aux vectors.
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o); // a-1, b+1
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o); // a-2, b+2
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o); // a-3, b+3
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o); // a-4, b+4
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o); // a-1, b+2
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o); // a-2, b+3
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o); // a-3, b+4
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o); // a-4, b+5
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o); // a-1, b+3
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o); // a-2, b+4
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o); // a-3, b+5
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o); // a-4, b+6
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o); // a-1, b+4
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o); // a-2, b+5
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o); // a-3, b+6
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o); // a-4, b+7
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
}
a0 = _mm512_load_epi32(a->data + (i / 2 * BLOCKWORDS + 0) * VECLEN);
a1 = _mm512_load_epi32(a->data + (i / 2 * BLOCKWORDS + 1) * VECLEN);
a2 = _mm512_load_epi32(a->data + (i / 2 * BLOCKWORDS + 2) * VECLEN);
a3 = _mm512_load_epi32(a->data + (i / 2 * BLOCKWORDS + 3) * VECLEN);
// save independent sum/carry words for each product-column in the block.
// uses 11 register inputs, 16 register outputs, and 3 aux vectors.
//k == 1;
_mm512_mul_eo64_epi32(a0, a1, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, a2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, a3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, a2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
// all terms so far need to be doubled. Do that all at once with these
// left shifts.
te1 = _mm512_or_epi64(te1, _mm512_maskz_srli_epi32(0xaaaa, te0, 31));
to1 = _mm512_or_epi64(to1, _mm512_maskz_srli_epi32(0xaaaa, to0, 31));
te0 = _mm512_slli_epi64(te0, 1);
to0 = _mm512_slli_epi64(to0, 1);
te3 = _mm512_or_epi64(te3, _mm512_maskz_srli_epi32(0xaaaa, te2, 31));
to3 = _mm512_or_epi64(to3, _mm512_maskz_srli_epi32(0xaaaa, to2, 31));
te2 = _mm512_slli_epi64(te2, 1);
to2 = _mm512_slli_epi64(to2, 1);
te5 = _mm512_or_epi64(te5, _mm512_maskz_srli_epi32(0xaaaa, te4, 31));
to5 = _mm512_or_epi64(to5, _mm512_maskz_srli_epi32(0xaaaa, to4, 31));
te4 = _mm512_slli_epi64(te4, 1);
to4 = _mm512_slli_epi64(to4, 1);
te7 = _mm512_or_epi64(te7, _mm512_maskz_srli_epi32(0xaaaa, te6, 31));
to7 = _mm512_or_epi64(to7, _mm512_maskz_srli_epi32(0xaaaa, to6, 31));
te6 = _mm512_slli_epi64(te6, 1);
to6 = _mm512_slli_epi64(to6, 1);
// finally the two non-doubled terms.
_mm512_mul_eo64_epi32(a0, a0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, a1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
}
// for those 's' we have already accumulated, compute the
// block s*n accumulations
for (j = 0; j < i; j++)
{
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
// accumulate s * n
a0 = _mm512_load_epi32(s->data + ((j + 1) * BLOCKWORDS - 1) * VECLEN);
a1 = _mm512_load_epi32(s->data + ((j + 1) * BLOCKWORDS - 2) * VECLEN);
a2 = _mm512_load_epi32(s->data + ((j + 1) * BLOCKWORDS - 3) * VECLEN);
a3 = _mm512_load_epi32(s->data + ((j + 1) * BLOCKWORDS - 4) * VECLEN);
b0 = _mm512_load_epi32(n->data + ((i - j - 1) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(n->data + ((i - j - 1) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(n->data + ((i - j - 1) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(n->data + ((i - j - 1) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(n->data + ((i - j - 1) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(n->data + ((i - j - 1) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(n->data + ((i - j - 1) * BLOCKWORDS + 7) * VECLEN);
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
}
// now, column by column, add in the s*n contribution and reduce to
// a single 64+x bit accumulator while storing the intermediate product
// 's' as we go.
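// in scalar terms, each column j below performs one word of Montgomery
// reduction (a sketch, per vector lane):
//   m        = (acc mod 2^32) * nhat mod 2^32;  // nhatvec_e/_o hold nhat
//   s[i*4+j] = m;                               // store the quotient digit
//   acc     += m * n[0];                        // low word becomes zero
//   acc    >>= 32;                              // move to the next column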
j = 0;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te0, to0);
acc_e1 = _mm512_add_epi64(acc_e1, te1);
acc_o1 = _mm512_add_epi64(acc_o1, to1);
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 1;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te2, to2);
acc_e1 = _mm512_add_epi64(acc_e1, te3);
acc_o1 = _mm512_add_epi64(acc_o1, to3);
for (k = 0; k < j; k++)
{
a0 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + k) * VECLEN);
b0 = _mm512_load_epi32(n->data + (j - k) * VECLEN);
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
}
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 2;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te4, to4);
acc_e1 = _mm512_add_epi64(acc_e1, te5);
acc_o1 = _mm512_add_epi64(acc_o1, to5);
for (k = 0; k < j; k++)
{
a0 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + k) * VECLEN);
b0 = _mm512_load_epi32(n->data + (j - k) * VECLEN);
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
}
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 3;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te6, to6);
acc_e1 = _mm512_add_epi64(acc_e1, te7);
acc_o1 = _mm512_add_epi64(acc_o1, to7);
for (k = 0; k < j; k++)
{
a0 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + k) * VECLEN);
b0 = _mm512_load_epi32(n->data + (j - k) * VECLEN);
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
}
prod1_e = _mm512_mul_epu32(nhatvec_e, acc_e0);
prod1_o = _mm512_mul_epu32(nhatvec_o, acc_o0);
a0 = _mm512_eo64lo_to_epi32(prod1_e, prod1_o);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
b0 = _mm512_load_epi32(n->data + 0 * VECLEN);
_mm512_mul_eo64_epi32(b0, a0, &prod1_e, &prod1_o);
// add in the final product
ACCUM_EO_PROD(acc_e0, acc_o0, acc_e1, acc_o1);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
}
// second half sqr
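// each column i of the high half gathers the remaining upper-triangle
// terms: (NBLOCKS - i - 1) / 2 full blocks in the j-loop below, then a
// partial block whose shape depends on the parity of i and NBLOCKS
// (handled after the loop), then the non-doubled s*n accumulation.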
for (i = 0; i < NBLOCKS; i++)
{
te0 = te1 = te2 = te3 = te4 = te5 = te6 = te7 = zero;
to0 = to1 = to2 = to3 = to4 = to5 = to6 = to7 = zero;
for (j = 0; j < (NBLOCKS - i - 1) / 2; j++)
{
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
// Compute a solid block (all matching terms are in the lower
// half triangle of the expansion).
//hips_block_mul_type3(a->data + (i * BLOCKWORDS + (j + 2) * BLOCKWORDS) * VECLEN,
// a->data + (NWORDS - 2 * BLOCKWORDS - j * BLOCKWORDS) * VECLEN, t_e, t_o);
a0 = _mm512_load_epi32(a->data + (NWORDS - 1 - j * BLOCKWORDS) * VECLEN);
a1 = _mm512_load_epi32(a->data + (NWORDS - 2 - j * BLOCKWORDS) * VECLEN);
a2 = _mm512_load_epi32(a->data + (NWORDS - 3 - j * BLOCKWORDS) * VECLEN);
a3 = _mm512_load_epi32(a->data + (NWORDS - 4 - j * BLOCKWORDS) * VECLEN);
b0 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 7) * VECLEN);
// save independent sum/carry words for each product-column in the block.
// uses 11 register inputs, 16 register outputs, and 3 aux vectors.
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o); // a-1, b+1
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o); // a-2, b+2
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o); // a-3, b+3
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o); // a-4, b+4
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o); // a-1, b+2
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o); // a-2, b+3
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o); // a-3, b+4
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o); // a-4, b+5
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o); // a-1, b+3
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o); // a-2, b+4
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o); // a-3, b+5
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o); // a-4, b+6
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o); // a-1, b+4
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o); // a-2, b+5
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o); // a-3, b+6
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o); // a-4, b+7
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
}
// The final block shape depends on the parity of i and NBLOCKS
// Block shape 1 (small upper triangle) if 'i' is odd and NBLOCKS is even,
// or if 'i' is even and NBLOCKS is odd.
// Block shape 2 if 'i' is odd and NBLOCKS is odd or if 'i' is even
// and NBLOCKS is even.
if (NBLOCKS & 1) // NBLOCKS is odd
{
// i odd, block shape 2.
if (i & 1)
{
// always a continuation of the full-block loop, so use the same
// loading pattern. Only now we don't need as many b-terms.
a0 = _mm512_load_epi32(a->data + (NWORDS - 1 - j * BLOCKWORDS) * VECLEN);
a1 = _mm512_load_epi32(a->data + (NWORDS - 2 - j * BLOCKWORDS) * VECLEN);
a2 = _mm512_load_epi32(a->data + (NWORDS - 3 - j * BLOCKWORDS) * VECLEN);
a3 = _mm512_load_epi32(a->data + (NWORDS - 4 - j * BLOCKWORDS) * VECLEN);
b0 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 5) * VECLEN);
// save independent sum/carry words for each product-column in the block.
// uses 11 register inputs, 16 register outputs, and 3 aux vectors.
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
// all terms so far need to be doubled. Do that all at once with these
// left shifts.
te1 = _mm512_or_epi64(te1, _mm512_maskz_srli_epi32(0xaaaa, te0, 31));
to1 = _mm512_or_epi64(to1, _mm512_maskz_srli_epi32(0xaaaa, to0, 31));
te0 = _mm512_slli_epi64(te0, 1);
to0 = _mm512_slli_epi64(to0, 1);
te3 = _mm512_or_epi64(te3, _mm512_maskz_srli_epi32(0xaaaa, te2, 31));
to3 = _mm512_or_epi64(to3, _mm512_maskz_srli_epi32(0xaaaa, to2, 31));
te2 = _mm512_slli_epi64(te2, 1);
to2 = _mm512_slli_epi64(to2, 1);
te5 = _mm512_or_epi64(te5, _mm512_maskz_srli_epi32(0xaaaa, te4, 31));
to5 = _mm512_or_epi64(to5, _mm512_maskz_srli_epi32(0xaaaa, to4, 31));
te4 = _mm512_slli_epi64(te4, 1);
to4 = _mm512_slli_epi64(to4, 1);
te7 = _mm512_or_epi64(te7, _mm512_maskz_srli_epi32(0xaaaa, te6, 31));
to7 = _mm512_or_epi64(to7, _mm512_maskz_srli_epi32(0xaaaa, to6, 31));
te6 = _mm512_slli_epi64(te6, 1);
to6 = _mm512_slli_epi64(to6, 1);
// finally the two non-doubled terms.
_mm512_mul_eo64_epi32(a3, a3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, a2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
}
else
{
// i even, block shape 1.
// always a continuation of the full-block loop, so use the same
// loading pattern. Only now we don't need as many b-terms.
a0 = _mm512_load_epi32(a->data + (NWORDS - 1 - j * BLOCKWORDS) * VECLEN);
a1 = _mm512_load_epi32(a->data + (NWORDS - 2 - j * BLOCKWORDS) * VECLEN);
a2 = _mm512_load_epi32(a->data + (NWORDS - 3 - j * BLOCKWORDS) * VECLEN);
//k == 0;
_mm512_mul_eo64_epi32(a0, a2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, a1, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
te1 = _mm512_or_epi64(te1, _mm512_maskz_srli_epi32(0xaaaa, te0, 31));
to1 = _mm512_or_epi64(to1, _mm512_maskz_srli_epi32(0xaaaa, to0, 31));
te0 = _mm512_slli_epi64(te0, 1);
to0 = _mm512_slli_epi64(to0, 1);
te3 = _mm512_or_epi64(te3, _mm512_maskz_srli_epi32(0xaaaa, te2, 31));
to3 = _mm512_or_epi64(to3, _mm512_maskz_srli_epi32(0xaaaa, to2, 31));
te2 = _mm512_slli_epi64(te2, 1);
to2 = _mm512_slli_epi64(to2, 1);
// technically only have to do these two if j > 0 (so that
// they are non-zero from full-block loop iterations).
te5 = _mm512_or_epi64(te5, _mm512_maskz_srli_epi32(0xaaaa, te4, 31));
to5 = _mm512_or_epi64(to5, _mm512_maskz_srli_epi32(0xaaaa, to4, 31));
te4 = _mm512_slli_epi64(te4, 1);
to4 = _mm512_slli_epi64(to4, 1);
te7 = _mm512_or_epi64(te7, _mm512_maskz_srli_epi32(0xaaaa, te6, 31));
to7 = _mm512_or_epi64(to7, _mm512_maskz_srli_epi32(0xaaaa, to6, 31));
te6 = _mm512_slli_epi64(te6, 1);
to6 = _mm512_slli_epi64(to6, 1);
// finally the two non-doubled terms.
_mm512_mul_eo64_epi32(a1, a1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a0, a0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
}
}
else
{
// NBLOCKS is even
// i odd, block shape 1.
if (i & 1)
{
// always a continuation of the full-block loop, so use the same
// loading pattern. Only now we don't need as many b-terms.
a0 = _mm512_load_epi32(a->data + (NWORDS - 1 - j * BLOCKWORDS) * VECLEN); // {f, b}
a1 = _mm512_load_epi32(a->data + (NWORDS - 2 - j * BLOCKWORDS) * VECLEN); // {e, a}
a2 = _mm512_load_epi32(a->data + (NWORDS - 3 - j * BLOCKWORDS) * VECLEN); // {d, 9}
//k == 0;
_mm512_mul_eo64_epi32(a0, a2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, a1, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
te1 = _mm512_or_epi64(te1, _mm512_maskz_srli_epi32(0xaaaa, te0, 31));
to1 = _mm512_or_epi64(to1, _mm512_maskz_srli_epi32(0xaaaa, to0, 31));
te0 = _mm512_slli_epi64(te0, 1);
to0 = _mm512_slli_epi64(to0, 1);
te3 = _mm512_or_epi64(te3, _mm512_maskz_srli_epi32(0xaaaa, te2, 31));
to3 = _mm512_or_epi64(to3, _mm512_maskz_srli_epi32(0xaaaa, to2, 31));
te2 = _mm512_slli_epi64(te2, 1);
to2 = _mm512_slli_epi64(to2, 1);
// technically only have to do these two if j > 0 (so that
// they are non-zero from full-block loop iterations).
te5 = _mm512_or_epi64(te5, _mm512_maskz_srli_epi32(0xaaaa, te4, 31));
to5 = _mm512_or_epi64(to5, _mm512_maskz_srli_epi32(0xaaaa, to4, 31));
te4 = _mm512_slli_epi64(te4, 1);
to4 = _mm512_slli_epi64(to4, 1);
te7 = _mm512_or_epi64(te7, _mm512_maskz_srli_epi32(0xaaaa, te6, 31));
to7 = _mm512_or_epi64(to7, _mm512_maskz_srli_epi32(0xaaaa, to6, 31));
te6 = _mm512_slli_epi64(te6, 1);
to6 = _mm512_slli_epi64(to6, 1);
// finally the two non-doubled terms.
_mm512_mul_eo64_epi32(a1, a1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a0, a0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
}
else
{
// i even, block shape 2.
// always a continuation of the full-block loop, so use the same
// loading pattern. Only now we don't need as many b-terms.
a0 = _mm512_load_epi32(a->data + (NWORDS - 1 - j * BLOCKWORDS) * VECLEN); // {f, b}
a1 = _mm512_load_epi32(a->data + (NWORDS - 2 - j * BLOCKWORDS) * VECLEN); // {e, a}
a2 = _mm512_load_epi32(a->data + (NWORDS - 3 - j * BLOCKWORDS) * VECLEN); // {d, 9}
a3 = _mm512_load_epi32(a->data + (NWORDS - 4 - j * BLOCKWORDS) * VECLEN); // {c, 8}
b0 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 1) * VECLEN); // {9, 5}
b1 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 2) * VECLEN); // {a, 6}
b2 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 3) * VECLEN); // {b, 7}
b3 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 4) * VECLEN); // {c, 8}
b4 = _mm512_load_epi32(b->data + (j * BLOCKWORDS + i * BLOCKWORDS + 5) * VECLEN); // {d, 9}
// save independent sum/carry words for each product-column in the block.
// uses 11 register inputs, 16 register outputs, and 3 aux vectors.
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_DOUBLED_EO_PROD(te6, to6, te7, to7);
// all terms so far need to be doubled. Do that all at once with these
// left shifts.
te1 = _mm512_or_epi64(te1, _mm512_maskz_srli_epi32(0xaaaa, te0, 31));
to1 = _mm512_or_epi64(to1, _mm512_maskz_srli_epi32(0xaaaa, to0, 31));
te0 = _mm512_slli_epi64(te0, 1);
to0 = _mm512_slli_epi64(to0, 1);
te3 = _mm512_or_epi64(te3, _mm512_maskz_srli_epi32(0xaaaa, te2, 31));
to3 = _mm512_or_epi64(to3, _mm512_maskz_srli_epi32(0xaaaa, to2, 31));
te2 = _mm512_slli_epi64(te2, 1);
to2 = _mm512_slli_epi64(to2, 1);
te5 = _mm512_or_epi64(te5, _mm512_maskz_srli_epi32(0xaaaa, te4, 31));
to5 = _mm512_or_epi64(to5, _mm512_maskz_srli_epi32(0xaaaa, to4, 31));
te4 = _mm512_slli_epi64(te4, 1);
to4 = _mm512_slli_epi64(to4, 1);
te7 = _mm512_or_epi64(te7, _mm512_maskz_srli_epi32(0xaaaa, te6, 31));
to7 = _mm512_or_epi64(to7, _mm512_maskz_srli_epi32(0xaaaa, to6, 31));
te6 = _mm512_slli_epi64(te6, 1);
to6 = _mm512_slli_epi64(to6, 1);
// finally the two non-doubled terms.
_mm512_mul_eo64_epi32(a3, a3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, a2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
}
}
// the s*n term. No more doubling past here.
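// (s*n products appear exactly once in the expansion -- there is no
// square symmetry to exploit -- so they are accumulated undoubled.)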
for (j = 0; j < NBLOCKS - 1 - i; j++)
{
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
a0 = _mm512_load_epi32(s->data + (NWORDS - 1 - j * BLOCKWORDS) * VECLEN);
a1 = _mm512_load_epi32(s->data + (NWORDS - 2 - j * BLOCKWORDS) * VECLEN);
a2 = _mm512_load_epi32(s->data + (NWORDS - 3 - j * BLOCKWORDS) * VECLEN);
a3 = _mm512_load_epi32(s->data + (NWORDS - 4 - j * BLOCKWORDS) * VECLEN);
b0 = _mm512_load_epi32(n->data + ((i + j) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(n->data + ((i + j) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(n->data + ((i + j) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(n->data + ((i + j) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(n->data + ((i + j) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(n->data + ((i + j) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(n->data + ((i + j) * BLOCKWORDS + 7) * VECLEN);
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
}
// finish each triangular shaped column sum (s * n)
a1 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + 1) * VECLEN);
a2 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + 2) * VECLEN);
a3 = _mm512_load_epi32(s->data + (i * BLOCKWORDS + 3) * VECLEN);
b0 = _mm512_load_epi32(n->data + (NWORDS - 1) * VECLEN);
b1 = _mm512_load_epi32(n->data + (NWORDS - 2) * VECLEN);
b2 = _mm512_load_epi32(n->data + (NWORDS - 3) * VECLEN);
// ======
_mm512_mul_eo64_epi32(a1, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
// ======
_mm512_mul_eo64_epi32(a2, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
// ======
_mm512_mul_eo64_epi32(a3, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
j = 0;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te0, to0);
acc_e1 = _mm512_add_epi64(acc_e1, te1);
acc_o1 = _mm512_add_epi64(acc_o1, to1);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 1;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te2, to2);
acc_e1 = _mm512_add_epi64(acc_e1, te3);
acc_o1 = _mm512_add_epi64(acc_o1, to3);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 2;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te4, to4);
acc_e1 = _mm512_add_epi64(acc_e1, te5);
acc_o1 = _mm512_add_epi64(acc_o1, to5);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 3;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te6, to6);
acc_e1 = _mm512_add_epi64(acc_e1, te7);
acc_o1 = _mm512_add_epi64(acc_o1, to7);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
}
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
scarry2 = _mm512_cmp_epu32_mask(a0, zero, _MM_CMPINT_EQ);
// subtract n from tmp
scarry = 0;
for (i = 0; i < NWORDS; i++)
{
a1 = _mm512_load_epi32(s->data + i * VECLEN);
b0 = _mm512_load_epi32(n->data + i * VECLEN);
a0 = _mm512_sbb_epi32(a1, scarry, b0, &scarry);
_mm512_store_epi32(c->data + i * VECLEN, a0);
}
// cancel the borrow in any lane that also had a carry out of the top
// word (scarry2 is set only where the carry-out word was zero): a lane
// with a carry out definitely needed the subtraction.
scarry &= scarry2;
// where a borrow survives, the result was already less than n and the
// subtraction wasn't needed; restore the original words in those lanes
// based on the borrow mask.
for (i = NWORDS - 1; i >= 0; i--)
{
b0 = _mm512_load_epi32(s->data + i * VECLEN);
_mm512_mask_store_epi32(c->data + i * VECLEN, scarry, b0);
}
c->size = NWORDS;
return;
}
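// multiply a * b modulo a Mersenne number 2^nbits - 1: compute the full
// double-width product into s, then fold the high half back into the low
// half (see the reduction after the two half-mul loops below).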
void vecmulmod_mersenne(bignum* a, bignum* b, bignum* c, bignum* n, bignum* s, monty* mdata)
{
int i, j;
__m512i a0, a1, a2, a3;
__m512i b0, b1, b2, b3, b4, b5, b6;
__m512i te0, te1, te2, te3, te4, te5, te6, te7;
__m512i to0, to1, to2, to3, to4, to5, to6, to7;
__m512i acc_e0;
__m512i acc_o0;
__m512i acc_e1;
__m512i acc_o1;
// 31
__m512i prod1_e;
__m512i prod1_o;
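// hiword is 2^32 in every 64-bit lane; the ACCUM_* macros add it into
// the offset high-word accumulator whenever the low-word add-with-carry
// sets its carry mask (see the commented-out macro expansions kept in
// the squaring routine above).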
__m512i hiword = _mm512_set1_epi64(0x0000000100000000ULL);
__m512i zero = _mm512_set1_epi64(0);
// 37
__mmask8 scarry_e1 = 0;
__mmask8 scarry_o1 = 0;
__mmask16 scarry;
// zero the accumulator
acc_e0 = acc_o0 = acc_e1 = acc_o1 = zero;
// first half mul
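// each outer iteration i produces one BLOCKWORDS-wide column of the
// product; the j-loop accumulates the full 4x4 blocks of the convolution
// and the triangular tail after it finishes the column.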
for (i = 0; i < NBLOCKS; i++)
{
te0 = te1 = te2 = te3 = te4 = te5 = te6 = te7 = zero;
to0 = to1 = to2 = to3 = to4 = to5 = to6 = to7 = zero;
for (j = i; j > 0; j--)
{
a0 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 3) * VECLEN);
a1 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 2) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 1) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 0) * VECLEN);
b0 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 7) * VECLEN);
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
}
// finish each triangular shaped column sum
a0 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 0) * VECLEN);
a1 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 1) * VECLEN);
a2 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 2) * VECLEN);
a3 = _mm512_load_epi32(a->data + (i * BLOCKWORDS + 3) * VECLEN);
b0 = _mm512_load_epi32(b->data + 0 * VECLEN);
b1 = _mm512_load_epi32(b->data + 1 * VECLEN);
b2 = _mm512_load_epi32(b->data + 2 * VECLEN);
b3 = _mm512_load_epi32(b->data + 3 * VECLEN);
// ======
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
// ======
_mm512_mul_eo64_epi32(a1, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
// ======
_mm512_mul_eo64_epi32(a2, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
// ======
_mm512_mul_eo64_epi32(a3, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
// now, do a carry-propagating column summation and store the results.
j = 0;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te0, to0);
acc_e1 = _mm512_add_epi64(acc_e1, te1);
acc_o1 = _mm512_add_epi64(acc_o1, to1);
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 1;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te2, to2);
acc_e1 = _mm512_add_epi64(acc_e1, te3);
acc_o1 = _mm512_add_epi64(acc_o1, to3);
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 2;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te4, to4);
acc_e1 = _mm512_add_epi64(acc_e1, te5);
acc_o1 = _mm512_add_epi64(acc_o1, to5);
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 3;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te6, to6);
acc_e1 = _mm512_add_epi64(acc_e1, te7);
acc_o1 = _mm512_add_epi64(acc_o1, to7);
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// now shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
}
// second half mul
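// columns NBLOCKS..2*NBLOCKS-1 of the product: j starts at
// i - NBLOCKS + 1 so that the (i - j) index into a stays inside its
// NBLOCKS blocks.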
for (i = NBLOCKS; i < 2 * NBLOCKS; i++)
{
te0 = te1 = te2 = te3 = te4 = te5 = te6 = te7 = zero;
to0 = to1 = to2 = to3 = to4 = to5 = to6 = to7 = zero;
for (j = i - NBLOCKS + 1; j < NBLOCKS; j++)
{
a0 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 3) * VECLEN);
a1 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 2) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 1) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((i - j) * BLOCKWORDS + 0) * VECLEN);
b0 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 3) * VECLEN);
b3 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 4) * VECLEN);
b4 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 5) * VECLEN);
b5 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 6) * VECLEN);
b6 = _mm512_load_epi32(b->data + ((j - 1) * BLOCKWORDS + 7) * VECLEN);
// accumulate a * b
//k == 0;
_mm512_mul_eo64_epi32(a0, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a1, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
//k == 1;
_mm512_mul_eo64_epi32(a0, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a1, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a2, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
//k == 2;
_mm512_mul_eo64_epi32(a0, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a1, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a2, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
_mm512_mul_eo64_epi32(a3, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
//k == 3;
_mm512_mul_eo64_epi32(a0, b3, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a1, b4, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a2, b5, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
_mm512_mul_eo64_epi32(a3, b6, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te6, to6, te7, to7);
}
// finish each triangular shaped column sum (a * b)
a1 = _mm512_load_epi32(a->data + ((i - NBLOCKS) * BLOCKWORDS + 1) * VECLEN);
a2 = _mm512_load_epi32(a->data + ((i - NBLOCKS) * BLOCKWORDS + 2) * VECLEN);
a3 = _mm512_load_epi32(a->data + ((i - NBLOCKS) * BLOCKWORDS + 3) * VECLEN);
b0 = _mm512_load_epi32(b->data + (NWORDS - 1) * VECLEN);
b1 = _mm512_load_epi32(b->data + (NWORDS - 2) * VECLEN);
b2 = _mm512_load_epi32(b->data + (NWORDS - 3) * VECLEN);
// ======
_mm512_mul_eo64_epi32(a1, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a2, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
_mm512_mul_eo64_epi32(a3, b2, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te0, to0, te1, to1);
// ======
_mm512_mul_eo64_epi32(a2, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
_mm512_mul_eo64_epi32(a3, b1, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te2, to2, te3, to3);
// ======
_mm512_mul_eo64_epi32(a3, b0, &prod1_e, &prod1_o);
ACCUM_EO_PROD(te4, to4, te5, to5);
j = 0;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te0, to0);
acc_e1 = _mm512_add_epi64(acc_e1, te1);
acc_o1 = _mm512_add_epi64(acc_o1, to1);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 1;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te2, to2);
acc_e1 = _mm512_add_epi64(acc_e1, te3);
acc_o1 = _mm512_add_epi64(acc_o1, to3);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 2;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te4, to4);
acc_e1 = _mm512_add_epi64(acc_e1, te5);
acc_o1 = _mm512_add_epi64(acc_o1, to5);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
j = 3;
// accumulate this column-sum
ACCUM_EO_PROD2(acc_e0, acc_o0, acc_e1, acc_o1, te6, to6);
acc_e1 = _mm512_add_epi64(acc_e1, te7);
acc_o1 = _mm512_add_epi64(acc_o1, to7);
// store the low-word final result
a0 = _mm512_eo64lo_to_epi32(acc_e0, acc_o0);
_mm512_store_epi32(s->data + (i * BLOCKWORDS + j) * VECLEN, a0);
// and shift.
acc_e0 = _mm512_srli_epi64(acc_e0, 32);
acc_o0 = _mm512_srli_epi64(acc_o0, 32);
acc_e0 = _mm512_add_epi64(acc_e1, acc_e0);
acc_o0 = _mm512_add_epi64(acc_o1, acc_o0);
acc_e1 = zero;
acc_o1 = zero;
}
#ifdef DEBUG_MERSENNE
print_vechexbignum(s, "after hi half:");
#endif
// reduce by adding hi to lo. first right shift hi into output.
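// for n = 2^p - 1 we have 2^p == 1 (mod n), so
//   x mod n == (x mod 2^p) + floor(x / 2^p)  (mod n),
// i.e. the high p-bit half of the product simply adds into the low half.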
vec_bignum_mask_rshift_n(s, c, mdata->nbits, 0xffff);
int bshift = mdata->nbits % 32;
int wshift = mdata->nbits / 32;
#ifdef DEBUG_MERSENNE
print_vechexbignum(c, "hi part:");
print_vechexbignum(s, "lo part:");
#endif
// now add the low part into the high.
scarry = 0;
for (i = 0; i < wshift; i++)
{
a1 = _mm512_load_epi32(c->data + i * VECLEN);
b0 = _mm512_load_epi32(s->data + i * VECLEN);
a0 = _mm512_adc_epi32(a1, scarry, b0, &scarry);
_mm512_store_epi32(c->data + i * VECLEN, a0);
}
a1 = _mm512_load_epi32(c->data + i * VECLEN);
b0 = _mm512_load_epi32(s->data + i * VECLEN);
b0 = _mm512_and_epi32(_mm512_set1_epi32((1U << (uint32_t)(bshift)) - 1U), b0);
a0 = _mm512_adc_epi32(a1, scarry, b0, &scarry);
_mm512_store_epi32(c->data + i * VECLEN, a0);
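// clear the remaining high words of the low part in s (presumably so the
// scratch value holds only the masked low half going forward).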
for (i++; i < NWORDS; i++)
{
_mm512_store_epi32(s->data + i * VECLEN, _mm512_set1_epi32(0));
}
#ifdef DEBUG_MERSENNE
print_vechexbignum(c, "after add:");
#endif
// if there was a carry, add it back in.
a1 = _mm512_load_epi32(c->data + wshift * VECLEN);
scarry = _mm512_test_epi32_mask(a1, _mm512_set1_epi32((1 << (uint32_t)bshift)));
i = 0;
while (scarry > 0)
{
a1 = _mm512_load_epi32(c->data + i * VECLEN);
a0 = _mm512_addcarry_epi32(a1, scarry, &scarry);
_mm512_store_epi32(c->data + i * VECLEN, a0);
i++;
}
// clear the potential hi-bit
a1 = _mm512_load_epi32(c->data + wshift * VECLEN);
_mm512_store_epi32(c->data + wshift * VECLEN,
_mm512_and_epi32(_mm512_set1_epi32((1 << (uint32_t)(bshift)) - 1), a1));
#ifdef DEBUG_MERSENNE
print_vechexbignum(c, "after carry add:");
exit(1);
#endif
c->size = NWORDS;
return;
}
void vecsqrmod_mersenne(bignum* a, bignum* c, bignum* n, bignum* s, monty* mdata)
{
vecmulmod_mersenne(a, a, c, n, s, mdata);
return;
}
void vecaddmod_mersenne(bignum* a, bignum* b, bignum* c, monty* mdata)
{
// assumptions:
// a, b, c are of length VECLEN * NWORDS
// a, b, c, and n are aligned
// a and b are both positive
// n is the montgomery base
int i;
__mmask16 carry = 0;
__m512i avec;
__m512i bvec;
__m512i cvec;
int bshift = mdata->nbits % 32;
int wshift = mdata->nbits / 32;
// add
for (i = 0; i < NWORDS; i++)
{
avec = _mm512_load_epi32(a->data + i * VECLEN);
bvec = _mm512_load_epi32(b->data + i * VECLEN);
cvec = _mm512_adc_epi32(avec, carry, bvec, &carry);
_mm512_store_epi32(c->data + i * VECLEN, cvec);
}
// check for a carry.
avec = _mm512_load_epi32(c->data + wshift * VECLEN);
carry = _mm512_test_epi32_mask(avec, _mm512_set1_epi32((1 << bshift)));
// the modulo is just the low part plus 1 (the carry, if present).
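// (a carry out of bit p represents 2^p, and 2^p == 1 mod 2^p - 1, so it
// folds back in as +1 at the bottom.)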
cvec = _mm512_load_epi32(c->data + 0 * VECLEN);
bvec = _mm512_addcarry_epi32(cvec, carry, &carry);
_mm512_store_epi32(c->data + 0 * VECLEN, bvec);
for (i = 1; (i < NWORDS) && (carry > 0); i++)
{
cvec = _mm512_load_epi32(c->data + i * VECLEN);
bvec = _mm512_addcarry_epi32(cvec, carry, &carry);
_mm512_store_epi32(c->data + i * VECLEN, bvec);
}
// clear the potential hi-bit
avec = _mm512_load_epi32(c->data + wshift * VECLEN);
_mm512_store_epi32(c->data + wshift * VECLEN,
_mm512_and_epi32(_mm512_set1_epi32((1 << (bshift)) - 1), avec));
return;
}
void vecsubmod_mersenne(bignum* a, bignum* b, bignum* c, monty* mdata)
{
// assumptions:
// a, b, c are of length VECLEN * NWORDS
// s1 is of length VECLEN
// a, b, c, n, and s1 are aligned
// a and b are both positive
// a >= b
// n is the montgomery base
int i;
__mmask16 carry = 0;
__mmask16 mask = 0;
__m512i nvec;
__m512i avec;
__m512i bvec;
__m512i cvec;
int bshift = mdata->nbits % 32;
int wshift = mdata->nbits / 32;
// subtract
carry = 0;
for (i = 0; i <= wshift; i++)
{
avec = _mm512_load_epi32(a->data + i * VECLEN);
bvec = _mm512_load_epi32(b->data + i * VECLEN);
cvec = _mm512_sbb_epi32(avec, carry, bvec, &carry);
_mm512_store_epi32(c->data + i * VECLEN, cvec);
}
// if we had a final carry, then b was bigger than a so we need to re-add n.
mask = carry;
carry = 0;
nvec = _mm512_set1_epi32(0xffffffff);
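// every word of n = 2^p - 1 below the top is all ones, so a broadcast of
// 0xffffffff stands in for n here; the top word is trimmed to bshift
// bits by the mask below.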
for (i = 0; i <= wshift; i++)
{
cvec = _mm512_load_epi32(c->data + i * VECLEN);
bvec = _mm512_mask_adc_epi32(cvec, mask, carry, nvec, &carry);
_mm512_store_epi32(c->data + i * VECLEN, bvec);
}
// clear the potential hi-bit
avec = _mm512_load_epi32(c->data + wshift * VECLEN);
_mm512_store_epi32(c->data + wshift * VECLEN,
_mm512_and_epi32(_mm512_set1_epi32((1 << (bshift)) - 1), avec));
return;
}
void vec_simul_addsub_mersenne(bignum* a, bignum* b, bignum* sum, bignum* diff,
monty* mdata)
{
// assumptions:
// a, b, c are of length VECLEN * NWORDS
// a, b, c, and n are aligned
// a and b are both positive
// n is the montgomery base
// produce sum = a + b and diff = a - b at the same time which
// saves 3N loads (only have to load a,b, and n once)
int i;
__mmask16 carry = 0;
__mmask16 borrow = 0;
__mmask16 bmask = 0;
__m512i avec;
__m512i bvec;
__m512i cvec;
__m512i nvec;
int bshift = mdata->nbits % 32;
int wshift = mdata->nbits / 32;
for (i = 0; i <= wshift; i++)
{
// add
avec = _mm512_load_epi32(a->data + i * VECLEN);
bvec = _mm512_load_epi32(b->data + i * VECLEN);
cvec = _mm512_adc_epi32(avec, carry, bvec, &carry);
_mm512_store_epi32(sum->data + i * VECLEN, cvec);
// sub
cvec = _mm512_sbb_epi32(avec, borrow, bvec, &borrow);
_mm512_store_epi32(diff->data + i * VECLEN, cvec);
}
bmask = borrow; // result too small, need to add n
// check for a carry.
avec = _mm512_load_epi32(sum->data + wshift * VECLEN);
carry = _mm512_test_epi32_mask(avec, _mm512_set1_epi32((1 << bshift)));
// the modulo is just the low part plus 1 (the carry, if present).
cvec = _mm512_load_epi32(sum->data + 0 * VECLEN);
bvec = _mm512_addcarry_epi32(cvec, carry, &carry);
_mm512_store_epi32(sum->data + 0 * VECLEN, bvec);
for (i = 1; (i < NWORDS) && (carry > 0); i++)
{
cvec = _mm512_load_epi32(sum->data + i * VECLEN);
bvec = _mm512_addcarry_epi32(cvec, carry, &carry);
_mm512_store_epi32(sum->data + i * VECLEN, bvec);
}
// clear the potential hi-bit
avec = _mm512_load_epi32(sum->data + wshift * VECLEN);
_mm512_store_epi32(sum->data + wshift * VECLEN,
_mm512_and_epi32(_mm512_set1_epi32((1 << (bshift)) - 1), avec));
carry = 0;
nvec = _mm512_set1_epi32(0xffffffff);
for (i = 0; i <= wshift; i++)
{
// conditional add
cvec = _mm512_load_epi32(diff->data + i * VECLEN);
bvec = _mm512_mask_adc_epi32(cvec, bmask, carry, nvec, &carry);
_mm512_store_epi32(diff->data + i * VECLEN, bvec);
}
// clear the potential hi-bit
avec = _mm512_load_epi32(diff->data + wshift * VECLEN);
_mm512_store_epi32(diff->data + wshift * VECLEN,
_mm512_and_epi32(_mm512_set1_epi32((1 << (bshift)) - 1), avec));
return;
}
void vec_simul_addsub(bignum *a, bignum *b, bignum *sum, bignum *diff, monty* mdata)
{
// assumptions:
// a, b, c are of length VECLEN * NWORDS
// a, b, c, and n are aligned
// a and b are both positive
// n is the montgomery base
// produce sum = a + b and diff = a - b at the same time which
// saves 3N loads (only have to load a,b, and n once)
int i;
__mmask16 carry = 0;
__mmask16 borrow = 0;
__mmask16 cmask = 0;
__mmask16 cmask2 = 0;
__mmask16 bmask = 0;
__m512i avec;
__m512i bvec;
__m512i cvec;
__m512i nvec;
for (i = 0; i < NWORDS; i++)
{
// add
avec = _mm512_load_epi32(a->data + i * VECLEN);
bvec = _mm512_load_epi32(b->data + i * VECLEN);
cvec = _mm512_adc_epi32(avec, carry, bvec, &carry);
_mm512_store_epi32(sum->data + i * VECLEN, cvec);
// sub
cvec = _mm512_sbb_epi32(avec, borrow, bvec, &borrow);
_mm512_store_epi32(diff->data + i * VECLEN, cvec);
}
cmask = carry; // result too big, need to subtract n
bmask = borrow; // result too small, need to add n
cmask2 = cmask; // keep looking mask for add
// compare. the initial mask is equal to the addition carry
// because if the most significant word has a carry, then the
// result is bigger than n.
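// sketch: starting from the most significant word, a lane is decided GT
// the first time its word compares greater (while still undecided) and
// decided LT the first time it compares less; lanes still undecided at
// the end are equal.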
for (i = NWORDS - 1; i >= 0; i--)
{
cvec = _mm512_load_epi32(sum->data + i * VECLEN);
nvec = _mm512_load_epi32(mdata->n->data + i * VECLEN);
// compare those that have not already been decided using the mask
cmask |= _mm512_mask_cmp_epu32_mask(~cmask2, cvec, nvec, _MM_CMPINT_GT);
cmask2 |= _mm512_mask_cmp_epu32_mask(~cmask2, cvec, nvec, _MM_CMPINT_LT);
// decided all of them, stop comparing.
if (cmask2 == 0xffff)
{
break;
}
}
// check for equal as well by flipping mask bits that have still
// not been decided (i.e., are equal)
cmask |= (~cmask2);
carry = 0;
borrow = 0;
for (i = 0; i < NWORDS; i++)
{
// conditional sub
cvec = _mm512_load_epi32(sum->data + i * VECLEN);
nvec = _mm512_load_epi32(mdata->n->data + i * VECLEN);
bvec = _mm512_mask_sbb_epi32(cvec, cmask, borrow, nvec, &borrow);
_mm512_store_epi32(sum->data + i * VECLEN, bvec);
// conditional add
cvec = _mm512_load_epi32(diff->data + i * VECLEN);
bvec = _mm512_mask_adc_epi32(cvec, bmask, carry, nvec, &carry);
_mm512_store_epi32(diff->data + i * VECLEN, bvec);
}
return;
}
void vecaddmod(bignum *a, bignum *b, bignum *c, monty* mdata)
{
// assumptions:
// a, b, c are of length VECLEN * NWORDS
// a, b, c, and n are aligned
// a and b are both positive
// n is the montgomery base
int i;
__mmask16 carry = 0;
__mmask16 mask = 0;
__mmask16 mask2 = 0;
__m512i avec;
__m512i bvec;
__m512i cvec;
__m512i nvec;
// add
for (i = 0; i < NWORDS; i++)
{
avec = _mm512_load_epi32(a->data + i * VECLEN);
bvec = _mm512_load_epi32(b->data + i * VECLEN);
cvec = _mm512_adc_epi32(avec, carry, bvec, &carry);
_mm512_store_epi32(c->data + i * VECLEN, cvec);
}
mask = carry; // sub mask
mask2 = mask; // keep looking mask
// compare. the initial mask is equal to the addition carry
// because if the most significant word has a carry, then the
// result is bigger than n.
for (i = NWORDS - 1; i >= 0; i--)
{
cvec = _mm512_load_epi32(c->data + i * VECLEN);
nvec = _mm512_load_epi32(mdata->n->data + i * VECLEN);
// compare those that have not already been decided using the mask
mask |= _mm512_mask_cmp_epu32_mask(~mask2, cvec, nvec, _MM_CMPINT_GT);
mask2 |= _mm512_mask_cmp_epu32_mask(~mask2, cvec, nvec, _MM_CMPINT_LT);
// decided all of them, stop comparing.
if (mask2 == 0xffff)
{
break;
}
}
// check for equal as well by flipping mask bits that have still
// not been decided (i.e., are equal)
mask |= (~mask2);
// subtract n from c when c is not less than n, as indicated by a 1 bit in mask
carry = 0;
for (i = 0; i < NWORDS; i++)
{
cvec = _mm512_load_epi32(c->data + i * VECLEN);
nvec = _mm512_load_epi32(mdata->n->data + i * VECLEN);
bvec = _mm512_mask_sbb_epi32(cvec, mask, carry, nvec, &carry);
_mm512_store_epi32(c->data + i * VECLEN, bvec);
}
return;
}
void vecsubmod(bignum *a, bignum *b, bignum *c, monty* mdata)
{
// assumptions:
// a, b, c are of length VECLEN * NWORDS
// a, b, c, and n are aligned
// a and b are both positive and less than n
// n is the montgomery base
// (a < b is handled below by re-adding n after the subtraction)
int i;
__mmask16 carry = 0;
__mmask16 mask = 0;
__m512i nvec;
__m512i avec;
__m512i bvec;
__m512i cvec;
// subtract
carry = 0;
for (i = 0; i < NWORDS; i++)
{
avec = _mm512_load_epi32(a->data + i * VECLEN);
bvec = _mm512_load_epi32(b->data + i * VECLEN);
cvec = _mm512_sbb_epi32(avec, carry, bvec, &carry);
_mm512_store_epi32(c->data + i * VECLEN, cvec);
}
// if we had a final carry, then b was bigger than a so we need to re-add n.
mask = carry;
carry = 0;
for (i = 0; (i < NWORDS) && (mask > 0); i++)
{
avec = _mm512_load_epi32(c->data + i * VECLEN);
nvec = _mm512_load_epi32(mdata->n->data + i * VECLEN);
cvec = _mm512_mask_adc_epi32(avec, mask, carry, nvec, &carry);
_mm512_store_epi32(c->data + i * VECLEN, cvec);
}
return;
}
uint32_t vec_gte(bignum * u, bignum * v)
{
// decide if each of the bignums in vec 'u' is >=
// the corresponding bignum in vec 'v'.
// return a mask of results.
int i;
__mmask16 mdecided = 0;
__mmask16 mgte = 0;
for (i = NWORDS - 1; i >= 0; --i)
{
__m512i a = _mm512_load_epi32(u->data + i * VECLEN);
__m512i b = _mm512_load_epi32(v->data + i * VECLEN);
mgte |= _mm512_mask_cmp_epu32_mask(~mdecided, a, b, _MM_CMPINT_GT);
mdecided = mdecided | _mm512_mask_cmp_epu32_mask(~mdecided, a, b, _MM_CMPINT_LT);
if (mdecided == 0xffff)
break;
}
//equal if still undecided
mgte |= ~mdecided;
return (uint32_t)mgte;
}
uint32_t vec_eq(base_t * u, base_t * v, int sz)
{
// decide if each of the bignums in vec 'u' is equal to
// the corresponding bignum in vec 'v'.
// return a mask of results.
int i;
__mmask16 meq = 0xffff;
for (i = sz - 1; i >= 0; --i)
{
__m512i a = _mm512_load_epi32(u + i * VECLEN);
__m512i b = _mm512_load_epi32(v + i * VECLEN);
meq = _mm512_mask_cmp_epu32_mask(meq, a, b, _MM_CMPINT_EQ);
if (meq == 0)
break;
}
return (uint32_t)meq;
}
uint32_t vec_bignum_mask_lshift_1(bignum * u, uint32_t wmask)
{
// shift each bignum in u left by 1 bit, in place; an overflow mask is returned
int i;
__m512i nextcarry;
__m512i carry = _mm512_setzero_epi32();
__m512i word = _mm512_setzero_epi32();
for (i = 0; i < NWORDS; i++)
{
word = _mm512_load_epi32(u->data + i * VECLEN);
// _mm512_and_epi32(word, highmask) // not necessary to mask as the shift zero extends.
nextcarry = _mm512_srli_epi32(word, 31);
_mm512_mask_store_epi32(u->data + i * VECLEN, (__mmask16)wmask,
_mm512_or_epi32(_mm512_slli_epi32(word, 1), carry));
carry = nextcarry;
}
// i == NWORDS here: store the final shifted-out bit in the word above NWORDS
_mm512_mask_store_epi32(u->data + i * VECLEN, (__mmask16)wmask, carry);
// return an overflow mask
return wmask & _mm512_cmp_epi32_mask(carry, _mm512_setzero_epi32(), _MM_CMPINT_GT);
}
void vec_bignum_mask_rshift_1(bignum * u, uint32_t wmask)
{
// shift each bignum in u right by 1 bit, in place
int i;
__m512i nextcarry;
__m512i carry = _mm512_setzero_epi32();
__m512i lowmask = _mm512_set1_epi32(0x00000001);
__m512i word;
//carry = 0;
//for (i = sb - 1; i >= 0; --i)
//{
// nextcarry = (b->data[i] & mask) << y;
// a->data[i] = b->data[i] >> x | carry;
// carry = nextcarry;
//}
for (i = NWORDS - 1; i >= 0; i--)
{
word = _mm512_load_epi32(u->data + i * VECLEN);
nextcarry = _mm512_slli_epi32(_mm512_and_epi32(word, lowmask), 31);
_mm512_mask_store_epi32(u->data + i * VECLEN, (__mmask16)wmask,
_mm512_or_epi32(_mm512_srli_epi32(word, 1), carry));
carry = nextcarry;
}
return;
}
void vec_bignum_mask_rshift_n(bignum* u, bignum* v, int n, uint32_t wmask)
{
// v = u >> n: right shift each bignum in u by n bits, into v
int i;
__m512i nextcarry;
__m512i carry = _mm512_set1_epi32(0);
__m512i lowmask;
__m512i word;
int wshift = n / 32;
int bshift = n % 32;
lowmask = _mm512_set1_epi32((1ULL << (uint32_t)bshift) - 1ULL);
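// e.g. n = 40 with 32-bit words: wshift = 1, bshift = 8, so output word j
// combines the high 24 bits of u[j + 1] with the low 8 bits of u[j + 2]
// (values chosen purely for illustration)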
for (i = 2 * NWORDS - 1; (i - wshift) >= 0; i--)
{
word = _mm512_load_epi32(u->data + i * VECLEN);
nextcarry = _mm512_slli_epi32(_mm512_and_epi32(word, lowmask), (DIGITBITS - bshift));
_mm512_mask_store_epi32(v->data + (i - wshift) * VECLEN, (__mmask16)wmask,
_mm512_or_epi32(_mm512_srli_epi32(word, bshift), carry));
carry = nextcarry;
}
return;
}
void vec_bignum_mask_sub(bignum *a, bignum *b, bignum *c, uint32_t wmask)
{
// assumptions:
// a, b, c are of length VECLEN * NWORDS
// a, b, and c are aligned
// a and b are both positive
// a >= b
int i;
__mmask16 carry = 0;
__m512i avec;
__m512i bvec;
__m512i cvec;
if (wmask == 0)
return;
// subtract the selected elements ('1' in the mask)
carry = 0;
for (i = 0; i < NWORDS; i++)
{
avec = _mm512_load_epi32(a->data + i * VECLEN);
bvec = _mm512_load_epi32(b->data + i * VECLEN);
cvec = _mm512_sbb_epi32(avec, carry, bvec, &carry);
_mm512_mask_store_epi32(c->data + i * VECLEN, (__mmask16)wmask, cvec);
}
if (carry)
{
// subtract any final borrows that exceed the size of b.
_mm512_mask_store_epi32(c->data + i * VECLEN, (__mmask16)wmask & carry, _mm512_setzero_epi32());
}
return;
}
|
Chronic disease and lifestyle factors associated with change in sleep duration among older adults in the Singapore Chinese Health Study
Identifying risk factors for future change in sleep duration can clarify whether, and if so how, sleep and morbidity are bidirectionally related. To date, only limited longitudinal evidence exists characterizing changes to sleep duration among older adults. This study aimed to identify factors associated with change in sleep duration in a large sample of older adults (≥ 60 years) residing in Singapore (n = 10 335). These adults were monitored as part of the Singapore Chinese Health Study, which collected information regarding daily sleep duration at baseline (assessed in 1993–1998) and at a follow‐up wave conducted a mean of 12.7 years later (assessed in 2006–2010). Among adults sleeping 6–8 h at baseline (n = 8265), most participants (55.6%) remained 6–8 h sleepers at follow‐up, while 8.4% became short (< 6 h) and 36.0% became long (> 8 h) sleepers. A history of stroke, diabetes, cancer, hip fracture and greater age all independently increased the odds of having long sleep duration at follow‐up, while greater educational attainment and weekly physical activity were both associated with reduced odds of becoming a long sleeper. Other than greater baseline age, the only factor related to higher odds of becoming a short sleeper was concurrent stomach/duodenal ulcer at follow‐up. Long sleep duration among older adults may therefore reflect longstanding disease processes, whereas the aetiology of short sleep may predominantly involve factors other than those examined. Future research is needed to distinguish if/when long sleep duration serves the disease recovery process, and when long sleep duration complicates disease and requires sleep medicine interventions.
// POST will start the building of a POST request
func (c *Case) POST(p string) *RequestBuilder {
return &RequestBuilder{
method: http.MethodPost,
path: p,
cas: c,
fail: c.fail,
}
} |
/**
* Positive test case for checkCustomerAccountsMaintenance method with mandatory parameters.
*/
@Test(enabled = true, groups = {"wso2.esb"},
description = "SAPByDesign {checkCustomerAccountsMaintenance} integration test with mandatory parameters.")
public void testCheckCustomerAccountsMaintenanceWithMandatoryParameters() throws Exception {
connectorProperties.setProperty("familyName", connectorProperties.getProperty("familyName")
+ System.currentTimeMillis());
SOAPEnvelope esbSoapResponse =
sendSOAPRequest(proxyUrl, "checkCustomerAccountsMaintenance.xml", esbRequestHeadersMap, "mediate",
SOAP_HEADER_XPATH_EXP, SOAP_BODY_XPATH_EXP);
OMElement esbResponseElement = AXIOMUtil.stringToOM(esbSoapResponse.getBody().toString());
String uuid = (String) xPathEvaluate(esbResponseElement, "string(//UUID/text())", nameSpaceMap);
String internalId = (String) xPathEvaluate(esbResponseElement, "string(//InternalID/text())", nameSpaceMap);
Assert.assertNotEquals(uuid, "");
Assert.assertNotEquals(internalId, "");
} |
/*
* Copyright (C) 2019 Intel Corporation
* SPDX-License-Identifier: BSD-3-Clause
*/
package com.intel.kms.keplerlake;
import com.intel.dcsg.cpg.configuration.Configuration;
import com.intel.dcsg.cpg.tls.policy.TlsConnection;
import com.intel.mtwilson.jaxrs2.client.JaxrsClient;
import com.intel.mtwilson.jaxrs2.client.JaxrsClientBuilder;
import java.net.URL;
import java.util.Properties;
/**
*
* @author sshekhex
*/
public class KeplerlakeClient extends JaxrsClient {
public KeplerlakeClient(URL url) throws Exception {
super(JaxrsClientBuilder.factory().url(url).build());
}
public KeplerlakeClient(Properties properties) {
super(JaxrsClientBuilder.factory().configuration(properties).build());
}
public KeplerlakeClient(Configuration configuration) {
super(JaxrsClientBuilder.factory().configuration(configuration).build());
}
public KeplerlakeClient(Properties properties, TlsConnection tlsConnection) {
super(JaxrsClientBuilder.factory().configuration(properties).tlsConnection(tlsConnection).build());
}
}
|
// UrlFileNameMatchConditionParametersARMGenerator returns a generator of UrlFileNameMatchConditionParametersARM instances for property testing.
func UrlFileNameMatchConditionParametersARMGenerator() gopter.Gen {
if urlFileNameMatchConditionParametersARMGenerator != nil {
return urlFileNameMatchConditionParametersARMGenerator
}
generators := make(map[string]gopter.Gen)
AddIndependentPropertyGeneratorsForUrlFileNameMatchConditionParametersARM(generators)
urlFileNameMatchConditionParametersARMGenerator = gen.Struct(reflect.TypeOf(UrlFileNameMatchConditionParametersARM{}), generators)
return urlFileNameMatchConditionParametersARMGenerator
} |
_base_ = ['./pose-detection_static.py', '../_base_/backends/tensorrt.py']
onnx_config = dict(
input_shape=[192, 256],
dynamic_axes={
'input': {
0: 'batch',
},
'output': {
0: 'batch'
}
})
backend_config = dict(
common_config=dict(max_workspace_size=1 << 30),
model_inputs=[
dict(
input_shapes=dict(
input=dict(
min_shape=[1, 3, 256, 192],
opt_shape=[2, 3, 256, 192],
max_shape=[4, 3, 256, 192])))
])
|
Research on Temperature Prediction Model and Temperature Control Model for Fruit and Vegetable Greenhouses
China's agriculture is shifting to a modern form thanks to the continuous development of science and technology. To find the factors that suit the growth of fruits and vegetables and to provide the conditions for high-quality production, improving the management of greenhouse fruit and vegetables is especially crucial. In this paper, an intelligent control scheme for greenhouses is presented. To predict the ambient temperature of a greenhouse hour by hour, a temperature prediction model is constructed based on the growth environment of the fruits and vegetables inside. Accounting for the influence of different environmental factors on temperature, the controller regulates environmental parameters and so promotes better growth of greenhouse crops. Temperature prediction models based on ELM and BP-ANN are compared and analyzed: the mean square error of the ELM model is 0.0045, lower than the training error of BP-ANN, so it gives the better prediction. A greenhouse temperature control model is developed using PID controllers, with the proportional, integral, and derivative parameters tuned to create a climate suitable for fruit and vegetable growth.
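To make the control step concrete, the following is a minimal sketch of the kind of discrete PID temperature loop the abstract describes. It is illustrative only: the gains, the sampling interval, and the read_temperature/apply_heating callables are assumptions made for the sketch, not details taken from the study.

# Minimal discrete PID temperature loop (illustrative; gains, dt, and the
# sensor/actuator callables are assumptions, not values from the paper).

def pid_step(setpoint, measurement, state, kp=2.0, ki=0.1, kd=0.5, dt=60.0):
    """One PID update; state carries (integral, previous_error)."""
    integral, prev_error = state
    error = setpoint - measurement
    integral += error * dt                      # I term accumulates error
    derivative = (error - prev_error) / dt      # D term approximates the slope
    output = kp * error + ki * integral + kd * derivative
    return output, (integral, error)

def control_loop(read_temperature, apply_heating, setpoint=25.0, steps=60):
    """Runs `steps` updates against hypothetical sensor/actuator callables."""
    state = (0.0, 0.0)
    for _ in range(steps):
        u, state = pid_step(setpoint, read_temperature(), state)
        apply_heating(max(0.0, min(1.0, u)))    # clamp to the actuator's range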
Facebook has restored access to its website after a 40-minute outage on Monday, the second time in a week that the site has gone down.
Users saw an error message that read "Sorry, something went wrong. We're working on it and we'll get it fixed as soon as we can."
Company shares were down nearly 4% at $89.25 (£58.83) shortly after the site went down. It also crashed on Thursday.
The social networking site is used by nearly 1.5 billion people worldwide.
Dave Lee, BBC North America technology reporter
All right, all right - the world is still spinning. But Facebook going down twice, in a relatively short timeframe, can be fairly significant. It moves markets - Facebook share price is down almost 4% as I write this.
The company's status page detailed a "major outage" on Monday, but things were soon back to normal.
A quick post-mortem, posted by an engineer at the company, said the problem was with its Graph API. In simplest terms, the "graph" is the term Facebook uses to describe the core of Facebook's system. Posts, photos, statuses are all connected to people, groups and pages via the Graph - and that's what failed.
Sometimes Facebook downtime can also affect other companies that tap into Facebook's data - like Tinder, for example.
The last time this happened, Facebook admitted it was something its own engineers had caused by tinkering. That's likely the case again here, but twice in a week will be a little frustrating for both users and shareholders.
Follow Dave Lee on Twitter @DaveLeeBBC
People took to Twitter to lament and poke fun at the situation.
The Kingston police force in London pre-empted emergency cold turkey calls, with tongue firmly in cheek.
Sites that monitor disruptions said North America was particularly badly affected this time.
"We are currently restoring Facebook services that people had trouble accessing earlier today due to a configuration," said a Facebook spokesman.
Some users took to speculating about the cause.
Others sarcastically bid the site good riddance.
And some enjoyed underlining that Facebook's loss was Twitter's gain. |
// Run will start the crawling process
func (b *Bot) Run() {
w := &WebhookResponse{}
result, err := b.GetPosts()
if err != nil {
w.Error = err
b.makeRequest(w)
return
}
w.Response = result
b.makeRequest(w)
} |
// isPoolOperationPending returns true if the pool operation is in progress, else false
func (f *fixture) isPoolOperationPending(cspiName string, tConfig *testConfig) (bool, string) {
ns, name, err := cache.SplitMetaNamespaceKey(cspiName)
if err != nil {
return false, err.Error()
}
cspiObj, err := f.openebsClient.CstorV1().CStorPoolInstances(ns).Get(context.TODO(), name, metav1.GetOptions{})
if err != nil {
return false, err.Error()
}
if tConfig.writeCacheRaidGroups != nil || tConfig.dataRaidGroups != nil {
if ok, msg := isStatusConditionMatched(cspiObj, cstor.CSPIPoolExpansion, "PoolExpansionInProgress"); !ok {
return false, fmt.Sprintf("Expected pool expansion to be in progress but %s", msg)
}
}
return true, ""
} |
/**
* Base service for the standard slider client/server services
*/
public abstract class AbstractSliderLaunchedService extends
LaunchedWorkflowCompositeService {
private static final Logger log =
LoggerFactory.getLogger(AbstractSliderLaunchedService.class);
protected AbstractSliderLaunchedService(String name) {
super(name);
// make sure all the yarn configs get loaded
YarnConfiguration conf = new YarnConfiguration();
ConfigHelper.registerDeprecatedConfigItems();
}
/**
* look up the registry quorum from the config
* @return the quorum string
* @throws BadConfigException if it is not there or invalid
*/
public String lookupZKQuorum() throws BadConfigException {
String registryQuorum = getConfig().get(RegistryConstants.KEY_REGISTRY_ZK_QUORUM);
// though if neither is set: trouble
if (SliderUtils.isUnset(registryQuorum)) {
throw new BadConfigException(
"No Zookeeper quorum provided in the"
+ " configuration property " + RegistryConstants.KEY_REGISTRY_ZK_QUORUM
);
}
ZookeeperUtils.splitToHostsAndPortsStrictly(registryQuorum);
return registryQuorum;
}
/**
* Create, adopt, and start the YARN registration service
* @return the registry operations service, already deployed as a child
* of the AbstractSliderLaunchedService instance.
*/
public RegistryOperations startRegistryOperationsService()
throws BadConfigException {
// push back the slider registry entry if needed
String quorum = lookupZKQuorum();
RegistryOperations registryWriterService =
createRegistryOperationsInstance();
deployChildService(registryWriterService);
return registryWriterService;
}
/**
* Create the registry operations instance. This is to allow
* subclasses to instantiate a subclass service
* @return an instance to match to the lifecycle of this service
*/
protected RegistryOperations createRegistryOperationsInstance() {
return RegistryOperationsFactory.createInstance("YarnRegistry", getConfig());
}
/**
* Utility method to require an argument to be set (non null, non-empty)
* @param argname argument name
* @param value value
* @throws BadCommandArgumentsException if the condition is not met
*/
protected static void requireArgumentSet(String argname, String value)
throws BadCommandArgumentsException {
require(isSet(value), "Required argument %s missing", argname );
}
/**
* Require a condition to hold; throw {@link BadCommandArgumentsException} if not.
* The exception text is the formatted message.
* @param condition condition
* @param message string to format
* @param args list of arguments to format.
* @throws BadCommandArgumentsException
*/
protected static void require(boolean condition, String message,
Object... args)
throws BadCommandArgumentsException {
if (!condition) {
throw new BadCommandArgumentsException(message, args);
}
}
} |
from pypy.objspace.fake.checkmodule import checkmodule
def test_checkmodule():
checkmodule('struct')
|
// Read implements the io.Reader interface.
func (f *File) Read(b []byte) (n int, err error) {
reader, err := f.getReader()
if err != nil {
return 0, err
}
return reader.Read(b)
} |
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package eu.fivegex.monitoring.appl.probes.delay.unidirectional;
import eu.reservoir.monitoring.appl.datarate.EveryNSeconds;
import eu.reservoir.monitoring.core.AbstractProbe;
import eu.reservoir.monitoring.core.DefaultProbeAttribute;
import eu.reservoir.monitoring.core.DefaultProbeValue;
import eu.reservoir.monitoring.core.Probe;
import eu.reservoir.monitoring.core.ProbeAttributeType;
import eu.reservoir.monitoring.core.ProbeMeasurement;
import eu.reservoir.monitoring.core.ProbeValue;
import eu.reservoir.monitoring.core.ProducerMeasurement;
import eu.reservoir.monitoring.core.TypeException;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.concurrent.LinkedBlockingQueue;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
*
* @author uceeftu
*/
public class DelayDestProbe extends AbstractProbe implements Probe {
int mgmPackets = 5;
int mgmTimeout = 1000;
int mgmInterval = 120; // seconds
int dataPackets = 5;
int dataTimeout = 1000;
int probeInterval = 30; // seconds, used also as the actual probe rate
UDPDataReceiver dataReceiver;
UDPMgmSender mgmSender;
Long timeOffset;
LinkedBlockingQueue<Long> queue;
private Logger LOGGER = LoggerFactory.getLogger(DelayDestProbe.class);
public DelayDestProbe(String probeName,
String mgmLocalAddr,
String mgmLocalPort,
String dataLocalAddr,
String dataLocalPort,
String mgmSourceAddr,
String mgmSourcePort,
String mgmPackets,
String mgmTimeout,
String mgmInterval,
String dataPackets,
String dataTimeout,
String dataInterval) throws SocketException, UnknownHostException {
this.mgmPackets = Integer.valueOf(mgmPackets);
this.mgmTimeout = Integer.valueOf(mgmTimeout);
this.mgmInterval = Integer.valueOf(mgmInterval);
this.dataPackets = Integer.valueOf(dataPackets);
this.dataTimeout = Integer.valueOf(dataTimeout);
this.probeInterval = Integer.valueOf(dataInterval);
queue = new LinkedBlockingQueue<>();
mgmSender = new UDPMgmSender(InetAddress.getByName(mgmLocalAddr),
Integer.valueOf(mgmLocalPort),
InetAddress.getByName(mgmSourceAddr),
Integer.valueOf(mgmSourcePort),
this.mgmPackets,
this.mgmTimeout,
this.mgmInterval);
dataReceiver = new UDPDataReceiver(Integer.valueOf(dataLocalPort), dataLocalAddr, queue, this.dataPackets, this.dataTimeout);
setName(probeName);
setDataRate(new EveryNSeconds(this.probeInterval));
addProbeAttribute(new DefaultProbeAttribute(0, "link", ProbeAttributeType.STRING, "id"));
addProbeAttribute(new DefaultProbeAttribute(1, "delay", ProbeAttributeType.LONG, "milliseconds"));
}
@Override
public void beginThreadBody() {
mgmSender.start();
dataReceiver.start();
}
@Override
public void endThreadBody() {
mgmSender.stop();
dataReceiver.stop();
}
@Override
public ProbeMeasurement collect() {
try {
Long dataDelay = queue.take();
LOGGER.info("Measured delay just taken off the queue (size=" + queue.size() + "): " + dataDelay);
timeOffset = mgmSender.getTimeOffset();
LOGGER.info("current time offset: " + timeOffset);
ArrayList<ProbeValue> list = new ArrayList<>(2);
list.add(new DefaultProbeValue(0, "vnf1vnf2")); // TODO check this and see if we need to use a parameter
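// report the one-way delay: the raw measurement corrected by the clock
// offset estimated by the management sender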
list.add(new DefaultProbeValue(1, dataDelay + timeOffset));
ProbeMeasurement m = new ProducerMeasurement(this, list, "Link");
LOGGER.debug("Returning measurement: " + m.toString());
return m;
} catch (InterruptedException ie) {
LOGGER.error("Received interrupt: shutting down probe thread");
super.threadRunning = false;
} catch (TypeException te) {
LOGGER.error("Error while adding probe attribute: " + te.getMessage());
} catch (Exception e) {
LOGGER.error("Error " + e.getMessage());
}
return null;
}
}
|
/*
//@HEADER
// ************************************************************************
//
// Shards : Shared Discretization Tools
// Copyright 2008 Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact <NAME> (<EMAIL>),
// <NAME> (<EMAIL>), or
// <NAME> (<EMAIL>).
//
// ************************************************************************
//@HEADER
*/
#ifndef Shards_CellTopologyData_h
#define Shards_CellTopologyData_h
#if defined( __cplusplus )
extern "C" {
#endif
/** \addtogroup shards_package_cell_topology
* \{
*/
/*----------------------------------------------------------------------*/
struct CellTopologyData ;
struct CellTopologyData_Subcell ;
struct CellTopologyData_Permutation ;
/** \brief A simple 'C' struct of cell topology attributes.
*
* The topology may be extended such that the number of nodes
* (subcells of dimension zero) is greater than the number of
* vertices. In this case the vertices must be ordered first.
*
* Nodes, edges, and sides are subcells with a particular dimension.
* A cell has edges and sides only if its dimension is greater than one.
* - node has Dim == 0
* - edge has Dim == 1
* - side has Dim == dimension - 1.
*/
struct CellTopologyData {
/** \brief Base, a.k.a. not-extended, version of this topology
* where vertex_count == node_count.
*/
const struct CellTopologyData * base ;
/** \brief Intuitive name for this topology */
const char * name ;
/** \brief Unique key for this topology */
unsigned key ;
/** \brief Topological dimension */
unsigned dimension ;
/** \brief Number of vertices. */
unsigned vertex_count ;
/** \brief Number of nodes (a.k.a. \f$ {Cell}^{0} \f$ subcells).
*
* A topology is <em> extended </em> if node_count > vertex_count
*/
unsigned node_count ;
/** \brief Number of edges (a.k.a. \f$ {Cell}^{1} \f$ boundary subcells). */
unsigned edge_count ;
/** \brief Number of sides (a.k.a. \f$ {Cell}^{D-1} \f$ boundary subcells). */
unsigned side_count ;
/** \brief Number of defined permutations */
unsigned permutation_count ;
/** \brief Flag if the subcells of a given dimension are homogeneous */
unsigned subcell_homogeneity[4] ;
/** \brief Number of subcells of each dimension. */
unsigned subcell_count[4] ;
/** \brief Array of subcells of each dimension
*
* The length of each subcell array is subcell_count[Dim]
* - <b> subcell[Dim][Ord].topology </b> topology of the subcell
* - <b> subcell[Dim][Ord].node[I] </b> node ordinal of the subcell's node I
*/
const struct CellTopologyData_Subcell * subcell[4] ;
/** \brief Array of side subcells of length side_count
*
* The length of the side array is side_count
* - <b> side[Ord].topology </b> topology of the side
* - <b> side[Ord].node[I] </b> node ordinal of the side's node I
*/
const struct CellTopologyData_Subcell * side ;
/** \brief Array of edges subcells of length edge_count
*
* The length of the edge array is edge_count
* - <b> edge[Ord].topology </b> topology of the edge
* - <b> edge[Ord].node[I] </b> node ordinal of the edge's node I
*/
const struct CellTopologyData_Subcell * edge ;
/** \brief Array of node permutations.
*
* - required: 0 <= P < permutation_count
* - required: 0 <= I < node_count
*
* Let ParentCell be dimension D and SubCell be dimension dim < D.
* Let SubCell be connected as subcell Ord with permutation P.
*
* Then <b> ParentCell.node(K) == SubCell.node(I) </b> where:
* - SubCellTopology == ParentCellTopology->subcell[dim][Ord].topology
* - K = ParentCellTopology->subcell[dim][Ord].node[IP]
* - IP = SubCellTopology->permutation[P].node[I]
* - I = SubCellTopology->permutation_inverse[P].node[IP]
*
* The permutation map for P == 0 is required to be identity.
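* For example, a two-node edge typically has two permutations: the identity
* {0,1} (positive polarity) and the reversal {1,0} (negative polarity).
* (Illustrative; the concrete arrays live in the generated topology data.)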
*/
const struct CellTopologyData_Permutation * permutation ;
const struct CellTopologyData_Permutation * permutation_inverse ;
};
/** \brief Subcell information.
*
* - required: 0 <= Dim <= 3
* - required: 0 <= Ord <= subcell_count[Dim]
* - required: 0 <= J < subcell[Dim][Ord]->subcell_count[0]
* - subcell[Dim][Ord].topology
* - subcell[Dim][Ord].node[J]
*/
struct CellTopologyData_Subcell {
/** \brief Subcell topology */
const struct CellTopologyData * topology ;
/** \brief Subcell indexing of \f$ {Cell}^{0} \f$
* with respect to parent cell. */
const unsigned * node ;
};
/** \brief Self-typedef */
typedef struct CellTopologyData CellTopologyData ;
/** \brief Array of node permutations.
*
* - required: 0 <= P < permutation_count
* - required: 0 <= I < node_count
*
* Let ParentCell be dimension D and SubCell be dimension dim < D.
* Let SubCell be connected as subcell Ord with permutation P.
*
* Then <b> ParentCell.node(K) == SubCell.node(I) </b> where:
* - SubCellTopology == ParentCellTopology->subcell[dim][Ord].topology
* - K = ParentCellTopology->subcell[dim][Ord].node[IP]
* - IP = SubCellTopology->permutation[P].node[I]
* - I = SubCellTopology->permutation_inverse[P].node[IP]
*
* The permutation map for P == 0 is required to be identity.
*/
struct CellTopologyData_Permutation {
const unsigned * node ;
unsigned polarity ;
};
/** \brief Values for the CellTopologyData_Permutation polarity */
enum {
CELL_PERMUTATION_POLARITY_IRRELEVANT = 0 ,
CELL_PERMUTATION_POLARITY_POSITIVE = 1 ,
CELL_PERMUTATION_POLARITY_NEGATIVE = 2
};
/** \brief Map a cell->face->edge ordinal to the cell->edge ordinal.
* Return -1 for erroneous input.
*/
extern
int mapCellFaceEdge( const CellTopologyData * cell_topology ,
unsigned face_ordinal ,
unsigned face_edge_ordinal );
/** \} */
#if defined( __cplusplus )
} /* extern "C" */
#endif
#endif /* Shards_CellTopologyData_h */
|
//========= Copyright Valve Corporation, All rights reserved. ============//
//
// Purpose:
//
// $NoKeywords: $
//
//=============================================================================//
#include "cbase.h"
#include "ai_default.h"
#include "ai_task.h"
#include "ai_schedule.h"
#include "ai_node.h"
#include "ai_hull.h"
#include "ai_hint.h"
#include "ai_memory.h"
#include "ai_route.h"
#include "ai_motor.h"
#include "soundent.h"
#include "game.h"
#include "npcevent.h"
#include "entitylist.h"
#include "activitylist.h"
#include "animation.h"
#include "basecombatweapon.h"
#include "IEffects.h"
#include "vstdlib/random.h"
#include "engine/IEngineSound.h"
#include "ammodef.h"
#include "util.h"
#include "hl1_ai_basenpc.h"
#include "hl1_basegrenade.h"
#include "movevars_shared.h"
#include "ai_basenpc.h"
ConVar sk_hassassin_health("sk_hassassin_health", "50");
//=========================================================
// monster-specific schedule types
//=========================================================
enum {
SCHED_ASSASSIN_EXPOSED = LAST_SHARED_SCHEDULE,// cover was blown.
SCHED_ASSASSIN_JUMP, // fly through the air
SCHED_ASSASSIN_JUMP_ATTACK, // fly through the air and shoot
SCHED_ASSASSIN_JUMP_LAND, // hit and run away
SCHED_ASSASSIN_FAIL,
SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY1,
SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY2,
SCHED_ASSASSIN_TAKE_COVER_FROM_BEST_SOUND,
SCHED_ASSASSIN_HIDE,
SCHED_ASSASSIN_HUNT,
};
Activity ACT_ASSASSIN_FLY_UP;
Activity ACT_ASSASSIN_FLY_ATTACK;
Activity ACT_ASSASSIN_FLY_DOWN;
//=========================================================
// monster-specific tasks
//=========================================================
enum {
TASK_ASSASSIN_FALL_TO_GROUND = LAST_SHARED_TASK + 1, // falling and waiting to hit ground
};
//=========================================================
// Monster's Anim Events Go Here
//=========================================================
#define ASSASSIN_AE_SHOOT1 1
#define ASSASSIN_AE_TOSS1 2
#define ASSASSIN_AE_JUMP 3
#define MEMORY_BADJUMP bits_MEMORY_CUSTOM1
class CNPC_HAssassin : public CHL1BaseNPC {
DECLARE_CLASS( CNPC_HAssassin, CHL1BaseNPC );
public:
void Spawn(void);
void Precache(void);
int TranslateSchedule(int scheduleType);
void HandleAnimEvent(animevent_t *pEvent);
float MaxYawSpeed() { return 360.0f; }
void Shoot(void);
int MeleeAttack1Conditions(float flDot, float flDist);
int RangeAttack1Conditions(float flDot, float flDist);
int RangeAttack2Conditions(float flDot, float flDist);
int SelectSchedule(void);
void RunTask(const Task_t *pTask);
void StartTask(const Task_t *pTask);
Class_T Classify(void);
int GetSoundInterests(void);
void RunAI(void);
float m_flLastShot;
float m_flDiviation;
float m_flNextJump;
Vector m_vecJumpVelocity;
float m_flNextGrenadeCheck;
Vector m_vecTossVelocity;
bool m_fThrowGrenade;
int m_iTargetRanderamt;
int m_iFrustration;
int m_iAmmoType;
public:
DECLARE_DATADESC();
DEFINE_CUSTOM_AI;
};
LINK_ENTITY_TO_CLASS( monster_human_assassin, CNPC_HAssassin );
BEGIN_DATADESC( CNPC_HAssassin )
DEFINE_FIELD( m_flLastShot, FIELD_TIME ),
DEFINE_FIELD( m_flDiviation, FIELD_FLOAT ),
DEFINE_FIELD( m_flNextJump, FIELD_TIME ),
DEFINE_FIELD( m_vecJumpVelocity, FIELD_VECTOR ),
DEFINE_FIELD( m_flNextGrenadeCheck, FIELD_TIME ),
DEFINE_FIELD( m_vecTossVelocity, FIELD_VECTOR ),
DEFINE_FIELD( m_fThrowGrenade, FIELD_BOOLEAN ),
DEFINE_FIELD( m_iTargetRanderamt, FIELD_INTEGER ),
DEFINE_FIELD( m_iFrustration, FIELD_INTEGER ),
//DEFINE_FIELD( m_iAmmoType, FIELD_INTEGER ),
END_DATADESC()
//=========================================================
// Spawn
//=========================================================
void CNPC_HAssassin::Spawn() {
Precache();
SetModel("models/hassassin.mdl");
SetHullType(HULL_HUMAN);
SetHullSizeNormal();
SetNavType(NAV_GROUND);
SetSolid(SOLID_BBOX);
AddSolidFlags(FSOLID_NOT_STANDABLE);
SetMoveType(MOVETYPE_STEP);
m_bloodColor = BLOOD_COLOR_RED;
ClearEffects();
m_iHealth = sk_hassassin_health.GetFloat();
m_flFieldOfView = VIEW_FIELD_WIDE; // indicates the width of this monster's forward view cone ( as a dotproduct result )
m_NPCState = NPC_STATE_NONE;
m_HackedGunPos = Vector(0, 24, 48);
m_iTargetRanderamt = 20;
SetRenderColor(255, 255, 255, 20);
m_nRenderMode = kRenderTransTexture;
CapabilitiesClear();
CapabilitiesAdd(bits_CAP_MOVE_GROUND);
CapabilitiesAdd(bits_CAP_INNATE_RANGE_ATTACK1 | bits_CAP_INNATE_RANGE_ATTACK2 | bits_CAP_INNATE_MELEE_ATTACK1);
NPCInit();
}
//=========================================================
// Precache - precaches all resources this monster needs
//=========================================================
void CNPC_HAssassin::Precache() {
m_iAmmoType = GetAmmoDef()->Index("9mmRound");
PrecacheModel("models/hassassin.mdl");
UTIL_PrecacheOther("npc_handgrenade");
PrecacheScriptSound("HAssassin.Shot");
PrecacheScriptSound("HAssassin.Beamsound");
PrecacheScriptSound("HAssassin.Footstep");
}
int CNPC_HAssassin::GetSoundInterests(void) {
return SOUND_WORLD |
SOUND_COMBAT |
SOUND_PLAYER |
SOUND_DANGER;
}
Class_T CNPC_HAssassin::Classify(void) {
return CLASS_HUMAN_MILITARY;
}
//=========================================================
// CheckMeleeAttack1 - jump like crazy if the enemy gets too close.
//=========================================================
int CNPC_HAssassin::MeleeAttack1Conditions(float flDot, float flDist) {
if (m_flNextJump < gpGlobals->curtime && (flDist <= 128 || HasMemory(MEMORY_BADJUMP)) && GetEnemy() != NULL) {
trace_t tr;
Vector vecMin = Vector(random->RandomFloat(0, -64), random->RandomFloat(0, -64), 0);
Vector vecMax = Vector(random->RandomFloat(0, 64), random->RandomFloat(0, 64), 160);
Vector vecDest = GetAbsOrigin() + Vector(random->RandomFloat(-64, 64), random->RandomFloat(-64, 64), 160);
UTIL_TraceHull(GetAbsOrigin() + Vector(0, 0, 36), GetAbsOrigin() + Vector(0, 0, 36), vecMin, vecMax, MASK_SOLID,
this, COLLISION_GROUP_NONE, &tr);
//NDebugOverlay::Box( GetAbsOrigin() + Vector( 0, 0, 36 ), vecMin, vecMax, 0,0, 255, 0, 2.0 );
if (tr.startsolid || tr.fraction < 1.0) {
return COND_TOO_CLOSE_TO_ATTACK;
}
float flGravity = GetCurrentGravity();
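// launch velocity for a 160-unit hop: time = sqrt(2h/g) is the time to rise
// h = 160 units, and scaling the displacement by flGravity * time / 160
// makes the vertical component exactly g * time, enough to climb 160 units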
float time = sqrt(160 / (0.5 * flGravity));
float speed = flGravity * time / 160;
m_vecJumpVelocity = (vecDest - GetAbsOrigin()) * speed;
return COND_CAN_MELEE_ATTACK1;
}
if (flDist > 128)
return COND_TOO_FAR_TO_ATTACK;
return COND_NONE;
}
//=========================================================
// CheckRangeAttack1 - drop a cap in their ass
//
//=========================================================
int CNPC_HAssassin::RangeAttack1Conditions(float flDot, float flDist) {
if (!HasCondition(COND_ENEMY_OCCLUDED) && flDist > 64 && flDist <= 2048) {
trace_t tr;
Vector vecSrc = GetAbsOrigin() + m_HackedGunPos;
// verify that a bullet fired from the gun will hit the enemy before the world.
UTIL_TraceLine(vecSrc, GetEnemy()->BodyTarget(vecSrc), MASK_SOLID, this, COLLISION_GROUP_NONE, &tr);
if (tr.fraction == 1.0 || tr.m_pEnt == GetEnemy()) {
return COND_CAN_RANGE_ATTACK1;
}
}
return COND_NONE;
}
//=========================================================
// CheckRangeAttack2 - toss grenade if enemy gets in the way and is too close.
//=========================================================
int CNPC_HAssassin::RangeAttack2Conditions(float flDot, float flDist) {
m_fThrowGrenade = false;
if (!FBitSet (GetEnemy()->GetFlags(), FL_ONGROUND)) {
// don't throw grenades at anything that isn't on the ground!
return COND_NONE;
}
// don't get grenade happy unless the player starts to piss you off
if (m_iFrustration <= 2)
return COND_NONE;
if (m_flNextGrenadeCheck < gpGlobals->curtime && !HasCondition(COND_ENEMY_OCCLUDED) && flDist <= 512) {
Vector vTossPos;
QAngle vAngles;
GetAttachment("grenadehand", vTossPos, vAngles);
Vector vecToss = VecCheckThrow(this, vTossPos, GetEnemy()->WorldSpaceCenter(), flDist,
0.5); // use dist as speed to get there in 1 second
if (vecToss != vec3_origin) {
m_vecTossVelocity = vecToss;
// throw a hand grenade
m_fThrowGrenade = TRUE;
return COND_CAN_RANGE_ATTACK2;
}
}
return COND_NONE;
}
//=========================================================
// StartTask
//=========================================================
void CNPC_HAssassin::StartTask(const Task_t *pTask) {
switch (pTask->iTask) {
case TASK_RANGE_ATTACK2:
if (!m_fThrowGrenade) {
TaskComplete();
} else {
BaseClass::StartTask(pTask);
}
break;
case TASK_ASSASSIN_FALL_TO_GROUND:
m_flWaitFinished = gpGlobals->curtime + 2.0f;
break;
default:
BaseClass::StartTask(pTask);
break;
}
}
//=========================================================
// RunTask
//=========================================================
void CNPC_HAssassin::RunTask(const Task_t *pTask) {
switch (pTask->iTask) {
case TASK_ASSASSIN_FALL_TO_GROUND:
GetMotor()->SetIdealYawAndUpdate(GetEnemyLKP());
if (IsSequenceFinished()) {
if (GetAbsVelocity().z > 0) {
SetActivity(ACT_ASSASSIN_FLY_UP);
} else if (HasCondition(COND_SEE_ENEMY)) {
SetActivity(ACT_ASSASSIN_FLY_ATTACK);
SetCycle(0);
} else {
SetActivity(ACT_ASSASSIN_FLY_DOWN);
SetCycle(0);
}
ResetSequenceInfo();
}
if (GetFlags() & FL_ONGROUND) {
TaskComplete();
} else if (gpGlobals->curtime > m_flWaitFinished || GetAbsVelocity().z == 0.0) {
// I've waited two seconds and haven't hit the ground. Try to force it.
trace_t trace;
UTIL_TraceEntity(this, GetAbsOrigin(), GetAbsOrigin() - Vector(0, 0, 1), MASK_NPCSOLID, this,
COLLISION_GROUP_NONE, &trace);
if (trace.DidHitWorld()) {
SetGroundEntity(trace.m_pEnt);
} else {
// Try again in a couple of seconds.
m_flWaitFinished = gpGlobals->curtime + 2.0f;
}
}
break;
default:
BaseClass::RunTask(pTask);
break;
}
}
//=========================================================
// GetSchedule - Decides which type of schedule best suits
// the monster's current state and conditions. Then calls
// monster's member function to get a pointer to a schedule
// of the proper type.
//=========================================================
int CNPC_HAssassin::SelectSchedule(void) {
switch (m_NPCState) {
case NPC_STATE_IDLE:
case NPC_STATE_ALERT: {
if (HasCondition(COND_HEAR_DANGER) || HasCondition(COND_HEAR_COMBAT)) {
if (HasCondition(COND_HEAR_DANGER))
return SCHED_TAKE_COVER_FROM_BEST_SOUND;
else
return SCHED_INVESTIGATE_SOUND;
}
}
break;
case NPC_STATE_COMBAT: {
// dead enemy
if (HasCondition(COND_ENEMY_DEAD)) {
// call base class, all code to handle dead enemies is centralized there.
return BaseClass::SelectSchedule();
}
// flying?
if (GetMoveType() == MOVETYPE_FLYGRAVITY) {
if (GetFlags() & FL_ONGROUND) {
//Msg( "landed\n" );
// just landed
SetMoveType(MOVETYPE_STEP);
return SCHED_ASSASSIN_JUMP_LAND;
} else {
//Msg("jump\n");
// jump or jump/shoot
if (m_NPCState == NPC_STATE_COMBAT)
return SCHED_ASSASSIN_JUMP;
else
return SCHED_ASSASSIN_JUMP_ATTACK;
}
}
if (HasCondition(COND_HEAR_DANGER)) {
return SCHED_TAKE_COVER_FROM_BEST_SOUND;
}
if (HasCondition(COND_LIGHT_DAMAGE)) {
m_iFrustration++;
}
if (HasCondition(COND_HEAVY_DAMAGE)) {
m_iFrustration++;
}
// jump player!
if (HasCondition(COND_CAN_MELEE_ATTACK1)) {
//Msg( "melee attack 1\n");
return SCHED_MELEE_ATTACK1;
}
// throw grenade
if (HasCondition(COND_CAN_RANGE_ATTACK2)) {
//Msg( "range attack 2\n");
return SCHED_RANGE_ATTACK2;
}
// spotted
if (HasCondition(COND_SEE_ENEMY) && HasCondition(COND_ENEMY_FACING_ME)) {
//Msg("exposed\n");
m_iFrustration++;
return SCHED_ASSASSIN_EXPOSED;
}
// can attack
if (HasCondition(COND_CAN_RANGE_ATTACK1)) {
//Msg( "range attack 1\n" );
m_iFrustration = 0;
return SCHED_RANGE_ATTACK1;
}
if (HasCondition(COND_SEE_ENEMY)) {
//Msg( "face\n");
return SCHED_COMBAT_FACE;
}
// new enemy
if (HasCondition(COND_NEW_ENEMY)) {
//Msg( "take cover\n");
return SCHED_TAKE_COVER_FROM_ENEMY;
}
// ALERT( at_console, "stand\n");
return SCHED_ALERT_STAND;
}
break;
}
return BaseClass::SelectSchedule();
}
//=========================================================
// HandleAnimEvent - catches the monster-specific messages
// that occur when tagged animation frames are played.
//
// Returns number of events handled, 0 if none.
//=========================================================
void CNPC_HAssassin::HandleAnimEvent(animevent_t *pEvent) {
switch (pEvent->event) {
case ASSASSIN_AE_SHOOT1:
Shoot();
break;
case ASSASSIN_AE_TOSS1: {
Vector vTossPos;
QAngle vAngles;
GetAttachment("grenadehand", vTossPos, vAngles);
CHandGrenade *pGrenade = (CHandGrenade *) Create("grenade_hand", vTossPos, vec3_angle);
if (pGrenade) {
pGrenade->ShootTimed(this, m_vecTossVelocity, 2.0);
}
m_flNextGrenadeCheck = gpGlobals->curtime +
6;// wait six seconds before even looking again to see if a grenade can be thrown.
m_fThrowGrenade = FALSE;
// !!!LATER - when in a group, only try to throw grenade if ordered.
}
break;
case ASSASSIN_AE_JUMP: {
SetMoveType(MOVETYPE_FLYGRAVITY);
SetGroundEntity(NULL);
SetAbsVelocity(m_vecJumpVelocity);
m_flNextJump = gpGlobals->curtime + 3.0;
}
return;
default:
BaseClass::HandleAnimEvent(pEvent);
break;
}
}
//=========================================================
// Shoot
//=========================================================
void CNPC_HAssassin::Shoot(void) {
Vector vForward, vRight, vUp;
Vector vecShootOrigin;
QAngle vAngles;
if (GetEnemy() == NULL) {
return;
}
GetAttachment("guntip", vecShootOrigin, vAngles);
Vector vecShootDir = GetShootEnemyDir(vecShootOrigin);
if (m_flLastShot + 2 < gpGlobals->curtime) {
m_flDiviation = 0.10;
} else {
m_flDiviation -= 0.01;
if (m_flDiviation < 0.02)
m_flDiviation = 0.02;
}
m_flLastShot = gpGlobals->curtime;
AngleVectors(GetAbsAngles(), &vForward, &vRight, &vUp);
Vector vecShellVelocity = vRight * random->RandomFloat(40, 90) + vUp * random->RandomFloat(75, 200) +
vForward * random->RandomFloat(-40, 40);
EjectShell(GetAbsOrigin() + vUp * 32 + vForward * 12, vecShellVelocity, GetAbsAngles().y, 0);
FireBullets(1, vecShootOrigin, vecShootDir, Vector(m_flDiviation, m_flDiviation, m_flDiviation), 2048,
m_iAmmoType); // shoot +-8 degrees
//NDebugOverlay::Line( vecShootOrigin, vecShootOrigin + vecShootDir * 2048, 255, 0, 0, true, 2.0 );
CPASAttenuationFilter filter(this);
EmitSound(filter, entindex(), "HAssassin.Shot");
DoMuzzleFlash();
VectorAngles(vecShootDir, vAngles);
SetPoseParameter("shoot", vecShootDir.x);
m_cAmmoLoaded--;
}
//=========================================================
//=========================================================
int CNPC_HAssassin::TranslateSchedule(int scheduleType) {
// Msg( "%d\n", m_iFrustration );
switch (scheduleType) {
case SCHED_TAKE_COVER_FROM_ENEMY:
if (m_iHealth > 30)
return SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY1;
else
return SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY2;
case SCHED_TAKE_COVER_FROM_BEST_SOUND:
return SCHED_ASSASSIN_TAKE_COVER_FROM_BEST_SOUND;
case SCHED_FAIL:
if (m_NPCState == NPC_STATE_COMBAT)
return SCHED_ASSASSIN_FAIL;
break;
case SCHED_ALERT_STAND:
if (m_NPCState == NPC_STATE_COMBAT)
return SCHED_ASSASSIN_HIDE;
break;
//case SCHED_CHASE_ENEMY:
// return SCHED_ASSASSIN_HUNT;
case SCHED_MELEE_ATTACK1:
if (GetFlags() & FL_ONGROUND) {
if (m_flNextJump > gpGlobals->curtime) {
// can't jump yet, go ahead and fail
return SCHED_ASSASSIN_FAIL;
} else {
return SCHED_ASSASSIN_JUMP;
}
} else {
return SCHED_ASSASSIN_JUMP_ATTACK;
}
}
return BaseClass::TranslateSchedule(scheduleType);
}
//=========================================================
// RunAI
//=========================================================
void CNPC_HAssassin::RunAI(void) {
BaseClass::RunAI();
// always visible if moving
// always visible if not on hard
if (g_iSkillLevel != SKILL_HARD || GetEnemy() == NULL || m_lifeState == LIFE_DEAD || GetActivity() == ACT_RUN ||
GetActivity() == ACT_WALK || !(GetFlags() & FL_ONGROUND))
m_iTargetRanderamt = 255;
else
m_iTargetRanderamt = 20;
CPASAttenuationFilter filter(this);
if (GetRenderColor().a > m_iTargetRanderamt) {
if (GetRenderColor().a == 255) {
EmitSound(filter, entindex(), "HAssassin.Beamsound");
}
SetRenderColorA(MAX(GetRenderColor().a - 50, m_iTargetRanderamt));
m_nRenderMode = kRenderTransTexture;
} else if (GetRenderColor().a < m_iTargetRanderamt) {
SetRenderColorA(MIN(GetRenderColor().a + 50, m_iTargetRanderamt));
if (GetRenderColor().a == 255)
m_nRenderMode = kRenderNormal;
}
if (GetActivity() == ACT_RUN || GetActivity() == ACT_WALK) {
static int iStep = 0;
iStep = !iStep;
if (iStep) {
EmitSound(filter, entindex(), "HAssassin.Footstep");
}
}
}
AI_BEGIN_CUSTOM_NPC( monster_human_assassin, CNPC_HAssassin )
DECLARE_TASK( TASK_ASSASSIN_FALL_TO_GROUND )
DECLARE_ACTIVITY( ACT_ASSASSIN_FLY_UP )
DECLARE_ACTIVITY( ACT_ASSASSIN_FLY_ATTACK )
DECLARE_ACTIVITY( ACT_ASSASSIN_FLY_DOWN )
//=========================================================
// AI Schedules Specific to this monster
//=========================================================
//=========================================================
// Enemy exposed assassin's cover
//=========================================================
//=========================================================
// > SCHED_ASSASSIN_EXPOSED
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_EXPOSED,
" Tasks"
" TASK_STOP_MOVING 0"
" TASK_RANGE_ATTACK1 0"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_ASSASSIN_JUMP"
" TASK_SET_SCHEDULE SCHEDULE:SCHED_TAKE_COVER_FROM_ENEMY"
" "
" Interrupts"
" COND_CAN_MELEE_ATTACK1"
)
//=========================================================
// > SCHED_ASSASSIN_JUMP
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_JUMP,
" Tasks"
" TASK_STOP_MOVING 0"
" TASK_PLAY_SEQUENCE ACTIVITY:ACT_HOP"
" TASK_SET_SCHEDULE SCHEDULE:SCHED_ASSASSIN_JUMP_ATTACK"
" "
" Interrupts"
)
//=========================================================
// > SCHED_ASSASSIN_JUMP_ATTACK
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_JUMP_ATTACK,
" Tasks"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_ASSASSIN_JUMP_LAND"
" TASK_ASSASSIN_FALL_TO_GROUND 0"
" "
" Interrupts"
)
//=========================================================
// > SCHED_ASSASSIN_JUMP_LAND
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_JUMP_LAND,
" Tasks"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_ASSASSIN_EXPOSED"
" TASK_SET_ACTIVITY ACTIVITY:ACT_IDLE"
" TASK_REMEMBER MEMORY:CUSTOM1"
" TASK_FIND_NODE_COVER_FROM_ENEMY 0"
" TASK_RUN_PATH 0"
" TASK_FORGET MEMORY:CUSTOM1"
" TASK_WAIT_FOR_MOVEMENT 0"
" TASK_REMEMBER MEMORY:INCOVER"
" TASK_FACE_ENEMY 0"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_RANGE_ATTACK1"
" "
" Interrupts"
)
//=========================================================
// Fail Schedule
//=========================================================
//=========================================================
// > SCHED_ASSASSIN_FAIL
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_FAIL,
" Tasks"
" TASK_STOP_MOVING 0"
" TASK_SET_ACTIVITY ACTIVITY:ACT_IDLE"
" TASK_WAIT_FACE_ENEMY 2"
" TASK_SET_SCHEDULE SCHEDULE:SCHED_CHASE_ENEMY"
" "
" Interrupts"
" COND_LIGHT_DAMAGE"
" COND_HEAVY_DAMAGE"
" COND_CAN_RANGE_ATTACK1"
" COND_CAN_RANGE_ATTACK2"
" COND_CAN_MELEE_ATTACK1"
" COND_HEAR_DANGER"
" COND_HEAR_PLAYER"
)
//=========================================================
// > SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY1
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY1,
" Tasks"
" TASK_STOP_MOVING 0"
" TASK_WAIT 0.2"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_RANGE_ATTACK1"
" TASK_FIND_COVER_FROM_ENEMY 0"
" TASK_RUN_PATH 0"
" TASK_WAIT_FOR_MOVEMENT 0"
" TASK_REMEMBER MEMORY:INCOVER"
" TASK_FACE_ENEMY 0"
" "
" Interrupts"
" COND_CAN_MELEE_ATTACK1"
" COND_NEW_ENEMY"
" COND_HEAR_DANGER"
)
//=========================================================
// > SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY2
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY2,
" Tasks"
" TASK_STOP_MOVING 0"
" TASK_WAIT 0.2"
" TASK_FACE_ENEMY 0"
" TASK_RANGE_ATTACK1 0"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_RANGE_ATTACK1"
" TASK_FIND_COVER_FROM_ENEMY 0"
" TASK_RUN_PATH 0"
" TASK_WAIT_FOR_MOVEMENT 0"
" TASK_REMEMBER MEMORY:INCOVER"
" TASK_FACE_ENEMY 0"
" "
" Interrupts"
" COND_CAN_MELEE_ATTACK1"
" COND_NEW_ENEMY"
" COND_HEAR_DANGER"
)
//=========================================================
// hide from the loudest sound source
//=========================================================
//=========================================================
// > SCHED_ASSASSIN_TAKE_COVER_FROM_BEST_SOUND
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_TAKE_COVER_FROM_BEST_SOUND,
" Tasks"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_MELEE_ATTACK1"
" TASK_STOP_MOVING 0"
" TASK_FIND_COVER_FROM_BEST_SOUND 0"
" TASK_RUN_PATH 0"
" TASK_WAIT_FOR_MOVEMENT 0"
" TASK_REMEMBER MEMORY:INCOVER"
" TASK_TURN_LEFT 179"
" "
" Interrupts"
" COND_NEW_ENEMY"
)
//=========================================================
// > SCHED_ASSASSIN_HIDE
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_HIDE,
" Tasks"
" TASK_STOP_MOVING 0"
" TASK_SET_ACTIVITY ACTIVITY:ACT_IDLE"
" TASK_WAIT 2.0"
" TASK_SET_SCHEDULE SCHEDULE:SCHED_CHASE_ENEMY"
" Interrupts"
" COND_NEW_ENEMY"
" COND_SEE_ENEMY"
" COND_SEE_FEAR"
" COND_LIGHT_DAMAGE"
" COND_HEAVY_DAMAGE"
" COND_PROVOKED"
" COND_HEAR_DANGER"
)
//=========================================================
// > SCHED_ASSASSIN_HUNT
//=========================================================
DEFINE_SCHEDULE
(
SCHED_ASSASSIN_HUNT,
" Tasks"
" TASK_STOP_MOVING 0"
" TASK_SET_FAIL_SCHEDULE SCHEDULE:SCHED_ASSASSIN_TAKE_COVER_FROM_ENEMY2"
" TASK_GET_PATH_TO_ENEMY 0"
" TASK_RUN_PATH 0"
" TASK_WAIT_FOR_MOVEMENT 0"
" Interrupts"
" COND_NEW_ENEMY"
" COND_CAN_RANGE_ATTACK1"
" COND_HEAR_DANGER"
)
AI_END_CUSTOM_NPC()
|
import {
BadRequestException,
Injectable,
InternalServerErrorException,
NotAcceptableException,
NotFoundException,
} from '@nestjs/common';
import { InjectModel } from '@nestjs/mongoose';
import { Model } from 'mongoose';
import { UserDoc } from '../User/User.model';
import { Address, AddressAttrWithoutUserID } from './Address.model';
import { timeoutMongooseQuery } from '../utils/helperFunction/timeout';
interface AddressAttrWithIdAndUserId extends AddressAttrWithoutUserID {
id: string;
userId: string;
}
@Injectable()
export class AddressService {
constructor(
@InjectModel('Address') private readonly addressModel: Model<Address>,
) {}
async addAddressToDatabase(
addressObj: AddressAttrWithoutUserID,
userInfo: UserDoc,
) {
try {
const address = await new this.addressModel({
...addressObj,
userId: userInfo.id,
}).save();
return address;
} catch (e) {
throw new NotAcceptableException(e);
}
}
async editAddressToDatabase(
addressObj: AddressAttrWithIdAndUserId,
userInfo: UserDoc,
) {
try {
if (addressObj.userId === userInfo.id.toString()) {
const updatedAddress = await timeoutMongooseQuery(
this.addressModel.findByIdAndUpdate(
addressObj.id,
{ ...addressObj },
{ new: true },
),
);
if (updatedAddress) {
return updatedAddress;
} else {
throw new NotFoundException('cannot update the address with an incorrect id');
}
} else {
throw new BadRequestException('invalid address');
}
} catch (e) {
if (typeof e === 'string') {
throw new InternalServerErrorException(e);
} else {
throw new BadRequestException('something went wrong');
}
}
}
async getAllAddressFromDatabase(userId: string) {
try {
return await timeoutMongooseQuery(
this.addressModel.find({ userId: userId }),
);
} catch (e) {
if (typeof e === 'string') {
throw new InternalServerErrorException(e);
} else {
throw new BadRequestException('something went wrong');
}
}
}
async getAddressfromDatabase(id: string, userId: string) {
try {
return await timeoutMongooseQuery(
this.addressModel.findOne({ _id: id, userId }),
);
} catch (e) {
if (typeof e === 'string') {
throw new InternalServerErrorException(e);
} else {
throw new BadRequestException('something went wrong');
}
}
}
}
|
/**
* Create a session for validating a phone number.
* <br>
* The identity server will send an SMS message containing a token. If that token is presented to the identity server in the future,
* it indicates that that user was able to read the SMS for that phone number, and so we validate ownership of the phone number.
* <br>
* Note that homeservers offer APIs that proxy this API, adding additional behaviour on top, for example, /register/msisdn/requestToken
* is designed specifically for use when registering an account and therefore will inform the user if the phone number given is already
* registered on the server.
* <br>
* <b>Requires auth</b>:Yes.
* <br>
* Return: {@link SessionResponse}.
* <p>Status code 200: Session created.</p>
* <p>Status code 400: An error occurred. Some possible errors are:</p>
* <ul>
* <li>M_INVALID_ADDRESS: The phone number provided was invalid.</li>
* <li>M_SEND_ERROR: The validation SMS could not be sent.</li>
* <li>M_DESTINATION_REJECTED: The identity server cannot deliver an SMS to the provided country or region.</li>
* </ul>
*
* @param request JSON body request.
* @param uriInfo Request Information.
* @param httpHeaders Http headers.
* @param asyncResponse Asynchronous response.
* @param securityContext Security context.
*/
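// Example request body (field names follow the Matrix identity-service
// spec for this endpoint; they are not defined in this file):
// {
//   "client_secret": "monkeys_are_GREAT",
//   "country": "GB",
//   "phone_number": "07700900001",
//   "send_attempt": 1
// }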
@Operation(
summary = "Create a session for validating a phone number.",
description = "The identity server will send an SMS message containing a token. If that token is presented to the identity server"
+ " in the future, it indicates that that user was able to read the SMS for that phone number, and so we validate ownership"
+ " of the phone number.\nNote that homeservers offer APIs that proxy this API, adding additional behaviour on top,"
+ " for example, /register/msisdn/requestToken is designed specifically for use when registering an account and therefore will"
+ " inform the user if the phone number given is already registered on the server.",
responses = {
@ApiResponse(
responseCode = "200",
description = "Session created.",
content = @Content(
schema = @Schema(
implementation = SessionResponse.class
)
)
),
@ApiResponse(
responseCode = "400",
description = "An error occured.",
content = @Content(
schema = @Schema(
implementation = ErrorResponse.class
)
)
)
}
)
@POST
@Path("/validate/msisdn/requestToken")
void createPhoneSession(
@RequestBody(
description = "JSON body request",
required = true
) PhoneRequestToken request,
@Context UriInfo uriInfo,
@Context HttpHeaders httpHeaders,
@Suspended AsyncResponse asyncResponse,
@Context SecurityContext securityContext
); |
package org.nodel.core;
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicLong;
import org.nodel.Handler;
import org.nodel.SimpleName;
import org.nodel.threading.ThreadPool;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Base class for classes acting as Nodel channel servers.
*/
public abstract class ChannelServer {
/**
* (logging)
*/
private static AtomicLong s_instanceCounter = new AtomicLong();
/**
* (threading)
*/
protected static ThreadPool s_threadPool = new ThreadPool("Nodel channel-servers", 128);
/**
* (logging)
*/
protected long _instance = s_instanceCounter.getAndIncrement();
/**
* (logging)
*/
protected Logger _logger = LoggerFactory.getLogger(String.format("%s.%s_%d", ChannelServer.class.getName(), this.getClass().getSimpleName(), _instance));
/**
* Instance signal / lock.
*/
protected Object _signal = new Object();
/**
* Can only be enabled once. (not thread safe)
*/
protected boolean _enabled = false;
/**
* Delegate to call when a crippling failure occurs.
*/
protected Handler.H1<Throwable> _onFailure;
/**
* The nodel server used by this channel server.
*/
private NodelServers _nodelServer;
/**
* Holds the events filter list.
*/
private Map<SimpleName, List<String>> _eventFiltersByNode = new HashMap<SimpleName, List<String>>();
/**
* Holds the action filter list.
*/
private Map<SimpleName, List<String>> _actionFiltersByNode = new HashMap<SimpleName, List<String>>();
public ChannelServer(NodelServers nodelServer) {
_nodelServer = nodelServer;
}
/**
* Attaches / detaches the failure handler.
* (unicast delegate, delegate must not block)
*
* @param handler null to clear.
*/
public void attachFailureHandler(Handler.H1<Throwable> handler) {
synchronized (_signal) {
if (_onFailure != null && handler != null)
throw new IllegalArgumentException("Handler is already set; must be cleared first using 'null'.");
_onFailure = handler;
}
} // (method)
/**
* Sends a message down the channel. (exception free, non-blocking)
*/
protected abstract void sendMessage(ChannelMessage message);
/**
* Sends an event message down the channel, applying any 'interest'
* filtering. (exception free, non-blocking)
*/
protected void sendEventMessage(String nodeName, String originalEvent, Object arg) {
SimpleName node = new SimpleName(nodeName);
String reducedEvent = Nodel.reduceToLower(originalEvent);
synchronized (_signal) {
List<String> eventFilters = _eventFiltersByNode.get(node);
if (eventFilters == null)
return;
// find the first matching event filter and use it
boolean found = false;
for (String eventFilter : eventFilters) {
if (Nodel.filterMatch(reducedEvent, eventFilter)) {
found = true;
break;
}
} // (for)
if (!found)
return;
}
ChannelMessage message = new ChannelMessage();
message.node = nodeName;
message.event = originalEvent;
message.arg = arg;
sendMessage(message);
} // (method)
/**
* (RESERVED)
*/
protected void sendMovedMessage(SimpleName node) {
ChannelMessage message = new ChannelMessage();
message.node = node.getReducedName();
message.announcement = ChannelMessage.Announcement.Moved;
sendMessage(message);
} // (method)
/**
* Sends a response to an "interests" request. (exception free,
* non-blocking)
*/
protected void sendInterestsResponse(String nodeName, String[] actions, String[] events) {
ChannelMessage response = new ChannelMessage();
response.node = nodeName;
response.events = events;
response.actions = actions;
sendMessage(response);
} // (method)
/**
* Sends a response to an "invoke" request. (exception free, non-blocking)
*/
private void sendInvokeResponseLookupFailure(String nodeName, String action) {
ChannelMessage response = new ChannelMessage();
response.node = nodeName;
response.error = "Action not found";
response.action = action;
sendMessage(response);
} // (method)
/**
* Starts processing. (may briefly block)
*/
public abstract void start();
/**
* Processes incoming messages.
*/
protected void handleMessage(final ChannelMessage message) {
_logger.info("Server: message arrived: " + message);
// 'interests' request
if (message.node != null && (message.events != null || message.actions != null)) {
SimpleName node = new SimpleName(message.node);
// register interest in the node
_nodelServer.registerInterest(this, message.node);
synchronized (_signal) {
// go through events
if (message.events != null) {
for (String event : message.events)
doAddEventFilter(node, event);
}
// go through actions
if (message.actions != null) {
for (String action : message.actions)
doAddActionFilter(node, action);
}
}
// determine all interests that have been matched
SimpleName[] allEvents = _nodelServer.getRegisteredEvents(node);
SimpleName[] allActions = _nodelServer.getRegisteredActions(node);
// filter out the events and actions
List<SimpleName> matchedEvents;
List<SimpleName> matchedActions;
synchronized (_signal) {
matchedEvents = new ArrayList<SimpleName>();
for (SimpleName event : allEvents) {
List<String> eventFilters = _eventFiltersByNode.get(node);
if (eventFilters != null && containsMatchingFilter(node, event.getReducedForMatchingName(), eventFilters))
matchedEvents.add(event);
}
matchedActions = new ArrayList<SimpleName>();
for (SimpleName action : allActions) {
List<String> actionFilters = _actionFiltersByNode.get(node);
if (actionFilters != null && containsMatchingFilter(node, action.getReducedForMatchingName(), actionFilters))
matchedActions.add(action);
}
}
// respond
sendInterestsResponse(message.node, SimpleName.intoOriginals(matchedActions), SimpleName.intoOriginals(matchedEvents));
return;
}
// 'invoke' request
if (message.node != null && message.action != null) {
final NodelServerAction handler = _nodelServer.getActionRequestHandler(message.node, message.action);
if (handler == null) {
sendInvokeResponseLookupFailure(message.node, message.action);
return;
}
// invoke on a separate thread
s_threadPool.execute(new Runnable() {
@Override
public void run() {
try {
// call the action
handler.handleActionRequest(message.arg);
} catch (Exception exc) {
// ignore exception
}
}
});
return;
}
} // (method)
/**
* (assumes locked)
*/
private void doAddEventFilter(SimpleName node, String eventFilter) {
String reducedEvent = Nodel.reduceFilter(eventFilter);
List<String> eventsList = _eventFiltersByNode.get(node);
if (eventsList == null) {
eventsList = new ArrayList<String>();
_eventFiltersByNode.put(node, eventsList);
}
if (!eventsList.contains(reducedEvent))
eventsList.add(reducedEvent);
} // (method)
/**
* (assumes locked)
*/
private void doAddActionFilter(SimpleName node, String actionFilter) {
String reducedAction = Nodel.reduceFilter(actionFilter);
List<String> actionsList = _actionFiltersByNode.get(node);
if (actionsList == null) {
actionsList = new ArrayList<String>();
_actionFiltersByNode.put(node, actionsList);
}
if (!actionsList.contains(reducedAction))
actionsList.add(reducedAction);
} // (method)
/**
* (args prechecked)
*/
private boolean containsMatchingFilter(SimpleName node, String value, List<String> filters) {
// find the first matching event filter
for (String eventFilter : filters) {
if (Nodel.filterMatch(value, eventFilter))
return true;
} // (for)
return false;
} // (method)
/**
* When a serious permanent failure occurs.
*/
protected void handleFailure(Exception exc) {
// complete clean up
if (_onFailure != null)
_onFailure.handle(exc);
}
} // (class)
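
/*
 * A minimal sketch of a concrete subclass, showing the contract the base class
 * expects: 'start()' brings the transport up and 'sendMessage' puts a
 * ChannelMessage on the wire. The class name and in-memory "transport" here are
 * hypothetical and for illustration only.
 */
class LoopbackChannelServer extends ChannelServer {

    /**
     * (illustration) messages that would have gone out on the wire
     */
    private final java.util.Queue<ChannelMessage> _outgoing = new java.util.concurrent.ConcurrentLinkedQueue<ChannelMessage>();

    public LoopbackChannelServer(NodelServers nodelServer) {
        super(nodelServer);
    }

    @Override
    public void start() {
        // a real implementation would bind its transport here (may briefly block)
        _enabled = true;
    }

    @Override
    protected void sendMessage(ChannelMessage message) {
        // exception free, non-blocking: just queue the message in memory
        _outgoing.add(message);
    }

} // (class)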
|
// Sajal Asati
// Minimum-cost power grid: every city either builds its own power station or is
// wired to an already-powered city; modelled as an MST over the cities plus a
// virtual source node (this appears to be Codeforces 1245D "Shichikuji and Power Grid").
#include<bits/stdc++.h>
using namespace std;
typedef long long ll;
typedef long double ld;
#define pb push_back
#define mkp make_pair
#define pf push_front
#define ff first
#define ss second
// #define endl "\n"
#define lpstl(i,v) for(auto &i: v)
#define all(a) a.begin(),a.end()
#define rall(x) (x).rbegin(),(x).rend()
#define sz(a) (ll)(a.size())
#define tr(container, it) \
for(auto it = container.begin(); it != container.end(); ++it)
//containers
#define ii pair<int,int>
#define pll pair<ll,ll>
#define vi vector<int>
#define vll vector<ll>
#define vld vector<ld>
#define vii vector<ii>
#define vpll vector<pll>
#define pq priority_queue
#define minpq priority_queue <int, vector<int>, greater<int>>
const ll init = 1e9+7;
const ll N = 2010;
const ll inf = 1e18+7;
vll dist(N,inf);
vpll adj[N]; // adjacency list storing (neighbour, edge weight) pairs
vll visited(N,0), parent(N,-1);
priority_queue < pll, vector<pll>, greater<pll>> mpq;//min heap
// Despite its name, this is Prim's MST algorithm: dist[v] holds the cheapest
// single edge connecting v to the tree grown so far, not a cumulative path length.
void dijkstra(int start){
dist[start]=0;
mpq.push(mkp(0,start));
while(!mpq.empty()){
pll x = mpq.top(); mpq.pop();
ll cur_dist = x.ff, ver = x.ss;
if(!visited[ver]){
visited[ver]=1;
for(auto &u: adj[ver]){
if(!visited[u.ff] && u.ss < dist[u.ff]){
dist[u.ff] = u.ss;
parent[u.ff] = ver;
mpq.push(mkp(dist[u.ff],u.ff));
}
}
}
}
}
int main()
{
ios_base::sync_with_stdio(0); cin.tie(0); cout.tie(0);
int n; cin>>n;
vpll v(N);
for(int i=1;i<=n;++i){
ll a,b; cin>>a>>b;
v[i]={a,b};
}
vll v2(N); for(int i=1;i<=n;++i) cin>>v2[i];
vll v3(N); for(int i=1;i<=n;++i) cin>>v3[i];
    for(int i=1;i<=n;++i){
        for(int j=i+1;j<=n;++j){
            // wiring cost between cities i and j: Manhattan distance times (k_i + k_j)
            ll ct = (abs(v[i].ff-v[j].ff) + abs(v[i].ss-v[j].ss))*(v3[i]+v3[j]);
            adj[i].pb({j,ct});
            adj[j].pb({i,ct});
        }
    }
    // virtual node 0: an edge 0-i of weight v2[i] models building a station directly in city i
    for(int i=1;i<=n;++i){
adj[i].pb({0,v2[i]});
adj[0].pb({i,v2[i]});
}
dijkstra(0);
ll sum=0;
vi ans;
vii ans2;
    for(int i=1;i<=n;++i){
        if(parent[i]==0) ans.pb(i); // attached straight to the virtual source: builds its own station
        else{
            int a=parent[i];
            ans2.pb({max(a,i),min(a,i)});
        }
        sum += dist[i];
    }
cout<<sum<<endl;
cout<<ans.size()<<endl;
    for(int i=0;i<sz(ans);++i){
cout<<ans[i]<<" ";
}cout<<endl;
cout<<ans2.size()<<endl;
for(auto u: ans2){
cout<<u.ff<<" "<<u.ss<<endl;
}
return 0;
} |
// Code generated. DO NOT EDIT.
package config
const Version = "v0.9.10-alpha.9"
|
Atlanta United’s first ever preseason training camp convenes on Monday in Bradenton, Florida at the IMG Academy. As the club embarks on their inaugural season, the technical staff has been hard at work assembling a roster to be competitive in MLS from the start. All indications are that the roster is not yet complete, so there will likely be shakeups between now and First Kick on March 5th.
However, to get a good understanding of how the roster is shaping up, let’s take a look at what a possible depth chart might look like if Tata Martino had one of those giant position boards like you see in all the American football movies. Here’s how we see the position depth playing out as the team arrives for training camp:
Goalkeepers
Jason: The potential Brad Guzan acquisition makes this a tricky situation. In a perfect world, Tambakis and Kapp get experience on loan while Kann backs up Guzan. Without Guzan in place, that could affect Tambakis’ loan opportunities.
Rob: With the rumored arrival of Brad Guzan yet to come to fruition, Alec Kann appears to have the inside track at the No. 1 spot. Worst case scenario seems to be Guzan joining in July. If that’s the case, Kann can hold the fort down until then without much of an issue. Tambakis could yo-yo between Atlanta and the USL affiliate depending on the need for a back-up. Kapp is a sure bet to spend the season on loan somewhere.
Outside Backs
Jason: Garza and Bloom should be the guys here. They fit Tata Martino’s system very well, as long as they can stay healthy. The depth is currently lacking, with Ambrose the only dedicated outside back. Loyd will see some time on the right, but he is a better option for depth in the middle.
Rob: Greg Garza is as much of a lock to start as anyone on the roster if he’s healthy. His international and club experience will be invaluable to a young team. At right back, the picture is a little less clear. Mark Bloom seems the natural fit, but don’t be surprised to see Zach Loyd get minutes there too if another starting-caliber center back is signed. Mikey Ambrose should get his fair share of minutes given the age across the back line.
Center Backs
Jason: Parkhurst’s experience is critical to how Atlanta’s defense performs in 2017. If the Leandro Gonzalez Pirez acquisition happens, he is an ideal partner to start. Loyd’s injury history is a concern, but he should see his fair share of playing time. Larentowicz is a solid option here when needed. The club is lucky not to have to rush Robinson into duty straight away. With his time away with the U20 national team, he will not always be available.
Rob: Michael Parkhurst will be the rock Atlanta United build around in the back. He seems to be a lock, with his partner yet to be decided. As of now, Loyd has the edge on the other spot, but we expect an additional signing soon. Larentowicz will have a part to play in midfield and defense, but he likely won’t be starting many matches if all goes to plan. Miles Robinson will start at the bottom like most draft picks, but expectations will be high for him to climb up the depth chart as his rookie season progresses.
Central Midfielders
Jason: As it stands today, a combination of McCann, Almiron, and Asad should be the starting trio. Larentowicz provides a more purely defensive option at the #6. Kratz and Heath can fill any of the central midfield roles. If Asad can regain his form from 2015, this could be a dangerous group for the opposition.
Rob: This position is the most difficult to project simply because we don’t know what formation Tata Martino will favor. Chris McCann is your main holding midfielder. Expect him in the lineup at whichever spot is the most defensive with Larentowicz being his cover.
Miguel Almiron will likely be the main attacking midfielder in some fashion. He’s not the prototypical No. 10 but he’ll float all around the field and act as a playmaker. Asad is another versatile player who could wind up on the wing or in the midfield. A wildcard in the mix is Kevin Kratz. We don’t know much about him as a player. If he manages to contribute, then Atlanta’s midfield is in very good shape.
Wingers
Jason: If there is anywhere that I see the club adding the third Designated Player, I think it’s on the left wing. Peterson will be a good player for depth, since he can cover both sides, but I think the club will be looking for more of an impact on the left. Villalba will have high expectations on the right. With Carleton due to spend quite a bit of time with the U17 national team, his contributions will likely be limited.
Rob: Versatility will play a big part on the wing. Players like Almiron and Asad can easily drift out there if necessary. As of now, Jacob Peterson has a good chance at playing time on the left wing with Hector Villalba a great bet to start on the right. This is all subject to change if another attacking player, possibly a third DP, is added. Andrew Carleton is very promising, but it’s probably best to exercise as much caution with him as possible in the opening stages of the season.
Strikers
Jason: Lots of pressure on Kenwyne Jones to be a big goal scorer for this team, and to also be the fulcrum of the attack. Vazquez will spend quite a bit of time away with the U20’s. Williams and Otoo would provide a different option up top than Jones, but could also see time out on loan. Gressel was drafted to contribute in the midfield, but he could be forced to reprise his senior season in college playing as a #9.
Rob: Who will score the goals? Hopefully Kenwyne Jones is the answer to that question. He’s the main man in front of goal as the roster currently stands, and that doesn’t seem to be changing anytime soon. Vazquez seems a lock to be his back-up, but his availability could be affected by international duty. Behind him it gets murky, with little-to-no MLS experience. Despite his designation as a midfielder, I believe Julian Gressel could fit a high pressing system very well as a striker if needed. |
Scientists have moved a step closer to correcting some unhealthy gene mutations with diet, according to a new research report appearing in the April 2012 issue of the journal Genetics. Researchers from the University of California, Berkeley, determined variations and responses to vitamin treatment in the human cystathionine beta synthase gene, which when defective, causes the disease homocystinuria, an inherited metabolic disorder sometimes treatable with vitamin B6. After the analysis, scientists correlated specific gene mutations with severity of the disease, ranging from perfectly healthy and functional to severe and untreatable.
Although the current study focused on homocystinuria, testing the effects of naturally occurring gene variations using surrogate organism genetics can be applied to other inherited disorders, such as neural tube defects, cleft palate, and blindness.
"The era of personal genome sequences is upon us, but there is a growing gap between the ability to sequence human genomes and the ability to understand the significance of variation in genome sequences," said Jasper Rine, Ph.D., the principal investigator of this research in the Department of Molecular and Cell Biology at the California Institute of Quantitative Biosciences at the University of California, Berkeley. "This study demonstrates one way to close the gap; the data separate gene variants into distinct classes, including a group amenable to dietary intervention."
To make their determination, scientists "swapped" the cystathionine beta synthase gene of baker's yeast with the gene from humans to test which variants were healthy, treatable, or untreatable with additional vitamin B6. As a result, the study clarified the function of 84 DNA sequence variants in this gene, which will help physicians more effectively treat patients based on their particular genotypes. In addition, this approach opens doors for future studies examining other human genes that similarly cross over between humans and yeast.
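
The three-way classification the study describes can be pictured with a small sketch. This is purely illustrative, not the study's actual analysis pipeline: the growth scores and cutoff below are invented, though I278T and G307S are real CBS alleles commonly cited as B6-responsive and B6-non-responsive, respectively.

# classify_variant: invented illustration of the functional / remediable / non-remediable split
def classify_variant(growth_low_b6, growth_high_b6, healthy_cutoff=0.8):
    """Classify a CBS variant from normalized yeast growth (1.0 = wild type)."""
    if growth_low_b6 >= healthy_cutoff:
        return "functional"        # grows fine even without extra B6
    if growth_high_b6 >= healthy_cutoff:
        return "B6-remediable"     # rescued by vitamin B6 supplementation
    return "non-remediable"        # severe; diet alone will not help

variants = {
    "I278T": (0.15, 0.85),  # invented scores for a B6-responsive allele
    "G307S": (0.05, 0.10),  # invented scores for a non-responsive allele
}
for name, (low, high) in variants.items():
    print(name, classify_variant(low, high))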
"We may have the DNA sequence of the human genome, but we're still trying to figure out what it means," said Mark Johnston, Editor-in-Chief of the journal Genetics. "This study moves us a step closer toward better understanding the genetic variability among people. More immediately, knowledge of these gene mutations will help physicians prescribe treatment based on genotype rather than outward symptoms or trial and error." |
<reponame>rhyep/Python_tutorials<filename>Language Skills/Python/Unit 07 Lists and Functions/01 Lists and Functions/List Recap/1-List accessing.py
n = [1, 3, 5]
# Add your code below
print n[1]
|