query stringlengths 7-3.85k | document stringlengths 11-430k | metadata dict | negatives sequencelengths 0-101 | negative_scores sequencelengths 0-101 | document_score stringlengths 3-10 | document_rank stringclasses 102 values |
---|---|---|---|---|---|---|
PersistentVolumeName returns the name of the PV for this bucket. | func (bc BucketConfig) PersistentVolumeName(prefix string) string {
return prefix + "-" + bc.hash()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func getPodNameFromPersistentVolume(pv *v1.PersistentVolume) *spec.NamespacedName {\n\tnamespace := pv.Spec.ClaimRef.Namespace\n\tname := pv.Spec.ClaimRef.Name[len(constants.DataVolumeName)+1:]\n\treturn &spec.NamespacedName{Namespace: namespace, Name: name}\n}",
"func (o FioSpecVolumePersistentVolumeClaimSpecOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumePersistentVolumeClaimSpec) *string { return v.VolumeName }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceStorageosOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceStorageos) *string { return v.VolumeName }).(pulumi.StringPtrOutput)\n}",
"func (o ArgoCDExportSpecStoragePvcOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDExportSpecStoragePvc) *string { return v.VolumeName }).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceStorageosOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceStorageos) *string { return v.VolumeName }).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumePersistentVolumeClaimSpecOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumePersistentVolumeClaimSpec) *string { return v.VolumeName }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumePersistentVolumeClaimSpecPtrOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumePersistentVolumeClaimSpec) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.VolumeName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o GetAppTemplateVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o AppTemplateVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o GetVpdsVpdOutput) VpdName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVpdsVpd) string { return v.VpdName }).(pulumi.StringOutput)\n}",
"func (o FioSpecVolumeVolumeSourceStorageosPtrOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceStorageos) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.VolumeName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceStorageosPtrOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceStorageos) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.VolumeName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumePersistentVolumeClaimSpecPtrOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumePersistentVolumeClaimSpec) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.VolumeName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o ArgoCDExportSpecStoragePvcPtrOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDExportSpecStoragePvc) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.VolumeName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (p *PersistentVolume) TableName() string {\n\treturn \"persistent_volume\"\n}",
"func (o GetAppTemplateVolumeOutput) StorageName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GetAppTemplateVolume) *string { return v.StorageName }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceScaleIOOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceScaleIO) *string { return v.VolumeName }).(pulumi.StringPtrOutput)\n}",
"func (o AppTemplateVolumeOutput) StorageName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AppTemplateVolume) *string { return v.StorageName }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceScaleIOPtrOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceScaleIO) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.VolumeName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (v *btrfsVolume) Name() string {\n\treturn filepath.Base(v.Path())\n}",
"func (o IopingSpecVolumeVolumeSourceScaleIOOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceScaleIO) *string { return v.VolumeName }).(pulumi.StringPtrOutput)\n}",
"func (p *PersistentVolume) Delete(name string) error {\n\tvar b bytes.Buffer\n\tpath := strutil.Concat(\"/api/v1/persistentvolumes/\", name)\n\n\tresp, err := p.client.Delete(p.addr).\n\t\tPath(path).\n\t\tDo().\n\t\tBody(&b)\n\n\tif err != nil {\n\t\treturn errors.Errorf(\"failed to delete pv, name: %s, (%v)\", name, err)\n\t}\n\tif !resp.IsOK() {\n\t\tif resp.IsNotfound() {\n\t\t\treturn k8serror.ErrNotFound\n\t\t}\n\n\t\treturn errors.Errorf(\"failed to delete pv, name: %s, statuscode: %v, body: %v\",\n\t\t\tname, resp.StatusCode(), b.String())\n\t}\n\treturn nil\n}",
"func (r *ReconcileZdyfapi) NameNewPv(m *zdyfv1alpha1.Zdyfapi) *corev1.PersistentVolume {\n\n\tnpv := &corev1.PersistentVolume{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tKind: \"PersistentVolume\",\n\t\t\tAPIVersion: \"apps/v1\",\n\t\t},\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: m.Spec.NamePVName,\n\t\t\tNamespace: m.Namespace,\n\t\t\tLabels: map[string]string{\"app\": \"nn-pv-1\"},\n\t\t\t//Annotations: map[string]string{\"type\": \"namenode\"},\n\t\t\tOwnerReferences: []metav1.OwnerReference{\n\t\t\t\t*metav1.NewControllerRef(m, schema.GroupVersionKind{\n\t\t\t\t\tGroup: zdyfv1alpha1.SchemeGroupVersion.Group,\n\t\t\t\t\tVersion: zdyfv1alpha1.SchemeGroupVersion.Version,\n\t\t\t\t\tKind: \"Zdyfapi\",\n\t\t\t\t}),\n\t\t\t},\n\t\t},\n\t\tSpec: corev1.PersistentVolumeSpec{\n\t\t\tCapacity: corev1.ResourceList{\n\t\t\t\t\"storage\": resource.MustParse(m.Spec.NamePvStorage),\n\t\t\t},\n\t\t\t//VolumeMode: &corev1.PersistentVolumeFilesystem,\n\t\t\tAccessModes: []corev1.PersistentVolumeAccessMode{\n\t\t\t\tcorev1.ReadWriteOnce,\n\t\t\t},\n\t\t\tStorageClassName: m.Spec.NameSCName, //local volume 只提供了卷的延迟绑定\n\t\t\tPersistentVolumeReclaimPolicy: corev1.PersistentVolumeReclaimDelete,\n\t\t\tPersistentVolumeSource: corev1.PersistentVolumeSource{\n\t\t\t\tLocal: &corev1.LocalVolumeSource{\n\t\t\t\t\tPath: \"/var/lib/docker/volumes/namenode/_data\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tNodeAffinity: &corev1.VolumeNodeAffinity{\n\t\t\t\tRequired: &corev1.NodeSelector{\n\t\t\t\t\tNodeSelectorTerms: []corev1.NodeSelectorTerm{{ //pv node 关联没设置好\n\t\t\t\t\t\tMatchExpressions: []corev1.NodeSelectorRequirement{{\n\t\t\t\t\t\t\tKey: \"kubernetes.io/hostname\",\n\t\t\t\t\t\t\tOperator: corev1.NodeSelectorOpIn,\n\t\t\t\t\t\t\tValues: []string{\"docker-desktop\"}, //设置节点\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\treturn npv\n}",
"func (o IopingSpecVolumeVolumeSourceScaleIOPtrOutput) VolumeName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceScaleIO) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.VolumeName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceStorageosSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceStorageosSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceStorageosSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceStorageosSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (h *Handler) GetPV(object interface{}) ([]string, error) {\n\t// It does not need to check whether the pod exists,\n\t// GetPVC will do check.\n\tpvcList, err := h.GetPVC(object)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar pl []string\n\tfor _, pvc := range pvcList {\n\t\tpvcObj, err := h.clientset.CoreV1().\n\t\t\tPersistentVolumeClaims(h.namespace).Get(h.ctx, pvc, h.Options.GetOptions)\n\t\tif err == nil {\n\t\t\tpl = append(pl, pvcObj.Spec.VolumeName)\n\t\t}\n\t}\n\treturn pl, nil\n}",
"func (o OpenZfsVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *OpenZfsVolume) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func GetVolumeName(opts MountOptions) string {\n\tif opts.PVName != \"\" {\n\t\treturn opts.PVName\n\t}\n\treturn opts.genVolumeName()\n}",
"func (o GroupContainerVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupContainerVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceStorageosSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceStorageosSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceStorageosSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceStorageosSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceCsiNodePublishSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCsiNodePublishSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (v *Volume) Name() string {\n\treturn v.config.Name\n}",
"func (o FioSpecVolumeVolumeSourceGcePersistentDiskOutput) PdName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceGcePersistentDisk) string { return v.PdName }).(pulumi.StringOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceGcePersistentDiskOutput) PdName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceGcePersistentDisk) string { return v.PdName }).(pulumi.StringOutput)\n}",
"func (*KeyService) Name() string {\n\treturn \"PIV\"\n}",
"func (o OntapStorageVirtualMachineOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *OntapStorageVirtualMachine) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o FioSpecVolumeVolumeSourceCsiNodePublishSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCsiNodePublishSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceCsiNodePublishSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceCsiNodePublishSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o TriggerBuildOptionsVolumeOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TriggerBuildOptionsVolume) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o TriggerBuildStepVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v TriggerBuildStepVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceCsiNodePublishSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceCsiNodePublishSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (p *PersistentVolumeClaim) TableName() string {\n\treturn \"persistent_volume_claim\"\n}",
"func (o ClusterBuildStrategySpecBuildStepsVolumeMountsOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsVolumeMounts) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o BuildStrategySpecBuildStepsVolumeMountsOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsVolumeMounts) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o GetAppTemplateContainerVolumeMountOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerVolumeMount) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o GroupInitContainerVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GroupInitContainerVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func GenerateVolumeName(clusterName, pvName string, maxLength int) string {\n\tprefix := clusterName + \"-dynamic\"\n\tpvLen := len(pvName)\n\n\t// cut the \"<clusterName>-dynamic\" to fit full pvName into maxLength\n\t// +1 for the '-' dash\n\tif pvLen+1+len(prefix) > maxLength {\n\t\tprefix = prefix[:maxLength-pvLen-1]\n\t}\n\treturn prefix + \"-\" + pvName\n}",
"func (o CassandraPartitionKeyOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v CassandraPartitionKey) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o KeyVaultPropertiesOutput) KeyName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KeyVaultProperties) *string { return v.KeyName }).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceScaleIOSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceScaleIOSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o *V1VolumeClaim) GetName() string {\n\tif o == nil || o.Name == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Name\n}",
"func (o IopingSpecVolumeVolumeSourceIscsiSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceIscsiSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o VolumeV2Output) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *VolumeV2) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o AppTemplateContainerVolumeMountOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerVolumeMount) string { return v.Name }).(pulumi.StringOutput)\n}",
"func generateVolumeName(secretName string) string {\n\tnameSlices := strings.Split(secretName, \".\")\n\tvolName := \"\"\n\tif len(nameSlices) > 1 {\n\t\tvolName = nameSlices[1]\n\t} else {\n\t\tvolName = nameSlices[0]\n\t}\n\treturn volName\n}",
"func (o FioSpecVolumeVolumeSourceScaleIOSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceScaleIOSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceIscsiSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceIscsiSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceFlexVolumeSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceFlexVolumeSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceFlexVolumeSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceFlexVolumeSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (op *EncryptVolumesOperation) Name() string {\n\treturn op.lro.Name()\n}",
"func (o VirtualDatabaseSpecEnvValueFromSecretKeyRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VirtualDatabaseSpecEnvValueFromSecretKeyRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (pi *PackageInfo) FileVName(file *ast.File) *spb.VName {\n\tif v := pi.fileVName[file]; v != nil {\n\t\treturn v\n\t}\n\tv := proto.Clone(pi.VName).(*spb.VName)\n\tv.Language = \"\"\n\tv.Signature = \"\"\n\tv.Path = pi.FileSet.Position(file.Pos()).Filename\n\treturn v\n}",
"func (o FioSpecVolumeVolumeSourceScaleIOSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceScaleIOSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o KeyVaultPropertiesResponseOutput) KeyName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v KeyVaultPropertiesResponse) *string { return v.KeyName }).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceScaleIOSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceScaleIOSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceGcePersistentDiskPtrOutput) PdName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceGcePersistentDisk) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PdName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (op *DeleteVolumeOperation) Name() string {\n\treturn op.lro.Name()\n}",
"func (s *Service) Get(ctx context.Context, name string) (*corev1.PersistentVolumeClaim, error) {\n\tn, err := s.client.CoreV1().PersistentVolumeClaims(defaultNamespaceName).Get(ctx, name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"get persistentVolumeClaim: %w\", err)\n\t}\n\treturn n, nil\n}",
"func (o FioSpecVolumeVolumeSourceIscsiSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceIscsiSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceGcePersistentDiskPtrOutput) PdName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceGcePersistentDisk) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.PdName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceIscsiSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceIscsiSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o GetVolumeGroupSapHanaVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetVolumeGroupSapHanaVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o VolumeGroupSapHanaVolumeOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v VolumeGroupSapHanaVolume) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o KeyVaultPropertiesPtrOutput) KeyName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *KeyVaultProperties) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.KeyName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceRbdSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceRbdSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (v *VFile) Name() string {\n\treturn v.FileName\n}",
"func (o FioSpecVolumeVolumeSourceAzureFilePtrOutput) SecretName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceAzureFile) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.SecretName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o KeyVaultPropertiesResponsePtrOutput) KeyName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *KeyVaultPropertiesResponse) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.KeyName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceFlexVolumeSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceFlexVolumeSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceRbdSecretRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceRbdSecretRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o VirtualDatabaseSpecDatasourcesPropertiesValueFromSecretKeyRefPtrOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *VirtualDatabaseSpecDatasourcesPropertiesValueFromSecretKeyRef) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Name\n\t}).(pulumi.StringPtrOutput)\n}",
"func (op *CreateVolumeOperation) Name() string {\n\treturn op.lro.Name()\n}",
"func (o IopingSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumePersistentVolumeClaimSpecDataSource) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceFlexVolumeSecretRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceFlexVolumeSecretRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (s *Set) vname(n node) *spb.VName {\n\treturn &spb.VName{\n\t\tSignature: s.symbol(n.signature),\n\t\tCorpus: s.symbol(n.corpus),\n\t\tPath: s.symbol(n.path),\n\t\tRoot: s.symbol(n.root),\n\t\tLanguage: s.symbol(n.language),\n\t}\n}",
"func findPVByPVCName(ctx context.Context, coreClient corev1client.CoreV1Interface, name string) (*corev1.PersistentVolume, error) {\n\t// unfortunately we can't do \"coreClient.PersistentVolumeClaims(\"\").Get(ctx, name, ... )\"\n\tpvcs, err := coreClient.PersistentVolumeClaims(\"\").List(ctx, metav1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pvc *corev1.PersistentVolumeClaim\n\tfor _, p := range pvcs.Items {\n\t\tif p.Name == name {\n\t\t\tpvc = &p\n\t\t\tbreak\n\t\t}\n\t}\n\tif pvc == nil {\n\t\treturn nil, fmt.Errorf(\"can't find any %s persistentvolumeclaim\", name)\n\t}\n\tpvName := pvc.Spec.VolumeName\n\tpv, err := coreClient.PersistentVolumes().Get(ctx, pvName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn pv, nil\n}",
"func (s *persistentVolumeLister) Get(name string) (*corev1.PersistentVolume, error) {\n\treturn s.client.CoreV1().PersistentVolumes().Get(name, metav1.GetOptions{})\n}",
"func CreatePV(ctx context.Context, kubeCli kubernetes.Interface, vol *blockstorage.Volume, volType blockstorage.Type, annotations map[string]string, accessmodes []v1.PersistentVolumeAccessMode, volumemode *v1.PersistentVolumeMode) (string, error) {\n\tsizeFmt := fmt.Sprintf(\"%d\", vol.SizeInBytes)\n\tsize, err := resource.ParseQuantity(sizeFmt)\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"Unable to parse sizeFmt %s\", sizeFmt)\n\t}\n\tmatchLabels := map[string]string{pvMatchLabelName: filepath.Base(vol.ID)}\n\n\t// Since behavior and error returned from repeated create might vary, check first\n\tsel := labelSelector(matchLabels)\n\toptions := metav1.ListOptions{LabelSelector: sel}\n\tpvl, err := kubeCli.CoreV1().PersistentVolumes().List(ctx, options)\n\tif err == nil && len(pvl.Items) == 1 {\n\t\treturn pvl.Items[0].Name, nil\n\t}\n\n\tif len(accessmodes) == 0 {\n\t\taccessmodes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}\n\t}\n\n\tpv := v1.PersistentVolume{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tGenerateName: \"kanister-pv-\",\n\t\t\tLabels: matchLabels,\n\t\t\tAnnotations: annotations,\n\t\t},\n\t\tSpec: v1.PersistentVolumeSpec{\n\t\t\tCapacity: v1.ResourceList{\n\t\t\t\tv1.ResourceName(v1.ResourceStorage): size,\n\t\t\t},\n\t\t\tAccessModes: accessmodes,\n\t\t\tVolumeMode: volumemode,\n\t\t\tPersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,\n\t\t},\n\t}\n\tswitch volType {\n\tcase blockstorage.TypeEBS:\n\t\tpv.Spec.PersistentVolumeSource.AWSElasticBlockStore = &v1.AWSElasticBlockStoreVolumeSource{\n\t\t\tVolumeID: vol.ID,\n\t\t}\n\t\tpv.ObjectMeta.Labels[kube.FDZoneLabelName] = vol.Az\n\t\tpv.ObjectMeta.Labels[kube.FDRegionLabelName] = zoneToRegion(vol.Az)\n\tcase blockstorage.TypeGPD:\n\t\tpv.Spec.PersistentVolumeSource.GCEPersistentDisk = &v1.GCEPersistentDiskVolumeSource{\n\t\t\tPDName: vol.ID,\n\t\t}\n\t\tpv.ObjectMeta.Labels[kube.FDZoneLabelName] = vol.Az\n\t\tpv.ObjectMeta.Labels[kube.FDRegionLabelName] = zoneToRegion(vol.Az)\n\n\tdefault:\n\t\treturn \"\", errors.Errorf(\"Volume type %v(%T) not supported \", volType, volType)\n\t}\n\n\tcreatedPV, err := kubeCli.CoreV1().PersistentVolumes().Create(ctx, &pv, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"Unable to create PV for volume %v\", pv)\n\t}\n\treturn createdPV.Name, nil\n}",
"func (o IopingSpecVolumeVolumeSourceAzureFilePtrOutput) SecretName() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceAzureFile) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.SecretName\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumePersistentVolumeClaimSpecDataSourceOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumePersistentVolumeClaimSpecDataSource) string { return v.Name }).(pulumi.StringOutput)\n}",
"func (p *pv) key() pvKey {\n\treturn newPVKey(p.Cluster, p.Name)\n}",
"func (o FolderBucketViewOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *FolderBucketView) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o VirtualDatabaseSpecEnvValueFromSecretKeyRefOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v VirtualDatabaseSpecEnvValueFromSecretKeyRef) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o RegionAutoscalerOutput) Name() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *RegionAutoscaler) pulumi.StringOutput { return v.Name }).(pulumi.StringOutput)\n}",
"func (o CassandraPartitionKeyResponseOutput) Name() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v CassandraPartitionKeyResponse) *string { return v.Name }).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceAzureFileOutput) SecretName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceAzureFile) string { return v.SecretName }).(pulumi.StringOutput)\n}",
"func (o FioSpecVolumeVolumeSourceAzureFileOutput) SecretName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceAzureFile) string { return v.SecretName }).(pulumi.StringOutput)\n}",
"func (p *PersistentVolume) List(name string) (apiv1.PersistentVolumeList, error) {\n\tvar b bytes.Buffer\n\tvar list apiv1.PersistentVolumeList\n\tpath := \"/api/v1/persistentvolumes/\"\n\n\tresp, err := p.client.Get(p.addr).\n\t\tPath(path).\n\t\tDo().\n\t\tBody(&b)\n\n\tif err != nil {\n\t\treturn list, errors.Errorf(\"failed to list related pv, name: %s, (%v)\", name, err)\n\t}\n\tif !resp.IsOK() {\n\t\tif resp.IsNotfound() {\n\t\t\treturn list, k8serror.ErrNotFound\n\t\t}\n\n\t\treturn list, errors.Errorf(\"failed to list related pv, name: %s, statuscode: %v, body: %v\",\n\t\t\tname, resp.StatusCode(), b.String())\n\t}\n\tif err := json.Unmarshal(b.Bytes(), &list); err != nil {\n\t\treturn list, err\n\t}\n\treturn list, nil\n}"
] | [
"0.6715421",
"0.6433566",
"0.6399416",
"0.63078755",
"0.6300491",
"0.62943375",
"0.62015206",
"0.620125",
"0.6192337",
"0.61908",
"0.61793375",
"0.61018664",
"0.60738367",
"0.60727775",
"0.6062434",
"0.5955387",
"0.59438527",
"0.594144",
"0.59110594",
"0.58653605",
"0.58627313",
"0.5857852",
"0.58280975",
"0.582242",
"0.5802141",
"0.5776833",
"0.57719636",
"0.5753991",
"0.574302",
"0.5696936",
"0.5686012",
"0.5679867",
"0.5676711",
"0.5666765",
"0.56664246",
"0.56563944",
"0.56497717",
"0.5641309",
"0.5640084",
"0.56120795",
"0.5604263",
"0.5602175",
"0.55588424",
"0.5556553",
"0.5544008",
"0.54951817",
"0.54772764",
"0.5471458",
"0.5455856",
"0.54326737",
"0.5432269",
"0.54307663",
"0.54257613",
"0.5411655",
"0.5407381",
"0.5402907",
"0.54024106",
"0.5400341",
"0.5391782",
"0.5361562",
"0.53437907",
"0.53388894",
"0.5327155",
"0.53244877",
"0.53195363",
"0.53093284",
"0.5303467",
"0.5284577",
"0.5283944",
"0.52825284",
"0.5276589",
"0.5272138",
"0.5269156",
"0.5266801",
"0.5263262",
"0.526181",
"0.5259126",
"0.52534837",
"0.5238968",
"0.52368313",
"0.52343935",
"0.52285784",
"0.52198094",
"0.5212147",
"0.5210993",
"0.5209995",
"0.5206372",
"0.52047694",
"0.5204633",
"0.51978844",
"0.5195815",
"0.51929295",
"0.51927257",
"0.5175971",
"0.51632345",
"0.51444024",
"0.51422983",
"0.5133385",
"0.51254404",
"0.51215786"
] | 0.6451138 | 1 |
Matches returns true if this bucket config equals the given endpoint & name. | func (bc BucketConfig) Matches(endpoint, bucketName string) bool {
return strings.ToLower(endpoint) == strings.ToLower(bc.Endpoint) &&
strings.ToLower(bucketName) == strings.ToLower(bc.Name)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (info *BaseEndpointInfo) Equal(other Endpoint) bool {\n\treturn info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal()\n}",
"func (info *endpointsInfo) Equal(other proxy.Endpoint) bool {\n\treturn info.String() == other.String() && info.GetIsLocal() == other.GetIsLocal()\n}",
"func (ec EndpointCriteria) Equals(other EndpointCriteria) bool {\n\tss1 := stringSet(ec.Organizations)\n\tss2 := stringSet(other.Organizations)\n\treturn ec.Endpoint == other.Endpoint && ss1.equals(ss2)\n}",
"func (m MatcherFunc) Matches(endpoint Endpoint) (bool, error) {\n\treturn m(endpoint)\n}",
"func (ids WorkloadEndpointIdentifiers) NameMatches(name string) (bool, error) {\n\t// Extract the required segments for this orchestrator type.\n\treq, err := ids.getSegments()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Extract the parameters from the name.\n\tparts := ExtractDashSeparatedParms(name, len(req))\n\tif len(parts) == 0 {\n\t\treturn false, nil\n\t}\n\n\t// Check each name segment for a non-match.\n\tfor i, r := range req {\n\t\tif r.value != \"\" && r.value != parts[i] {\n\t\t\treturn false, nil\n\t\t}\n\t}\n\treturn true, nil\n}",
"func (in *EndpointSelector) DeepEqual(other *EndpointSelector) bool {\n\tif other == nil {\n\t\treturn false\n\t}\n\n\tif (in.LabelSelector == nil) != (other.LabelSelector == nil) {\n\t\treturn false\n\t} else if in.LabelSelector != nil {\n\t\tif !in.LabelSelector.DeepEqual(other.LabelSelector) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif (in.requirements == nil) != (other.requirements == nil) {\n\t\treturn false\n\t} else if in.requirements != nil {\n\t\tif !in.requirements.DeepEqual(other.requirements) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\tif in.cachedLabelSelectorString != other.cachedLabelSelectorString {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (r *EndpointRegistry) Has(addr wire.Address) bool {\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\n\t_, ok := r.endpoints[wire.Key(addr)]\n\n\treturn ok\n}",
"func (sd *ServiceData) Endpoint(name string) *EndpointData {\n\tfor _, ed := range sd.Endpoints {\n\t\tif ed.Method.Name == name {\n\t\t\treturn ed\n\t\t}\n\t}\n\treturn nil\n}",
"func stringInOriginItem(needle string, haystack []OriginItem) bool {\n\tresult := false\n\tfor _, item := range haystack {\n\t\tif needle == item.Endpoint {\n\t\t\tresult = true\n\t\t\tbreak\n\t\t}\n\t}\n\treturn result\n}",
"func (c *Config) Contains(pattern string) bool {\n\tif j := c.getJson(); j != nil {\n\t\treturn j.Contains(pattern)\n\t}\n\treturn false\n}",
"func (r *DeliveryResource) Match(e *ExportableResource) bool {\n\tif e.HasKind(r.Kind) &&\n\t\tr.CloudProvider() == e.CloudProvider &&\n\t\tr.Account() == e.Account &&\n\t\tr.Name() == e.Name {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (e *Endpoints) DeepEquals(o *Endpoints) bool {\n\tswitch {\n\tcase (e == nil) != (o == nil):\n\t\treturn false\n\tcase (e == nil) && (o == nil):\n\t\treturn true\n\t}\n\n\tif len(e.Backends) != len(o.Backends) {\n\t\treturn false\n\t}\n\n\tfor ip1, ports1 := range e.Backends {\n\t\tports2, ok := o.Backends[ip1]\n\t\tif !ok {\n\t\t\treturn false\n\t\t}\n\n\t\tif !ports1.DeepEquals(ports2) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func TestEndpointCase1(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ap-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ap-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func configsAreEqual(config1, config2 *ServiceAliasConfig) bool {\n\treturn config1.Name == config2.Name &&\n\t\tconfig1.Namespace == config2.Namespace &&\n\t\tconfig1.Host == config2.Host &&\n\t\tconfig1.Path == config2.Path &&\n\t\tconfig1.TLSTermination == config2.TLSTermination &&\n\t\treflect.DeepEqual(config1.Certificates, config2.Certificates) &&\n\t\t// Status isn't compared since whether certs have been written\n\t\t// to disk or not isn't relevant in determining whether a\n\t\t// route needs to be updated.\n\t\tconfig1.PreferPort == config2.PreferPort &&\n\t\tconfig1.InsecureEdgeTerminationPolicy == config2.InsecureEdgeTerminationPolicy &&\n\t\tconfig1.RoutingKeyName == config2.RoutingKeyName &&\n\t\tconfig1.IsWildcard == config2.IsWildcard &&\n\t\tconfig1.VerifyServiceHostname == config2.VerifyServiceHostname &&\n\t\treflect.DeepEqual(config1.HTTPResponseHeaders, config2.HTTPResponseHeaders) &&\n\t\treflect.DeepEqual(config1.HTTPRequestHeaders, config2.HTTPRequestHeaders) &&\n\t\treflect.DeepEqual(config1.Annotations, config2.Annotations) &&\n\t\treflect.DeepEqual(config1.ServiceUnits, config2.ServiceUnits)\n}",
"func TestEndpointCase24(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-west-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-west-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func recordExists(needle *endpoint.Endpoint, haystack []*endpoint.Endpoint) (*endpoint.Endpoint, bool) {\n\tfor _, record := range haystack {\n\t\tif record.DNSName == needle.DNSName {\n\t\t\treturn record, true\n\t\t}\n\t}\n\n\treturn nil, false\n}",
"func (r *GoogleChannelConfig) matcher(c *Client) func([]byte) bool {\n\treturn func(b []byte) bool {\n\t\tcr, err := unmarshalGoogleChannelConfig(b, c, r)\n\t\tif err != nil {\n\t\t\tc.Config.Logger.Warning(\"failed to unmarshal provided resource in matcher.\")\n\t\t\treturn false\n\t\t}\n\t\tnr := r.urlNormalized()\n\t\tncr := cr.urlNormalized()\n\t\tc.Config.Logger.Infof(\"looking for %v\\nin %v\", nr, ncr)\n\n\t\tif nr.Project == nil && ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Both Project fields null - considering equal.\")\n\t\t} else if nr.Project == nil || ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Project field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Project != *ncr.Project {\n\t\t\treturn false\n\t\t}\n\t\tif nr.Location == nil && ncr.Location == nil {\n\t\t\tc.Config.Logger.Info(\"Both Location fields null - considering equal.\")\n\t\t} else if nr.Location == nil || ncr.Location == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Location field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Location != *ncr.Location {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}",
"func GetEndpointOfBucketFromCache(bucketName string) (string, bool) {\n\tlog.Infof(\"Start to get endpoint of bucket %s from cache\", bucketName)\n\tif endpoint, ok := bceconf.BucketEndpointCacheProvider.Get(bucketName); ok {\n\t\tlog.Infof(\"Success get endpoint of bucket %s from cache, endpoint is %s\", bucketName,\n\t\t\tendpoint)\n\t\treturn endpoint, true\n\t}\n\tlog.Infof(\"Failed to get endpoint of bucket %s from cache\", bucketName)\n\treturn \"\", false\n}",
"func TestEndpointCase25(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-west-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-west-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase2(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ap-northeast-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ap-northeast-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (bc BucketConfig) EndpointAsURL() string {\n\tprefix := \"http://\"\n\tif bc.Secure {\n\t\tprefix = \"https://\"\n\t}\n\treturn prefix + bc.Endpoint\n}",
"func (proxy *StandAloneProxyConfig) IsSecretConfiguredForArrays(secretName string) bool {\n\tfor _, array := range proxy.managedArrays {\n\t\tif array.ProxyCredentialSecrets[secretName].CredentialSecret == secretName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func TestEndpointCase44(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tEndpoint: ptr.String(\"https://example.com\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://example.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (proxy *StandAloneProxyConfig) IsSecretConfiguredForCerts(secretName string) bool {\n\tfound := false\n\tfor _, server := range proxy.managementServers {\n\t\tif server.CertSecret == secretName {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\treturn found\n}",
"func (e1 *Config) Equal(e2 *Config) bool {\n\tif e1 == e2 {\n\t\treturn true\n\t}\n\tif e1 == nil || e2 == nil {\n\t\treturn false\n\t}\n\tif e1.URL != e2.URL {\n\t\treturn false\n\t}\n\tif e1.Host != e2.Host {\n\t\treturn false\n\t}\n\tif e1.SigninURL != e2.SigninURL {\n\t\treturn false\n\t}\n\tif e1.SigninURLRedirectParam != e2.SigninURLRedirectParam {\n\t\treturn false\n\t}\n\tif e1.Method != e2.Method {\n\t\treturn false\n\t}\n\n\tmatch := sets.StringElementsMatch(e1.ResponseHeaders, e2.ResponseHeaders)\n\tif !match {\n\t\treturn false\n\t}\n\n\tif e1.RequestRedirect != e2.RequestRedirect {\n\t\treturn false\n\t}\n\tif e1.AuthSnippet != e2.AuthSnippet {\n\t\treturn false\n\t}\n\n\tif e1.AuthCacheKey != e2.AuthCacheKey {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveConnections != e2.KeepaliveConnections {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveShareVars != e2.KeepaliveShareVars {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveRequests != e2.KeepaliveRequests {\n\t\treturn false\n\t}\n\n\tif e1.KeepaliveTimeout != e2.KeepaliveTimeout {\n\t\treturn false\n\t}\n\n\tif e1.AlwaysSetCookie != e2.AlwaysSetCookie {\n\t\treturn false\n\t}\n\n\treturn sets.StringElementsMatch(e1.AuthCacheDuration, e2.AuthCacheDuration)\n}",
"func isAwsStatusEqual(aws *corev1.Secret, bkp *v1alpha1.Backup) bool {\n\treturn aws.Name != bkp.Status.AWSSecretName || aws.Namespace != bkp.Status.AwsCredentialsSecretNamespace\n}",
"func (ep *baseEndpoint) changed(endpoints map[string]int) bool {\n\tif len(ep.endpoints) != len(endpoints) {\n\t\treturn true\n\t}\n\n\tfor key, val := range endpoints {\n\t\tif oldVal, exist := ep.endpoints[key]; !exist || oldVal != val {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func ContainsBackend(name string) bool {\n\tfor k := range backendMap {\n\t\tif k == name {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (cs CredentialsStatus) Match(secret corev1.Secret) bool {\n\tswitch {\n\tcase cs.Reference == nil:\n\t\treturn false\n\tcase cs.Reference.Name != secret.ObjectMeta.Name:\n\t\treturn false\n\tcase cs.Reference.Namespace != secret.ObjectMeta.Namespace:\n\t\treturn false\n\tcase cs.Version != secret.ObjectMeta.ResourceVersion:\n\t\treturn false\n\t}\n\treturn true\n}",
"func (util copyHandlerUtil) blobNameMatchesThePatternComponentWise(pattern string, blobName string) bool {\n\t// find the number of path separator in pattern and blobName\n\t// If the number of path separator doesn't match, then blob name doesn't match the pattern\n\tpSepInPattern := strings.Count(pattern, common.AZCOPY_PATH_SEPARATOR_STRING)\n\tpSepInBlobName := strings.Count(blobName, common.AZCOPY_PATH_SEPARATOR_STRING)\n\tif pSepInPattern != pSepInBlobName {\n\t\treturn false\n\t}\n\t// If the number of path separator matches in both blobName and pattern\n\t// each component of the blobName should match each component in pattern\n\t// Length of patternComponents and blobNameComponents is same since we already\n\t// match the number of path separators above.\n\tpatternComponents := strings.Split(pattern, common.AZCOPY_PATH_SEPARATOR_STRING)\n\tblobNameComponents := strings.Split(blobName, common.AZCOPY_PATH_SEPARATOR_STRING)\n\tfor index := 0; index < len(patternComponents); index++ {\n\t\t// match the pattern component and blobName component\n\t\tif !util.blobNameMatchesThePattern(patternComponents[index], blobNameComponents[index]) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func TestEndpointCase17(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"sa-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.sa-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase0(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"af-south-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.af-south-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (service *S3Service) Endpoint() string {\n return service.endpoint;\n}",
"func (sc StorageConfig) Equals(other StorageConfig) bool {\n\treturn reflect.DeepEqual(sc, other)\n}",
"func TestEndpointCase31(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-north-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.cn-north-1.amazonaws.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func endpointsEquivalent(a, b *object.Endpoints) bool {\n\n\tif len(a.Subsets) != len(b.Subsets) {\n\t\treturn false\n\t}\n\n\t// we should be able to rely on\n\t// these being sorted and able to be compared\n\t// they are supposed to be in a canonical format\n\tfor i, sa := range a.Subsets {\n\t\tsb := b.Subsets[i]\n\t\tif !subsetsEquivalent(sa, sb) {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func validateEndpoint(endpoint string) ([]string, error) {\n\tendpointSet := make(map[string]struct{})\n\tendpoints := make([]string, 0)\n\tfor _, endpoint := range strings.Split(endpoint, \",\") {\n\t\ttrimEndpoint := strings.Trim(endpoint, \" \")\n\t\tif _, err := url.ParseRequestURI(trimEndpoint); err != nil {\n\t\t\treturn nil, errors.New(\"URL \\\"\" + trimEndpoint + \"\\\" is not a valid endpoint\")\n\t\t}\n\t\t// check if map contains this key to identify duplicate URLs\n\t\tif _, hasKey := endpointSet[trimEndpoint]; hasKey {\n\t\t\treturn nil, errors.New(\"URL \\\"\" + trimEndpoint + \"\\\" is a duplicate endpoint\")\n\t\t}\n\t\tendpointSet[trimEndpoint] = struct{}{}\n\t\tendpoints = append(endpoints, trimEndpoint)\n\t}\n\treturn endpoints, nil\n}",
"func (r *Key) matcher(c *Client) func([]byte) bool {\n\treturn func(b []byte) bool {\n\t\tcr, err := unmarshalKey(b, c, r)\n\t\tif err != nil {\n\t\t\tc.Config.Logger.Warning(\"failed to unmarshal provided resource in matcher.\")\n\t\t\treturn false\n\t\t}\n\t\tnr := r.urlNormalized()\n\t\tncr := cr.urlNormalized()\n\t\tc.Config.Logger.Infof(\"looking for %v\\nin %v\", nr, ncr)\n\n\t\tif nr.Project == nil && ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Both Project fields null - considering equal.\")\n\t\t} else if nr.Project == nil || ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Project field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Project != *ncr.Project {\n\t\t\treturn false\n\t\t}\n\t\tif nr.Name == nil && ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Both Name fields null - considering equal.\")\n\t\t} else if nr.Name == nil || ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Name field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Name != *ncr.Name {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}",
"func TestEndpointCase16(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"me-south-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.me-south-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase34(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase18(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func Matches(entry Entry, name string) bool {\n\tif entry == nil {\n\t\treturn false\n\t}\n\tsplittedName := strings.Split(name, \"*\")\n\tif len(splittedName) == 1 {\n\t\tif entry.Name() == name {\n\t\t\treturn true\n\t\t}\n\t} else if len(splittedName) == 2 {\n\t\tif strings.HasPrefix(entry.Name(), splittedName[0]) && strings.HasSuffix(entry.Name(), splittedName[1]) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func TestEndpointCase9(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ca-central-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ca-central-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func hasHostEndpoints(endpointIPs []string) bool {\n\tfor _, endpointIP := range endpointIPs {\n\t\tfound := false\n\t\tfor _, clusterNet := range config.Default.ClusterSubnets {\n\t\t\tif clusterNet.CIDR.Contains(net.ParseIP(endpointIP)) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func TestEndpointCase86(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t\tEndpoint: ptr.String(\"https://example.com\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://example.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestMatchesByPrefix(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tkey proto.Key\n\t\texpConfigs []ConfigUnion\n\t}{\n\t\t{proto.KeyMin, []ConfigUnion{config1}},\n\t\t{proto.Key(\"\\x01\"), []ConfigUnion{config1}},\n\t\t{proto.Key(\"/db\"), []ConfigUnion{config1}},\n\t\t{proto.Key(\"/db1\"), []ConfigUnion{config2, config1}},\n\t\t{proto.Key(\"/db1/a\"), []ConfigUnion{config2, config1}},\n\t\t{proto.Key(\"/db1/table1\"), []ConfigUnion{config3, config2, config1}},\n\t\t{proto.Key(\"/db1/table\\xff\"), []ConfigUnion{config3, config2, config1}},\n\t\t{proto.Key(\"/db2\"), []ConfigUnion{config1}},\n\t\t{proto.Key(\"/db3\"), []ConfigUnion{config4, config1}},\n\t\t{proto.Key(\"/db3\\xff\"), []ConfigUnion{config4, config1}},\n\t\t{proto.Key(\"/db5\"), []ConfigUnion{config1}},\n\t\t{proto.Key(\"/xfe\"), []ConfigUnion{config1}},\n\t\t{proto.Key(\"/xff\"), []ConfigUnion{config1}},\n\t}\n\tfor i, test := range testData {\n\t\tpcs := pcc.MatchesByPrefix(test.key)\n\t\tif len(pcs) != len(test.expConfigs) {\n\t\t\tt.Errorf(\"%d: expected %d matches, got %d\", i, len(test.expConfigs), len(pcs))\n\t\t\tcontinue\n\t\t}\n\t\tfor j, pc := range pcs {\n\t\t\tif pc.Config != test.expConfigs[j] {\n\t\t\t\tt.Errorf(\"%d: expected \\\"%d\\\"th config %v for %q; got %v\", i, j, test.expConfigs[j], test.key, pc.Config)\n\t\t\t}\n\t\t}\n\t}\n}",
"func (m URLPrefixMap) Contains(uri *url.URL) bool {\n\ts := strings.TrimPrefix(uri.Host, \"www.\")\n\tif _, ok := m[s]; ok {\n\t\treturn true\n\t}\n\tfor _, p := range strings.Split(uri.Path, \"/\") {\n\t\tif p == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\ts = fmt.Sprintf(\"%s/%s\", s, p)\n\t\tif _, ok := m[s]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func TestEndpointCase15(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"eu-west-3\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.eu-west-3.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (config *Config) ContainsExchange(id string) bool {\n\t_, ok := config.Exchanges[id]\n\treturn ok\n}",
"func TestEndpointCase12(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"eu-south-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.eu-south-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (m *endpoint) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConnectionString(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func TestEndpointCase4(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ap-northeast-3\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ap-northeast-3.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase59(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tStreamARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream\"),\n\t\tEndpoint: ptr.String(\"https://example.com\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://example.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase3(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ap-northeast-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ap-northeast-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase27(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase5(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ap-south-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ap-south-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase22(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-west-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase29(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-northwest-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.cn-northwest-1.amazonaws.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (b *Bucket) GetEndpoint() string {\n\treturn b.Endpoint\n}",
"func TestEndpointCase33(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func Has(client *http.Client, k []byte, host, bucket string, creds awsauth.Credentials) (has bool, err error) {\n\traw := fmt.Sprintf(\"https://%s/%s/%x\", host, bucket, k)\n\tloc, err := url.Parse(raw)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to parse '%s' as url: %v\", raw, err)\n\t}\n\n\treq, err := http.NewRequest(\"HEAD\", loc.String(), nil)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to create HEAD request: %v\", err)\n\t}\n\n\tawsauth.Sign(req, creds)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"failed to perform HEAD request: %v\", err)\n\t}\n\n\tif resp.StatusCode == http.StatusOK {\n\t\treturn true, nil\n\t} else if resp.StatusCode == http.StatusNotFound || resp.StatusCode == http.StatusForbidden {\n\t\t//AWS returns forbidden for a HEAD request if the one performing the operation does not have\n\t\t//list bucket permissions\n\t\treturn false, nil\n\t} else {\n\t\treturn false, fmt.Errorf(\"unexpected response from HEAD '%s' request: %s\", loc, resp.Status)\n\t}\n}",
"func TestEndpointCase36(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-west-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase26(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase32(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-north-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.cn-north-1.api.amazonwebservices.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase23(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-west-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase14(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"eu-west-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.eu-west-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase88(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.data-kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (c *Client) Belongs(fullName string) bool {\n\tfullName = strings.TrimSpace(fullName)\n\tsubstr := strings.Split(fullName, \"/\")\n\tif len(substr) < 2 {\n\t\t// cannot be from the backup repository\n\t\t// as it starts with registry and organization\n\t\treturn false\n\t}\n\n\tfor _, r := range registryAliases[c.registry] {\n\t\tif substr[0] == r {\n\t\t\tif substr[1] == c.organization {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\n\treturn false\n}",
"func TestEndpointCase66(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-west-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tStreamARN: ptr.String(\"arn:aws:kinesis:us-west-1:123:stream/test-stream\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.control-kinesis.us-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func ContainsAuthKey(key string) bool {\n\tfor _, k := range viper.GetStringSlice(AuthKey) {\n\t\tif k == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (r *EndpointRegistry) dialingEndpoint(a wire.Address) (_ *dialingEndpoint, created bool) {\n\tkey := wire.Key(a)\n\tentry, ok := r.dialing[key]\n\tif !ok {\n\t\tentry = newDialingEndpoint(a)\n\t\tr.dialing[key] = entry\n\t}\n\n\treturn entry, !ok\n}",
"func TestEndpointCase10(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"eu-central-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.eu-central-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (s L4Service) Equal(s2 L4Service) bool {\n\tif len(s.Endpoints) != len(s2.Endpoints) {\n\t\treturn false\n\t}\n\tfor _, s1e := range s.Endpoints {\n\t\tfound := false\n\t\tfor _, s2e := range s2.Endpoints {\n\t\t\tif reflect.DeepEqual(s1e, s2e) {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn false\n\t\t}\n\t}\n\ts.Endpoints = nil\n\ts2.Endpoints = nil\n\n\treturn reflect.DeepEqual(s, s2)\n}",
"func (r *UrlMap) matcher(c *Client) func([]byte) bool {\n\treturn func(b []byte) bool {\n\t\tcr, err := unmarshalUrlMap(b, c)\n\t\tif err != nil {\n\t\t\tc.Config.Logger.Warning(\"failed to unmarshal provided resource in matcher.\")\n\t\t\treturn false\n\t\t}\n\t\tnr := r.urlNormalized()\n\t\tncr := cr.urlNormalized()\n\t\tc.Config.Logger.Infof(\"looking for %v\\nin %v\", nr, ncr)\n\n\t\tif nr.Project == nil && ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Both Project fields null - considering equal.\")\n\t\t} else if nr.Project == nil || ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Project field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Project != *ncr.Project {\n\t\t\treturn false\n\t\t}\n\t\tif nr.Name == nil && ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Both Name fields null - considering equal.\")\n\t\t} else if nr.Name == nil || ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Name field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Name != *ncr.Name {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}",
"func TestEndpointCase35(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-west-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-west-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase21(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-east-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (r *Service) matcher(c *Client) func([]byte) bool {\n\treturn func(b []byte) bool {\n\t\tcr, err := unmarshalService(b, c, r)\n\t\tif err != nil {\n\t\t\tc.Config.Logger.Warning(\"failed to unmarshal provided resource in matcher.\")\n\t\t\treturn false\n\t\t}\n\t\tnr := r.urlNormalized()\n\t\tncr := cr.urlNormalized()\n\t\tc.Config.Logger.Infof(\"looking for %v\\nin %v\", nr, ncr)\n\n\t\tif nr.Project == nil && ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Both Project fields null - considering equal.\")\n\t\t} else if nr.Project == nil || ncr.Project == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Project field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Project != *ncr.Project {\n\t\t\treturn false\n\t\t}\n\t\tif nr.Name == nil && ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Both Name fields null - considering equal.\")\n\t\t} else if nr.Name == nil || ncr.Name == nil {\n\t\t\tc.Config.Logger.Info(\"Only one Name field is null - considering unequal.\")\n\t\t\treturn false\n\t\t} else if *nr.Name != *ncr.Name {\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n}",
"func (c *hostNameFormatConfig) IsValid(name string) bool {\n\tfor _, validator := range c.validators {\n\t\tif !validator.IsValid(name) {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}",
"func WithEndpoint(endpoint string) {\n\tcfg.endpoint = strings.TrimRight(endpoint, \"/\")\n}",
"func (o DiagnosticsStorageAccountConfigResponseOutput) QueueEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfigResponse) string { return v.QueueEndpoint }).(pulumi.StringOutput)\n}",
"func TestEndpointCase28(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"cn-north-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.cn-north-1.amazonaws.com.cn\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (r *EndpointRegistry) fullEndpoint(addr wire.Address, e *Endpoint) (_ *fullEndpoint, created bool) {\n\tkey := wire.Key(addr)\n\tr.mutex.Lock()\n\tdefer r.mutex.Unlock()\n\tentry, ok := r.endpoints[key]\n\tif !ok {\n\t\tentry = newFullEndpoint(e)\n\t\tr.endpoints[key] = entry\n\t}\n\treturn entry, !ok\n}",
"func TestEndpointCase87(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.control-kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func assertEqualEndpoints(t *testing.T, expected, actual *Endpoint) {\n\tif !reflect.DeepEqual(*expected, *actual) {\n\t\tt.Errorf(\"Expected endpoint: %v, Actual %v\", expected, actual)\n\t}\n}",
"func IsBucketInBucketsArray(buckets []nb.BucketInfo, bucketName string) bool {\n\tfor _, b := range buckets {\n\t\tif b.Name == bucketName {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func TestEndpointCase37(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(true),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis-fips.us-gov-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func UrlSuffixMatches(suffixes ...string) goproxy.ReqConditionFunc {\n\treturn func(req *http.Request, ctx *goproxy.ProxyCtx) bool {\n\t\tfor _, suffix := range suffixes {\n\t\t\tif strings.HasSuffix(req.URL.Path, suffix) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n}",
"func TestEndpointCase38(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-gov-east-1\"),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-gov-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (s *MonitoringJobDefinitionSummary) SetEndpointName(v string) *MonitoringJobDefinitionSummary {\n\ts.EndpointName = &v\n\treturn s\n}",
"func TestEndpointCase91(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(true),\n\t\tUseDualStack: ptr.Bool(true),\n\t\tOperationType: ptr.String(\"control\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/test-stream/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.control-kinesis-fips.us-east-1.api.aws\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase11(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"eu-north-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.eu-north-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestMatchByPrefix(t *testing.T) {\n\tdefer leaktest.AfterTest(t)\n\tpcc := buildTestPrefixConfigMap()\n\ttestData := []struct {\n\t\tkey proto.Key\n\t\texpConfig interface{}\n\t}{\n\t\t{proto.KeyMin, config1},\n\t\t{proto.Key(\"\\x01\"), config1},\n\t\t{proto.Key(\"/db\"), config1},\n\t\t{proto.Key(\"/db1\"), config2},\n\t\t{proto.Key(\"/db1/a\"), config2},\n\t\t{proto.Key(\"/db1/table1\"), config3},\n\t\t{proto.Key(\"/db1/table\\xff\"), config3},\n\t\t{proto.Key(\"/db2\"), config1},\n\t\t{proto.Key(\"/db3\"), config4},\n\t\t{proto.Key(\"/db3\\xff\"), config4},\n\t\t{proto.Key(\"/db5\"), config1},\n\t\t{proto.Key(\"/xfe\"), config1},\n\t\t{proto.Key(\"/xff\"), config1},\n\t}\n\tfor i, test := range testData {\n\t\tpc := pcc.MatchByPrefix(test.key)\n\t\tif test.expConfig != pc.Config {\n\t\t\tt.Errorf(\"%d: expected config %v for %q; got %v\", i, test.expConfig, test.key, pc.Config)\n\t\t}\n\t}\n}",
"func (b *BusStop) Equals(busStop *BusStop) bool {\n\treturn b.Name == busStop.Name\n}",
"func (b *BusStop) Equals(busStop *BusStop) bool {\n\treturn b.Name == busStop.Name\n}",
"func (es *Endpoints) GetByUUID(uuid string) (*Endpoint, bool) {\n\tes.RLock()\n\tdefer es.RUnlock()\n\tif i, ok := es.mapUUID[uuid]; ok {\n\t\treturn es.endpoints[i].Copy(), true\n\t}\n\treturn nil, false\n}",
"func TestEndpointCase20(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-2\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-east-2.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase8(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"ap-southeast-3\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.ap-southeast-3.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func TestEndpointCase103(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-east-1\"),\n\t\tUseFIPS: ptr.Bool(false),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tOperationType: ptr.String(\"data\"),\n\t\tStreamARN: ptr.String(\"arn:aws:kinesis:us-east-1:123:stream/foobar\"),\n\t\tConsumerARN: ptr.String(\"arn:aws:kinesis:us-east-1:123456789123:stream/foobar/consumer/test-consumer:1525898737\"),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://123.data-kinesis.us-east-1.amazonaws.com\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}",
"func (c Config) Equals(another Config) bool {\n\tpathEquals := c.Filepath == another.Filepath\n\tdpEquals := c.DownloadsPath == another.DownloadsPath\n\ttpEquals := c.TargetBasePath == another.TargetBasePath\n\n\treturn pathEquals && dpEquals && tpEquals\n}",
"func TestEndpointCase42(t *testing.T) {\n\tvar params = EndpointParameters{\n\t\tRegion: ptr.String(\"us-isob-east-1\"),\n\t\tUseDualStack: ptr.Bool(false),\n\t\tUseFIPS: ptr.Bool(false),\n\t}\n\n\tresolver := NewDefaultEndpointResolverV2()\n\tresult, err := resolver.ResolveEndpoint(context.Background(), params)\n\t_, _ = result, err\n\n\tif err != nil {\n\t\tt.Fatalf(\"expect no error, got %v\", err)\n\t}\n\n\turi, _ := url.Parse(\"https://kinesis.us-isob-east-1.sc2s.sgov.gov\")\n\n\texpectEndpoint := smithyendpoints.Endpoint{\n\t\tURI: *uri,\n\t\tHeaders: http.Header{},\n\t\tProperties: smithy.Properties{},\n\t}\n\n\tif e, a := expectEndpoint.URI, result.URI; e != a {\n\t\tt.Errorf(\"expect %v URI, got %v\", e, a)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Headers, result.Headers); diff != \"\" {\n\t\tt.Errorf(\"expect headers to match\\n%s\", diff)\n\t}\n\n\tif diff := cmp.Diff(expectEndpoint.Properties, result.Properties,\n\t\tcmp.AllowUnexported(smithy.Properties{}),\n\t); diff != \"\" {\n\t\tt.Errorf(\"expect properties to match\\n%s\", diff)\n\t}\n}"
] | [
"0.597858",
"0.5928627",
"0.5899422",
"0.531344",
"0.5306883",
"0.52983534",
"0.5129041",
"0.51234996",
"0.51026905",
"0.50878805",
"0.5021614",
"0.5013788",
"0.49760675",
"0.49469203",
"0.4920381",
"0.49037552",
"0.48949546",
"0.48752022",
"0.48695588",
"0.48658732",
"0.4865524",
"0.48642996",
"0.48618683",
"0.48516437",
"0.48502657",
"0.48328778",
"0.48198855",
"0.4819727",
"0.481179",
"0.48067153",
"0.48065034",
"0.47838584",
"0.47701386",
"0.4769331",
"0.4768425",
"0.47621748",
"0.47592145",
"0.47563374",
"0.47548375",
"0.4751724",
"0.47504857",
"0.47195524",
"0.47178864",
"0.47145018",
"0.4686155",
"0.468339",
"0.4678037",
"0.46689534",
"0.46664187",
"0.46619588",
"0.46565208",
"0.46563277",
"0.4656125",
"0.4652623",
"0.465217",
"0.4632043",
"0.4631812",
"0.46314582",
"0.4631164",
"0.46283838",
"0.4618753",
"0.46129695",
"0.46124092",
"0.4612324",
"0.4590182",
"0.45884755",
"0.45819816",
"0.45786625",
"0.45769113",
"0.45744547",
"0.4571939",
"0.4568277",
"0.45670903",
"0.45592338",
"0.4558575",
"0.45569095",
"0.45545036",
"0.454944",
"0.45433944",
"0.453827",
"0.45368886",
"0.45361823",
"0.453574",
"0.45349622",
"0.4533958",
"0.45326662",
"0.45304474",
"0.4526625",
"0.4521641",
"0.45182267",
"0.45171574",
"0.45150262",
"0.4511637",
"0.4511637",
"0.4510252",
"0.45062807",
"0.450423",
"0.45011652",
"0.44980213",
"0.44978043"
] | 0.7934681 | 0 |
EndpointAsURL returns the endpoint of the bucket including scheme. | func (bc BucketConfig) EndpointAsURL() string {
prefix := "http://"
if bc.Secure {
prefix = "https://"
}
return prefix + bc.Endpoint
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (b *Bucket) GetEndpoint() string {\n\treturn b.Endpoint\n}",
"func (b *Bucket) GetURL(path string) string {\n\tif b.client == nil || b.client.Service == nil {\n\t\treturn \"\"\n\t}\n\treturn b.client.Service.Endpoint + \"/\" + b.name + \"/\" + path\n}",
"func (m *_EndpointDescription) GetEndpointUrl() PascalString {\n\treturn m.EndpointUrl\n}",
"func (a *API) GetEndpointURL() string {\n\tif a.Sandbox {\n\t\treturn \"https://apitest.authorize.net/xml/v1/request.api\"\n\t}\n\treturn \"https://api.authorize.net/xml/v1/request.api\"\n}",
"func (c *Client) bucketURL(bucket string) string {\n\tif IsValidBucket(bucket) && !strings.Contains(bucket, \".\") {\n\t\treturn fmt.Sprintf(\"https://%s.%s/\", bucket, c.hostname())\n\t}\n\treturn fmt.Sprintf(\"https://%s/%s/\", c.hostname(), bucket)\n}",
"func GetBlobEndpoint(baseUri string, accountName string) string {\n\treturn fmt.Sprintf(\"https://%s.blob.%s\", accountName, baseUri)\n}",
"func (ch *clientSecureChannel) EndpointURL() string {\n\treturn ch.endpointURL\n}",
"func (e EndpointGateway) GetURL(ctx context.Context) (url string, err error) {\n\tvar urlStruct struct {\n\t\tURL string `json:\"url\"`\n\t}\n\treturn urlStruct.URL, e.doMethod(ctx, \"GET\", nil, &urlStruct)\n}",
"func EndpointURL(ep Endpoint) string {\n\treturn fmt.Sprintf(\"%s://%s\", ep.Network(), ep.Address())\n}",
"func bucketUrl(bucket string) string {\n\treturn fmt.Sprintf(\"https://www.googleapis.com/storage/v1/b/%s\", bucket)\n}",
"func (e *Endpoint) URL() string {\n\treturn e.url\n}",
"func GetURL(endpoint string) string {\n\n\t// Concatenate and return the API URL, version, and endpoint path.\n\treturn fmt.Sprintf(\"%s/%s/%s\", apiURL, apiVersion, endpoint)\n}",
"func (ep *Endpoint_DEPRECATED) URI() string {\n\tif ep.IsS3() {\n\t\treturn \"s3://\" + ep.S3Bucket + \"/\" + ep.S3Subfolder\n\t} else if ep.IsSFTP() {\n\t\treturn \"sftp://\" + ep.SFTPHostname + \"/\" + ep.SFTPDirectory\n\t}\n\tpanic(\"endpoint type not supported\")\n}",
"func (o BucketOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}",
"func (s3 *S3Resource) getS3BucketURLString() (Url *url.URL, err error) {\n\tendpoint, err := s3util.GetS3Endpoint(s3.context, s3.s3Object.Region)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbucketURL := \"https://\" + endpoint + \"/\" + s3.s3Object.Bucket\n\treturn url.Parse(bucketURL)\n}",
"func (r *Bucket) WebsiteEndpoint() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"websiteEndpoint\"])\n}",
"func (service *S3Service) Endpoint() string {\n return service.endpoint;\n}",
"func GetValidEndpoint(profile *entity.Profile) (*url.URL, error) {\n\tu, err := url.ParseRequestURI(profile.Endpoint)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid endpoint: %v due to %v\", profile.Endpoint, err)\n\t}\n\treturn u, nil\n}",
"func (c *Client) Endpoint() string {\n\treturn c.url\n}",
"func (l *LoadBalancer) GetEndpoint() url.URL {\n\treturn l.strategy.NextEndpoint()\n}",
"func (o FluxConfigurationBucketOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FluxConfigurationBucket) string { return v.Url }).(pulumi.StringOutput)\n}",
"func GetFileEndpoint(baseUri string, accountName string) string {\n\treturn fmt.Sprintf(\"https://%s.file.%s\", accountName, baseUri)\n}",
"func (ep *Endpoint) URL() (string, error) {\n\tbaseURL, err := deploys.GetDownloadURL(ep.Version)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn baseURL + \"/\" + ep.Resource + \".json\", nil\n}",
"func (m *LogicAppTriggerEndpointConfiguration) GetUrl()(*string) {\n val, err := m.GetBackingStore().Get(\"url\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (m *Attachment) GetURL() string {\n\turl, _ := core.GetS3Object(core.DefaultSigningTime, m.PATH)\n\n\treturn url\n}",
"func GetEndpointOfBucketFromeBos(cli *bos.Client, bucketName string) (string, error) {\n\tlog.Infof(\"Start to get endpoint of bucket %s from bos\", bucketName)\n\tregion, err := cli.GetBucketLocation(bucketName)\n\tif err != nil {\n\t\tlog.Infof(\"Failed to get endpoint of bucket %s from bos, Error: %s\", bucketName, err)\n\t\treturn \"\", err\n\t}\n\tlog.Infof(\"Success get region of bucket %s from bos, region is '%s'\", bucketName, region)\n\tif region == \"\" {\n\t\treturn \"\", fmt.Errorf(\"get a empty region from bos server!\")\n\t}\n\tendpoint, _ := bceconf.ServerConfigProvider.GetDomainByRegion(region)\n\tbceconf.BucketEndpointCacheProvider.Write(bucketName, endpoint, 3600)\n\treturn endpoint, nil\n}",
"func (st *state) apiEndpoint(path, query string) (*url.URL, error) {\n\treturn &url.URL{\n\t\tScheme: st.serverScheme,\n\t\tHost: st.Addr(),\n\t\tPath: path,\n\t\tRawQuery: query,\n\t}, nil\n}",
"func (a *S3Agent) GetSignedURL(bucket, key string) (*string, error) {\n\terr := bucketExists(a, a.BucketName, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif bucket == \"\" {\n\t\treturn nil, ErrBucketEmpty\n\t}\n\n\tif key == \"\" {\n\t\treturn nil, ErrKeyEmpty\n\t}\n\n\tinput := &s3.GetObjectInput{\n\t\tBucket: &bucket,\n\t\tKey: &key,\n\t}\n\n\treq, _ := a.GetObjectRequest(input)\n\texpiry := time.Now().Add(time.Hour * time.Duration(24)).String()\n\tdur, err := time.ParseDuration(expiry)\n\n\tsigned, err := req.Presign(dur)\n\treturn &signed, err\n}",
"func (o LookupSpacesBucketResultOutput) Endpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupSpacesBucketResult) string { return v.Endpoint }).(pulumi.StringOutput)\n}",
"func (c *Config) Endpoint() string {\n\turi := *c.exporterURL\n\n\tif uri.Scheme == \"file\" {\n\t\turi.RawQuery = \"\"\n\t}\n\turi.Scheme = \"\"\n\n\ts := uri.String()\n\tif strings.HasPrefix(s, \"//\") {\n\t\treturn s[2:]\n\t}\n\treturn s\n}",
"func GetEndpointFromRegion(region string) string {\n\tendpoint := fmt.Sprintf(\"https://sts.%s.amazonaws.com\", region)\n\tif strings.HasPrefix(region, \"cn-\") {\n\t\tendpoint = fmt.Sprintf(\"https://sts.%s.amazonaws.com.cn\", region)\n\t}\n\treturn endpoint\n}",
"func (m *Minio) GetFileURL(bucketName, fileName string) string {\n\treturn m.client.EndpointURL().String() + \"/\" + bucketName + \"/\" + fileName\n}",
"func (b *Bucket) GetSecretURL(path string) (string, error) {\n\treturn b.GetSecretURLWithExpire(path, defaultExpireSecond)\n}",
"func (e Endpoint) GetEndpoint() string {\n\treturn \"someendpoint\"\n}",
"func (c *ContainerClient) URL() string {\n\treturn c.client.endpoint\n}",
"func GetTableEndpoint(baseUri string, accountName string) string {\n\treturn fmt.Sprintf(\"https://%s.table.%s\", accountName, baseUri)\n}",
"func Endpoint(parts ...string) *url.URL {\n\treturn endpoint(parts...)\n}",
"func (c *Client) GetURL() *url.URL {\n\treturn &url.URL{Scheme: \"wss\", Host: \"ws.kraken.com\"}\n}",
"func (c *pluginContext) GetEndpoint(svc endpoints.Service) (string, error) {\n\tif c.CloudType() != \"public\" {\n\t\treturn \"\", fmt.Errorf(\"only public cloud is supported\")\n\t}\n\n\tif !c.HasAPIEndpoint() {\n\t\treturn \"\", nil\n\t}\n\n\tvar cloudDomain string\n\tswitch cname := c.CloudName(); cname {\n\tcase \"bluemix\":\n\t\tcloudDomain = \"cloud.ibm.com\"\n\tcase \"staging\":\n\t\tcloudDomain = \"test.cloud.ibm.com\"\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"unknown cloud name '%s'\", cname)\n\t}\n\n\treturn endpoints.Endpoint(svc, cloudDomain, c.CurrentRegion().Name, c.IsPrivateEndpointEnabled(), c.IsAccessFromVPC())\n}",
"func (s *Services) GetEndpoint(serviceName string) (url *url.URL, err error) {\n\tresolver := s.updateInstances(serviceName)\n\tif resolver != nil && len(resolver.endpoints) > 0 {\n\t\turl = resolver.getHealthyInstanceEndpoint()\n\t} else {\n\t\terr = fmt.Errorf(\"no registered or healtly instances for service: %s\", serviceName)\n\t}\n\treturn\n}",
"func (this *HttpClient) getBucketURL(name string, version int, watch bool) string {\n\turlBuilder, err := url.Parse(this.url + BUCKET_PATH + name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tquery := urlBuilder.Query()\n\n\tquery.Set(\"watch\", strconv.FormatBool(watch))\n\tif version >= 0 {\n\t\tquery.Set(\"version\", strconv.Itoa(version))\n\t}\n\n\turlBuilder.RawQuery = query.Encode()\n\treturn urlBuilder.String()\n}",
"func (s *Server) Endpoint() (*url.URL, error) {\n\tif err := s.listenAndEndpoint(); err != nil {\n\t\treturn nil, err\n\t}\n\treturn s.endpoint, nil\n}",
"func (t PailType) GetDownloadURL(bucket, prefix, key string) string {\n\tswitch t {\n\tcase PailS3:\n\t\treturn fmt.Sprintf(\n\t\t\t\"https://%s.s3.amazonaws.com/%s\",\n\t\t\tbucket,\n\t\t\tstrings.Replace(filepath.Join(prefix, key), \"\\\\\", \"/\", -1),\n\t\t)\n\tdefault:\n\t\treturn \"\"\n\t}\n}",
"func (adp *s3Storage) URL(ctx context.Context, filename string) string {\n\treturn adp.dsn.URL(filename)\n}",
"func (l *Ledger) GetEndpoint() string {\n\treturn l.endpoint\n}",
"func (o DiagnosticsStorageAccountConfigResponseOutput) BlobEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfigResponse) string { return v.BlobEndpoint }).(pulumi.StringOutput)\n}",
"func (s *MinioStore) GetPresignedURL(bucketName string, objectName string) (string, error) {\n\turl, err := s.clnt.PresignedGetObject(bucketName, objectName, getExpires, url.Values{})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error creating get url\", err)\n\t}\n\treturn url.String(), err\n}",
"func (o ConsumerResponseOutput) EndpointUri() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ConsumerResponse) string { return v.EndpointUri }).(pulumi.StringOutput)\n}",
"func (c CredentialService) GetEndpoint() string {\n\treturn c.Endpoint\n}",
"func (o DiagnosticsStorageAccountConfigOutput) BlobEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfig) string { return v.BlobEndpoint }).(pulumi.StringOutput)\n}",
"func APIEndpoint(sandbox bool) string {\n\tif sandbox {\n\t\treturn SandboxURL\n\t}\n\treturn APIURL\n}",
"func (s Storage) BaseURL() *url.URL {\n\treturn s.cdnConf.CDNEndpointWithDefault(s.baseURI)\n}",
"func (o ServiceResponseOutput) EndpointUri() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ServiceResponse) string { return v.EndpointUri }).(pulumi.StringOutput)\n}",
"func (s *AzureBlobStorage) URI() string {\n\treturn \"azure://\" + s.options.Bucket + \"/\" + s.options.Prefix\n}",
"func (elasticsearch *Elasticsearch) GetURL() string {\n\tprotocol := \"http\"\n\tif elasticsearch.SSL {\n\t\tprotocol = \"https\"\n\t}\n\treturn fmt.Sprintf(\"%s://%s:%s\", protocol, elasticsearch.Host, strconv.Itoa(elasticsearch.Port))\n}",
"func GetQueueEndpoint(baseUri string, accountName string) string {\n\treturn fmt.Sprintf(\"https://%s.queue.%s\", accountName, baseUri)\n}",
"func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {\n\tif o.UseV2 {\n\t\tcfg, err := gcaws.V2ConfigFromURLParams(ctx, u.Query())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"open bucket %v: %v\", u, err)\n\t\t}\n\t\tclientV2 := s3v2.NewFromConfig(cfg)\n\t\treturn OpenBucketV2(ctx, clientV2, u.Host, &o.Options)\n\t}\n\tconfigProvider := &gcaws.ConfigOverrider{\n\t\tBase: o.ConfigProvider,\n\t}\n\toverrideCfg, err := gcaws.ConfigFromURLParams(u.Query())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"open bucket %v: %v\", u, err)\n\t}\n\tconfigProvider.Configs = append(configProvider.Configs, overrideCfg)\n\treturn OpenBucket(ctx, configProvider, u.Host, &o.Options)\n}",
"func (c *Config) GetAPIEndpoint() string {\n\treturn fmt.Sprintf(\"http://%s\", c.GetServerAddress())\n}",
"func (o *URLOpener) OpenBucketURL(ctx context.Context, u *url.URL) (*blob.Bucket, error) {\n\topts, err := o.ServiceURLOptions.withOverrides(u.Query())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsvcURL, err := NewServiceURL(opts)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient, err := o.MakeClient(svcURL, ContainerName(u.Host))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn OpenBucket(ctx, client, &o.Options)\n}",
"func S3URL(path string) string {\n\tregion, bucket, key := s3parse(path)\n\n\tif region == \"\" {\n\t\tregion = s3region // fallback to default region\n\t}\n\tif region == \"\" {\n\t\ts3log(\"S3URL: could not find region: [%s]\", path)\n\t\treturn \"\"\n\t}\n\n\treturn fmt.Sprintf(\"https://s3-%s.amazonaws.com/%s/%s\", region, bucket, key)\n}",
"func getURI(webhook *triggersv1.WebhookInterceptor, ns string) (*url.URL, error) {\n\t// TODO: This should work for any Addressable.\n\t// Use something like https://github.com/knative/eventing-contrib/blob/7c0fc5cfa8bd44da0767d9e7b250264ea6eb7d8d/pkg/controller/sinks/sinks.go#L32\n\tswitch {\n\tcase webhook.URL != nil:\n\t\treturn webhook.URL.URL(), nil\n\tcase webhook.ObjectRef.Kind == \"Service\" && webhook.ObjectRef.APIVersion == \"v1\":\n\t\t// TODO: Also assuming port 80 and http here. Use DNS/or the env vars?\n\t\tif webhook.ObjectRef.Namespace != \"\" {\n\t\t\tns = webhook.ObjectRef.Namespace\n\t\t}\n\t\treturn url.Parse(fmt.Sprintf(\"http://%s.%s.svc/\", webhook.ObjectRef.Name, ns))\n\tdefault:\n\t\treturn nil, errors.New(\"invalid objRef\")\n\t}\n}",
"func (client *Client) GetBrokerURL(name string) (string, error) {\n\tnamespace := client.Namespace\n\tbrokerMeta := base.MetaEventing(name, namespace, \"Broker\")\n\treturn base.GetAddressableURI(client.Dynamic, brokerMeta)\n}",
"func ExampleAPIClient_CompileEndpointURL() {\n\tc := APIClient{\n\t\tClient: client.New(\n\t\t\tfunc(c *client.Client) {\n\t\t\t\tc.User = \"myusername\"\n\t\t\t\tc.Password = \"mypassword\"\n\t\t\t},\n\t\t),\n\t\tBaseUrl: \"https://url.to.publit\",\n\t}\n\n\tendpoint := \"someendpoint\"\n\n\turl := c.CompileEndpointURL(endpoint)\n\n\tfmt.Println(url)\n\t// Output: https://url.to.publit/production/v2.0/someendpoint\n}",
"func (o FluxConfigurationBucketPtrOutput) Url() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FluxConfigurationBucket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Url\n\t}).(pulumi.StringPtrOutput)\n}",
"func (s *Storage) PublicURL(ctx context.Context, key string) (string, error) {\n\tinput := &s3.GetObjectInput{\n\t\tBucket: &s.Bucket,\n\t\tKey: &key,\n\t}\n\treq, _ := s.svc.GetObjectRequest(input)\n\treturn req.Presign(time.Hour)\n}",
"func (c *bytemarkClient) GetEndpoint() string {\n\treturn c.urls.Brain\n}",
"func (o BucketOutput) IntranetEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.StringOutput { return v.IntranetEndpoint }).(pulumi.StringOutput)\n}",
"func GetEndpointOfBucketFromCache(bucketName string) (string, bool) {\n\tlog.Infof(\"Start to get endpoint of bucket %s from cache\", bucketName)\n\tif endpoint, ok := bceconf.BucketEndpointCacheProvider.Get(bucketName); ok {\n\t\tlog.Infof(\"Success get endpoint of bucket %s from cache, endpoint is %s\", bucketName,\n\t\t\tendpoint)\n\t\treturn endpoint, true\n\t}\n\tlog.Infof(\"Failed to get endpoint of bucket %s from cache\", bucketName)\n\treturn \"\", false\n}",
"func Endpoint() string {\n\treturn Server.URL\n}",
"func (c *Client) GetEndpoint() error {\n\treq, err := http.NewRequest(\"GET\", \"https://drive.amazonaws.com/drive/v1/account/endpoint\", nil)\n\treq.Header.Add(\"Authorization\", \"Bearer \"+c.config.AccessToken)\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.Status != \"200 OK\" {\n\t\treturn errors.New(\"Unsuccessful response for getting endpoint\")\n\t}\n\n\tdec := json.NewDecoder(resp.Body)\n\tvar ep endpointStruct\n\tif err := dec.Decode(&ep); err != nil {\n\t\tpanic(err)\n\t}\n\n\tc.config.ContentUrl = ep.ContentUrl\n\tc.config.MetaDataUrl = ep.MetaDataUrl\n\n\treturn nil\n}",
"func (dsn *S3DSN) URL(filename string) string {\n\tif dsn.PublicURL != nil {\n\t\tu, _ := url.Parse(dsn.PublicURL.String())\n\t\tu.Path = path.Join(filepath.Dir(u.Path), filename)\n\t\treturn u.String()\n\t}\n\n\tsvc := s3.New(dsn.Sess)\n\n\treq, _ := svc.GetObjectRequest(&s3.GetObjectInput{\n\t\tBucket: aws.String(dsn.Bucket),\n\t\tKey: aws.String(dsn.Key),\n\t})\n\n\turi, err := req.Presign(24 * 5 * time.Hour) // TODO: Auth URL: Public or Private URL\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tu, _ := url.Parse(uri) // TODO: Auth URL: Public or Private URL\n\tu.Path = path.Join(filepath.Dir(u.Path), filename)\n\tu.RawQuery = \"\"\n\tu.Fragment = \"\"\n\treturn u.String()\n}",
"func (c *Client) URL() string {\n\tu := url.URL{Scheme: \"http\", Host: c.host, Path: c.path}\n\treturn u.String()\n}",
"func (o BucketOutput) ExtranetEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Bucket) pulumi.StringOutput { return v.ExtranetEndpoint }).(pulumi.StringOutput)\n}",
"func (cPtr *Config) HTTPEndpoint() string {\n\tif cPtr.HTTPHost == \"\" {\n\t\treturn \"\"\n\t}\n\treturn fmt.Sprintf(\"%-s:%-d\", cPtr.HTTPHost, cPtr.HTTPPort)\n}",
"func objectUrl(bucket string, filepath string) string {\n\treturn fmt.Sprintf(\"https://www.googleapis.com/storage/v1/b/%s/o/%s\", bucket, filepath)\n}",
"func (bc *BucketConfig) fixEndpoint() {\n\tif u, err := url.Parse(bc.Endpoint); err == nil {\n\t\tbc.Endpoint = u.Host\n\t\tif strings.ToLower(u.Scheme) == \"https\" {\n\t\t\tbc.Secure = true\n\t\t}\n\t}\n}",
"func (config *Config) GetURL() *url.URL {\n\tresolver := format.NewPropKeyResolver(config)\n\treturn config.getURL(&resolver)\n}",
"func (config *Config) GetURL() *url.URL {\n\tresolver := format.NewPropKeyResolver(config)\n\treturn config.getURL(&resolver)\n}",
"func TagEndpoint() Endpoint {\n\treturn func(u *url.URL) {\n\t\taddToURL(u, \"tags\")\n\t}\n}",
"func (lh *cloudFrontStorageMiddleware) URLFor(ctx context.Context, path string, options map[string]interface{}) (string, error) {\n\t// TODO(endophage): currently only supports S3\n\tkeyer, ok := lh.StorageDriver.(S3BucketKeyer)\n\tif !ok {\n\t\tcontext.GetLogger(ctx).Warn(\"the CloudFront middleware does not support this backend storage driver\")\n\t\treturn lh.StorageDriver.URLFor(ctx, path, options)\n\t}\n\n\tcfURL, err := lh.urlSigner.Sign(lh.baseURL+keyer.S3BucketKey(path), time.Now().Add(lh.duration))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn cfURL, nil\n}",
"func (c *Client) AuthURL(provider string) (string, error) {\n\tswitch provider {\n\tdefault:\n\t\treturn \"\", errors.New(\"No provider specified\")\n\n\tcase \"google\":\n\t\treturn c.googledrive.AuthURL(), nil\n\t}\n}",
"func (e *Etcd) URL() (string, error) {\n\tif e.AddressManager == nil {\n\t\treturn \"\", fmt.Errorf(\"Etcd's AddressManager is not initialized\")\n\t}\n\tport, err := e.AddressManager.Port()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\thost, err := e.AddressManager.Host()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"http://%s:%d\", host, port), nil\n}",
"func (c APIClient) CompileEndpointURL(endpoint string) string {\n\treturn fmt.Sprintf(\"%v/%v/%v/%v\", c.BaseURL, c.API, API_VERSION, endpoint)\n}",
"func (f *FileBlob) Url() *url.URL {\n\treturn f.url\n}",
"func (m *mockAPI) URL() string {\n\tif !m.isServing() {\n\t\tpanic(\"cannot retrieve endpoint from un-started server\")\n\t}\n\n\treturn m.Server.URL\n}",
"func ParseEndpoint(endpoint string) (*url.URL, error) {\n\tendpoint = FormatEndpoint(endpoint)\n\n\tu, err := url.Parse(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn u, nil\n}",
"func (a *Authorization) GetURL() string {\n\tif a == nil || a.URL == nil {\n\t\treturn \"\"\n\t}\n\treturn *a.URL\n}",
"func (s *Service) URL(scheme, path string) (string, error) {\n\tif !s.Public { // If the service is not public, fallback to a random node\n\t\thost, err := s.One()\n\t\tif err != nil {\n\t\t\treturn \"\", errgo.Mask(err)\n\t\t}\n\n\t\turl, err := host.URL(scheme, path)\n\t\tif err != nil {\n\t\t\treturn \"\", errgo.Mask(err)\n\t\t}\n\t\treturn url, nil\n\t}\n\n\t// If the service IS public, take the service node.\n\n\tvar url string\n\tvar port string\n\tvar ok bool\n\tif port, ok = s.Ports[scheme]; !ok {\n\t\treturn \"\", errors.New(\"unknown scheme\")\n\t}\n\n\tif s.User != \"\" {\n\t\turl = fmt.Sprintf(\"%s://%s:%s@%s:%s%s\",\n\t\t\tscheme, s.User, s.Password, s.Hostname, port, path,\n\t\t)\n\t} else {\n\t\turl = fmt.Sprintf(\"%s://%s:%s%s\",\n\t\t\tscheme, s.Hostname, port, path,\n\t\t)\n\t}\n\treturn url, nil\n}",
"func (file *File) GetURL() (URL *url.URL, err error) {\n\n\t// Get upload metadata\n\tuploadMetadata := file.upload.Metadata()\n\tif uploadMetadata == nil || uploadMetadata.ID == \"\" {\n\t\treturn nil, fmt.Errorf(\"upload has not been created yet\")\n\t}\n\n\t// Get file metadata\n\tfileMetadata := file.Metadata()\n\tif fileMetadata == nil || fileMetadata.ID == \"\" {\n\t\treturn nil, fmt.Errorf(\"file has not been uploaded yet\")\n\t}\n\n\tmode := \"file\"\n\tif uploadMetadata.Stream {\n\t\tmode = \"stream\"\n\t}\n\n\tvar domain string\n\tif uploadMetadata.DownloadDomain != \"\" {\n\t\tdomain = uploadMetadata.DownloadDomain\n\t} else {\n\t\tdomain = file.upload.client.URL\n\t}\n\n\tfileURL := fmt.Sprintf(\"%s/%s/%s/%s/%s\", domain, mode, uploadMetadata.ID, fileMetadata.ID, fileMetadata.Name)\n\n\t// Parse to get a nice escaped url\n\treturn url.Parse(fileURL)\n}",
"func (o DiagnosticsStorageAccountConfigOutput) TableEndpoint() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticsStorageAccountConfig) string { return v.TableEndpoint }).(pulumi.StringOutput)\n}",
"func (e *GSEvent) URL() string {\n\treturn fmt.Sprintf(\"gs://%v/%v\", e.Bucket, e.Name)\n}",
"func (p *Pipe) GetEndpoint() utils.Endpoint {\n\treturn *p.remote\n}",
"func (client *BaseClient) URL() *url.URL {\n\treturn client.url\n}",
"func (c *Client) URL() string {\n\treturn c.url\n}",
"func (c Client) AuthURL(state string) string {\n\tvalues := url.Values{\"client_id\": {c.ISS}, \"state\": {\"state\"}, \"response_type\": {\"code\"}}\n\treturn fmt.Sprintf(\"%s?%s\", c.Endpoint.AuthURL, values.Encode())\n}",
"func (o BlobOutput) Url() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Blob) pulumi.StringOutput { return v.Url }).(pulumi.StringOutput)\n}",
"func (k *Key) URL() string {\n\treturn k.url.String()\n}",
"func (b *STSBuilder) OIDCEndpointURL(value string) *STSBuilder {\n\tb.oidcEndpointURL = value\n\tb.bitmap_ |= 1\n\treturn b\n}",
"func GetSignedURL(ctx context.Context, bucketName, fileName, mimeType string, method SignedURLType, expires time.Time) (string, error) {\n\treturn storage.SignedURL(bucketName, fileName, &storage.SignedURLOptions{\n\t\tGoogleAccessID: gcp.DefaultServiceAccountName,\n\t\tMethod: string(method),\n\t\tExpires: expires,\n\t\tContentType: mimeType,\n\t\tSignBytes: func(b []byte) ([]byte, error) {\n\t\t\tresp, err := gcp.IAMService.Projects.ServiceAccounts.SignBlob(\n\t\t\t\tgcp.DefaultServiceAccountID,\n\t\t\t\t&iam.SignBlobRequest{BytesToSign: base64.StdEncoding.EncodeToString(b)},\n\t\t\t).Context(ctx).Do()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn base64.StdEncoding.DecodeString(resp.Signature)\n\t\t},\n\t})\n}",
"func Endpoint(rawurl string) Opt {\n\treturn func(c *Client) Opt {\n\t\told := c.url\n\t\tc.url = rawurl\n\t\treturn Endpoint(old)\n\t}\n}"
] | [
"0.70004314",
"0.69181967",
"0.68010235",
"0.67675567",
"0.6722981",
"0.67146015",
"0.66484934",
"0.6637463",
"0.6579585",
"0.6564449",
"0.65565455",
"0.65559167",
"0.63913435",
"0.6352629",
"0.6349848",
"0.6302285",
"0.618815",
"0.6147162",
"0.6122564",
"0.6111489",
"0.61059636",
"0.60998404",
"0.60981196",
"0.6021514",
"0.5998207",
"0.5989331",
"0.59882164",
"0.59777623",
"0.59691715",
"0.59646314",
"0.5960962",
"0.59533095",
"0.594109",
"0.59140813",
"0.5896697",
"0.58837837",
"0.58632743",
"0.5860131",
"0.58565396",
"0.5851351",
"0.5848697",
"0.58230954",
"0.5819771",
"0.58100677",
"0.5780483",
"0.57672477",
"0.5761371",
"0.5758571",
"0.57388175",
"0.572462",
"0.57201296",
"0.5710216",
"0.5704436",
"0.5692153",
"0.56809443",
"0.5661995",
"0.56529605",
"0.5644086",
"0.56404895",
"0.56394166",
"0.5632574",
"0.563062",
"0.56273717",
"0.5626032",
"0.56240976",
"0.5621422",
"0.55981964",
"0.55968565",
"0.5594355",
"0.55903727",
"0.55789566",
"0.55729526",
"0.5572741",
"0.5561862",
"0.55364454",
"0.5529068",
"0.5521675",
"0.5521675",
"0.55031174",
"0.5495173",
"0.54704946",
"0.5468374",
"0.5465888",
"0.5464967",
"0.54636145",
"0.5456741",
"0.5430231",
"0.5421669",
"0.5418979",
"0.5413911",
"0.540603",
"0.5383051",
"0.53821355",
"0.5372416",
"0.53643835",
"0.53615814",
"0.53576255",
"0.53554344",
"0.5354586",
"0.53478175"
] | 0.8297435 | 0 |
Reads a URL into a byte slice | func ReadUrl(url string) ([]byte, error) {
resp, err := http.Get(url)
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return nil, errors.New(url + ": " + resp.Status)
}
body, err := ioutil.ReadAll(resp.Body)
// log.Println(url + ":", string(body))
return body, err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ReadURL(url string, client *http.Client, header *http.Header) (body []byte, err error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif header != nil {\n\t\treq.Header = *header\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tklog.V(2).InfoS(\"Failed to read URL\", \"statusCode\", resp.StatusCode, \"URL\", url)\n\t\treturn nil, &HTTPError{\n\t\t\tStatusCode: resp.StatusCode,\n\t\t\tURL: url,\n\t\t}\n\t}\n\n\tlimitedReader := &io.LimitedReader{R: resp.Body, N: maxReadLength}\n\tcontents, err := io.ReadAll(limitedReader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif limitedReader.N <= 0 {\n\t\treturn nil, errors.New(\"the read limit is reached\")\n\t}\n\n\treturn contents, nil\n}",
"func ReadURL(url string) ([]byte, error) {\n response, err := http.Get(url)\n if err != nil {\n return nil, err\n }\n context, err := ioutil.ReadAll(response.Body)\n response.Body.Close()\n return context, err\n}",
"func readurl(url string) (data string, err error) {\n\tvar resp *http.Response\n\tresp, err = http.Get(url)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer resp.Body.Close()\n\n\t// read data from url\n\tvar bytes []byte\n\tbytes, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = string(bytes)\n\treturn\n}",
"func ReadURLContents(targetURL string) ([]byte, error) {\n\tresp, err := http.Get(targetURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"request failed: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"response status code is %v\", resp.StatusCode)\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading response body failed: %v\", err)\n\t}\n\n\treturn b, nil\n}",
"func getRead(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, resp.Body.Close()\n}",
"func Get(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"wrong status code %d\\n\", resp.StatusCode)\n\t}\n\n\t//change io.Reader to bufio.Reader\n\tr := bufio.NewReader(resp.Body)\n\te := determinEncoding(r)\n\tuf8Reader := transform.NewReader(r, e.NewDecoder())\n\treturn ioutil.ReadAll(uf8Reader)\n}",
"func Read(urlPath string) (*bytes.Buffer, error) {\n\n\tresponse, err := coreHTTP.Get(urlPath) // nolint:gosec\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif response != nil {\n\t\tdefer response.Body.Close()\n\t}\n\tvar buf bytes.Buffer\n\t_, err = io.Copy(&buf, response.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &buf, nil\n}",
"func readFromURL(url string, writer io.Writer) error {\n\thttpTransport := new(http.Transport)\n\thttpTransport.Proxy = http.ProxyFromEnvironment\n\n\tc := &http.Client{Transport: httpTransport}\n\tr, err := c.Get(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer r.Body.Close()\n\tif r.StatusCode >= 400 {\n\t\treturn fmt.Errorf(\"%v returned %d\", url, r.StatusCode)\n\t}\n\t_, err = io.Copy(writer, r.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func readFileAtURL(url string) ([]byte, error) {\n\tclient := &http.Client{}\n\tlog.Printf(\"Fetching file at %q\", url)\n\treq, _ := http.NewRequest(http.MethodGet, url, nil)\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Printf(\"Request failed: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Request returned non-OK status: %s\", resp.Status)\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Could not read file contents: %s\", err.Error())\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}",
"func readerFromURL(url string) io.ReadCloser {\n\tfmt.Println(fmt.Sprintf(\"Getting: %s\", url))\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Couldn't download file %s: %s\", url, err))\n\t}\n\n\tfileBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Unable to read file to bytes %s: %s\", url, err))\n\t}\n\tresp.Body.Close()\n\n\treturn ioutil.NopCloser(bytes.NewReader(fileBytes))\n}",
"func Get(url string) ([]byte, error) {\n\trsp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\n\t}\n\tdefer rsp.Body.Close()\n\treturn ioutil.ReadAll(rsp.Body)\n}",
"func Get(url string) ([]byte, error) {\n\tclient := http.Client{\n\t\tTimeout: time.Second * 3,\n\t}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tdata, err := ioutil.ReadAll(resp.Body)\n\n\treturn data, err\n}",
"func GetData(url string) []byte {\n\tlog.Println(\"getting data from URL: \", url)\n\n\tvar body []byte\n\n\t// define client with timeout of 10 seconds\n\tvar netClient = &http.Client{\n\t\tTimeout: time.Second * 10,\n\t}\n\n\tresp, err := netClient.Get(url)\n\tdefer resp.Body.Close()\n\tif err != nil{\n\t\tlog.Println(err)\n\t\treturn []byte(\"\")\n\t}\n\n\t// check for gzip data, unzip if needed\n\tif strings.Contains(url, \".gz\") {\n\t\tlog.Println(\"content encoded with gzip\")\n\t\tbody = GUnzip(resp.Body)\n\t} else {\n\t\tlog.Println(\"content not encoded\")\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tcheck(err)\n\t}\n\n\tlog.Println(\"data received\")\n\treturn body\n}",
"func (reader) ReadFromURL(url string) (img image.Image, err error) {\n\tresp, err := http.Get(url)\n\tif err != nil || resp.StatusCode != http.StatusOK {\n\t\treturn nil, errors.New(\"The image url provided is invalid\")\n\t}\n\tdefer resp.Body.Close()\n\timg, _, err = image.Decode(resp.Body)\n\treturn\n}",
"func getBytesByUrl(jsonUrl string) []byte {\n\n\t//Get respons form url\n\tresp, err := http.Get(jsonUrl)\n\tcheckErr(err)\n\n\t//Turn json into bytes\n\tbytes, _ := ioutil.ReadAll(resp.Body)\n\n\treturn bytes\n}",
"func GetBodyFromURL(url string, client *http.Client) ([]byte, error) {\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer util.Close(resp.Body)\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Failed to load: %s\", resp.Status)\n\t}\n\treturn ioutil.ReadAll(resp.Body)\n}",
"func (d *realDownloader) Get(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get for %v failed: %v\", url, err)\n\t}\n\tdefer resp.Body.Close()\n\trespBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to read body: %v\", err)\n\t}\n\treturn respBytes, nil\n}",
"func FetchURL(url string) []byte {\n\tresp, err := http.Get(url)\n\tif err != nil || resp.StatusCode != 200 {\n\t\tlog.Fatalf(\"Error fetching URL '%s': %s\", url, err)\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error reading body: %s\", err)\n\t}\n\treturn body\n}",
"func getURLContent(url string) ([]byte, error) {\n\tdur, err := time.ParseDuration(TimeoutDur)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tclient := http.Client{Timeout: dur}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte{}, err\n\n\t}\n\tdefer resp.Body.Close()\n\treturn body, err\n\n}",
"func GetURL(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}",
"func fetch(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Failed to read from URL\")\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}",
"func getStreamFromURL(fileURI string) (io.ReadCloser, error) {\n\turl, err := url.Parse(fileURI)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch url.Scheme {\n\tcase \"http\", \"https\":\n\t\tresp, err := http.Get(fileURI)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn resp.Body, nil\n\tcase \"file\", \"\":\n\t\tf, err := os.Open(fileURI)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn f, nil\n\tdefault:\n\t\treturn nil, errors.New(\"URL Not supported\")\n\t}\n}",
"func GetURL(url string) (reply []byte, err error) {\n\tcli := &http.Client{\n\t\tTimeout: RequestTimeout * time.Second,\n\t}\n\tresp, err := cli.Get(url)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\treply, err = ioutil.ReadAll(resp.Body)\n\treturn\n}",
"func ReadFromURI(uri string) (string, error) {\n\tvar data []byte\n\tif strings.HasPrefix(uri, \"http\") {\n\t\tresp, err := http.Get(uri)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tdata, err = ioutil.ReadAll(resp.Body)\n\t} else {\n\t\t// It should be a Filesystem uri\n\t\tabsPath, err := filepath.Abs(uri)\n\t\tdata, err = ioutil.ReadFile(absPath)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn string(data), nil\n}",
"func GetBytes(url string, data ...interface{}) []byte {\n\treturn RequestBytes(\"GET\", url, data...)\n}",
"func download_read(download *C.Download, bytes unsafe.Pointer, length C.size_t) C.ReadResult {\n\tdown, ok := universe.Get(download._handle).(*Download)\n\tif !ok {\n\t\treturn C.ReadResult{\n\t\t\terror: mallocError(ErrInvalidHandle.New(\"download\")),\n\t\t}\n\t}\n\n\tilength, ok := safeConvertToInt(length)\n\tif !ok {\n\t\treturn C.ReadResult{\n\t\t\terror: mallocError(ErrInvalidArg.New(\"length too large\")),\n\t\t}\n\t}\n\n\tvar buf []byte\n\t*(*reflect.SliceHeader)(unsafe.Pointer(&buf)) = reflect.SliceHeader{\n\t\tData: uintptr(bytes),\n\t\tLen: ilength,\n\t\tCap: ilength,\n\t}\n\n\tn, err := down.download.Read(buf)\n\treturn C.ReadResult{\n\t\tbytes_read: C.size_t(n),\n\t\terror: mallocError(err),\n\t}\n}",
"func GetUrlContent(url string, depth int) ([]byte, error) {\n\tlog.Println(depth, url)\n\tvar result []byte\n\tvar err error\n\tclient := &http.Client{\n\t\tCheckRedirect: nil,\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Println(\"http.NewRequest\", err)\n\t\treturn nil, err\n\t}\n\treq.Header.Add(\"User-Agent\", \"curl/7.50.3\")\n\treq.Header.Add(\"Accept\", \"*/*\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Println(\"client.Do\", err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tfinalUrl := resp.Request.URL.String()\n\tif finalUrl != url {\n\t\tdepth1 := depth + 1\n\t\tresult, err = GetUrlContent(finalUrl, depth1)\n\t\treturn result, err\n\t} else {\n\t\tif resp.StatusCode == 200 {\n\t\t\tresult, err = ioutil.ReadAll(resp.Body)\n\t\t\tlog.Println(\"S\", url)\n\t\t\treturn result, err\n\t\t} else {\n\t\t\tlog.Println(\"client.Do\", resp.StatusCode, url)\n\t\t\tfmt.Printf(resp.Status)\n\t\t\treturn nil, errors.New(\"Response Error\")\n\t\t}\n\t}\n}",
"func ReadUrl(url string) {\n\tval, err := ReadNewsFrom(url)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\trenderToSTDOUT(val)\n}",
"func GetResponse(url string) []byte {\n resp, err := http.Get(url)\n\n // handle the error if there is one\n if err != nil {\n panic(err)\n }\n\n // do this now so it won't be forgotten\n defer resp.Body.Close()\n\n // reads html as a slice of bytes\n html, err := ioutil.ReadAll(resp.Body)\n if err != nil {\n panic(err)\n }\n\n return html\n}",
"func get(url string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"User-Agent\", userAgent)\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"got status code: %s\", resp.Status)\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}",
"func Fetch(url string) ([]byte, error) {\n\trequest, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.Header.Add(\"User-Agent\", \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36 LBBROWSER\")\n\tresp, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"wrong status code %d\\n\", resp.StatusCode)\n\t}\n\n\t//change io.Reader to bufio.Reader\n\tr := bufio.NewReader(resp.Body)\n\te := determinEncoding(r)\n\tuf8Reader := transform.NewReader(r, e.NewDecoder())\n\treturn ioutil.ReadAll(uf8Reader)\n}",
"func GetUTF8BodyFromURL(urlStr string, ignoreIBS bool) (string, error) {\n\tclient := http.DefaultClient\n\tresp, err := client.Get(urlStr)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdefer resp.Body.Close()\n\tbyt, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Detect charset.\n\treturn GetUTF8Body(byt, resp.Header.Get(\"Content-Type\"), ignoreIBS)\n}",
"func (c *Client) Get(url string, header map[string]string) ([]byte, error) {\n\treq, err := c.newRequest(\"GET\", url, header)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn bytes, nil\n}",
"func DownloadURL(url string) ([]byte, error) {\n urls, err := getDownloadURLs(url, 1400000)\n if err != nil {\n return nil, err\n }\n\n output := \"\"\n\n for _, value := range urls {\n downloaded, _ := downloadPart(value)\n\n output += downloaded\n }\n\n return []byte(output), nil\n}",
"func Download(ctx context.Context, URL string) ([]byte, error) {\n\tparsedURL, err := url.Parse(URL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tclient, err := getClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbucket := client.Bucket(parsedURL.Host)\n\tobjectPath := string(parsedURL.Path[1:])\n\trc, err := bucket.Object(objectPath).NewReader(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer rc.Close()\n\n\tdata, err := ioutil.ReadAll(rc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}",
"func GetContent(url string) ([]byte, error) {\n\tr, err := GetContentReader(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Close()\n\treturn ioutil.ReadAll(r)\n}",
"func TestDownloadFileFromUrl(t *testing.T) {\n\n\tbuf, err := DownloadFileFromUrl(pdfUrl)\n\tif err != nil || buf == nil {\n\t\tlog.Printf(\n\t\t\t\"failed to download from url: %v\\n\"+\n\t\t\t\t\"Error: %v\", pdfUrl, err)\n\t\tt.FailNow()\n\t}\n\n\tif buf.Len() == 0 {\n\t\tlog.Printf(\"no empty buffer for url: %v\", pdfUrl)\n\t\tt.FailNow()\n\t}\n\n\tcontent := buf.String()\n\tif content == \"\" || content[0:4] != \"%PDF\" {\n\t\tlog.Printf(\"failed to decode for url: %v\", pdfUrl)\n\t\tt.FailNow()\n\t}\n}",
"func (p Parser) GetContents(url string, limit int) (string, error) {\n p.client = &http.Client{}\n resp, err := p.client.Get(url)\n if err != nil {\n return \"\", err\n }\n defer resp.Body.Close()\n\n if resp.StatusCode != http.StatusOK {\n return \"\", fmt.Errorf(\"Http code returned: %d\", resp.StatusCode)\n }\n\n data, err := ioutil.ReadAll(resp.Body)\n if err != nil {\n return \"\", err\n }\n\n str := string(data)\n if limit > 0 {\n str = str[:limit]\n }\n\n return str, nil\n}",
"func GetContentReader(url string) (io.ReadCloser, error) {\n\t// http.Get() allows up to 10 redirects\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif res.StatusCode/100 != 2 {\n\t\treturn nil, errors.New(url + \" returned non-successful status: \" + res.Status)\n\t}\n\treturn res.Body, nil\n}",
"func FetchFromURI(uri string, limit int64) ([]byte, error) {\n\tresponse, err := http.Get(uri)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer response.Body.Close()\n\n\treturn ioutil.ReadAll(io.LimitReader(response.Body, limit))\n}",
"func HTTPGetBlob(url string) ([]byte, int, error) {\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlogger.Errorf(\"get url %s error\\n\", url)\n\t\treturn nil, 0, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogger.Errorf(\"Read body from url %s error\\n\", url)\n\t\treturn nil, 0, err\n\t}\n\n\treturn body, int(resp.ContentLength), nil\n}",
"func GetFileBytes(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\tbuf, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}",
"func netread(url string) (io.ReadCloser, error) {\n\tclient := &http.Client{Timeout: 30 * time.Second}\n\tresp, err := client.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"unable to retreive network data for %s (%s)\", url, resp.Status)\n\t}\n\treturn resp.Body, nil\n}",
"func contentFromServer(url string) string {\n resp, err := http.Get(url)\n checkError(err)\n\n defer resp.Body.Close()\n bytes, err := ioutil.ReadAll(resp.Body)\n checkError(err)\n\n return string(bytes)\n}",
"func (b *BaseProvider) GetURL(u string) (string, []byte, error) {\n\tglog.Infof(\"Getting URL %s\", u)\n\tresp, err := b.Client.Get(u)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tfilename := \"\"\n\tcontHeader := resp.Header.Get(\"Content-Disposition\")\n\tres := strings.Split(contHeader, \"; \")\n\tfor _, res := range res {\n\t\tif strings.HasPrefix(res, \"filename=\") {\n\t\t\tfilename = strings.Split(res, \"=\")[1]\n\t\t}\n\t}\n\tfilename = strings.Trim(filename, \"\\\"\")\n\n\tdefer resp.Body.Close()\n\tcontent, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn filename, content, err\n\t}\n\treturn filename, content, nil\n}",
"func downloadFromURL(u string) {\n\tresp, err := http.Get(u)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\n\tfileName, err := downloadingFileName(u)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tout, err := createFile(fileName)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() {\n\t\t_ = out.Close()\n\t}()\n\n\t_, err = io.Copy(out, resp.Body)\n}",
"func read(url string) (image.Image, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"err problem creating http request to read the image: %s\", err.Error())\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"err problem executing the http request to read the image: %s\", err.Error())\n\t}\n\n\tdefer resp.Body.Close()\n\n\tm, _, err := image.Decode(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"err problem reading the image from the content: %s\", err.Error())\n\t}\n\n\treturn m, nil\n}",
"func (d *downloader) Read(buf []byte) (int, error) {\n\t//if we don't have a response body, we're at EOF\n\tif d.Reader == nil {\n\t\treturn 0, io.EOF\n\t}\n\n\t//read from the current response body\n\tbytesRead, err := d.Reader.Read(buf)\n\td.BytesRead += int64(bytesRead)\n\tswitch err {\n\tcase nil:\n\t\treturn bytesRead, err\n\tcase io.EOF:\n\t\t//current response body is EOF -> close it\n\t\terr = d.Reader.Close()\n\t\td.Reader = nil\n\t\tif err != nil {\n\t\t\treturn bytesRead, err\n\t\t}\n\t\td.Reader = nil\n\tdefault:\n\t\t//unexpected read error\n\t\tif !d.shouldRetry() {\n\t\t\treturn bytesRead, err\n\t\t}\n\t\tlogg.Error(\"restarting GET %s after read error at offset %d: %s\",\n\t\t\td.URI, d.BytesRead, err.Error(),\n\t\t)\n\t\terr := d.Reader.Close()\n\t\tif err != nil {\n\t\t\tlogg.Error(\n\t\t\t\t\"encountered additional error when trying to close the existing reader: %s\",\n\t\t\t\terr.Error(),\n\t\t\t)\n\t\t}\n\t}\n\n\t//is there a next chunk?\n\tif d.BytesRead == d.BytesTotal {\n\t\treturn bytesRead, io.EOF\n\t}\n\n\t//get next chunk\n\tresp, headers, err := d.getNextChunk()\n\tif err != nil {\n\t\treturn bytesRead, err\n\t}\n\tif headers.ContentRangeStart != d.BytesRead {\n\t\tresp.Body.Close()\n\t\treturn bytesRead, fmt.Errorf(\n\t\t\t\"expected next segment to start at offset %d, but starts at %d\",\n\t\t\td.BytesRead, headers.ContentRangeStart,\n\t\t)\n\t}\n\td.Reader = resp.Body\n\treturn bytesRead, nil\n}",
"func (r *HTTPRetriever) retrieve(url string) []byte {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Fatalf(\"Error: %v\", err)\n\t}\n\n\tdefer resp.Body.Close()\n\tbody, _ := ioutil.ReadAll(resp.Body)\n\treturn body\n}",
"func Get(url string) ([]byte, error) {\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\t//logger.Error(\"NewRequest error\", logger.Any(\"err\", err), logger.String(\"url\", url))\n\t\treturn nil, err\n\t}\n\n\tresp, err := client.Do(req)\n\n\tif err != nil {\n\t\t//logger.Error(\"Do error\", logger.Any(\"err\", err.Error()), logger.String(\"url\", url))\n\t\treturn nil, err\n\t}\n\t//if resp.StatusCode != http.StatusOK {\n\t//\t_, err := ioutil.ReadAll(resp.Body)\n\t//\tif err == nil {\n\t//\t\tresp.Body.Close()\n\t//\t\t//logger.Error(\"ReadAll error\", logger.Any(\"err\", string(bz2)), logger.String(\"url\", url))\n\t//\t}\n\t//}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode == http.StatusOK {\n\t\tbz, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\t//logger.Error(\"ioutil.ReadAll err\", logger.Any(\"io\", err), logger.String(\"url\", url))\n\t\t\treturn nil, err\n\t\t}\n\t\treturn bz, nil\n\t}\n\treturn nil, nil\n}",
"func (a ReverseHttpFile) ReadAt(p []byte, off int64) (int, error) {\n\tif _, err := a.File.Seek(off, io.SeekStart); err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn a.File.Read(p)\n}",
"func (r *httpRetriever) get(uri *url.URL) (b []byte, err error) {\n\treturn r.getFile(uri)\n}",
"func ExampleRead() {\n\tinput := string(golden.Read(t, golden.Input))\n\tgot, err := base64.RawURLEncoding.DecodeString(input)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgolden.Assert(t, got)\n}",
"func getHttp(url url.URL) (io.ReadCloser, error) {\n\tresp, err := http.Get(url.String())\n\tif err != nil {\n\t\tlog.Printf(\"HTTP failed to GET url=%s. error=%s\\n\", url.String(), err)\n\t\treturn nil, err\n\t}\n\n\treturn resp.Body, nil\n}",
"func grab(path string) ([]byte, error) {\n\tres, err := http.Get(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuf, err := base64.StdEncoding.DecodeString(string(body))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buf, nil\n}",
"func loadDataURL(url string) ([]byte, error) {\n\n\tvar du dataURL\n\terr := parseDataURL(url, &du)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Checks for valid media type\n\tfound := false\n\tfor i := 0; i < len(validMediaTypes); i++ {\n\t\tif validMediaTypes[i] == du.MediaType {\n\t\t\tfound = true\n\t\t\tbreak\n\t\t}\n\t}\n\tif !found {\n\t\treturn nil, fmt.Errorf(\"data URI media type:%s not supported\", du.MediaType)\n\t}\n\n\t// Checks encoding\n\tif du.Encoding != \"base64\" {\n\t\treturn nil, fmt.Errorf(\"data URI encoding:%s not supported\", du.Encoding)\n\t}\n\n\t// Decodes data from BASE64\n\tdata, err := base64.StdEncoding.DecodeString(du.Data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}",
"func getContent(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"GET error: %v\", err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Status error: %v\", resp.StatusCode)\n\t}\n\n\tdata, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Read body: %v\", err)\n\t}\n\n\treturn data, nil\n}",
"func downloadWebPage(url string) []byte {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\tcontent, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn content\n}",
"func getBufferFromInputResource(inputPath string) ([]byte, error) {\n\n\turl, err := url.Parse(inputPath)\n\tif err != nil {\n\t\treturn nil, errors.New(\"Error parsing input file\")\n\t}\n\tif url.Scheme == \"http\" || url.Scheme == \"https\" {\n\t\tres, err := http.Get(inputPath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer res.Body.Close()\n\t\treturn ioutil.ReadAll(res.Body)\n\t} else if url.Scheme == \"ftp\" {\n\t\treturn nil, errors.New(\"ftp not supported yet\")\n\n\t} else {\n\t\treturn ioutil.ReadFile(inputPath)\n\t}\n}",
"func GetFileSizeURL(url string) int64 {\n\tresp, err := http.Head(url)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tlog.Fatal(resp.Status)\n\t}\n\tsize, _ := strconv.Atoi(resp.Header.Get(\"Content-Length\"))\n\treturn int64(size)\n}",
"func (download *Download) Read(p []byte) (int, error) {\n\treturn download.pr.Read(p)\n}",
"func HTTPGet(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tglog.Errorf(\"URL=%s; Err=%s\", url, err)\n\t\treturn []byte{}, err\n\t}\n\n\tdefer resp.Body.Close()\n\treturn ioutil.ReadAll(resp.Body)\n}",
"func ConnRead(c *tls.Conn, b []byte) (int, error)",
"func read(url string, c chan *gofeed.Feed) {\n\tresp, err := request(url)\n\n\tif err != nil {\n\t\tc <- nil\n\t\treturn\n\t}\n\n\tif resp.StatusCode != http.StatusOK {\n\t\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\tlogrus.Error(err)\n\t\t\tc <- nil\n\t\t\treturn\n\t\t}\n\n\t\tbodyString := string(bodyBytes)\n\t\tlogrus.WithField(\"body\", bodyString).Error(resp.Status)\n\t\tc <- nil\n\t\treturn\n\t}\n\n\tc <- parseRequest(resp)\n}",
"func Download(url string) []byte {\n\tvar resp *http.Response\n\tvar body []byte\n\n\tsleeper()\n\n\tresp, body = httpRequest(url)\n\tif resp != nil {\n\t\tif resp.StatusCode != 200 {\n\t\t\tConfiguration.Logger.Warning.Printf(\"[%d] StatusCode - %s\\n\", resp.StatusCode, url)\n\t\t}\n\t} else {\n\t\tConfiguration.Logger.Warning.Printf(\"BodyNil - %s\\n\", url)\n\t}\n\treturn body\n}",
"func LoadFromHttp(source string) ([]byte, error) {\n\tvar resp *http.Response\n\n\tif !(strings.HasPrefix(source, \"http://\") || strings.HasPrefix(source, \"https://\")) {\n\t\treturn nil, WrongFormatError\n\t}\n\n\t// Create custom http transport\n\t// From: https://www.loginradius.com/blog/async/tune-the-go-http-client-for-high-performance/\n\thttpTransport := http.DefaultTransport.(*http.Transport).Clone()\n\thttpTransport.MaxIdleConns = 10\n\thttpTransport.MaxConnsPerHost = 10\n\thttpTransport.IdleConnTimeout = 60 * time.Second\n\thttpTransport.MaxIdleConnsPerHost = 10\n\thttpTransport.ResponseHeaderTimeout = httpResponseHeadersTimeout\n\n\t// Prepare request\n\tclient := http.Client{\n\t\tTransport: httpTransport,\n\t}\n\n\treq, err := http.NewRequest(\"GET\", source, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Execute request\n\tctx, ctxCancel := context.WithTimeout(context.Background(), httpRequestTimeout)\n\tdefer ctxCancel()\n\tresp, err = client.Do(req.WithContext(ctx))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Check if the request succeeded\n\tif resp.StatusCode != 200 {\n\t\t_ = resp.Body.Close()\n\n\t\treturn nil, fmt.Errorf(\"unexpected HTTP status code [http-status=%v]\", resp.Status)\n\t}\n\n\t// Read response body\n\tvar responseBody []byte\n\tresponseBody, err = ioutil.ReadAll(resp.Body)\n\t_ = resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Done\n\treturn responseBody, nil\n}",
"func (c Client) Download(url string) (*http.Response, error) {\n\tlog.Logger().Debugf(\"getting url: %s\", url)\n\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Accept\", \"application/octet-stream\")\n\n\tresp, err := c.http.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t//defer resp.Body.Close()\n\n\tsuccess := resp.StatusCode >= 200 && resp.StatusCode < 300\n\tif !success {\n\t\tlog.Logger().Debugf(\"failed with resp code %d\", resp.StatusCode)\n\t\treturn nil, handleHTTPError(resp)\n\t}\n\n\tif resp.StatusCode == http.StatusNoContent {\n\t\treturn nil, errors.New(\"no content\")\n\t}\n\n\treturn resp, nil\n}",
"func getInfo(url string) []byte {\n\n\tres, err := http.Get(url)\n\tcheck(err)\n\tinfo, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\tcheck(err)\n\n\treturn info\n\n}",
"func (f *Fast) download(url string, byteLenChan chan<- int64, done <-chan struct{}) (err error) {\n\tr := rq.Get(url)\n\t_, res, err := f.client.Send(r, false)\n\tif err != nil {\n\t\terr = errInternet\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\tbuf := make([]byte, bufferSize)\n\tvar length int\n\n\t// read res.Body loop\n\t// loop till <-done\n\t// or eof\nloop:\n\tfor {\n\t\tselect {\n\n\t\tcase <-done:\n\t\t\tdebug.Debug(\"<-done\")\n\t\t\tbreak loop\n\n\t\tdefault:\n\t\t\tlength, err = res.Body.Read(buf)\n\n\t\t\tbyteLenChan <- int64(length)\n\n\t\t\tif err == io.EOF {\n\t\t\t\t// remove err\n\t\t\t\terr = nil\n\n\t\t\t\tdebug.Debug(\"Read done\")\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tdebug.Debug(\"Read\", err)\n\t\t\t\tbreak loop\n\t\t\t}\n\t\t}\n\t}\n\n\tdebug.Done(\"done\")\n\treturn\n}",
"func getHTML(link *url.URL) ([]byte, error) {\n resp, err := http.Get(link.String())\n if err != nil {\n return nil, err\n }\n defer resp.Body.Close()\n\n return ioutil.ReadAll(resp.Body)\n}",
"func getAllContents(URL string) []byte {\n\tresponse, err := http.Get(URL)\n\tif err != nil {\n\t\tlog.Printf(\"%s\", err)\n\t\tos.Exit(1)\n\t} else {\n\t\tdefer response.Body.Close()\n\t}\n\tcontents, err := ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\tlog.Printf(\"%s\", err)\n\t\tos.Exit(1)\n\t}\n\treturn contents\n}",
"func (dw downloadBuffer) ReadFrom(r io.Reader) (int64, error) {\n\tvar n int64\n\tfor len(dw.buf) > 0 {\n\t\tread, err := io.ReadFull(r, dw.buf[0])\n\n\t\tif err == io.ErrUnexpectedEOF || err == io.EOF {\n\t\t\tn += int64(read)\n\t\t\treturn n, nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\n\t\tdw.buf = dw.buf[1:]\n\t\tn += int64(read)\n\t}\n\treturn n, nil\n}",
"func GetVersionFromURL(url string, client *http.Client) (string, error) {\n\tb, err := GetBodyFromURL(url, client)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(string(b)), nil\n}",
"func (api Tumblr) rawGet(url string) []byte {\n\trequest, err := http.NewRequest(\"GET\", url, nil)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tapi.oauthService.Sign(request, &api.config)\n\tclient := new(http.Client)\n\tclientResponse, err := client.Do(request)\n\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn []byte{0}\n\t}\n\tdefer clientResponse.Body.Close()\n\n\tbody, err := ioutil.ReadAll(clientResponse.Body)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\treturn body\n}",
"func get(cacheDir, url string) ([]byte, error) {\n\tclient := grab.NewClient()\n\treq, err := grab.NewRequest(cacheDir, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp := client.Do(req)\n\t<-resp.Done\n\treturn ioutil.ReadFile(resp.Filename)\n}",
"func GetBody(w http.ResponseWriter, url string) []byte{\n\t\n\tres, err := http.Get(url)\n\tif err != nil{\n\t\tfmt.Fprintln(w, \"error or something\")\n\t\t// must break out \n\t}\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil{\n\t\tfmt.Fprintln(w, \"could not get content\")\n\t\t // must break out\n\t}\n\treturn body\n}",
"func getJsonByte (url string) []byte {\n\tjsonClient := http.Client{\n\t\tTimeout: time.Second * 2, // Maximum of 2 secs\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\treq.Header.Set(\"User-Agent\", \"umbrella client\")\n\n\tres, getErr := jsonClient.Do(req)\n\tif getErr != nil {\n\t\tlog.Fatal(getErr)\n\t}\n\n\tbody, readErr := ioutil.ReadAll(res.Body)\n\tif readErr != nil {\n\t\tlog.Fatal(readErr)\n\t}\n\n\treturn body\n\t//jsonErr := json.Unmarshal(body, jsonBody)\n//\n//\tif jsonErr != nil {\n//\t\tlog.Fatal(jsonErr)\n//\t}\n}",
"func fetchURL(request *http.Request) (string, error) {\n\tcontents, err := fetchBytesFromURL(request)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not fetch url %v. Response error %v\", request.URL, err)\n\t}\n\n\treturn string(contents), nil\n}",
"func FetchHtmlFromUrl(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn make([]byte, 0), err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn make([]byte, 0), err\n\t}\n\n\terr = resp.Body.Close()\n\tif err != nil {\n\t\treturn make([]byte, 0), err\n\t}\n\treturn body, nil\n}",
"func GetContent(url string, timeout uint) ([]byte, error) {\n\tresp, err := GetResp(url, timeout)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\treturn io.ReadAll(resp.Body)\n}",
"func getRTVBody(c http.Client, url string) ([]byte, error) {\n\tlog.Printf(\"Fetching URL: %q\\n\", url)\n\tresp, err := c.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, errors.Errorf(\"Non-200 response fetching %s, %+v\", url, resp)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn body, nil\n}",
"func (c *Crawler) getPageContents(url string) (io.ReadCloser, error) {\n\tresponse, err := c.client.getResponse(url)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to fetch URL: %v\", c.hostnameWithProtocol)\n\t}\n\tif response != nil {\n\t\treturn response.Body, nil\n\t}\n\treturn nil, errors.Errorf(\"unable to read response body for URL %v\", url)\n}",
"func postURL(source, post string, tr http.RoundTripper) ([]byte, error) {\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t}\n\tresp, err := client.Post(source, \"application/octet-stream\", strings.NewReader(post))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"http post %s: %v\", source, err)\n\t}\n\tdefer resp.Body.Close()\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"http post %s: %v\", source, statusCodeError(resp))\n\t}\n\treturn io.ReadAll(resp.Body)\n}",
"func getPage(url string) (body io.ReadCloser) {\n\t// Retrieve the page\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tlog.Fatalf(\"Response Error: %v\", err)\n\t}\n\tresp.Close = true\n\tbody = resp.Body\n\treturn\n}",
"func DownloadFromURL(url string, target string, user string, pw string) error {\n\tfileName := target\n\tfmt.Println(\"Downloading\", url, \"to\", fileName)\n\n\t// TODO: check file existence first with io.IsExist\n\toutput, err := os.Create(fileName)\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating\", fileName, \"-\", err)\n\t\treturn err\n\t}\n\tdefer output.Close()\n\ttr := &http.Transport{\n\t\tMaxIdleConns: 10,\n\t\tIdleConnTimeout: 30 * time.Second,\n\t\tDisableCompression: true,\n\t}\n\tclient := &http.Client{\n\t\tTransport: tr,\n\t}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tfmt.Println(\"Error while downloading\", url, \"-\", err)\n\t\treturn err\n\t}\n\tif user != \"\" {\n\t\tfmt.Printf(\"Setting http username for url %s to \\\"%s\\\"\\n\", url, user)\n\t\treq.SetBasicAuth(user, pw)\n\t}\n\tresponse, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(\"Error while downloading\", url, \"-\", err)\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\tfmt.Printf(\"Http.Get() response code: %d (code is not necessarily an error)\\n\", response.StatusCode)\n\tif isHttpError(response.StatusCode) {\n\t\ts := fmt.Sprintf(\"http error: %d (%s)\", response.StatusCode, response.Status)\n\t\tfmt.Println(s)\n\t\treturn errors.New(s)\n\t}\n\tn, err := io.Copy(output, response.Body)\n\tif err != nil {\n\t\tfmt.Println(\"Error while downloading\", url, \"-\", err)\n\t\treturn err\n\t}\n\n\tfmt.Println(n, \"bytes downloaded.\")\n\treturn nil\n}",
"func DownloadFromURL(installLocation string, url string) (string, error) {\n\n\ttokens := strings.Split(url, \"/\")\n\tfileName := tokens[len(tokens)-1]\n\tfmt.Println(\"Downloading\", url, \"to\", fileName)\n\tfmt.Println(\"Downloading ...\")\n\n\toutput, err := os.Create(installLocation + fileName)\n\tif err != nil {\n\t\tfmt.Println(\"Error while creating\", installLocation+fileName, \"-\", err)\n\t\treturn \"\", err\n\t}\n\tdefer output.Close()\n\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\tfmt.Println(\"Error while downloading\", url, \"-\", err)\n\t\treturn \"\", err\n\t}\n\tdefer response.Body.Close()\n\n\tn, errCopy := io.Copy(output, response.Body)\n\tif errCopy != nil {\n\t\tfmt.Println(\"Error while downloading\", url, \"-\", errCopy)\n\t\treturn \"\", errCopy\n\t}\n\n\tfmt.Println(n, \"bytes downloaded.\")\n\treturn installLocation + fileName, nil\n}",
"func doDownloadRequest(ctx context.Context, url string, downloadFrom, totalContentLength int64, options *Options) (body io.ReadCloser, err error) {\n\tclient := http.Client{\n\t\tTimeout: options.Timeout,\n\t\tTransport: options.HTTPTransport,\n\t}\n\n\t// See: https://stackoverflow.com/a/29200933/3536354\n\treq, _ := http.NewRequest(http.MethodGet, url, nil)\n\treq = req.WithContext(ctx)\n\n\tif downloadFrom > 0 {\n\t\treq.Header.Set(rangeHeader, fmt.Sprintf(\"bytes=%d-\", downloadFrom))\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error requesting url: %w\", err)\n\t}\n\n\tif downloadFrom <= 0 {\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\t_ = resp.Body.Close()\n\t\t\treturn nil, fmt.Errorf(\"unexpected download http status code %d\", resp.StatusCode)\n\t\t}\n\t\tif resp.ContentLength != totalContentLength {\n\t\t\t_ = resp.Body.Close()\n\t\t\treturn nil, fmt.Errorf(\"unexpected response content-length (expected %d, got %d)\", totalContentLength, resp.ContentLength)\n\t\t}\n\t\t// Return the body, done\n\t\treturn resp.Body, nil\n\t}\n\n\tif resp.StatusCode != http.StatusPartialContent {\n\t\treturn nil, fmt.Errorf(\"unexpected download http status code %d\", resp.StatusCode)\n\t}\n\n\t// Validate we are receiving the right portion of partial content\n\tvar respStart, respEnd, respTotal int64\n\t_, err = fmt.Sscanf(\n\t\tstrings.ToLower(resp.Header.Get(contentRangeHeader)),\n\t\t\"bytes %d-%d/%d\",\n\t\t&respStart, &respEnd, &respTotal,\n\t)\n\tif err != nil {\n\t\t_ = resp.Body.Close()\n\t\treturn nil, fmt.Errorf(\"error parsing response content-range header: %w\", err)\n\t}\n\n\tif respStart != downloadFrom {\n\t\t_ = resp.Body.Close()\n\t\treturn nil, fmt.Errorf(\"unexpected response range start (expected %d, got %d)\", downloadFrom, respStart)\n\t}\n\tif respEnd != totalContentLength-1 {\n\t\t_ = resp.Body.Close()\n\t\treturn nil, fmt.Errorf(\"unexpected response range end (expected %d, got %d)\", totalContentLength-1, respEnd)\n\t}\n\tif respTotal != totalContentLength {\n\t\t_ = resp.Body.Close()\n\t\treturn nil, fmt.Errorf(\"unexpected response range total (expected %d, got %d)\", totalContentLength, respTotal)\n\t}\n\n\treturn resp.Body, nil\n}",
"func (c *Client) GetByteBuffer(url string) (*bytes.Buffer, error) {\n\tbuf := bytes.NewBuffer(nil)\n\n\tresp, err := c.Get(url)\n\tif err != nil {\n\t\treturn buf, err\n\t}\n\tif resp.StatusCode != 200 {\n\t\treturn buf, fmt.Errorf(\"failed to fetch %s : %s\", url, resp.Status)\n\t}\n\n\t_, err = io.Copy(buf, resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = resp.Body.Close()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error when closing the response body %s\", err)\n\t}\n\treturn buf, err\n}",
"func FromURL(url *nurl.URL, timeout time.Duration) (Article, error) {\n\t// Fetch page from URL\n\tclient := &http.Client{Timeout: timeout}\n\tresp, err := client.Get(url.String())\n\tif err != nil {\n\t\treturn Article{}, err\n\t}\n\tdefer resp.Body.Close()\n\n\t// Check content type. If not HTML, stop process\n\tcontentType := resp.Header.Get(\"Content-type\")\n\tif contentType == \"\" {\n\t\tcontentType = \"application/octet-stream\"\n\t}\n\n\tif !strings.HasPrefix(contentType, \"text/html\") {\n\t\treturn Article{}, fmt.Errorf(\"URL must be a text/html, found %s\", contentType)\n\t}\n\n\t// Parse response body\n\treturn FromReader(resp.Body, url)\n}",
"func HTTPGet(url string) (out []byte, err error) {\n\tres, err := http.Get(url)\n\tif err == nil {\n\t\tdefer res.Body.Close()\n\t\tout, err = ioutil.ReadAll(res.Body)\n\t}\n\treturn\n}",
"func performRequest(url string) ([]byte, error) {\n\tres, err := http.Get(url)\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tres.Body.Close()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}",
"func Request(url string) ([]byte, error) {\n\tvar err error\n\tclient := resty.New()\n\n\tresponse, err := client.R().Get(url)\n\n\tbytes := []byte(response.Body())\n\n\treturn bytes, err\n}",
"func fetchHttpContent(url string) (*http.Response, []byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\n\treturn resp, body, err\n\n}",
"func PrintBody(url string) error {\n\tu, err := parseURL(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\theader := \"GET \" + u.path + \" HTTP/1.1\\r\\n\"\n\theader += \"Host: \" + u.hostname + \"\\r\\n\"\n\theader += \"User-Agent: Noofbizzle\\r\\n\"\n\theader += \"Accept: text/html\\r\\n\"\n\theader += \"Accept-Language: en-us\\r\\n\"\n\theader += \"Accept-Encoding: gzip,deflate\\r\\n\"\n\theader += \"Accept-Charset: ISO-8859-1,utf-8\\r\\n\\r\\n\"\n\n\tconn, err := net.Dial(\"tcp\", \"[\"+u.hostname+\"]:\"+u.port)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\t_, err = conn.Write([]byte(header))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp := bufio.NewReader(conn)\n\trespHeader := make(map[string]string)\n\t// get the header\n\tfor {\n\t\tline, err := resp.ReadString('\\n')\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif line == \"\\r\\n\" {\n\t\t\tbreak //header ends with an empty line\n\t\t}\n\t\tif strings.HasPrefix(line, \"HTTP/\") {\n\t\t\trespHeader[\"first\"] = line\n\t\t\tcontinue\n\t\t}\n\t\tnv := strings.Split(line, \": \")\n\t\trespHeader[nv[0]] = strings.TrimSuffix(nv[1], \"\\r\\n\")\n\t}\n\n\tn, err := strconv.Atoi(respHeader[\"Content-Length\"])\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, n)\n\t_, err = resp.Read(buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Printf(\"%s\", buf)\n\n\treturn nil\n}",
"func (download *Download) Read(data []byte) (n int, err error) {\n\tif download.closed {\n\t\treturn 0, Error.New(\"already closed\")\n\t}\n\n\tif download.reader == nil {\n\t\terr = download.resetReader(download.offset)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\n\tif download.limit == 0 {\n\t\treturn 0, io.EOF\n\t}\n\tif download.limit > 0 && download.limit < int64(len(data)) {\n\t\tdata = data[:download.limit]\n\t}\n\tn, err = download.reader.Read(data)\n\tif download.limit >= 0 {\n\t\tdownload.limit -= int64(n)\n\t}\n\tdownload.offset += int64(n)\n\n\treturn n, err\n}",
"func SimpleGET(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn []byte(\"\"), err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn []byte(\"\"), err\n\t}\n\tresp.Body.Close()\n\n\treturn body, nil\n}",
"func GetObjectRange(url, byteRange string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif byteRange != \"\" {\n\t\treq.Header.Add(\"Range\", byteRange)\n\t}\n\t// In case it's an FTP server, we want to prevent it from compressing the\n\t// file data.\n\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusPartialContent && resp.StatusCode != http.StatusOK {\n\t\treturn nil, parseHTTPError(resp.StatusCode)\n\t}\n\treturn resp, nil\n}",
"func fetch(url string, ch chan<- string) {\n\tstart := time.Now()\n\n\tif !strings.HasPrefix(url, \"http://\") {\n\t\turl = \"http://\" + url\n\t}\n\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tch <- fmt.Sprint(err)\n\t\treturn\n\t}\n\n\tnbytes, err := io.Copy(ioutil.Discard, resp.Body)\n\tresp.Body.Close()\n\tif err != nil {\n\t\tch <- fmt.Sprintf(\"err while reading url %s: %v\", url, err)\n\t\treturn\n\t}\n\n\telapsed := time.Since(start).Seconds()\n\tch <- fmt.Sprintf(\"%.2fs %7d bytes %s %s\", elapsed, nbytes, resp.Status, url)\n}",
"func GetResource(uri string) (io.ReadCloser, error) {\n\tvar file io.ReadCloser\n\tif strings.HasPrefix(uri, \"http://\") || strings.HasPrefix(uri, \"https://\") {\n\t\tresp, err := http.Get(uri)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\treturn nil, errors.Errorf(\"http GET returned status %d for resource %s\", resp.StatusCode, uri)\n\t\t}\n\n\t\tfile = resp.Body\n\t} else {\n\t\tpath, err := filepath.Abs(uri)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"getting absolute path for %v\", uri)\n\t\t}\n\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"opening file %v\", path)\n\t\t}\n\t\tfile = f\n\t}\n\n\t// Write the body to file\n\treturn file, nil\n}",
"func GetStringDataFromHTTPGet(URL string) (string, error) {\n\tbytes, err := httpGet(URL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(bytes), nil\n}"
] | [
"0.740808",
"0.7355354",
"0.6955804",
"0.66765195",
"0.6663741",
"0.65641993",
"0.6527202",
"0.65114444",
"0.6447429",
"0.63824195",
"0.62775344",
"0.6200917",
"0.62001663",
"0.6143989",
"0.6133847",
"0.6108124",
"0.61014867",
"0.60481185",
"0.59918225",
"0.5949454",
"0.59270614",
"0.5861381",
"0.58481324",
"0.58011615",
"0.5749488",
"0.57438636",
"0.5711103",
"0.57061297",
"0.5686592",
"0.5685735",
"0.56807536",
"0.5631409",
"0.56042135",
"0.5597771",
"0.55809474",
"0.55722225",
"0.5565065",
"0.5559774",
"0.55411124",
"0.5532068",
"0.5503772",
"0.5501811",
"0.548289",
"0.54790306",
"0.5471891",
"0.54712826",
"0.54599386",
"0.5459081",
"0.544547",
"0.5437208",
"0.5428931",
"0.5423572",
"0.5415151",
"0.5414328",
"0.53910846",
"0.5359504",
"0.53303266",
"0.53036684",
"0.52843237",
"0.5264471",
"0.52606994",
"0.5245226",
"0.5227678",
"0.5203747",
"0.51921403",
"0.51861906",
"0.5182023",
"0.51787204",
"0.5170436",
"0.5156107",
"0.51417613",
"0.51295006",
"0.5128771",
"0.511731",
"0.5094208",
"0.50863",
"0.5070041",
"0.5068449",
"0.5063922",
"0.5063892",
"0.5056938",
"0.5056468",
"0.50483596",
"0.50406116",
"0.5033436",
"0.50203747",
"0.5019247",
"0.5018374",
"0.5016995",
"0.5004708",
"0.50005394",
"0.49956664",
"0.49919528",
"0.49785736",
"0.49784642",
"0.49721423",
"0.4964034",
"0.49623188",
"0.4961797",
"0.49612203"
] | 0.74276346 | 0 |
Parses JSON into interface | func ParseJSON(body []byte, v interface{}) error {
return json.Unmarshal(body, v)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func parseJson(r io.Reader, v interface{}) (err error) {\n\tbody, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = json.Unmarshal(body, v)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func infoFromJSON(s string) (i Info, err error) {\n\terr = json.Unmarshal([]byte(s), &i)\n\treturn\n}",
"func FromJSON(raw []byte, i interface{}) error {\n\treturn json.Unmarshal(raw, i)\n}",
"func ReadJSONToInterface(reader io.Reader, in interface{}) (err error) {\n\terr = json.NewDecoder(reader).Decode(&in)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (pi *PortableInfo) parseJSON(jsonBytes []byte) error {\n\treturn json.Unmarshal([]byte(jsonBytes), &pi)\n}",
"func JsonToInterface(str string, tempMap interface{}) {\n if len(str) == 0 {\n return\n }\n err := json.Unmarshal([]byte(str), &tempMap)\n if err != nil {\n panic(err)\n }\n}",
"func (c *info) ParseJSON(b []byte) error {\n\treturn json.Unmarshal(b, &c)\n}",
"func ToInterface(reader io.Reader) (interface{}, error) {\n\tvar result interface{}\n\tdecoder := json.NewDecoder(reader)\n\tfor {\n\t\terr := decoder.Decode(&result)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif result == nil {\n\t\treturn map[string]interface{}{}, nil\n\t}\n\treturn result, nil\n}",
"func fromJson(v string) interface{} {\n\toutput, _ := mustFromJson(v)\n\treturn output\n}",
"func FromJSON(i interface{}, r io.Reader) error {\n\td := json.NewDecoder(r)\n\treturn d.Decode(i)\n}",
"func LoadFromJSON(item interface{}, data []byte) error {\n\terr := json.Unmarshal(data, &item)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func JsonParse(jsonStr string, v interface{})error{\n\tdec := json.NewDecoder(strings.NewReader(jsonStr))\n\terr := dec.Decode(v);\n\tif err != nil{\n\t\treturn err\n\t}\n\treturn nil\n}",
"func parseJSON(c *gin.Context, dst interface{}) error {\n\terr := c.ShouldBindJSON(dst)\n\tif err != nil {\n\t\treturn ErrInvalidJSONInput\n\t}\n\n\treturn nil\n}",
"func isJSON(payload []byte) (interface{}, error) {\n\tvar p interface{}\n\tvar err error\n\t//decode json\n\terr = json.Unmarshal(payload, &p)\n\treturn p, err\n}",
"func ReadFromJSON(i interface{}, r io.Reader) error {\n\td := json.NewDecoder(r)\n\terr := d.Decode(i)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn Validate(i)\n}",
"func Json(data []byte) (*Typed, error) {\n\tvar m map[string]interface{}\n\terr := json.Unmarshal(data, &m)\n\treturn New(m), err\n}",
"func (i *Interface) UnmarshalJSON(data []byte) error {\n\tvar rawMsg map[string]json.RawMessage\n\tif err := json.Unmarshal(data, &rawMsg); err != nil {\n\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t}\n\tfor key, val := range rawMsg {\n\t\tvar err error\n\t\tswitch key {\n\t\tcase \"etag\":\n\t\t\terr = unpopulate(val, \"Etag\", &i.Etag)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"extendedLocation\":\n\t\t\terr = unpopulate(val, \"ExtendedLocation\", &i.ExtendedLocation)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"id\":\n\t\t\terr = unpopulate(val, \"ID\", &i.ID)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"location\":\n\t\t\terr = unpopulate(val, \"Location\", &i.Location)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"name\":\n\t\t\terr = unpopulate(val, \"Name\", &i.Name)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"properties\":\n\t\t\terr = unpopulate(val, \"Properties\", &i.Properties)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"tags\":\n\t\t\terr = unpopulate(val, \"Tags\", &i.Tags)\n\t\t\tdelete(rawMsg, key)\n\t\tcase \"type\":\n\t\t\terr = unpopulate(val, \"Type\", &i.Type)\n\t\t\tdelete(rawMsg, key)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unmarshalling type %T: %v\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func readJSON(r *http.Request, v interface{}) error {\n\t// json decoder\n\tvar decoder = json.NewDecoder(r.Body)\n\n\t// decodes json body to interface\n\treturn decoder.Decode(v)\n}",
"func FromJSON(data []byte) Song {\r\n\tsong := Song{}\r\n\terr := json.Unmarshal(data, &song)\r\n\tif err != nil {\r\n\t\tpanic(err)\r\n\t}\r\n\treturn song\r\n}",
"func ImportJSON(url string, target interface{}, delimiter byte) error {\r\n\t// Get HTTP response from URL\r\n\tresponse, err := httpClient.Get(url)\r\n\tif (err != nil) {\r\n\t\treturn err\r\n\t}\r\n\r\n\tdefer response.Body.Close()\r\n\r\n\t// If the payload is fully JSON convert it to the specific struct and return\r\n\tif (delimiter == 0) {\r\n\t\treturn json.NewDecoder(response.Body).Decode(target)\r\n\t}\r\n\r\n\t/*\r\n\t\tIf payload is a JSON data segments separated by the delimiter\r\n\t\tNeed to assemble an array of structs type given\r\n\t*/\r\n\r\n\treader := bufio.NewReader(response.Body)\r\n\tslice := reflect.ValueOf(target).Elem()\r\n\ttypeOfSlice := slice.Type()\r\n\tslice.Set(reflect.MakeSlice(typeOfSlice, 0, 1))\r\n\tptrToTarget := reflect.New(typeOfSlice.Elem())\r\n\r\n\t// Loop through the payload segments try to convert them to struct and add them to array\r\n\tfor {\r\n\t\tpart, err := reader.ReadBytes(delimiter);\r\n\t\tif (err != nil) {\r\n\t\t\terrMsg := err.Error();\r\n\t\t\tif (errMsg != \"EOF\") {\r\n\t\t\t\tlog.Error(err.Error())\r\n\t\t\t}\r\n\r\n\t\t\treturn nil\r\n\t\t}\r\n\r\n\t\tif (len(part) == 0) {\r\n\t\t\treturn nil\r\n\t\t}\r\n\r\n\t\tif err := json.Unmarshal(part, ptrToTarget.Interface()); (err != nil) {\r\n\t\t\treturn err\r\n\t\t}\r\n\r\n\t\tslice.Set(reflect.Append(slice, ptrToTarget.Elem()))\r\n\t}\r\n\r\n\treturn nil\r\n}",
"func ParseJSON(r io.Reader, into interface{}) error {\n\treturn json.NewDecoder(r).Decode(into)\n}",
"func parseJSON(s string) (dat interface{}, err error) {\n\tb := bytes.Trim([]byte(s), \"\\x00\")\n\terr = json.Unmarshal(b, &dat)\n\treturn\n}",
"func LoadFromJSON(r io.Reader) (*Meta, error) {\n\tm := &Meta{}\n\n\tif err := json.NewDecoder(r).Decode(m); err != nil {\n\t\treturn nil, fmt.Errorf(\"while decoding Meta JSON file: %s\", err)\n\t}\n\n\tm.cfg, _ = m.Config()\n\n\treturn m, nil\n}",
"func Parse(jsondata []byte, v interface{}) {\n\terr := json.Unmarshal(jsondata, v)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n}",
"func LoadJSON(f string, iface interface{}) error {\n\traw, err := ioutil.ReadFile(f)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(raw, iface)\n}",
"func loadJSONFor(content string) (users.User, error) {\r\n\tvar elem users.User\r\n\r\n\tif err := json.Unmarshal([]byte(content), &elem); err != nil {\r\n\t\treturn users.User{}, err\r\n\t}\r\n\r\n\treturn elem, nil\r\n}",
"func readJSON(r io.Reader, v interface{}) error {\n\tdecoder := json.NewDecoder(r)\n\terr := decoder.Decode(&v)\n\treturn err\n}",
"func jsonDecode(reader io.ReadCloser, v interface{}) error {\n\tdecoder := json.NewDecoder(reader)\n\terr := decoder.Decode(v)\n\treturn err\n}",
"func (u *Util) JSONParse(s interface{}) (JSON, error) {\n\tvar js JSON\n\n\tvar b []byte\n\tvar err error\n\n\tswitch reflect.ValueOf(s).Kind() {\n\tcase reflect.String:\n\t\tb = []byte(s.(string))\n\tcase reflect.Slice, reflect.Array, reflect.Map:\n\t\tb, err = json.Marshal(s)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\terr = json.Unmarshal(b, &js)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn js, nil\n}",
"func ParseJSON(actual interface{}) (StructureExplorer, error) {\n\tvar result *GabsExplorer\n\t// gabs := &gabs.Container{}\n\tgabs, err := parseJSON(actual)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresult = (*GabsExplorer)(gabs)\n\treturn result, nil\n}",
"func parseJSONRequest(r io.ReadCloser, structure interface{}) error {\n\tdefer r.Close()\n\tbody, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// json decode\n\tif err = json.Unmarshal(body, structure); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}",
"func parseJSON(jsonPath string, v interface{}) error {\n\tif !osutil.Exists(jsonPath) {\n\t\twarn.Printf(\"unable to locate JSON file %q\", jsonPath)\n\t\treturn nil\n\t}\n\treturn jsonutil.ParseFile(jsonPath, v)\n}",
"func (q *InterviewCompletedEvent) FromJSON(r io.Reader) error {\n\treturn q.APIObject.FromJSON(q, r)\n}",
"func FromJSON(js string) (Car, error) {\n\tvar c car\n\terr := json.Unmarshal([]byte(js), &c)\n\treturn c, err\n}",
"func (rg *Rig) FromJSON(s string) error {\n\terr := json.Unmarshal([]byte(s), rg)\n\treturn err\n}",
"func TestJSON(t *testing.T) {\n\n\tsrcJSON := []byte(`{\"float\": 1.2, \"int\": 1, \"bool\": true, \"array\":[\"apple\", 2]}`)\n\tvar m map[string]interface{}\n\terr := json.Unmarshal(srcJSON, &m)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfmt.Printf(\"%T, %T, %T, %T, %T %T\\n\", m[\"float\"], m[\"int\"], m[\"bool\"], m[\"array\"], m[\"array\"].([]interface{})[0], m[\"array\"].([]interface{})[1])\n}",
"func (r *Response) JSON(userStruct interface{}) error {\n\n\tif r.Error != nil {\n\t\treturn r.Error\n\t}\n\n\tjsonDecoder := json.NewDecoder(r.getInternalReader())\n\tdefer r.Close()\n\n\tif err := jsonDecoder.Decode(&userStruct); err != nil && err != io.EOF {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func ParseJSON(r io.Reader) (*Meta, error) {\n\tmeta := &Meta{}\n\terr := json.NewDecoder(r).Decode(meta)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tswitch meta.Version {\n\tcase versionV1:\n\tcase versionV0:\n\t\tmeta.Version = versionV1\n\tdefault:\n\t\treturn nil, ErrWrongVersion\n\t}\n\treturn meta, nil\n}",
"func parseAPI(discovery []byte) (API map[string]interface{}, err error) {\n\tif discovery == nil {\n\t\treturn\n\t}\n\terr = json.Unmarshal(discovery, &API)\n\treturn\n}",
"func GetItemFromJSON(data []byte) (*Item, error) {\n\tobj := &Item{}\n\tl := jlexer.Lexer{Data: data}\n\tobj.UnmarshalEasyJSON(&l)\n\treturn obj, l.Error()\n}",
"func (i *Intangible) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Name = &name\n\t\t\t}\n\t\tcase \"url\":\n\t\t\tif v != nil {\n\t\t\t\tvar URL string\n\t\t\t\terr = json.Unmarshal(*v, &URL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.URL = &URL\n\t\t\t}\n\t\tcase \"image\":\n\t\t\tif v != nil {\n\t\t\t\tvar imageVar ImageObject\n\t\t\t\terr = json.Unmarshal(*v, &imageVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Image = &imageVar\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tif v != nil {\n\t\t\t\tvar description string\n\t\t\t\terr = json.Unmarshal(*v, &description)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Description = &description\n\t\t\t}\n\t\tcase \"entityPresentationInfo\":\n\t\t\tif v != nil {\n\t\t\t\tvar entityPresentationInfo EntitiesEntityPresentationInfo\n\t\t\t\terr = json.Unmarshal(*v, &entityPresentationInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.EntityPresentationInfo = &entityPresentationInfo\n\t\t\t}\n\t\tcase \"bingId\":\n\t\t\tif v != nil {\n\t\t\t\tvar bingID string\n\t\t\t\terr = json.Unmarshal(*v, &bingID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.BingID = &bingID\n\t\t\t}\n\t\tcase \"contractualRules\":\n\t\t\tif v != nil {\n\t\t\t\tcontractualRules, err := unmarshalBasicContractualRulesContractualRuleArray(*v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.ContractualRules = &contractualRules\n\t\t\t}\n\t\tcase \"webSearchUrl\":\n\t\t\tif v != nil {\n\t\t\t\tvar webSearchURL string\n\t\t\t\terr = json.Unmarshal(*v, &webSearchURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.WebSearchURL = &webSearchURL\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.ID = &ID\n\t\t\t}\n\t\tcase \"_type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar TypeBasicResponseBase\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\ti.Type = typeVar\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func mustFromJson(v string) (interface{}, error) {\n\tvar output interface{}\n\terr := json.Unmarshal([]byte(v), &output)\n\treturn output, err\n}",
"func (mi *Metadata) Scan(src interface{}) error {\n\treturn json.Unmarshal(src.([]byte), mi)\n}",
"func FromJSON(buf []byte, data interface{}) error {\n\terr := json.Unmarshal(buf, data)\n\treturn err\n}",
"func (h handler) parseJsonStream(ctx context.Context, body io.ReadCloser) error {\n\tdec := json.NewDecoder(body)\n\n\t// read first delimiter '{'\n\tif _, err := dec.Token(); err != nil {\n\t\treturn err\n\t}\n\n\t// parse each item individually, send to service for processing\n\tfor dec.More() {\n\t\tportID, err := dec.Token()\n\t\tif err != nil {\n\t\t\tlog.Printf(\"error parsing item ID: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar port model.Port\n\t\tif err = dec.Decode(&port); err != nil {\n\t\t\tlog.Printf(\"error parsing item: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tvar ok bool\n\t\tport.ID, ok = portID.(string)\n\t\tif !ok {\n\t\t\tlog.Printf(\"error parsing item ID: %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tgo h.portService.ProcessPort(ctx, port)\n\t}\n\n\treturn nil\n}",
"func ParseJSON(data []byte) General {\n\tvar jsonObject General\n\tjson.Unmarshal(data, &jsonObject)\n\treturn jsonObject\n}",
"func (h Histgram) FromJSON(jsonData []byte) (store.Item, error) {\n\tresult := Histgram{}\n\terr := json.Unmarshal(jsonData, &result)\n\treturn result, err\n}",
"func FromJSON(data *string, obj interface{}) error {\n\terr := json.Unmarshal([]byte(*data), obj)\n\treturn err\n}",
"func walkJSON(ptr interface{}, extracted map[string]manifest.Manifest, path trace) error {\n\t// check for known types\n\tswitch v := ptr.(type) {\n\tcase map[string]interface{}:\n\t\treturn walkObj(v, extracted, path)\n\tcase []interface{}:\n\t\treturn walkList(v, extracted, path)\n\t}\n\n\treturn ErrorPrimitiveReached{\n\t\tpath: path.Base(),\n\t\tkey: path.Name(),\n\t\tprimitive: ptr,\n\t}\n}",
"func (j Json) Parse() gjson.Result {\n\treturn gjson.Parse(string(j))\n}",
"func JSONObj(reader io.Reader) (IJSON, error) {\n\tjsonBytes, err := ioutil.ReadAll(reader)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar jsonData map[string]interface{}\n\tif err := json.Unmarshal(jsonBytes, &jsonData); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TJSON{jsonData}, nil\n}",
"func ParseJSON(target interface{}) func(res *http.Response, req *http.Request) error {\n\treturn func(res *http.Response, req *http.Request) error {\n\t\tbodyBytes, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn json.Unmarshal(bodyBytes, target)\n\t}\n}",
"func (stone *Stone) Scan(src interface{}) error {\n\terr := json.Unmarshal(src.([]byte), stone)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func JSON(reader io.Reader) (*abi.ABI, error) {\n\tdec := json.NewDecoder(reader)\n\n\tvar anAbi abi.ABI\n\tif err := dec.Decode(&anAbi); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &anAbi, nil\n}",
"func JSON(reader io.Reader) (*abi.ABI, error) {\n\tdec := json.NewDecoder(reader)\n\n\tvar anAbi abi.ABI\n\tif err := dec.Decode(&anAbi); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &anAbi, nil\n}",
"func ParseJsonByStruct(body []byte, v interface{}, skiplevel int) int {\n\tif v == nil {\n\t\treturn 0\n\t}\n\t// 2016-10-12\n\tif len(body) == 0 {\n\t\tbody = []byte(\"{}\")\n\t\t// if skiplevel >= 0 {\n\t\t// \tprnLog.Debugf(\"\", \"income body is null, changed to {}\")\n\t\t// }\n\t}\n\n\tif err := json.Unmarshal(body, v); err != nil {\n\t\tif skiplevel >= 0 {\n\t\t\tprnLog.LogPrint(LOG_ERROR, skiplevel, false, false, \"err=%v,body=%v\",\n\t\t\t\terr, string(body))\n\t\t}\n\t\treturn ErrData\n\t}\n\treturn thisSuccess\n}",
"func parseJSON(w http.ResponseWriter, body io.ReadCloser, model interface{}) bool {\n\tdefer body.Close()\n\n\tb, _ := ioutil.ReadAll(body)\n\terr := json.Unmarshal(b, model)\n\tif err != nil {\n\t\te := &models.ErrorData{}\n\t\te.Message = \"Error in parsing json\"\n\t\te.Err = err\n\t\trenderERROR(w, e)\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (u *Session) FromJSON(in []byte) error {\n\treturn json.Unmarshal(in, &u)\n}",
"func FromJSON(data []byte) Book {\n b := Book{}\n err := json.Unmarshal(data, &b)\n if err != nil {\n panic(err)\n }\n return b\n}",
"func (message *Message) FromJson(reader io.Reader) error {\n\tdecoder := json.NewDecoder(reader)\n\tdecoded := decoder.Decode(message)\n\treturn decoded\n}",
"func fromJSONStock(data []byte) stock {\n\tstock := stock{}\n\terr := json.Unmarshal(data, &stock)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn stock\n}",
"func (i *Info) Scan(src interface{}) error {\n\tvar source []byte\n\t// let's support string and []byte\n\tswitch src.(type) {\n\tcase string:\n\t\tsource = []byte(src.(string))\n\tcase []byte:\n\t\tsource = src.([]byte)\n\tdefault:\n\t\treturn errors.New(\"Incompatible type for Info\")\n\t}\n\treturn json.Unmarshal(source, i)\n}",
"func ParseJson(data io.Reader) error {\n\tparams = make(map[string]interface{})\n\tdecoder := json.NewDecoder(data)\n\terr := decoder.Decode(¶ms)\n\tswitch {\n\tcase err == io.EOF: //empty body\n\t\treturn nil\n\tcase err != nil:\n\t\treturn err\n\t}\n\treturn nil\n}",
"func ReadJSON(data []byte, value interface{}) error {\n\ttrimmedData := bytes.Trim(data, \"\\x00\")\n\tif d, ok := value.(ejUnmarshaler); ok {\n\t\tjl := &jlexer.Lexer{Data: trimmedData}\n\t\td.UnmarshalEasyJSON(jl)\n\t\treturn jl.Error()\n\t}\n\tif d, ok := value.(json.Unmarshaler); ok {\n\t\treturn d.UnmarshalJSON(trimmedData)\n\t}\n\treturn json.Unmarshal(trimmedData, value)\n}",
"func JSON(reader io.Reader) (ABI, error) {\n\tdec := json.NewDecoder(reader)\n\n\tvar abi ABI\n\tif err := dec.Decode(&abi); err != nil {\n\t\treturn ABI{}, err\n\t}\n\n\treturn abi, nil\n}",
"func (a *Account) ImportJSON(data []byte) (*Account, error) {\n\tvar tmp struct {\n\t\tPublicKey string `json:\"public_key\"`\n\t\tName string `json:\"name\"`\n\t\tTimestamp uint32 `json:\"timestamp\"`\n\t\tFields map[string]string `json:\"fields\"`\n\t\tSignature string `json:\"signature\"`\n\t}\n\n\tif err := json.Unmarshal(data, &tmp); err != nil {\n\t\treturn nil, err\n\t}\n\n\ta.pub = *(new(PubKey).SetString(tmp.PublicKey))\n\ta.name = tmp.Name\n\ta.fields = tmp.Fields\n\ta.sign = *(new(SigData).SetString(tmp.Signature))\n\ta.timestamp = tmp.Timestamp\n\treturn a, nil\n}",
"func JSONToMapOfInterfaces(input string) (map[string]any, error) {\n\tvar data map[string]any\n\tbyt := []byte(input)\n\n\tif err := json.Unmarshal(byt, &data); err != nil {\n\t\treturn nil, err\n\t}\n\treturn data, nil\n}",
"func (fi *FeatureInfo) FromJSON(data []byte) error {\n\n\tif fi == nil {\n\t\treturn fmt.Errorf(\"FromJSON on nil pointer\")\n\t}\n\n\tvar fi2 FeatureInfo\n\tif err := json.Unmarshal(data, &fi2); err != nil {\n\n\t\treturn err\n\n\t}\n\n\t*fi = fi2 // overwrite the current fi pointer\n\treturn nil\n}",
"func DeserializeFromJSON(buf []byte, data interface{}) error {\n\terr := json.Unmarshal(buf, data)\n\treturn err\n}",
"func FromJSON(b []byte) (*Result, error) {\n\tmsg := make(map[string]json.RawMessage)\n\tif err := json.Unmarshal(b, &msg); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(msg) != 1 {\n\t\tks := \"\"\n\t\tfor k := range msg {\n\t\t\tks += \", \" + k\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"parse: expected one modifier, received %d: %s\", len(msg), ks)\n\t}\n\n\tparseMu.RLock()\n\tdefer parseMu.RUnlock()\n\tfor k, m := range msg {\n\t\tparseFunc, ok := parseFuncs[k]\n\t\tif !ok {\n\t\t\treturn nil, ErrUnknownModifier{name: k}\n\t\t}\n\t\treturn parseFunc(m)\n\t}\n\n\treturn nil, fmt.Errorf(\"parse: no modifiers found: %v\", msg)\n}",
"func Deserialize(buffer []byte, destination interface{}) error {\n\treturn json.Unmarshal(buffer, &destination)\n}",
"func (i *Invoice) FromJSON(jsonData []byte) error {\n\treturn json.Unmarshal(jsonData, i)\n}",
"func parseMixinJSON(body []byte) ([]mixin, error) {\n\tvar mixins map[string][]mixin\n\tif err := json.Unmarshal(body, &mixins); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to unmarshal json: %w\", err)\n\t}\n\tmixinsList := mixins[\"mixins\"]\n\treturn mixinsList, nil\n}",
"func (d *decode) Json(v interface{}) error {\n\t// we have an error set return it\n\tif d.err != nil {\n\t\treturn d.err\n\t}\n\n\tdefer d.Close()\n\n\treturn json.NewDecoder(d).Decode(v)\n}",
"func JSONScan(i, value interface{}) error {\n\treturn json.NewDecoder(bytes.NewReader(value.([]byte))).Decode(i)\n}",
"func decodeJSON(r io.Reader, v interface{}) error {\n\tdata, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = json.Unmarshal(data, v); err != nil {\n\t\tlog.Printf(\"Error decoding JSON into %T: %s\", v, err)\n\t\tlog.Println(string(data))\n\t\treturn err\n\t}\n\treturn nil\n}",
"func parse(input string) (output *js.Object, err error) {\n\tvar ast interface{}\n\terr = hcl.Unmarshal([]byte(input), &ast)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata, err := json.MarshalIndent(ast, \"\", \" \")\n\tif err != nil {\n\t\treturn\n\t}\n\toutput = js.Global.Get(\"JSON\").Call(\"parse\", string(data))\n\treturn\n}",
"func (m *Metadata) FromJSON(jsonData string) error {\n\tif len(jsonData) == 0 {\n\t\treturn errors.New(\"empty json data to construct repository\")\n\t}\n\n\treturn json.Unmarshal([]byte(jsonData), m)\n}",
"func Parse(out, in interface{}) error {\n\tvar (\n\t\tbuf = new(bytes.Buffer)\n\t)\n\n\t// encode interface to json\n\terr := json.NewEncoder(buf).Encode(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// parse the json as a template\n\ttmpl, err := template.New(\"\").Parse(buf.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf.Reset()\n\n\t// execute template to out buffer\n\terr = tmpl.Execute(buf, in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// finally decode the executed template\n\treturn json.NewDecoder(buf).Decode(out)\n}",
"func (r *Robot) FromJSON(jsonData string) error {\n\tif len(jsonData) == 0 {\n\t\treturn errors.New(\"empty json data to parse\")\n\t}\n\n\treturn json.Unmarshal([]byte(jsonData), r)\n}",
"func (r *Registration) FromJSON(jsonData string) error {\n\tif len(jsonData) == 0 {\n\t\treturn errors.New(\"empty json data to parse\")\n\t}\n\n\treturn json.Unmarshal([]byte(jsonData), r)\n}",
"func UnmarshalJSON(b []byte, discriminator string, f Factory) (interface{}, error) {\n\tm := make(map[string]interface{})\n\terr := json.Unmarshal(b, &m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn Decode(m, discriminator, f)\n}",
"func (p *Product) FromJSON(r io.Reader) error {\n\tdecoder := json.NewDecoder(r)\n\treturn decoder.Decode(p)\n}",
"func (c *configuration) ParseJSON(b []byte) error {\n\treturn json.Unmarshal(b, &c)\n}",
"func (c *configuration) ParseJSON(b []byte) error {\n\treturn json.Unmarshal(b, &c)\n}",
"func (e *Entity) FromJSON(jsonData []byte) error {\n\treturn json.Unmarshal(jsonData, e)\n}",
"func FromJSON(data []byte) Weather {\n\tweather := Weather{}\n\terr := json.Unmarshal(data, &weather)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn weather\n}",
"func ParseJSON(content []byte) (*Container, error) {\n\tjsonContainer, err := gabs.ParseJSON(content)\n\treturn &Container{JSONContainer: jsonContainer}, err\n}",
"func (v *Value) FromJSON(rawmessage json.RawMessage) error {\n\treturn json.Unmarshal(rawmessage, &v)\n}",
"func (v *OneLike) UnmarshalJSON(data []byte) error {\n\tr := jlexer.Lexer{Data: data}\n\tdecodeOneLike(&r, v)\n\treturn r.Error()\n}",
"func (m *jsonValue) Scan(src interface{}) error {\n\tvar source []byte\n\tswitch src.(type) {\n\tcase string:\n\t\tsource = []byte(src.(string))\n\tcase []byte:\n\t\tsource = src.([]byte)\n\tdefault:\n\t\treturn errors.New(\"Incompatible type for jsonValue\")\n\t}\n\n\treturn json.Unmarshal(source, m.data)\n}",
"func TestJson(t *testing.T) {\n json := json.NewJSON()\n json.Debug = true\n\n data := make(map[string]interface{})\n age := make(map[string]interface{})\n age[\"age\"] = 34\n data[\"name\"] = \"Ricardo\"\n data[\"id\"] = 1\n data[\"age\"] = age\n data[\"now\"] = \"2021-07-12T14:44:00-03:00\"\n\n //p := new(Person)\n //json.ParseMap(data, p)\n //fmt.Println(\"PARSE MAP:\", fmt.Sprintf(\"%#v, %#v\", p, p.Age))\n\n pp := new(Person)\n err := json.Decode([]byte(`\n {\n \"id\": 1,\n \"name\": \"Jons\",\n \"age\": {\n \"age\": 45\n },\n \"age2\": {\n \"age\": 30\n },\n \"type\": 1,\n \"type2\": \"Masculino\",\n \"types\": [\"Masculino\", \"Feminino\"],\n \"types2\": [\"Masculino\", \"Feminino\"],\n \"n\": 5,\n \"now\": \"2021-07-12T14:44:00-03:00\",\n \"now2\": \"2021-07-12T14:44:00-03:00\",\n \"tags\": [\"a\", \"b\", \"c\"],\n \"tags2\": [\"a\", \"b\", \"c\"],\n \"friends\": [{\"id\": 2, \"name\": \"Mark\"}, {\"id\": 3, \"name\": \"Juca\"}],\n \"friends2\": [{\"id\": 2, \"name\": \"Mark\"}, {\"id\": 3, \"name\": \"Juca\"}],\n \"data\": {\"x\": 1, \"y\": 2},\n \"data2\": {\"x\": 1, \"y\": 2},\n \"data3\": {\"x\": \"1\", \"y\": \"2\"}\n }\n `), pp)\n\n fmt.Println(\"err \", err )\n //fmt.Println(\"--------------------------------------\")\n //fmt.Println(\"PARSE:\", fmt.Sprintf(\"%#v\", pp))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Friends:\", fmt.Sprintf(\"%#v\", pp.Friends))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Type2:\", fmt.Sprintf(\"%#v\", pp.Type2))\n fmt.Println(\"Types:\", fmt.Sprintf(\"%#v\", pp.Types))\n fmt.Println(\"Types2:\", fmt.Sprintf(\"%#v\", pp.Types2))\n fmt.Println(\"Data3:\", fmt.Sprintf(\"%#v\", pp.Data3))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Age:\", fmt.Sprintf(\"%#v\", pp.Age))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Age2:\", fmt.Sprintf(\"%#v\", pp.Age2))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Tags\", fmt.Sprintf(\"%#v\", pp.Tags))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Tags2\", fmt.Sprintf(\"%#v\", pp.Tags2))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Data\", fmt.Sprintf(\"%#v\", pp.Data))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Data2\", fmt.Sprintf(\"%#v\", pp.Data2))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Now\", fmt.Sprintf(\"%#v\", pp.Now))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"Now2\", fmt.Sprintf(\"%#v\", pp.Now2))\n fmt.Println(\"--------------------------------------\")\n fmt.Println(\"N\", *pp.N)\n\n //jsonData, err := json.ToMap(p)\n //fmt.Println(fmt.Sprintf(\"TO MAP: %#v, Err %v\", jsonData, err))\n\n\n}",
"func (client *WebsocketClientHandler) ReadJson(cmd interfaces.WebsocketCommand, obj interface{}) error {\n\tcmdData, ok := cmd.(*WebsocketCommandHandler)\n\tif !ok {\n\t\treturn errors.New(\"Invalid command\")\n\t}\n\n\t// Need to use reflect to copy values into obj\n\tdest := reflect.ValueOf(obj).Elem()\n\tsrc, ok := cmdData.Data.(map[string]interface{})\n\tif !ok {\n\t\treturn errors.New(\"Invalid command\")\n\t}\n\tfor srcKey, srcVal := range src {\n\t\tdestF := dest.FieldByName(srcKey)\n\t\tsrcF := reflect.ValueOf(srcVal)\n\t\tif destF.IsValid() && srcF.Type().ConvertibleTo(destF.Type()) {\n\t\t\tdestF.Set(srcF.Convert(destF.Type()))\n\t\t}\n\t}\n\n\treturn nil\n}",
"func Decode(json []byte, ptr interface{}) error {\n\treturn parser.Unmarshal(json, ptr)\n}",
"func ParseJSON(bytes []byte, conn net.Conn, str *HandlerTCP) {\n\tmessage := userConnections.MessageIn{}\n\terr := json.Unmarshal(bytes, &message)\n\tif err != nil {\n\t\tlog.Print(\"Unmarshal doesn't work: \")\n\t\tlog.Fatal(err)\n\t}\n\tstr.Connection.AddTCPConn(conn, message.User.Login)\n\trouterIn.RouterIn(&message, str.Connection.OutChan)\n}",
"func loadJSON(t *testing.T, filename string, output interface{}) {\n\tfile := filepath.Join(\"testdata\", filename)\n\tfd, err := os.Open(file)\n\tif err != nil {\n\t\tt.Fatalf(\"failed to read file %s: %v\", file, err)\n\t}\n\tif err = json.NewDecoder(fd).Decode(&output); err != nil {\n\t\tt.Fatalf(\"failed to decode file %s: %v\", file, err)\n\t}\n}",
"func (h *handler) readJSONInto(into interface{}) error {\n\t\n\tcontentType := h.rq.Header.Get(\"Content-Type\")\n\tif contentType != \"\" && !strings.HasPrefix(contentType, \"application/json\") {\n\t\treturn base.HTTPErrorf(http.StatusUnsupportedMediaType, \"Invalid content type %s\", contentType)\n\t}\n \n \t//TO DO: zip version to be added\n\t \t\n\tdecoder := json.NewDecoder(h.requestBody)\n\tif err := decoder.Decode(into); err != nil {\n\t\tbase.Warn(\"Couldn't parse JSON in HTTP request: %v\", err)\n\t\treturn base.HTTPErrorf(http.StatusBadRequest, \"Bad JSON\")\n\t}\n\t \n\treturn nil\n}",
"func Parse(jsonBlob []byte) (Story, error) {\n\t// create a Story to absorb the gb.json\n\tvar story Story\n\t// Unmarshal the json\n\terr := json.Unmarshal(jsonBlob, &story)\n\t// return the story and error\n\treturn story, err\n}",
"func (t *Thing) UnmarshalJSON(body []byte) error {\n\tvar m map[string]*json.RawMessage\n\terr := json.Unmarshal(body, &m)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor k, v := range m {\n\t\tswitch k {\n\t\tcase \"name\":\n\t\t\tif v != nil {\n\t\t\t\tvar name string\n\t\t\t\terr = json.Unmarshal(*v, &name)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.Name = &name\n\t\t\t}\n\t\tcase \"url\":\n\t\t\tif v != nil {\n\t\t\t\tvar URL string\n\t\t\t\terr = json.Unmarshal(*v, &URL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.URL = &URL\n\t\t\t}\n\t\tcase \"image\":\n\t\t\tif v != nil {\n\t\t\t\tvar imageVar ImageObject\n\t\t\t\terr = json.Unmarshal(*v, &imageVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.Image = &imageVar\n\t\t\t}\n\t\tcase \"description\":\n\t\t\tif v != nil {\n\t\t\t\tvar description string\n\t\t\t\terr = json.Unmarshal(*v, &description)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.Description = &description\n\t\t\t}\n\t\tcase \"entityPresentationInfo\":\n\t\t\tif v != nil {\n\t\t\t\tvar entityPresentationInfo EntitiesEntityPresentationInfo\n\t\t\t\terr = json.Unmarshal(*v, &entityPresentationInfo)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.EntityPresentationInfo = &entityPresentationInfo\n\t\t\t}\n\t\tcase \"bingId\":\n\t\t\tif v != nil {\n\t\t\t\tvar bingID string\n\t\t\t\terr = json.Unmarshal(*v, &bingID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.BingID = &bingID\n\t\t\t}\n\t\tcase \"contractualRules\":\n\t\t\tif v != nil {\n\t\t\t\tcontractualRules, err := unmarshalBasicContractualRulesContractualRuleArray(*v)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.ContractualRules = &contractualRules\n\t\t\t}\n\t\tcase \"webSearchUrl\":\n\t\t\tif v != nil {\n\t\t\t\tvar webSearchURL string\n\t\t\t\terr = json.Unmarshal(*v, &webSearchURL)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.WebSearchURL = &webSearchURL\n\t\t\t}\n\t\tcase \"id\":\n\t\t\tif v != nil {\n\t\t\t\tvar ID string\n\t\t\t\terr = json.Unmarshal(*v, &ID)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.ID = &ID\n\t\t\t}\n\t\tcase \"_type\":\n\t\t\tif v != nil {\n\t\t\t\tvar typeVar TypeBasicResponseBase\n\t\t\t\terr = json.Unmarshal(*v, &typeVar)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tt.Type = typeVar\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func decode() {\n\tfmt.Println(\"=== json.decode ===\")\n\tconst jsonStream = `\n\t\t{\"name\": \"Ed\", \"age\": 55}\n\t\t{\"name\": \"Ethan\", \"age\": 33}\n\t\t{\"name\": \"Elbert\", \"age\": 111}\n\t`\n\tdec := json.NewDecoder(strings.NewReader(jsonStream))\n\tfor {\n\t\tvar m Person\n\t\tif err := dec.Decode(&m); err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfmt.Printf(\"%s: %d\\n\", m.Name, m.Age)\n\t}\n}"
] | [
"0.64064944",
"0.6405011",
"0.6356266",
"0.62482035",
"0.62306",
"0.62236863",
"0.6217344",
"0.6217314",
"0.62139374",
"0.6171303",
"0.61380273",
"0.61018044",
"0.6049581",
"0.60457",
"0.60261834",
"0.60151225",
"0.60117835",
"0.5997406",
"0.5944856",
"0.59396416",
"0.5879698",
"0.5845051",
"0.5840045",
"0.58379364",
"0.5788917",
"0.5781851",
"0.57790315",
"0.57220286",
"0.5710795",
"0.5703492",
"0.56958175",
"0.56946075",
"0.56898725",
"0.567262",
"0.5662621",
"0.56621426",
"0.56555974",
"0.5653431",
"0.56342745",
"0.5629131",
"0.5617908",
"0.56152266",
"0.5613462",
"0.56117034",
"0.5607637",
"0.56038445",
"0.560112",
"0.5593274",
"0.55796945",
"0.55710834",
"0.5569329",
"0.55553496",
"0.55390316",
"0.5529332",
"0.5529332",
"0.5524122",
"0.55184555",
"0.55091923",
"0.54990065",
"0.5494069",
"0.5488297",
"0.5475945",
"0.5473242",
"0.5463727",
"0.5458326",
"0.5454504",
"0.54511935",
"0.5448648",
"0.5446775",
"0.542831",
"0.5425254",
"0.5419441",
"0.541574",
"0.54149294",
"0.5408667",
"0.54073006",
"0.5404599",
"0.53971505",
"0.5389439",
"0.53891915",
"0.5381919",
"0.5380863",
"0.53722787",
"0.53648394",
"0.53648394",
"0.5361755",
"0.5360937",
"0.5354735",
"0.53463334",
"0.53455085",
"0.53439796",
"0.53387415",
"0.5337098",
"0.5335823",
"0.5331851",
"0.5320495",
"0.53180575",
"0.5317268",
"0.5310391",
"0.52984685"
] | 0.5995478 | 18 |
Parses CSV into a slice of maps using the header row to determine the keys | func ParseCSV(body []byte) ([]map[string]string, error) {
slice := make([]map[string]string, 0)
records, err := csv.NewReader(bytes.NewReader(body)).ReadAll()
if err != nil {
return slice, err
}
rows := len(records)
for i := 1; i < rows; i++ {
m := make(map[string]string)
cols := len(records[i])
for j := 0; j < cols; j++ {
m[records[0][j]] = records[i][j]
}
slice = append(slice, m)
}
return slice, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func csvFileToMap(fs io.Reader) (returnMap []map[string]string, err error) {\n\t// read csv file\n\n\treader := csv.NewReader(fs)\n\n\trawCSVdata, err := reader.ReadAll()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(err.Error())\n\t}\n\n\theader := []string{} // holds first row (header) better to declare this way to avoid nil indexing below\n\tfor lineNum, record := range rawCSVdata {\n\n\t\t// for first row, build the header slice\n\t\tif lineNum == 0 {\n\t\t\tfor i := 0; i < len(record); i++ {\n\t\t\t\theader = append(header, strings.ToLower(strings.TrimSpace(record[i])))\n\t\t\t}\n\t\t} else {\n\t\t\t// for each cell, map[string]string k=header v=value\n\t\t\tline := map[string]string{}\n\t\t\tfor i := 0; i < len(record); i++ {\n\t\t\t\tline[header[i]] = record[i]\n\t\t\t}\n\t\t\treturnMap = append(returnMap, line)\n\t\t}\n\t}\n\n\treturn\n}",
"func GetKVPMap(csvStr string) map[string]string {\n\tkvpMap := make(map[string]string)\n\n\treader := csv.NewReader(strings.NewReader(csvStr))\n\tfor {\n\t\trecord, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tkvpMap[record[0]] = record[1]\n\t}\n\n\treturn kvpMap\n}",
"func processCSV(r io.Reader) ([]*Entry, error) {\n\treader := csv.NewReader(r)\n\trecords, err := reader.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tci := &ColumnIndexes{FirstName: -1, LastName: -1, Username: -1, Addr1: -1, Addr2: -1, Code: -1, City: -1, State: -1, CountryISO: -1, Email: -1}\n\n\tcolHeaders := records[0]\n\tfor i, colHeader := range colHeaders {\n\t\t// making comparison more permissive\n\t\tc := strings.ToLower(colHeader)\n\t\tc = strings.Replace(c, \" \", \"\", -1) // remove all spaces\n\n\t\tif c == \"prénom\" || c == \"firstname\" {\n\t\t\tci.FirstName = i\n\t\t} else if c == \"nom\" || c == \"name\" || c == \"lastname\" || c == \"shippingname\" {\n\t\t\tci.LastName = i\n\t\t} else if c == \"adresse\" || c == \"addr1\" || c == \"address1\" || c == \"address\" || c == \"shippingaddress1\" {\n\t\t\tci.Addr1 = i\n\t\t} else if c == \"complémentd'adresse\" || c == \"addr2\" || c == \"address2\" || c == \"shippingaddress2\" {\n\t\t\tci.Addr2 = i\n\t\t} else if c == \"codepostal\" || c == \"zipcode\" || c == \"postalcode\" || c == \"shippingzip\" {\n\t\t\tci.Code = i\n\t\t} else if c == \"city\" || c == \"ville\" || c == \"shippingcity\" {\n\t\t\tci.City = i\n\t\t} else if c == \"pays\" || c == \"country\" || c == \"countrycode\" || c == \"shippingcountry\" {\n\t\t\tci.CountryISO = i\n\t\t} else if c == \"state\" || c == \"shippingprovince\" {\n\t\t\tci.State = i\n\t\t} else if c == \"email\" || c == \"e-mail\" || strings.Contains(c, \"email\") {\n\t\t\tci.Email = i\n\t\t} else if c == \"username\" || c == \"nickname\" {\n\t\t\tci.Username = i\n\t\t}\n\t}\n\n\tentries := make([]*Entry, 0)\n\n\tfor i := 1; i < len(records); i++ {\n\t\trecord := records[i]\n\n\t\tentry := &Entry{}\n\n\t\tif ci.FirstName > -1 {\n\t\t\tentry.FirstName = record[ci.FirstName]\n\t\t}\n\n\t\tif ci.LastName > -1 {\n\t\t\tentry.LastName = record[ci.LastName]\n\t\t}\n\n\t\tif ci.Username > -1 {\n\t\t\tentry.Username = record[ci.Username]\n\t\t}\n\n\t\tif ci.Addr1 > -1 {\n\t\t\tentry.Addr1 = record[ci.Addr1]\n\t\t}\n\n\t\tif ci.Addr2 > -1 {\n\t\t\tentry.Addr2 = record[ci.Addr2]\n\t\t}\n\n\t\tif ci.Code > -1 {\n\t\t\tentry.Code = record[ci.Code]\n\t\t}\n\n\t\tif ci.City > -1 {\n\t\t\tentry.City = record[ci.City]\n\t\t}\n\n\t\tif ci.CountryISO > -1 {\n\t\t\tentry.CountryISO = record[ci.CountryISO]\n\t\t}\n\n\t\tif ci.State > -1 {\n\t\t\tentry.State = record[ci.State]\n\t\t}\n\n\t\tif ci.Email > -1 {\n\t\t\tentry.Email = record[ci.Email]\n\t\t}\n\n\t\tentries = append(entries, entry)\n\t}\n\n\treturn entries, nil\n}",
"func readCSV(r io.Reader) Roadmap {\n\tvar (\n\t\tcats []Category\n\t\tc Category\n\t\tit Item\n\t\titems []Item\n\t\trm Roadmap\n\t)\n\n\tnc := -1\n\tnr := 0\n\tinput := csv.NewReader(r)\n\tfor {\n\t\tfields, csverr := input.Read()\n\t\tif csverr == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif csverr != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v %v\\n\", csverr, fields)\n\t\t\tcontinue\n\t\t}\n\t\tnr++\n\t\tif nr == 1 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(fields) < 5 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(fields[0]) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif len(fields[0]) > 0 && len(fields[1]) == 0 && len(fields[2]) == 0 {\n\t\t\tnc++\n\t\t\tc.Name = fields[0]\n\t\t\tc.Vspace = \"45\"\n\t\t\tc.Itemheight = 40\n\t\t\tcats = append(cats, c)\n\t\t\titems = []Item{}\n\t\t\tcontinue\n\t\t}\n\t\tit.Text = fields[0]\n\t\tit.Begin = fields[1]\n\t\tit.Duration = fields[2]\n\t\titems = append(items, it)\n\t\tcats[nc].Item = items\n\t}\n\trp := strings.Split(*csvparam, \",\")\n\tif len(rp) == 3 {\n\t\trm.Title = rp[0]\n\t\trm.Begin, _ = strconv.ParseFloat(rp[1], 64)\n\t\trm.End, _ = strconv.ParseFloat(rp[2], 64)\n\t}\n\trm.Scale = 12\n\trm.Catpercent = 15\n\trm.Vspace = 45\n\trm.Itemheight = 40\n\trm.Shape = \"r\"\n\trm.Fontname = \"Calibri,sans-serif\"\n\trm.Category = cats\n\tdumprm(rm, os.Stderr)\n\treturn rm\n}",
"func ParseMap(line string) map[string]string {\n\tline = strings.TrimSpace(line)\n\tif line == \"\" {\n\t\treturn nil\n\t}\n\n\tr := csv.NewReader(strings.NewReader(line))\n\tr.TrimLeadingSpace = true\n\n\trecord, err := r.Read()\n\tif err == io.EOF {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdata := map[string]string{}\n\tfor i := range record {\n\t\tp := strings.SplitN(record[i], \"=\", 2)\n\t\tif len(p) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tk := p[0]\n\t\tvar v string\n\t\tif len(p) > 1 {\n\t\t\tv = p[1]\n\t\t}\n\t\tdata[k] = v\n\t}\n\n\treturn data\n}",
"func processLine(headers []string, dataList []string) (map[string]string, error) {\n\t// Make sure there is the same num of headers as columns, otherwise throw error\n\tif len(dataList) != len(headers) {\n\t\treturn nil, errors.New(\"line does not match headers format, skipping line.\")\n\t}\n\n\t// Create the map we're going to populate\n\trecordMap := make(map[string]string)\n\n\t// For each header we are going to set a map key with the corresponding column val\n\tfor i, name := range headers {\n\t\trecordMap[name] = dataList[i]\n\t}\n\n\t// Returning the generated map\n\treturn recordMap, nil\n}",
"func parseCSV(csvFilePath string) error {\n\tcsvFile, err := os.Open(csvFilePath)\n\n\tif err != nil && os.IsNotExist(err) {\n\t\treturn errors.New(csvFilePath + \" does not exist\")\n\t} else if err != nil && os.IsPermission(err) {\n\t\treturn errors.New(\"Can not open \" + csvFilePath + \" due to invalid file permissions\")\n\t}\n\n\tdefer csvFile.Close()\n\n\treader := csv.NewReader(csvFile)\n\n\tfor {\n\t\trecord, err := reader.Read()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\taddress := strings.ToLower(strings.TrimSpace(record[1]))\n\t\tvendor := strings.TrimSpace(record[2])\n\n\t\taddressMap.Lock()\n\t\taddressMap.m[address] = vendor\n\t\taddressMap.Unlock()\n\n\t}\n\n\treturn nil\n}",
"func parseMappingFile(file io.Reader, filematch bool) (map[string][]string, error) {\n\tvar m = make(map[string][]string)\n\n\treader := csv.NewReader(file)\n\treader.Comma = ','\n\tlineCount := 0\n\tfor {\n\t\trecord, err := reader.Read()\n\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn m, err\n\t\t}\n\n\t\tvar key string\n\t\tif filematch {\n\t\t\tif len(record) != 4 {\n\t\t\t\treturn m, errors.New(\"no file matching information provided in mapping file\")\n\t\t\t}\n\t\t\tkey = buildMappingKey([]string{record[0], record[1], record[3]})\n\t\t} else {\n\t\t\tkey = buildMappingKey([]string{record[0], record[1]})\n\t\t}\n\t\tm[key] = append(m[key], record[2])\n\t\tlineCount++\n\t}\n\n\tlog.Printf(\"successfully read mappings: %d\\n\", lineCount)\n\n\treturn m, nil\n}",
"func (r *CSVParser) parse(value interface{}) (interface{}, error) {\n\tvar csvLine string\n\tswitch val := value.(type) {\n\tcase string:\n\t\tcsvLine = val\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"type '%T' cannot be parsed as csv\", value)\n\t}\n\n\treader := csvparser.NewReader(strings.NewReader(csvLine))\n\treader.Comma = r.fieldDelimiter\n\treader.FieldsPerRecord = r.numFields\n\tparsedValues := make(map[string]interface{})\n\n\trecord, err := reader.Read()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor i, key := range r.header {\n\t\tparsedValues[key] = record[i]\n\t}\n\n\treturn parsedValues, nil\n}",
"func CSV(filename string, sep rune) []map[string]string {\n\tfile, err := fs.Instance.Open(filename)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\treader := csv.NewReader(file)\n\treader.Comma = sep\n\treader.LazyQuotes = true\n\n\t// Create header.\n\theader, err := reader.Read()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t// Read information.\n\tdata := make([]map[string]string, 0)\n\tfor {\n\t\tline, err := reader.Read()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tlog.Fatalln(err)\n\t\t}\n\n\t\tparsedLine := make(map[string]string, len(header))\n\t\tfor i, field := range header {\n\t\t\tparsedLine[field] = line[i]\n\t\t}\n\n\t\tdata = append(data, parsedLine)\n\t}\n\n\treturn data\n}",
"func (c *Collection) ImportCSV(buf io.Reader, idCol int, skipHeaderRow bool, overwrite bool, verboseLog bool) (int, error) {\n\tvar (\n\t\tfieldNames []string\n\t\tkey string\n\t\terr error\n\t)\n\tr := csv.NewReader(buf)\n\tr.FieldsPerRecord = -1\n\tr.TrimLeadingSpace = true\n\tlineNo := 0\n\tif skipHeaderRow == true {\n\t\tlineNo++\n\t\tfieldNames, err = r.Read()\n\t\tif err != nil {\n\t\t\treturn lineNo, fmt.Errorf(\"Can't read header csv table at %d, %s\", lineNo, err)\n\t\t}\n\t}\n\tfor {\n\t\tlineNo++\n\t\trow, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn lineNo, fmt.Errorf(\"Can't read row csv table at %d, %s\", lineNo, err)\n\t\t}\n\t\tvar fieldName string\n\t\trecord := map[string]interface{}{}\n\t\tif idCol < 0 {\n\t\t\tkey = fmt.Sprintf(\"%d\", lineNo)\n\t\t}\n\t\tfor i, val := range row {\n\t\t\tif i < len(fieldNames) {\n\t\t\t\tfieldName = fieldNames[i]\n\t\t\t\tif idCol == i {\n\t\t\t\t\tkey = val\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfieldName = fmt.Sprintf(fmtColumnName, i+1)\n\t\t\t}\n\t\t\t//Note: We need to convert the value\n\t\t\tif i, err := strconv.ParseInt(val, 10, 64); err == nil {\n\t\t\t\trecord[fieldName] = i\n\t\t\t} else if f, err := strconv.ParseFloat(val, 64); err == nil {\n\t\t\t\trecord[fieldName] = f\n\t\t\t} else if strings.ToLower(val) == \"true\" {\n\t\t\t\trecord[fieldName] = true\n\t\t\t} else if strings.ToLower(val) == \"false\" {\n\t\t\t\trecord[fieldName] = false\n\t\t\t} else {\n\t\t\t\tval = strings.TrimSpace(val)\n\t\t\t\tif len(val) > 0 {\n\t\t\t\t\trecord[fieldName] = val\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif len(key) > 0 && len(record) > 0 {\n\t\t\tif c.HasKey(key) {\n\t\t\t\tif overwrite == true {\n\t\t\t\t\terr = c.Update(key, record)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn lineNo, fmt.Errorf(\"can't update %+v to %s, %s\", record, key, err)\n\t\t\t\t\t}\n\t\t\t\t} else if verboseLog {\n\t\t\t\t\tlog.Printf(\"Skipping row %d, key %q, already exists\", lineNo, key)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\terr = c.Create(key, record)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn lineNo, fmt.Errorf(\"can't create %+v to %s, %s\", record, key, err)\n\t\t\t\t}\n\t\t\t}\n\t\t} else if verboseLog {\n\t\t\tlog.Printf(\"Skipping row %d, key value missing\", lineNo)\n\t\t}\n\t\tif verboseLog == true && (lineNo%1000) == 0 {\n\t\t\tlog.Printf(\"%d rows processed\", lineNo)\n\t\t}\n\t}\n\treturn lineNo, nil\n}",
"func createCSVReader(ctx context.Context, csvFileName string) (\n\t<-chan map[string]string, <-chan error, error) {\n\n\tout := make(chan map[string]string)\n\terrc := make(chan error, 1)\n\n\t// build the reader, raw csv reader is wrapped to provide\n\t// channel access & row-reord to map functionality\n\tf, err := os.Open(csvFileName)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"unable to open csv file: \"+csvFileName)\n\t}\n\n\tcsvr := csv.NewReader(f)\n\treader := csvutils.WithCsvReader(csvr, f)\n\n\tgo func() {\n\t\tdefer close(out)\n\t\tdefer close(errc)\n\t\tdefer f.Close()\n\t\tdefer reader.Close()\n\n\t\tfor record := range reader.C() {\n\t\t\trecordMap := record.AsMap()\n\t\t\tselect {\n\t\t\tcase out <- recordMap:\n\t\t\t\t// log.Printf(\"\\nrecord read:\\n\\n%v\\n\\n\", recordMap)\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\t// check for any errors that may have closed the stream early\n\t\tif reader.Error() != nil {\n\t\t\terrc <- errors.Wrap(reader.Error(), \"csv stream unexpectedly closed\")\n\t\t}\n\t}()\n\treturn out, errc, nil\n\n}",
"func FillTheMap(fileCols, row []string, lm LexMachine) map[string]string {\n\n\t// the output map will contain pairs 'fieldName' -> 'fieldValue' from the current row of CSV-file\n\tmapSize := len(lm.Select) + len(lm.Where)/3\n\trowData := make(map[string]string, mapSize)\n\n\t// fill the output map with SELECT-data\n\tfor _, col := range lm.Select {\n\t\tfor i := 0; i < len(fileCols); i += 1 {\n\t\t\tif col == fileCols[i] {\n\t\t\t\trowData[col] = row[i]\n\t\t\t}\n\t\t}\n\t}\n\t// add WHERE-data to the output map\n\tfor i := 0; i < len(lm.Where); i += 4 { //подход не учитывает скобки. Надо исправлять\n\t\tfor j := 0; j < len(fileCols); j += 1 {\n\t\t\tif lm.Where[i].Tok == fileCols[j] {\n\t\t\t\trowData[lm.Where[i].Tok] = row[j]\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rowData\n}",
"func ParseMap(t string, typ reflect.Type, indent int) (interface{}, error) {\n\tr := bytes.NewReader([]byte(t))\n\tcsvR := csv.NewReader(r)\n\trecords, err := csvR.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm := reflect.MakeMap(typ)\n\n\tktyp := typ.Key()\n\tvtyp := typ.Elem()\n\n\tfor _, slc := range records {\n\t\tfor _, s := range slc {\n\t\t\t// TODO - fix this, this is bad and will break if there are any colons inside of a string\n\t\t\tkvslc := strings.Split(s, \":\")\n\t\t\tif len(kvslc) != 2 {\n\t\t\t\treturn nil, fmt.Errorf(\"cfgen: Missing full k/v pair for map, got %d of 2 entries\", len(kvslc))\n\t\t\t}\n\t\t\tk, err := ParseType(kvslc[0], ktyp, indent)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tt, err := ParseType(kvslc[1], vtyp, indent)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tm.SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(t))\n\t\t}\n\t}\n\treturn m.Interface(), nil\n}",
"func generateCSVParseFunc(headers []string, fieldDelimiter rune, lazyQuotes bool) parseFunc {\n\treturn func(value interface{}) (interface{}, error) {\n\t\tcsvLine, err := valueAsString(value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treader := csvparser.NewReader(strings.NewReader(csvLine))\n\t\treader.Comma = fieldDelimiter\n\t\treader.FieldsPerRecord = len(headers)\n\t\treader.LazyQuotes = lazyQuotes\n\n\t\t// Typically only need one\n\t\tlines := make([][]string, 0, 1)\n\t\tfor {\n\t\t\tline, err := reader.Read()\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif err != nil && len(line) == 0 {\n\t\t\t\treturn nil, errors.New(\"failed to parse entry\")\n\t\t\t}\n\n\t\t\tlines = append(lines, line)\n\t\t}\n\n\t\t/*\n\t\t\tThis parser is parsing a single value, which came from a single log entry.\n\t\t\tTherefore, if there are multiple lines here, it should be assumed that each\n\t\t\tsubsequent line contains a continuation of the last field in the previous line.\n\n\t\t\tGiven a file w/ headers \"A,B,C,D,E\" and contents \"aa,b\\nb,cc,d\\nd,ee\",\n\t\t\texpect reader.Read() to return bodies:\n\t\t\t- [\"aa\",\"b\"]\n\t\t\t- [\"b\",\"cc\",\"d\"]\n\t\t\t- [\"d\",\"ee\"]\n\t\t*/\n\n\t\tjoinedLine := lines[0]\n\t\tfor i := 1; i < len(lines); i++ {\n\t\t\tnextLine := lines[i]\n\n\t\t\t// The first element of the next line is a continuation of the previous line's last element\n\t\t\tjoinedLine[len(joinedLine)-1] += \"\\n\" + nextLine[0]\n\n\t\t\t// The remainder are separate elements\n\t\t\tfor n := 1; n < len(nextLine); n++ {\n\t\t\t\tjoinedLine = append(joinedLine, nextLine[n])\n\t\t\t}\n\t\t}\n\n\t\treturn headersMap(headers, joinedLine)\n\t}\n}",
"func buildPhoneMap(inputFileName string) (*map[string]map[string]string, error) {\n\tfile, err := os.Open(inputFileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\treader := csv.NewReader(file)\n\n\tuserPhoneMap := map[string]map[string]string{}\n\tdidReadHeader := false\n\tfor {\n\t\trecord, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, err\n\t\t} else if len(record) < 3 {\n\t\t\treturn nil, fmt.Errorf(\"Wrong format at line %v\", record)\n\t\t}\n\n\t\tif !didReadHeader {\n\t\t\tdidReadHeader = true\n\t\t\tcontinue\n\t\t}\n\t\tphone := record[0]\n\t\tactivate := record[1]\n\t\tdeactivate := record[2]\n\n\t\tif _, ok := userPhoneMap[phone]; !ok {\n\t\t\tuserPhoneMap[phone] = map[string]string{}\n\t\t}\n\t\tuserPhoneMap[phone][deactivate] = activate\n\t}\n\n\treturn &userPhoneMap, nil\n}",
"func readCSV(file string) (map[string]string, error) {\n\tin, err := os.Open(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcsvIn := csv.NewReader(in)\n\t// skip headers\n\t_, err = csvIn.Read()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparents := make(map[string]string)\n\tfor {\n\t\trecord, err := csvIn.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsourceTag := record[0] + \".\" + record[1]\n\t\ttargetTag := record[2] + \".\" + record[3]\n\t\tparents[targetTag] = sourceTag\n\t}\n\n\treturn parents, nil\n}",
"func getCSVHeadersIndexMap() map[string]int {\n\n\t// csvHeadersIndex holds the map of headers with its index\n\tcsvHeadersIndex := map[string]int{\n\t\t\"Unit\": -1,\n\t\t\"FloorPlan\": -1,\n\t\t\"UnitDesignation\": -1,\n\t\t\"SQFT\": -1,\n\t\t\"UnitLeaseStatus\": -1,\n\t\t\"Name\": -1,\n\t\t\"PhoneNumber\": -1,\n\t\t\"Email\": -1,\n\t\t\"MoveIn\": -1,\n\t\t\"MoveOut\": -1,\n\t\t\"LeaseStart\": -1,\n\t\t\"LeaseEnd\": -1,\n\t\t\"MarketAddl\": -1,\n\t\t\"Rent\": -1,\n\t\t// \"Tax\": -1,\n\t}\n\n\treturn csvHeadersIndex\n}",
"func getEntriesMap(a string) (map[string]int, error) {\n\tresult := make(map[string]int)\n\tfile, err := afs.Open(a)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\tfor s, header, i := bufio.NewScanner(file), true, -1; s.Scan(); header = false {\n\t\t// Find correct row if len() > 1 and check for ambiguities\n\t\tif header {\n\t\t\tfor k, v := range strings.Split(s.Text(), \",\") {\n\t\t\t\tif v == headerName {\n\t\t\t\t\tif i < 0 {\n\t\t\t\t\t\ti = k\n\t\t\t\t\t} else {\n\t\t\t\t\t\tfile.Close()\n\t\t\t\t\t\treturn nil,\n\t\t\t\t\t\t\tfmt.Errorf(multipleHeaderErr,\n\t\t\t\t\t\t\t\ta, headerName)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif i < 0 {\n\t\t\t\treturn nil, fmt.Errorf(headerNotFoundErr, headerName, a)\n\t\t\t}\n\t\t\tcontinue // don't want to add header to result\n\t\t}\n\t\tif v := strings.Split(s.Text(), \",\"); len(v) > i {\n\t\t\t// Ignoring what are presumably null values (discuss?)\n\t\t\tif v[i] != \"\\\"\\\"\" {\n\t\t\t\tresult[v[i]]++\n\t\t\t}\n\t\t}\n\t}\n\treturn result, nil\n}",
"func Read_total_gsm() (map[string]int, map[string]int){\n csvfile, err := os.Open(os.Args[2])\n\n if err != nil {\n fmt.Println(err)\n return nil, nil\n }\n\n defer csvfile.Close()\n reader := csv.NewReader(csvfile)\n reader.FieldsPerRecord = -1 \n rawCSVdata, err := reader.ReadAll()\n\n if err != nil {\n fmt.Println(err)\n os.Exit(1)\n }\n \n //var name []string\n gsm := make(map[string]int)\n length := make(map[string]int)\n\n for _, each := range rawCSVdata {\n n := each[0]\n g, err := strconv.Atoi(each[1])\n l, err := strconv.Atoi(each[2])\n if err != nil {\n // handle error\n fmt.Println(err)\n os.Exit(2)\n }\n //fmt.Println(i)\n //name = append(name, n)\n gsm[n] = g\n length[n] = l\n }\n \n return gsm, length\n}",
"func parseCSV(csvFile multipart.File, filename string, column int) (data string, err error) {\n\tvar totalRows int = 0\n\tvar distributionMap = make(map[int]int, 9)\n\n\tr := csvd.NewReader(csvFile)\n\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t\t// Out of range error, i.e. that column is not in use.\n\t\tif len(record) <= column {\n\t\t\tbreak\n\t\t}\n\t\t// Skip over any empty records.\n\t\tif record[column] == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfirstDigit := retrieveFirstDigit(record[column])\n\t\tif firstDigit != 0 {\n\t\t\tdistributionMap[firstDigit]++\n\t\t\ttotalRows++\n\t\t}\n\t}\n\n\tsortedKeys := sortMap(distributionMap)\n\n\tpayload := &Payload{}\n\tpayload.Filename = filename\n\n\tfor _, digit := range sortedKeys {\n\t\tvar count int = distributionMap[digit]\n\t\tvar percent float64 = calculatePercent(count, totalRows)\n\n\t\tif digit != 0 {\n\t\t\tvalues := Digit{Value: digit, Count: count, Percent: percent}\n\t\t\tpayload.AddItem(values)\n\t\t}\n\t\tif digit == 1 {\n\t\t\tpayload.BenfordValidation = benfordValidator(percent)\n\t\t}\n\t}\n\n\toutput, err := json.Marshal(payload)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tdata = string(output)\n\treturn\n}",
"func mapData(is types.ImportSource) ([]types.ImportSource, error) {\n\tvar rr []types.ImportSource\n\tif is.DataMap == nil {\n\t\trr = append(rr, is)\n\t\treturn rr, nil\n\t}\n\n\t// unpack the map\n\t// @todo provide a better structure!!\n\tvar dataMap []map[string]interface{}\n\tsrc, _ := ioutil.ReadAll(is.DataMap)\n\terr := json.Unmarshal(src, &dataMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// get header fields\n\tr := csv.NewReader(is.Source)\n\theader, err := r.Read()\n\tif err == io.EOF {\n\t\treturn rr, nil\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// maps { header field: field index } for a nicer lookup\n\thMap := make(map[string]int)\n\tfor i, h := range header {\n\t\thMap[h] = i\n\t}\n\n\tbufs := make(map[string]*MapBuffer)\n\n\t// data mapping\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// on next row, currently acquired headers are marked as final\n\t\tfor _, b := range bufs {\n\t\t\tb.hasHeader = true\n\t\t}\n\n\t\t// find applicable maps, that can be used for the given row.\n\t\t// the system allows composition, so all applicable maps are used.\n\t\tfor _, strmp := range dataMap {\n\t\t\tif ok, err := checkWhere(strmp[\"where\"], record, hMap); ok && err == nil {\n\t\t\t\tmaps, ok := strmp[\"map\"].([]interface{})\n\t\t\t\tif !ok {\n\t\t\t\t\treturn nil, errors.New(\"dataMap.invalidMap \" + is.Name)\n\t\t\t\t}\n\n\t\t\t\t// handle current record and it's values\n\t\t\t\tfor _, mp := range maps {\n\t\t\t\t\tmm, ok := mp.(map[string]interface{})\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.New(\"dataMap.map.invalidEntry \" + is.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tfrom, ok := mm[\"from\"].(string)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.New(\"dataMap.map.entry.invalidFrom \" + is.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tto, ok := mm[\"to\"].(string)\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\treturn nil, errors.New(\"dataMap.map.invalidTo \" + is.Name)\n\t\t\t\t\t}\n\n\t\t\t\t\tvv := strings.Split(to, \".\")\n\t\t\t\t\tnm := vv[0]\n\t\t\t\t\tnmF := vv[1]\n\n\t\t\t\t\tif bufs[nm] == nil {\n\t\t\t\t\t\tvar bb bytes.Buffer\n\t\t\t\t\t\tww := csv.NewWriter(&bb)\n\t\t\t\t\t\tdefer ww.Flush()\n\t\t\t\t\t\tbufs[nm] = &MapBuffer{\n\t\t\t\t\t\t\tbuffer: &bb,\n\t\t\t\t\t\t\twriter: ww,\n\t\t\t\t\t\t\tname: nm,\n\t\t\t\t\t\t\thasHeader: false,\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tval := record[hMap[from]]\n\n\t\t\t\t\t// handle data join\n\t\t\t\t\tif strings.Contains(from, \".\") {\n\t\t\t\t\t\t// construct a `alias.joinOnID` value, so we can perform a simple map lookup\n\t\t\t\t\t\tpts := strings.Split(from, \".\")\n\t\t\t\t\t\tbaseFieldAlias := pts[0]\n\t\t\t\t\t\toriginalOn := is.AliasMap[baseFieldAlias]\n\t\t\t\t\t\tjoinField := pts[1]\n\n\t\t\t\t\t\too := []string{}\n\t\t\t\t\t\tfor _, ff := range originalOn {\n\t\t\t\t\t\t\too = append(oo, record[hMap[ff]])\n\t\t\t\t\t\t}\n\t\t\t\t\t\tval = baseFieldAlias + \".\" + strings.Join(oo[:], \".\")\n\n\t\t\t\t\t\t// modify header field to specify what joined node field to use\n\t\t\t\t\t\tnmF += \":\" + joinField\n\t\t\t\t\t}\n\n\t\t\t\t\tbufs[nm].row = append(bufs[nm].row, val)\n\t\t\t\t\tif !bufs[nm].hasHeader {\n\t\t\t\t\t\tbufs[nm].header = append(bufs[nm].header, nmF)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t// write csv rows\n\t\tfor _, v := range bufs {\n\t\t\tif len(v.row) > 0 {\n\t\t\t\tv.writer.Write(v.row)\n\t\t\t\tv.row = 
[]string{}\n\t\t\t}\n\t\t}\n\t}\n\n\t// construct output import source nodes\n\tfor _, v := range bufs {\n\t\trr = append(rr, types.ImportSource{\n\t\t\tName: v.name,\n\t\t\tSource: v.buffer,\n\t\t\tHeader: &v.header,\n\t\t\tFieldMap: is.FieldMap,\n\t\t\tAliasMap: is.AliasMap,\n\t\t\tValueMap: is.ValueMap,\n\t\t})\n\t}\n\n\treturn rr, nil\n}",
"func parseLines(scanner *bufio.Scanner, writer *bufio.Writer) []string {\n\tdefer writer.Flush()\n\theader := make([]string, 0)\n\tfor scanner.Scan() {\n\t\tparsed := parseRecord(scanner.Text())\n\t\toutput := make([]string, 0)\n\t\tfor _, field := range header {\n\t\t\tif val, iskey := parsed[field]; iskey {\n\t\t\t\toutput = append(output, val)\n\t\t\t\tdelete(parsed, field)\n\t\t\t} else {\n\t\t\t\toutput = append(output, \"\")\n\t\t\t}\n\t\t}\n\n\t\tfor field, value := range parsed {\n\t\t\toutput = append(output, value)\n\t\t\theader = append(header, field)\n\t\t}\n\t\toutput = append(output, strconv.Itoa(len(output)))\n\t\twriter.WriteString(strings.Join(output, parse.delim) + \"\\n\")\n\t}\n\treturn header\n}",
"func CSVFirstRowEquals(csvString string, testVals map[string]interface{}) error {\n\tm, err := CSVUnmarshal(csvString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\theader := m[0]\n\tfirstRow := map[string]string{}\n\t//turn first row into a map\n\tfor k, v := range m[1] {\n\t\tfirstRow[header[k]] = v\n\t}\n\n\tfor k, v := range testVals {\n\t\tif fmt.Sprintf(\"%+v\", firstRow[k]) != fmt.Sprintf(\"%+v\", v) {\n\t\t\treturn fmt.Errorf(\"values for key %s do not match: expected '%+v' provided '%+v'\", k, v, firstRow[k])\n\t\t}\n\t}\n\n\treturn nil\n}",
"func ReadCSV(path string, cols map[string]int, sep rune, comment string) (<-chan map[string]string, <-chan error, chan<- int) {\n\tout, err, sig, sigv := make(chan map[string]string, 64), make(chan error, 1), make(chan int), 0\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif e := recover(); e != nil {\n\t\t\t\terr <- e.(error)\n\t\t\t}\n\t\t\tclose(err)\n\t\t\tclose(out)\n\t\t}()\n\t\tin, ierr, isig := readLn(path)\n\t\tdefer close(isig)\n\t\thandleSig(sig, &sigv)\n\n\t\tvcols, wid, line, algn := make(map[string]int, 32), 0, 0, 0\n\t\tfor ln := range in {\n\t\t\tfor line++; ; {\n\t\t\t\tswitch {\n\t\t\t\tcase len(strings.TrimLeft(ln, \" \")) == 0:\n\t\t\t\tcase comment != \"\" && strings.HasPrefix(ln, comment):\n\t\t\t\tcase sep == '\\x00':\n\t\t\t\t\tfor _, r := range sepSet {\n\t\t\t\t\t\tif c := len(splitCSV(ln, r)); c > wid {\n\t\t\t\t\t\t\twid, sep = c, r\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tcontinue\n\t\t\t\tcase len(vcols) == 0:\n\t\t\t\t\tsl, uc, sc, mc, qc := splitCSV(ln, sep), make(map[int]int), make(map[string]int), 0, make(map[string]int)\n\t\t\t\t\tfor c, i := range cols {\n\t\t\t\t\t\tif c = strings.Trim(c, \" \"); c != \"\" && i > 0 {\n\t\t\t\t\t\t\tsc[c] = i\n\t\t\t\t\t\t\tif uc[i]++; i > mc {\n\t\t\t\t\t\t\t\tmc = i\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tfor i, c := range sl {\n\t\t\t\t\t\tif c = strings.Trim(c, \" \"); c != \"\" {\n\t\t\t\t\t\t\tif len(sc) == 0 || sc[c] > 0 {\n\t\t\t\t\t\t\t\tvcols[c] = i + 1\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tif _, e := strconv.ParseFloat(c, 64); e != nil {\n\t\t\t\t\t\t\t\tqc[c] = i + 1\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tswitch wid = len(sl); {\n\t\t\t\t\tcase len(sc) == 0 && len(qc) == wid:\n\t\t\t\t\tcase len(sc) == 0:\n\t\t\t\t\t\tpanic(fmt.Errorf(\"no heading in CSV file %q and no column map provided\", path))\n\t\t\t\t\tcase len(vcols) == len(sc):\n\t\t\t\t\tcase len(vcols) > 0:\n\t\t\t\t\t\tpanic(fmt.Errorf(\"missing columns in CSV file %q\", path))\n\t\t\t\t\tcase len(qc) == wid || mc > wid:\n\t\t\t\t\t\tpanic(fmt.Errorf(\"column map incompatible with CSV file %q\", path))\n\t\t\t\t\tcase len(uc) < len(sc):\n\t\t\t\t\t\tpanic(fmt.Errorf(\"ambiguous column map provided for CSV file %q\", path))\n\t\t\t\t\tdefault:\n\t\t\t\t\t\tvcols = sc\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\tdefault:\n\t\t\t\t\tif sl := splitCSV(ln, sep); len(sl) == wid {\n\t\t\t\t\t\tm, heading := make(map[string]string, len(vcols)), true\n\t\t\t\t\t\tfor c, i := range vcols {\n\t\t\t\t\t\t\tf := strings.Trim(sl[i-1], \" \")\n\t\t\t\t\t\t\tif len(f) > 0 {\n\t\t\t\t\t\t\t\tm[c] = f\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\theading = heading && f == c\n\t\t\t\t\t\t}\n\t\t\t\t\t\tif !heading && len(m) > 0 {\n\t\t\t\t\t\t\tm[\"~line\"] = strconv.Itoa(line)\n\t\t\t\t\t\t\tout <- m\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if algn++; line > 200 && float64(algn)/float64(line) > 0.02 {\n\t\t\t\t\t\tpanic(fmt.Errorf(\"excessive column misalignment in CSV file %q (>%d rows)\", path, algn))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif sigv != 0 {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t\tif e := <-ierr; e != nil {\n\t\t\tpanic(fmt.Errorf(\"problem reading CSV file %q (%v)\", path, e))\n\t\t}\n\t}()\n\treturn out, err, sig\n}",
"func NewMap(csvData io.Reader) (m *MapMapper, err error) {\n\tm = &MapMapper{}\n\tr := csv.NewReader(csvData)\n\tr.LazyQuotes = true\n\tr.FieldsPerRecord = 2\n\tvar row []string\n\trow, err = r.Read()\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tm.From = row[0]\n\tm.To = row[1]\n\tm.m = make(map[string]string)\n\ti := 0\n\tfor {\n\t\ti++\n\t\trow, err = r.Read()\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tm.m[row[0]] = row[1]\n\t}\n\treturn\n}",
"func parseCsv(file string) ([][]string, error) {\n\tf, err := os.Open(file)\n\tdefer f.Close()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tlines, err := csv.NewReader(f).ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn lines, nil\n}",
"func makeFromCsvReader(\n\tfileName string, csvFile *os.File, csvHeader string, csvToCell func(row []string) (interface{}, error),\n) (func() (interface{}, error), error) {\n\n\t// create csv reader from utf-8 line\n\tuRd, err := helper.Utf8Reader(csvFile, theCfg.encodingName)\n\tif err != nil {\n\t\treturn nil, errors.New(\"fail to create utf-8 converter: \" + err.Error())\n\t}\n\n\tcsvRd := csv.NewReader(uRd)\n\tcsvRd.TrimLeadingSpace = true\n\tcsvRd.ReuseRecord = true\n\n\t// skip header line\n\tfhs, e := csvRd.Read()\n\tswitch {\n\tcase e == io.EOF:\n\t\treturn nil, errors.New(\"invalid (empty) csv file: \" + fileName)\n\tcase err != nil:\n\t\treturn nil, errors.New(\"csv file read error: \" + fileName + \": \" + err.Error())\n\t}\n\tfh := strings.Join(fhs, \",\")\n\tif strings.HasPrefix(fh, string(helper.Utf8bom)) {\n\t\tfh = fh[len(helper.Utf8bom):]\n\t}\n\tif fh != csvHeader {\n\t\treturn nil, errors.New(\"Invalid csv file header \" + fileName + \": \" + fh + \" expected: \" + csvHeader)\n\t}\n\n\t// convert each csv line into cell (id cell)\n\t// reading from .id.csv files not supported by converters\n\tfrom := func() (interface{}, error) {\n\t\trow, err := csvRd.Read()\n\t\tswitch {\n\t\tcase err == io.EOF:\n\t\t\treturn nil, nil // eof\n\t\tcase err != nil:\n\t\t\treturn nil, errors.New(\"csv file read error: \" + fileName + \": \" + err.Error())\n\t\t}\n\n\t\t// convert csv line to cell and return from reader\n\t\tc, err := csvToCell(row)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"csv file row convert error: \" + fileName + \": \" + err.Error())\n\t\t}\n\t\treturn c, nil\n\t}\n\treturn from, nil\n}",
"func loadOneSiteCSVRow(csvHeadersIndex map[string]int, data []string) (bool, CSVRow) {\n\tcsvRow := reflect.New(reflect.TypeOf(CSVRow{}))\n\trowLoaded := false\n\n\tfor header, index := range csvHeadersIndex {\n\t\tvalue := strings.TrimSpace(data[index])\n\t\tcsvRow.Elem().FieldByName(header).Set(reflect.ValueOf(value))\n\t}\n\n\t// if blank data has not been passed then only need to return true\n\tif (CSVRow{}) != csvRow.Elem().Interface().(CSVRow) {\n\t\trowLoaded = true\n\t}\n\n\treturn rowLoaded, csvRow.Elem().Interface().(CSVRow)\n}",
"func readStationMapFile() map[string][]string {\n\tcsvFile := os.Getenv(\"STATION_MAP_FILE\")\n\tif csvFile == \"\" {\n\t\tlog.Fatal(\"$STATION_MAP_FILE must be set\")\n\t}\n\n\tfile, err := os.Open(csvFile)\n\tif err != nil {\n\t\tpanic(\"error while opening file\")\n\t}\n\n\tr := csv.NewReader(file)\n\n\ttrainLines := map[string][]string{} // temporary map of line to stations. Its used to order stations on a line.\n\n\tline := -1\n\tstationIdx := 0 // auto generated index for every station.\n\t// Iterate through the records\n\tfor {\n\t\tline++\n\t\t// Read each record from csv\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tif line == 0 {\n\t\t\t// skip the header line\n\t\t\tcontinue\n\t\t}\n\n\t\tstationCode := record[0]\n\t\tstationName := record[1]\n\t\topeningTime, err := time.Parse(\"2 January 2006\", record[2])\n\t\tif err != nil {\n\t\t\tlog.Printf(\"wrong date format for station %v. Skipping it\\n\", stationName)\n\t\t\tcontinue\n\t\t}\n\n\t\tif _, ok := lineStationMap[stationCode]; ok {\n\t\t\tlog.Println(\"duplicate station entry for stationCode \", stationCode)\n\t\t\tcontinue\n\t\t}\n\n\t\t// create line-station for the record.\n\t\tlineStationMap[stationCode] = &lineStation{\n\t\t\tname: stationName,\n\t\t\topeningDate: openingTime,\n\t\t\tneighbours: map[int]*edge{},\n\t\t}\n\n\t\t// if its an existing station, just append station code. Otherwise, create a new station object.\n\t\tif s, ok := stationNameMap[stationName]; ok {\n\t\t\ts.codes = append(s.codes, stationCode)\n\t\t} else {\n\t\t\tstation := &station{\n\t\t\t\tname: stationName,\n\t\t\t\tcodes: []string{stationCode},\n\t\t\t\tidx: stationIdx,\n\t\t\t}\n\t\t\tstationNameMap[stationName] = station\n\t\t\tstationIndexMap[stationIdx] = station\n\t\t\tstationIdx++\n\t\t}\n\n\t\tlineCode := stationCode[:2] // extract train line.\n\t\tif _, ok := trainLines[lineCode]; ok {\n\t\t\ttrainLines[lineCode] = append(trainLines[lineCode], stationCode)\n\t\t} else {\n\t\t\ttrainLines[lineCode] = []string{stationCode}\n\t\t}\n\t}\n\treturn trainLines\n}",
"func NewDecoderFromCSVReader(csvR *csv.Reader, dest interface{}) (Decoder, error) {\n\tmappings, err := structureFromStruct(dest)\n\tif err != nil {\n\t\treturn Decoder{}, err\n\t}\n\n\t// ensure that all \"unknown\" types have their own text unmarshaler\n\tfor _, m := range mappings {\n\t\tif m.fieldType == reflect.Invalid && !m.customUnmarshaler {\n\t\t\treturn Decoder{}, fmt.Errorf(\"unsupported field type found that does not \"+\n\t\t\t\t\"implement the encoding.TextUnmarshaler interface: %s\", m.fieldName)\n\t\t}\n\t}\n\n\theaders, err := csvR.Read()\n\tif err != nil {\n\t\treturn Decoder{}, fmt.Errorf(\"failed to find headers: %s\", err)\n\t}\n\n\tallEmpty := true\n\tnumColumns := len(headers)\n\tsortedMappings := make([]csvField, numColumns)\n\textraHeaders := []string{} // TODO: do anything with this?\n\theadersSeen := map[string]bool{}\n\t// Sort headers in line w/ CSV columns\n\tfor i, h := range headers {\n\t\th = normalizeHeader(h)\n\t\t// ensure unique CSV headers\n\t\tif headersSeen[h] {\n\t\t\treturn Decoder{}, fmt.Errorf(\"saw header column '%s' twice, CSV headers must be unique\", h)\n\t\t}\n\t\theadersSeen[h] = true\n\n\t\t// slot field info in array parallel to CSV column\n\t\tfor _, f := range mappings {\n\t\t\tif h == normalizeHeader(f.fieldName) {\n\t\t\t\tsortedMappings[i] = f\n\t\t\t}\n\t\t}\n\t\t// check if field not set\n\t\tif sortedMappings[i].fieldName == \"\" {\n\t\t\textraHeaders = append(extraHeaders, h)\n\t\t} else {\n\t\t\t// note that a field exists without an empty name\n\t\t\tallEmpty = false\n\t\t}\n\t}\n\n\t// Ensure that at least one mapping has a non-empty field name\n\tif allEmpty {\n\t\treturn Decoder{}, fmt.Errorf(\"all struct fields do not match any CSV headers\")\n\t}\n\n\t// Ensure that all required columns are present\n\tfor _, f := range mappings {\n\t\tif f.required && !headersSeen[normalizeHeader(f.fieldName)] {\n\t\t\treturn Decoder{}, fmt.Errorf(\"column '%s' required but not found\", f.fieldName)\n\t\t}\n\t}\n\n\treturn Decoder{\n\t\tr: csvR,\n\t\tmappings: sortedMappings,\n\t\tnumColumns: numColumns,\n\t}, nil\n}",
"func ImportTimePanelFromCSV(str string) (*TimePanel, error) {\n\treader := csv.NewReader(strings.NewReader(str))\n\treader.TrimLeadingSpace = true\n\trecords, err := reader.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\theadLine := records[0]\n\tif len(headLine) < 3 {\n\t\treturn nil, errors.New(\"Need more columns (dateKey, secondKey, thirdKeys ...)\")\n\t}\n\tthirdIndex := NewStringIndex(headLine[2:], true)\n\tsecondMap := make(map[string]bool)\n\trecords = records[1:]\n\tfor i := range records {\n\t\tsecondMap[records[i][1]] = true\n\t}\n\tsecondKeys := make([]string, 0, len(secondMap))\n\tfor k := range secondMap {\n\t\tsecondKeys = append(secondKeys, k)\n\t}\n\tsort.Strings(secondKeys)\n\tsecondIndex := NewStringIndex(secondKeys, false)\n\tdata := make(map[int64][][]float64)\n\tfor _, record := range records {\n\t\tdate, err := time.ParseInLocation(time.RFC3339, record[0], time.Local)\n\t\tif err != nil {\n\t\t\tdate, err = time.ParseInLocation(\"2006-01-02\", record[0], time.Local)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.New(\"Unknown timeformat\")\n\t\t\t}\n\t\t}\n\t\tunix := date.Unix()\n\t\tkey := record[1]\n\t\tkeyIdx := secondIndex.Index(key)\n\t\tdf := data[unix]\n\t\tif len(df) == 0 {\n\t\t\tdf = make([][]float64, secondIndex.Length())\n\t\t}\n\t\tvalues := make([]float64, thirdIndex.Length())\n\t\tfor j := range values {\n\t\t\tvalues[j], err = strconv.ParseFloat(record[j+2], 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\tdf[keyIdx] = values\n\t\tdata[unix] = df\n\t}\n\tdates := make([]int, 0, len(data))\n\tfor k := range data {\n\t\tdates = append(dates, int(k))\n\t}\n\tsort.Ints(dates)\n\tpanel := NewTimePanel(secondIndex, thirdIndex)\n\tfor _, date := range dates {\n\t\tpanel.AddMat(time.Unix(int64(date), 0), data[int64(date)])\n\t}\n\treturn panel, nil\n}",
"func loadMovies(filepath string) (map[int]*Movie, error) {\n\tif csvFile, fileErr := os.Open(filepath); fileErr != nil {\n\t\treturn nil, fileErr\n\t} else {\n\t\treader := csv.NewReader(bufio.NewReader(csvFile))\n\t\tmovieByID := make(map[int]*Movie)\n\t\tfor {\n\t\t\tvar rowRecord []string\n\t\t\tvar readerErr error\n\n\t\t\trowRecord, readerErr = reader.Read()\n\t\t\tif readerErr != nil {\n\t\t\t\tif readerErr == io.EOF {\n\t\t\t\t\tbreak\n\t\t\t\t} else {\n\t\t\t\t\tfmt.Printf(\"Unexpected reader error: %v\\n\", readerErr)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tid, parseErr := strconv.ParseInt(rowRecord[0], 10, 64)\n\t\t\tif parseErr != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tmovieByID[int(id)] = &Movie{\n\t\t\t\tID: int(id),\n\t\t\t\tTitle: rowRecord[1],\n\t\t\t}\n\t\t}\n\n\t\treturn movieByID, nil\n\t}\n}",
"func generateSplitParseFunc(headers []string, fieldDelimiter rune) parseFunc {\n\treturn func(value interface{}) (interface{}, error) {\n\t\tcsvLine, err := valueAsString(value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// This parse function does not do any special quote handling; Splitting on the delimiter is sufficient.\n\t\tfields := strings.Split(csvLine, string(fieldDelimiter))\n\t\treturn headersMap(headers, fields)\n\t}\n}",
"func ParseMetadataHeader(header string) map[string]string {\n meta := make(map[string]string)\n\n for _, element := range strings.Split(header, \",\") {\n element := strings.TrimSpace(element)\n\n parts := strings.Split(element, \" \")\n\n // Do not continue with this element if no key and value or presented\n if len(parts) != 2 {\n continue\n }\n\n // Ignore corrent element if the value is no valid base64\n key := parts[0]\n value, err := base64.StdEncoding.DecodeString(parts[1])\n if err != nil {\n continue\n }\n\n meta[key] = string(value)\n }\n\n return meta\n}",
"func getNodes() map[string]Node {\r\n file, err := os.Open(\"data/nodes.csv\")\r\n\tif err != nil {\r\n\t\t// err is printable\r\n\t\t// elements passed are separated by space automatically\r\n\t\tfmt.Println(\"Error:\", err)\r\n\t\treturn\r\n\t}\r\n\t// automatically call Close() at the end of current method\r\n\tdefer file.Close()\r\n\t// \r\n\treader := csv.NewReader(file)\r\n\t// options are available at:\r\n\t// http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94\r\n\treader.Comma = ';'\r\n\tlineCount := 0\r\n\tfor {\r\n // read just one record, but we could ReadAll() as well\r\n\t\trecord, err := reader.Read()\r\n \r\n\t\t// end-of-file is fitted into err\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t} else if err != nil {\r\n\t\t\tfmt.Println(\"Error:\", err)\r\n\t\t\treturn\r\n\t\t}\r\n \r\n if lineCount == 0 {\r\n Header header = getHeader(record)\r\n }\r\n \r\n lineCount += 1\r\n }\r\n}",
"func parseCSV(s string, tw timeWindow) []csvLine {\n\t// open CSV file\n\tfile, err := os.Open(s)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"There was a problem opening the file! :. %s\", err))\n\t}\n\t// make sure we eventually close the CSV file\n\tdefer func() {\n\t\tif err = file.Close(); err != nil {\n\t\t\tpanic(fmt.Sprintf(\"There was a problem closing the file! :. %s\", err))\n\t\t}\n\t}()\n\t// create a reader for the file\n\treader := csv.NewReader(file)\n\tvar content []csvLine\n\n\t// if the read line is in the specified time window put it into the content slice\n\tfor {\n\t\tEOF := readCSVLine(&content, reader, tw)\n\t\tif EOF {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn content\n}",
"func ParseCSV(r io.Reader) ([]Entry, error) {\n\treader := csv.NewReader(r)\n\n\t// skip header\n\treader.Read()\n\n\tvar entries []Entry\n\tfor {\n\t\tline, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn nil, fmt.Errorf(\"encountered error while parsing csv: %v\", err)\n\t\t}\n\n\t\tentries = append(entries, entryFor(line))\n\t}\n\n\treturn entries, nil\n}",
"func parseRow(row string, rowIndex int) (dr datarow) {\n\tcols := strings.Split(row, \",\")\n\tif len(cols) == 0 { // blank lines ignored\n\t\treturn nil\n\t}\n\tdr = make(datarow, columnsPerRow)\n\t// last column is the thing the previous column features predict\n\tfor i := 0; i < lastColumnIndex; i++ {\n\t\tnc, err := strconv.ParseFloat(cols[i], 32)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"row=\", rowIndex, \"col=\", i)\n\t\t\tpanic(err)\n\t\t}\n\t\tdr[i] = float32(nc)\n\t}\n\tprediction := cols[lastColumnIndex]\n\tif _, existsYet := variables[prediction]; !existsYet {\n\t\tindexedVariables = append(indexedVariables, prediction)\n\t\tnewIndex := len(indexedVariables) - 1\n\t\tvariables[prediction] = float32(newIndex)\n\t}\n\tdr[lastColumnIndex] = variables[prediction]\n\n\treturn dr\n}",
"func readCSVFileIntoTbl(f string) (tbl [][]string) {\n\tcsvFile, err := os.Open(f)\n\tif err != nil {\n\t\tpanic(\"ReadFileIntoTbl \" + f + \" fail\\n\" + err.Error())\n\t}\n\tdefer csvFile.Close()\n\n\tcsvReader := csv.NewReader(csvFile)\n\tfor {\n\t\trow, err := csvReader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tpanic(\"ReadFileIntoTbl \" + f + \" fail\\n\" + err.Error())\n\t\t}\n\t\ttbl = append(tbl, row)\n\t}\n\treturn tbl\n}",
"func (ns *numbers) parse(key, value string) {\n\tns.Lock()\n\tdefer ns.Unlock()\n\tsrc := bytes.NewBufferString(value)\n\tcsv_reader := csv.NewReader(src)\n\trecords, err := csv_reader.ReadAll()\n\tif err != nil {\n\t\tlog.Errorf(\"%v %v\", err, key)\n\t\treturn\n\t}\n\n\tif len(records) == 0 {\n\t\tlog.Warningf(\"empty document: %v\", key)\n\t\treturn\n\t}\n\n\ttblname := filepath.Base(key)\n\t// 记录数据, 第一行为表头,因此从第二行开始\n\tfor line := 1; line < len(records); line++ {\n\t\tfor field := 1; field < len(records[line]); field++ { // 每条记录的第一个字段作为行索引\n\t\t\tns.set(ns.tables, tblname, records[line][0], records[0][field], records[line][field])\n\t\t}\n\t}\n\n\t// 记录KEYS\n\tns.dump_keys(ns.tables, tblname)\n}",
"func (sc *Scanner) parseRecord() (record map[string]string, err error) {\n\trecord = make(map[string]string)\n\tfor {\n\t\tf, delim, err := sc.parseFieldName()\n\t\tif err != nil {\n\t\t\tif len(record) == 0 {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tif delim == '\\n' {\n\t\t\tcontinue\n\t\t}\n\t\tif delim == '%' {\n\t\t\tbreak\n\t\t}\n\t\tv, end := sc.parseFieldValue()\n\t\tif len(f) > 0 && len(v) > 0 {\n\t\t\tif _, dup := record[f]; dup {\n\t\t\t\treturn nil, errors.Errorf(\"line: %d: duplicated field %q\", sc.line, f)\n\t\t\t}\n\t\t\trecord[f] = v\n\t\t\tif !sc.fok[f] {\n\t\t\t\tsc.fok[f] = true\n\t\t\t\tsc.fields = append(sc.fields, f)\n\t\t\t}\n\t\t}\n\t\tif end {\n\t\t\tbreak\n\t\t}\n\t}\n\tif len(record) == 0 {\n\t\treturn nil, nil\n\t}\n\treturn record, nil\n}",
"func jmeterCsvGetHeader(line []string, head *JmeterCsvHeader) (err error) {\n\thead.TimeStamp = -1\n\thead.Elapsed = -1\n\thead.Label = -1\n\thead.ResponseCode = -1\n\thead.ResponseMessage = -1\n\thead.ThreadName = -1\n\thead.DataType = -1\n\thead.Success = -1\n\thead.FailureMessage = -1\n\thead.Bytes = -1\n\thead.SentBytes = -1\n\thead.GrpThreads = -1\n\thead.AllThreads = -1\n\thead.URL = -1\n\thead.Latency = -1\n\thead.IdleTime = -1\n\thead.Connect = -1\n\thead.Length = 17\n\n\tfor i := range line {\n\t\tswitch line[i] {\n\t\tcase \"timeStamp\":\n\t\t\thead.TimeStamp = int8(i)\n\t\tcase \"elapsed\":\n\t\t\thead.Elapsed = int8(i)\n\t\tcase \"label\":\n\t\t\thead.Label = int8(i)\n\t\tcase \"responseCode\":\n\t\t\thead.ResponseCode = int8(i)\n\t\tcase \"responseMessage\":\n\t\t\thead.ResponseMessage = int8(i)\n\t\tcase \"threadName\":\n\t\t\thead.ThreadName = int8(i)\n\t\tcase \"dataType\":\n\t\t\thead.DataType = int8(i)\n\t\tcase \"success\":\n\t\t\thead.Success = int8(i)\n\t\tcase \"failureMessage\":\n\t\t\thead.FailureMessage = int8(i)\n\t\tcase \"bytes\":\n\t\t\thead.Bytes = int8(i)\n\t\tcase \"sentBytes\":\n\t\t\thead.SentBytes = int8(i)\n\t\tcase \"grpThreads\":\n\t\t\thead.GrpThreads = int8(i)\n\t\tcase \"allThreads\":\n\t\t\thead.AllThreads = int8(i)\n\t\tcase \"URL\":\n\t\t\thead.URL = int8(i)\n\t\tcase \"Latency\":\n\t\t\thead.Latency = int8(i)\n\t\tcase \"IdleTime\":\n\t\t\thead.IdleTime = int8(i)\n\t\tcase \"Connect\":\n\t\t\thead.Connect = int8(i)\n\t\tdefault:\n\t\t\terr = fmt.Errorf(\"unknown field: %s\", line[i])\n\t\t\treturn\n\t\t}\n\t}\n\tif head.TimeStamp == -1 {\n\t\terr = fmt.Errorf(\"missing field: timeStamp\")\n\t\treturn\n\t}\n\tif head.Elapsed == -1 {\n\t\terr = fmt.Errorf(\"missing field: elapsed\")\n\t\treturn\n\t}\n\tif head.Label == -1 {\n\t\terr = fmt.Errorf(\"missing field: label\")\n\t\treturn\n\t}\n\tif head.ResponseCode == -1 {\n\t\terr = fmt.Errorf(\"missing field: timeStamp\")\n\t\treturn\n\t}\n\tif head.ResponseMessage == -1 {\n\t\terr = fmt.Errorf(\"missing field: responseMessage\")\n\t\treturn\n\t}\n\tif head.ThreadName == -1 {\n\t\terr = fmt.Errorf(\"missing field: threadName\")\n\t\treturn\n\t}\n\tif head.DataType == -1 {\n\t\terr = fmt.Errorf(\"missing field: dataType\")\n\t\treturn\n\t}\n\tif head.Success == -1 {\n\t\terr = fmt.Errorf(\"missing field: success\")\n\t\treturn\n\t}\n\tif head.FailureMessage == -1 {\n\t\terr = fmt.Errorf(\"missing field: failureMessage\")\n\t\treturn\n\t}\n\tif head.Bytes == -1 {\n\t\terr = fmt.Errorf(\"missing field: bytes\")\n\t\treturn\n\t}\n\tif head.SentBytes == -1 {\n\t\terr = fmt.Errorf(\"missing field: sentBytes\")\n\t\treturn\n\t}\n\tif head.GrpThreads == -1 {\n\t\terr = fmt.Errorf(\"missing field: grpThreads\")\n\t\treturn\n\t}\n\tif head.AllThreads == -1 {\n\t\terr = fmt.Errorf(\"missing field: allThreads\")\n\t\treturn\n\t}\n\tif head.URL == -1 {\n\t\terr = fmt.Errorf(\"missing field: URL\")\n\t\treturn\n\t}\n\tif head.Latency == -1 {\n\t\terr = fmt.Errorf(\"missing field: Latency\")\n\t\treturn\n\t}\n\tif head.IdleTime == -1 {\n\t\terr = fmt.Errorf(\"missing field: IdleTime\")\n\t\treturn\n\t}\n\tif head.Connect == -1 {\n\t\terr = fmt.Errorf(\"missing field: Connect\")\n\t\treturn\n\t}\n\tif len(line) != head.Length {\n\t\terr = fmt.Errorf(\"mismatch fields count in header\")\n\t\treturn\n\t}\n\treturn\n}",
"func MapHeader(record []string, typ reflect.Type) MappingInfo {\n\n\tfields := mapHeaderRecursive(record, typ)\n\n\treturn MappingInfo{fields, typ}\n}",
"func (ctx *TestContext) getRowMap(rowIndex int, table *messages.PickleStepArgument_PickleTable) map[string]string {\n\trowHeader := table.Rows[0]\n\tsourceRow := table.Rows[rowIndex]\n\n\trowMap := map[string]string{}\n\tfor i := 0; i < len(rowHeader.Cells); i++ {\n\t\tvalue := sourceRow.Cells[i].Value\n\t\tif value == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\trowMap[rowHeader.Cells[i].Value] = value\n\t}\n\n\treturn rowMap\n}",
"func csv1(x uint32) (r uint32) {\n\tif _, cols := istab(x); cols > 0 {\n\t\treturn csvw(x)\n\t}\n\td := val(ks(\"CSV\"))\n\tif d == 0 || tp(d) != 7 {\n\t\tpanic(\"csv: var CSV must be a format dict\")\n\t}\n\tif tp(x) == 5 { // file\n\t\txn := nn(x)\n\t\tif xn == 0 {\n\t\t\tpanic(\"csv: no inputs\")\n\t\t} else if xn == 1 {\n\t\t\trx(x)\n\t\t\treturn csv3(d, read1(x), x)\n\t\t}\n\t\t// multiple files: `file`col1`col2!(`file1`file1`file1..;..)\n\t\tfor i := i(0); i < xn; i++ {\n\t\t\trx(x)\n\t\t\tt := csv1(atx(x, mki(i)))\n\t\t\tif i == 0 {\n\t\t\t\tr = t\n\t\t\t} else {\n\t\t\t\tr = dcat(r, val(t))\n\t\t\t}\n\t\t}\n\t\treturn r\n\t}\n\treturn csv3(d, x, 0)\n}",
"func newOrderFromCSVRow(row []string) interface{} {\n\t// type conversions from CSV file, arguments must align\n\tamount, _ := strconv.Atoi(row[2])\n\tprice, _ := strconv.ParseFloat(row[3], 64)\n\treturn newOrder(row[1], row[0], amount, price)\n}",
"func (c *CsvMarshaller) Read(reader *bufio.Reader, previousHeader *UnmarshalledHeader) (*UnmarshalledHeader, []byte, error) {\n\tline, err := readUntil(reader, c.lineSep())\n\tif err == io.EOF {\n\t\tif len(line) == 0 {\n\t\t\treturn nil, nil, err\n\t\t} else {\n\t\t\t// Ignore here\n\t\t}\n\t} else if err != nil {\n\t\treturn nil, nil, err\n\t} else if len(line) == 1 {\n\t\treturn nil, nil, errors.New(\"Empty CSV line\")\n\t} else if len(line) > 0 {\n\t\tline = line[:len(line)-1] // Strip newline char\n\t}\n\tindex := bytes.Index(line, []byte{c.valSep()})\n\tvar firstField string\n\tif index < 0 {\n\t\tfirstField = string(line) // Only one field\n\t} else {\n\t\tfirstField = string(line[:index])\n\t}\n\n\tswitch {\n\tcase previousHeader == nil:\n\t\tif checkErr := checkFirstField(c.timeCol(), firstField); checkErr != nil {\n\t\t\treturn nil, nil, checkErr\n\t\t}\n\t\treturn c.parseHeader(line), nil, err\n\tcase firstField == c.timeCol():\n\t\treturn c.parseHeader(line), nil, err\n\tdefault:\n\t\treturn nil, line, err\n\t}\n}",
"func parseMap(fileScanner *bufio.Scanner) [][]int {\n\ttreeMap := [][]int{}\n\tfor fileScanner.Scan() {\n\t\tline := fileScanner.Text()\n\t\trow := []int{}\n\t\tfor _, char := range line {\n\t\t\theight, _ := strconv.Atoi(string(char))\n\t\t\trow = append(row, height)\n\t\t}\n\t\ttreeMap = append(treeMap, row)\n\t}\n\treturn treeMap\n}",
"func ParseFromCSV(reader *csv.Reader) ([]Descriptor, error) {\n\tvar (\n\t\terr error\n\t\tdescriptorList []Descriptor\n\t\trecord []string\n\t\temailDoubleChecker map[string]bool\n\t)\n\n\temailDoubleChecker = map[string]bool{}\n\n\ti := 0\n\tadded := 0\n\tfor {\n\t\ti++\n\n\t\trecord, err = reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn descriptorList, fmt.Errorf(\"Error on line %d %s\", i, err.Error())\n\t\t}\n\n\t\tif len(record) < 3 {\n\t\t\treturn descriptorList, fmt.Errorf(\"Invalid record on line %d\", i)\n\t\t}\n\n\t\temail := strings.TrimSpace(record[1])\n\t\tif !govalidator.IsEmail(email) {\n\t\t\tlogger.Warn(fmt.Sprintf(\"Email '%s' is not an email on line %d\", email, i))\n\t\t\tcontinue\n\t\t}\n\n\t\tif emailDoubleChecker[email] {\n\t\t\tcontinue\n\t\t}\n\t\temailDoubleChecker[email] = true\n\n\t\tcompanyName := strings.TrimSpace(record[0])\n\t\tdomainName := strings.TrimSpace(record[2])\n\n\t\tdescriptorList = append(descriptorList, NewDescriptor(companyName, domainName, email))\n\t\tadded++\n\t}\n\n\tlogger.Debug(fmt.Sprintf(\"Parsed %d records out of %d\", added, i))\n\treturn descriptorList, nil\n}",
"func (f *commaSeparated) GetFields(record string) (map[interface{}]string, error) {\n\tbuf := bytes.NewBufferString(record)\n\tr := csv.NewReader(buf)\n\tif f.FieldDelim != \"\" {\n\t\tr.Comma, _ = utf8.DecodeRune([]byte(f.FieldDelim))\n\t}\n\tr.FieldsPerRecord = f.NumFields\n\trec, err := r.Read()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret := make(map[interface{}]string)\n\tfor i, v := range rec {\n\t\tret[i] = v\n\t}\n\treturn ret, nil\n}",
"func (tf *CSVTransformer) Transform(r io.Reader) ([]*queue.Message, error) {\n\treader := csv.NewReader(r)\n\treader.Comma = tf.Delim\n\n\t// The first row represents the field names\n\tkeys, err := reader.Read()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"no CSV data found\")\n\t}\n\tcolumns := len(keys)\n\n\tmsgs := []*queue.Message{}\n\tts := time.Now().Unix()\n\n\tfor {\n\t\trec, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tcontent := make(map[string]interface{})\n\t\tfor i := 0; i < columns; i++ {\n\t\t\tcontent[keys[i]] = rec[i]\n\t\t}\n\t\tmsg := queue.NewMessage(content)\n\t\tmsg.Metadata[queue.MetaBatch] = ts\n\t\tmsgs = append(msgs, msg)\n\t}\n\treturn msgs, nil\n}",
"func readOneRow(zoneNames []string, reader *csv.Reader) (inputData, bool, error) {\n\trowCells, err := reader.Read()\n\tif err == io.EOF {\n\t\treturn inputData{}, true, nil\n\t}\n\tif err != nil {\n\t\treturn inputData{}, true, err\n\t}\n\tvar rowData inputData\n\trowData.name = rowCells[0]\n\tfor index, data := range rowCells[1:] {\n\t\tnodeStr := strings.Fields(data)\n\t\t// convert string to int. number of nodes in a zone\n\t\tnumNodes, err := strconv.Atoi(nodeStr[0])\n\t\tif err != nil {\n\t\t\treturn rowData, false, err\n\t\t}\n\t\t// convert string to int. number of endpoints in a zone\n\t\tnumEndpoints, err := strconv.Atoi(nodeStr[1])\n\t\tif err != nil {\n\t\t\treturn rowData, false, err\n\t\t}\n\t\trowData.zones = append(rowData.zones, types.Zone{\n\t\t\tNodes: numNodes,\n\t\t\tEndpoints: numEndpoints,\n\t\t\tName: zoneNames[index],\n\t\t})\n\t}\n\treturn rowData, false, nil\n}",
"func LoadCellDictsToIndex(fname Filename) map[string]int {\n\n\tscanner, f := fname.ReturnReader(0)\n\tdefer CloseFile(f)\n\n\tvar cellID string\n\tvar index int\n\n\tcelliddict := make(map[string]int)\n\n\tfor scanner.Scan() {\n\t\tcellID = scanner.Text()\n\t\tcellID = strings.ReplaceAll(cellID, \" \", \"\\t\")\n\t\tcellID = strings.Split(cellID, \"\\t\")[0]\n\n\t\tcelliddict[cellID] = index\n\t\tindex++\n\t}\n\n\treturn celliddict\n}",
"func loadAndTrainData(symbol, companyName string, r *csv.Reader,\n\tcomputeLengths []int) error {\n\n\tvals, err := r.ReadAll()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error reading csv\")\n\t}\n\n\tif vals == nil {\n\t\treturn errors.New(fmt.Sprintf(\"empty or invalid CSV for '%v'\", symbol))\n\t}\n\n\t// Only allow if there are enough periods for train and compute\n\tif len(vals) <= 2 {\n\t\treturn errors.New(fmt.Sprintf(\"[%v] not enough periods\", symbol))\n\t}\n\n\tvar periods model.PeriodSlice\n\tfor i, v := range vals {\n\n\t\tvar parseErrors error\n\n\t\tif i == 0 {\n\t\t\t// TODO jpirkey build header to field map index here\n\t\t\tcontinue\n\t\t}\n\n\t\trow := CsvRow{}\n\t\tif row.Date, err = convertTime(v[csvDate]); err != nil {\n\t\t\tparseErrors = multierror.Append(parseErrors, errors.Wrapf(err, \"[%v] date field\", symbol))\n\t\t}\n\t\tif row.Open, err = convertFloat(v[csvOpen]); err != nil {\n\t\t\tparseErrors = multierror.Append(parseErrors, errors.Wrapf(err, \"[%v] open field\", symbol))\n\t\t}\n\t\tif row.High, err = convertFloat(v[csvHigh]); err != nil {\n\t\t\tparseErrors = multierror.Append(parseErrors, errors.Wrapf(err, \"[%v] high field\", symbol))\n\t\t}\n\t\tif row.Low, err = convertFloat(v[csvLow]); err != nil {\n\t\t\tparseErrors = multierror.Append(parseErrors, errors.Wrapf(err, \"[%v] low field\", symbol))\n\t\t}\n\t\tif row.Close, err = convertFloat(v[csvClose]); err != nil {\n\t\t\tparseErrors = multierror.Append(parseErrors, errors.Wrapf(err, \"[%v] close field\", symbol))\n\t\t}\n\t\tif row.Volume, err = convertInt(v[csvVolume]); err != nil {\n\t\t\tparseErrors = multierror.Append(parseErrors, errors.Wrapf(err, \"[%v] volume field\", symbol))\n\t\t}\n\n\t\tif parseErrors == nil {\n\t\t\tp := model.Period{Symbol: symbol, Date: row.Date, Open: row.Open, High: row.High,\n\t\t\t\tLow: row.Low, Close: row.Close, Volume: row.Volume}\n\t\t\tperiods = append(periods, &p)\n\t\t} else {\n\t\t\tlog.Warn(parseErrors)\n\t\t}\n\t}\n\n\tif len(periods) < 2 {\n\t\treturn errors.New(fmt.Sprintf(\"[%v] not enough parsed periods\", symbol))\n\t}\n\n\tsort.Sort(periods)\n\n\ttimer := metrics.GetOrRegisterTimer(\"training-timer\", loadRegistry)\n\ttimer.Time(func() { trainDaily(periods) })\n\n\tinsertCount, err := Repos.PeriodRepo.InsertMany(periods)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"[%v] inserting periods\", symbol)\n\t}\n\tif len(periods) != insertCount {\n\t\treturn fmt.Errorf(\"[%v] periods parsed count does not match inserted count\", symbol)\n\t}\n\n\tticker := model.Ticker{Symbol: symbol, Company: companyName}\n\terr = Repos.TickerRepo.InsertOne(&ticker)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"[%v] inserting ticker\", symbol)\n\t}\n\n\tif len(computeLengths) > 0 {\n\t\tfor _, computeLength := range computeLengths {\n\t\t\tif len(periods) < computeLength+1 {\n\t\t\t\tlog.Warnf(\"[%v] not enough periods to compute %v length series\",\n\t\t\t\t\tsymbol, computeLength)\n\t\t\t} else {\n\t\t\t\ttimer := metrics.GetOrRegisterTimer(\"compute-timer\", loadRegistry)\n\t\t\t\ttimer.Time(func() { computeSeries(computeLength, symbol, periods) })\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func parse2lines(headers, values string) map[string]int64 {\n\tkeys := strings.Fields(headers)\n\tvals := strings.Fields(values)\n\tresult := make(map[string]int64, len(keys))\n\n\tif len(keys) != len(vals) || len(keys) <= 1 || keys[0] != vals[0] {\n\t\treturn result\n\t}\n\n\t// strip the \":\" of \"foo:\" ...\n\ttopic := keys[0][:len(keys[0])-1]\n\t// .. and just get the actual header entries and values\n\tkeys = keys[1:]\n\tvals = vals[1:]\n\n\tfor i, k := range keys {\n\t\tif v, e := strconv.ParseInt(vals[i], 10, 64); e == nil && v >= 0 {\n\t\t\tresult[topic+\".\"+k] = v\n\t\t}\n\t}\n\treturn result\n}",
"func File(o Options) (map[string][][]string, error) {\n\tvar hash = make(map[string][][]string)\n\t\n\terr := csvFileReader(o, func(line []string) {\n\t\tkey := makeHashKey(line, o.KeyColumns)\n\t\t\n\t\tif !o.IncludeKeysValues {\n\t\t\tremoveKeyColumns(&line, o.KeyColumns)\n\t\t}\n\t\t\n\t\thash[key] = append(hash[key], line)\n\t})\n\t\n\treturn hash, err\n}",
"func ReadStrainNlpTagCsv(inputFile string) [][]string {\n\n\t// fmt.Println(\"gomu --- \", inputFile)\n\tincomingFile := inputFile[65:len(inputFile)]\n\n\t// Open CSV file\n\tf, err := os.Open(inputFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\n\t// Read File into a Variable\n\tlines, err := csv.NewReader(f).ReadAll()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar matrix [][]string\n\t// Loop through lines & turn into object\n\tfor _, line := range lines {\n\t\tdata := NlpTagCsv{\n\t\t\tColumn1: line[0],\n\t\t\tColumn2: line[1],\n\t\t}\n\t\tmatrix = append(matrix, []string{data.Column1, data.Column2})\n\t}\n\n\tvar nlpTag01, nlpTag02, nlpTag03, nlpTag04, nlpTag05, nlpTag06, nlpTag07, nlpTag08, nlpTag09, nlpTag10, nlpTag11, nlpTag12, nlpTag13, nlpTag14, nlpTag15, nlpTag16, nlpTag17, nlpTag18, nlpTag19, nlpTag20, nlpTag21, nlpTag22, nlpTag23, nlpTag24, nlpTag25, nlpTag26, nlpTag27, nlpTag28, nlpTag29, nlpTag30, nlpTag31, nlpTag32, nlpTag33, nlpTag34, nlpTag35, nlpTag36, nlpTag37, nlpTag38, nlpTag39, nlpTag40, nlpTag41, nlpTag42, nlpTag43, nlpTag44, nlpTag45 [][]string\n\n\tvar getthemall [][]string\n\t// masterNLP := []string{\"(\", \")\", \",\", \":\", \".\", \"''\", \"``\", \"#\", \"$\", \"CC\", \"CD\", \"DT\", \"EX\", \"FW\", \"IN\", \"JJ\", \"JJR\", \"JJS\", \"LS\", \"MD\", \"NN\", \"NNP\", \"NNPS\", \"NNS\", \"PDT\", \"POS\", \"PRP\", \"PRP$\", \"RB\", \"RBR\", \"RBS\", \"RP\", \"SYM\", \"TO\", \"UH\", \"VB\", \"VBD\", \"VBG\", \"VBN\", \"VBP\", \"VBZ\", \"WDT\", \"WP\", \"WP$\", \"WRB\"}\n\n\t// getthemall = append(getthemall, []string{\n\t// \t\"Strain File\",\n\t// \tmasterNLP[0],\n\t// \tmasterNLP[1],\n\t// \tmasterNLP[2],\n\t// \tmasterNLP[3],\n\t// \tmasterNLP[4],\n\t// \tmasterNLP[5],\n\t// \tmasterNLP[6],\n\t// \tmasterNLP[7],\n\t// \tmasterNLP[8],\n\t// \tmasterNLP[9],\n\t// \tmasterNLP[10],\n\t// \tmasterNLP[11],\n\t// \tmasterNLP[12],\n\t// \tmasterNLP[13],\n\t// \tmasterNLP[14],\n\t// \tmasterNLP[15],\n\t// \tmasterNLP[16],\n\t// \tmasterNLP[17],\n\t// \tmasterNLP[18],\n\t// \tmasterNLP[19],\n\t// \tmasterNLP[20],\n\t// \tmasterNLP[21],\n\t// \tmasterNLP[22],\n\t// \tmasterNLP[23],\n\t// \tmasterNLP[24],\n\t// \tmasterNLP[25],\n\t// \tmasterNLP[26],\n\t// \tmasterNLP[27],\n\t// \tmasterNLP[28],\n\t// \tmasterNLP[29],\n\t// \tmasterNLP[30],\n\t// \tmasterNLP[31],\n\t// \tmasterNLP[32],\n\t// \tmasterNLP[33],\n\t// \tmasterNLP[34],\n\t// \tmasterNLP[35],\n\t// \tmasterNLP[36],\n\t// \tmasterNLP[37],\n\t// \tmasterNLP[38],\n\t// \tmasterNLP[39],\n\t// \tmasterNLP[40],\n\t// \tmasterNLP[41],\n\t// \tmasterNLP[42],\n\t// \tmasterNLP[43],\n\t// \tmasterNLP[44]})\n\n\ti := 0\n\tfor i < len(matrix)-1 {\n\t\ti++\n\t\t// collect = append(collect, []string{matrix[i][0], matrix[i][1]})\n\t\tswitch matrix[i][1] {\n\t\tcase \"(\":\n\t\t\tnlpTag01 = append(nlpTag01, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag01)\n\t\tcase \")\":\n\t\t\tnlpTag02 = append(nlpTag02, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag02)\n\t\tcase \",\":\n\t\t\tnlpTag03 = append(nlpTag03, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag03)\n\t\tcase \":\":\n\t\t\tnlpTag04 = append(nlpTag04, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag04)\n\t\tcase \".\":\n\t\t\tnlpTag05 = append(nlpTag05, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag05)\n\t\tcase \"''\":\n\t\t\tnlpTag06 = append(nlpTag06, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag06)\n\t\tcase 
\"``\":\n\t\t\tnlpTag07 = append(nlpTag07, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag07)\n\t\tcase \"#\":\n\t\t\tnlpTag08 = append(nlpTag08, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag08)\n\t\tcase \"$\":\n\t\t\tnlpTag09 = append(nlpTag09, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag09)\n\t\tcase \"CC\":\n\t\t\tnlpTag10 = append(nlpTag10, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag10)\n\t\tcase \"CD\":\n\t\t\tnlpTag11 = append(nlpTag11, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag11)\n\t\tcase \"DT\":\n\t\t\tnlpTag12 = append(nlpTag12, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag12)\n\t\tcase \"EX\":\n\t\t\tnlpTag13 = append(nlpTag13, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag13)\n\t\tcase \"FW\":\n\t\t\tnlpTag14 = append(nlpTag14, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag14)\n\t\tcase \"IN\":\n\t\t\tnlpTag15 = append(nlpTag15, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag15)\n\t\tcase \"JJ\":\n\t\t\tnlpTag16 = append(nlpTag16, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag16)\n\t\tcase \"JJR\":\n\t\t\tnlpTag17 = append(nlpTag17, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag17)\n\t\tcase \"JJS\":\n\t\t\tnlpTag18 = append(nlpTag18, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag18)\n\t\tcase \"LS\":\n\t\t\tnlpTag19 = append(nlpTag19, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag19)\n\t\tcase \"MD\":\n\t\t\tnlpTag20 = append(nlpTag20, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag20)\n\t\tcase \"NN\":\n\t\t\tnlpTag21 = append(nlpTag21, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag21)\n\t\tcase \"NNP\":\n\t\t\tnlpTag22 = append(nlpTag22, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag22)\n\t\tcase \"NNPS\":\n\t\t\tnlpTag23 = append(nlpTag23, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag23)\n\t\tcase \"NNS\":\n\t\t\tnlpTag24 = append(nlpTag24, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag24)\n\t\tcase \"PDT\":\n\t\t\tnlpTag25 = append(nlpTag25, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag25)\n\t\tcase \"POS\":\n\t\t\tnlpTag26 = append(nlpTag26, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag26)\n\t\tcase \"PRP\":\n\t\t\tnlpTag27 = append(nlpTag27, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag27)\n\t\tcase \"PRP$\":\n\t\t\tnlpTag28 = append(nlpTag28, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag28)\n\t\tcase \"RB\":\n\t\t\tnlpTag29 = append(nlpTag29, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag29)\n\t\tcase \"RBR\":\n\t\t\tnlpTag30 = append(nlpTag30, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag30)\n\t\tcase \"RBS\":\n\t\t\tnlpTag31 = append(nlpTag31, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag31)\n\t\tcase \"RP\":\n\t\t\tnlpTag32 = append(nlpTag32, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag32)\n\t\tcase \"SYM\":\n\t\t\tnlpTag33 = append(nlpTag33, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag33)\n\t\tcase \"TO\":\n\t\t\tnlpTag34 = append(nlpTag34, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag34)\n\t\tcase \"UH\":\n\t\t\tnlpTag35 = append(nlpTag35, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// 
fmt.Println(nlpTag35)\n\t\tcase \"VB\":\n\t\t\tnlpTag36 = append(nlpTag36, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag36)\n\t\tcase \"VBD\":\n\t\t\tnlpTag37 = append(nlpTag37, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag37)\n\t\tcase \"VBG\":\n\t\t\tnlpTag38 = append(nlpTag38, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag38)\n\t\tcase \"VBN\":\n\t\t\tnlpTag39 = append(nlpTag39, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag39)\n\t\tcase \"VBP\":\n\t\t\tnlpTag40 = append(nlpTag40, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag40)\n\t\tcase \"VBZ\":\n\t\t\tnlpTag41 = append(nlpTag41, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag41)\n\t\tcase \"WDT\":\n\t\t\tnlpTag42 = append(nlpTag42, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag42)\n\t\tcase \"WP\":\n\t\t\tnlpTag43 = append(nlpTag43, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag43)\n\t\tcase \"WP$\":\n\t\t\tnlpTag44 = append(nlpTag44, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag44)\n\t\tcase \"WRB\":\n\t\t\tnlpTag45 = append(nlpTag45, []string{matrix[i][0], matrix[i][1]})\n\t\t\t// fmt.Println(nlpTag45)\n\t\t}\n\t}\n\n\tgetthemall = append(getthemall, []string{\n\t\tincomingFile,\n\t\tstrconv.Itoa(len(nlpTag01)),\n\t\tstrconv.Itoa(len(nlpTag02)),\n\t\tstrconv.Itoa(len(nlpTag03)),\n\t\tstrconv.Itoa(len(nlpTag04)),\n\t\tstrconv.Itoa(len(nlpTag05)),\n\t\tstrconv.Itoa(len(nlpTag06)),\n\t\tstrconv.Itoa(len(nlpTag07)),\n\t\tstrconv.Itoa(len(nlpTag08)),\n\t\tstrconv.Itoa(len(nlpTag09)),\n\t\tstrconv.Itoa(len(nlpTag10)),\n\t\tstrconv.Itoa(len(nlpTag11)),\n\t\tstrconv.Itoa(len(nlpTag12)),\n\t\tstrconv.Itoa(len(nlpTag13)),\n\t\tstrconv.Itoa(len(nlpTag14)),\n\t\tstrconv.Itoa(len(nlpTag15)),\n\t\tstrconv.Itoa(len(nlpTag16)),\n\t\tstrconv.Itoa(len(nlpTag17)),\n\t\tstrconv.Itoa(len(nlpTag18)),\n\t\tstrconv.Itoa(len(nlpTag19)),\n\t\tstrconv.Itoa(len(nlpTag20)),\n\t\tstrconv.Itoa(len(nlpTag21)),\n\t\tstrconv.Itoa(len(nlpTag22)),\n\t\tstrconv.Itoa(len(nlpTag23)),\n\t\tstrconv.Itoa(len(nlpTag24)),\n\t\tstrconv.Itoa(len(nlpTag25)),\n\t\tstrconv.Itoa(len(nlpTag26)),\n\t\tstrconv.Itoa(len(nlpTag27)),\n\t\tstrconv.Itoa(len(nlpTag28)),\n\t\tstrconv.Itoa(len(nlpTag29)),\n\t\tstrconv.Itoa(len(nlpTag30)),\n\t\tstrconv.Itoa(len(nlpTag31)),\n\t\tstrconv.Itoa(len(nlpTag32)),\n\t\tstrconv.Itoa(len(nlpTag33)),\n\t\tstrconv.Itoa(len(nlpTag34)),\n\t\tstrconv.Itoa(len(nlpTag35)),\n\t\tstrconv.Itoa(len(nlpTag36)),\n\t\tstrconv.Itoa(len(nlpTag37)),\n\t\tstrconv.Itoa(len(nlpTag38)),\n\t\tstrconv.Itoa(len(nlpTag39)),\n\t\tstrconv.Itoa(len(nlpTag40)),\n\t\tstrconv.Itoa(len(nlpTag41)),\n\t\tstrconv.Itoa(len(nlpTag42)),\n\t\tstrconv.Itoa(len(nlpTag43)),\n\t\tstrconv.Itoa(len(nlpTag44)),\n\t\tstrconv.Itoa(len(nlpTag45))})\n\t// fmt.Println(getthemall)\n\treturn getthemall\n}",
"func headersMap(headers []string, fields []string) (map[string]interface{}, error) {\n\tparsedValues := make(map[string]interface{})\n\n\tif len(fields) != len(headers) {\n\t\treturn nil, fmt.Errorf(\"wrong number of fields: expected %d, found %d\", len(headers), len(fields))\n\t}\n\n\tfor i, val := range fields {\n\t\tparsedValues[headers[i]] = val\n\t}\n\treturn parsedValues, nil\n}",
"func LoadCSV() {\n\n\tsession := util.MongoSession.Copy()\n\tdefer session.Close()\n\n\tsession.SetMode(mgo.Monotonic, true)\n\n\tcollection := session.DB(util.Config.DbName).C(\"csvload\")\n\tabsPath, _ := filepath.Abs(\"../Go_Docker/data/convertcsv.csv\")\n\tfile, err := os.Open(absPath)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer file.Close()\n\n\treader := csv.NewReader(file)\n\n\tfor {\n\t\trecord, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif record[0] != \"key\" {\n\t\t\terr = collection.Insert(&Mongo{Key: record[0], Value: record[1]})\n\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\tlog.Printf(\"%#v\", record)\n\t\t}\n\n\t}\n}",
"func main() {\n\n\toriginalFilepath := os.Args[1]\n\tdedupedFilepath := os.Args[2]\n\n\tcsvfile, err := os.Open(originalFilepath)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tbuffer, _ := ioutil.ReadAll(csvfile)\n\trx := strings.NewReplacer(\"\\r\\n\", \"\\n\", \"\\r\", \"\\n\")\n\n\treader := csv.NewReader(strings.NewReader(rx.Replace(string(buffer))))\n\n\theader, err := reader.Read()\n\tif err != nil {\n\t\tfmt.Println(\"cant read header\")\n\t}\n\n\tcols := make(map[string]int)\n\tfor i, col := range header {\n\t\tcol = strings.ToLower(col)\n\t\tcols[col] = i\n\t}\n\n\tif _, ok := cols[\"email\"]; ok {\n\t} else {\n\t\tfmt.Println(\"expected CSV to contain email field\")\n\t}\n\n\tdefer csvfile.Close()\n\n\toutFile, err := os.Create(dedupedFilepath)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tdefer outFile.Close()\n\n\tw := csv.NewWriter(outFile)\n\n\tm := make(map[string][]string)\n\tfor {\n\t\trecord, err := reader.Read()\n\t\t// end-of-file is fitted into err\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tfmt.Println(\"Error:\", err)\n\t\t}\n\n\t\tif len(record) <= 1 {\n\t\t\tfmt.Println(\"error parsing csv, whole csv is on one line\")\n\t\t}\n\t\temail := record[cols[\"email\"]]\n\t\tm[email] = record\n\t}\n\n\tfor _, v := range m {\n\t\tw.Write(v)\n\t}\n\tw.Flush()\n}",
"func readCsv(filename string)([]Record){\n\tcsvFile, _ := os.Open(filename)\n reader := csv.NewReader(bufio.NewReader(csvFile))\n\t//skip first line\n\treader.Read()\n\tvar people []Record\n\tfor {\n\t\t\t line, error := reader.Read()\n\t\t\t if error == io.EOF {\n\t\t\t\t\t break\n\t\t\t } else if error != nil {\n\t\t\t\t\t log.Fatal(error)\n\t\t\t }\n\t\t\t id, err := strconv.Atoi(line[0])\n\t\t\t if err == nil {\n\t\t\t\t people = append(people, Record {\n\t\t\t\t\t\t Id: id,\n\t\t\t\t\t\t Firstname: line[1],\n\t\t\t\t\t\t Lastname: line[2],\n\t\t\t\t\t\t Phonenumber: line[3]})\n\t\t\t }else{\n\t\t\t\t log.Fatal(err)\n\t\t\t }\n\n\t }\n\t return people\n}",
"func RowToMap(rows *sql.Rows) []map[string]string {\n\tcolumns, _ := rows.Columns()\n\tcount := len(columns)\n\treadCols := make([]interface{}, count)\n\trawCols := make([]interface{}, count)\n\tvar records []map[string]string\n\tfor rows.Next() {\n\t\t// resultCols := make(map[string]string, count)\n\t\tfor i := range columns {\n\t\t\treadCols[i] = &rawCols[i]\n\t\t}\n\t\trows.Scan(readCols...)\n\n\t\t// all conver to string\n\t\tresultCols := assertTypeMap(columns, rawCols)\n\n\t\trecords = append(records, resultCols)\n\t}\n\treturn records\n}",
"func populateStationsList(stationsCSV io.Reader) (map[string]robots.Point, error) {\n\n\tstations := make(map[string]robots.Point)\n\treader := csv.NewReader(stationsCSV)\n\treader.FieldsPerRecord = 3\n\n\tfor {\n\t\tline, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to process line in tube.csv: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlat, err := strconv.ParseFloat(line[1], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to parse lat from tube.csv: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\t\tlon, err := strconv.ParseFloat(line[2], 64)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"Unable to parse lon from tube.csv: %v\", err)\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfmt.Printf(\"Loaded station: %v \\t\\t Lat/Lon: %v, %v\\n\", line[0], lat, lon)\n\t\tstations[line[0]] = robots.Point{\n\t\t\tLat: lat,\n\t\t\tLon: lon,\n\t\t}\n\t}\n\n\treturn stations, nil\n}",
"func LoadCSV() *starlarkstruct.Struct {\n\treturn starlarkstruct.FromStringDict(\n\t\tstarlarkstruct.Default,\n\t\tstarlark.StringDict{\n\t\t\t\"ToCSV\": starlark.NewBuiltin(\"csv\", ToCSV),\n\t\t\t\"FromCSV\": starlark.NewBuiltin(\"object\", FromCSV),\n\t\t},\n\t)\n}",
"func (d *Dao) processHeaderLine(context *tagContext, decoder toolbox.Decoder, lineNumber int) (*toolbox.DelimitedRecord, *Tag, error) {\n\trecord := &toolbox.DelimitedRecord{Delimiter: \",\"}\n\terr := decoder.Decode(record)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\townerName := context.rootObject.GetString(\"Name\")\n\tcontext.tag = NewTag(ownerName, context.source, record.Columns[0], lineNumber)\n\terr = d.processTag(context)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn record, context.tag, nil\n}",
"func prepareCSV(scanResult *CertCheckerResults, fileName string) error {\n\tscanResults := reflect.ValueOf(scanResult).Elem()\n\ttypeOfT := scanResults.Type()\n\tcsvdatafile, err := os.Create(fileName)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"Failed to create file %s\", fileName)\n\t}\n\tdefer csvdatafile.Close()\n\twriter := csv.NewWriter(csvdatafile)\n\tvar record []string\n\tfor i := 0; i < scanResults.NumField(); i++ {\n\t\tf := scanResults.Field(i)\n\t\tif f.Kind() == reflect.Map {\n\t\t\tvar keys []string\n\t\t\tfor key := range scanResult.VulnResults {\n\t\t\t\tkeys = append(keys, key)\n\t\t\t}\n\t\t\tsort.Strings(keys)\n\t\t\tfor _, key := range keys {\n\t\t\t\trecord = append(record, key)\n\t\t\t}\n\t\t} else {\n\t\t\trecord = append(record, typeOfT.Field(i).Name)\n\t\t}\n\t}\n\twriter.Write(record)\n\twriter.Flush()\n\treturn err\n}",
"func readCsvFile(csvIn io.Reader) ([][]string, error) {\n\n\tcsvReader := csv.NewReader(csvIn)\n\tresult, err := csvReader.ReadAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn result, nil\n}",
"func SplitHeaderFields(h, sep string) map[string]string {\n\theader := make(map[string]string)\n\n\tif len(h) == 0 {\n\t\treturn header\n\t}\n\n\theaderLine := strings.Split(h, sep)\n\tfor _, h := range headerLine {\n\t\tsepIndex := strings.Index(h, \":\")\n\n\t\tif sepIndex == -1 {\n\t\t\tlog.Fatalln(\"Malformed header name/value. Missing separator colon ':', like name:value\")\n\t\t\tcontinue\n\t\t}\n\n\t\tname := strings.TrimSpace(h[:sepIndex])\n\t\tvalue := strings.TrimSpace(h[sepIndex+1:])\n\t\theader[name] = value\n\t}\n\n\treturn header\n}",
"func (this *activitiesStruct) ImportCSV(data string) error {\n\trstr := strings.NewReader(data)\n\trcsv := csv.NewReader(rstr)\n\trecords, err := rcsv.ReadAll()\n\n\t/*\n\t * Check if an error occured.\n\t */\n\tif err != nil {\n\t\tmsg := err.Error()\n\t\treturn fmt.Errorf(\"Error importing activity data from CSV: %s\", msg)\n\t} else {\n\t\tthis.mutex.Lock()\n\t\tgroups := this.groups\n\t\tnumGroups := len(groups)\n\t\tgroupsCopy := make([]activityGroupStruct, numGroups)\n\t\tcopy(groupsCopy, groups)\n\t\tfirstError := error(nil)\n\t\tidxFirstErr := uint64(0)\n\t\tnumErrors := uint64(0)\n\n\t\t/*\n\t\t * Iterate over all records and parse activity data.\n\t\t */\n\t\tfor idx, record := range records {\n\t\t\trecordHasErrors := false\n\t\t\tnumFields := len(record)\n\n\t\t\t/*\n\t\t\t * Check that sufficient number of fields is present.\n\t\t\t */\n\t\t\tif numFields < EXPECTED_NUM_FIELDS {\n\n\t\t\t\t/*\n\t\t\t\t * Store first error occuring.\n\t\t\t\t */\n\t\t\t\tif firstError == nil {\n\t\t\t\t\tfirstError = fmt.Errorf(\"Expected %d fields, found %d.\", EXPECTED_NUM_FIELDS, numFields)\n\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t}\n\n\t\t\t\t/*\n\t\t\t\t * Increment error count.\n\t\t\t\t */\n\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\tnumErrors++\n\t\t\t\t\trecordHasErrors = true\n\t\t\t\t}\n\n\t\t\t} else {\n\t\t\t\tbeginString := record[0]\n\t\t\t\tbegin, err := filter.ParseTime(beginString, false, false)\n\n\t\t\t\t/*\n\t\t\t\t * Check if begin time could be parsed.\n\t\t\t\t */\n\t\t\t\tif err != nil {\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t */\n\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse begin time stamp: %s\", msg)\n\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t}\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Increment error count.\n\t\t\t\t\t */\n\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tweightKG := record[1]\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty weight.\n\t\t\t\t */\n\t\t\t\tif weightKG == \"\" {\n\t\t\t\t\tweightKG = \"0.0\"\n\t\t\t\t}\n\n\t\t\t\trunningDurationString := record[2]\n\t\t\t\trunningDuration := time.Duration(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running duration.\n\t\t\t\t */\n\t\t\t\tif runningDurationString != \"\" {\n\t\t\t\t\trunningDuration, err = time.ParseDuration(runningDurationString)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if running duration could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse running duration: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\trunningDistanceKM := record[3]\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running distance.\n\t\t\t\t */\n\t\t\t\tif runningDistanceKM == \"\" {\n\t\t\t\t\trunningDistanceKM = \"0.0\"\n\t\t\t\t}\n\n\t\t\t\trunningStepCountString := record[4]\n\t\t\t\trunningStepCount := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running step count.\n\t\t\t\t */\n\t\t\t\tif 
runningStepCountString != \"\" {\n\t\t\t\t\trunningStepCount, err = strconv.ParseUint(runningStepCountString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if running step count could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse running step count: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\trunningEnergyKJString := record[5]\n\t\t\t\trunningEnergyKJ := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty running energy.\n\t\t\t\t */\n\t\t\t\tif runningEnergyKJString != \"\" {\n\t\t\t\t\trunningEnergyKJ, err = strconv.ParseUint(runningEnergyKJString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if running energy could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse running energy: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tcyclingDurationString := record[6]\n\t\t\t\tcyclingDuration := time.Duration(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty cycling duration.\n\t\t\t\t */\n\t\t\t\tif cyclingDurationString != \"\" {\n\t\t\t\t\tcyclingDuration, err = time.ParseDuration(cyclingDurationString)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if cycling duration could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse cycling duration: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\tcyclingDistanceKM := record[7]\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty cycling distance.\n\t\t\t\t */\n\t\t\t\tif cyclingDistanceKM == \"\" {\n\t\t\t\t\tcyclingDistanceKM = \"0.0\"\n\t\t\t\t}\n\n\t\t\t\tcyclingEnergyKJString := record[8]\n\t\t\t\tcyclingEnergyKJ := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty cycling energy.\n\t\t\t\t */\n\t\t\t\tif cyclingEnergyKJString != \"\" {\n\t\t\t\t\tcyclingEnergyKJ, err = strconv.ParseUint(cyclingEnergyKJString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if cycling energy could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse cycling energy: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = 
uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\totherEnergyKJString := record[9]\n\t\t\t\totherEnergyKJ := uint64(0)\n\n\t\t\t\t/*\n\t\t\t\t * Allow for empty other energy.\n\t\t\t\t */\n\t\t\t\tif otherEnergyKJString != \"\" {\n\t\t\t\t\totherEnergyKJ, err = strconv.ParseUint(otherEnergyKJString, 10, 64)\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Check if other energy could be parsed.\n\t\t\t\t\t */\n\t\t\t\t\tif err != nil {\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\t\tmsg := err.Error()\n\t\t\t\t\t\t\tfirstError = fmt.Errorf(\"Failed to parse other energy: %s\", msg)\n\t\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\t/*\n\t\t\t\t\t\t * Increment error count.\n\t\t\t\t\t\t */\n\t\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t\t\trecordHasErrors = true\n\t\t\t\t\t\t}\n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\n\t\t\t\t/*\n\t\t\t\t * Create activity info.\n\t\t\t\t */\n\t\t\t\tinfo := ActivityInfo{\n\t\t\t\t\tBegin: begin,\n\t\t\t\t\tWeightKG: weightKG,\n\t\t\t\t\tRunningDuration: runningDuration,\n\t\t\t\t\tRunningDistanceKM: runningDistanceKM,\n\t\t\t\t\tRunningStepCount: runningStepCount,\n\t\t\t\t\tRunningEnergyKJ: runningEnergyKJ,\n\t\t\t\t\tCyclingDuration: cyclingDuration,\n\t\t\t\t\tCyclingDistanceKM: cyclingDistanceKM,\n\t\t\t\t\tCyclingEnergyKJ: cyclingEnergyKJ,\n\t\t\t\t\tOtherEnergyKJ: otherEnergyKJ,\n\t\t\t\t}\n\n\t\t\t\tg, err := createActivityGroup(&info)\n\n\t\t\t\t/*\n\t\t\t\t * Check if activity group could be parsed.\n\t\t\t\t */\n\t\t\t\tif err != nil {\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Store first error occuring.\n\t\t\t\t\t */\n\t\t\t\t\tif firstError == nil {\n\t\t\t\t\t\tfirstError = err\n\t\t\t\t\t\tidxFirstErr = uint64(idx)\n\t\t\t\t\t}\n\n\t\t\t\t\t/*\n\t\t\t\t\t * Increment error count.\n\t\t\t\t\t */\n\t\t\t\t\tif !recordHasErrors && numErrors < math.MaxUint64 {\n\t\t\t\t\t\tnumErrors++\n\t\t\t\t\t}\n\n\t\t\t\t} else {\n\t\t\t\t\tgroupsCopy = append(groupsCopy, g)\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\n\t\t/*\n\t\t * Only modify activity groups if no error occured.\n\t\t */\n\t\tif firstError == nil {\n\n\t\t\t/*\n\t\t\t * Comparison function for sorting algorithm.\n\t\t\t */\n\t\t\tless := func(i int, j int) bool {\n\t\t\t\tgi := groupsCopy[i]\n\t\t\t\tgiBegin := gi.begin\n\t\t\t\tgj := groupsCopy[j]\n\t\t\t\tgjBegin := gj.begin\n\t\t\t\tresult := giBegin.Before(gjBegin)\n\t\t\t\treturn result\n\t\t\t}\n\n\t\t\tsort.SliceStable(groupsCopy, less)\n\t\t\tthis.groups = groupsCopy\n\t\t\tthis.revision++\n\t\t}\n\n\t\tthis.mutex.Unlock()\n\n\t\t/*\n\t\t * Check if error occured.\n\t\t */\n\t\tif firstError != nil {\n\t\t\tmsg := firstError.Error()\n\t\t\treturn fmt.Errorf(\"Error deserializing activity data: %d erroneous activity groups, first at group number %d: %s\", numErrors, idxFirstErr, msg)\n\t\t} else {\n\t\t\treturn nil\n\t\t}\n\n\t}\n\n}",
"func parseParams(header http.Header, key string) (params map[string]string) {\n\tparams = make(map[string]string)\n\tv := header.Get(key)\n\tp := strings.Split(v, \";\")\n\tre := regexp.MustCompile(\"([A-Za-z0-9-]*)[=\\\"]*([^\\\"]*)?\")\n\n\tfor _, s := range p {\n\t\ts = strings.TrimSpace(s)\n\t\tmatch := re.FindStringSubmatch(s)\n\t\tpk := strings.ToLower(match[1])\n\n\t\tpv := match[2]\n\t\tif params[pk] == \"\" {\n\t\t\tparams[pk] = pv\n\t\t} else {\n\t\t\tparams[pk] = params[pk] + \",\" + pv\n\t\t}\n\t}\n\n\treturn\n}",
"func readMap(filename string) (map[string]string, error) {\n\tnames := make(map[string]string)\n\tinput, err := os.Open(filename)\n\tdefer func() {\n\t\tif closeErr := input.Close(); closeErr != nil {\n\t\t\terr = closeErr\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\treturn names, err\n\t}\n\tscanner := bufio.NewScanner(input)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tsplit := strings.Split(line, \"\\t\")\n\t\tnames[split[0]] = split[1]\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn names, err\n\t}\n\n\treturn names, nil\n}",
"func PricesFromCSV(csvFilePath string) (*DataHandler, error) {\n\tcsvFile, _ := os.Open(csvFilePath)\n\treader := csv.NewReader(bufio.NewReader(csvFile))\n\n\t//Reading first line header and validating the required columns\n\tif line, error := reader.Read(); error != nil || !isCSVHeaderValid(line) {\n\t\treturn nil, fmt.Errorf(`error reading header with columns in the csv.\n\t\t\t\tMake sure the CSV has the columns Open, High, Low, Close, Volume`)\n\t}\n\n\tvar prices []DataPoint\n\tfor {\n\t\tline, error := reader.Read()\n\t\tif error == io.EOF {\n\t\t\tbreak\n\t\t} else if error != nil {\n\t\t\tlog.Fatal(error)\n\t\t}\n\n\t\t//Checking each OHLCV value in the csv\n\t\tvar numbers [5]float64\n\t\tfor i := 0; i < 5; i++ {\n\t\t\tvalue, err := strToFloat(line[i])\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tnumbers[i] = value\n\n\t\t}\n\n\t\tprices = append(prices, DataPoint{\n\t\t\topen: numbers[0],\n\t\t\thigh: numbers[1],\n\t\t\tlow: numbers[2],\n\t\t\tclose: numbers[3],\n\t\t\tvolume: numbers[4],\n\t\t})\n\t}\n\n\treturn newDataHandler(prices), nil\n}",
"func RowsToMap(rows *sql.Rows, typeString string) ([]map[string]interface{}, error) {\n\tarr := make([]map[string]interface{}, 0)\n\n\tcols, err := rows.Columns()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Set up valuePointers slice using types from typeString\n\ttypes := strings.Split(typeString, \",\")\n\tvaluePointers := make([]interface{}, len(types))\n\tfor i, t := range types {\n\t\tif t == \"int\" {\n\t\t\tvaluePointers[i] = new(int)\n\t\t} else if t == \"string\" {\n\t\t\tvaluePointers[i] = new(string)\n\t\t} else {\n\t\t\treturn nil, errors.New(\"Unknown type in typeString\")\n\t\t}\n\t}\n\n\tfor rows.Next() {\n\t\t// Scan the result into the value pointers...\n\t\tif err := rows.Scan(valuePointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tm := make(map[string]interface{})\n\t\tfor i, colName := range cols {\n\t\t\tm[colName] = valuePointers[i]\n\t\t}\n\n\t\tarr = append(arr, m)\n\t}\n\n\treturn arr, nil\n}",
"func parseCsv(input string) []string {\n\ttokens := strings.Split(input, \",\")\n\tresults := make([]string, 0, len(tokens))\n\tfor _, token := range tokens {\n\t\tresults = append(results, strings.TrimSpace(token))\n\t}\n\treturn results\n}",
"func (table *Table) Read(rowNumber int) (map[string]string, int) {\n\trow := make(map[string]string)\n\tstatus := table.Seek(rowNumber)\n\tif status == st.OK {\n\t\trowInBytes := make([]byte, table.RowLength)\n\t\t_, err := table.DataFile.Read(rowInBytes)\n\t\tif err == nil {\n\t\t\t// For the columns in their order\n\t\t\tfor _, column := range table.ColumnsInOrder {\n\t\t\t\t// column1:value2, column2:value2...\n\t\t\t\trow[column.Name] = strings.TrimSpace(string(rowInBytes[column.Offset : column.Offset+column.Length]))\n\t\t\t}\n\t\t} else {\n\t\t\tlogg.Err(\"table\", \"Read\", err.String())\n\t\t\treturn nil, st.CannotReadTableDataFile\n\t\t}\n\t}\n\treturn row, st.OK\n}",
"func FormatCSV(t *CSVData, d *dict.Dict) {\n\n format := func (lineStr *string) bool {\n\n arr := strings.Split(*lineStr, \",\")\n needReplace := false\n for idx, str := range arr {\n \n if strings.IndexFunc(str, matchF) == -1 {\n continue\n }\n \n item := d.GetItem(str)\n if item == nil {\n \n // t.NilArr[str] = 0\n continue\n }\n \n arr[idx] = item.Value\n needReplace = true\n }\n\n if needReplace {\n *lineStr = strings.Join(arr, \",\")\n }\n \n return needReplace\n }\n\n for i, l := range t._data {\n\n if !format(&l) {\n continue\n }\n\n t._data[i] = l\n }\n\n}",
"func CsvToLedgerEntries(filepath string) ([]ledger.Entry, error) {\n\t// create the reader\n\treader, err := csvReader(filepath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating the reader: %w\", err)\n\t}\n\t// read the header\n\theader, err := reader.Read()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Reading the header row: %w\", err)\n\t}\n\t// Validate order of columns\n\tif header[0] != \"source\" || header[1] != \"destination\" || header[2] != \"entrydate\" || header[3] != \"amount\" {\n\t\treturn nil, fmt.Errorf(\"Columns must be in order: source, destination, entrydate, amount (%w)\", err)\n\t}\n\n\t// construct slice of buckets to return\n\tvar entries []ledger.Entry\n\t// Read rows and construct and append Entry objects\n\tfor i := 0; ; i += 1 {\n\t\trecord, err := reader.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak // reached end of the file\n\t\t} else if err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Reading a row: %v\", err)\n\t\t}\n\t\t// convert EntryDate value to time.Time\n\t\tEntryDate, err := utils.ParseDate(record[2])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Parsing string to time.Time: %w\", err)\n\t\t}\n\t\t// convert amount value to int\n\t\tamount, err := strconv.Atoi(record[3])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Converting string to int: %w\", err)\n\t\t}\n\t\t// construct the entry\n\t\te := ledger.Entry{\n\t\t\tSource: record[0],\n\t\t\tDestination: record[1],\n\t\t\tEntryDate: EntryDate,\n\t\t\tAmount: amount,\n\t\t}\n\t\tentries = append(entries, e)\n\t}\n\treturn entries, nil\n}",
"func newPackagesMetadataFromCSV(csvFile string) (*metadata.PackagesMetadata, error) {\n\tresult := new(metadata.PackagesMetadata)\n\tf, err := os.Open(csvFile)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to open %s for reading\", csvFile)\n\t}\n\tr := csv.NewReader(f)\n\theader, err := r.Read()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"unable to read CSV header from %s\", csvFile)\n\t}\n\tif len(header) != 2 || header[0] != \"Name\" || header[1] != \"Version\" {\n\t\treturn nil, fmt.Errorf(\"unexpected CSV header in %s, got %s, want Name,Version\", csvFile, strings.Join(header, \",\"))\n\t}\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"error parsing record in CSV file %s\", csvFile)\n\t\t}\n\t\tresult.Packages = append(result.Packages, metadata.PackageMetadata{\n\t\t\tName: record[0],\n\t\t\tVersion: record[1],\n\t\t})\n\t}\n\t// Sort the packages in alphabetical order of their names.\n\tsort.Slice(result.Packages, func(i, j int) bool {\n\t\tpi, pj := result.Packages[i], result.Packages[j]\n\t\treturn strings.Compare(pi.Name, pj.Name) < 0\n\t})\n\treturn result, nil\n}",
"func parseColumns(reader *csv.Reader, skipHeader bool, fields string) ([]string, error) {\n\tvar err error\n\tvar columns []string\n\tif fields != \"\" {\n\t\tcolumns = strings.Split(fields, \",\")\n\n\t\tif skipHeader {\n\t\t\treader.Read() //Force consume one row\n\t\t}\n\t} else {\n\t\tcolumns, err = reader.Read()\n\t\tfmt.Printf(\"%v columns\\n%v\\n\", len(columns), columns)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"FOUND ERR\\n\")\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tfor _, col := range columns {\n\t\tif containsDelimiter(col) {\n\t\t\treturn columns, errors.New(\"Please specify the correct delimiter with -d.\\n\" +\n\t\t\t\t\"Header column contains a delimiter character: \" + col)\n\t\t}\n\t}\n\n\tfor i, col := range columns {\n\t\tcolumns[i] = postgresify(col)\n\t}\n\n\treturn columns, nil\n}",
"func Convert(data string, headers []Header) string {\n\treader := csv.NewReader(strings.NewReader(data))\n\tlines, _ := reader.ReadAll()\n\n\tresult := []map[string]interface{}{}\n\tfor rowIndex, line := range lines {\n\t\tif len(headers) == 0 && rowIndex == 0 {\n\t\t\theaders = convertFirstLineToHeader(line)\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, convertLineToJSONObject(line, headers))\n\t}\n\n\tjsonBytes, _ := json.Marshal(result)\n\treturn string(jsonBytes[:])\n}",
"func getMapFromRows(rows *sql.Rows) (map[string]interface{}, error) {\n\tcols, _ := rows.Columns()\n\tm := make(map[string]interface{})\n\tfor rows.Next() {\n\t\t// Create a slice of interface{}'s to represent each column,\n\t\t// and a second slice to contain pointers to each item in the columns slice.\n\t\tcolumns := make([]interface{}, len(cols))\n\t\tcolumnPointers := make([]interface{}, len(cols))\n\t\tfor i, _ := range columns {\n\t\t\tcolumnPointers[i] = &columns[i]\n\t\t}\n\n\t\t// Scan the result into the column pointers...\n\t\tif err := rows.Scan(columnPointers...); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// Create our map, and retrieve the value for each column from the pointers slice,\n\t\t// storing it in the map with the name of the column as the key.\n\t\tfor i, colName := range cols {\n\t\t\tval := columnPointers[i].(*interface{})\n\t\t\tm[colName] = *val\n\t\t}\n\t}\n\treturn m, nil\n}",
"func CsvToJSON(csvSrc io.Reader, jsonDst io.Writer) error {\n\tenc := json.NewEncoder(jsonDst)\n\tr := csv.NewReader(csvSrc)\n\tn := 0\n\t// TODO: should probably compare header to expected schema\n\tvar headerFields []string\n\tfor {\n\t\tp := &profiles.ChurnProfile{}\n\t\tvar err error\n\t\tvar record []string\n\t\tif record, err = r.Read(); err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tif n == 0 {\n\t\t\theaderFields = record\n\t\t} else {\n\t\t\terr := loadProfile(p, record, headerFields)\n\t\t\tif err == ErrBadCsvCol {\n\t\t\t\t// TODO: handle bad csv rows, whether logging, adding to skip file\n\t\t\t\t// emitting metrics/notification etc, for now just skip this row\n\t\t\t\tlog.Println(\"Encountered bad column, skipping row\")\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif err := enc.Encode(&p); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tn++\n\t\tlog.Println(\"seen\", n)\n\t}\n\t//TODO: emit metrics around lines processed, skipped, etc\n\treturn nil\n}",
"func parseHeader(line string, cb *models.CircularBuffer) {\n\tif fieldsRE.MatchString(line) {\n\t\tfields = strings.Split(line, \" \")[1:]\n\t}\n}",
"func readCSV(csvFileName string) []Record {\n\tvar records []Record\n\n\tcsvFile, err := os.Open(csvFileName)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer csvFile.Close()\n\n\tr := csv.NewReader(bufio.NewReader(csvFile))\n\tr.Comma = ';'\n\tfor {\n\t\trecord, err := r.Read()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\t// fmt.Printf(\"%T: %v\\n\", record, record)\n\t\tcount, err := strconv.Atoi(record[2])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\trecords = append(records, Record{\n\t\t\tDate: record[0],\n\t\t\tTerm: record[1],\n\t\t\tCount: count,\n\t\t})\n\t}\n\n\treturn records\n}",
"func (c *CsvFile) GetRecord(id int) (map[string]interface{}, error) {\n\t// Read Locks\n\tc.mu.RLock()\n\tdefer c.mu.RUnlock()\n\n\t// Open our CSV file\n\tf, err := os.Open(c.file)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to open csv\")\n\t}\n\tdefer f.Close()\n\n\t// increment id by 1 because of header\n\tid = id + 1\n\n\t// Employ our CSV Reader\n\tcsvReader := csv.NewReader(f)\n\trows, err := csvReader.ReadAll()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to read csv\")\n\t}\n\n\t// Check Length of Row\n\tif len(rows) <= id {\n\t\treturn nil, fmt.Errorf(\"index out of range\")\n\t}\n\n\t// index position for title\n\ttitle := find(rows[0], \"title\")\n\n\t// +1 because header is at 0, but ids start at 0\n\trow := rows[id]\n\n\tdata := map[string]interface{}{\n\t\t\"id\": row[0],\n\t\t\"title\": row[title],\n\t}\n\n\treturn data, nil\n}",
"func ReaderReadAll(r *csv.Reader,) ([][]string, error)",
"func readRow(scanner *bufio.Scanner, k int) row {\n\tif !scanner.Scan() { panic(\"missing imput\") }\n\tvar result row = make([]string, k)\n\ti := 0\n\tbuilder := &strings.Builder{}\n\tfor _, c := range scanner.Text() {\n\t\tif c == '\\t' {\n\t\t\tresult[i] = builder.String()\n\t\t\tbuilder.Reset() \n\t\t\ti++ \n\t\t\tif i >= k { panic(\"too much columns in input\") }\n\t\t} else {\n\t\t\tbuilder.WriteRune(c)\n\t\t}\n\t}\n\treturn result\n}",
"func PeekCSV(path string) (dig Digest, err error) {\n\tdefer func() {\n\t\tif e := recover(); e != nil {\n\t\t\terr = e.(error)\n\t\t}\n\t}()\n\tfile, e := os.Open(path)\n\tif e != nil {\n\t\tpanic(fmt.Errorf(\"can't access %q (%v)\", path, e))\n\t}\n\tdefer file.Close()\n\tinfo, e := file.Stat()\n\tif e != nil {\n\t\tpanic(fmt.Errorf(\"can't access %q metadata (%v)\", path, e))\n\t}\n\tbf, row, tlen, max := bufio.NewScanner(file), -1, 0, 1\ngetLine:\n\tfor row < previewRows && bf.Scan() {\n\t\tswitch ln := bf.Text(); {\n\t\tcase len(strings.TrimLeft(ln, \" \")) == 0:\n\t\tcase dig.comment != \"\" && strings.HasPrefix(ln, dig.comment):\n\t\tcase row < 0:\n\t\t\trow = 0\n\t\t\tfor _, p := range commentSet {\n\t\t\t\tif strings.HasPrefix(ln, p) {\n\t\t\t\t\tdig.comment = p\n\t\t\t\t\tcontinue getLine\n\t\t\t\t}\n\t\t\t}\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\trow++\n\t\t\ttlen += len(ln)\n\t\t\tdig.preview = append(dig.preview, ln)\n\t\t}\n\t}\n\tswitch e := bf.Err(); {\n\tcase e != nil:\n\t\tpanic(fmt.Errorf(\"problem reading %q (%v)\", path, e))\n\tcase row < 1:\n\t\tpanic(fmt.Errorf(\"%q does not contain data\", path))\n\tcase row < previewRows:\n\t\tdig.erows = row\n\tdefault:\n\t\tdig.erows = int(float64(info.Size())/float64(tlen-len(dig.preview[0])+row-1)*0.995+0.5) * (row - 1)\n\t}\ngetSep:\n\tfor _, r := range sepSet {\n\t\tc, sl := 0, []string{}\n\t\tfor _, ln := range dig.preview {\n\t\t\tif sl = splitCSV(ln, r); len(sl) <= max || len(sl) != c && c > 0 {\n\t\t\t\tcontinue getSep\n\t\t\t}\n\t\t\tfor _, f := range sl {\n\t\t\t\tif len(f) > maxFieldLen {\n\t\t\t\t\tcontinue getSep\n\t\t\t\t}\n\t\t\t}\n\t\t\tc = len(sl)\n\t\t}\n\t\tmax, dig.sep = c, r\n\t}\n\tif dig.sep > '\\x00' {\n\t\tuf := make(map[string]int, max)\n\t\tfor _, f := range splitCSV(dig.preview[0], dig.sep) {\n\t\t\ttf := strings.Trim(f, \" \")\n\t\t\tif _, e := strconv.ParseFloat(tf, 64); e != nil && len(tf) > 0 {\n\t\t\t\tuf[tf]++\n\t\t\t}\n\t\t\tdig.split = append(dig.split, tf)\n\t\t}\n\t\tif dig.heading = len(uf) == max; dig.heading {\n\t\t\tdig.md5 = fmt.Sprintf(\"%x\", md5.Sum([]byte(strings.Join(dig.split, string(dig.sep)))))\n\t\t}\n\t}\n\treturn\n}",
"func ReadAndParseInput(r io.Reader) ([]int, error) {\n\tcsvReader := csv.NewReader(r)\n\tvar result []int\n\tfor {\n\t\trecord, err := csvReader.Read()\n\t\tif errors.Is(err, io.EOF) {\n\t\t\treturn result, nil\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trecordAsInt, err := strconv.Atoi(record[0])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresult = append(result, recordAsInt)\n\t}\n}",
"func parseChannelModeKindsCSV(kindstr string) (map[rune]int, error) {\n\tif len(kindstr) == 0 {\n\t\treturn nil, fmt.Errorf(fmtErrCsvParse, kindstr)\n\t}\n\n\tkindSplits := strings.Split(kindstr, \",\")\n\tif len(kindSplits) != 4 {\n\t\treturn nil, fmt.Errorf(fmtErrCsvParse, kindstr)\n\t}\n\n\treturn parseChannelModeKinds(\n\t\t\tkindSplits[0], kindSplits[1], kindSplits[2], kindSplits[3]),\n\t\tnil\n}",
"func ReadEndpoints(filename string) (map[string]string, map[string]string, error) {\n\n\tregion := make(map[string]string)\n\tendpoints := make(map[string]string)\n\n\t// Open CSV file\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer f.Close()\n\n\t// Read File into a Variable\n\tlines, err := csv.NewReader(f).ReadAll()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor i, line := range lines {\n\t\tif i != 0 {\n\t\t\tregion[line[0]] = line[1]\n\t\t\tendpoints[line[0]] = line[2]\n\t\t}\n\t}\n\treturn region, endpoints, err\n}",
"func parsePAX(r io.Reader) (map[string]string, error) {\n\tbuf, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\theaders := make(map[string]string)\n\t// Each record is constructed as\n\t// \"%d %s=%s\\n\", length, keyword, value\n\tfor len(buf) > 0 {\n\t\t// or the header was empty to start with.\n\t\tvar sp int\n\t\t// The size field ends at the first space.\n\t\tsp = bytes.IndexByte(buf, ' ')\n\t\tif sp == -1 {\n\t\t\treturn nil, ErrHeader\n\t\t}\n\t\t// Parse the first token as a decimal integer.\n\t\tn, err := strconv.ParseInt(string(buf[:sp]), 10, 0)\n\t\tif err != nil {\n\t\t\treturn nil, ErrHeader\n\t\t}\n\t\t// Extract everything between the decimal and the n -1 on the\n\t\t// beginning to to eat the ' ', -1 on the end to skip the newline.\n\t\tvar record []byte\n\t\trecord, buf = buf[sp+1:n-1], buf[n:]\n\t\t// The first equals is guaranteed to mark the end of the key.\n\t\t// Everything else is value.\n\t\teq := bytes.IndexByte(record, '=')\n\t\tif eq == -1 {\n\t\t\treturn nil, ErrHeader\n\t\t}\n\t\tkey, value := record[:eq], record[eq+1:]\n\t\theaders[string(key)] = string(value)\n\t}\n\treturn headers, nil\n}",
"func (r *regex) load(data [][]string) {\n\n\t//Cycle through all the parse data rows in the parse data xls\n\tfor c, row := range data {\n\n\t\tvar tmpr regexstruct\n\t\t//ignore header\n\t\tif c != 0 {\n\n\t\t\t//Array order 0-Role,1-App,2-Env,3-Loc\n\t\t\ttmpmap := make(map[string]string)\n\t\t\tfor x, lbl := range []string{\"role\", \"app\", \"env\", \"loc\"} {\n\t\t\t\t//place CSV column in map\n\t\t\t\ttmpmap[lbl] = row[x+1]\n\t\t\t}\n\t\t\t//Put the regex string and capture groups into data structure\n\t\t\ttmpr.regex = row[0]\n\t\t\ttmpr.labelcg = tmpmap\n\n\t\t\tr.Regexdata = append(r.Regexdata, tmpr)\n\t\t}\n\n\t}\n}",
"func (self FilterSet) ApplyHeader(header []string) {\n\tindexmap = make(map[string]int)\n\n\tfor idx, field := range header {\n\t\tindexmap[field] = idx\n\t}\n}",
"func parseChannelModeKindsCSV(kindstr string) (map[rune]int, error) {\n\tif len(kindstr) == 0 {\n\t\treturn nil, fmt.Errorf(fmtErrCsvParse, kindstr)\n\t}\n\n\tkindSplits := strings.Split(kindstr, \",\")\n\tif len(kindSplits) != 4 {\n\t\treturn nil, fmt.Errorf(fmtErrCsvParse, kindstr)\n\t}\n\n\treturn parseChannelModeKinds(\n\t\tkindSplits[0], kindSplits[1], kindSplits[2], kindSplits[3]), nil\n}",
"func NewRecordFromCSV(rec []string) (*Record, error) {\n\tif len(rec) < 6 {\n\t\treturn nil, ErrInvalidRecord\n\t}\n\n\tvar tm, err = time.Parse(time.RFC3339, rec[4])\n\tif err != nil {\n\t\treturn nil, ErrInvalidRecord\n\t}\n\n\tcode, err := strconv.ParseInt(rec[6], 10, 8)\n\tif err != nil {\n\t\treturn nil, ErrInvalidRecord\n\t}\n\n\treturn &Record{\n\t\tUserID: rec[1],\n\t\tExecEnd: tm,\n\t\tExitCode: uint8(code),\n\t}, nil\n}",
"func (t *Table) readHeader(row []string) []int {\n\treturn t.header.set(row)\n}",
"func readinput(r io.Reader, f string) {\n\n s := bufio.NewScanner(r)\n\n ln := 0\n for s.Scan() {\n ln++\n l := s.Text()\n c := strings.Split(strings.TrimSpace(l), seperator)\n\n if len(c) == 0 {\n readerr(\"No columns parsed from line\", f, ln, nil)\n continue\n }\n\n if column > len(c) {\n readerr(\"No enough columns to meet requested column number\", f, ln, nil)\n continue\n }\n\n v, e := strconv.ParseFloat(c[column - 1], 64)\n\n if e != nil {\n readerr(\"Failed to parse line\", f, ln, e)\n }\n\n // remove the value column since we don't to output that in the result\n columns := []string{}\n for k, v := range c {\n if k == (column - 1) {\n continue\n }\n columns = append(columns, v)\n }\n\n k := hash(strings.Join(columns, \"\"))\n\n if _, ok := summary[k]; !ok {\n summary[k] = NewRow(columns, k)\n }\n\n summary[k].Add(v, f)\n }\n\n if e := s.Err(); e != nil {\n panic(e)\n }\n\n}",
"func main() {\n\t// Open the file.\n\tfile, err := os.Open(dataFile)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// Schedule the file to be closed.\n\tdefer file.Close()\n\n\t// Contains all the field/value mappings for every line.\n\tvar fvMappings []map[string]string\n\n\t// Create a reader for the file.\n\tr := bufio.NewReader(file)\n\tfor {\n\t\t// Read all the bytes up to the end of line marker.\n\t\tline, err := r.ReadSlice('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\t// Capture the field/value mappings for this line.\n\t\tfv := make(map[string]string)\n\n\t\tvar start int\n\t\tvar field int\n\t\tfor index := 0; index < len(line); index++ {\n\t\t\t// If we don't find a space or EOL, check the next byte.\n\t\t\tif line[index] != ' ' && line[index] != '\\n' {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If the start and index values are the same, we have more than\n\t\t\t// one space separating the next value.\n\t\t\tif start == index {\n\t\t\t\tstart = index + 1\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Slice the value from the line and add the value to the map\n\t\t\t// for the specified field name.\n\t\t\tfv[fieldNames[field]] = string(line[start:index])\n\t\t\tfield++\n\t\t\tstart = index + 1\n\t\t}\n\n\t\t// Append the field/value map to the master collection.\n\t\tfvMappings = append(fvMappings, fv)\n\t}\n\n\t// Display all of the field/value maps.\n\tfor _, fv := range fvMappings {\n\t\tfmt.Printf(\"%#v\\n\\n\", fv)\n\t}\n}"
] | [
"0.68890405",
"0.6685908",
"0.65211517",
"0.6323418",
"0.63146794",
"0.63065344",
"0.6174222",
"0.6147124",
"0.6009719",
"0.6006986",
"0.5970537",
"0.5944572",
"0.5903829",
"0.58752096",
"0.5776775",
"0.57162684",
"0.57082254",
"0.57051295",
"0.56925297",
"0.5662414",
"0.56526715",
"0.56381994",
"0.56263775",
"0.55509186",
"0.55053884",
"0.5497749",
"0.5495392",
"0.5485109",
"0.54650563",
"0.5430678",
"0.53836364",
"0.5372102",
"0.5369643",
"0.5332273",
"0.53302735",
"0.5308203",
"0.5284697",
"0.5268596",
"0.5253823",
"0.5224984",
"0.5219852",
"0.5218377",
"0.5195875",
"0.51692426",
"0.5168244",
"0.516057",
"0.5160215",
"0.5157481",
"0.5150558",
"0.51264787",
"0.51067907",
"0.5101705",
"0.5086658",
"0.5063153",
"0.5062594",
"0.506108",
"0.5042563",
"0.50261176",
"0.50101936",
"0.49926293",
"0.49863058",
"0.49849236",
"0.4983218",
"0.49815285",
"0.49791172",
"0.49761027",
"0.49703383",
"0.49699914",
"0.49679902",
"0.49605033",
"0.4958277",
"0.4944526",
"0.49430063",
"0.4936555",
"0.49311554",
"0.49221435",
"0.4908836",
"0.490346",
"0.4897454",
"0.4886257",
"0.48821306",
"0.487961",
"0.48741862",
"0.4872871",
"0.4870024",
"0.48674744",
"0.48610488",
"0.48562655",
"0.48539704",
"0.48529258",
"0.4849468",
"0.48478594",
"0.4846029",
"0.48441198",
"0.48425302",
"0.48424527",
"0.48378065",
"0.48320043",
"0.48215413",
"0.48156345"
] | 0.67590743 | 1 |
Parses a simple DAY/MONTH/YEAR date format into a Time | func ParseSimpleDate(date string) (t time.Time, err error) {
return time.Parse(SimpleDateFormat, date)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ParseDate(date string) (time.Time, error)",
"func Time(s string) (result time.Time) {\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\tresult = time.Time{}\n\t\t}\n\t}()\n\n\tif len(s) < 6 {\n\t\treturn time.Time{}\n\t}\n\n\tif !strings.HasPrefix(s, \"/\") {\n\t\ts = \"/\" + s\n\t}\n\n\tb := []byte(s)\n\n\tif found := DateCanonicalRegexp.Find(b); len(found) > 0 { // Is it a canonical name like \"20120727_093920_97425909.jpg\"?\n\t\tif date, err := time.Parse(\"20060102_150405\", string(found[1:16])); err == nil {\n\t\t\tresult = date.Round(time.Second).UTC()\n\t\t}\n\t} else if found := DateTimeRegexp.Find(b); len(found) > 0 { // Is it a date with time like \"2020-01-30_09-57-18\"?\n\t\tn := DateIntRegexp.FindAll(found, -1)\n\n\t\tif len(n) != 6 {\n\t\t\treturn result\n\t\t}\n\n\t\tyear := Int(string(n[0]))\n\t\tmonth := Int(string(n[1]))\n\t\tday := Int(string(n[2]))\n\t\thour := Int(string(n[3]))\n\t\tmin := Int(string(n[4]))\n\t\tsec := Int(string(n[5]))\n\n\t\tif year < YearMin || year > YearMax || month < MonthMin || month > MonthMax || day < DayMin || day > DayMax {\n\t\t\treturn result\n\t\t}\n\n\t\tif hour < HourMin || hour > HourMax || min < MinMin || min > MinMax || sec < SecMin || sec > SecMax {\n\t\t\treturn result\n\t\t}\n\n\t\tresult = time.Date(\n\t\t\tyear,\n\t\t\ttime.Month(month),\n\t\t\tday,\n\t\t\thour,\n\t\t\tmin,\n\t\t\tsec,\n\t\t\t0,\n\t\t\ttime.UTC)\n\n\t} else if found := DateRegexp.Find(b); len(found) > 0 { // Is it a date only like \"2020-01-30\"?\n\t\tn := DateIntRegexp.FindAll(found, -1)\n\n\t\tif len(n) != 3 {\n\t\t\treturn result\n\t\t}\n\n\t\tyear := Int(string(n[0]))\n\t\tmonth := Int(string(n[1]))\n\t\tday := Int(string(n[2]))\n\n\t\tif year < YearMin || year > YearMax || month < MonthMin || month > MonthMax || day < DayMin || day > DayMax {\n\t\t\treturn result\n\t\t}\n\n\t\tresult = time.Date(\n\t\t\tyear,\n\t\t\ttime.Month(month),\n\t\t\tday,\n\t\t\t0,\n\t\t\t0,\n\t\t\t0,\n\t\t\t0,\n\t\t\ttime.UTC)\n\t} else if found := DatePathRegexp.Find(b); len(found) > 0 { // Is it a date path like \"2020/01/03\"?\n\t\tn := DateIntRegexp.FindAll(found, -1)\n\n\t\tif len(n) < 2 || len(n) > 3 {\n\t\t\treturn result\n\t\t}\n\n\t\tyear := Int(string(n[0]))\n\t\tmonth := Int(string(n[1]))\n\n\t\tif year < YearMin || year > YearMax || month < MonthMin || month > MonthMax {\n\t\t\treturn result\n\t\t}\n\n\t\tif len(n) == 2 {\n\t\t\tresult = time.Date(\n\t\t\t\tyear,\n\t\t\t\ttime.Month(month),\n\t\t\t\t1,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\ttime.UTC)\n\t\t} else if day := Int(string(n[2])); day >= DayMin && day <= DayMax {\n\t\t\tresult = time.Date(\n\t\t\t\tyear,\n\t\t\t\ttime.Month(month),\n\t\t\t\tday,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\t0,\n\t\t\t\ttime.UTC)\n\t\t}\n\t}\n\n\treturn result.UTC()\n}",
"func Parse(year, month, day, hourMinute string, loc *time.Location) (time.Time, error) {\n\tnow := time.Now().In(loc)\n\n\ty64, err := strconv.ParseInt(year, 10, 0)\n\ty := int(y64)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tif y < now.Year()-1000 {\n\t\treturn time.Time{}, fmt.Errorf(\"bad year; %d is too far in the past\", y)\n\t}\n\tm, err := strconv.ParseInt(month, 10, 0)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tif m < 0 || m > 11 {\n\t\treturn time.Time{}, fmt.Errorf(\"bad month: %d is not within [0, 11]\", m)\n\t}\n\t// Month +1 since time.Month is [1, 12].\n\tm = m + 1\n\td64, err := strconv.ParseInt(day, 10, 0)\n\td := int(d64)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tif d < 1 {\n\t\treturn time.Time{}, fmt.Errorf(\"bad day: %d; can't be negative\", d)\n\t} else if d > daysIn(time.Month(m), y) {\n\t\treturn time.Time{}, fmt.Errorf(\"bad day: %d; only %d days in %v, %d\", d, daysIn(time.Month(m), y), time.Month(m), y)\n\t}\n\tparts := strings.Split(hourMinute, \":\")\n\tif len(parts) != 2 {\n\t\treturn time.Time{}, fmt.Errorf(\"bad hour/minute: %s\", hourMinute)\n\t}\n\th, err := strconv.ParseInt(parts[0], 10, 0)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tif h < 0 || h > 60 {\n\t\treturn time.Time{}, fmt.Errorf(\"bad hour: %d\", h)\n\t}\n\tmin, err := strconv.ParseInt(parts[1], 10, 0)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tif min < 0 || min > 60 {\n\t\treturn time.Time{}, fmt.Errorf(\"bad minute: %d\", min)\n\t}\n\n\tt := time.Time(time.Date(int(y), time.Month(m), int(d), int(h), int(min), 0, 0, loc))\n\tif t.After(now) {\n\t\treturn time.Time{}, fmt.Errorf(\"bad time; %v is in the future\", time.Time(t))\n\t}\n\treturn t, nil\n}",
"func (t DateString) Parse() (time.Time, error) {\n\treturn time.ParseInLocation(\"200601021504\", string(t), Timezone)\n}",
"func Parse(layout, d string) (Date, error) {\n\tt, err := time.Parse(layout, d)\n\tif err != nil {\n\t\treturn Date{}, err\n\t}\n\n\treturn Of(t), nil\n}",
"func Date(year int, month Month, day, hour, min, sec, nsec int, loc *Location) Time {}",
"func toTime(s string) time.Time {\n\tlayout := \"2006-01-02\"\n\tt, _ := time.Parse(layout, s)\n\n\treturn t\n}",
"func parseTime(ts string) (time.Time, error) {\n\treturn time.ParseInLocation(\"02-Jan-2006 03:04:05 PM\", ts, tz)\n}",
"func parseTime(rawtime string) time.Time {\n\trawtime = strings.Replace(rawtime, \"下午\", \"PM \", -1)\n\trawtime = strings.Replace(rawtime, \"上午\", \"AM \", -1)\n\ttm, err := time.Parse(\"2006-1-2 PM 3:04\", rawtime)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn time.Now().AddDate(-10, 0, 0)\n\t}\n\treturn tm\n\n}",
"func ParseTime(s string, fmt string) (ti time.Time, err error) {\n\tvar i int64\n\tswitch fmt {\n\tcase \"ns\", \"nanosecond\":\n\t\tif i, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\treturn time.Unix(0, i), nil\n\t\t}\n\tcase \"us\", \"microsecond\":\n\t\tif i, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\treturn time.Unix(0, i*int64(time.Microsecond)), nil\n\t\t}\n\tcase \"ms\", \"millisecond\":\n\t\tif i, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\treturn time.Unix(0, i*int64(time.Millisecond)), nil\n\t\t}\n\tcase \"s\", \"second\":\n\t\tif i, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\treturn time.Unix(0, i*int64(time.Second)), nil\n\t\t}\n\tcase \"mi\", \"minute\":\n\t\tif i, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\treturn time.Unix(0, i*int64(time.Minute)), nil\n\t\t}\n\tcase \"h\", \"hour\":\n\t\tif i, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\treturn time.Unix(0, i*int64(time.Hour)), nil\n\t\t}\n\tcase \"d\", \"day\":\n\t\tif i, err = strconv.ParseInt(s, 10, 64); err != nil {\n\t\t\treturn\n\t\t} else {\n\t\t\treturn time.Unix(0, i*int64(time.Hour)*24), nil\n\t\t}\n\tdefault:\n\t\tif fmt == \"\" {\n\t\t\tfmt = time.RFC3339Nano\n\t\t}\n\t\tif ti, e := time.ParseInLocation(fmt, s, time.Local); e != nil {\n\t\t\treturn ti, errs.New(e)\n\t\t} else {\n\t\t\treturn ti, nil\n\t\t}\n\t}\n}",
"func parseTime(s string, started bool, after time.Time) (time.Time, error) {\n\tdaysOfWeek := map[string]time.Weekday{\n\t\t\"mon\": time.Monday,\n\t\t\"tue\": time.Tuesday,\n\t\t\"wed\": time.Wednesday,\n\t\t\"thu\": time.Thursday,\n\t\t\"fri\": time.Friday,\n\t\t\"sat\": time.Saturday,\n\t\t\"sun\": time.Sunday,\n\t}\n\n\ts = strings.ToLower(s)\n\td := strings.SplitN(s, \", \", 2)\n\ttimesString := strings.SplitN(d[1], \":\", 2)\n\tvar timesInt [2]int\n\tfor index, tString := range timesString {\n\t\ttimesInt[index], _ = strconv.Atoi(tString)\n\t}\n\n\tweekTime, ok := daysOfWeek[d[0]] // TODO: can make it better\n\tif !ok {\n\t\treturn time.Time{}, errors.New(\"invalid weekday name\")\n\t}\n\n\ttimeNow := time.Now()\n\n\tt := time.Date(\n\t\ttimeNow.Year(),\n\t\ttimeNow.Month(),\n\t\ttimeNow.Day(),\n\t\ttimesInt[0],\n\t\ttimesInt[1],\n\t\t0,\n\t\t0,\n\t\ttimeNow.Location(),\n\t)\n\n\t// Add one day to valid date\n\tif after.IsZero() && timeNow.After(t) {\n\t\tif t.Weekday() == weekTime && !started {\n\t\t\tt = t.AddDate(0, 0, 1)\n\t\t}\n\t}\n\n\t// Set t weekday to next day named as in args\n\tfor t.Weekday() != weekTime {\n\t\tt = t.AddDate(0, 0, 1)\n\t}\n\n\treturn t, nil\n}",
"func CalcTime(s string) (time.Time, error) {\n\tt := time.Time{}\n\tif len(s) != 17 {\n\t\treturn t, fmt.Errorf(\"invalid time string length %d for %s\", len(s), s)\n\t}\n\ty, e := strconv.Atoi(s[:4])\n\tif e != nil {\n\t\treturn t, errors.NewUtil(s, \"reading year\", e)\n\t}\n\tif y < 1900 || y > 3000 {\n\t\treturn t, fmt.Errorf(\"invalid year %d\", y)\n\t}\n\tm, e := strconv.Atoi(s[4:6])\n\tif e != nil {\n\t\treturn t, errors.NewUtil(s, \"reading month\", e)\n\t}\n\tif m < 1 || m > 12 {\n\t\treturn t, fmt.Errorf(\"invalid month %d\", m)\n\t}\n\td, e := strconv.Atoi(s[6:8])\n\tif e != nil {\n\t\treturn t, errors.NewUtil(s, \"reading day\", e)\n\t}\n\tif d < 1 || d > 31 {\n\t\treturn t, fmt.Errorf(\"invalid day %d\", d)\n\t}\n\th, e := strconv.Atoi(s[8:10])\n\tif e != nil {\n\t\treturn t, errors.NewUtil(s, \"reading hour\", e)\n\t}\n\tif h < 0 || h > 24 {\n\t\treturn t, fmt.Errorf(\"invalid hour %d\", h)\n\t}\n\tmi, e := strconv.Atoi(s[10:12])\n\tif e != nil {\n\t\treturn t, errors.NewUtil(s, \"reading minutes\", e)\n\t}\n\tif mi < 0 || mi > 60 {\n\t\treturn t, fmt.Errorf(\"invalid minutes %d\", mi)\n\t}\n\tsc, e := strconv.Atoi(s[12:14])\n\tif e != nil {\n\t\treturn t, errors.NewUtil(s, \"reading seconds\", e)\n\t}\n\tif sc < 0 || sc > 60 {\n\t\treturn t, fmt.Errorf(\"invalid seconds %d\", sc)\n\t}\n\tms, e := strconv.Atoi(s[14:17])\n\tif e != nil {\n\t\treturn t, errors.NewUtil(s, \"reading miliseconds\", e)\n\t}\n\tif ms < 0 || ms > 1000 {\n\t\treturn t, fmt.Errorf(\"invalid miliseconds %d\", ms)\n\t}\n\tl, e := time.LoadLocation(\"Local\")\n\tif e != nil {\n\t\treturn t, errors.NewUtil(\"\", \"loading location\", e)\n\t}\n\treturn time.Date(y, time.Month(m), d, h, mi, sc, ms*1000000, l), nil\n}",
"func TimeParseAny(dtStr string) time.Time {\n\tformats := []string{\n\t\t\"2006-01-02T15:04:05Z\",\n\t\t\"2006-01-02 15:04:05\",\n\t\t\"2006-01-02 15:04\",\n\t\t\"2006-01-02 15\",\n\t\t\"2006-01-02\",\n\t\t\"2006-01\",\n\t\t\"2006\",\n\t}\n\tfor _, format := range formats {\n\t\tt, e := time.Parse(format, dtStr)\n\t\tif e == nil {\n\t\t\treturn t\n\t\t}\n\t}\n\tPrintf(\"Error:\\nCannot parse date: '%v'\\n\", dtStr)\n\tfmt.Fprintf(os.Stdout, \"Error:\\nCannot parse date: '%v'\\n\", dtStr)\n\tos.Exit(1)\n\treturn time.Now()\n}",
"func TimeParseAny(value string) (*time.Time, error) {\n\tif t, err := time.Parse(\"2006-01-02 15:04:05\", value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RFC3339, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RFC3339Nano, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RFC822, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RFC822Z, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RFC850, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RFC1123, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RFC1123Z, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.UnixDate, value); err == nil {\n\t\treturn &t, nil\n\t} else if t, err = time.Parse(time.RubyDate, value); err == nil {\n\t\treturn &t, nil\n\t}\n\treturn nil, fmt.Errorf(\"parse datetime %s error\", value)\n}",
"func ParseTime(format, operand string) (time.Time, error) { return time.Parse(format, operand) }",
"func ParseTime(input string) (time.Time, error) {\n\t// validate input with regex\n\tmatch, err := regexp.MatchString(`^[0-9]{1,2}\\/[0-9]{1,2}\\/[0-9]{1,4}$`, input)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\tif !match {\n\t\treturn time.Time{}, errors.New(\"invalid time format\")\n\t\t// return error\n\t}\n\tinputArray := strings.Split(input, \"/\")\n\tfor location, dateStamp := range inputArray {\n\t\t// lazy validation of string\n\t\t// if less 2 strings per position format from 1 to 01\n\t\tif len(dateStamp) < 2 {\n\t\t\ttempFmt := fmt.Sprintf(\"0%s\", dateStamp)\n\t\t\t// modify inputArray with the new format\n\t\t\tinputArray[location] = tempFmt\n\n\t\t}\n\n\t}\n\n\tnewFormattedInput := strings.Join(inputArray, \"/\")\n\tparsedDate, err := time.Parse(\"02/01/2006\", newFormattedInput)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\t// validate date range\n\tif !parsedDate.After(MinDate) && parsedDate.Before(MaxDate) {\n\n\t\treturn time.Time{}, errors.New(\"error dates outside of acceptable range\")\n\t}\n\treturn parsedDate, nil\n}",
"func Parse(str string) (time.Time, error) {\n\tdatefmt, err := DateParser(str)\n\tif err != nil {\n\t\treturn time.Time{}, errors.Wrap(err, NewParseError(str, \"Parsing date format\"))\n\t}\n\ttimefmt, err := TimeParser(str)\n\tif err != nil {\n\t\treturn time.Time{}, errors.Wrap(err, NewParseError(str, \"Parsing time format\"))\n\t}\n\n\treturn time.Parse(datefmt+timefmt, str)\n}",
"func parseTime(msg []byte) *time.Time {\n\t// convert input to integer\n\ti := binary.BigEndian.Uint32(msg)\n\n\t// convert time from 1900 to Unix time\n\tt := int64(i) - timeGap1900\n\n\tres := time.Unix(t, 0)\n\treturn &res\n}",
"func ParseOrdinalDate(layout, value string) (time.Time, error) {\n\tvalue = strings.ToLower(strings.Replace(value, \",\", \"\", -1))\n\tdateStringSplited := strings.Split(value, \" \")\n\n\tday, err := strconv.ParseInt(dayOrdinals[dateStringSplited[0]], 10, 64)\n\tif err != nil {\n\t\treturn time.Time{}, errors.New(\"Day out of range\")\n\t}\n\tmonthString := strings.ToLower(dateStringSplited[1])\n\tmonth, found := monthStrings[monthString]\n\tif !found {\n\t\treturn time.Time{}, errors.New(\"Month out of range \" + monthString)\n\t}\n\tyear, err := strconv.ParseInt(dateStringSplited[2], 10, 64)\n\n\t//hadle and log err in format\n\n\treturn time.Date(int(year), time.Month(month), int(day), 0,0,0,0, DefaultLocation), nil\n\n\tconst ( // day number\n\t\tcardMinLen = len(\"1\")\n\t\tcardMaxLen = len(\"31\")\n\t\tordSfxLen = len(\"th\")\n\t\tordMinLen = cardMinLen + ordSfxLen\n\t)\n\n\tfor k := 0; k < len(value)-ordMinLen; {\n\t\t// i number start\n\t\tfor ; k < len(value) && (value[k] > '9' || value[k] < '0'); k++ {\n\t\t}\n\t\ti := k\n\t\t// j cardinal end\n\t\tfor ; k < len(value) && (value[k] <= '9' && value[k] >= '0'); k++ {\n\t\t}\n\t\tj := k\n\t\tif j-i > cardMaxLen || j-i < cardMinLen {\n\t\t\tcontinue\n\t\t}\n\t\t// k ordinal end\n\t\t// ASCII Latin (uppercase | 0x20) = lowercase\n\t\tfor ; k < len(value) && (value[k]|0x20 >= 'a' && value[k]|0x20 <= 'z'); k++ {\n\t\t}\n\t\tif k-j != ordSfxLen {\n\t\t\tcontinue\n\t\t}\n\n\t\t// day ordinal to cardinal\n\t\tfor ; i < j-1 && (value[i] == '0'); i++ {\n\t\t}\n\t\to := strings.ToLower(value[i:k])\n\t\tc, ok := dayOrdinals[o]\n\t\tif ok {\n\t\t\tvalue = value[:i] + c + value[k:]\n\t\t\tbreak\n\t\t}\n\t}\n\n\tdate, err := time.ParseInLocation(layout, value, DefaultLocation)\n\treturn date, err\n}",
"func ParseTime(ts string) (time.Time, error) {\n\traw := []byte(ts)\n\tif len(raw) != 6 {\n\t\treturn time.Time{}, errors.New(\"ts illegal\")\n\t}\n\n\tfor _, c := range raw {\n\t\tif int(c) > len(reversedAlphabet) || reversedAlphabet[c] == -1 {\n\t\t\treturn time.Time{}, errors.New(\"ts illegal\")\n\t\t}\n\t}\n\n\toffset := uint8(reversedAlphabet[raw[5]])\n\n\tym := uint8((offset>>3)*alphabets) + uint8(reversedAlphabet[raw[0]])\n\tyear := int(ym>>4) + epochYear\n\tmonth := time.Month(ym << 4 >> 4)\n\n\tday := reversedAlphabet[raw[1]]\n\n\thour := int((offset<<5>>7)*alphabets) + reversedAlphabet[raw[2]]\n\tminute := int((offset<<6>>7)*alphabets) + reversedAlphabet[raw[3]]\n\tsecond := int((offset<<7>>7)*alphabets) + reversedAlphabet[raw[4]]\n\n\treturn time.Date(year, month, day, hour, minute, second, 0, time.UTC), nil\n}",
"func CheckParse(s string) (time.Time, error) {\n\tvar value time.Time\n\tvar err error\n\tvalue, err = time.Parse(time.RFC3339, s)\n\tif err != nil {\n\t\tvalue, err = time.Parse(\"2006-01-02\", s)\n\t\tif err != nil {\n\t\t\treturn time.Time{}, fmt.Errorf(\"invalid date format (%s) provided\", s)\n\t\t}\n\t}\n\treturn value, nil\n}",
"func (t Time) Parse() (ts time.Time, err error) {\n\ts := strings.TrimSpace(string(t))\n\t// Support for Bitpipe timestamps using 24:00:00\n\ts = strings.Replace(s, \" 24:\", \" 00:\", 1)\n\tfor _, layout := range formats {\n\t\tif ts, err = time.Parse(layout, s); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\t// As in time.Parse(), return UTC for the first arg, which will come out\n\t// of the previous calls to time.Parse()\n\treturn ts, errors.New(\"Could not parse \" + s)\n}",
"func parseTime(format string, timeStr string) time.Time {\n\tt, err := time.Parse(format, timeStr)\n\tcheck(err)\n\treturn t\n}",
"func ParseFormatted(input string) (time.Time, string, error) {\n\t// \"Mon, 02 Jan 2006 15:04:05 MST\"\n\tif t, err := time.Parse(time.RFC1123, input); err == nil {\n\t\treturn t, time.RFC1123, nil\n\t}\n\n\t// \"Mon, 02 Jan 2006 15:04:05 -0700\"\n\tif t, err := time.Parse(time.RFC1123Z, input); err == nil {\n\t\treturn t, time.RFC1123Z, nil\n\t}\n\n\t// \"2006-01-02T15:04:05Z07:00\"\n\tif t, err := time.Parse(time.RFC3339, input); err == nil {\n\t\treturn t, time.RFC3339, nil\n\t}\n\n\t// \"2006-01-02T15:04:05.999999999Z07:00\"\n\tif t, err := time.Parse(time.RFC3339Nano, input); err == nil {\n\t\treturn t, time.RFC3339Nano, nil\n\t}\n\n\t// \"02 Jan 06 15:04 MST\"\n\tif t, err := time.Parse(time.RFC822, input); err == nil {\n\t\treturn t, time.RFC822, nil\n\t}\n\n\t// \"02 Jan 06 15:04 -0700\"\n\tif t, err := time.Parse(time.RFC822Z, input); err == nil {\n\t\treturn t, time.RFC822Z, nil\n\t}\n\n\t// \"Monday, 02-Jan-06 15:04:05 MST\"\n\tif t, err := time.Parse(time.RFC850, input); err == nil {\n\t\treturn t, time.RFC850, nil\n\t}\n\n\t// \"Mon Jan _2 15:04:05 2006\"\n\tif t, err := time.Parse(time.ANSIC, input); err == nil {\n\t\treturn t, time.ANSIC, nil\n\t}\n\n\t// \"Mon Jan _2 15:04:05 MST 2006\"\n\tif t, err := time.Parse(time.UnixDate, input); err == nil {\n\t\treturn t, time.UnixDate, nil\n\t}\n\n\t// \"Mon Jan 02 15:04:05 -0700 2006\"\n\tif t, err := time.Parse(time.RubyDate, input); err == nil {\n\t\treturn t, time.RubyDate, nil\n\t}\n\n\t// \"3:04PM\"\n\tif t, err := time.Parse(time.Kitchen, input); err == nil {\n\t\treturn t, time.Kitchen, nil\n\t}\n\n\t// \"Jan _2 15:04:05\"\n\tif t, err := time.Parse(time.Stamp, input); err == nil {\n\t\treturn t, time.Stamp, nil\n\t}\n\n\t// \"Jan _2 15:04:05.000\"\n\tif t, err := time.Parse(time.StampMilli, input); err == nil {\n\t\treturn t, time.StampMilli, nil\n\t}\n\n\t// \"Jan _2 15:04:05.000000\"\n\tif t, err := time.Parse(time.StampMicro, input); err == nil {\n\t\treturn t, time.StampMicro, nil\n\t}\n\n\t// \"Jan _2 15:04:05.000000000\"\n\tif t, err := time.Parse(time.StampNano, input); err == nil {\n\t\treturn t, time.StampNano, nil\n\t}\n\n\t// \"Mon, 02 Jan 2006 15:04:05 GMT\"\n\tif t, err := time.Parse(FormatHTTP, input); err == nil {\n\t\treturn t, FormatHTTP, nil\n\t}\n\n\tif t, err := time.Parse(FormatGo, strings.Split(input, \" m=\")[0]); err == nil {\n\t\treturn t, FormatGo, nil\n\t}\n\n\t// \"2019-01-25 21:51:38\"\n\tif t, err := time.Parse(FormatSimple, input); err == nil {\n\t\treturn t, FormatSimple, nil\n\t}\n\n\treturn time.Time{}, \"\", ErrParseFormatted\n}",
"func Parse(layout, value string) (Time, error) {}",
"func ParseTime(t string) (time.Time, error) {\n\tlayout := \"2006-01-02 15:04:05 ZO700\"\n\treturn time.Parse(layout, t)\n}",
"func ParseTime(s string) Time {\n\ttp := timeParser{p: s}\n\n\t// TODO: limit length of elements so that\n\t// 20060102T000000 is not parsed as year 20060102\n\n\tyear := tp.val(\":-\")\n\tmonth := tp.xval(\":-\")\n\tday := tp.xval(\"tT\")\n\n\tif tp.prec == 0 {\n\t\treturn Time{}\n\t}\n\n\thour := tp.val(\":\")\n\tmin := tp.val(\":\")\n\tsec := tp.val(\".\")\n\n\tnsec, ndenom, ok := tp.rat(\"\")\n\tif ok {\n\t\tfor ndenom < 1e9 {\n\t\t\tnsec, ndenom = nsec*10, ndenom*10\n\t\t}\n\t}\n\n\tloc := tp.loc()\n\n\treturn Time{\n\t\tTime: time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc),\n\t\tPrec: tp.prec,\n\t\tHasLoc: tp.hasLoc,\n\t}\n}",
"func parseDataUltimaCompra(field string) (*time.Time, error) {\n\tif nullStringAsNil(field) {\n\t\treturn nil, nil\n\t}\n\tparse, err := time.Parse(\"2006-01-02\", field)\n\treturn &parse, err\n}",
"func TimeParse(datetime string) (timestamp time.Time, err error) {\n\tdatetime = strings.Replace(datetime, \"am\", \"AM\", -1)\n\tdatetime = strings.Replace(datetime, \"pm\", \"PM\", -1)\n\ttimeFormats := []string{\n\t\ttime.ANSIC,\n\t\ttime.UnixDate,\n\t\ttime.RubyDate,\n\t\ttime.RFC822,\n\t\ttime.RFC822Z,\n\t\ttime.RFC850,\n\t\ttime.RFC1123,\n\t\ttime.RFC1123Z,\n\t\ttime.RFC3339,\n\t\ttime.RFC3339Nano,\n\t\ttime.Kitchen,\n\t\ttime.Stamp,\n\t\ttime.StampMilli,\n\t\ttime.StampMicro,\n\t\ttime.StampNano,\n\t}\n\n\tcustomDate := []string{\n\t\t\"January 2 2006\",\n\t\t\"Jan 2 2006\",\n\t\t\"1 2 2006\",\n\t\t\"01 2 2006\",\n\t\t\"1 02 2006\",\n\t\t\"01 02 2006\",\n\n\t\t\"2 January 2006\",\n\t\t\"2 Jan 2006\",\n\t\t\"2 1 2006\",\n\t\t\"2 01 2006\",\n\t\t\"02 1 2006\",\n\t\t\"02 01 2006\",\n\n\t\t\"2006 January 2\",\n\t\t\"2006 Jan 2\",\n\t\t\"2006 1 2\",\n\t\t\"2006 01 2\",\n\t\t\"2006 1 02\",\n\t\t\"2006 01 02\",\n\n\t\t\"January 2\",\n\t\t\"Jan 2\",\n\t\t\"1 2\",\n\t\t\"01 2\",\n\t\t\"1 02\",\n\t\t\"01 02\",\n\n\t\t\"2 January\",\n\t\t\"2 Jan\",\n\t\t\"2 1\",\n\t\t\"2 01\",\n\t\t\"02 1\",\n\t\t\"02 01\",\n\t}\n\n\tcustomTime := []string{\n\t\t\"15:04:05\",\n\t\t\"15:04\",\n\n\t\t\"3:04 PM\",\n\t\t\"03:04 PM\",\n\t\t\"3 PM\",\n\t\t\"03 PM\",\n\n\t\t\"3:04PM\",\n\t\t\"03:04PM\",\n\t\t\"3PM\",\n\t\t\"03PM\",\n\t}\n\n\tcustomZone := []string{\n\t\t\"MST\",\n\n\t\t\"GMT-0700\",\n\t\t\"GMT-7\",\n\t\t\"GMT-07\",\n\t\t\"GMT-07:00\",\n\t\t\"GMT-7:00\",\n\n\t\t\"UTC-0700\",\n\t\t\"UTC-7\",\n\t\t\"UTC-07\",\n\t\t\"UTC-07:00\",\n\t\t\"UTC-7:00\",\n\t}\n\n\tfor _, timeFormat := range timeFormats {\n\t\ttimestamp, err = time.Parse(timeFormat, datetime)\n\t\tif err == nil {\n\t\t\treturn timestamp, nil\n\t\t}\n\t}\n\n\t// Run custom formats only if none of the default formats work\n\tfor _, date := range customDate {\n\t\ttimestamp, err = time.Parse(date, datetime)\n\t\tif err == nil {\n\t\t\treturn timestamp, nil\n\t\t}\n\n\t\tfor _, timer := range customTime {\n\t\t\ttimestamp, err = time.Parse(timer, datetime)\n\t\t\tif err == nil {\n\t\t\t\ttimestamp = timestamp.AddDate(time.Now().Year(), int(time.Now().Month())-1, time.Now().Day())\n\t\t\t\treturn timestamp, nil\n\t\t\t}\n\n\t\t\ttimestamp, err = time.Parse(date+\" \"+timer, datetime)\n\t\t\tif err == nil {\n\t\t\t\treturn timestamp, nil\n\t\t\t}\n\n\t\t\ttimestamp, err = time.Parse(timer+\" \"+date, datetime)\n\t\t\tif err == nil {\n\t\t\t\treturn timestamp, nil\n\t\t\t}\n\n\t\t\tfor _, zone := range customZone {\n\t\t\t\ttimestamp, err = time.Parse(date+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(timer+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\ttimestamp = timestamp.AddDate(time.Now().Year(), int(time.Now().Month())-1, time.Now().Day())\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+date, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+timer, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\ttimestamp = timestamp.AddDate(time.Now().Year(), int(time.Now().Month())-1, time.Now().Day())\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(date+\" \"+timer+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(date+\" \"+zone+\" \"+timer, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, 
nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(timer+\" \"+date+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(timer+\" \"+zone+\" \"+date, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+timer+\" \"+date, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+date+\" \"+timer, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Run with dashed date now if none of the non-dashed work\n\tfor _, date := range customDate {\n\t\tdate = dashed(date)\n\n\t\ttimestamp, err = time.Parse(date, datetime)\n\t\tif err == nil {\n\t\t\treturn timestamp, nil\n\t\t}\n\n\t\tfor _, timer := range customTime {\n\t\t\ttimestamp, err = time.Parse(timer, datetime)\n\t\t\tif err == nil {\n\t\t\t\treturn timestamp, nil\n\t\t\t}\n\n\t\t\ttimestamp, err = time.Parse(date+\" \"+timer, datetime)\n\t\t\tif err == nil {\n\t\t\t\treturn timestamp, nil\n\t\t\t}\n\n\t\t\ttimestamp, err = time.Parse(timer+\" \"+date, datetime)\n\t\t\tif err == nil {\n\t\t\t\treturn timestamp, nil\n\t\t\t}\n\n\t\t\tfor _, zone := range customZone {\n\t\t\t\ttimestamp, err = time.Parse(date+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(timer+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+date, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+timer, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(date+\" \"+timer+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(date+\" \"+zone+\" \"+timer, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(timer+\" \"+date+\" \"+zone, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(timer+\" \"+zone+\" \"+date, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+timer+\" \"+date, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\n\t\t\t\ttimestamp, err = time.Parse(zone+\" \"+date+\" \"+timer, datetime)\n\t\t\t\tif err == nil {\n\t\t\t\t\treturn timestamp, nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn timestamp, err\n}",
"func parseTime(str string) (*time.Time, error) {\n\tif str == \"\" {\n\t\treturn nil, nil\n\t}\n\tt, err := time.Parse(twilioTimeLayout, str)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &t, err\n}",
"func Ydate(year int) parsec.Parser {\n\tpattdelimit := `[/.-]`\n\tpattmonth := `[0-9]{1,2}|Jan|jan|Feb|feb|Mar|mar|Apr|apr|` +\n\t\t`May|may|Jun|jun|Jul|jul|Aug|aug|Sep|sep|Oct|oct|` +\n\t\t`Nov|nov|Dec|dec`\n\tpattd2 := `[0-9]{1,2}`\n\tpattern := fmt.Sprintf(\n\t\t\"([0-9]{2,4}%v)?(%v)%v(%v)\"+ // date\n\t\t\t\"( (%v):(%v):(%v))?\", // time\n\t\tpattdelimit, pattmonth, pattdelimit, pattd2,\n\t\tpattd2, pattd2, pattd2,\n\t)\n\n\t// parts 1:year, 2:month, 3:date, 4:time, 5:hour, 6:minute, 7:second\n\tregc, err := regexp.Compile(pattern)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn parsec.And(\n\t\tfunc(nodes []parsec.ParsecNode) parsec.ParsecNode {\n\t\t\ttext := string(nodes[0].(*parsec.Terminal).Value)\n\t\t\tparts := regc.FindStringSubmatch(text)\n\t\t\tif parts[1] != \"\" {\n\t\t\t\tyr, _ := strconv.Atoi(strings.Trim(parts[1], pattdelimit))\n\t\t\t\tif yr < 100 {\n\t\t\t\t\tyr = ((year / 100) * 100) + yr\n\t\t\t\t}\n\t\t\t\tyear = yr\n\t\t\t}\n\t\t\tmonth := mon2index[parts[2]]\n\t\t\tdate, _ := strconv.Atoi(parts[3])\n\t\t\thour, min, sec := 0, 0, 0\n\t\t\tif parts[4] != \"\" {\n\t\t\t\thour, _ = strconv.Atoi(parts[5])\n\t\t\t\tmin, _ = strconv.Atoi(parts[6])\n\t\t\t\tsec, _ = strconv.Atoi(parts[7])\n\t\t\t}\n\t\t\ttm := time.Date(\n\t\t\t\tyear, time.Month(month), date, hour, min, sec, 0,\n\t\t\t\ttime.Local, /*locale*/\n\t\t\t)\n\t\t\tok := api.ValidateDate(tm, year, month, date, hour, min, sec)\n\t\t\tif ok == false {\n\t\t\t\tfmsg := \"invalid date %v/%v/%v %v:%v:%v\"\n\t\t\t\treturn fmt.Errorf(fmsg, year, month, date, hour, min, sec)\n\t\t\t}\n\t\t\tlog.Debugf(\"Ydate: %v\\n\", tm)\n\t\t\treturn tm\n\t\t},\n\t\tparsec.Token(pattern, \"DATETIME\"),\n\t)\n}",
"func getReTime(s string) time.Time {\n\t// format testing.\n\t//fmt.Println(\"day:\",s[0:2], \"Month:\",s[3:5],\"year:\",s[6:10],\"h:\",s[11:13],\"m:\",s[14:16],\"s:\",s[17:19])\n\tyear, err := strconv.Atoi(s[6:10])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tmonth, err := strconv.Atoi(s[3:5])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tday, err := strconv.Atoi(s[0:2])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\thour, err := strconv.Atoi(s[11:13])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tmin, err := strconv.Atoi(s[14:16])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tsec, err := strconv.Atoi(s[17:19])\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tloc, _ := time.LoadLocation(\"Local\")\n\treturn time.Date(year, time.Month(month), day, hour, min, sec, 0, loc)\n\n}",
"func (t Time) Date() (year int, month Month, day int) {}",
"func StringToTime(s string, layout ...string) (t time.Time, err error) {\n\tif s == \"\" || s == \"0000-00-00 00:00:00\" {\n\t\treturn\n\t} else if len(layout) > 0 && layout[0] != \"\" {\n\t\treturn time.Parse(layout[0], s)\n\t}\n\n\tlayouts := []string{\n\t\ttime.RFC3339Nano,\n\t\t\"2006-01-02 15:04:05\",\n\t\t\"2006-01-02T15:04:05\", // iso8601 without timezone\n\t\ttime.RFC1123Z,\n\t\ttime.RFC1123,\n\t\ttime.RFC822Z,\n\t\ttime.RFC822,\n\t\ttime.RFC850,\n\t\ttime.ANSIC,\n\t\ttime.UnixDate,\n\t\ttime.RubyDate,\n\t\t\"2006-01-02 15:04:05.999999999 -0700 MST\", // Time.String()\n\t\t\"2006-01-02\",\n\t\t\"02 Jan 2006\",\n\t\t\"2006-01-02T15:04:05-0700\", // RFC3339 without timezone hh:mm colon\n\t\t\"2006-01-02 15:04:05 -07:00\",\n\t\t\"2006-01-02 15:04:05 -0700\",\n\t\t\"2006-01-02 15:04:05Z07:00\", // RFC3339 without T\n\t\t\"2006-01-02 15:04:05Z0700\", // RFC3339 without T or timezone hh:mm colon\n\t\t\"2006-01-02 15:04:05\",\n\t\ttime.Kitchen,\n\t\ttime.Stamp,\n\t\ttime.StampMilli,\n\t\ttime.StampMicro,\n\t\ttime.StampNano,\n\t}\n\n\tfor _, layout := range layouts {\n\t\tif t, err = time.Parse(layout, s); err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\treturn t, fmt.Errorf(\"unable to parse time: '%s'\", s)\n}",
"func parseMil(mil string) (time.Time, bool) {\n\n\tif len(mil) != 10 {\n\t\treturn time.Time{}, false\n\t}\n\n\tmonth, err := strconv.Atoi(mil[0:2])\n\tif err != nil {\n\t\treturn time.Time{}, false\n\t}\n\n\tday, err := strconv.Atoi(mil[2:4])\n\tif err != nil {\n\t\treturn time.Time{}, false\n\t}\n\n\tyear, err := strconv.Atoi(mil[4:6])\n\tif err != nil {\n\t\treturn time.Time{}, false\n\t}\n\tyear += 2000\n\n\thour, err := strconv.Atoi(mil[6:8])\n\tif err != nil {\n\t\treturn time.Time{}, false\n\t}\n\n\tmin, err := strconv.Atoi(mil[8:10])\n\tif err != nil {\n\t\treturn time.Time{}, false\n\t}\n\n\treturn time.Date(year, time.Month(month), day, hour, min, 0, 0, time.UTC), true\n}",
"func date(s string) (time.Time, error) {\n date, err := time.Parse(layoutISO, s)\n return date, err\n}",
"func convertTimeFormat(s string) time.Time {\n\tif s != \"\" {\n\t\tinitT, err := time.Parse(\"01/02/2006\", s)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tsT := initT.Format(time.RFC3339)\n\t\tt, err := time.Parse(time.RFC3339, sT)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn t\n\t}\n\tt := new(time.Time)\n\treturn *t\n}",
"func StringToTime(dateString string) (time.Time, error) {\n\n\tvar t time.Time\n\tvar err error\n\n\tt, err = time.Parse(\"2006-01-02 15:04:05\", dateString)\n\tif err == nil {\n\t\treturn t, err\n\t}\n\n\tt, err = time.Parse(\"2006-01-02\", dateString)\n\tif err == nil {\n\t\treturn t, err\n\t}\n\n\tt, err = time.Parse(time.RFC3339, dateString)\n\tif err == nil {\n\t\treturn t, err\n\t}\n\n\tmsg := \"Error parsing date string - expected one of the following layouts: '%s', '%s', OR '%s'\"\n\tmsg = fmt.Sprintf(msg, \"2006-01-02\", \"2006-01-02 15:04:05\", time.RFC3339)\n\treturn t, errors.New(msg)\n}",
"func strToTime(s string) (time.Time, error) {\n\tvar t time.Time\n\tvar err error\n\tfor _, f := range _DATE_FORMATS {\n\t\tt, err = time.ParseInLocation(f, s, time.Local)\n\t\tif err == nil {\n\t\t\treturn t, nil\n\t\t}\n\t}\n\n\treturn t, err\n}",
"func Parse(format, s string) time.Time {\n\tt, _ := time.Parse(format, s)\n\treturn t\n}",
"func parseDate(dateString string) time.Time {\n\n\tdate, err := time.Parse(\"2006-01-02\", dateString)\n\tif err != nil {\n\t\tlog.Fatalf(\"Given date \\\"%s\\\" invalid. We expect the format YYYY-MM-DD.\\n\", dateString)\n\t}\n\n\treturn date\n}",
"func parseTime(t string) (time.Time, error) {\n\tepoch, err := strconv.ParseInt(t, 10, 64)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\treturn time.Unix(epoch, 0), nil\n}",
"func StringToDateE(s string) (time.Time, error) {\n\n\ttm, err := parseDateWith(s, []string{\n\t\ttime.RFC3339,\n\t\t\"2006-01-02T15:04:05\", // iso8601 without timezone\n\t\ttime.RFC1123Z,\n\t\ttime.RFC1123,\n\t\ttime.RFC822Z,\n\t\ttime.RFC822,\n\t\ttime.RFC850,\n\t\ttime.ANSIC,\n\t\ttime.UnixDate,\n\t\ttime.RubyDate,\n\t\t\"2006-01-02 15:04:05.999999999 -0700 MST\", // Time.String()\n\t\t\"2006-01-02\",\n\t\t\"02 Jan 2006\",\n\t\t\"2006-01-02T15:04:05-0700\", // RFC3339 without timezone hh:mm colon\n\t\t\"2006-01-02 15:04:05 -07:00\",\n\t\t\"2006-01-02 15:04:05 -0700\",\n\t\t\"2006-01-02 15:04:05Z07:00\", // RFC3339 without T\n\t\t\"2006-01-02 15:04:05Z0700\", // RFC3339 without T or timezone hh:mm colon\n\t\t\"2006-01-02 15:04:05\",\n\t\t\"2006-01-02 15:04:05.000\",\n\t\ttime.Kitchen,\n\t\ttime.Stamp,\n\t\ttime.StampMilli,\n\t\ttime.StampMicro,\n\t\ttime.StampNano,\n\t\t\"02/01/2006 15:04:05\", // indonesian date time\n\t\t\"02/01/2006 15:04:05.000\", // indonesian date time\n\t\t\"02/01/2006\", // indonesian date\n\t})\n\n\treturn tm, err\n}",
"func parseTime(accum string) (time.Time, error) {\n\tformats := []string{time.RFC822, time.RFC822Z, time.RFC1123, time.RFC1123Z,\n\t\ttime.RFC3339, \"2006-1-2\"}\n\n\tfor _, format := range formats {\n\t\ttm, err := time.Parse(format, accum)\n\t\tif err == nil {\n\t\t\treturn tm, nil\n\t\t}\n\t}\n\n\treturn time.Time{}, MkError(\"Could not parse time: %s\", accum)\n}",
"func (t *Time) ParseFrom(b []byte) error {\n\n\tif t == nil {\n\t\treturn _ERR_NIL_TIME_RECEIVER\n\t}\n\n\tvar i = 0\n\tfor n := len(b); i < n && b[i] <= ' '; i++ { }\n\n\t// Minimum required len: 4 (hhmm - w/o separators, w/o seconds).\n\tif len(b[i:]) < 4 {\n\t\treturn _ERR_NOT_ISO8601_TIME\n\t}\n\n\tx, valid := batoi(b[i], b[i+1])\n\tif !valid || x < 0 || x > 23 {\n\t\treturn _ERR_BAD_HOUR\n\t}\n\n\ti += 2\n\thh := Hour(x)\n\twasSeparator := false\n\n\t// Skip separator\n\tif !(b[i] >= '0' && b[i] <= '9') {\n\t\ti++\n\t\twasSeparator = true\n\t}\n\n\t// At this code point, len(b) may == 1. Check it.\n\tif len(b[i:]) == 1 {\n\t\treturn _ERR_NOT_ISO8601_TIME\n\t}\n\n\tx, valid = batoi(b[i], b[i+1])\n\tif !valid || x < 0 || x > 59 {\n\t\treturn _ERR_BAD_MINUTE\n\t}\n\n\ti += 2\n\tmm := Minute(x)\n\tss := Second(0)\n\n\t// At this code point user may provide \"hhmm\" w/o seconds.\n\t// Check whether seconds are provided.\n\tif l := len(b[i:]); l > 0 {\n\t\t// We need 2 symbols if there was no separator, or 3 symbols otherwise.\n\t\tif (l == 1 && !wasSeparator) || (l == 2 && wasSeparator) {\n\t\t\treturn _ERR_NOT_ISO8601_TIME\n\t\t}\n\t\tif wasSeparator {\n\t\t\ti++\n\t\t}\n\t\tx, valid = batoi(b[i], b[i+1])\n\t\tif !valid || x < 0 || x > 59 {\n\t\t\treturn _ERR_BAD_SECOND\n\t\t}\n\t\tss = Second(x)\n\t}\n\n\n\tif !IsValidTime(hh, mm, ss) {\n\t\treturn _ERR_BAD_CORRESP_TIME\n\t}\n\n\t*t = NewTime(hh, mm, ss)\n\treturn nil\n}",
"func parseTime(timeString string) (time.Time, error) {\n\tif timeString == \"\" {\n\t\treturn time.Time{}, errors.New(\"need time string\")\n\t}\n\n\tt, err := time.Parse(dateFormat, timeString)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\n\t// time.Parse() is \"clever\" but also doesn't check anything more\n\t// granular than a second, so let's be completely paranoid and check\n\t// via regular expression too.\n\tmatched := dateFormatRE.FindAllStringSubmatch(timeString, -1)\n\tif matched == nil {\n\t\treturn time.Time{},\n\t\t\tfmt.Errorf(\"expected time in format %q, got %q\", dateFormatPattern, timeString)\n\t}\n\n\treturn t, nil\n}",
"func ParseAny(datestr string) (time.Time, error) {\n\treturn parseTime(datestr, nil)\n}",
"func parseTime(from string, offset *time.Time) (time.Time, error) {\n\tvar start time.Time\n\tif offset != nil {\n\t\tstart = *offset\n\t} else {\n\t\tstart = time.Now()\n\t}\n\ty, m, d, dur, r, err := parseISO8601Duration(from)\n\tif err == nil {\n\t\tif r != -1 {\n\t\t\treturn time.Time{}, errors.Errorf(\"repetitions are not allowed\")\n\t\t}\n\t\treturn start.AddDate(y, m, d).Add(dur), nil\n\t}\n\tif dur, err = time.ParseDuration(from); err == nil {\n\t\treturn start.Add(dur), nil\n\t}\n\tif t, err := time.Parse(time.RFC3339, from); err == nil {\n\t\treturn t, nil\n\t}\n\treturn time.Time{}, errors.Errorf(\"unsupported time/duration format %q\", from)\n}",
"func Parse(layout, value string) (*time.Time, error) {\n\tt, err := time.Parse(layout, value)\n\treturn &t, err\n}",
"func parseTime(timeString string) (time.Time, error) {\n\tfor _, form := range timeFormat {\n\t\ttimes, err := time.Parse(form, timeString)\n\t\tif err == nil {\n\t\t\t//Parse time value successful\n\t\t\treturn times, nil\n\t\t}\n\t}\n\t//Parse time value unsuccessful\n\treturn time.Now(), fmt.Errorf(\"parsing time %q error\", timeString)\n}",
"func ParseHeader(timeStr string) (time.Time, error) {\n\tfor _, dateFormat := range httpTimeFormats {\n\t\tt, err := time.Parse(dateFormat, timeStr)\n\t\tif err == nil {\n\t\t\treturn t, nil\n\t\t}\n\t}\n\treturn time.Time{}, ErrMalformedDate\n}",
"func ParseDate(in string) time.Time {\n\tt, err := time.Parse(DateFormat, in)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}",
"func (d *Date) Time() (time.Time, error) {\n\treturn time.Parse(\"2006-01-02\", string(*d))\n}",
"func ParserDate(date string) (*time.Time, error) {\n\tlayout := \"02/01/2006\"\n\tt, err := time.Parse(layout, date)\n\tif err != nil {\n\t\treturn nil, ErrParseDate\n\t}\n\treturn &t, nil\n}",
"func decodeTime(data []byte) time.Time {\n\tsec := int(data[0] & 0x3F)\n\tmin := int(data[1] & 0x3F)\n\thour := int(data[2] & 0x1F)\n\tday := int(data[3] & 0x1F)\n\t// The 4-bit month value is encoded in the high 2 bits of the first 2 bytes.\n\tmonth := time.Month(int(data[0]>>6)<<2 | int(data[1]>>6))\n\tyear := 2000 + int(data[4]&0x7F)\n\treturn time.Date(year, month, day, hour, min, sec, 0, time.Local)\n}",
"func (p *ArgumentParser) Parse(input string) (Type, *time.Time, error) {\n\tnow := p.Clock.NowUTC()\n\ttoday := time.Date(now.Year(), now.Month(), now.Day(), 0, 0, 0, 0, time.UTC)\n\tswitch input {\n\tcase \"this-hour\":\n\t\tthisHour := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, time.UTC)\n\t\treturn Hourly, &thisHour, nil\n\tcase \"today\":\n\t\treturn Daily, &today, nil\n\tcase \"this-week\":\n\t\tmonday := today\n\t\tfor monday.Weekday() != time.Monday {\n\t\t\tmonday = monday.AddDate(0, 0, -1)\n\t\t}\n\t\treturn Weekly, &monday, nil\n\tcase \"this-month\":\n\t\tfistDateOfMonth := time.Date(today.Year(), today.Month(), 1, 0, 0, 0, 0, time.UTC)\n\t\treturn Monthly, &fistDateOfMonth, nil\n\tcase \"last-hour\":\n\t\tthisHour := time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, time.UTC)\n\t\tlastHour := thisHour.Add(-time.Hour)\n\t\treturn Hourly, &lastHour, nil\n\tcase \"yesterday\":\n\t\tyesterday := today.AddDate(0, 0, -1)\n\t\treturn Daily, &yesterday, nil\n\tcase \"last-week\":\n\t\tlastWeek := today.AddDate(0, 0, -7)\n\t\tfor lastWeek.Weekday() != time.Monday {\n\t\t\tlastWeek = lastWeek.AddDate(0, 0, -1)\n\t\t}\n\t\treturn Weekly, &lastWeek, nil\n\tcase \"last-month\":\n\t\tlastMonth := today.AddDate(0, -1, 0)\n\t\tfistDateOfMonth := time.Date(lastMonth.Year(), lastMonth.Month(), 1, 0, 0, 0, 0, time.UTC)\n\t\treturn Monthly, &fistDateOfMonth, nil\n\t}\n\n\t// match format \"2006-01-02T15\"\n\tt, err := time.Parse(\"2006-01-02T15\", input)\n\tif err == nil {\n\t\tt = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, time.UTC)\n\t\treturn Hourly, &t, nil\n\t}\n\n\t// match format \"2006-01-02\"\n\tt, err = time.Parse(\"2006-01-02\", input)\n\tif err == nil {\n\t\tt = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, time.UTC)\n\t\treturn Daily, &t, nil\n\t}\n\n\t// match format \"2006-01\"\n\tt, err = time.Parse(\"2006-01\", input)\n\tif err == nil {\n\t\tfistDateOfMonth := time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, time.UTC)\n\t\treturn Monthly, &fistDateOfMonth, nil\n\t}\n\n\t// match format \"2006-W37\"\n\tmatches := iso8601WeekRegex.FindStringSubmatch(input)\n\tif len(matches) == 3 {\n\t\tyear, err := strconv.Atoi(matches[1])\n\t\tif err != nil {\n\t\t\treturn \"\", nil, ErrInvalidPeriodical\n\t\t}\n\n\t\tweek, err := strconv.Atoi(matches[2])\n\t\tif err != nil {\n\t\t\treturn \"\", nil, ErrInvalidPeriodical\n\t\t}\n\n\t\tfirstDayOfISOWeek, err := timeutil.FirstDayOfISOWeek(year, week, time.UTC)\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\n\t\treturn Weekly, firstDayOfISOWeek, nil\n\t}\n\n\treturn \"\", nil, ErrInvalidPeriodical\n}",
"func NormalizeDate(s string) (time.Time, error) {\n\tswitch len(s) {\n\tcase len(`2006-01-02 15:04:05 -0700`):\n\t\tdt, err := time.Parse(`2006-01-02 15:04:05 -0700`, s)\n\t\treturn dt, err\n\tcase len(`2006-01-02 15:04:05`):\n\t\tdt, err := time.Parse(`2006-01-02 15:04:05`, s)\n\t\treturn dt, err\n\tcase len(`2006-01-02`):\n\t\tdt, err := time.Parse(`2006-01-02`, s)\n\t\treturn dt, err\n\tdefault:\n\t\treturn time.Time{}, fmt.Errorf(\"Can't format %s, expected format like 2006-01-02 15:04:05 -0700\", s)\n\t}\n}",
"func (d *DateField) Date() (t time.Time, err error) {\n\tif len(d.DateParts) == 0 {\n\t\treturn t, errNoDate\n\t}\n\tparts := d.DateParts[0]\n\tswitch len(parts) {\n\tcase 1:\n\t\tt, err = time.Parse(\"2006-01-02\", fmt.Sprintf(\"%04d-01-01\", parts[0]))\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\tcase 2:\n\t\tt, err = time.Parse(\"2006-01-02\", fmt.Sprintf(\"%04d-%02d-01\", parts[0], parts[1]))\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\tcase 3:\n\t\tt, err = time.Parse(\"2006-01-02\", fmt.Sprintf(\"%04d-%02d-%02d\", parts[0], parts[1], parts[2]))\n\t\tif err != nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\treturn t, err\n}",
"func parseTime(s string) (time.Time, error) {\n\tt, err := time.ParseInLocation(time.RFC1123, s, time.UTC)\n\tif err != nil {\n\t\treturn time.Time{}, fmt.Errorf(\"parsing time: %w\", err)\n\t}\n\n\treturn t, nil\n}",
"func (hms *HHMMSS) Parse(s string) (err error) {\n\tll := []string{\n\t\t\"15:04:05.999999999-07:00\",\n\t\t\"15:04:05.999999999Z07:00\",\n\t\t\"15:04:05.999999999\",\n\t\t\"15:04:05.999999-07:00\",\n\t\t\"15:04:05.999999Z07:00\",\n\t\t\"15:04:05.999999\",\n\t\t\"15:04:05.999-07:00\",\n\t\t\"15:04:05.999Z07:00\",\n\t\t\"15:04:05.999\",\n\t\t\"15:04:05-07:00\",\n\t\t\"15:04:05Z07:00\",\n\t\t\"15:04:05\",\n\t\t\"15:04-07:00\",\n\t\t\"15:04Z07:00\",\n\t\t\"15:04\",\n\t\t\"15-07:00\",\n\t\t\"15Z07:00\",\n\t\t\"15\",\n\t}\n\tconst year = \"2006 \"\n\tys := year + s\n\tfor _, l := range ll {\n\t\ttm, e := time.ParseInLocation(year+l, ys, time.Local)\n\t\tif e == nil {\n\t\t\t*(*time.Duration)(hms) = tm.Sub(tm.Truncate(24 * time.Hour))\n\t\t\treturn nil\n\t\t}\n\t\terr = e\n\t}\n\terr = &time.ParseError{\n\t\tValue: s,\n\t\tMessage: \": cannot parse into HHMMSS\",\n\t}\n\treturn\n}",
"func TimeParse() time.Time {\n\tnow := time.Now()\n\tthen := now.AddDate(0, 0, -1).Format(\"2006-01-02T15:04:05:0700\")\n\tsince, _ := time.Parse(\"2006-01-02\", then)\n\treturn since\n}",
"func ParseDate(strDate string) time.Time {\n\tp := dateReg.FindStringSubmatch(strDate)\n\tyear, _ := strconv.Atoi(p[1])\n\tmon, _ := strconv.Atoi(p[2])\n\tday, _ := strconv.Atoi(p[3])\n\treturn time.Date(year+1911, time.Month(mon), day, 0, 0, 0, 0, TaipeiTimeZone)\n}",
"func parseDate(layout, date string) time.Time {\n\td, err := time.Parse(layout, date)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn d\n}",
"func ExtractFromString(date string, format string) (*date, error) {\r\n\t//dmap:=make(map[string]int,3)\r\n\tparts := strings.Split(date, \"/\")\r\n\tdm := dateString{}\r\n\tif len(parts) != 3 {\r\n\t\treturn nil, errors.New(\"a valid date is made of 3 components:day,month,year\")\r\n\t}\r\n\tswitch strings.ToUpper(format) {\r\n\tcase \"DD/MM/YY\":\r\n\t\terr := dm.dispatch(0, 1, 2, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\tcase \"JJ/MM/AA\":\r\n\t\t\terr := dm.dispatch(0, 1, 2, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\tcase \"DD/MM/YYYY\" :\r\n\t\terr := dm.dispatch(0, 1, 2, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\t\tcase \"JJ/MM/AAAA\":\r\n\t\t\terr := dm.dispatch(0, 1, 2, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\tcase \"MM/DD/YY\" :\r\n\t\terr := dm.dispatch(1, 0, 2, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\t\tcase \"MM/JJ/AA\":\r\n\t\t\terr := dm.dispatch(1, 0, 2, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\r\n\tcase \"MM/DD/YYYY\" :\r\n\t\terr := dm.dispatch(1, 0, 2, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\tcase \"MM/JJ/AAAA\":\r\n\t\t\terr := dm.dispatch(1, 0, 2, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\tcase \"YY/DD/MM\" :\r\n\t\terr := dm.dispatch(1, 2, 0, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\t\tcase \"AA/JJ/MM\":\r\n\t\t\terr := dm.dispatch(1, 2, 0, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\tcase \"YYYY/DD/MM\" :\r\n\t\terr := dm.dispatch(1, 2, 0, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\tcase \"AAAA/JJ/MM\":\r\n\t\t\terr := dm.dispatch(1, 2, 0, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\tcase \"YY/MM/DD\" :\r\n\t\terr := dm.dispatch(2, 1, 0, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\tcase \"AA/MM/JJ\":\r\n\t\t\terr := dm.dispatch(2, 1, 0, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\tcase \"YYYY/MM/DD\" :\r\n\t\terr := dm.dispatch(2, 1, 0, parts)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\treturn dm.isValid()\r\n\tcase \"AAAA/MM/JJ\":\r\n\t\t\terr := dm.dispatch(2, 1, 0, parts)\r\n\t\t\tif err != nil {\r\n\t\t\t\treturn nil, err\r\n\t\t\t}\r\n\t\t\treturn dm.isValid()\r\n\tdefault:\r\n\t\treturn nil, errors.New(\"a valid date is made of 3 components:day,month,year\")\r\n\t}\r\n}",
"func hourToTime(s string) time.Time {\n\tvar now = time.Now()\n\tvar hhmm = strings.Split(s, \":\")\n\n\thh := toInt(hhmm[0])\n\tmm := toInt(hhmm[1])\n\ty,m, d := now.Date()\n\tthen := time.Date(y, m, d, hh, mm, 0, 0, time.Local)\n\n\t//Dprintf(\"s = %s, now = %v, then = %v\\n\", s, now, then)\n\treturn then\n}",
"func ToTime(s string, layouts ...string) (t time.Time, err error) {\n\t// custom layout\n\tif len(layouts) > 0 {\n\t\tif len(layouts[0]) > 0 {\n\t\t\treturn time.Parse(layouts[0], s)\n\t\t}\n\n\t\terr = ErrDateLayout\n\t\treturn\n\t}\n\n\t// auto match use some commonly layouts.\n\tstrLn := len(s)\n\tmaybeLayouts, ok := layoutMap[strLn]\n\tif !ok {\n\t\terr = ErrInvalidParam\n\t\treturn\n\t}\n\n\tvar hasAlphaT bool\n\tif pos := strings.IndexByte(s, 'T'); pos > 0 && pos < 12 {\n\t\thasAlphaT = true\n\t}\n\n\thasSlashR := strings.IndexByte(s, '/') > 0\n\tfor _, layout := range maybeLayouts {\n\t\t// date string has \"T\". eg: \"2006-01-02T15:04:05\"\n\t\tif hasAlphaT {\n\t\t\tlayout = strings.Replace(layout, \" \", \"T\", 1)\n\t\t}\n\n\t\t// date string has \"/\". eg: \"2006/01/02 15:04:05\"\n\t\tif hasSlashR {\n\t\t\tlayout = strings.Replace(layout, \"-\", \"/\", -1)\n\t\t}\n\n\t\tt, err = time.Parse(layout, s)\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\n\t// t, err = time.ParseInLocation(layout, s, time.Local)\n\treturn\n}",
"func MustParse(datestr string) time.Time {\n\tt, err := parseTime(datestr, nil)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn t\n}",
"func StringToDate(s string) (time.Time, error) {\n\treturn parseDateWith(s, []string{\n\t\ttime.RFC3339,\n\t\t\"2006-01-02T15:04:05\", // iso8601 without timezone\n\t\ttime.RFC1123Z,\n\t\ttime.RFC1123,\n\t\ttime.RFC822Z,\n\t\ttime.RFC822,\n\t\ttime.ANSIC,\n\t\ttime.UnixDate,\n\t\ttime.RubyDate,\n\t\t\"2006-01-02 15:04:05Z07:00\",\n\t\t\"02 Jan 06 15:04 MST\",\n\t\t\"2006-01-02\",\n\t\t\"02 Jan 2006\",\n\t\t\"2006-01-02 15:04:05 -07:00\",\n\t\t\"2006-01-02 15:04:05 -0700\",\n\t\t\"2006-01-02 15:04:05\",\n\t})\n\n}",
"func ParseDate(dateStr string) (time.Time, error) {\n\tfor _, layout := range SupportedLayouts {\n\t\tt, err := time.Parse(layout, dateStr)\n\n\t\tif err == nil {\n\t\t\treturn t, nil\n\t\t}\n\t}\n\n\treturn time.Time{}, errors.New(\"error: '\" + dateStr + \"' is not in a supported format.\")\n}",
"func ParseAsFormat(input *string, format string) (*time.Time, error) {\n\tif input == nil {\n\t\treturn nil, nil\n\t}\n\n\tval, err := time.Parse(format, *input)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing %q: %+v\", *input, err)\n\t}\n\n\treturn &val, nil\n}",
"func String2Time(stime string) time.Time {\n\ttimeObj, err := time.Parse(\"2006-01-02 15:04:05\", stime)\n\tif err != nil {\n\t\tpanic(err)\n\t\treturn time.Now()\n\t}\n\treturn timeObj\n}",
"func StringToDate(s string) (time.Time, error) {\n\treturn parsedate(s, []string{\n\t\ttime.RFC3339,\n\t\t\"2006-01-02T15:04:05\",\n\t\ttime.RFC1123Z,\n\t\ttime.RFC1123,\n\t\ttime.RFC822Z,\n\t\ttime.RFC822,\n\t\ttime.RFC850,\n\t\ttime.ANSIC,\n\t\ttime.UnixDate,\n\t\ttime.RubyDate,\n\t\t\"2006-01-02 15:04:05.999999999 -0700 MST\",\n\t\t\"2006-01-02\",\n\t\t\"02 Jan 2006\",\n\t\t\"2006-01-02T15:04:05-0700\",\n\t\t\"2006-01-02 15:04:05 -07:00\",\n\t\t\"2006-01-02 15:04:05 -0700\",\n\t\t\"2006-01-02 15:04:05Z07:00\",\n\t\t\"2006-01-02 15:04:05Z0700\",\n\t\t\"2006-01-02 15:04:05\",\n\t\ttime.Kitchen,\n\t\ttime.Stamp,\n\t\ttime.StampMilli,\n\t\ttime.StampMicro,\n\t\ttime.StampNano,\n\t})\n}",
"func ParseDate(dateStr string) (time.Time, error) {\n\tlayout := \"2006-01-02\"\n\treturn time.Parse(layout, dateStr)\n}",
"func parseTime(s string) (time.Time, error) {\n\t// attempt to parse time as RFC3339 string\n\tt, err := time.Parse(time.RFC3339Nano, s)\n\tif err == nil {\n\t\treturn t, nil\n\t}\n\n\t// attempt to parse time as float number of unix seconds\n\tif f, err := strconv.ParseFloat(s, 64); err == nil {\n\t\tsec, dec := math.Modf(f)\n\t\treturn time.Unix(int64(sec), int64(dec*(1e9))), nil\n\t}\n\n\t// attempt to parse time as json marshaled value\n\tif err := json.Unmarshal([]byte(s), &t); err == nil {\n\t\treturn t, nil\n\t}\n\n\treturn time.Time{}, err\n}",
"func parseTime(s string) (time.Time, error) {\n\treturn time.Parse(time.RFC1123Z, s)\n}",
"func Parse(amzDateStr string) (time.Time, error) {\n\tfor _, dateFormat := range amzDateFormats {\n\t\tamzDate, err := time.Parse(dateFormat, amzDateStr)\n\t\tif err == nil {\n\t\t\treturn amzDate, nil\n\t\t}\n\t}\n\treturn time.Time{}, ErrMalformedDate\n}",
"func parseTime(s string) (hour, minute int64, err error) {\n\ttime := strings.Split(s, \":\")\n\thour, err = strconv.ParseInt(time[0], 10, 64)\n\tif err != nil {\n\t\treturn\n\t}\n\tminute, err = strconv.ParseInt(time[1], 10, 64)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn\n}",
"func parseDateFromString(now time.Time, holidayDay holiday) (time.Time) {\r\n\r\n\t// the data in the json file is a string like \"yyyy-mm-dd\",\r\n\t// we break it into an array of three cells [yyyy, mm, dd]\r\n\t// and create an object of type Date\r\n\t//instead of the unknown parameters- we use the parameters of the current date\r\n\r\n tmpDateArr := strings.Split(holidayDay.Date, \"-\")\r\n tmpMonth, _ := strconv.Atoi(tmpDateArr[1])\r\n tmpDay, _ := strconv.Atoi(tmpDateArr[2])\r\n tmpDateOfHoliday := time.Date(\r\n \t\t\t\tnow.Year(), time.Month(tmpMonth), tmpDay,\r\n \t\t\t\tnow.Hour(), now.Minute(), now.Second(),\r\n \t\t\t\tnow.Nanosecond(), now.Location())\r\n return tmpDateOfHoliday\r\n}",
"func strToTime(str string) (int, int, float64, bool, bool, formulaArg) {\n\tvar subMatch []string\n\tpattern := \"\"\n\tfor key, tf := range timeFormats {\n\t\tsubMatch = tf.FindStringSubmatch(str)\n\t\tif len(subMatch) > 1 {\n\t\t\tpattern = key\n\t\t\tbreak\n\t\t}\n\t}\n\tif pattern == \"\" {\n\t\treturn 0, 0, 0, false, false, newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t}\n\tdateIsEmpty := subMatch[1] == \"\"\n\tsubMatch = subMatch[49:]\n\tvar (\n\t\tl = len(subMatch)\n\t\tlast = subMatch[l-1]\n\t\tam = last == \"am\"\n\t\tpm = last == \"pm\"\n\t\thours, minutes int\n\t\tseconds float64\n\t\terr error\n\t)\n\tif handler, ok := map[string]func(match []string) (int, int, float64, error){\n\t\t\"hh\": strToTimePatternHandler1,\n\t\t\"hh:mm\": strToTimePatternHandler2,\n\t\t\"mm:ss\": strToTimePatternHandler3,\n\t\t\"hh:mm:ss\": strToTimePatternHandler4,\n\t}[pattern]; ok {\n\t\tif hours, minutes, seconds, err = handler(subMatch); err != nil {\n\t\t\treturn 0, 0, 0, false, false, newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t\t}\n\t}\n\tif minutes >= 60 {\n\t\treturn 0, 0, 0, false, false, newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t}\n\tif am || pm {\n\t\tif hours > 12 || seconds >= 60 {\n\t\t\treturn 0, 0, 0, false, false, newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t\t} else if hours == 12 {\n\t\t\thours = 0\n\t\t}\n\t} else if hours >= 24 || seconds >= 10000 {\n\t\treturn 0, 0, 0, false, false, newErrorFormulaArg(formulaErrorVALUE, formulaErrorVALUE)\n\t}\n\treturn hours, minutes, seconds, pm, dateIsEmpty, newEmptyFormulaArg()\n}",
"func MakeTime(yearString string, monthString string, dayString string) (time.Time, error) {\n\tday, errParseDay := ParseStringToInt(dayString)\n\tif errParseDay != nil {\n\t\treturn time.Now(), errParseDay\n\t}\n\tmonth, errParseMonth := ParseStringToInt(monthString)\n\tif errParseMonth != nil {\n\t\treturn time.Now(), errParseMonth\n\t}\n\tyear, errParseYear := ParseStringToInt(yearString)\n\tif errParseYear != nil {\n\t\treturn time.Now(), errParseYear\n\t}\n\treturn time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.FixedZone(\"Europe/Paris\", 0)), nil\n}",
"func ParseTimeArg(arg string) (time.Time, error) {\n\tdateFmts := []string{\n\t\t\"2006-01-02 3:04:05 PM MST\",\n\t\t\"01-02 3:04:05 PM MST\",\n\t\t\"2006-01-02 3:04 PM MST\",\n\t\t\"01-02 3:04 PM MST\",\n\t\ttime.Kitchen,\n\t\t\"2006-01-02\",\n\t}\n\tvar res time.Time\n\tfor _, dateFmt := range dateFmts {\n\t\t// Special update for kitchen time format to include year, month,\n\t\t// and day.\n\t\tif dateFmt == time.Kitchen {\n\t\t\tloc, _ := time.LoadLocation(\"Local\")\n\t\t\tt, err := time.ParseInLocation(dateFmt, arg, loc)\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tres = t\n\t\t\tn := time.Now()\n\t\t\t// Month and day default values are already 1!\n\t\t\tres = res.AddDate(n.Year(), int(n.Month())-1, n.Day()-1)\n\t\t\tbreak\n\t\t} else {\n\t\t\tt, err := time.Parse(dateFmt, arg)\n\t\t\tif err == nil {\n\t\t\t\tres = t\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\tif res.IsZero() {\n\t\tmsg := fmt.Sprintf(\"Unable to parse %s using formats %v\", arg, dateFmts)\n\t\treturn res, errors.New(msg)\n\t}\n\tif res.Year() == 0 {\n\t\tres = res.AddDate(time.Now().In(res.Location()).Year(), 0, 0)\n\t}\n\treturn res.UTC(), nil\n}",
"func timeParse(ts string) (time.Time, error) {\n\tformat := timeFormat[:len(ts)]\n\tt, err := time.ParseInLocation(format, ts, time.Local)\n\treturn t, err\n}",
"func (act *Activity) DateParsed() time.Time {\n\tvar d time.Time\n\tif act.Date == \"\" {\n\t\treturn d\n\t}\n\td, _ = time.Parse(DateFormat, act.Date[:10])\n\treturn d\n}",
"func parseTime(cmdArgs []string) (*goment.Goment, error) {\n\tg, _ := goment.New()\n\n\ttime := cmdArgs[1]\n\n\t// Whether it's necessary to add 12 hours to the time (goment expects a 24 hour format).\n\thasPM := regexp.MustCompile(`(?i)pm`).MatchString(time)\n\t// Cleanup the time input.\n\ttime = regexp.MustCompile(`(?i)(pm|am)`).ReplaceAllString(time, \"\")\n\n\t// 13:37, 13.37, 4:20am, 4.20am\n\tvar timeParts []string\n\tif strings.Contains(time, \":\") {\n\t\ttimeParts = strings.Split(time, \":\")\n\t} else if strings.Contains(time, \".\") {\n\t\ttimeParts = strings.Split(time, \".\")\n\t} else {\n\t\ttimeParts = []string{time}\n\t}\n\n\thour, err := strconv.ParseInt(timeParts[0], 10, 32)\n\tif err != nil {\n\t\treturn nil, errors.New(\"cannot parse hour\")\n\t}\n\tif hour < 0 || hour > 23 {\n\t\treturn nil, errors.New(\"invalid hour format\")\n\t}\n\tif hasPM && hour <= 12 {\n\t\thour += 12\n\t}\n\tg.SetHour(int(hour))\n\n\tvar minute int64\n\tif len(timeParts) > 1 {\n\t\tminute, err = strconv.ParseInt(timeParts[1], 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"cannot parse minute\")\n\t\t}\n\t\tif minute < 0 || minute > 60 {\n\t\t\treturn nil, errors.New(\"invalid minute format\")\n\t\t}\n\t}\n\tg.SetMinute(int(minute))\n\n\tvar second int64\n\tif len(timeParts) > 2 {\n\t\tsecond, err = strconv.ParseInt(timeParts[2], 10, 32)\n\t\tif err != nil {\n\t\t\treturn nil, errors.New(\"cannot parse second\")\n\t\t}\n\t\tif second < 0 || second > 60 {\n\t\t\treturn nil, errors.New(\"invalid second format\")\n\t\t}\n\t}\n\tg.SetSecond(int(second))\n\n\treturn g, nil\n}",
"func DayFromTime(t time.Time) Day {\n\tyear, month, day := t.Date()\n\treturn Day{year, day, month, TimeZone(t.Location().String())}\n}",
"func parseDate(s string) (combatEventDate time.Time, err error) {\n\tlayout := \"1/2 15:04:05.000 2006\"\n\tcurrentDate := time.Now()\n\tcurrentYear := currentDate.Year()\n\n\tcombatEventDate, err = time.Parse(layout, s+\" \"+strconv.Itoa(currentYear))\n\tif combatEventDate.Year() != currentYear {\n\t\terr = fmt.Errorf(\"event: Failed to parse event date of '%s'\", s)\n\t}\n\n\tif combatEventDate.After(currentDate) {\n\t\tpreviousYear := currentYear - 1\n\t\tcombatEventDate, err = time.Parse(layout, s+\" \"+strconv.Itoa(previousYear))\n\t\tif combatEventDate.Year() != previousYear {\n\t\t\terr = fmt.Errorf(\"event: Failed to parse old event date of '%s'\", s)\n\t\t}\n\t}\n\n\treturn combatEventDate, err\n}",
"func parseDate(t string) string {\n\tif tm, err := time.Parse(\"15:04P\", t); err == nil {\n\t\treturn tm.Format(\"15:04:05\")\n\t}\n\treturn \"00:00:00\"\n}",
"func Parse3339(s string) (time.Time, error) {\n\tzone := zoneOf(s)\n\tif zone == \"\" {\n\t\t// Invalid or weird timezone offset. Use slow path,\n\t\t// which'll probably return an error.\n\t\treturn time.Parse(time.RFC3339Nano, s)\n\t}\n\tloc, err := getLocation(zone, s)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\ts = s[:len(s)-len(zone)] // remove zone suffix\n\tvar year, mon, day, hr, min, sec, nsec int\n\tconst baseLen = len(\"2020-04-05T15:56:00\")\n\tif len(s) < baseLen ||\n\t\t!parseInt(s[:4], &year) ||\n\t\ts[4] != '-' ||\n\t\t!parseInt(s[5:7], &mon) ||\n\t\ts[7] != '-' ||\n\t\t!parseInt(s[8:10], &day) ||\n\t\ts[10] != 'T' ||\n\t\t!parseInt(s[11:13], &hr) ||\n\t\ts[13] != ':' ||\n\t\t!parseInt(s[14:16], &min) ||\n\t\ts[16] != ':' ||\n\t\t!parseInt(s[17:19], &sec) {\n\t\treturn time.Time{}, errors.New(\"invalid time\")\n\t}\n\tnsStr := s[baseLen:]\n\tif nsStr != \"\" {\n\t\tif nsStr[0] != '.' {\n\t\t\treturn time.Time{}, errors.New(\"invalid optional nanosecond prefix\")\n\t\t}\n\t\tif !parseInt(nsStr[1:], &nsec) {\n\t\t\treturn time.Time{}, fmt.Errorf(\"invalid optional nanosecond number %q\", nsStr[1:])\n\t\t}\n\t\tfor i := 0; i < len(\"999999999\")-(len(nsStr)-1); i++ {\n\t\t\tnsec *= 10\n\t\t}\n\t}\n\treturn time.Date(year, time.Month(mon), day, hr, min, sec, nsec, loc), nil\n}",
"func getDate(s string) (time.Time, error) {\n\treturn time.ParseInLocation(\n\t\tdatetimeFormat,\n\t\tdatetimeRegex.FindString(path.Base(s)),\n\t\tNY)\n}",
"func (c Date) Time() time.Time {\n\tt, _ := time.Parse(\"2006-01-02\", c.String())\n\n\treturn t\n}",
"func strToTimeFormat(s string) (time.Time, string, error) {\n\tvar t time.Time\n\tvar err error\n\tfor _, f := range _DATE_FORMATS {\n\t\tt, err = time.ParseInLocation(f, s, time.Local)\n\t\tif err == nil {\n\t\t\treturn t, f, nil\n\t\t}\n\t}\n\n\treturn t, _DEFAULT_FORMAT, err\n}",
"func TimeParse(t TimeStr) (time.Time, error) {\n\treturn time.Parse(time.RFC3339, string(t))\n}",
"func parseDate(t string) (time.Time, bool) {\n\tdt, err := time.Parse(layout, t)\n\tif err != nil {\n\t\treturn dt, false\n\t}\n\treturn dt, true\n}",
"func ParseDay(str string) Day {\n\tt, err := time.Parse(\"2006-01-02\", str)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn date(t)\n}",
"func main() {\n p := fmt.Println\n\n // Here’s a basic example of formatting a time according to RFC3339.\n t := time.Now()\n p(t.Format(\"2006-01-02T15:04:05Z07:00\"))\n\n // Format uses an example-based layout approach; it takes a formatted \n // version of the reference time Mon Jan 2 15:04:05 MST 2006 to determine \n // the general pattern with which to format the given time. The example \n // time must be exactly as shown: the year 2006, 15 for the hour, Monday \n // for the day of the week, etc. Here are a few more examples of time \n // formatting.\n p(t.Format(\"3:04PM\"))\n p(t.Format(\"Mon Jan _2 15:04:05 2006\"))\n p(t.Format(\"2006-01-02T15:04:05.999999-07:00\"))\n\n // For purely numeric representations you can also use standard \n // string formatting with the extracted components of the time value.\n fmt.Printf(\"%d-%02d-%02dT%02d:%02d:%02d-00:00\\n\",\n t.Year(), t.Month(),t.Day(),\n t.Hour(), t.Minute(), t.Second())\n\n // Time parsing uses the same example-based approach as Formating. \n // These examples parse times rendered with some of the layouts used above.\n withNanos := \"2006-01-02T15:04:05.999999999-07:00\"\n t1, e := time.Parse(withNanos,\n \"2012-11-01T22:08:41.117442+00:00\")\n p(t1)\n kitchen := \"3:04PM\"\n t2, e := time.Parse(kitchen, \"8:41PM\")\n p(t2)\n\n // Parse will return an error on malformed input explaining the \n // parsing problem.\n ansic := \"Mon Jan _2 15:04:05 2006\"\n _, e = time.Parse(ansic, \"8:41PM\")\n p(e)\n\n // There are several predefined formats that you can use for both \n // formatting and parsing.\n p(t.Format(time.Kitchen))\n}",
"func TimeFromString(s string) dgo.Time {\n\tts, err := time.Parse(time.RFC3339Nano, s)\n\tif err != nil {\n\t\tpanic(catch.Error(err))\n\t}\n\treturn &timeVal{ts}\n}",
"func convertDate(date *string) {\n\n\tconst shortForm = \"02-Jan-2006\"\n\tt, _ := time.Parse(shortForm, *date)\n\ttimeString := t.String()\n\t*date = strings.Split(timeString, \" \")[0]\n\n}",
"func Parse(value string) (time.Time, error) {\n\tvar t time.Time\n\tvar err error\n\tfor _, layout := range layouts {\n\t\tt, err = time.Parse(layout, value)\n\t\tif err == nil {\n\t\t\treturn t, err\n\t\t}\n\t}\n\treturn t, err\n}",
"func parseDateTime(dateString, timeString string) (time.Time, error) {\n\tdateTime, err := time.Parse(time.RFC3339, dateString + \"T\" + timeString + \"-05:00\")\n\tif err != nil {\n\t\treturn dateTime, errors.New(\"Invalid Date or Time given.\")\n\t}\n\treturn dateTime, err\n}",
"func mustParseTime(s string) time.Time {\n\tt, err := time.Parse(time.RFC3339, s)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn t\n}"
] | [
"0.7036604",
"0.651316",
"0.6448794",
"0.6373066",
"0.63458323",
"0.61925656",
"0.6190765",
"0.6140698",
"0.6134023",
"0.60921896",
"0.60708255",
"0.6053343",
"0.60519713",
"0.60331255",
"0.60287833",
"0.6019247",
"0.6008316",
"0.59616876",
"0.59443885",
"0.59325457",
"0.59313726",
"0.5930213",
"0.5921789",
"0.59146804",
"0.5898031",
"0.58806896",
"0.58728456",
"0.58636653",
"0.58536804",
"0.5842251",
"0.5819346",
"0.5803434",
"0.5799965",
"0.5799823",
"0.5794053",
"0.5793114",
"0.5791649",
"0.57732695",
"0.5770085",
"0.5762779",
"0.5750456",
"0.57475704",
"0.57426405",
"0.57416105",
"0.57273394",
"0.5726078",
"0.5725172",
"0.57169515",
"0.5712552",
"0.5702847",
"0.57017756",
"0.56898683",
"0.5683423",
"0.56704885",
"0.56586",
"0.5656642",
"0.56545997",
"0.5634313",
"0.5619761",
"0.5588939",
"0.55843604",
"0.55732554",
"0.5560531",
"0.5558659",
"0.5549567",
"0.554935",
"0.5535909",
"0.55341405",
"0.55260694",
"0.552577",
"0.5516786",
"0.5513886",
"0.5510788",
"0.5501098",
"0.54968333",
"0.548677",
"0.5486051",
"0.54829794",
"0.54790723",
"0.5475063",
"0.546752",
"0.54670054",
"0.5461633",
"0.54504395",
"0.5449675",
"0.5430185",
"0.5417623",
"0.5406737",
"0.5380297",
"0.53785235",
"0.53783375",
"0.5372511",
"0.5367714",
"0.5366588",
"0.5366306",
"0.5363259",
"0.5349824",
"0.53442425",
"0.5342948",
"0.53218734"
] | 0.560642 | 59 |
Prints a map to log file | func PrintMap(m map[string]string) {
for k := range m {
		log.Println("\t", k, "=", m[k])
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func PrintMetaMap(metaMap map[string]FileMetaData) {\n\n\tfmt.Println(\"--------BEGIN PRINT MAP--------\")\n\n\tfor _, filemeta := range metaMap {\n\t\tfmt.Println(\"\\t\", filemeta.Filename, filemeta.Version, filemeta.BlockHashList)\n//\t\tfmt.Println(\"\\t\", filemeta.Filename, filemeta.Version)\n\t}\n\n\tfmt.Println(\"---------END PRINT MAP--------\")\n\n}",
"func PrintMetaMap(metaMap map[string]*FileMetaData) {\n\n\tfmt.Println(\"--------BEGIN PRINT MAP--------\")\n\n\tfor _, filemeta := range metaMap {\n\t\tfmt.Println(\"\\t\", filemeta.Filename, filemeta.Version, filemeta.BlockHashList)\n\t}\n\n\tfmt.Println(\"---------END PRINT MAP--------\")\n\n}",
"func printMap(form url.Values) {\n\tfmt.Println(\"BEGIN<PRINTING>\")\n\tfor key, value := range(form) {\n\t\tfmt.Printf(\"<%v> -> <%v>\\n\", key, value)\n\t}\n\tfmt.Println(\"END<PRINTING>\")\n}",
"func printMap(P map[string]map[string]Pair) {\n\tfmt.Println(\"{\")\n\tfor key,special_vector := range P {\n\t\tfmt.Print(\" \", key,\": \") // Stampa il nome dell'elemento\n\n\t\tfor process,pair := range special_vector {\n\t\t\tfmt.Print(process, \": \")\n\t\t\tprintPair(pair)\n\t\t}\n\n\t\tfmt.Println()\n\t}\n\tfmt.Print(\"}\")\n}",
"func (g *Graph) PrintMap() {\n\tfor city := range g.Cities {\n\t\tfmt.Print(city)\n\t\tfmt.Println(g.GetAdjNodes(city))\n\t}\n}",
"func Log(m map[string]interface{}) {\n\tfmt.Println(\"[debug] →\")\n\tfor k, v := range m {\n\t\tfmt.Printf(\"\\t%v: %+v\\n\", k, v)\n\t}\n\tfmt.Println(\"[debug] □\")\n}",
"func printMap(tempMap []string) {\n\tfor i := 0; i < len(tempMap); i++ {\n\t\tfmt.Printf(\"%s\\n\", tempMap[i])\n\t}\n}",
"func printMap(c map[string]string) {\n\t// Iterate over a map\n\tfor color, hex := range c {\n\t\tfmt.Println(\"Hex code for\", color, \"is\", hex)\n\t}\n}",
"func printMap(m map[string]string) {\n\tfor color, hex := range m { //key, value\n\t\tfmt.Println(\"Hex code for\", color, \"is\", hex)\n\t}\n}",
"func printMap(c map[string]string) {\n\tfor color, hex := range c {\n\t\tfmt.Println(\"Hex code for\", color, \"is\", hex)\n\t}\n}",
"func (m *Map) Print() {\n\tfmt.Println(\"Map size:\", m.area)\n\n\tfor y := uint8(0); y < m.area.height; y++ {\n\t\tfmt.Printf(\"%4d |\", y)\n\n\t\tfor x := uint8(0); x < m.area.width; x++ {\n\t\t\tif p := atomic.LoadPointer(m.fields[y][x]); fieldIsEmpty(p) {\n\t\t\t\tfmt.Print(\" .\")\n\t\t\t} else {\n\t\t\t\tfmt.Print(\" x\")\n\t\t\t}\n\t\t}\n\n\t\tfmt.Println()\n\t}\n}",
"func (gw *GameWorld) PrintWorldMap() {\n\tfor i := 0; i < len(gw.gameArea); i++ {\n\t\tfor j := 0; j < len(gw.gameArea[i]); j++ {\n\t\t\tfmt.Printf(\"[%v]\", gw.gameArea[i][j].Mark())\n\t\t}\n\t\tfmt.Println()\n\t}\n}",
"func DumpMapStrings(data map[string]string, leftPad int) {\n\tlongest := LongestString(StringsMapKeys(data), 0) + leftPad\n\n\tIterateStringsMap(data, func(k, v string) {\n\t\tfmt.Printf(\"%s: %s\\n\", strings.Repeat(\" \", longest-len(k))+k, v)\n\t})\n}",
"func writeMapToFile(m map[string]int, filename string) {\n\tfile, buf := CreateFileAndBuf(filename)\n\tfor k, v := range m {\n\t\tbuf.Write([]byte(k + \",\" + strconv.Itoa(v) + \"\\n\"))\n\t}\n\tSafeClose(file, buf)\n}",
"func dumpMaps() {\n\t// TODO: make this function part of the exporter\n\tfor name, cmap := range builtinMetricMaps {\n\t\tquery, ok := queryOverrides[name]\n\t\tif !ok {\n\t\t\tfmt.Println(name)\n\t\t} else {\n\t\t\tfor _, queryOverride := range query {\n\t\t\t\tfmt.Println(name, queryOverride.versionRange, queryOverride.query)\n\t\t\t}\n\t\t}\n\n\t\tfor column, details := range cmap.columnMappings {\n\t\t\tfmt.Printf(\" %-40s %v\\n\", column, details)\n\t\t}\n\t\tfmt.Println()\n\t}\n}",
"func Dump(nodes *map[string]storage.Node) {\n\tt := time.Now()\n\tf, _ := os.Create(strings.Join([]string{\"persister/data/dump_\", getTimeStr(t), \".log\"}, \"\"))\n\tdefer f.Close()\n\tfor k, v := range *nodes {\n\t\tf.WriteString(\"key: \" + k + \" \" + v.String() + \"\\n\")\n\t}\n\n}",
"func PrintBucketMap() string {\n\tbucketMap.RLock()\n\tdefer bucketMap.RUnlock()\n\n\tres := fmt.Sprintf(\"len:%d\\n\", len(bucketMap.m))\n\tres += \"keys:\\n\"\n\n\tfor k := range bucketMap.m {\n\t\tres += fmt.Sprintf(\"%s\\n\", k)\n\t}\n\n\treturn res\n}",
"func (m *TMap) String() string {\n\tvar b strings.Builder\n\tfmt.Fprint(&b, \"{ \")\n\titer := m.Iterator()\n\tfor iter.HasNext() {\n\t\tentry := iter.NextEntry()\n\t\tfmt.Fprintf(&b, \"%s \", entry)\n\t}\n\tfmt.Fprint(&b, \"}\")\n\treturn b.String()\n}",
"func MapToString(labels map[string]string) string {\n\tv := new(bytes.Buffer)\n\tfor key, value := range labels {\n\t\tfmt.Fprintf(v, \"%s=%s,\", key, value)\n\t}\n\treturn strings.TrimRight(v.String(), \",\")\n}",
"func FormatMap(m map[string]string) (fmtStr string) {\n\t// output with keys in sorted order to provide stable output\n\tkeys := sets.NewString()\n\tfor key := range m {\n\t\tkeys.Insert(key)\n\t}\n\tfor _, key := range keys.List() {\n\t\tfmtStr += fmt.Sprintf(\"%v=%q\\n\", key, m[key])\n\t}\n\tfmtStr = strings.TrimSuffix(fmtStr, \"\\n\")\n\n\treturn\n}",
"func (t *Map) String() string {\n\treturn fmt.Sprint(t.Entries())\n}",
"func (u *Util) PrintWorldMap() {\n\tfor city := range u.WorldMap {\n\t\tvar printString = city\n\t\tif u.WorldMap[city].North != nil {\n\t\t\tif u.WorldMap[u.WorldMap[city].North.Name] != nil {\n\t\t\t\tprintString = printString + \" north=\" + u.WorldMap[city].North.Name\n\t\t\t}\n\t\t}\n\t\tif u.WorldMap[city].South != nil {\n\t\t\tif u.WorldMap[u.WorldMap[city].South.Name] != nil {\n\t\t\t\tprintString = printString + \" south=\" + u.WorldMap[city].South.Name\n\t\t\t}\n\t\t}\n\t\tif u.WorldMap[city].East != nil {\n\t\t\tif u.WorldMap[u.WorldMap[city].East.Name] != nil {\n\t\t\t\tprintString = printString + \" east=\" + u.WorldMap[city].East.Name\n\t\t\t}\n\t\t}\n\t\tif u.WorldMap[city].West != nil {\n\t\t\tif u.WorldMap[u.WorldMap[city].West.Name] != nil {\n\t\t\t\tprintString = printString + \" west=\" + u.WorldMap[city].West.Name\n\t\t\t}\n\t\t}\n\t\tfmt.Println(printString)\n\t}\n}",
"func (m *Map) String() string {\n\tvar out bytes.Buffer\n\n\tout.WriteString(\"{\\n\")\n\tfor idx, val := range m.Elements {\n\t\tout.WriteString(fmt.Sprintf(\"%s : %s,\\n\", idx.String(), val.String()))\n\t}\n\tout.WriteString(\"}\")\n\n\treturn out.String()\n}",
"func (m *UrlMap) GetMap(w io.Writer) {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\n\tsep := fmt.Sprintf(\"+%s+%s+%s+%s+%s+%s+%s+\\n\", strings.Repeat(\"-\", 51), strings.Repeat(\"-\", 12),\n\t\tstrings.Repeat(\"-\", 18), strings.Repeat(\"-\", 18), strings.Repeat(\"-\", 18), strings.Repeat(\"-\", 18), strings.Repeat(\"-\", 18))\n\tfmt.Fprintf(w, sep)\n\tfmt.Fprintf(w, \"| % -50s| % -10s | % -16s | % -16s | % -16s | % -16s | % -16s |\\n\", \"Request URL\", \"Method\", \"Times\", \"Total Used(s)\", \"Max Used(μs)\", \"Min Used(μs)\", \"Avg Used(μs)\")\n\tfmt.Fprintf(w, sep)\n\n\tfor k, v := range m.urlmap {\n\t\tfor kk, vv := range v {\n\t\t\tfmt.Fprintf(w, \"| % -50s| % -10s | % 16d | % 16f | % 16.6f | % 16.6f | % 16.6f |\\n\", k,\n\t\t\t\tkk, vv.RequestNum, vv.TotalTime.Seconds(), float64(vv.MaxTime.Nanoseconds())/1000,\n\t\t\t\tfloat64(vv.MinTime.Nanoseconds())/1000, float64(time.Duration(int64(vv.TotalTime)/vv.RequestNum).Nanoseconds())/1000,\n\t\t\t)\n\t\t}\n\t}\n\tfmt.Fprintf(w, sep)\n}",
"func outPut() {\n\ts, _ := json.Marshal(allMap)\n\tfmt.Println(string(s))\n\ts, _ = json.Marshal(errMap)\n\tfmt.Println(string(s))\n}",
"func ( crawler *SingleCrawler ) Print() error {\n\n if err1 := IsOk(crawler); err1 != nil{\n return err1\n }\n\n stdout := os.Stdout\n outfile := stdout\n duped := true\n\n outfile, err := os.OpenFile( crawler.Filename, os.O_WRONLY | os.O_CREATE, 0644 )\n if err != nil {\n glog.Error(\"Unable to open requested file for writing. Defaulting to std out.\")\n duped = false\n } else{\n os.Stdout = outfile\n }\n\n fmt.Printf(\"SiteMap from starting URL %s, total pages found %d.\\n\\n\\n\", crawler.Site.String(), crawler.NumPages )\n for i := 0; i < crawler.NumPages; i++ {\n crawler.Sitemap[i].Print(crawler.PRINT_LIMIT)\n }\n\n if duped == true {\n outfile.Close()\n os.Stdout = stdout\n }\n\n return nil\n\n}",
"func printDatus(d interface{}) {\n\tm, ok := d.(map[string]interface{})\n\tif !ok {\n\t\tinfoLogger.Println(\"Failed type assertion\", d)\n\t}\n\t//Go's map implementation returns keys in random order. So we are sorting before accessing\n\tkeys := make([]string, len(m))\n\ti := 0\n\tfor k := range m {\n\t\tkeys[i] = k\n\t\ti++\n\t}\n\tsort.Strings(keys)\n\tfor _, key := range keys {\n\t\tinfoLogger.Panicf(\"%s: %-20v\", key, valueFromTypeMap(m[key]))\n\t}\n\tinfoLogger.Println()\n\n}",
"func PrintValues(w http.ResponseWriter, values map[string][]string){\n for i, n := range values{\n fmt.Fprintf(w,\"%s:%s\\n\", i, n)\n }\n}",
"func (m *RoadMap) Log(rank int) {\n\tfmt.Println()\n\tzlog.Info().\n\t\tFloat64(\"distance\", m.Distance).\n\t\tMsgf(\"%dº\", rank)\n\n\tfor _, r := range m.Routes {\n\t\tr.Log()\n\t}\n}",
"func writeHostMap(hostMap map[string]string) {\n\tif host_list_file == \"\" {\n\t\treturn\n\t}\n\tf, err := os.Create(host_list_file)\n\tif err != nil {\n\t\tlogr.LogLine(logr.Lerror, ltagsrc, err.Error())\n\t}\n\tdefer f.Close()\n\n\tfor host := range hostMap {\n\t\tf.WriteString(fmt.Sprintf(\"%s\\n\", host))\n\t}\n}",
"func (u *LRU) show() {\n\tfmt.Println(\"MAP:\")\n\tfor k, v := range u.entries {\n\t\tvv := v.Value.(kvpair)\n\t\tfmt.Printf(\"K:%20s K2: %20s V:%20s\\n\", k, vv.Key, vv.Value)\n\t}\n\tfmt.Println(\"LIST:\")\n\tfor elem := u.last.Front(); elem != nil; elem = elem.Next() {\n\t\tvv := elem.Value.(kvpair)\n\t\tfmt.Printf(\"kvpair:%+v\\n\", vv)\n\t}\n}",
"func (sm *SpaceMap) Print() {\n\tvar strb strings.Builder\n\tstrb.WriteString(fmt.Sprintf(\"Grid (width:%v, height:%v):\\n\", sm.width, sm.height))\n\tfor y := 0; y < sm.height; y++ {\n\t\tfor x := 0; x < sm.width; x++ {\n\t\t\tt := sm.grid[point{x, y}]\n\t\t\tstrb.WriteByte(byte(t))\n\t\t}\n\t\tstrb.WriteByte('\\n')\n\t}\n\tfmt.Print(strb.String())\n}",
"func (i geoLocationInfo) print() {\n\tif i[\"status\"] == \"fail\" {\n\t\tfmt.Printf(\"\\nno geolocation info available\\n\")\n\t} else {\n\t\tfmt.Printf(\"\\nGeolocation info: %s, %s\\n\", i[\"city\"], i[\"country\"])\n\t\tfmt.Printf(\"Organization: %s\\n\", i[\"org\"])\n\t}\n}",
"func printMapChanges(pm1, pm2 *mapper.PartitionMap) {\n\t// Ensure the topic name and partition order match.\n\tfor i := range pm1.Partitions {\n\t\tt1, t2 := pm1.Partitions[i].Topic, pm2.Partitions[i].Topic\n\t\tp1, p2 := pm1.Partitions[i].Partition, pm2.Partitions[i].Partition\n\t\tif t1 != t2 || p1 != p2 {\n\t\t\tfmt.Println(\"Unexpected partition map order\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\t// Get a status string of what's changed.\n\tfmt.Println(\"\\nPartition map changes:\")\n\tfor i := range pm1.Partitions {\n\t\tchange := whatChanged(pm1.Partitions[i].Replicas,\n\t\t\tpm2.Partitions[i].Replicas)\n\n\t\tfmt.Printf(\"%s%s p%d: %v -> %v %s\\n\",\n\t\t\tindent,\n\t\t\tpm1.Partitions[i].Topic,\n\t\t\tpm1.Partitions[i].Partition,\n\t\t\tpm1.Partitions[i].Replicas,\n\t\t\tpm2.Partitions[i].Replicas,\n\t\t\tchange)\n\t}\n}",
"func dumprm(r Roadmap, w io.Writer) {\n\tfmt.Fprintf(w, \"title=%q begin=%v end=%v scale=%v catpercent=%v vspace=%v font=%v itemh=%v, shape=%q\\n\",\n\t\tr.Title, r.Begin, r.End, r.Scale, r.Catpercent, r.Vspace, r.Fontname, r.Itemheight, r.Shape)\n\tfor _, cat := range r.Category {\n\t\tfmt.Fprintf(w, \"\\tname=%q vspace=%v itemheight=%v color=%v\\n\", cat.Name, cat.Vspace, cat.Itemheight, cat.Color)\n\t\tfor _, item := range cat.Item {\n\t\t\tfmt.Fprintf(w, \"\\t\\ttext=%q, begin=%q duration=%v vspace=%v\\n\", item.Text, item.Begin, item.Duration, item.Vspace)\n\t\t}\n\t}\n}",
"func DumpConfigMap(logger simplelogger.Logger, cm *corev1.ConfigMap) error {\n\tlogger.Logf(\"--- ConfigMap %s\\n\", cm.Name)\n\tlogger.Logf(\"%s\\n\", FormatAsYAML(cm.Data, 0))\n\treturn nil\n}",
"func writeStatGroupMap(w io.Writer, statGroups map[string]*statGroup) {\n\tmaxKeyLength := 0\n\tkeys := make([]string, 0, len(statGroups))\n\tfor k := range statGroups {\n\t\tif len(k) > maxKeyLength {\n\t\t\tmaxKeyLength = len(k)\n\t\t}\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\tfor _, k := range keys {\n\t\tv := statGroups[k]\n\t\tpaddedKey := fmt.Sprintf(\"%s\", k)\n\t\tfor len(paddedKey) < maxKeyLength {\n\t\t\tpaddedKey += \" \"\n\t\t}\n\n\t\t_, err := fmt.Fprintf(w, \"%s:\\n\", paddedKey)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\terr = v.write(w)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n}",
"func OutputMap(t testing.TestingT, options *Options, key string) map[string]string {\n\tout, err := OutputMapE(t, options, key)\n\trequire.NoError(t, err)\n\treturn out\n}",
"func (m Map) String() string {\n\treturn fmt.Sprintf(\"map[%s]%s\", m.Key.String(), m.Value.String())\n}",
"func (pm *PathMap) String() string {\n\tvar out strings.Builder\n\tout.WriteByte('{')\n\tnames := pm.pathnames()\n\tlastIdx := len(names) - 1\n\tfor idx, name := range names {\n\t\tvalue, _ := pm.get(name)\n\t\tfmt.Fprintf(&out, \"%s: %v\", name, value)\n\t\tif idx != lastIdx {\n\t\t\tout.WriteString(\", \")\n\t\t}\n\t}\n\tout.WriteByte('}')\n\treturn out.String()\n}",
"func DescribeMap(in interface{}, indent string) string {\n\tdescription := \"\"\n\tm, ok := in.(map[string]interface{})\n\tif ok {\n\t\tkeys := make([]string, 0)\n\t\tfor k, _ := range m {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfor _, k := range keys {\n\t\t\tv := m[k]\n\t\t\tdescription += fmt.Sprintf(\"%s%s:\\n\", indent, k)\n\t\t\tdescription += DescribeMap(v, indent+\" \")\n\t\t}\n\t\treturn description\n\t}\n\ta, ok := in.([]interface{})\n\tif ok {\n\t\tfor i, v := range a {\n\t\t\tdescription += fmt.Sprintf(\"%s%d:\\n\", indent, i)\n\t\t\tdescription += DescribeMap(v, indent+\" \")\n\t\t}\n\t\treturn description\n\t}\n\tdescription += fmt.Sprintf(\"%s%+v\\n\", indent, in)\n\treturn description\n}",
"func TestMap_Dump(t *testing.T) {\n\tschema := config.Schema{\n\t\t\"foo\": {},\n\t\t\"bar\": {Default: \"x\"},\n\t\t\"egg\": {Hidden: true},\n\t}\n\n\tvalues := map[string]string{\n\t\t\"foo\": \"hello\",\n\t\t\"bar\": \"x\",\n\t\t\"egg\": \"123\",\n\t}\n\n\tm, err := config.Load(schema, values)\n\tassert.NoError(t, err)\n\n\tdump := map[string]any{\n\t\t\"foo\": \"hello\",\n\t\t\"egg\": true,\n\t}\n\n\tassert.Equal(t, dump, m.Dump())\n}",
"func printPrayTimes(ptMap map[string]string) {\n\tfmt.Println(\"midnight =\", ptMap[\"midnight\"])\n\tfmt.Println(\"imsak =\", ptMap[\"imsak\"])\n\tfmt.Println(\"fajr =\", ptMap[\"fajr\"])\n\tfmt.Println(\"sunrise =\", ptMap[\"sunrise\"])\n\tfmt.Println(\"dhuhr =\", ptMap[\"dhuhr\"])\n\tfmt.Println(\"asr =\", ptMap[\"asr\"])\n\tfmt.Println(\"maghrib =\", ptMap[\"maghrib\"])\n\tfmt.Println(\"isha =\", ptMap[\"isha\"])\n}",
"func logState(s map[string]string) {\n\tlog.Println(\"Current state:\")\n\tfor k, v := range s {\n\t\tlog.Printf(\" %s %s\", k, v)\n\t}\n}",
"func (mgr *LocalHashMapDBMgr) LogAllKeys() {\n\tfor k, e := range mgr.memMap {\n\t\tglog.Infof(\"%v: %v\", k, e)\n\t}\n}",
"func (m *Map) Format() error {\n\t// todo\n\treturn nil\n}",
"func (m *Map) String() string {\n\tstr := \"\"\n\tfor row := 0; row < m.Rows; row++ {\n\t\tfor col := 0; col < m.Cols; col++ {\n\t\t\ts := m.itemGrid[row * m.Cols + col].Symbol()\n\t\t\tstr += string([]byte{s}) + \" \"\n\t\t}\n\t\tstr += \"\\n\"\n\t}\n\treturn str\n}",
"func (m StatementStatsMap) String() string {\n\tif len(m) == 0 {\n\t\treturn \"StatementStatsMap {}\"\n\t}\n\tbs := bytes.NewBufferString(\"\")\n\tbs.WriteString(\"StatementStatsMap {\\n\")\n\tfor k, v := range m {\n\t\tbs.WriteString(fmt.Sprintf(\" %s => %s\\n\", k, v))\n\t}\n\tbs.WriteString(\"}\")\n\treturn bs.String()\n}",
"func formatSmap() error { return formatMeta(&cluster.Smap{}) }",
"func (m *Map) String() string {\n\tstr := \"TreeBidiMap\\nmap[\"\n\tit := m.Iterator()\n\tfor it.Next() {\n\t\tstr += fmt.Sprintf(\"%v:%v \", it.Key(), it.Value())\n\t}\n\treturn strings.TrimRight(str, \" \") + \"]\"\n}",
"func (s *SSHOrch) ShowClientMap() {\n\tfor k, v := range s.lookup {\n\t\tfmt.Printf(\"%s:%s\\n\", k, v.LocalAddr())\n\t}\n}",
"func (p Print) Map() map[string]interface{} {\n\treturn map[string]interface{}{\n\t\t\"op\": \"print\",\n\t\t\"node\": p.Node.Map(),\n\t}\n}",
"func writeMapFile(filename string, target interface{}) error {\n\tf, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err = toml.NewEncoder(f).Encode(target); err != nil {\n\t\treturn err\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func appendMapHeader(pad *scratchpad, l uint32) {\n\t// Create the initial allocation and define the header.\n\ta := make([]byte, 5)\n\ta[0] = 't'\n\n\t// Write the length.\n\tntohl32(l, a, 1)\n\n\t// Append the header.\n\tpad.endAppend(a...)\n}",
"func Print(key Type) {\n\tmutex.RLock()\n\tdefer mutex.RUnlock()\n\n\tklog.Infof(\"%s: %s\", key, globalStats[key])\n}",
"func printHostStorageMapVerbose(hsm control.HostStorageMap, out io.Writer, opts ...PrintConfigOption) error {\n\tfor _, key := range hsm.Keys() {\n\t\thss := hsm[key]\n\t\thosts := getPrintHosts(hss.HostSet.RangedString(), opts...)\n\t\tlineBreak := strings.Repeat(\"-\", len(hosts))\n\t\tfmt.Fprintf(out, \"%s\\n%s\\n%s\\n\", lineBreak, hosts, lineBreak)\n\t\tfmt.Fprintf(out, \"HugePage Size: %d KB\\n\", hss.HostStorage.MemInfo.HugepageSizeKiB)\n\t\tif len(hss.HostStorage.ScmNamespaces) == 0 {\n\t\t\tif err := PrintScmModules(hss.HostStorage.ScmModules, out, opts...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t} else {\n\t\t\tif err := PrintScmNamespaces(hss.HostStorage.ScmNamespaces, out, opts...); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tfmt.Fprintln(out)\n\t\tif err := PrintNvmeControllers(hss.HostStorage.NvmeDevices, out, opts...); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfmt.Fprintln(out)\n\t}\n\n\treturn nil\n}",
"func (w *Writer) Info(m string) error {}",
"func (conf *Config) Print() {\n\tlog.Info().Str(\"app\", version.AppVersion).Str(\"commit\", version.Commit).Msg(\"version\")\n\tlog.Info().Int(\"port\", conf.Port).Msg(\"metrics endpoint port\")\n\tlog.Info().Str(\"namespace\", conf.Namespace).Str(\"subsystem\", conf.Subsystem).Str(\"name\", conf.Name).Msg(\"metric name\")\n\tlog.Info().Str(\"label\", conf.LabelName).Msg(\"label name\")\n\tlog.Info().Str(\"file\", conf.LabelFile).Msg(\"label values file\")\n}",
"func (ptr *KeyholeInfo) Print() string {\n\tif ptr == nil {\n\t\treturn \"\"\n\t}\n\tstrs := []string{fmt.Sprintf(`{ keyhole: { version: \"%v\", args: \"%v\" } }`, ptr.Version, ptr.Params)}\n\tstrs = append(strs, ptr.Logs...)\n\treturn strings.Join(strs, \"\\n\")\n}",
"func RendersMap(logbookConfig LogbookConfig) {\n\n\tairportMarkers := make(map[string]struct{})\n\trouteLines := make(map[string]struct{})\n\n\tvar totals logbookTotalRecord\n\n\t// load airports.json\n\tairports, err := loadAirportsDB()\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot load airports.json file: %v\", err)\n\t}\n\n\t// get data from the google spreadsheet\n\tresponse, err := getLogbookDump(logbookConfig)\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot get logbook dump: %v\", err)\n\t}\n\n\t// parsing\n\tfor _, row := range response {\n\t\trecord := parseRecord(row)\n\n\t\tif (logbookConfig.FilterDate != \"\" && strings.Contains(record.date, logbookConfig.FilterDate)) || logbookConfig.FilterDate == \"\" {\n\t\t\t// add to the list of the airport markers departure and arrival\n\t\t\t// it will be automatically a list of unique airports\n\t\t\tairportMarkers[record.departure.place] = struct{}{}\n\t\t\tairportMarkers[record.arrival.place] = struct{}{}\n\n\t\t\t// the same for the route lines\n\t\t\tif !logbookConfig.FilterNoRoutes {\n\t\t\t\tif record.departure.place != record.arrival.place {\n\t\t\t\t\trouteLines[fmt.Sprintf(\"%s-%s\", record.departure.place, record.arrival.place)] = struct{}{}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttotals = calculateTotals(totals, record)\n\t\t}\n\n\t}\n\n\tfmt.Printf(\"Airports: %d\\n\", len(airportMarkers))\n\tfmt.Printf(\"Routes: %d\\n\", len(routeLines))\n\tfmt.Printf(\"Total time: %s\\n\", totals.time.total.GetTime())\n\tfmt.Printf(\"Landings: %d day, %d night\\n\", totals.landings.day, totals.landings.night)\n\n\tctx := sm.NewContext()\n\tctx.SetSize(1920, 1080)\n\n\t// generate routes lines\n\tfor route := range routeLines {\n\t\tplaces := strings.Split(route, \"-\")\n\n\t\tif airport1, ok := airports[places[0]].(map[string]interface{}); ok {\n\t\t\tif airport2, ok := airports[places[1]].(map[string]interface{}); ok {\n\n\t\t\t\tctx.AddObject(\n\t\t\t\t\tsm.NewPath(\n\t\t\t\t\t\t[]s2.LatLng{\n\t\t\t\t\t\t\ts2.LatLngFromDegrees(airport1[\"lat\"].(float64), airport1[\"lon\"].(float64)),\n\t\t\t\t\t\t\ts2.LatLngFromDegrees(airport2[\"lat\"].(float64), airport2[\"lon\"].(float64)),\n\t\t\t\t\t\t},\n\t\t\t\t\t\tcolor.Black,\n\t\t\t\t\t\t0.5),\n\t\t\t\t)\n\n\t\t\t}\n\t\t}\n\t}\n\n\t// generate airports markers\n\tfor place := range airportMarkers {\n\n\t\tif airport, ok := airports[place].(map[string]interface{}); ok {\n\t\t\tctx.AddObject(\n\t\t\t\tsm.NewMarker(\n\t\t\t\t\ts2.LatLngFromDegrees(airport[\"lat\"].(float64), airport[\"lon\"].(float64)),\n\t\t\t\t\tcolor.RGBA{0xff, 0, 0, 0xff},\n\t\t\t\t\t16.0,\n\t\t\t\t),\n\t\t\t)\n\t\t}\n\n\t}\n\n\timg, err := ctx.Render()\n\tif err != nil {\n\t\tlog.Fatalf(\"Cannot render a map %v\", err)\n\t}\n\n\tif err := gg.SavePNG(\"map.png\", img); err != nil {\n\t\tlog.Fatalf(\"Cannot save a map %v\", err)\n\t} else {\n\t\tfmt.Printf(\"Map has been saved to map.png\\n\")\n\t}\n}",
"func Print(key Type) {\n\tmutex.RLock()\n\tdefer mutex.RUnlock()\n\n\tklog.Infof(\"%s: %s\", key, GlobalStats[key])\n}",
"func (m *Map) String() string {\n\tkeys := \"\"\n\ti := 0\n\tfor k, _ := range *m {\n\t\tkeys = keys + k\n\t\tif i++; i < len(*m) {\n\t\t\tkeys = keys + \",\"\n\t\t}\n\t}\n\treturn keys\n}",
"func (m _Map_String_String_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) {\n\tfor k, v := range m {\n\t\tenc.AddString((string)(k), v)\n\t}\n\treturn err\n}",
"func (g *Generator) buildMap(runs [][]Value, typeInfo typeInfo) {\n\ttypeName := typeInfo.Name\n\tg.Printf(\"\\n\")\n\tg.declareNameVars(runs, typeName, \"\")\n\tg.Printf(\"\\nvar _%s_map = map[%s]string{\\n\", typeName, typeName)\n\tn := 0\n\tfor _, values := range runs {\n\t\tfor _, value := range values {\n\t\t\tg.Printf(\"\\t%s: _%s_name[%d:%d],\\n\", &value, typeName, n, n+len(value.typeInfo.originalName))\n\t\t\tn += len(value.typeInfo.originalName)\n\t\t}\n\t}\n\tg.Printf(\"}\\n\\n\")\n\tg.Printf(stringMap, typeName)\n}",
"func (t *Tracer) DebugEBPFMaps(maps ...string) (string, error) {\n\treturn \"\", ebpf.ErrNotImplemented\n}",
"func (pm partitionMap) String() string {\n\tres := bytes.Buffer{}\n\tfor ns, partitions := range pm {\n\t\tres.WriteString(\"-----------------------------------------------------------------------\\n\")\n\t\tres.WriteString(\"Namespace: \" + ns + \"\\n\")\n\t\tres.WriteString(fmt.Sprintf(\"Regimes: %v\\n\", partitions.regimes))\n\t\tres.WriteString(fmt.Sprintf(\"SCMode: %v\\n\", partitions.SCMode))\n\t\treplicaArray := partitions.Replicas\n\t\tfor i, nodeArray := range replicaArray {\n\t\t\tif i == 0 {\n\t\t\t\tres.WriteString(\"\\nMASTER:\")\n\t\t\t} else {\n\t\t\t\tres.WriteString(fmt.Sprintf(\"\\nReplica %d: \", i))\n\t\t\t}\n\t\t\tfor partitionID, node := range nodeArray {\n\t\t\t\tres.WriteString(strconv.Itoa(partitionID) + \"/\")\n\t\t\t\tif node != nil {\n\t\t\t\t\tres.WriteString(node.host.String())\n\t\t\t\t\tres.WriteString(\", \")\n\t\t\t\t} else {\n\t\t\t\t\tres.WriteString(\"nil, \")\n\t\t\t\t}\n\t\t\t}\n\t\t\tres.WriteString(\"\\n\")\n\t\t}\n\t}\n\tres.WriteString(\"\\n\")\n\treturn res.String()\n}",
"func Pm(v map[string][]string) {\n\tfor k := range v {\n\t\tfmt.Printf(\"%+v: %+v\\n\", k, v[k])\n\t}\n}",
"func (c *BulkInsertMapDefinition) String() string {\n\tvar buf bytes.Buffer\n\tbuf.WriteString(c.MapExpr.String())\n\tbuf.WriteString(\" \")\n\tbuf.WriteString(c.Name.String())\n\tbuf.WriteString(\" \")\n\tbuf.WriteString(c.Type.String())\n\treturn buf.String()\n}",
"func (self *Map) Fprintf(w io.Writer, format string) {\n\tFprintf(w, format, self.MapNative())\n}",
"func (p *printer) mapAssign(m *Map) {\n\tif !p.ok() {\n\t\treturn\n\t}\n\tp.printf(\"\\n%s[%s] = %s\", m.Varname(), m.Keyidx, m.Validx)\n}",
"func (b infoDumper) write(format string, p ...interface{}) {\n\tif b.err != nil {\n\t\treturn\n\t}\n\t_, b.err = fmt.Fprintf(b.w, \"%s\", b.indent)\n\tif b.err != nil {\n\t\treturn\n\t}\n\t_, b.err = fmt.Fprintf(b.w, format+\"\\n\", p...)\n}",
"func (l *Log) DumpLog() {\n\tfor _, v := range l.Entries {\n\t\tfmt.Println(v)\n\t}\n}",
"func (lw *StandardLogWriter) PrintLogs() {\n\tf := createLogFile()\n\tdefer f.Close()\n\n\tfor i := 0; i < len(lw.OpEntries); i++ {\n\t\twriteEntry := fmt.Sprintf(\"%s\\n\", (lw.OpEntries[i]).MustMarshal())\n\t\t_, err := f.WriteString(writeEntry)\n\t\tif err != nil {\n\t\t\tpanic(\"Failed to write logs to file\")\n\t\t}\n\t}\n}",
"func writeout() {\n\tworden := \"test, test\" // temporary\n\n\tfmt.Println(bar)\n\tfmt.Printf(\"%v\\n%v %s %v\\n%v\", bar, sd, worden, sd, bar)\n\t/* file io should replace prints */\n}",
"func PrintTimeFrequency(frequencyMap map[string]int) {\n\tfor key, value := range frequencyMap {\n\t\tfmt.Printf(\"%s message count: %d\\n\", key, value)\n\t}\n}",
"func PrettyPrint(m *cosmos.Map) {\n\tfor i := 0; i < m.CitiesLen(); i++ {\n\t\tnewline := m.CitiesIDName[i]\n\t\tcity, _ := m.GetCity(newline)\n\t\tif !city.IsDestroyed() {\n\t\t\tfor dir := 0; dir < 4; dir++ {\n\t\t\t\troad, _ := city.GetRoad(dir)\n\t\t\t\tif road != nil {\n\t\t\t\t\tif road.IsAvailable() {\n\t\t\t\t\t\tnewline = ConcatRoads(road, newline)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tfmt.Println(newline)\n\t\t}\n\t}\n}",
"func (p *Printer) PrintTM(template string, v ds.Map) FullPrinter {\n state, fc := p.initState()\n defer p.reset(state)\n p.formatter.PrintTemplateMap(fc, template, v)\n state.Buffer.WriteNewLine()\n p.fc.Writer.Write(state.Buffer)\n return p\n}",
"func writeEntry(w io.Writer, e *Entry) {\n\tfmt.Fprintf(w, \"\\n@%s{%s,\\n\",\n\t\tstrings.ToLower(e.EntryString),\n\t\te.Key)\n\n\t// if this entry kind has a list of required fields,\n\t// print each of the required fields in order\n\tprinted := make(map[string]bool)\n\tif req, ok := required[e.Kind]; ok {\n\t\tfor _, r := range req {\n\t\t\tfor _, s := range strings.Split(r, \"/\") {\n\t\t\t\tif v, ok := e.Fields[s]; ok {\n\t\t\t\t\twriteTagValue(w, s, v)\n\t\t\t\t\tprinted[s] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// print the known optional fields\n\tif opt, ok := optional[e.Kind]; ok {\n\t\tfor _, r := range opt {\n\t\t\tif v, ok := e.Fields[r]; ok {\n\t\t\t\twriteTagValue(w, r, v)\n\t\t\t\tprinted[r] = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// print the blessed fields\n\tfor _, tag := range blessed {\n\t\tif v, ok := e.Fields[tag]; ok {\n if _, ok := printed[tag]; !ok {\n\t\t \twriteTagValue(w, tag, v)\n\t\t\t printed[tag] = true\n }\n\t\t}\n\t}\n\n\t// print all the other tags, in sorted order\n\tfor _, tag := range e.Tags() {\n\t\tif _, ok := printed[tag]; !ok {\n\t\t\twriteTagValue(w, tag, e.Fields[tag])\n\t\t}\n\t}\n\n\tfmt.Fprintf(w, \"}\\n\")\n}",
"func (ctx *TemplateContext) formatMap() (out string) {\n\talphaSortMap(ctx.substitutionsMap,\n\t\tfunc(s string) {\n\t\t\tv := ctx.substitutionsMap[s]\n\t\t\tconst TRIM = 80\n\t\t\tif len(v) > TRIM {\n\t\t\t\tv = v[:TRIM] + \"...\"\n\t\t\t}\n\t\t\tout += fmt.Sprintf(\" % 20s '%v'\\n\\n\", s, v)\n\t\t})\n\treturn\n}",
"func printStats(stats []statisic, hash string) {\n\tw := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', tabwriter.AlignRight|tabwriter.Debug)\n\n\tfmt.Fprintf(w, \"%s(w=%d):\\n\", hash, sketchWidth)\n\tfmt.Fprintf(w, \"data set\\tmax. abs.\\tavg. abs.\\tmax. rel.\\tavg. rel.\\t# exact\\n\")\n\n\tfor i := 0; i < len(stats); i++ {\n\t\tstat := stats[i]\n\t\tfmt.Fprintf(w, \"%s\\t%d\\t%d\\t%.2f\\t%.2f\\t%d\\n\", filePaths[i], stat.maxAbs, stat.avgAbs, stat.maxRel, stat.avgRel, 100-stat.misses)\n\t}\n\tfmt.Fprintln(w)\n\tw.Flush()\n}",
"func (rt *RoutingTable) Print() {\n\tfmt.Printf(\"Routing Table, bs = %d, Max latency = %d\\n\", rt.bucketsize, rt.maxLatency)\n\trt.tabLock.RLock()\n\n\tfor i, b := range rt.Buckets {\n\t\tfmt.Printf(\"\\tbucket: %d\\n\", i)\n\n\t\tb.lk.RLock()\n\t\tfor e := b.list.Front(); e != nil; e = e.Next() {\n\t\t\tp := e.Value.(peer.ID)\n\t\t\tfmt.Printf(\"\\t\\t- %s %s\\n\", p, rt.metrics.LatencyEWMA(p).String())\n\t\t}\n\t\tb.lk.RUnlock()\n\t}\n\trt.tabLock.RUnlock()\n}",
"func (ns *NsonSerializer) startMap(field string) {\n\tif field != \"\" {\n\t\tns.startField(field)\n\t}\n\tns.writer.WriteByte(byte(types.Map))\n\toff := ns.writer.Size()\n\tns.writer.WriteInt(0) // size in bytes\n\tns.writer.WriteInt(0) // number of elements\n\tns.offsetStack = append(ns.offsetStack, off)\n\tns.sizeStack = append(ns.sizeStack, 0)\n}",
"func (p *SliceOfMap) String() string {\n\tvar builder strings.Builder\n\tbuilder.WriteString(\"[\")\n\tif p != nil {\n\t\tfor i := range *p {\n\t\t\tbuilder.WriteString(ToString((*p)[i]))\n\t\t\tif i+1 < len(*p) {\n\t\t\t\tbuilder.WriteString(\" \")\n\t\t\t}\n\t\t}\n\t}\n\tbuilder.WriteString(\"]\")\n\treturn builder.String()\n}",
"func iterateMap() {\n\tnames := map[string]string{\n\t\t\"Kakashi\": \"Hatake\",\n\t\t\"Konohamaru\": \"Sarutobi\",\n\t\t\"Iruka\": \"Umino\",\n\t}\n\tfor k, v := range names {\n\t\tfmt.Println(k, v)\n\t}\n}",
"func MiniMap(mini []string){\n fmt.Printf(\"Mini Map: \\n\")\n fmt.Printf(\" 1 2 3 4 5 \\n\")\n for i := 0; i <= 4; i ++ {\n fmt.Printf(\"%d\",(i+1))\n for j := 0; j <= 4; j ++ {\n fmt.Printf(\" %s \", mini[i*5+j])\n }\n fmt.Printf(\"\\n\")\n }\n fmt.Printf(\"*****************************************************************************\\n\")\n}",
"func (s *session) log(info ...interface{}) {\n\tpreamble := fmt.Sprintf(\"IMAP (%s) \", s.id)\n\tmessage := []interface{}{preamble}\n\tmessage = append(message, info...)\n\tlog.Print(message...)\n}",
"func PrintResult(m map[string][]int) {\n\tfor key := range m {\n\t\tfmt.Print(\"\\n\", key, \": \")\n\t\tfor _, item := range m[key] {\n\t\t\tfmt.Print(item, \" \")\n\t\t}\n\t}\n}",
"func (masterfile *MasterFile) Print(spacing int) {\n\tlog.Debug(\"%*sMagic : %s\", spacing, \"\", masterfile.Magic)\n\tlog.Debug(\"%*sMD5 : %s\", spacing, \"\", masterfile.Md5)\n\tlog.Debug(\"%*sGeneration : %d\", spacing, \"\", masterfile.Generation)\n}",
"func (conf *Config) Print() {\n\tlog.Info().Str(\"app\", version.AppVersion).Str(\"commit\", version.Commit).Msg(\"version\")\n\tlog.Info().Int(\"port\", conf.Port).Msg(\"gRPC port\")\n\tlog.Info().Str(\"URL\", conf.SystemModelAddress).Msg(\"systemModelAddress\")\n\tlog.Info().Str(\"URL\", conf.EdgeInventoryProxyAddress).Msg(\"edgeInventoryProxyAddress\")\n\tlog.Info().Str(\"prefix\", conf.AppClusterPrefix).Msg(\"appClusterPrefix\")\n\tlog.Info().Int(\"port\", conf.AppClusterPort).Msg(\"appClusterPort\")\n\tlog.Info().Bool(\"tls\", conf.UseTLS).Bool(\"skipServerCertValidation\", conf.SkipServerCertValidation).Str(\"cert\", conf.CACertPath).Str(\"cert\", conf.ClientCertPath).Msg(\"TLS parameters\")\n\tlog.Info().Dur(\"CacheTTL\", conf.CacheTTL).Msg(\"selected TTL for the stats cache in milliseconds\")\n}",
"func (m Map) String() string {\n\treturn \"project-map-detector\"\n}",
"func (l *Logger) Print(v ...interface{}) { l.lprint(INFO, v...) }",
"func (w *Writer) Debug(out io.Writer)",
"func logInfo() {\n\tlogFile := path.Join(configFolder, logFileName)\n\tfile, err := os.OpenFile(logFile, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to open log file %s. Error was: %s\\n\", logFile, err)\n\t\treturn\n\t}\n\tdefer file.Close()\n\n\tdata := fmt.Sprintf(\"Date: [%s], Domain: [%s], Special Characters: [%v], AdditionalInfo: [%s]\\n\", time.Now().Format(time.RFC3339), domain, addSpecialChars, additionalInfo)\n\t_, err = file.WriteString(data)\n\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to write to log file %s. Error was: %s\\n\", logFile, err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Useful information about the password successfully stored to %s\\n\", logFile)\n}",
"func (d *DependencyMap) Write() (err error) {\n\n\tvar buf bytes.Buffer\n\tstr, err := json.Marshal(d.Map)\n\tjson.Indent(&buf, str, \"\", \" \")\n\n\tif err == nil {\n\t\tdata := []byte(buf.String() + \"\\n\")\n\t\tioutil.WriteFile(d.Path, data, 0644)\n\t}\n\treturn\n}",
"func printTorrents(m map[string]string) string {\n\trelease := parseUrlforStu(m[\"torrent\"])\n\tvar output string\n\tfor _, thing := range release.Items {\n\t\tif isDesired(thing.Title) {\n\t\t\toutput += strFormatOut(thing)\n\t\t\toutput += \"\\n\"\n\t\t}\n\t}\n\treturn output\n}",
"func (zw *ZapWriter) Write(event string, kvs ...interface{}) {\n\tzw.logger.Infow(event, kvs...)\n}",
"func (m _Map_String_Binary_Zapper) MarshalLogObject(enc zapcore.ObjectEncoder) (err error) {\n\tfor k, v := range m {\n\t\tenc.AddString((string)(k), base64.StdEncoding.EncodeToString(v))\n\t}\n\treturn err\n}",
"func printConfig() {\n\n\tvar m map[string]string\n\n\tm[\"LogPath\"] = conf.Conf.LogPath\n\n\tutil.Logf(\"Configuration >> %v\", m)\n}",
"func (args Args) LogFieldMap() map[string]interface{} {\n\tfields := make(map[string]interface{}, len(args.LogFields))\n\tfor _, field := range args.LogFields {\n\t\tfields[field.Key()] = field.Value()\n\t}\n\n\treturn fields\n}",
"func (args Args) LogFieldMap() map[string]interface{} {\n\tfields := make(map[string]interface{}, len(args.LogFields))\n\tfor _, field := range args.LogFields {\n\t\tfields[field.Key()] = field.Value()\n\t}\n\n\treturn fields\n}"
] | [
"0.67710865",
"0.66852766",
"0.63662946",
"0.6362477",
"0.63460183",
"0.6289744",
"0.6271945",
"0.6254925",
"0.621653",
"0.6151758",
"0.61285734",
"0.60187525",
"0.59970164",
"0.59936285",
"0.5975958",
"0.59478647",
"0.5933662",
"0.59229624",
"0.5835121",
"0.57788086",
"0.5770993",
"0.56977385",
"0.56703424",
"0.56635034",
"0.56417626",
"0.5624384",
"0.557822",
"0.55289024",
"0.5517626",
"0.55132055",
"0.5510847",
"0.54589605",
"0.544605",
"0.54326576",
"0.5426813",
"0.54208076",
"0.54157203",
"0.54134333",
"0.53567946",
"0.5345882",
"0.5341617",
"0.5330185",
"0.5308323",
"0.53078437",
"0.5275049",
"0.52702326",
"0.5250051",
"0.5249649",
"0.52442944",
"0.5243809",
"0.5218127",
"0.5211177",
"0.5194354",
"0.51896805",
"0.5166051",
"0.5149436",
"0.51315975",
"0.5131086",
"0.51235986",
"0.51195455",
"0.51149607",
"0.51110756",
"0.5089204",
"0.5075645",
"0.50597394",
"0.50590813",
"0.50515825",
"0.50407034",
"0.5037603",
"0.5034814",
"0.5034552",
"0.5024548",
"0.5006708",
"0.5004088",
"0.49995837",
"0.4998138",
"0.49964148",
"0.49909776",
"0.4978353",
"0.49747843",
"0.49709612",
"0.49582168",
"0.49546042",
"0.49542415",
"0.49517938",
"0.4948838",
"0.4943309",
"0.4939935",
"0.49375752",
"0.4924821",
"0.4924411",
"0.49192616",
"0.4912764",
"0.49082372",
"0.4899168",
"0.48988217",
"0.48848283",
"0.4879029",
"0.4866923",
"0.4866923"
] | 0.75574833 | 0 |
FindCurrentMeta will return information about commonly used weapons via the Charlemagne API | func FindCurrentMeta(platform, requestedActivity string) (*skillserver.EchoResponse, error) {
response := skillserver.NewEchoResponse()
// TODO: activity hash is currently unused. may be useful to be able to request meta
// for a current activity
//activityHash := ""
gameModes := []string{}
translatedGameMode, ok := metaActivityTranslation[requestedActivity]
if ok {
		// At some point this could be a list of activities but for now let's leave it as one
gameModes = append(gameModes, translatedGameMode)
glg.Infof("Found translated game mode of: %s", translatedGameMode)
}
translatedPlatform := platformNameToMapKey[platform]
glg.Warnf("Translated game mode for lookup in cache: %s", translatedGameMode)
meta := cachedMetaResponses[translatedPlatform][translatedGameMode]
if meta == nil {
err := fmt.Errorf("Trying to return meta for mode=%s and platform=%s but not in cache", translatedGameMode, translatedPlatform)
raven.CaptureError(err, nil, nil)
return nil, err
}
speechBuffer := bytes.NewBuffer([]byte{})
speechBuffer.Write([]byte("The most commonly used weapons according to Charlemagne "))
if requestedActivity != "" {
speechBuffer.WriteString(fmt.Sprintf("for %s ", requestedActivity))
}
if platform != "" {
speechBuffer.WriteString(fmt.Sprintf("on %s ", platform))
}
speechBuffer.WriteString(fmt.Sprintf("include %s and %s for kinetic weapons,",
meta.TopKineticWeapons[0].WeaponName, meta.TopKineticWeapons[1].WeaponName))
speechBuffer.WriteString(fmt.Sprintf("%s and %s for energy weapons,",
meta.TopEnergyWeapons[0].WeaponName, meta.TopEnergyWeapons[1].WeaponName))
speechBuffer.WriteString(fmt.Sprintf("and %s and %s for power weapons.",
meta.TopPowerWeapons[0].WeaponName, meta.TopPowerWeapons[1].WeaponName))
response.OutputSpeech(speechBuffer.String())
return response, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GetWeaponInfo(weaponName string) (w *Weapon, err error) {\n\tfields := []string{\"name\", \"weapon_type\", \"proficiency\", \"damage\", \"secondary_damage\", \"damage_type\", \"save\", \"range\", \"rarity\", \"modifiers\", \"weight\", \"description\", \"properties\"}\n\tw = &Weapon{}\n\tquery := fmt.Sprintf(\"SELECT %s FROM weapons WHERE lower(name) = lower($1)\", strings.Join(fields, \",\"))\n\n\trows, err := db.Query(query, weaponName)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\trows.Scan(&w.Name, &w.Type, &w.Proficiency, &w.Damage, &w.SecondaryDamage, &w.DamageType, &w.Save, pq.Array(&w.rangeRaw), &w.Rarity, pq.Array(&w.modifiersRaw), &w.Weight, &w.Description, &w.propertiesRaw)\n\t}\n\n\t// clean up\n\terr = w.clean()\n\n\treturn\n}",
"func weapon(char core.Character, c *core.Core, r int, param map[string]int) {\n\n\texpiry := 0\n\tper := 0.03 + 0.01*float64(r)\n\tstacks := 0\n\ticd := 0\n\n\tc.Events.Subscribe(core.OnDamage, func(args ...interface{}) bool {\n\n\t\tds := args[1].(*core.Snapshot)\n\n\t\tif ds.ActorIndex != char.CharIndex() {\n\t\t\treturn false\n\t\t}\n\t\tif ds.AttackTag != core.AttackTagNormal && ds.AttackTag != core.AttackTagExtra {\n\t\t\treturn false\n\t\t}\n\t\tif icd > c.F {\n\t\t\treturn false\n\t\t}\n\t\ticd = c.F + 18\n\t\tif expiry < c.F {\n\t\t\tstacks = 0\n\t\t}\n\t\tstacks++\n\t\tif stacks > 4 {\n\t\t\tstacks = 4\n\t\t}\n\t\texpiry = c.F + 360\n\t\treturn false\n\t}, fmt.Sprintf(\"prototype-rancour-%v\", char.Name()))\n\n\tval := make([]float64, core.EndStatType)\n\tchar.AddMod(core.CharStatMod{\n\t\tKey: \"prototype\",\n\t\tExpiry: -1,\n\t\tAmount: func(a core.AttackTag) ([]float64, bool) {\n\t\t\tif expiry < c.F {\n\t\t\t\tstacks = 0\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tval[core.ATKP] = per * float64(stacks)\n\t\t\tval[core.DEFP] = per * float64(stacks)\n\t\t\treturn val, true\n\t\t},\n\t})\n\n}",
"func PopularWeapons(request *skillserver.EchoRequest) (response *skillserver.EchoResponse) {\n\n\tresponse, err := trials.GetWeaponUsagePercentages()\n\tif err != nil {\n\t\tresponse = skillserver.NewEchoResponse()\n\t\tresponse.OutputSpeech(\"Sorry Guardian, I cannot access this information at this time, please try again later\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func (p *Player) Weapons() []*Equipment {\n\tres := make([]*Equipment, 0, len(p.RawWeapons))\n\tfor _, w := range p.RawWeapons {\n\t\tres = append(res, w)\n\t}\n\treturn res\n}",
"func weapon(char core.Character, c *core.Core, r int, param map[string]int) {\n\tdmg := 0.16 + float64(r)*0.04\n\n\tc.Events.Subscribe(core.OnAttackWillLand, func(args ...interface{}) bool {\n\t\tds := args[1].(*core.Snapshot)\n\t\tt := args[0].(core.Target)\n\t\tif ds.ActorIndex != char.CharIndex() {\n\t\t\treturn false\n\t\t}\n\t\t// if t.AuraType() == def.Hydro {\n\t\t// \tds.Stats[def.DmgP] += dmg\n\t\t// \tc.Log.Debugw(\"dragonbane\", \"frame\", c.F, \"event\", def.LogCalc, \"final dmg%\", ds.Stats[def.DmgP])\n\t\t// }\n\t\tif t.AuraContains(core.Hydro, core.Pyro) {\n\t\t\tds.Stats[core.DmgP] += dmg\n\t\t\tc.Log.Debugw(\"dragonbane\", \"frame\", c.F, \"event\", core.LogCalc, \"final dmg%\", ds.Stats[core.DmgP])\n\t\t}\n\t\treturn false\n\t}, fmt.Sprintf(\"dragonbane-%v\", char.Name()))\n\n}",
"func PersonalTopWeapons(request *skillserver.EchoRequest) (response *skillserver.EchoResponse) {\n\n\taccessToken := request.Session.User.AccessToken\n\tresponse, err := trials.GetPersonalTopWeapons(accessToken)\n\tif err != nil {\n\t\tresponse = skillserver.NewEchoResponse()\n\t\tresponse.OutputSpeech(\"Sorry Guardian, I cannot access this information at this time, please try again later\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func (b BaseDefender) GetWeaponPower(researches Researches) int64 {\n\treturn int64(float64(b.WeaponPower) * (1 + float64(researches.WeaponsTechnology)*0.1))\n}",
"func (p *Player) ActiveWeapon() *Equipment {\n\treturn p.RawWeapons[p.ActiveWeaponID]\n}",
"func PopularWeaponTypes(echoRequest *skillserver.EchoRequest) (response *skillserver.EchoResponse) {\n\n\tresponse, err := trials.GetPopularWeaponTypes()\n\tif err != nil {\n\t\tresponse = skillserver.NewEchoResponse()\n\t\tresponse.OutputSpeech(\"Sorry Guardian, I cannot access this information at this time, pleast try again later\")\n\t\treturn\n\t}\n\n\treturn\n}",
"func (g *Game) Look() *LookData {\n\tld := &LookData{}\n\n\tif g.IsDark() {\n\t\tld.RoomDescription = \"I can't see. It is too dark!\"\n\t\treturn ld\n\t}\n\n\tr := g.Current.Rooms[g.Current.State.Location]\n\tif r.Literal {\n\t\tld.RoomDescription = r.Description\n\t} else {\n\t\tld.RoomDescription = fmt.Sprintf(\"I'm in a %s\", r.Description)\n\t}\n\n\tfor i, dir := range []string{\"North\", \"South\", \"East\", \"West\", \"Up\", \"Down\"} {\n\t\tif r.Exits[i] != 0 {\n\t\t\tld.Exits = append(ld.Exits, dir)\n\t\t}\n\t}\n\n\tfor i := 0; i < int(g.Current.Header.NumItems); i++ {\n\t\tif it := g.Current.Items[i]; it.Location == g.Current.State.Location {\n\t\t\tld.Items = append(ld.Items, it.Description)\n\t\t}\n\t}\n\n\treturn ld\n}",
"func (_CraftingI *CraftingICallerSession) GetWeaponDc(_item_id *big.Int) (*big.Int, error) {\n\treturn _CraftingI.Contract.GetWeaponDc(&_CraftingI.CallOpts, _item_id)\n}",
"func (rw *RankedWeapon) oneUseWeapons(rank int, cfg npcdefs.NPCCfg) []InvItem {\n\tvar allWids []int\n\n\tfor _, wid := range rw.AllWeaponIDs {\n\t\tif !defs.Weapons[wid].Reusable {\n\t\t\tallWids = append(allWids, wid)\n\t\t}\n\t}\n\n\twidMap := map[int]int{} // [id]count\n\n\ttotalCount := util.RandRange(cfg.WeaponCountMin, cfg.WeaponCountMax)\n\tfor i := 0; i < totalCount; i++ {\n\t\tpoints := rankedWeaponPoints(rw.SkillLevel, rank, cfg)\n\t\twid := selectOneWeaponID(allWids, points)\n\t\twidMap[wid]++\n\t}\n\n\tvar items []InvItem\n\tfor wid, count := range widMap {\n\t\titems = append(items, InvItem{\n\t\t\tItemID: defs.Weapons[wid].ItemID,\n\t\t\tCount: count,\n\t\t})\n\t}\n\n\tsort.Slice(items, func(i int, j int) bool {\n\t\treturn items[i].ItemID < items[j].ItemID\n\t})\n\n\treturn items\n}",
"func (_CraftingI *CraftingISession) GetWeaponDc(_item_id *big.Int) (*big.Int, error) {\n\treturn _CraftingI.Contract.GetWeaponDc(&_CraftingI.CallOpts, _item_id)\n}",
"func (*WeaponInfo) Descriptor() ([]byte, []int) {\n\treturn file_login_proto_rawDescGZIP(), []int{1}\n}",
"func getCurrent(l, u string) *owm.CurrentWeatherData {\n\tw, err := owm.NewCurrent(u)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tw.CurrentByName(l)\n\treturn w\n}",
"func weildWeapon() (string, int) {\n\tlottery := random(1, 5)\n\tvar weapon string\n\tvar weapondie int\n\tswitch lottery {\n\tcase 1:\n\t\tweapon = \"fist\"\n\t\tweapondie = 3\n\tcase 2:\n\t\tweapon = \"dagger\"\n\t\tweapondie = 4\n\tcase 3:\n\t\tweapon = \"short sword\"\n\t\tweapondie = 6\n\tcase 4:\n\t\tweapon = \"longsword\"\n\t\tweapondie = 8\n\tcase 5:\n\t\tweapon = \"greataxe\"\n\t\tweapondie = 12 // At this case, the Greataxe will deal random damage from 1 point to 12 points, a 12-side die.\n\t}\n\treturn weapon, weapondie\n}",
"func GetWareByCategory(c *server.Context) error {\n\tvar (\n\t\tres []ware.BriefInfo\n\t\tcidReq struct {\n\t\t\tParentCID uint32 `json:\"parent_cid\" validate:\"required\"`\n\t\t\tCID uint32 `json:\"cid\"`\n\t\t}\n\t)\n\n\terr := c.JSONBody(&cidReq)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\terr = c.Validate(cidReq)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrInvalidParam, nil)\n\t}\n\n\tconn, err := mysql.Pool.Get()\n\tdefer mysql.Pool.Release(conn)\n\tif err != nil {\n\t\tlogger.Error(err)\n\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t}\n\n\tif cidReq.CID == 0 {\n\t\tres, err = ware.Service.GetByParentCID(conn, cidReq.ParentCID)\n\t\tif err != nil {\n\t\t\tlogger.Error(err)\n\t\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t\t}\n\t} else {\n\t\tres, err = ware.Service.GetByCID(conn, cidReq.CID)\n\t\tif err != nil {\n\t\t\tlogger.Error(err)\n\t\t\treturn core.WriteStatusAndDataJSON(c, constants.ErrMysql, nil)\n\t\t}\n\t}\n\n\treturn core.WriteStatusAndDataJSON(c, constants.ErrSucceed, res)\n}",
"func (c *Character) EquipWeapon(name string) {\n\tfor i, v := range c.Weapons {\n\t\tif v.Name == name {\n\t\t\tc.EquippedWeapons = append(c.EquippedWeapons, v)\n\t\t\tc.Weapons = append(c.Weapons[:i], c.Weapons[i+1:]...)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (m *BoschBpts5Hybrid) CurrentPower() (float64, error) {\n\tstatus, err := m.api.Status()\n\n\tswitch m.usage {\n\tcase \"grid\":\n\t\treturn status.BuyFromGrid - status.SellToGrid, err\n\tcase \"pv\":\n\t\treturn status.PvPower, err\n\tcase \"battery\":\n\t\treturn status.BatteryDischargePower - status.BatteryChargePower, err\n\tdefault:\n\t\treturn 0, err\n\t}\n}",
"func world_itemByID(id int32, meta int16) (world.Item, bool)",
"func (w *CurrentWeatherData) CurrentByName(location string) error {\n\tresponse, err := w.client.Get(fmt.Sprintf(fmt.Sprintf(baseURL, \"appid=%s&q=%s&units=%s&lang=%s\"), w.Key, url.QueryEscape(location), w.Unit, w.Lang))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer response.Body.Close()\n\n\tif err := json.NewDecoder(response.Body).Decode(&w); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func GetWeapons() (weapons []*Weapon, err error) {\n\tfields := []string{\"name\", \"weapon_type\", \"proficiency\", \"damage\", \"secondary_damage\", \"damage_type\", \"save\", \"range\", \"rarity\", \"modifiers\", \"weight\", \"description\", \"properties\"}\n\tquery := fmt.Sprintf(\"SELECT %s FROM weapons\", strings.Join(fields, \",\"))\n\trows, err := db.Query(query)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer rows.Close()\n\tfor rows.Next() {\n\t\t// create the new wespon, scan into it and cleanup\n\t\tw := &Weapon{}\n\t\trows.Scan(&w.Name, &w.Type, &w.Proficiency, &w.Damage, &w.SecondaryDamage, &w.DamageType, &w.Save, pq.Array(&w.rangeRaw), &w.Rarity, pq.Array(&w.modifiersRaw), &w.Weight, &w.Description, &w.propertiesRaw)\n\t\tw.clean()\n\t\tweapons = append(weapons, w)\n\t}\n\n\treturn\n}",
"func (e *Enemy) AvailableSkill() Skill {\n\tif e.Health <= 0 {\n\t\treturn nil\n\t}\n\n\t// TODO: Select skill\n\tfor _, skill := range e.Skills {\n\t\tif skill.WaitTime() == 0 {\n\t\t\treturn skill\n\t\t}\n\t\tfmt.Printf(\" WAIT: %s\\n\", skill.WaitTime().String())\n\t}\n\n\treturn nil\n}",
"func enchweapon() {\n\tif c[WIELD] < 0 {\n\t\tcursors()\n\t\tbeep()\n\t\tlprcat(\"\\nYou feel a sense of loss\")\n\t\treturn\n\t}\n\ttmp := iven[c[WIELD]]\n\tif tmp != OSCROLL {\n\t\tif tmp != OPOTION {\n\t\t\tivenarg[c[WIELD]]++\n\t\t\tif tmp == OCLEVERRING {\n\t\t\t\tc[INTELLIGENCE]++\n\t\t\t} else if tmp == OSTRRING {\n\t\t\t\tc[STREXTRA]++\n\t\t\t} else if tmp == ODEXRING {\n\t\t\t\tc[DEXTERITY]++\n\t\t\t}\n\t\t\tbottomline()\n\t\t}\n\t}\n}",
"func (w *RandomWorld) GetFood() map[string]*GoWorld.Food {\n\treturn w.FoodList\n}",
"func wearArmor(dexterity int) (string, int) {\n\tlottery := random(1, 5)\n\tvar armorname string\n\tvar armorBonus, dexBonus int\n\tdexBonus = attrModifier(dexterity)\n\tswitch lottery {\n\tcase 1:\n\t\tarmorname = \"Leather Armor\"\n\t\tarmorBonus = 2\n\t\tif dexBonus > 8 { // Every armor has a limit of how many dexterity bonus points can be added.\n\t\t\tdexBonus = 8\n\t\t}\n\tcase 2:\n\t\tarmorname = \"Chain Shirt\"\n\t\tarmorBonus = 4\n\t\tif dexBonus > 4 {\n\t\t\tdexBonus = 4\n\t\t}\n\tcase 3:\n\t\tarmorname = \"Scale Mail\"\n\t\tarmorBonus = 4\n\t\tif dexBonus > 4 {\n\t\t\tdexBonus = 4\n\t\t}\n\tcase 4:\n\t\tarmorname = \"Breastplate\"\n\t\tarmorBonus = 5\n\t\tif dexBonus > 3 {\n\t\t\tdexBonus = 3\n\t\t}\n\tcase 5:\n\t\tarmorname = \"Full Plate Armor\"\n\t\tarmorBonus = 8\n\t\tif dexBonus > 1 {\n\t\t\tdexBonus = 1\n\t\t}\n\t}\n\treturn armorname, 10 + armorBonus + dexBonus\n}",
"func (*HaveWeapon) Descriptor() ([]byte, []int) {\n\treturn file_msgdata_proto_rawDescGZIP(), []int{6}\n}",
"func (c *Character) GiveWeapon(we Weapon) {\n\tc.Weapons = append(c.Weapons, we)\n}",
"func getActiveCharacter(mid string) string {\n var profileResponse interface{}\n\n // Make GET request to Profile endpoint\n client := &http.Client{}\n reqURL := \"https://www.bungie.net/platform/Destiny2/3/Profile/\" +\n mid +\n \"/?components=200\"\n req, _ := http.NewRequest(\"GET\", reqURL, nil)\n req.Header.Add(\"X-API-Key\", os.Getenv(\"API_KEY\"))\n resp, err := client.Do(req)\n if ( err != nil) {\n fmt.Println(err)\n }\n // Parse response json for character ids\n err = json.NewDecoder(resp.Body).Decode(&profileResponse)\n if ( err != nil ) {\n fmt.Println(err)\n }\n resp.Body.Close()\n\n // Get relevant json data\n responseJSON := profileResponse.(map[string]interface{})\n responseMap := responseJSON[\"Response\"].(map[string]interface{})\n characterMap := responseMap[\"characters\"].(map[string]interface{})[\"data\"].(map[string]interface{})\n\n activeCharacter := \"-1\"\n latestDate := time.Time{}\n\n for k, v := range characterMap {\n dateString := v.(map[string]interface{})[\"dateLastPlayed\"].(string) // e.g. \"2020-01-09T06:11:35Z\"\n date, _ := time.Parse(\n time.RFC3339,\n dateString)\n if (date.After(latestDate)) {\n activeCharacter = k\n latestDate = date\n }\n }\n\n return activeCharacter\n}",
"func (c *KebaUdp) currentPower() (float64, error) {\n\tvar kr keba.Report3\n\terr := c.roundtrip(\"report\", 3, &kr)\n\n\t// mW to W\n\treturn float64(kr.P) / 1e3, err\n}",
"func (o *GetRecipeInformation200ResponseExtendedIngredientsInner) GetMeta() []string {\n\tif o == nil || o.Meta == nil {\n\t\tvar ret []string\n\t\treturn ret\n\t}\n\treturn o.Meta\n}",
"func (_CraftingI *CraftingICaller) GetWeaponDc(opts *bind.CallOpts, _item_id *big.Int) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _CraftingI.contract.Call(opts, &out, \"get_weapon_dc\", _item_id)\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func (m *Blueprint) CurrentPower() (float64, error) {\n\treturn 0, api.ErrNotAvailable\n}",
"func makeEquipment() {\n\tallgear = []gear{\n\t\t/*1*/ gear{name: \"Cloth Gloves\", attack: 0, defense: 10},\n\t\t/*2*/ gear{name: \"Cloth Helm\", attack: 0, defense: 10},\n\t\t/*3*/ gear{name: \"Cloth Cuirass\", attack: 0, defense: 10},\n\t\t/*4*/ gear{name: \"Cloth Boots\", attack: 0, defense: 10},\n\t\t/*5*/ gear{name: \"Iron Shield\", attack: 0, defense: 10},\n\t\t/*6*/ gear{name: \"Steel Gloves\", attack: 0, defense: 20},\n\t\t/*7*/ gear{name: \"Steel Helm\", attack: 0, defense: 20},\n\t\t/*8*/ gear{name: \"Steel Curais\", attack: 0, defense: 20},\n\t\t/*9*/ gear{name: \"Steel Boots\", attack: 0, defense: 20},\n\t\t/*10*/ gear{name: \"Steel Shield\", attack: 0, defense: 20},\n\t\t/*11*/ gear{name: \"Iron Sword\", attack: 10, defense: 0},\n\t\t/*12*/ gear{name: \"Iron 2H Axe\", attack: 25, defense: 0},\n\t\t/*13*/ gear{name: \"Iron Warmace\", attack: 40, defense: 0},\n\t\t/*14*/ gear{name: \"Steel Sword\", attack: 20, defense: 0},\n\t\t/*15*/ gear{name: \"Steel Axe\", attack: 35, defense: 0},\n\t\t/*16*/ gear{name: \"Steel Warmace\", attack: 60, defense: 0},\n\t}\n}",
"func GetCharacterInfo(c *colly.Collector, characterName string) Character {\n\tvar filter string\n\n\tc.OnHTML(\"div.pi-section-contents [data-ref]\", func(e *colly.HTMLElement) {\n\n\t\tvar tabSize string = e.Attr(\"data-ref\")\n\n\t\tif tabSize == \"1\" {\n\t\t\tfilter = \":nth-child(2)\"\n\t\t} else {\n\t\t\tfilter = \":nth-child(1)\"\n\t\t}\n\n\t})\n\tcharacter := Character{}\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tgoquerySelection := e.DOM\n\t\tnickname := goquerySelection.Find(\"section.pi-item:nth-child(1) > div:nth-child(2) > div:nth-child(2)\").Text()\n\t\tcharacter.Nickname = nickname\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tgoquerySelection := e.DOM\n\t\tmotto := goquerySelection.Find(\"[data-source=\\\"motto dst\\\"] .pi-data-value\").Text()\n\n\t\tcharacter.Motto = motto\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tgoquerySelection := e.DOM\n\t\tbio := goquerySelection.Find(\"[data-source=\\\"bio\\\"] .pi-data-value\").Text()\n\t\tcharacter.Bio = bio\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\n\t\tgoquerySelection := e.DOM\n\t\ts, err := goquerySelection.Find(\"[data-source=\\\"perk dst\\\"] .pi-data-value\").Html()\n\t\tcheckError(err)\n\n\t\tperks := trimPerk(s)\n\n\t\tcharacter.Perks = perks\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tvar err error\n\t\tpi := e.ChildAttr(\"section:nth-child(1) > figure:nth-child(1) > a:nth-child(1) > img\", \"src\")\n\t\tpi, err = trimImageURL(pi)\n\t\tcheckError(err)\n\n\t\tcharacter.ProfileImage = pi\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tgoquerySelection := e.DOM\n\t\thealth := goquerySelection.Find(\"section:nth-child(2) > table:nth-child(1) > tbody:nth-child(3) > tr:nth-child(1) > td:nth-child(1)\").Text()\n\n\t\tcharacter.Health = health\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tgoquerySelection := e.DOM\n\t\thunger := goquerySelection.Find(\"section:nth-child(2) > table:nth-child(1) > tbody:nth-child(3) > tr:nth-child(1) > td:nth-child(2)\").Text()\n\n\t\tcharacter.Hunger = hunger\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tgoquerySelection := e.DOM\n\t\tsanity := goquerySelection.Find(\"section:nth-child(2) > table:nth-child(1) > tbody:nth-child(3) > tr:nth-child(1) > td:nth-child(3)\").Text()\n\n\t\tcharacter.Sanity = sanity\n\t})\n\n\tc.OnHTML(\"div.pi-section-content\"+filter, func(e *colly.HTMLElement) {\n\t\tvar l, foodName []string\n\t\tvar link string\n\t\tfood := \"section [data-source=\\\"favorite food\\\"] > div\"\n\t\te.ForEach(food, func(index int, item *colly.HTMLElement) {\n\n\t\t\tfoodName = item.ChildAttrs(\"a\", \"title\")\n\t\t\tl = item.ChildAttrs(\"a\", \"href\")\n\n\t\t})\n\n\t\tcharacter.FavoriteFood = make(map[string]string)\n\t\tfor i := 0; i < len(foodName); i++ {\n\n\t\t\tlink = \"https://dontstarve.fandom.com\" + l[i]\n\t\t\tcharacter.FavoriteFood[foodName[i]] = link\n\t\t}\n\t})\n\n\terr := c.Visit(fmt.Sprintf(\"https://dontstarve.fandom.com/wiki/%s\", characterName))\n\tcheckError(err)\n\n\treturn character\n}",
"func GetRootAsWeapon(buf []byte, offset flatbuffers.UOffsetT) *Weapon {\n\tn := flatbuffers.GetUOffsetT(buf[offset:])\n\tx := &Weapon{}\n\tx.Init(buf, n+offset)\n\treturn x\n}",
"func GetCurrentEngineAndExtras(v *longhorn.Volume, es map[string]*longhorn.Engine) (currentEngine *longhorn.Engine, extras []*longhorn.Engine, err error) {\n\tfor _, e := range es {\n\t\tif e.Spec.Active {\n\t\t\tif currentEngine != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"BUG: found the second active engine %v besides %v\", e.Name, currentEngine.Name)\n\t\t\t}\n\t\t\tcurrentEngine = e\n\t\t} else {\n\t\t\textras = append(extras, e)\n\t\t}\n\t}\n\tif currentEngine == nil {\n\t\tlogrus.Warnf(\"failed to directly pick up the current one from multiple engines for volume %v, fall back to detect the new current engine, \"+\n\t\t\t\"current node %v, desire node %v\", v.Name, v.Status.CurrentNodeID, v.Spec.NodeID)\n\t\treturn GetNewCurrentEngineAndExtras(v, es)\n\t}\n\treturn\n}",
"func (w *RandomWorld) GetBeings() map[string]*GoWorld.Being {\n\treturn w.BeingList\n}",
"func quickBattle(ctx context) {\n\t//Verify that we have enough args\n\tif len(ctx.Args) < 2 {\n\t\treturn\n\t}\n\n\t//fill up the ally user with the information from the database\n\tallypet, err := getPetUser(ctx.Args[0], ctx)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t//fill up the enemy user\n\tenemypet, err := getPetUser(ctx.Args[1], ctx)\n\tif err != nil {\n\t\treturn\n\t}\n\t//check if ally is owned by the command initiator\n\n\tif allypet.OwnerID != ctx.Msg.Author.ID {\n\t\treturn\n\t}\n\n\tif allypet.Training {\n\t\treturn\n\t}\n\t\n\tif enemypet.Training {\n\t\treturn\n\t}\n\n\t//Set the battling flag\n\tdoingBattle := true\n\t\n\t\n\t//Entry point for battle loop. Ally attacks first.\n\tfor doingBattle {\n\t\tif allypet.EffectiveHP <= 0 {\n\t\t\t_,_ = ctx.Session.ChannelMessageSendEmbed(ctx.Msg.ChannelID, createResultEmbed(enemypet, allypet))\n\t\t\tdoingBattle = false\n\n\t\t\tgetLevels(&allypet, &enemypet, false, ctx)\n\n\t\t\treturn\n\t\t}\n\n\t\tallypet.SwingCount ++\n\t\tif doesHit(&allypet, enemypet) {\n\t\t\tdmg := getDamage(&allypet, enemypet)\n\t\t\tenemypet.EffectiveHP -= dmg\n\t\t\tallypet.DMGCount += dmg\n\t\t}\n\t\t\t\n\t\tif enemypet.EffectiveHP <= 0 {\n\t\t\t_,_ = ctx.Session.ChannelMessageSendEmbed(ctx.Msg.ChannelID, createResultEmbed(allypet, enemypet))\n\t\t\tdoingBattle = false\n\n\t\t\tgetLevels(&allypet, &enemypet, true, ctx)\n\t\t\treturn\n\t\t}\n\n\t\tenemypet.SwingCount ++\n\t\tif doesHit(&enemypet, allypet) {\n\t\t\tdmg := getDamage(&enemypet, allypet)\n\t\t\tallypet.EffectiveHP -= dmg\n\t\t\tenemypet.DMGCount += dmg\n\t\t}\t\n\t}\n}",
"func (_CraftingI *CraftingICaller) GetTokenUriWeapon(opts *bind.CallOpts, _item *big.Int) (string, error) {\n\tvar out []interface{}\n\terr := _CraftingI.contract.Call(opts, &out, \"get_token_uri_weapon\", _item)\n\n\tif err != nil {\n\t\treturn *new(string), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(string)).(*string)\n\n\treturn out0, err\n\n}",
"func Known() []string {\n\treturn caps59\n}",
"func (h Here) Current() (Info, error) {\n\thp := &h\n\t(&hp.curOnce).Do(func() {\n\t\tb, err := run(\"go\", \"env\", \"GOMOD\")\n\t\tif err != nil {\n\t\t\thp.curErr = err\n\t\t\treturn\n\t\t}\n\t\troot := filepath.Dir(string(b))\n\t\ti, err := h.Dir(root)\n\t\tif err != nil {\n\t\t\thp.curErr = err\n\t\t\treturn\n\t\t}\n\t\thp.current = i\n\t})\n\n\treturn h.current, h.curErr\n}",
"func GetCraftedCharacterModels(w http.ResponseWriter, req *http.Request) {\n\n\t// Get session values or redirect to Login\n\tsession, err := sessions.Store.Get(req, \"session\")\n\n\tif err != nil {\n\t\tlog.Println(\"error identifying session\")\n\t\thttp.Redirect(w, req, \"/login/\", http.StatusFound)\n\t\treturn\n\t\t// in case of error\n\t}\n\n\t// Prep for user authentication\n\tsessionMap := getUserSessionValues(session)\n\n\tusername := sessionMap[\"username\"]\n\tloggedIn := sessionMap[\"loggedin\"]\n\tisAdmin := sessionMap[\"isAdmin\"]\n\n\tfmt.Println(loggedIn, isAdmin, username)\n\n\tfmt.Println(session)\n\n\t/*\n\t\tif username == \"\" {\n\t\t\thttp.Redirect(w, req, \"/\", http.StatusFound)\n\t\t\treturn\n\t\t}\n\t*/\n\n\tcms, err := database.APICraftedCharacterModels(db)\n\tif err != nil {\n\t\tlog.Println(err)\n\t}\n\n\tjson.NewEncoder(w).Encode(cms)\n}",
"func (o *GetRecipeInformation200ResponseExtendedIngredientsInner) GetMetaOk() ([]string, bool) {\n\tif o == nil || o.Meta == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Meta, true\n}",
"func (_CraftingI *CraftingICallerSession) GetTokenUriWeapon(_item *big.Int) (string, error) {\n\treturn _CraftingI.Contract.GetTokenUriWeapon(&_CraftingI.CallOpts, _item)\n}",
"func (r *Release) getHelmFlags() []string {\n\tvar flgs []string\n\tif flags.forceUpgrades {\n\t\tflgs = append(flgs, \"--force\")\n\t}\n\n\treturn concat(r.getNoHooks(), r.getWait(), r.getTimeout(), r.getMaxHistory(), flags.getRunFlags(), r.HelmFlags, flgs)\n}",
"func (rw *RankedWeapon) Items(skills []decode.CharSkill, cfg npcdefs.NPCCfg) []InvItem {\n\tw := defs.Weapons[rw.WeaponID]\n\tif !w.Reusable {\n\t\treturn rw.oneUseWeapons(rw.Rank, cfg)\n\t}\n\n\tvar items []InvItem\n\n\titems = append(items, InvItem{\n\t\tItemID: w.ItemID,\n\t\tCount: 1,\n\t})\n\n\tif w.ClipItemID != defs.ItemIDNone {\n\t\tcount := util.RandRange(cfg.WeaponClipsMin, cfg.WeaponClipsMax)\n\t\titems = append(items, InvItem{\n\t\t\tItemID: w.ClipItemID,\n\t\t\tCount: count,\n\t\t})\n\t}\n\n\treturn items\n}",
"func (client *XenClient) VGPUGetCompatibilityMetadata(self string) (result map[string]string, err error) {\n\tobj, err := client.APICall(\"VGPU.get_compatibility_metadata\", self)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tinterim := reflect.ValueOf(obj)\n\tresult = map[string]string{}\n\tfor _, key := range interim.MapKeys() {\n\t\tobj := interim.MapIndex(key)\n\t\tresult[key.String()] = obj.String()\n\t}\n\n\treturn\n}",
"func (h *PepicHandler) GetMeta(c echo.Context) error {\n\tnames := strings.Split(c.Param(\"name\"), \",\")\n\tvar files []*entity.ProcessingFile\n\n\tfor _, name := range names {\n\t\tfile, err := h.Storage.GetFile(\"orig\", name)\n\t\tif err != nil {\n\t\t\treturn echo.NewHTTPError(http.StatusNotFound, \"File not found\")\n\t\t}\n\t\tfiles = append(files, file)\n\t}\n\n\treturn c.Render(http.StatusOK, \"meta.html\", map[string]interface{}{\n\t\t\"files\": files,\n\t\t\"host\": c.Request().URL,\n\t\t\"blocks\": config.App.Meta.Blocks,\n\t})\n}",
"func (self *TileSprite) Damage() interface{}{\n return self.Object.Get(\"damage\")\n}",
"func getAvailablePluginInfo(restPluginsMap restPlugins) []restPluginsAvailable {\n\tvar availablePluginsMap []restPluginsAvailable\n\tfor _, plugin := range restPluginsMap.Plugins {\n\t\tlog.Debug(\"getting: \", plugin.Name, \", available info\")\n\t\tavailablePluginURL := baseURL + \"available/\" + plugin.Key + \"-key\"\n\t\tlog.Debug(\"requesting URL: \" + availablePluginURL)\n\t\treq, err := http.NewRequest(\"GET\", availablePluginURL, nil)\n\t\tif err != nil {\n\t\t\tlog.Error(\"http.NewRequest returned an error:\", err)\n\t\t}\n\n\t\tlog.Debug(\"add authorization header to the request\")\n\t\treq.Header.Add(\"Authorization\", bearer)\n\n\t\tlog.Debug(\"make request... get back a response\")\n\t\tres, err := http.DefaultClient.Do(req)\n\t\tif err != nil {\n\t\t\tlog.Error(\"http.DefaultClient.Do returned an error:\", err)\n\t\t}\n\t\tdefer res.Body.Close()\n\n\t\tif res.StatusCode != 200 {\n\t\t\tlog.Debug(\"response status code: \", res.StatusCode, \" continuing to next plugin\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debug(\"get the body out of the response\")\n\t\tbody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\tlog.Error(\"ioutil.ReadAll returned an error:\", err)\n\t\t}\n\n\t\tif len(body) < 1 {\n\t\t\tlog.Debug(\"body was empty, continue to next plugin\")\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debug(\"create temp map object\")\n\t\tvar tempMap restPluginsAvailable\n\n\t\tlog.Debug(\"unmarshal (turn unicode back into a string) request body into map structure\")\n\t\terr = json.Unmarshal(body, &tempMap)\n\t\tif err != nil {\n\t\t\tlog.Error(\"error Unmarshalling: \", err)\n\t\t\tlog.Info(\"Problem unmarshalling the following string: \", string(body))\n\t\t}\n\n\t\t// add the enabled value from the plugin map to the available map\n\t\ttempMap.Enabled = plugin.Enabled\n\n\t\tlog.Debug(\"adding plugin: \", tempMap.Name, \", and Key: \", tempMap.Key)\n\t\tavailablePluginsMap = append(availablePluginsMap, tempMap)\n\n\t}\n\n\treturn availablePluginsMap\n}",
"func (hs100 *Hs100) GetCurrentPowerConsumption() (PowerConsumption, error) {\n\tresp, err := hs100.commandSender.SendCommand(hs100.Address, currentPowerConsumptionCommand)\n\tif err != nil {\n\t\treturn PowerConsumption{}, errors.Wrap(err, \"Could not read from hs100 device\")\n\t}\n\treturn powerConsumption(resp)\n}",
"func (wb *EVSEWifi) currentPower() (float64, error) {\n\tparams, err := wb.paramG.Get()\n\treturn 1000 * params.ActualPower, err\n}",
"func (self *TileSprite) Heal() interface{}{\n return self.Object.Get(\"heal\")\n}",
"func (g *Game) Cmd(cmds ...string) (res interface{}) {\n switch cmds[0] {\n case \"look\":\n return g.CurrentRoom.Body\n }\n\n\treturn nil\n}",
"func selectRankedWeapons(skills []decode.CharSkill,\n\tcfg npcdefs.NPCCfg) []RankedWeapon {\n\n\tvar rws []RankedWeapon\n\n\twss := sortedWeaponSkills(skills)\n\tlog.Debugf(\"sorted weapon skills:\\n\")\n\tfor _, ws := range wss {\n\t\tlog.Debugf(\" %d %s\", ws.Level, defs.SkillNames[ws.ID])\n\t}\n\tfor rank, ws := range wss {\n\t\tids := weaponIDsWithSkillID(ws.ID)\n\t\tif len(ids) > 0 {\n\t\t\tpoints := rankedWeaponPoints(ws.Level, rank, cfg)\n\t\t\tif points > 0 {\n\t\t\t\tid := selectOneWeaponID(ids, points)\n\t\t\t\tif id != defs.WeaponIDNone {\n\t\t\t\t\trws = append(rws, RankedWeapon{\n\t\t\t\t\t\tAllWeaponIDs: ids,\n\t\t\t\t\t\tWeaponID: id,\n\t\t\t\t\t\tSkillLevel: ws.Level,\n\t\t\t\t\t\tRank: rank,\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn rws\n}",
"func (e Encounter)Help() string { return \"[join], [TARGET [with WEAPON][do DAMAGEROLL][hp +-NN]\" }",
"func (_CraftingI *CraftingISession) GetTokenUriWeapon(_item *big.Int) (string, error) {\n\treturn _CraftingI.Contract.GetTokenUriWeapon(&_CraftingI.CallOpts, _item)\n}",
"func (el epicList) getAvailable(names ...string) []availableEpic {\n\tret := make([]availableEpic, len(names))\n\tfor i, name := range names {\n\t\tfor _, e := range el {\n\t\t\tif e.InternalName == name {\n\t\t\t\tret[i].ID = e.ID\n\t\t\t\tret[i].Name = e.Name\n\t\t\t\tret[i].Desc = e.Desc\n\t\t\t\tret[i].LongDesc = e.LongDesc\n\t\t\t\tif e.EndDate != PositiveInfinityTS {\n\t\t\t\t\tret[i].EndTime = &e.EndDate\n\t\t\t\t}\n\t\t\t\tret[i].RepeatMax = e.RepeatCount\n\t\t\t\tret[i].RepeatCount = 0\n\t\t\t\tret[i].GroupSize = e.GroupSize\n\t\t\t\tret[i].Flags = e.Flags\n\t\t\t}\n\t\t}\n\t}\n\treturn ret\n}",
"func GetMeta(value Value) *Meta {\n\t// acquire mutex\n\tmetaMutex.Lock()\n\tdefer metaMutex.Unlock()\n\n\t// get typ\n\ttyp := reflect.TypeOf(value)\n\n\t// check cache\n\tif meta, ok := metaCache[typ]; ok {\n\t\treturn meta\n\t}\n\n\t// get first field\n\tfield := typ.Elem().Field(0)\n\n\t// check field type and name\n\tif field.Type != baseType || !field.Anonymous || field.Name != \"Base\" {\n\t\tpanic(`glut: expected first struct field to be an embedded \"glut.Base\"`)\n\t}\n\n\t// check coding tag\n\tjson, hasJSON := field.Tag.Lookup(\"json\")\n\tbson, hasBSON := field.Tag.Lookup(\"bson\")\n\tif (hasJSON && hasBSON) || (!hasJSON && !hasBSON) {\n\t\tpanic(`glut: expected to find a coding tag of the form 'json:\"-\"' or 'bson:\"-\"' on \"glut.Base\"`)\n\t} else if (hasJSON && json != \"-\") || (hasBSON && bson != \"-\") {\n\t\tpanic(`glut: expected to find a coding tag of the form 'json:\"-\"' or 'bson:\"-\"' on \"glut.Base\"`)\n\t}\n\n\t// get coding\n\tcoding := stick.JSON\n\tif hasBSON {\n\t\tcoding = stick.BSON\n\t}\n\n\t// split tag\n\ttag := strings.Split(field.Tag.Get(\"glut\"), \",\")\n\n\t// check tag\n\tif len(tag) != 2 || tag[0] == \"\" || tag[1] == \"\" {\n\t\tpanic(`glut: expected to find a tag of the form 'glut:\"key,ttl\"' on \"glut.Base\"`)\n\t}\n\n\t// get key\n\tkey := tag[0]\n\n\t// get ttl\n\tttl, err := time.ParseDuration(tag[1])\n\tif err != nil {\n\t\tpanic(`glut: invalid duration as time to live on \"glut.Base\"`)\n\t}\n\n\t// prepare meta\n\tmeta := &Meta{\n\t\tType: typ,\n\t\tKey: key,\n\t\tTTL: ttl,\n\t\tCoding: coding,\n\t\tAccessor: stick.BuildAccessor(value, \"Base\"),\n\t}\n\n\t// cache meta\n\tmetaCache[typ] = meta\n\n\treturn meta\n}",
"func (self *PhysicsP2) Walls() interface{}{\n return self.Object.Get(\"walls\")\n}",
"func GetPlayers() {\n\tres, err := http.Get(\"https://ssherder.com/data-api/characters/\")\n\tif err != nil {\n\t\ttools.WriteErr(err)\n\t\tfmt.Println(\"Somethings wrong with Ssherder!\")\n\t\treturn\n\t}\n\tdefer res.Body.Close()\n\n\t// ReadAll to a byte array for Unmarshal\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\ttools.WriteErr(err)\n\t\tfmt.Println(\"Error with: ioutil.ReadAll(res.Body)\")\n\t\treturn\n\t}\n\n\t// Unmarshal JSON data into struct\n\tvar playerStruct []expectedPlayers\n\tif err := json.Unmarshal(body, &playerStruct); err != nil {\n\t\ttools.WriteErr(err)\n\t\tfmt.Println(\"Error with: json.Unmarshal(body, &playerStruct)\")\n\t\treturn\n\t}\n\n\t// loop and store\n\tfor i := 0; i < len(playerStruct); i++ {\n\t\tplayerMap := make(map[string]string)\n\t\tplayerMap[\"Story\"] = playerStruct[i].Story\n\t\tplayerMap[\"ID\"] = strconv.Itoa(playerStruct[i].ID)\n\t\tplayerMap[\"Stones\"] = strings.Join(playerStruct[i].Stones, \", \")\n\n\t\tvar (\n\t\t\tskillString string\n\t\t\tace string\n\t\t\tactive string\n\t\t\tpassives string\n\t\t)\n\n\t\tfor k := 0; k < len(playerStruct[i].Skills); k++ {\n\t\t\t// Define hash key, HGetAll, assign skill info\n\t\t\thashKey := \"skill_\" + strconv.Itoa(playerStruct[i].Skills[k])\n\n\t\t\tskillHash, err := rds.RC.HGetAll(hashKey).Result()\n\t\t\tif err != nil {\n\t\t\t\ttools.WriteErr(err)\n\t\t\t\tfmt.Println(\"Error getting Skill Hash\")\n\t\t\t}\n\n\t\t\tskillName := skillHash[\"Name\"]\n\t\t\tskillDesc := skillHash[\"Description\"]\n\t\t\tskillCat := skillHash[\"Category\"]\n\t\t\tskillCost := skillHash[\"SpiritCost\"]\n\t\t\tskillCD := skillHash[\"Cooldown\"]\n\n\t\t\t// How I want one line printed\n\t\t\tskillInfo := fmt.Sprintf(\"**%s** [%s] \\n%s\\n\\n\", skillName, strings.ToLower(skillCat), skillDesc)\n\n\t\t\tif skillCat == \"ace\" {\n\t\t\t\tace = skillInfo\n\t\t\t} else if skillCat == \"active\" { // active skills have a unique print\n\t\t\t\tactive = fmt.Sprintf(\"**%s** [%s, %s spirit, %sm] \\n%s\\n\\n\", skillName, strings.ToLower(skillCat), skillCost, skillCD, skillDesc)\n\t\t\t} else { // Multiple passives per player\n\t\t\t\tpassives = passives + skillInfo\n\t\t\t}\n\t\t}\n\t\t// Order I want it all in after ace: active > passives\n\t\tskillString = ace\n\t\tskillString = skillString + active\n\t\tskillString = skillString + passives\n\n\t\tplayerMap[\"Skills\"] = skillString\n\n\t\t// Example name: \"Z101 Raklet\"\n\t\t// Split it: [\"Z101\", \"Raklet\"]\n\t\t// Create the same player entries the keys: \"Z101 Raklet\", \"Z101\", and \"Raklet\"\n\t\tplayerName := playerStruct[i].Name\n\t\tsplitName := strings.Split(playerName, \" \")\n\n\t\tstringID := strconv.Itoa(playerStruct[i].ID) // stringify ID\n\t\tkeyID := string(stringID[0]) // grab first index in string form\n\t\tlookupKey := playerStruct[i].Name + \"_\" + keyID\n\n\t\t// Store Character's name by Ssherder IDs\n\t\trds.RedisSet(rds.RC, playerMap[\"ID\"], playerName)\n\n\t\t// set full name, then loop over(if two or more) splitName\n\t\trds.RC.HMSet(strings.ToLower(lookupKey), playerMap)\n\t\tif len(splitName) > 1 {\n\t\t\tfor x := 0; x < len(splitName); x++ {\n\t\t\t\t// check if it exists already\n\t\t\t\tsplitKey := fmt.Sprintf(\"%s_%s\", strings.ToLower(splitName[x]), keyID)\n\t\t\t\texists, err := rds.RC.Exists(splitKey).Result()\n\t\t\t\tif err != nil {\n\t\t\t\t\ttools.WriteErr(err)\n\t\t\t\t\treturn\n\t\t\t\t} else if !exists {\n\t\t\t\t\trds.RC.HMSet(splitKey, playerMap)\n\t\t\t\t} else 
{\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Skill info is saved, now...\n\t// Also save other player info under a UUID key\n\n\t// _, err := io.Copy(os.Stdout, res.Body)\n\t// if err != nil {\n\t// \tfmt.Println(err)\n\t// }\n}",
"func (c *Creature) DetermineMagicPoints() *Attribute {\n\tmp := &Attribute{\n\t\tName: \"Magic Points\",\n\t\tMaxValue: 21,\n\t}\n\n\tp := c.Statistics[\"POW\"]\n\tp.UpdateStatistic()\n\n\tmp.Base = p.Total\n\tmp.Max = p.Total\n\n\treturn mp\n}",
"func Inventory() []adptr.InfoFn {\n\treturn []adptr.InfoFn{\n\t\tbypass.GetInfo,\n\t\tcirconus.GetInfo,\n\t\tcloudwatch.GetInfo,\n\t\tdenier.GetInfo,\n\t\tdogstatsd.GetInfo,\n\t\tfluentd.GetInfo,\n\t\tkubernetesenv.GetInfo,\n\t\tlist.GetInfo,\n\t\tmemquota.GetInfo,\n\t\tnoop.GetInfo,\n\t\topa.GetInfo,\n\t\tprometheus.GetInfo,\n\t\trbac.GetInfo,\n\t\tredisquota.GetInfo,\n\t\tservicecontrol.GetInfo,\n\t\tsignalfx.GetInfo,\n\t\tsolarwinds.GetInfo,\n\t\tstackdriver.GetInfo,\n\t\tstatsd.GetInfo,\n\t\tstdio.GetInfo,\n\t}\n}",
"func ExtraMeta(ctx context.Context) *Meta {\n\tvalue := ctx.Value(metaContextKey{})\n\tif v, ok := value.(*Meta); ok {\n\t\treturn v\n\t}\n\n\treturn nil\n}",
"func (p *player) oxygen() int {\n\treturn p.remainingOxygen\n}",
"func (g *Game) GetWorth(h Hotel) HotelWorth {\n\treturn GetWorth(h, g.CurrentChainSizes[h])\n}",
"func list_featured_game(ctx *iris.Context) {\n gameshort := ctx.GetString(\"gameshort\")\n\n // Check if the game exists\n game := &objects.Game{}\n app.Database.Where(\"short = ?\", gameshort).Or(\"id = ?\", cast.ToUint(gameshort)).First(game)\n if game.Short != gameshort && game.ID != cast.ToUint(gameshort) {\n utils.WriteJSON(ctx, iris.StatusNotFound, utils.Error(\"The gameshort is invalid.\").Code(2125))\n return\n }\n\n var featured []objects.Featured\n app.Database.Find(&featured)\n output := []map[string]interface{}{}\n for _,element := range featured {\n if element.Mod.GameID == game.ID {\n output = append(output, utils.ToMap(element))\n }\n }\n utils.WriteJSON(ctx, iris.StatusOK, iris.Map{\"error\": false, \"count\": len(output), \"data\": output})\n}",
"func (s *sqlService) Items() (items []definitions.ItemDefinition) {\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.context = context.Background()\n\tdb := s.connect(s.context)\n\t// defer db.Close()\n\trows, err := db.QueryContext(s.context, \"SELECT id, name, description, command, base_price, stackable, special, members FROM items ORDER BY id\")\n\tif err != nil {\n\t\tlog.Warn(\"Couldn't load entity definitions from sqlService:\", err)\n\t\treturn\n\t}\n\tdefer rows.Close()\n\n\tfor rows.Next() {\n\t\tnextDef := definitions.ItemDefinition{}\n\t\trows.Scan(&nextDef.ID, &nextDef.Name, &nextDef.Description, &nextDef.Command, &nextDef.BasePrice, &nextDef.Stackable, &nextDef.Quest, &nextDef.Members)\n\t\titems = append(items, nextDef)\n\t}\n\trows.Close()\n\n\trows, err = db.QueryContext(s.context, \"SELECT id, skillIndex, level FROM item_wieldable_requirements\")\n\tif err != nil {\n\t\tlog.Error.Println(\"Couldn't load entity information from sql database:\", err)\n\t\treturn\n\t}\n\tvar id, skill, level int\n\tfor rows.Next() {\n\t\trows.Scan(&id, &skill, &level)\n\t\tif items[id].Requirements == nil {\n\t\t\titems[id].Requirements = make(map[int]int)\n\t\t}\n\t\titems[id].Requirements[skill] = level\n\t}\n\trows.Close()\n\n\trows, err = db.QueryContext(s.context, \"SELECT id, sprite, type, armour_points, magic_points, prayer_points, range_points, weapon_aim_points, weapon_power_points, pos, femaleOnly FROM item_wieldable\")\n\tif err != nil {\n\t\tlog.Error.Println(\"Couldn't load entity information from sql database:\", err)\n\t\treturn\n\t}\n\t// TODO: Integrate into ItemDefinition\n\tfor rows.Next() {\n\t\tnextDef := definitions.EquipmentDefinition{}\n\t\trows.Scan(&nextDef.ID, &nextDef.Sprite, &nextDef.Type, &nextDef.Armour, &nextDef.Magic, &nextDef.Prayer, &nextDef.Ranged, &nextDef.Aim, &nextDef.Power, &nextDef.Position, &nextDef.Female)\n\t\tdefinitions.Equipment = append(definitions.Equipment, nextDef)\n\t}\n\n\treturn\n}",
"func (c *Connection) CurrentPower() (float64, error) {\n\tres, err := c.meterCache.Get()\n\treturn res.FloatValue(\"POWER\"), err\n}",
"func (_BaseLibrary *BaseLibraryCaller) GetMeta(opts *bind.CallOpts, key []byte) ([]byte, error) {\n\tvar out []interface{}\n\terr := _BaseLibrary.contract.Call(opts, &out, \"getMeta\", key)\n\n\tif err != nil {\n\t\treturn *new([]byte), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new([]byte)).(*[]byte)\n\n\treturn out0, err\n\n}",
"func (self *TileSprite) Health() int{\n return self.Object.Get(\"health\").Int()\n}",
"func plugPower(ctx context.Context, promAPI promclient.API) (map[string]Power, error) {\n\tv, warns, err := promAPI.Query(ctx, plugQuery, time.Now())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Prometheus query evaluation: %w\", err)\n\t}\n\tfor _, w := range warns {\n\t\tvlogf(\"During Prometheus query evaluation: %s\", w)\n\t}\n\n\tif v.Type() != prommodel.ValVector {\n\t\treturn nil, fmt.Errorf(\"Prometheus query yielded %v, want vector\", v.Type())\n\t}\n\tvec := v.(prommodel.Vector)\n\tm := make(map[string]Power, len(vec))\n\tfor _, s := range vec {\n\t\tm[string(s.Metric[\"mac\"])] = Power(s.Value)\n\t}\n\treturn m, nil\n}",
"func DoWear(pp *PlayerChar, verb string, dobj thing.Thing,\n prep string, iobj thing.Thing, text string) {\n \n if dobj == nil {\n pp.QWrite(\"Wear what?\")\n return\n }\n \n bod := pp.Body()\n if !bod.IsHolding(dobj) {\n pp.QWrite(\"You are not holding %s.\", dobj.Normal(name.DEF_ART))\n return\n }\n \n if wt, ok := dobj.(thing.Wearable); ok {\n slot := wt.Slot()\n var slots_worn byte = 0\n var can_wear byte\n can_wear, _ = bod.WornSlots(slot)\n var already_worn = make([]string, 0, 0)\n for _, t := range pp.Inventory.Things {\n if wit, wok := t.(thing.Wearable); wok {\n if !bod.IsHolding(t) {\n if wit.Slot() == slot {\n slots_worn++\n already_worn = append(already_worn, t.Normal(0))\n }\n }\n }\n }\n \n if slots_worn < can_wear {\n if rh, _ := bod.HeldIn(\"right_hand\"); rh == dobj {\n bod.SetHeld(\"right_hand\", nil)\n } else {\n bod.SetHeld(\"left_hand\", nil)\n }\n f1p := map[string]interface{} { \"subj\": \"You\",\n \"verb\": \"put\",\n \"pp\": \"your\",\n \"dobj\": dobj.Normal(0), }\n f3p := map[string]interface{} { \"subj\": util.Cap(pp.Normal(0)),\n \"verb\": \"puts\",\n \"pp\": pp.PossPronoun(),\n \"dobj\": f1p[\"dobj\"], }\n var templ string\n slotName := bod.WornSlotName(slot)\n if slotName == \"\" {\n templ = \"{subj} {verb} on {dobj}.\"\n } else {\n templ = fmt.Sprintf(\"{subj} {verb} {dobj} %s.\", slotName)\n }\n \n m := msg.New(\"txt\", gstring.Sprintm(templ, f3p))\n m.Add(pp, \"txt\", gstring.Sprintm(templ, f1p))\n pp.where.Place.(*room.Room).Deliver(m)\n } else {\n f1p := map[string]interface{} { \"pp\": \"your\" }\n slot_str := gstring.Sprintm(bod.WornSlotName(slot), f1p)\n pp.QWrite(\"You are already wearing %s %s.\", util.EnglishList(already_worn), slot_str)\n }\n } else {\n pp.QWrite(\"You cannot wear %s.\", dobj.Normal(0))\n }\n}",
"func (s *skill) currentCoolDown() float64 {\n\ts.m.Lock()\n\tdefer s.m.Unlock()\n\treturn s.internalCD\n}",
"func (b *OGame) GetUniverseSpeed() int64 {\n\treturn b.getUniverseSpeed()\n}",
"func (env *Env) WineDescriptorLookup(descriptor string) *WineInfo {\n\tdescriptor = strings.ToLower(descriptor)\n\tvar wines []WineInfo\n\tenv.db.Find(&wines)\n\n\tconst debug = false\n\n\tconst WorstAcceptable = 6\n\tvar bestMatch WineInfo\n\tbestMatchR := WorstAcceptable\n\n\tif debug {\n\t\tfmt.Println(\"Looking for\", descriptor)\n\t}\n\n\tfor _, wine := range wines {\n\t\tr := fuzzy.RankMatch(descriptor, strings.ToLower(wine.Name))\n\t\tif debug {\n\t\t\tfmt.Printf(\" %d %s\\n\", r, wine.Name)\n\t\t}\n\t\tif r != -1 && r < bestMatchR {\n\t\t\tbestMatch, bestMatchR = wine, r\n\t\t}\n\t}\n\n\tif bestMatchR < WorstAcceptable {\n\t\tif debug {\n\t\t\tfmt.Println(\"Found:\", bestMatch.Name)\n\t\t}\n\t\treturn &bestMatch\n\t}\n\n\treturn nil\n}",
"func GetHardware() Hardware { return hardware }",
"func (v Velocity) GetRecent(request *restful.Request, response *restful.Response) {\n\trow := database.DB.QueryRow(\"SELECT speed FROM gatorloop.wheel1speed ORDER BY idWheel1Speed DESC LIMIT 1\")\n\tvar res sql.NullFloat64\n\terr := row.Scan(&res)\n\tif err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\tlog.Errorf(\"Row scan failed. %v\", err)\n\t\t}\n\t}\n\tvar wheel1Speed float64\n\tif res.Valid {\n\t\twheel1Speed = res.Float64\n\t} else {\n\t\twheel1Speed = 0.0\n\t}\n\n\trow = database.DB.QueryRow(\"SELECT speed FROM gatorloop.wheel2speed ORDER BY idWheel2Speed DESC LIMIT 1\")\n\terr = row.Scan(&res)\n\tif err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\tlog.Errorf(\"Row scan failed. %v\", err)\n\t\t}\n\t}\n\tvar wheel2Speed float64\n\tif res.Valid {\n\t\twheel2Speed = res.Float64\n\t} else {\n\t\twheel2Speed = 0.0\n\t}\n\n\tresponse.WriteEntity(Velocity{math.Max(wheel1Speed, wheel2Speed)})\n}",
"func (g *Game) listUsables() {\n\tletter := 'a'\n\tlistText, _ := sf.NewText(Font)\n\tlistText.SetCharacterSize(12)\n\tlistText.SetPosition(sf.Vector2f{12, 12})\n\tusables := make(map[rune]*Item)\n\tnames := make(map[*Item]string)\n\tfor k, i := range g.player.inventory {\n\t\tif i.effect != nil {\n\t\t\tappendString(listText, strconv.QuoteRune(letter)+\" - \"+k+\" x\"+strconv.Itoa(i.stack))\n\t\t\tusables[letter] = i\n\t\t\tnames[i] = k\n\t\t\tletter++\n\t\t}\n\t}\n\nlistLoop:\n\tfor g.window.IsOpen() {\n\t\tfor event := g.window.PollEvent(); event != nil; event = g.window.PollEvent() {\n\t\t\tswitch et := event.(type) {\n\t\t\tcase sf.EventTextEntered:\n\t\t\t\tdone, used := g.inventoryInput(et.Char, usables, names)\n\t\t\t\tif used != \"\" {\n\t\t\t\t\tusedI := g.player.inventory[used]\n\t\t\t\t\tif usedI.stack > 1 {\n\t\t\t\t\t\tusedI.stack--\n\t\t\t\t\t\tbreak listLoop\n\t\t\t\t\t}\n\t\t\t\t\tdelete(g.player.inventory, used)\n\t\t\t\t\tbreak listLoop\n\t\t\t\t}\n\t\t\t\tif done {\n\t\t\t\t\tbreak listLoop\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tg.window.Clear(sf.ColorBlack())\n\n\t\tg.window.SetView(g.logView)\n\t\tg.drawLog()\n\t\tg.window.SetView(g.gameView)\n\t\tlistText.Draw(g.window, sf.DefaultRenderStates())\n\t\tg.window.Display()\n\t}\n\n\tg.state = PLAY\n}",
"func (self *TileSprite) MaxHealth() int{\n return self.Object.Get(\"maxHealth\").Int()\n}",
"func (c *Connection) GetMeta(p Part) (Meta, error) {\n\tresp, err := c.client.Get(\"https://www.reichelt.de/index.html?ACTION=3&ARTICLE=\" + strconv.Itoa(p.Number))\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tdoc, err := html.Parse(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnodes := metaSelector.MatchAll(doc)\n\tif nodes == nil {\n\t\treturn nil, nil\n\t}\n\n\tresult := make(Meta)\n\n\tfor _, n := range nodes {\n\t\tif n.FirstChild == nil || n.FirstChild.FirstChild == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\theadline := n.FirstChild.FirstChild.Data\n\t\tdata := make(map[string]string)\n\n\t\tnames := metaItemNameSelector.MatchAll(n)\n\t\tvalues := metaItemValueSelector.MatchAll(n)\n\n\t\tif len(names) != len(values) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor i := range names {\n\t\t\tif names[i].FirstChild == nil || values[i].FirstChild == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdata[names[i].FirstChild.Data] = strings.Trim(values[i].FirstChild.Data, \" \")\n\t\t}\n\n\t\tresult[headline] = data\n\t}\n\n\t// insert datasheets\n\tnodes = datasheetSelector.MatchAll(doc)\n\ttemp := make(map[string]string)\n\n\tfor _, node := range nodes {\n\n\t\tif node.FirstChild == nil || node.FirstChild.Type != html.TextNode {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := node.FirstChild.Data\n\t\tlink := \"\"\n\n\t\t// find href of link\n\t\tfor _, k := range node.Attr {\n\t\t\tif k.Key == \"href\" {\n\t\t\t\tlink = k.Val\n\t\t\t}\n\t\t}\n\n\t\tif link == \"\" {\n\t\t\t// no link found\n\t\t\tcontinue\n\t\t}\n\n\t\ttemp[name] = link\n\t}\n\n\tresult[\"datasheets\"] = temp\n\n\t// get MPN\n\tnode := mpnSelector.MatchFirst(doc)\n\tif node != nil && node.FirstChild != nil {\n\t\ttemp[\"mpn\"] = node.FirstChild.Data\n\t}\n\n\t// get Manufacturer\n\tnode = manufacturerSelector.MatchFirst(doc)\n\tif node != nil && node.FirstChild != nil {\n\t\ttemp[\"manufacturer\"] = node.FirstChild.Data\n\t}\n\n\treturn result, nil\n}",
"func getCurrentSettings() metal3api.SettingsMap {\n\n\treturn metal3api.SettingsMap{\n\t\t\"L2Cache\": \"10x512 KB\",\n\t\t\"NetworkBootRetryCount\": \"20\",\n\t\t\"ProcVirtualization\": \"Disabled\",\n\t\t\"SecureBoot\": \"Enabled\",\n\t\t\"AssetTag\": \"X45672917\",\n\t}\n}",
"func (w *Wurfl) LookupProperties(useragent string, proplist []string, vproplist []string) *Device {\n\tua := C.CString(useragent)\n\tdevice := C.wurfl_lookup_useragent(w.wurfl, ua)\n\tC.free(unsafe.Pointer(ua))\n\n\tif device == nil {\n\t\treturn nil\n\t}\n\n\tm := NewKeyStireList()\n\tfor _, prop := range proplist {\n\t\tcprop, gen := w.getCString(prop)\n\t\tval := C.wurfl_device_get_capability(device, cprop)\n\t\tif gen {\n\t\t\tC.free(unsafe.Pointer(cprop))\n\t\t}\n\t\tm.Push(nil, prop, C.GoString(val))\n\t}\n\n\t// get the virtual properties\n\tmv := NewKeyStireList()\n\tfor _, prop := range vproplist {\n\t\tcprop, gen := w.getCString(prop)\n\t\tval := C.wurfl_device_get_virtual_capability(device, cprop)\n\t\tif gen {\n\t\t\tC.free(unsafe.Pointer(cprop))\n\t\t}\n\t\tmv.Push(nil, prop, C.GoString(val))\n\t}\n\n\td := &Device{\n\t\tDevice: C.GoString(C.wurfl_device_get_id(device)),\n\t\tCapabilities: m,\n\t\tVirtualCapabilities: mv,\n\t}\n\tC.wurfl_device_destroy(device)\n\n\treturn d\n}",
"func (wb *CfosPowerBrain) CurrentPower() (float64, error) {\n\tb, err := wb.conn.ReadHoldingRegisters(cfosRegPower, 2)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn float64(binary.BigEndian.Uint32(b)), err\n}",
"func tryDeduceTypeOfObject(item models.Item) []string {\n\t// Try to match known base type.\n\tif attributes, ok := models.BaseTypes[item.Type]; ok {\n\t\treturn attributes\n\t}\n\n\t// Try to deduce item type with the frame type.\n\tswitch item.FrameType {\n\tcase models.GemFrameType:\n\t\treturn models.Gems\n\tcase models.CurrencyFrameType:\n\t\tif strings.Contains(item.Type, \"Oil\") {\n\t\t\treturn models.Oils\n\t\t}\n\t\tif strings.Contains(item.Type, \"Fossil\") {\n\t\t\treturn models.Fossils\n\t\t}\n\t\tif strings.Contains(item.Type, \"Essence\") {\n\t\t\treturn models.Essences\n\t\t}\n\t\treturn models.Currencies\n\tcase models.DivinationCardFrameType:\n\t\treturn models.Cards\n\tcase models.QuestItemFrameType:\n\t\treturn models.Quests\n\t}\n\n\t// Detect jewels type.\n\tif strings.Contains(item.Type, \"Jewel\") {\n\t\treturn models.Jewels\n\t}\n\n\t// Detect map type.\n\tif strings.Contains(item.Type, \"Map\") {\n\t\treturn models.Maps\n\t}\n\n\treturn []string{}\n}",
"func getMonsters(env Environment, cr int) []Monster {\n\tcandidates := make([]Monster,10)\n\tfor name, mon := range monsterCache {\n\t\tcrInt, _ := strconv.Atoi(mon.Challenge_Rating)\n\t\tif crInt == cr {\n\t\t\tcandidates = append(candidates, mon)\n\t\t\tfmt.Println(name)\n\t\t}\n\t}\n\n\treturn candidates\n}",
"func itemIDToWeaponID(itemID int) int {\n\tfor wid, w := range defs.Weapons {\n\t\tif itemID == w.ItemID {\n\t\t\treturn wid\n\t\t}\n\t}\n\n\treturn -1\n}",
"func GetMeta() Meta {\n\treturn Meta{\n\t\tMajorMinorPatch: majorMinorPatch(),\n\t\tShort: Short(),\n\t\tLong: Long(),\n\t\tGitCommit: gitCommit(),\n\t\tGitDirty: gitDirty(),\n\t\tExtraGitCommit: extraGitCommitStamp,\n\t\tIsDev: isDev(),\n\t\tUnstableBranch: IsUnstableBuild(),\n\t\tCap: int(tailcfg.CurrentCapabilityVersion),\n\t}\n}",
"func GetNewCurrentEngineAndExtras(v *longhorn.Volume, es map[string]*longhorn.Engine) (currentEngine *longhorn.Engine, extras []*longhorn.Engine, err error) {\n\toldEngineName := \"\"\n\tfor name := range es {\n\t\te := es[name]\n\t\tif e.Spec.Active {\n\t\t\toldEngineName = e.Name\n\t\t}\n\t\tif (v.Spec.NodeID != \"\" && v.Spec.NodeID == e.Spec.NodeID) ||\n\t\t\t(v.Status.CurrentNodeID != \"\" && v.Status.CurrentNodeID == e.Spec.NodeID) ||\n\t\t\t(v.Status.PendingNodeID != \"\" && v.Status.PendingNodeID == e.Spec.NodeID) {\n\t\t\tif currentEngine != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"BUG: found the second new active engine %v besides %v\", e.Name, currentEngine.Name)\n\t\t\t}\n\t\t\tcurrentEngine = e\n\t\t\tcurrentEngine.Spec.Active = true\n\t\t} else {\n\t\t\textras = append(extras, e)\n\t\t}\n\t}\n\n\tif currentEngine == nil {\n\t\treturn nil, nil, fmt.Errorf(\"cannot find the current engine for the switching after iterating and cleaning up all engines for volume %v, all engines may be detached or in a transient state\", v.Name)\n\t}\n\n\tif currentEngine.Name != oldEngineName {\n\t\tlogrus.Infof(\"Found the new current engine %v for volume %v, the old one is %v\", currentEngine.Name, v.Name, oldEngineName)\n\t} else {\n\t\tlogrus.Infof(\"The current engine for volume %v is still %v\", v.Name, currentEngine.Name)\n\t}\n\n\treturn\n}",
"func (_ EntityHeadLook) Name() string { return \"*EntityHeadLook\" }",
"func (b BaseShip) GetSpeed(techs Researches) int64 {\n\tvar techDriveLvl int64 = 0\n\tif b.ID == SmallCargoID && techs.ImpulseDrive >= 5 {\n\t\tbaseSpeed := 10000\n\t\treturn int64(float64(baseSpeed) + (float64(baseSpeed)*0.2)*float64(techs.ImpulseDrive))\n\t}\n\tif b.ID == BomberID && techs.HyperspaceDrive >= 8 {\n\t\tbaseSpeed := 5000\n\t\treturn int64(float64(baseSpeed) + (float64(baseSpeed)*0.3)*float64(techs.HyperspaceDrive))\n\t}\n\tif b.ID == RecyclerID && (techs.ImpulseDrive >= 17 || techs.HyperspaceDrive >= 15) {\n\t\tif techs.HyperspaceDrive >= 15 {\n\t\t\treturn int64(float64(b.BaseSpeed)+(float64(b.BaseSpeed)*0.3)*float64(techs.HyperspaceDrive)) * 3\n\t\t}\n\t\treturn int64(float64(b.BaseSpeed)+(float64(b.BaseSpeed)*0.2)*float64(techs.ImpulseDrive)) * 2\n\t}\n\tif minLvl, ok := b.Requirements[CombustionDrive.ID]; ok {\n\t\ttechDriveLvl = techs.CombustionDrive\n\t\tif techDriveLvl < minLvl {\n\t\t\ttechDriveLvl = minLvl\n\t\t}\n\t\treturn int64(float64(b.BaseSpeed) + (float64(b.BaseSpeed)*0.1)*float64(techDriveLvl))\n\t} else if minLvl, ok := b.Requirements[ImpulseDrive.ID]; ok {\n\t\ttechDriveLvl = techs.ImpulseDrive\n\t\tif techDriveLvl < minLvl {\n\t\t\ttechDriveLvl = minLvl\n\t\t}\n\t\treturn int64(float64(b.BaseSpeed) + (float64(b.BaseSpeed)*0.2)*float64(techDriveLvl))\n\t} else if minLvl, ok := b.Requirements[HyperspaceDrive.ID]; ok {\n\t\ttechDriveLvl = techs.HyperspaceDrive\n\t\tif techDriveLvl < minLvl {\n\t\t\ttechDriveLvl = minLvl\n\t\t}\n\t\treturn int64(float64(b.BaseSpeed) + (float64(b.BaseSpeed)*0.3)*float64(techDriveLvl))\n\t}\n\treturn int64(float64(b.BaseSpeed) + (float64(b.BaseSpeed)*0.2)*float64(techDriveLvl))\n}",
"func (g *gm) loadWeaponList() {\n\tfileName := \"WeaponList.txt\"\n\tdata, err := g.source.Fetch(fileName)\n\tif err != nil {\n\t\tlog.Logger.Fatal(\"Fetch weapon List\",\n\t\t\tlog.ErrorField(err))\n\t}\n\n\tweapons := make(map[string]interface{})\n\tif err := json.Unmarshal(data, &weapons); err != nil {\n\t\tlog.Logger.Fatal(\"Unmarshal weapon List\",\n\t\t\tlog.ErrorField(err))\n\t}\n\n\tvar weaponList []int64\n\tfor k, _ := range weapons {\n\t\tweaponList = append(weaponList, parse.Int(k))\n\t}\n\n\tg.weaponList.Store(weaponList)\n}",
"func getFoodTarget(p *Player, data PlayerInput) (mgl32.Vec2, bool) {\n\ttargetAcquired := false\n\tok := false\n\tvar target mgl32.Vec2\n\tvar closestFood []int\n\ttmpPos := p.Pos[0]\n\tmin[0], min[1] = float64(tmpPos[0]-p.viewRadius), float64(tmpPos[1]-p.viewRadius)\n\tmax[0], max[1] = float64(tmpPos[0]+p.viewRadius), float64(tmpPos[1]+p.viewRadius)\n\n\tdata.Food.Search(min, max,\n\t\tfunc(min, max []float64, value interface{}) bool {\n\t\t\tif data.FoodDict[value.(int)].P.Sub(tmpPos).Len() < p.viewRadius {\n\t\t\t\tclosestFood = append(closestFood, value.(int))\n\t\t\t}\n\t\t\treturn true\n\t\t},\n\t)\n\n\tfor _, f := range closestFood {\n\t\tif !targetAcquired || tmpPos.Sub(data.FoodDict[f].P).Len() < tmpPos.Sub(target).Len() {\n\t\t\ttarget = data.FoodDict[f].P\n\t\t\ttargetAcquired = true\n\t\t\tok = true\n\t\t}\n\t}\n\n\treturn target, ok\n}",
"func (st *MemStorage) GetCurrent(gun, role string) (*time.Time, []byte, error) {\n\tid := entryKey(gun, role)\n\tst.lock.Lock()\n\tdefer st.lock.Unlock()\n\tspace, ok := st.tufMeta[id]\n\tif !ok || len(space) == 0 {\n\t\treturn nil, nil, ErrNotFound{}\n\t}\n\treturn &(space[len(space)-1].createupdate), space[len(space)-1].data, nil\n}",
"func getListOfGirls() (names map[rune][]string, err error) {\n\t// Initialization of map.\n\tnames = make(map[rune][]string)\n\n\t// Request for start page.\n\tvar response *http.Response\n\tresponse, err = http.Get(\"http://mgewiki.com/w/Category:Monster_Girls\")\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer response.Body.Close()\n\tif response.StatusCode != http.StatusOK {\n\t\treturn\n\t}\n\t// Searching begining of girls list in body.\n\tvar bodyBytes []byte\n\tbodyBytes, err = ioutil.ReadAll(response.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\tbodyString := string(bodyBytes)\n\theader := \"<h2>Pages in category \\\"Monster Girls\\\"</h2>\"\n\tlistStartIndex := strings.Index(bodyString, header)\n\tif listStartIndex == -1 {\n\t\treturn\n\t}\n\tbodyString = bodyString[listStartIndex+len(header):]\n\n\t// Pushing alphabet to map.\n\tfor k := 'A'; k <= 'Z'; k++ {\n\t\tnames[k] = make([]string, 0)\n\t}\n\n\t// Search Loop.\n\tfor {\n\t\t// Searching for girls in girls list.\n\t\tfor k := 'A'; k <= 'Z'; k++ {\n\t\t\tlinkStartIndex := strings.Index(bodyString, \"<h3>\"+string(k)+\"</h3>\")\n\t\t\tif linkStartIndex != -1 {\n\t\t\t\tbodyString = bodyString[linkStartIndex+len(\"<h3>_</h3><ul>\")+1:]\n\t\t\t\tkeyNames := bodyString[:strings.Index(bodyString, \"</ul>\")]\n\t\t\t\tfor {\n\t\t\t\t\tnameStartIndex := strings.Index(keyNames, \"\\\">\")\n\t\t\t\t\tif nameStartIndex == -1 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tkeyNames = keyNames[nameStartIndex+2:]\n\t\t\t\t\tnameStopIndex := strings.Index(keyNames, \"</a></li>\")\n\t\t\t\t\tif nameStopIndex == -1 {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tnames[k] = append(names[k], keyNames[:nameStopIndex])\n\t\t\t\t\tkeyNames = keyNames[nameStopIndex+len(\"</a></li>\"):]\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// Searching for next page.\n\t\tlinkStopIndex := strings.Index(bodyString, \"next page\")\n\t\tif (linkStopIndex != -1) && (bodyString[linkStopIndex-1] == '>') {\n\t\t\tquotMarkCounter := 0\n\t\t\tfor quotMarkCounter != 3 {\n\t\t\t\tlinkStopIndex--\n\t\t\t\tif bodyString[linkStopIndex] == '\"' {\n\t\t\t\t\tquotMarkCounter++\n\t\t\t\t}\n\t\t\t}\n\t\t\tlinkStartIndex := linkStopIndex - 1\n\t\t\tfor quotMarkCounter != 4 {\n\t\t\t\tlinkStartIndex--\n\t\t\t\tif bodyString[linkStartIndex] == '\"' {\n\t\t\t\t\tquotMarkCounter++\n\t\t\t\t}\n\t\t\t}\n\t\t\tvar response1 *http.Response\n\t\t\tresponse1, err = http.Get(strings.Replace(\"http://mgewiki.com\"+bodyString[linkStartIndex+1:linkStopIndex], \"&\", \"&\", -1))\n\t\t\tif err != nil {\n\t\t\t\tresponse1.Body.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif response1.StatusCode != http.StatusOK {\n\t\t\t\tresponse1.Body.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbodyBytes, err = ioutil.ReadAll(response1.Body)\n\t\t\tif err != nil {\n\t\t\t\tresponse1.Body.Close()\n\t\t\t\treturn\n\t\t\t}\n\t\t\tbodyString = string(bodyBytes)\n\t\t\tlistStartIndex := strings.Index(bodyString, header)\n\t\t\tif listStartIndex == -1 {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tbodyString = bodyString[listStartIndex+len(header):]\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\treturn\n}",
"func (wds *WeaponAISystem) Update(dt float32) {\n\n\tvar targets []WeaponAIEntity\n\n\tfor _, shooter := range wds.entities {\n\n\t\tif shooter.AIComponent.Type == components.Crazy {\n\t\t\tfor _, other := range wds.entities {\n\t\t\t\tif shooter.WeaponComponent.Loaded {\n\t\t\t\t\ttargets = append(targets, other)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}\n\n}",
"func (c *Character) UnequipWeapon(name string) {\n\tfor i, v := range c.Weapons {\n\t\tif v.Name == name {\n\t\t\tc.Weapons = append(c.Weapons, v)\n\t\t\tc.EquippedWeapons = append(c.EquippedWeapons[:i], c.EquippedWeapons[i+1:]...)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (E_OnfTest1Choice_Vehicle_Battery_Material) IsYANGGoEnum() {}",
"func getCurrentSpeed() float64 {\n\treturn sendMessage(-1.0)\n}"
] | [
"0.54226714",
"0.5285081",
"0.51907164",
"0.5007826",
"0.49341115",
"0.47776833",
"0.47510353",
"0.47389603",
"0.4724035",
"0.46884516",
"0.46460924",
"0.45455116",
"0.45438084",
"0.45210272",
"0.45004123",
"0.44821173",
"0.44700414",
"0.44634646",
"0.44237113",
"0.43863812",
"0.4384768",
"0.43753335",
"0.43648958",
"0.43433812",
"0.43415803",
"0.43359202",
"0.43288302",
"0.4321682",
"0.43010047",
"0.42508173",
"0.42462257",
"0.42331675",
"0.42304605",
"0.4219877",
"0.4202424",
"0.41782847",
"0.41675335",
"0.41655737",
"0.41293228",
"0.41286352",
"0.41256815",
"0.4125183",
"0.41243666",
"0.4114478",
"0.41084698",
"0.40844473",
"0.40696076",
"0.40516704",
"0.4046448",
"0.40244374",
"0.40209612",
"0.40176556",
"0.40070438",
"0.40037823",
"0.4002946",
"0.40007496",
"0.4000374",
"0.39996174",
"0.39968035",
"0.39958104",
"0.39940315",
"0.39910188",
"0.3981971",
"0.3980519",
"0.39778012",
"0.39769995",
"0.39724374",
"0.3965218",
"0.3963137",
"0.39590713",
"0.3958996",
"0.39567652",
"0.39474186",
"0.39425206",
"0.39409047",
"0.39377618",
"0.3926685",
"0.3924618",
"0.39205065",
"0.39185113",
"0.39142197",
"0.39123014",
"0.39064893",
"0.39057618",
"0.38971302",
"0.38932577",
"0.38883722",
"0.3874868",
"0.38648275",
"0.3859427",
"0.38526407",
"0.38503543",
"0.38474837",
"0.38432693",
"0.38391885",
"0.38357893",
"0.38337454",
"0.38323835",
"0.38263875",
"0.38102263"
] | 0.75457877 | 0 |
LastModified returns the latest modified time for all the files and directories. The files in each directory are checked for their last modified time. TODO: use goroutines to speed this up nolint: gocyclo | func LastModified(fileOrDir ...string) (time.Time, error) {
var latest time.Time
// TODO: does this error contain enough context?
walker := func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() && info.Name() == ".dobi" {
return filepath.SkipDir
}
if info.ModTime().After(latest) {
latest = info.ModTime()
}
return nil
}
for _, file := range fileOrDir {
info, err := os.Stat(file)
if err != nil {
return latest, err
}
switch info.IsDir() {
case false:
if info.ModTime().After(latest) {
latest = info.ModTime()
continue
}
default:
if err := filepath.Walk(file, walker); err != nil {
return latest, err
}
}
}
return latest, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (f *File) LastModified() (*time.Time, error) {\n\tattr, err := f.getObjectAttrs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &attr.Updated, nil\n}",
"func (dlr DirectoryListResponse) LastModified() string {\n\treturn PathList(dlr).LastModified()\n}",
"func (fdf ExfatFileDirectoryEntry) LastModifiedTimestamp() time.Time {\n\treturn fdf.LastModifiedTimestampRaw.TimestampWithOffset(int(fdf.LastModifiedUtcOffset))\n}",
"func (o *UcsdBackupInfoAllOf) GetLastModified() time.Time {\n\tif o == nil || o.LastModified == nil {\n\t\tvar ret time.Time\n\t\treturn ret\n\t}\n\treturn *o.LastModified\n}",
"func (t *Tag) LastModified() (lastModified time.Time) {\n\tfor _, history := range t.History {\n\t\tif history.Created.After(lastModified) {\n\t\t\tlastModified = history.Created\n\t\t}\n\t}\n\treturn lastModified\n}",
"func LastModifiedDate(fileID string) (string, error) {\n\tcnt := auth.GetClient()\n\n\tsrv, err := drive.New(cnt)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfile, err := srv.Files.Get(fileID).Do()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn filterGoogleTime(file.ModifiedDate), nil\n}",
"func dirTimestamp(dir string) (ts time.Time, reterr error) {\n\n\tdirf, err := os.Open(dir)\n\tif err != nil {\n\t\treturn ts, err\n\t}\n\tdefer dirf.Close()\n\n\tfis, err := dirf.Readdir(-1)\n\tif err != nil {\n\t\treturn ts, err\n\t}\n\n\tfor _, fi := range fis {\n\n\t\tif fi.Name() == \".\" || fi.Name() == \"..\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t// for directories we recurse\n\t\tif fi.IsDir() {\n\t\t\tdirTs, err := dirTimestamp(filepath.Join(dir, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn ts, err\n\t\t\t}\n\t\t\tif dirTs.After(ts) {\n\t\t\t\tts = dirTs\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\t// for files check timestamp\n\t\tmt := fi.ModTime()\n\t\tif mt.After(ts) {\n\t\t\tts = mt\n\t\t}\n\t}\n\n\treturn\n}",
"func (dgpr DirectoryGetPropertiesResponse) LastModified() string {\n\treturn PathGetPropertiesResponse(dgpr).LastModified()\n}",
"func getRecentlyModified(match []string, modified int, verbose bool) []string {\n\tvar matches []string // slice to hold the matching file paths\n\tvar paths []string // slice to hold the file paths\n\tvar modTimes []time.Time // slice to hold the modification times of the files\n\n\t// Loop through the provided slice of file names\n\tfor _, file := range match {\n\t\t// Get the file info and handle any errors\n\t\tinfo, err := os.Stat(file)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\tcontinue\n\t\t}\n\t\t// Append the file path and modification time to the corresponding slices\n\t\tpaths = append(paths, file)\n\t\tmodTimes = append(modTimes, info.ModTime())\n\t}\n\n\t// Sort the slices by modification time\n\tsort.SliceStable(paths, func(i, j int) bool {\n\t\treturn modTimes[i].After(modTimes[j])\n\t})\n\n\t// Get the current time\n\tnow := time.Now()\n\n\t// Loop through the sorted slice of file paths\n\tfor i, path := range paths {\n\t\t// Check if the file was modified within the last modified hours\n\t\tif now.Sub(modTimes[i]) < (time.Duration(modified) * time.Hour) {\n\t\t\t// If it was, append the file path to the matches slice\n\t\t\tmatches = append(matches, path)\n\t\t} else {\n\t\t\tif verbose {\n\t\t\t\tlog.Printf(\"[IGNORING] Last modified time: %s older than configured timeframe (%d hours): %s.\", modTimes[i], modified, path)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Return the slice of matching file paths\n\treturn matches\n}",
"func (bi *BinaryInfo) LastModified() time.Time {\n\treturn bi.lastModified\n}",
"func filesModified(fileModTimes map[string]time.Time) bool {\n\treturnVal := false\n\tfor f := range fileModTimes {\n\t\tfInfo, err := os.Stat(f)\n\t\tif err != nil {\n\t\t\toutput.FatalError(err)\n\t\t}\n\t\tif fileModTimes[f] != fInfo.ModTime() {\n\t\t\tfileModTimes[f] = fInfo.ModTime()\n\t\t\treturnVal = true\n\t\t}\n\t}\n\treturn returnVal\n}",
"func (o *UcsdBackupInfoAllOf) SetLastModified(v time.Time) {\n\to.LastModified = &v\n}",
"func (o *UcsdBackupInfoAllOf) GetLastModifiedOk() (*time.Time, bool) {\n\tif o == nil || o.LastModified == nil {\n\t\treturn nil, false\n\t}\n\treturn o.LastModified, true\n}",
"func (dcr DirectoryCreateResponse) LastModified() string {\n\treturn PathCreateResponse(dcr).LastModified()\n}",
"func (o *StorageHyperFlexStorageContainer) GetLastModifiedTime() time.Time {\n\tif o == nil || o.LastModifiedTime == nil {\n\t\tvar ret time.Time\n\t\treturn ret\n\t}\n\treturn *o.LastModifiedTime\n}",
"func walkDirs(sql bool) (latestSrcMod time.Time) {\n\td, err := os.Open(camRoot)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdirs, err := d.Readdirnames(-1)\n\td.Close()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfor _, dir := range dirs {\n\t\tsrcPath := filepath.Join(camRoot, filepath.FromSlash(dir))\n\t\tif maxMod, err := walkDir(srcPath, walkOpts{sqlite: sql}); err != nil {\n\t\t\tlog.Fatalf(\"Error while walking %s: %v\", srcPath, err)\n\t\t} else {\n\t\t\tif maxMod.After(latestSrcMod) {\n\t\t\t\tlatestSrcMod = maxMod\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (s *s3) GetLastModified(key string) (time.Time, error) {\n\tinput := awss3.GetObjectInput{\n\t\tKey: aws.String(key),\n\t\tBucket: aws.String(s.bucket),\n\t}\n\tobj, err := s.client.GetObject(&input)\n\tif err != nil {\n\t\treturn time.Time{}, err\n\t}\n\n\treturn *obj.LastModified, nil\n}",
"func (m RoleMapping) GetLastModified() time.Time {\n\treturn m.UpdatedAt\n}",
"func (fi *fileInfo) ModTime() time.Time { return fi.mtime }",
"func getLastModifiedTime(href string) (int64, error) {\n\tcurrTime := time.Now().UTC().Unix()\n\tresponse, err := http.Head(href)\n\tif err != nil {\n\t\tfmt.Println(\"Error while downloading head of\", href)\n\t\treturn currTime, err\n\t} else {\n\t\ttimeString := \"\"\n\t\tfor k, v := range response.Header {\n\t\t\tif k == \"Last-Modified\" {\n\t\t\t\ttimeString = v[0]\n\t\t\t\tif err != nil {\n\t\t\t\t\tfmt.Println(err)\n\t\t\t\t\treturn currTime, err\n\t\t\t\t} else {\n\t\t\t\t\tlayout := \"Mon, 02 Jan 2006 15:04:05 GMT\"\n\t\t\t\t\tt, err := time.Parse(layout, timeString)\n\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tfmt.Println(\"Time Parsing error\")\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t\treturn currTime, err\n\t\t\t\t\t}\n\t\t\t\t\treturn t.UTC().Unix(), nil\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn currTime, nil\n\t}\n}",
"func (o FlowOutput) LastModifiedTime() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Flow) pulumi.StringOutput { return v.LastModifiedTime }).(pulumi.StringOutput)\n}",
"func (s *DebugRuleEvaluationStatus) SetLastModifiedTime(v time.Time) *DebugRuleEvaluationStatus {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *AppSummary) SetLastModified(v time.Time) *AppSummary {\n\ts.LastModified = &v\n\treturn s\n}",
"func (o SchemaOutput) LastModified() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Schema) pulumi.StringOutput { return v.LastModified }).(pulumi.StringOutput)\n}",
"func (cfr CreateFilesystemResponse) LastModified() string {\n\treturn cfr.rawResponse.Header.Get(\"Last-Modified\")\n}",
"func (s *Evaluation) SetLastModifiedTime(v time.Time) *Evaluation {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (o *GetFleetsFleetIDMembersOK) SetLastModified(lastModified string) {\n\to.LastModified = lastModified\n}",
"func (m IdentityRole) GetLastModified() time.Time {\n\treturn m.UpdatedAt\n}",
"func (o *StorageHyperFlexStorageContainer) GetLastModifiedTimeOk() (*time.Time, bool) {\n\tif o == nil || o.LastModifiedTime == nil {\n\t\treturn nil, false\n\t}\n\treturn o.LastModifiedTime, true\n}",
"func (s *ProfilerRuleEvaluationStatus) SetLastModifiedTime(v time.Time) *ProfilerRuleEvaluationStatus {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (fi *FileInfo) ModTime() time.Time {\n\treturn fi.LastModified\n}",
"func (s *EvaluationForm) SetLastModifiedTime(v time.Time) *EvaluationForm {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (bbcblr BlockBlobsCommitBlockListResponse) LastModified() time.Time {\n\ts := bbcblr.rawResponse.Header.Get(\"Last-Modified\")\n\tif s == \"\" {\n\t\treturn time.Time{}\n\t}\n\tt, err := time.Parse(time.RFC1123, s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}",
"func (s *DescribeDeviceFleetOutput) SetLastModifiedTime(v time.Time) *DescribeDeviceFleetOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (o *UcsdBackupInfoAllOf) HasLastModified() bool {\n\tif o != nil && o.LastModified != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (s *DescribeFeatureGroupOutput) SetLastModifiedTime(v time.Time) *DescribeFeatureGroupOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *VocabularyFilterInfo) SetLastModifiedTime(v time.Time) *VocabularyFilterInfo {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *FeatureGroup) SetLastModifiedTime(v time.Time) *FeatureGroup {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func FindModifiedFiles() []string {\n\tresult := GitDiff(\"--name-only\", \"-z\")\n\tresult = append(result, GitDiff(\"--name-only\", \"--cached\", \"-z\")...)\n\n\treturn result\n}",
"func (s *Vocabulary) SetLastModifiedTime(v time.Time) *Vocabulary {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (m Resource) GetLastModified() time.Time {\n\treturn m.UpdatedAt\n}",
"func (s *DescribeCodeRepositoryOutput) SetLastModifiedTime(v time.Time) *DescribeCodeRepositoryOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeInferenceExperimentOutput) SetLastModifiedTime(v time.Time) *DescribeInferenceExperimentOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (pl PageList) LastModified() time.Time {\n\ts := pl.rawResponse.Header.Get(\"Last-Modified\")\n\tif s == \"\" {\n\t\treturn time.Time{}\n\t}\n\tt, err := time.Parse(time.RFC1123, s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}",
"func (s *GetVocabularyFilterOutput) SetLastModifiedTime(v time.Time) *GetVocabularyFilterOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (g *GitRepo) ModifiedFiles(c *git.Commit, tree *git.Tree) ([]string, error) {\n\topts := &git.DiffOptions{}\n\tmodified := []string{}\n\tparentCount := c.ParentCount()\n\tfor i := uint(0); i <= parentCount; i++ {\n\t\tparentID := c.ParentId(i)\n\t\tif parentID == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlog.Debugf(\"Looking up parent commit-id '%s'\", parentID.String())\n\t\tparent, err := g.r.LookupCommit(parentID)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer parent.Free()\n\n\t\tparentTree, err := parent.Tree()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer parentTree.Free()\n\n\t\tdiff, err := g.r.DiffTreeToTree(parentTree, tree, opts)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer diff.Free()\n\n\t\t_ = diff.ForEach(func(f git.DiffDelta, p float64) (git.DiffForEachHunkCallback, error) {\n\t\t\tmodified = append(modified, f.OldFile.Path)\n\t\t\treturn nil, nil\n\t\t}, git.DiffDetailFiles)\n\t}\n\treturn modified, nil\n}",
"func (s *UpdateVocabularyFilterOutput) SetLastModifiedTime(v time.Time) *UpdateVocabularyFilterOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (cgpr ContainersGetPropertiesResponse) LastModified() time.Time {\n\ts := cgpr.rawResponse.Header.Get(\"Last-Modified\")\n\tif s == \"\" {\n\t\treturn time.Time{}\n\t}\n\tt, err := time.Parse(time.RFC1123, s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}",
"func (d *Dataset) LastModifiedDate() string {\n\treturn d.lastModified.Format(\"02 Jan 2006 15:04:05\")\n}",
"func (s *DescribeAppImageConfigOutput) SetLastModifiedTime(v time.Time) *DescribeAppImageConfigOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *VocabularyInfo) SetLastModifiedTime(v time.Time) *VocabularyInfo {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *EvaluationSummary) SetLastModifiedTime(v time.Time) *EvaluationSummary {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (r *Distribution) LastModifiedTime() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"lastModifiedTime\"])\n}",
"func (*XMLDocument) LastModified() (lastModified string) {\n\tmacro.Rewrite(\"$_.lastModified\")\n\treturn lastModified\n}",
"func (s *AppImageConfigDetails) SetLastModifiedTime(v time.Time) *AppImageConfigDetails {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (gfpr GetFilesystemPropertiesResponse) LastModified() string {\n\treturn gfpr.rawResponse.Header.Get(\"Last-Modified\")\n}",
"func (o TriggerOutput) LastModified() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Trigger) pulumi.StringOutput { return v.LastModified }).(pulumi.StringOutput)\n}",
"func (s *DeviceFleetSummary) SetLastModifiedTime(v time.Time) *DeviceFleetSummary {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *CreateVocabularyFilterOutput) SetLastModifiedTime(v time.Time) *CreateVocabularyFilterOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (m *BrowserSiteList) GetLastModifiedDateTime()(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time) {\n val, err := m.GetBackingStore().Get(\"lastModifiedDateTime\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)\n }\n return nil\n}",
"func (s *View) SetLastModifiedTime(v time.Time) *View {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (f *file) ModTime() (t time.Time) {\n\tif f.Object != nil {\n\t\tt = *f.LastModified\n\t}\n\treturn\n}",
"func (bl BlockList) LastModified() time.Time {\n\ts := bl.rawResponse.Header.Get(\"Last-Modified\")\n\tif s == \"\" {\n\t\treturn time.Time{}\n\t}\n\tt, err := time.Parse(time.RFC1123, s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}",
"func (s *InferenceExperimentSummary) SetLastModifiedTime(v time.Time) *InferenceExperimentSummary {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func GetLastFileMetas(count int) []FileMeta {\n\t// sort so the updatest will be on the first\n\tfmt.Println(fileMetas)\n\tfMetaArray := make([]FileMeta, len(fileMetas))\n\tfor _, v := range fileMetas {\n\t\tfMetaArray = append(fMetaArray, v)\n\t}\n\n\t// make sure the fMetaArray is sorted by UploadAt\n\t// Customized sort rule\n\tsort.Sort(ByUploadTime(fMetaArray))\n\treturn fMetaArray[0:count]\n}",
"func (s *GetVocabularyOutput) SetLastModifiedTime(v time.Time) *GetVocabularyOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *UpdateVocabularyOutput) SetLastModifiedTime(v time.Time) *UpdateVocabularyOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *LanguageModel) SetLastModifiedTime(v time.Time) *LanguageModel {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeArtifactOutput) SetLastModifiedTime(v time.Time) *DescribeArtifactOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeLiveSourceOutput) SetLastModifiedTime(v time.Time) *DescribeLiveSourceOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *EvaluationFormSummary) SetLastModifiedTime(v time.Time) *EvaluationFormSummary {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *InferenceRecommendationsJob) SetLastModifiedTime(v time.Time) *InferenceRecommendationsJob {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (ababr AppendBlobsAppendBlockResponse) LastModified() time.Time {\n\ts := ababr.rawResponse.Header.Get(\"Last-Modified\")\n\tif s == \"\" {\n\t\treturn time.Time{}\n\t}\n\tt, err := time.Parse(time.RFC1123, s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn t\n}",
"func (s *DescribeInferenceRecommendationsJobOutput) SetLastModifiedTime(v time.Time) *DescribeInferenceRecommendationsJobOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *UpdateMedicalVocabularyOutput) SetLastModifiedTime(v time.Time) *UpdateMedicalVocabularyOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *CreateVocabularyOutput) SetLastModifiedTime(v time.Time) *CreateVocabularyOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeTrialOutput) SetLastModifiedTime(v time.Time) *DescribeTrialOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *Image) SetLastModifiedTime(v time.Time) *Image {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeProjectOutput) SetLastModifiedTime(v time.Time) *DescribeProjectOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeVodSourceOutput) SetLastModifiedTime(v time.Time) *DescribeVodSourceOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (o *StorageHyperFlexStorageContainer) SetLastModifiedTime(v time.Time) {\n\to.LastModifiedTime = &v\n}",
"func CommitLogFilesForTime(commitLogsDir string, t time.Time) ([]string, error) {\n\tcommitLogFileForTimePattern := fmt.Sprintf(commitLogFileForTimeTemplate, t.UnixNano())\n\treturn commitlogFiles(commitLogsDir, commitLogFileForTimePattern)\n}",
"func (f *file) getModifyTime(filename string) int64 {\n\tfileInfo, _ := os.Stat(filename)\n\tmodTime := fileInfo.ModTime()\n\treturn modTime.Unix()\n}",
"func (s *GetMedicalVocabularyOutput) SetLastModifiedTime(v time.Time) *GetMedicalVocabularyOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *VocabularySummary) SetLastModifiedTime(v time.Time) *VocabularySummary {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeImageOutput) SetLastModifiedTime(v time.Time) *DescribeImageOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeLabelingJobOutput) SetLastModifiedTime(v time.Time) *DescribeLabelingJobOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeDomainOutput) SetLastModifiedTime(v time.Time) *DescribeDomainOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (sfpr SetFilesystemPropertiesResponse) LastModified() string {\n\treturn sfpr.rawResponse.Header.Get(\"Last-Modified\")\n}",
"func (ls ListSchema) LastModified() string {\n\treturn ls.rawResponse.Header.Get(\"Last-Modified\")\n}",
"func (s *CreateMedicalVocabularyOutput) SetLastModifiedTime(v time.Time) *CreateMedicalVocabularyOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *GetTaskTemplateOutput) SetLastModifiedTime(v time.Time) *GetTaskTemplateOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeFeatureMetadataOutput) SetLastModifiedTime(v time.Time) *DescribeFeatureMetadataOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeChannelOutput) SetLastModifiedTime(v time.Time) *DescribeChannelOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func ModificationsWatcher(restrictFilesToSuffix string, dirOk func([]string, []string, string, string) bool, postponeAnyModsLaterThanThisAgo time.Duration, onModTime func(map[string]os.FileInfo, int64, bool)) func([]string, []string) int {\n type gather struct {\n os.FileInfo\n modTime int64\n }\n var gathers map[string]gather\n var modnewest int64\n checkmodtime := func(fullpath string, fileinfo os.FileInfo) {\n if fileinfo == nil {\n fileinfo, _ = os.Stat(fullpath)\n }\n if fileinfo != nil {\n modtime := fileinfo.ModTime().UnixNano()\n gathers[fullpath] = gather{fileinfo, modtime}\n if modtime > modnewest {\n modnewest = modtime\n }\n }\n }\n\n var dirok func(string, string) bool\n var ondirorfile func(string, os.FileInfo) bool\n ondirorfile = func(fullpath string, fileinfo os.FileInfo) bool {\n if isdir := fileinfo.IsDir(); (isdir && (dirok == nil || dirok(fullpath, fileinfo.Name()))) ||\n ((!isdir) && (len(restrictFilesToSuffix) == 0 || ustr.Suff(fullpath, restrictFilesToSuffix))) {\n checkmodtime(fullpath, fileinfo)\n if isdir {\n if dircontents, err := ReadDirFunc(fullpath); err == nil {\n for _, fi := range dircontents {\n ondirorfile(filepath.Join(fullpath, fi.Name()), fi)\n }\n }\n }\n }\n return true\n }\n\n var raisings map[string]os.FileInfo\n firstrun, gatherscap, postpone, timeslastraised :=\n true, 64, int64(postponeAnyModsLaterThanThisAgo), make(map[string]int64, 128)\n return func(dirpathsrecursive []string, dirpathsother []string) (numraised int) {\n tstart := time.Now().UnixNano()\n modnewest, gathers, dirok = 0, make(map[string]gather, gatherscap), func(dirfullpath string, dirname string) bool {\n return dirOk(dirpathsrecursive, dirpathsother, dirfullpath, dirname)\n }\n for i := range dirpathsrecursive {\n _, _ = walk(dirpathsrecursive[i], true, false, ondirorfile, nil)\n }\n for _, fullpath := range dirpathsother {\n _, _ = walk(fullpath, false, false, nil, ondirorfile)\n checkmodtime(fullpath, nil)\n }\n gatherscap = len(gathers)\n if firstrun || postpone <= 0 || (tstart-modnewest) > postpone {\n for fullpath, gather := range gathers {\n if tlr, _ := timeslastraised[fullpath]; tlr == 0 || gather.modTime == 0 || tlr <= gather.modTime {\n if timeslastraised[fullpath] = tstart; raisings == nil {\n raisings = make(map[string]os.FileInfo, 4)\n }\n raisings[fullpath] = gather.FileInfo\n }\n }\n }\n onModTime(raisings, tstart, firstrun)\n numraised, raisings, firstrun = len(raisings), nil, false\n return\n }\n}",
"func (s *DescribeContextOutput) SetLastModifiedTime(v time.Time) *DescribeContextOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *CreateLiveSourceOutput) SetLastModifiedTime(v time.Time) *CreateLiveSourceOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *UpdateTaskTemplateOutput) SetLastModifiedTime(v time.Time) *UpdateTaskTemplateOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *CodeRepositorySummary) SetLastModifiedTime(v time.Time) *CodeRepositorySummary {\n\ts.LastModifiedTime = &v\n\treturn s\n}",
"func (s *DescribeLineageGroupOutput) SetLastModifiedTime(v time.Time) *DescribeLineageGroupOutput {\n\ts.LastModifiedTime = &v\n\treturn s\n}"
] | [
"0.6308349",
"0.6235137",
"0.61921304",
"0.60726833",
"0.60483104",
"0.58098406",
"0.5772359",
"0.57584804",
"0.5713177",
"0.56888556",
"0.5680812",
"0.5633928",
"0.559856",
"0.54990345",
"0.5487972",
"0.5473835",
"0.54545426",
"0.5437479",
"0.5435753",
"0.5435252",
"0.54045814",
"0.53950197",
"0.53827035",
"0.5366311",
"0.5357836",
"0.5350742",
"0.5339625",
"0.53355175",
"0.5332332",
"0.5319759",
"0.53069156",
"0.5298845",
"0.5293032",
"0.5291333",
"0.5291133",
"0.52831787",
"0.527353",
"0.5267408",
"0.52673554",
"0.52540433",
"0.5248939",
"0.5245435",
"0.52437544",
"0.52426875",
"0.52348816",
"0.52296287",
"0.52288395",
"0.5228633",
"0.5222008",
"0.5218377",
"0.52137715",
"0.5209881",
"0.5207901",
"0.52058977",
"0.52035815",
"0.5202189",
"0.519981",
"0.5195613",
"0.5192959",
"0.5189912",
"0.5184959",
"0.51805085",
"0.5174347",
"0.51705605",
"0.5159415",
"0.51591486",
"0.5156512",
"0.5145912",
"0.5143176",
"0.5136887",
"0.51329875",
"0.51323074",
"0.5131646",
"0.51172614",
"0.5116938",
"0.51147103",
"0.51097745",
"0.51057047",
"0.51038426",
"0.5103762",
"0.51025677",
"0.510108",
"0.50992346",
"0.5097137",
"0.50930065",
"0.5089162",
"0.50887096",
"0.50880444",
"0.5088031",
"0.50878376",
"0.5086296",
"0.5083275",
"0.5081567",
"0.5077716",
"0.50768787",
"0.5076488",
"0.50726306",
"0.50717616",
"0.506704",
"0.5066428"
] | 0.7738381 | 0 |
onMessage is called when a new event arrives in the roomserver input stream. | func (r *Inputer) Start() error {
_, err := r.JetStream.Subscribe(
r.InputRoomEventTopic,
// We specifically don't use jetstream.WithJetStreamMessage here because we
// queue the task off to a room-specific queue and the ACK needs to be sent
// later, possibly with an error response to the inputter if synchronous.
func(msg *nats.Msg) {
roomID := msg.Header.Get("room_id")
var inputRoomEvent api.InputRoomEvent
if err := json.Unmarshal(msg.Data, &inputRoomEvent); err != nil {
_ = msg.Term()
return
}
_ = msg.InProgress()
index := roomID + "\000" + inputRoomEvent.Event.EventID()
if _, ok := eventsInProgress.LoadOrStore(index, struct{}{}); ok {
// We're already waiting to deal with this event, so there's no
// point in queuing it up again. We've notified NATS that we're
// working on the message still, so that will have deferred the
// redelivery by a bit.
return
}
roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Inc()
r.workerForRoom(roomID).Act(nil, func() {
_ = msg.InProgress() // resets the acknowledgement wait timer
defer eventsInProgress.Delete(index)
defer roomserverInputBackpressure.With(prometheus.Labels{"room_id": roomID}).Dec()
if err := r.processRoomEvent(r.ProcessContext.Context(), &inputRoomEvent); err != nil {
if !errors.Is(err, context.DeadlineExceeded) && !errors.Is(err, context.Canceled) {
sentry.CaptureException(err)
}
logrus.WithError(err).WithFields(logrus.Fields{
"room_id": roomID,
"event_id": inputRoomEvent.Event.EventID(),
"type": inputRoomEvent.Event.Type(),
}).Warn("Roomserver failed to process async event")
_ = msg.Term()
} else {
_ = msg.Ack()
}
})
},
// NATS wants to acknowledge automatically by default when the message is
// read from the stream, but we want to override that behaviour by making
// sure that we only acknowledge when we're happy we've done everything we
// can. This ensures we retry things when it makes sense to do so.
nats.ManualAck(),
// Use a durable named consumer.
r.Durable,
// If we've missed things in the stream, e.g. we restarted, then replay
// all of the queued messages that were waiting for us.
nats.DeliverAll(),
// Ensure that NATS doesn't try to resend us something that wasn't done
// within the period of time that we might still be processing it.
nats.AckWait(MaximumMissingProcessingTime+(time.Second*10)),
)
return err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func onMessage(c *gnet.Connection, channel uint16,\n\tmsg []byte) error {\n\tfmt.Printf(\"Event Callback: message event: addr= %s, channel %v, msg= %s \\n\", c.Addr(), channel, msg)\n\treturn nil\n}",
"func (s *Realm) OnMessage(c *connection.Connection, ctx interface{}, data []byte) (out []byte) {\r\n\tpacket := ctx.(*proto.WorldPacket)\r\n\r\n\tslog.Printf(\"Seq(%d) %s(%d) Recog(%d) Size(%d): %s\",\r\n\t\tpacket.Seq, packet.Opcode, packet.Opcode, packet.Recog,\r\n\t\tpacket.Size, packet)\r\n\r\n\te := c.Context().(*list.Element)\r\n\tsess := e.Value.(*RealmSession)\r\n\tsess.HandlePacket(packet)\r\n\r\n\treturn\r\n}",
"func (this *Device) onMessageReceived(message []byte) {\n if event, err := this.mapEvent(message); err == nil {\n this.messageHandler(this, event)\n }\n}",
"func (sio *SocketIO) onMessage(c *Conn, msg Message) {\n\tif sio.callbacks.onMessage != nil {\n\t\tsio.callbacks.onMessage(c, msg)\n\t}\n}",
"func (c *Conn) OnMessage(messageType int, p []byte) {\n\tfor _, str := range strings.Split(string(p), \"\\r\\n\") {\n\t\tif str != \"\" {\n\t\t\tdata, _ := utils.GbkToUtf8([]byte(str))\n\t\t\tdoc := xml.NewDecoder(bytes.NewReader(data))\n\t\t\tnode := c.respParseAttr(doc)\n\t\t\tnode.Raw = string(data)\n\t\t\tswitch node.getAttr(\"id\") {\n\t\t\tcase \"1\":\n\t\t\t\tstatus := node.getElem(\"result\").getAttr(\"status\")\n\t\t\t\tif status == \"ok\" {\n\t\t\t\t\tc.key1 = node.getElem(\"key\").getAttr(\"key1\")\n\t\t\t\t\t// 初始化心跳\n\t\t\t\t\tc.loginChatServerSuccess()\n\t\t\t\t} else {\n\t\t\t\t\tEventChan <- EventMessage{Type: \"error\", Msg: fmt.Sprintf(\"%v\", \"进入直播间失败\")}\n\t\t\t\t}\n\t\t\tcase \"2\":\n\t\t\tcase \"3\":\n\t\t\tdefault:\n\t\t\t\tc.socketData(node)\n\t\t\t}\n\t\t\tc.pushNode(node)\n\t\t}\n\t}\n}",
"func (w Wrapper) OnNewMessage(f NewMessageHandler) {\n\tw.longpoll.EventNew(4, func(i []interface{}) error {\n\t\tvar event NewMessage\n\t\tif err := event.parse(i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf(event)\n\n\t\treturn nil\n\t})\n}",
"func (c *Client) OnMessage(cb NativeMessageFunc) {\n\tc.onNativeMessageListeners = append(c.onNativeMessageListeners, cb)\n}",
"func (s *Server) OnMessage(msg *Message, sess *Session) {\n\tmsg.SetHeader(proto.Sender, sess.ID)\n\tmsg.SetHeader(proto.Host, s.ServerAddress.Address)\n\tif msg.Id() == \"\" {\n\t\tmsg.SetId(uuid())\n\t}\n\tif msg.Cmd() != proto.Heartbeat {\n\t\t//log.Printf(msg.String())\n\t}\n\n\thandleUrlMessage(msg)\n\n\tcmd := msg.Cmd()\n\thandler, ok := s.handlerTable[cmd]\n\tif ok {\n\t\thandler(s, msg, sess)\n\t\treturn\n\t}\n\tres := NewMessageStatus(400, \"Bad format: command(%s) not support\", cmd)\n\tsess.WriteMessage(res)\n}",
"func (c *Client) OnNewRoomstateMessage(callback func(channel string, user User, message Message)) {\n\tc.onNewRoomstateMessage = callback\n}",
"func (object *MQMessageHandler) OnMQMessage(raw []byte, offset int64) {\n}",
"func (conn *Conn) OnMessage(f func(string, []byte)) {\n\tconn.onMessage = append(conn.onMessage, f)\n}",
"func (c *Client) OnMessage(handler MessageHandler) {\n\tproxy := &eventProxy{client: c, onMessage: handler}\n\tc.client.OnMessage(proxy)\n}",
"func (i *IncomingSocket) OnMessage(belowNexus nexusHelper.Nexus, moneysocketMessage base2.MoneysocketMessage) {\n\tlog.Info(\"websocket nexus got message\")\n\ti.onMessage(belowNexus, moneysocketMessage)\n}",
"func (l DefaultBlazeListener) OnMessage(ctx context.Context, msg MessageView, userId string) error {\n\tlog.Println(\"I got a message: \", msg)\n\tdata, err := base64.StdEncoding.DecodeString(msg.Data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif msg.Category == \"SYSTEM_ACCOUNT_SNAPSHOT\" {\n\t\tvar transfer TransferView\n\t\tif err := json.Unmarshal(data, &transfer); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlog.Println(\"I got a snapshot: \", transfer)\n\t\treturn nil\n\t} else {\n\t\tlog.Printf(\"I got a message, it said: %s\", string(data))\n\t\treturn nil\n\t}\n}",
"func OnMSG(c diam.Conn, m *diam.Message) {\n\tlog.Printf(\"Receiving message from %s\", c.RemoteAddr().String())\n\t//log.Println(m)\n}",
"func (s *OutputNotificationDataConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {\n\tmsg := msgs[0] // Guaranteed to exist if onMessage is called\n\tuserID := string(msg.Header.Get(jetstream.UserID))\n\n\t// Parse out the event JSON\n\tvar data eventutil.NotificationData\n\tif err := json.Unmarshal(msg.Data, &data); err != nil {\n\t\tsentry.CaptureException(err)\n\t\tlog.WithField(\"user_id\", userID).WithError(err).Error(\"user API consumer: message parse failure\")\n\t\treturn true\n\t}\n\n\tstreamPos, err := s.db.UpsertRoomUnreadNotificationCounts(ctx, userID, data.RoomID, data.UnreadNotificationCount, data.UnreadHighlightCount)\n\tif err != nil {\n\t\tsentry.CaptureException(err)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"user_id\": userID,\n\t\t\t\"room_id\": data.RoomID,\n\t\t}).WithError(err).Error(\"Could not save notification counts\")\n\t\treturn false\n\t}\n\n\ts.stream.Advance(streamPos)\n\ts.notifier.OnNewNotificationData(userID, types.StreamingToken{NotificationDataPosition: streamPos})\n\n\tlog.WithFields(log.Fields{\n\t\t\"user_id\": userID,\n\t\t\"room_id\": data.RoomID,\n\t\t\"streamPos\": streamPos,\n\t}).Trace(\"Received notification data from user API\")\n\n\treturn true\n}",
"func (c *Client) OnNewMessage(callback func(channel string, user User, message Message)) {\n\tc.onNewMessage = callback\n}",
"func OnMessage(plugins []Plugin, client *matrix.Client, event *matrix.Event) {\n\tresponses := runCommands(plugins, event)\n\n\tfor _, content := range responses {\n\t\t_, err := client.SendMessageEvent(event.RoomID, \"m.room.message\", content)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\tlog.ErrorKey: err,\n\t\t\t\t\"room_id\": event.RoomID,\n\t\t\t\t\"user_id\": event.Sender,\n\t\t\t\t\"content\": content,\n\t\t\t}).Print(\"Failed to send command response\")\n\t\t}\n\t}\n}",
"func (p *Protocol) OnMessageRecv(client *noise.Client) {\n\tp.Ack(client.ID())\n}",
"func (s *seatPlugin) OnMessage(ctx *muxer.Context) {\n\tparts := ctx.Fields\n\tif len(parts) <= 1 {\n\t\tctx.SendPrivately(how2use)\n\t\treturn\n\t}\n\n\t// Do we have the user id?\n\tgroupID, ok := s.mapping[ctx.FromUserID]\n\tif !ok {\n\t\t// Update the cache...\n\t\tcacheWasLastUpdated := time.Now().Sub(s.lastUpdated)\n\t\tif cacheWasLastUpdated < cacheCanBeUpdatedEvery {\n\t\t\ttryAgain := cacheCanBeUpdatedEvery - cacheWasLastUpdated\n\t\t\tctx.SendPrivately(fmt.Sprintf(\"Please wait %s before trying again\", tryAgain.String()))\n\t\t\treturn\n\t\t}\n\n\t\tif err := s.updateCache(); err != nil {\n\t\t\tctx.SendPrivately(\"Sorry m8 try again later\")\n\t\t\treturn\n\t\t}\n\n\t\t// Try again after the cache update\n\t\tgroupID, ok = s.mapping[ctx.FromUserID]\n\t\tif !ok {\n\t\t\tctx.SendPrivately(fmt.Sprintf(registerMessage, s.server))\n\t\t\treturn\n\t\t}\n\t}\n\n\tswitch strings.ToLower(parts[1]) {\n\tcase \"characters\", \"chars\":\n\t\ts.sendCharacterInfo(groupID, ctx)\n\tdefault:\n\t\tctx.SendPrivately(how2use)\n\t}\n}",
"func (c Client) OnMessage(message []byte) error {\n\tvar (\n\t\tv ws.SubscriptionResponse\n\t)\n\n\terr := json.Unmarshal(message, &v)\n\tif err != nil {\n\t\t// in case an error happens just assume the message is due to a subscription and try to process it\n\t\treturn c.ProcessMessage(message)\n\t}\n\n\tsplit := strings.Split(v.Request.Args[0], \".\")\n\tchannel := split[0]\n\tmarket := split[1]\n\n\tswitch v.Request.Op {\n\tcase Subscribed:\n\t\tc.subscriptions[channel] = append(c.subscriptions[channel], market)\n\t\tc.logger.Infof(\"Successfully %v to channel {%v} for market {%v}\", v.Request.Op, channel, market)\n\t\tbreak\n\tcase Unsubscribed:\n\t\tvar subs []string\n\t\tfor _, sub := range c.subscriptions[channel] {\n\t\t\tif sub == market {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tsubs = append(subs, sub)\n\t\t}\n\t\tc.subscriptions[channel] = subs\n\t\tc.logger.Infof(\"Successfully %v to channel {%v} for market {%v}\", v.Request.Op, channel, market)\n\t\tbreak\n\t}\n\n\treturn nil\n}",
"func (c *Command) OnMsg(user string, msg string) {\n}",
"func (h *Handler) OnMessage(m []byte) (Packet, error) {\n\t//switch over different packet opcodes\n\tswitch OPCode(m[0]) {\n\tcase OPCodeServerMouseMove:\n\t\tp := MouseMove{}\n\t\terr := Decode(m, &p)\n\t\treturn &p, err\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unknown packet: %+v\", m)\n\t}\n}",
"func (sio *SocketIO) OnMessage(f func(*Conn, Message)) os.Error {\n\tif sio.muxed {\n\t\treturn os.NewError(\"OnMessage: already muxed\")\n\t}\n\tsio.callbacks.onMessage = f\n\treturn nil\n}",
"func (g *Game) OnMessageReceived() chan interface{} {\n\treturn g.onMessageReceived\n}",
"func (t *OutputReceiptConsumer) onMessage(ctx context.Context, msgs []*nats.Msg) bool {\n\tmsg := msgs[0] // Guaranteed to exist if onMessage is called\n\treceipt := syncTypes.OutputReceiptEvent{\n\t\tUserID: msg.Header.Get(jetstream.UserID),\n\t\tRoomID: msg.Header.Get(jetstream.RoomID),\n\t\tEventID: msg.Header.Get(jetstream.EventID),\n\t\tType: msg.Header.Get(\"type\"),\n\t}\n\n\tswitch receipt.Type {\n\tcase \"m.read\":\n\t\t// These are allowed to be sent over federation\n\tcase \"m.read.private\", \"m.fully_read\":\n\t\t// These must not be sent over federation\n\t\treturn true\n\t}\n\n\t// only send receipt events which originated from us\n\t_, receiptServerName, err := gomatrixserverlib.SplitID('@', receipt.UserID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"user_id\", receipt.UserID).Error(\"failed to extract domain from receipt sender\")\n\t\treturn true\n\t}\n\tif !t.isLocalServerName(receiptServerName) {\n\t\treturn true\n\t}\n\n\ttimestamp, err := strconv.ParseUint(msg.Header.Get(\"timestamp\"), 10, 64)\n\tif err != nil {\n\t\t// If the message was invalid, log it and move on to the next message in the stream\n\t\tlog.WithError(err).Errorf(\"EDU output log: message parse failure\")\n\t\tsentry.CaptureException(err)\n\t\treturn true\n\t}\n\n\treceipt.Timestamp = spec.Timestamp(timestamp)\n\n\tjoined, err := t.db.GetJoinedHosts(ctx, receipt.RoomID)\n\tif err != nil {\n\t\tlog.WithError(err).WithField(\"room_id\", receipt.RoomID).Error(\"failed to get joined hosts for room\")\n\t\treturn false\n\t}\n\n\tnames := make([]spec.ServerName, len(joined))\n\tfor i := range joined {\n\t\tnames[i] = joined[i].ServerName\n\t}\n\n\tcontent := map[string]fedTypes.FederationReceiptMRead{}\n\tcontent[receipt.RoomID] = fedTypes.FederationReceiptMRead{\n\t\tUser: map[string]fedTypes.FederationReceiptData{\n\t\t\treceipt.UserID: {\n\t\t\t\tData: fedTypes.ReceiptTS{\n\t\t\t\t\tTS: receipt.Timestamp,\n\t\t\t\t},\n\t\t\t\tEventIDs: []string{receipt.EventID},\n\t\t\t},\n\t\t},\n\t}\n\n\tedu := &gomatrixserverlib.EDU{\n\t\tType: spec.MReceipt,\n\t\tOrigin: string(receiptServerName),\n\t}\n\tif edu.Content, err = json.Marshal(content); err != nil {\n\t\tlog.WithError(err).Error(\"failed to marshal EDU JSON\")\n\t\treturn true\n\t}\n\n\tif err := t.queues.SendEDU(edu, receiptServerName, names); err != nil {\n\t\tlog.WithError(err).Error(\"failed to send EDU\")\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (c *app) ReceiveMessage(msg message) {\n\tc.in <- msg\n}",
"func onMessage(cm gcm.CcsMessage) error {\n\ttoylog.Infoln(\"Message, from:\", cm.From, \"with:\", cm.Data)\n\t// Echo the message with a tag.\n\tcm.Data[\"echoed\"] = true\n\tm := gcm.HttpMessage{To: cm.From, Data: cm.Data}\n\tr, err := gcm.SendHttp(*serverKey, m)\n\tif err != nil {\n\t\ttoylog.Errorln(\"Error sending message.\", err)\n\t\treturn err\n\t}\n\ttoylog.Infof(\"Sent message. %+v -> %+v\", m, r)\n\treturn nil\n}",
"func (o *PluginIgmpClient) OnEvent(msg string, a, b interface{}) {\n\n}",
"func (b *BTCC) OnMessage(message []byte, output chan socketio.Message) {\n\tif b.Verbose {\n\t\tlog.Printf(\"%s Websocket message received which isn't handled by default.\\n\", b.GetName())\n\t\tlog.Println(string(message))\n\t}\n}",
"func (c app) ReceiveMessage(msg message) {\n\tc.in <- msg\n}",
"func (c *Client) messageReceived(data []byte) {\n\tfor _, v := range c.onDebugListeners {\n\t\tv(data)\n\t}\n\tif bytes.HasPrefix(data, c.config.EvtMessagePrefix) {\n\t\t//it's a custom ws message\n\t\treceivedEvt := c.messageSerializer.getWebsocketCustomEvent(data)\n\t\tvalue, ok := c.onEventListeners.Load(string(receivedEvt))\n\t\tif !ok || value == nil {\n\t\t\treturn\n\t\t}\n\n\t\tlisteners, ok := value.([]MessageFunc)\n\t\tif !ok || len(listeners) == 0 {\n\t\t\treturn // if not listeners for this event exit from here\n\t\t}\n\n\t\tcustomMessage, err := c.messageSerializer.deserialize(receivedEvt, data)\n\t\tif customMessage == nil || err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tfor i := range listeners {\n\t\t\tif fn, ok := listeners[i].(func()); ok { // its a simple func(){} callback\n\t\t\t\tfn()\n\t\t\t} else if fnString, ok := listeners[i].(func(string)); ok {\n\n\t\t\t\tif msgString, is := customMessage.(string); is {\n\t\t\t\t\tfnString(msgString)\n\t\t\t\t} else if msgInt, is := customMessage.(int); is {\n\t\t\t\t\t// here if server side waiting for string but client side sent an int, just convert this int to a string\n\t\t\t\t\tfnString(strconv.Itoa(msgInt))\n\t\t\t\t}\n\n\t\t\t} else if fnInt, ok := listeners[i].(func(int)); ok {\n\t\t\t\tfnInt(customMessage.(int))\n\t\t\t} else if fnBool, ok := listeners[i].(func(bool)); ok {\n\t\t\t\tfnBool(customMessage.(bool))\n\t\t\t} else if fnBytes, ok := listeners[i].(func([]byte)); ok {\n\t\t\t\tfnBytes(customMessage.([]byte))\n\t\t\t} else {\n\t\t\t\tlisteners[i].(func(interface{}))(customMessage)\n\t\t\t}\n\n\t\t}\n\t} else {\n\t\t// it's native websocket message\n\t\tfor i := range c.onNativeMessageListeners {\n\t\t\tc.onNativeMessageListeners[i](data)\n\t\t}\n\t}\n\n}",
"func (w Wrapper) OnReadInMessages(f ReadInMessagesHandler) {\n\tw.longpoll.EventNew(6, func(i []interface{}) error {\n\t\tvar event ReadInMessages\n\t\tif err := event.parse(i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf(event)\n\n\t\treturn nil\n\t})\n}",
"func (s CurrentTimePlugin) OnMessage(ctx *muxer.Context) {\n\tctx.Send(fmt.Sprintf(\"The time is %s\", time.Now().UTC().Format(timeLayout)))\n}",
"func (s *SlackListener) handleMessageEvent(ev *slack.MessageEvent) error {\n\t// Do not react to itself.\n\tif ev.User == s.botID {\n\t\treturn nil\n\t}\n\tif !strings.HasPrefix(ev.Msg.Text, \"<@\"+s.botID+\">\") {\n\t\treturn nil\n\t}\n\n\t// Parse message\n\tm := strings.Split(strings.TrimSpace(ev.Msg.Text), \" \")[1:]\n\n\tsyn, err := getTickerSynonym(m[0])\n\tif err {\n\t\treturn err\n\t}\n\n\tif err := s.pushCoinInfo(syn, ev.Channel); err != nil {\n\t\treturn fmt.Errorf(\"coin info push failed: %s\", err)\n\t}\n\n\treturn nil\n}",
"func handleMessage(msg *game.InMessage, ws *websocket.Conn, board *game.Board) {\n\tfmt.Println(\"Message Got: \", msg)\n\n}",
"func handleEventMessage(event slackevents.EventsAPIEvent, client *slack.Client) error {\n\tswitch event.Type {\n\t// First we check if this is an CallbackEvent\n\tcase slackevents.CallbackEvent:\n\n\t\tinnerEvent := event.InnerEvent\n\t\t// Yet Another Type switch on the actual Data to see if its an AppMentionEvent\n\t\tswitch ev := innerEvent.Data.(type) {\n\t\tcase *slackevents.AppMentionEvent:\n\t\t\t// The application has been mentioned since this Event is a Mention event\n\t\t\terr := handleAppMentionEvent(ev, client)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn errors.New(\"unsupported event type\")\n\t}\n\treturn nil\n}",
"func (l *Listener) HandleMessage(msg *stan.Msg) {\n\tl.incMsg <- msg\n}",
"func (w *BaseWebsocketClient) OnWsMessage(payload []byte, isBinary bool) {}",
"func (m *Server) onSubscribe(msg *packet.Message) {\n\tklog.Infof(\"OnSubscribe recevie msg from topic: %s\", msg.Topic)\n\tNewMessageMux().Dispatch(msg.Topic, msg.Payload)\n}",
"func (s *SocketModeAdapter) onChannelMessage(event *slackevents.MessageEvent, info *adapter.Info) *adapter.ProviderEvent {\n\treturn s.wrapEvent(\n\t\tadapter.EventChannelMessage,\n\t\tinfo,\n\t\t&adapter.ChannelMessageEvent{\n\t\t\tChannelID: event.Channel,\n\t\t\tText: ScrubMarkdown(event.Text),\n\t\t\tUserID: event.User,\n\t\t},\n\t)\n}",
"func (o *PluginDnsClient) OnEvent(msg string, a, b interface{}) {}",
"func (s *server) OnTopicEvent(ctx context.Context, in *pb.TopicEventRequest) (*empty.Empty, error) {\n\tfmt.Println(\"Topic message arrived\")\n\treturn &empty.Empty{}, nil\n}",
"func (connection *SSEConnection) SetOnMessage(cb func([]byte)) {\n\n}",
"func (h *hub) onMasterMessage(data []byte) error {\n\treturn nil\n}",
"func (c *Client) OnEvent(name string) <-chan *Message {\n\treturn c.subs.subscribe(name)\n}",
"func (h *GrpcHandler) ProcessMessage(eventBody []byte) error {\n\topts, err := h.getConnOptions()\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconn, err := grpc.Dial(h.getAddr(), opts...)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer conn.Close()\n\n\tclient := pb.NewEventMapperClient(conn)\n\n\tevent := &pb.Event{}\n\n\tif err := json.Unmarshal(eventBody, event); err != nil {\n\t\treturn err\n\t}\n\n\teventReq := &pb.EventRequest{\n\t\tUserToken: h.getUserToken(),\n\t\tRKey: \"\",\n\t\tEvent: event,\n\t}\n\n\t_, err = client.CreateEvent(context.Background(), eventReq)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (u *UnityServer) readMessage() {\n\tfor {\n\t\tdata := make([]byte, 8192)\n\t\tn, err := u.conn.Read(data)\n\t\tif err != nil {\n\t\t\tu.Logger.Errorf(\"Error: Reading socket - %v\", err)\n\t\t\tu.stop <- true\n\t\t\tbreak\n\t\t}\n\t\tu.incoming <- string(data[:n])\n\t}\n}",
"func (p *Player) inStream() {\n\tdefer func() {\n\t\tp.ghub.destroyPlayer(p)\n\t\tp.conn.Close()\n\t}()\n\n\t//p.conn.SetPongHandler(func(string) error { p.conn.SetReadDeadline(time.Now().Add(pongWaitTime)); return nil })\n\n\tfor {\n\t\tvar data map[string]interface{}\n\t\terr := p.conn.ReadJSON(&data)\n\n\t\tif err != nil {\n\t\t\tlog.Println(\"err reading msg\")\n\t\t\tbreak\n\t\t}\n\n\t\t//update player position in central\n\t\tp.xPos = data[\"positionX\"].(float64)\n\t\tp.yPos = data[\"positionY\"].(float64)\n\n\t\tp.ghub.publish <- data\n\t}\n}",
"func (o *IpfixNsPlugin) OnEvent(msg string, a, b interface{}) {}",
"func (e *Epher) PushHandler(rw http.ResponseWriter, r *http.Request) {\n\troom := chi.URLParam(r, \"room\")\n\tallPublishedCounter.Inc()\n\n\te.roomLock.RLock()\n\tdefer e.roomLock.RUnlock()\n\n\tif _, ok := e.Rooms[room]; ok {\n\t\tb, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t_ = e.Rooms[room].BroadcastText(b)\n\t} else {\n\t\tlog.Println(\"No listener in\", room)\n\t\trw.WriteHeader(http.StatusServiceUnavailable)\n\t\t_, _ = rw.Write([]byte(\"no_room\"))\n\n\t\tnoListenerPublishedCounter.Inc()\n\t}\n}",
"func handleNewMessage(msg *arbor.ProtocolMessage, recents *RecentList, store *arbor.Store, broadcaster *Broadcaster) {\n\terr := msg.ChatMessage.AssignID()\n\tif err != nil {\n\t\tlog.Println(\"Error creating new message\", err)\n\t}\n\trecents.Add(msg.ChatMessage)\n\tstore.Add(msg.ChatMessage)\n\tbroadcaster.Send(msg)\n}",
"func (s *Socket) listenToMessagesIn() {\n\tfor {\n\t\tmessage := new(Message)\n\t\terr := s.connection.ReadJSON(&message)\n\t\tif err != nil {\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseGoingAway, websocket.CloseAbnormalClosure) {\n\t\t\t\tlog.Printf(\"Error: %v\", err)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\ts.messagesIn <- message\n\t}\n}",
"func (bot *Hitbot) MessageHandler() {\n\tfor {\n\t\t_, p, err := bot.conn.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t//log.Printf(\"Message: %v\", string(p)) //debug info\n\t\tif string(p[:3]) == \"2::\" {\n\t\t\tbot.conn.WriteMessage(websocket.TextMessage, []byte(\"2::\"))\n\t\t\t//log.Print(\"Ping!\")\n\t\t\tcontinue\n\t\t} else if string(p[:3]) == \"1::\" {\n\t\t\tlog.Print(\"Connection successful!\")\n\t\t\tfor _, channel := range bot.channels {\n\t\t\t\tbot.joinChannel(channel)\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if string(p[:4]) == \"5:::\" {\n\t\t\tbot.parseMessage(p[4:])\n\t\t}\n\t}\n}",
"func handlerMsg(msg []byte) {\n\tchats <- string(msg)\n}",
"func (s *SlackListener) handleMessageEvent(ev *slack.MessageEvent) error {\n\n\t// Only response mention to bot. Ignore else.\n\tif !strings.HasPrefix(ev.Msg.Text, fmt.Sprintf(\"<@%s> \", s.BotID)) {\n\t\treturn nil\n\t}\n\n\terr := s.applyHandlers(ev)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (l Listener) OnTextMessage(e *gumble.TextMessageEvent) {\n\tif l.TextMessage != nil {\n\t\tl.TextMessage(e)\n\t}\n}",
"func (c *Client) OnNewClearchatMessage(callback func(channel string, user User, message Message)) {\n\tc.onNewClearchatMessage = callback\n}",
"func (s *BaseRFC5424Listener) EnterMsg(ctx *MsgContext) {}",
"func (s *Switch) onRemoteClientMessage(ctx context.Context, msg net.IncomingMessageEvent) error {\n\tif msg.Message == nil || msg.Conn == nil {\n\t\treturn ErrBadFormat1\n\t}\n\n\t// protocol messages are encrypted in payload\n\t// Locate the session\n\tsession := msg.Conn.Session()\n\n\tif session == nil {\n\t\treturn ErrNoSession\n\t}\n\n\tdecPayload, err := session.OpenMessage(msg.Message)\n\tif err != nil {\n\t\treturn ErrFailDecrypt\n\t}\n\n\tpm := &ProtocolMessage{}\n\tif err = types.BytesToInterface(decPayload, pm); err != nil {\n\t\ts.logger.With().Error(\"error deserializing message\", log.Err(err))\n\t\treturn ErrBadFormat2\n\t}\n\n\t// check that the message was sent within a reasonable time\n\tif ok := timesync.CheckMessageDrift(pm.Metadata.Timestamp); !ok {\n\t\t// TODO: consider kill connection with this node and maybe blacklist\n\t\t// TODO : Also consider moving send timestamp into metadata(encrypted).\n\t\treturn ErrOutOfSync\n\t}\n\n\tdata, err := ExtractData(pm.Payload)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Add metadata collected from p2p message (todo: maybe pass sender and protocol inside metadata)\n\tp2pmeta := service.P2PMetadata{FromAddress: msg.Conn.RemoteAddr()}\n\n\t_, ok := s.gossipProtocolHandlers[pm.Metadata.NextProtocol]\n\n\ts.logger.WithContext(ctx).With().Debug(\"handle incoming message\",\n\t\tlog.String(\"protocol\", pm.Metadata.NextProtocol),\n\t\tlog.FieldNamed(\"peer_id\", msg.Conn.RemotePublicKey()),\n\t\tlog.Bool(\"is_gossip\", ok))\n\n\tif ok {\n\t\t// if this message is tagged with a gossip protocol, relay it.\n\t\treturn s.gossip.Relay(ctx, msg.Conn.RemotePublicKey(), pm.Metadata.NextProtocol, data)\n\t}\n\n\t// route authenticated message to the registered protocol\n\t// messages handled here are always processed by direct based protocols, only the gossip protocol calls ProcessGossipProtocolMessage\n\treturn s.ProcessDirectProtocolMessage(ctx, msg.Conn.RemotePublicKey(), pm.Metadata.NextProtocol, data, p2pmeta)\n}",
"func (tv *TV) MessageHandler() (err error) {\n\tdefer func() {\n\t\ttv.resMutex.Lock()\n\t\tfor _, ch := range tv.res {\n\t\t\tclose(ch)\n\t\t}\n\t\ttv.res = nil\n\t\ttv.resMutex.Unlock()\n\t}()\n\n\tfor {\n\t\tmt, p, err := tv.ws.ReadMessage()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif mt != websocket.TextMessage {\n\t\t\tcontinue\n\t\t}\n\n\t\tmsg := Message{}\n\n\t\terr = json.Unmarshal(p, &msg)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttv.resMutex.Lock()\n\t\tch := tv.res[msg.ID]\n\t\ttv.resMutex.Unlock()\n\n\t\tch <- msg\n\t}\n}",
"func (s *Switch) processMessage(ctx context.Context, ime net.IncomingMessageEvent) {\n\t// Extract request context and add to log\n\tif ime.RequestID != \"\" {\n\t\tctx = log.WithRequestID(ctx, ime.RequestID)\n\t} else {\n\t\tctx = log.WithNewRequestID(ctx)\n\t\ts.logger.WithContext(ctx).Warning(\"got incoming message event with no requestID, setting new id\")\n\t}\n\n\tif s.config.MsgSizeLimit != config.UnlimitedMsgSize && len(ime.Message) > s.config.MsgSizeLimit {\n\t\ts.logger.WithContext(ctx).With().Error(\"message is too big to process\",\n\t\t\tlog.Int(\"limit\", s.config.MsgSizeLimit),\n\t\t\tlog.Int(\"actual\", len(ime.Message)))\n\t\treturn\n\t}\n\n\tif err := s.onRemoteClientMessage(ctx, ime); err != nil {\n\t\t// TODO: differentiate action on errors\n\t\ts.logger.WithContext(ctx).With().Error(\"err reading incoming message, closing connection\",\n\t\t\tlog.FieldNamed(\"sender_id\", ime.Conn.RemotePublicKey()),\n\t\t\tlog.Err(err))\n\t\tif err := ime.Conn.Close(); err == nil {\n\t\t\ts.cPool.CloseConnection(ime.Conn.RemotePublicKey())\n\t\t\ts.Disconnect(ime.Conn.RemotePublicKey())\n\t\t}\n\t}\n}",
"func (player *Player) OnPlayerMessage(handler func(event *event.PlayerMessage)) {\n\tplayer.on(event.NamePlayerMessage, func(e interface{}) {\n\t\thandler(e.(*event.PlayerMessage))\n\t})\n}",
"func (t *cliTransHandler) OnMessage(ctx context.Context, args, result remote.Message) (context.Context, error) {\n\t// do nothing\n\treturn ctx, nil\n}",
"func (api *WebSocketAPI) DispatchMessageEvent(data []byte) error {\n\tif err := api.Battle.DispatchMessageEvent(data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (p *metadataService) onTopicSubscribe(e event.Event) {\n}",
"func (p *pipelineEventCB) onEvents(data []interface{}, acked int) {\n\tp.pushMsg(eventsDataMsg{data: data, total: len(data), acked: acked})\n}",
"func (em *EventManager) handleMessage(i interface{}) error {\n\tvar err error = nil\n\n\tm := i.(Message)\n\n\tlog.Printf(\"Processing message\")\n\n\t// TODO: process incoming message\n\tres, err := em.wc.HandleMessage(m.Text())\n\tif err != nil {\n\t\tlog.Printf(\"Error processing message\")\n\t\treturn err\n\t}\n\n\t// TODO: act on message\n\tswitch res.Intent {\n\tcase analysis.IntentCreateEvent:\n \n\n\tcase analysis.IntentCancelEvent:\n // Check owner, when and where match\n\n // Delete event\n\n\tcase analysis.IntentFindEvents:\n // Find events based on when / where filters\n\n\tcase analysis.IntentAttending:\n // Set attending flag for a given event\n\n\tcase analysis.IntentNotAttending:\n // Set not attending flag for a given event\n\n case analysis.IntentArrived:\n // Set arrived flag for a given event\n\n\t}\n\n\tif res.Response != \"\" {\n\t\tlog.Printf(\"Sending reply\")\n\n\t\t// Generate reply\n\t\treply := m.Reply(res.Response)\n\n\t\t// Locate matching client\n\t\tc, ok := em.clients[m.Connector()]\n\t\tif !ok {\n\t\t\tlog.Printf(\"Invalid connector %s for response\", m.Connector())\n\t\t\treturn err\n\t\t}\n\n\t\t// Send reply\n\t\tc.Send(reply)\n\t}\n\n\treturn err\n}",
"func (nc *NetClient) OnReceive(handler func(*arbor.ChatMessage)) {\n\tnc.receiveHandler = handler\n}",
"func (sub *Subscriber) ReadMessage() {\n\t// create new reader instance\n\treader := bufio.NewReader(sub.Conn)\n\tfor {\n\t\t// read message\n\t\trequest, _, err := reader.ReadLine()\n\t\tif err != nil {\n\t\t\t// stop reading buffer and exit goroutine\n\t\t\tlog.Println(\"Can't read line from socket:\", err)\n\t\t\tbreak\n\t\t} else {\n\t\t\t// check request before pushing into channel\n\t\t\tif len(request) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Connection Id as the address\n\t\t\tconnId := sub.Conn.RemoteAddr().String()\n\t\t\t// push message from subscriber to message channel\n\t\t\tlog.Printf(\"Received message %s on socket %s\", request, connId)\n\t\t\tsub.PublishBoard <- Message{\n\t\t\t\tPayload: request,\n\t\t\t\tSourceConnId: connId,\n\t\t\t}\n\t\t}\n\t}\n}",
"func IngameMessageCreate(lh *loghandler.LogHandler, server *servers.Server, event *loghandler.SayEvent) {\n\tlog.Println(fmt.Sprintf(\"Received command from '%s' on server '%s': %s\", event.Username, server.Name, event.Message))\n\tIngameCommand.Handle(ingame.CommandInfo{SayEvent: *event, Server: server}, event.Message, 0)\n}",
"func (outlet *CollectorOutleter) OnEvent(data *util.Data) bool {\n\t// We'll actually receive a few spurious blank events that FileBeat likes to use to keep its registry\n\t// of file offsets up-to-date. We're really only interested in events that have messages, and we're really\n\t// only concerned with the messages themselves. FileBeat creates the events, typically, in the harvester.\n\t// To see the generation of these events look at log.harverster's Run method.\n\tevent := data.GetEvent()\n\tif event.Fields != nil {\n\t\t// We only want to send over events that actually have message fields (which should actually be all\n\t\t// of them, but just in case). So this is just Go's way of saying \"if map event.Fields has a key\n\t\t// 'message' (while also storing the value at that key to 'msg')\"\n\t\tif msg, ok := event.Fields[\"message\"]; ok {\n\t\t\t// \"msg\" at this stage is just a generic interface{}, which is kind of the closest Go has to\n\t\t\t// a void pointer. We want to try to cast it to a string (which it always should be) before sending\n\t\t\t// it down the wire.\n\t\t\tif str, ok := msg.(string); ok {\n\t\t\t\t// Send the line over our channel\n\t\t\t\toutlet.lines <- str\n\t\t\t} else {\n\t\t\t\tlogp.Warn(\"Encountered non string message field: %s\", msg)\n\t\t\t}\n\t\t}\n\t}\n\n\t// The boolean we return indicates whether we were able to enqueue the data or not. For our purposes,\n\t// since we're not actually using a complicated Spool feature like FileBeat, we can just say we were\n\t// able to.\n\treturn true\n}",
"func msgHandler(c MQTT.Client, msg MQTT.Message) {\n\tfmt.Printf(\"MQTT message received. Topic: %s Message: %s\", msg.Topic(), msg.Payload())\n}",
"func (h *Hub) SendNewMessageEvent(newMessage message.Item) {\n\tdata := map[string]interface{}{}\n\n\tdata[\"event\"] = \"newMessage\"\n\tdata[\"data\"] = newMessage\n\n\tjson, _ := json2.Marshal(data)\n\th.Broadcast <- json\n}",
"func (player *Player) on(eventName event.Name, handler func(event interface{})) {\n\tplayer.Lock()\n\tplayer.handlers[eventName] = handler\n\tplayer.Unlock()\n\t_ = player.WriteJSON(protocol.NewEventRequest(eventName, protocol.Subscribe))\n}",
"func (nc *NetClient) handleMessage(m *arbor.ProtocolMessage) {\n\tswitch m.Type {\n\tcase arbor.NewMessageType:\n\t\tif !nc.Archive.Has(m.UUID) {\n\t\t\tif nc.receiveHandler != nil {\n\t\t\t\tnc.receiveHandler(m.ChatMessage)\n\t\t\t\t// ask Notifier to handle the message\n\t\t\t\tnc.Notifier.Handle(nc, m.ChatMessage)\n\t\t\t}\n\t\t\tif m.Parent != \"\" && !nc.Archive.Has(m.Parent) {\n\t\t\t\tnc.Query(m.Parent)\n\t\t\t}\n\t\t}\n\tcase arbor.WelcomeType:\n\t\tif !nc.Has(m.Root) {\n\t\t\tnc.Query(m.Root)\n\t\t}\n\t\tfor _, recent := range m.Recent {\n\t\t\tif !nc.Has(recent) {\n\t\t\t\tnc.Query(recent)\n\t\t\t}\n\t\t}\n\tcase arbor.MetaType:\n\t\tnc.HandleMeta(m.Meta)\n\t}\n}",
"func (f *FrameProcessor) ProcessMessage(msg *sarama.ConsumerMessage) {\n\tif f.DecodeJSON != 0 {\n\t\tvar decoded map[string]*json.RawMessage\n\t\terr := json.Unmarshal(msg.Value, &decoded)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Error occured during decoding\", err)\n\t\t}\n\t}\n\t// Lets record the event\n\tf.Counter.Incr(1)\n}",
"func IncomingEventGetHandler(ctx *gin.Context) {\n\tdatasource.StreamClient.Publish(config.Config.PubSub.EventName.NewIncomingRequest, []byte(\"new request came in\"))\n\tctx.JSON(http.StatusOK, gin.H{\"ok\": 1})\n}",
"func (p *Protocol) OnMessageSent(client *noise.Client) {\n\tp.Ack(client.ID())\n}",
"func processMessage(consumerMessage *sarama.ConsumerMessage) {\n\treceivedMessage := unmarshal(consumerMessage)\n\trunCallback(receivedMessage, consumerMessage)\n}",
"func onMessageRecv(client mqtt.Client, message mqtt.Message){\n\n\tvar temp models.TempReading\n\tif err := json.Unmarshal(message.Payload(), &temp); err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tfmt.Printf(\"Received message on topic: %v Message: %+v \\n \", message.Topic(), string(message.Payload()))\n\n\tvalvePercent := RegulateTemp(temp.Value)\n\treqNum := fmt.Sprintf(\"%.2f\", valvePercent)\n\tout, err := json.Marshal(reqNum)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\n\tPubValveValue(out,client )\n\n}",
"func onMessage(client mqtt.Client, message mqtt.Message) {\n\tklog.V(2).Info(\"Receive message\", message.Topic())\n\t// Get device ID and get device instance\n\tid := getDeviceID(message.Topic())\n\tif id == \"\" {\n\t\tklog.Error(\"Wrong topic\")\n\t\treturn\n\t}\n\tklog.V(2).Info(\"Device id: \", id)\n\n\tvar dev *globals.BleDev\n\tvar ok bool\n\tif dev, ok = devices[id]; !ok {\n\t\tklog.Error(\"Device not exist\")\n\t\treturn\n\t}\n\n\t// Get twin map key as the propertyName\n\tvar delta common.DeviceTwinDelta\n\tif err := json.Unmarshal(message.Payload(), &delta); err != nil {\n\t\tklog.Error(\"Unmarshal message failed: \", err)\n\t\treturn\n\t}\n\tfor twinName, twinValue := range delta.Delta {\n\t\ti := 0\n\t\tfor i = 0; i < len(dev.Instance.Twins); i++ {\n\t\t\tif twinName == dev.Instance.Twins[i].PropertyName {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif i == len(dev.Instance.Twins) {\n\t\t\tklog.Error(\"Twin not found: \", twinName)\n\t\t\tcontinue\n\t\t}\n\t\t// Desired value is not changed.\n\t\tif dev.Instance.Twins[i].Desired.Value == twinValue {\n\t\t\tcontinue\n\t\t}\n\t\tdev.Instance.Twins[i].Desired.Value = twinValue\n\t\tvar visitorConfig configmap.BleVisitorConfig\n\t\tif err := json.Unmarshal([]byte(dev.Instance.Twins[i].PVisitor.VisitorConfig), &visitorConfig); err != nil {\n\t\t\tklog.Errorf(\"Unmarshal visitor config failed, err is %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tsetVisitor(&visitorConfig, &dev.Instance.Twins[i], dev.BleClient)\n\t}\n}",
"func (o *PluginDnsNs) OnEvent(msg string, a, b interface{}) {}",
"func (self *Whisper) msgHandler(peer *p2p.Peer, ws p2p.MsgReadWriter) error {\n\twpeer := NewPeer(self, peer, ws)\n\t// initialise whisper peer (handshake/status)\n\tif err := wpeer.init(); err != nil {\n\t\treturn err\n\t}\n\t// kick of the main handler for broadcasting/managing envelopes\n\tgo wpeer.start()\n\tdefer wpeer.stop()\n\n\t// Main *read* loop. Writing is done by the peer it self.\n\tfor {\n\t\tmsg, err := ws.ReadMsg()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tenvelope, err := NewEnvelopeFromReader(msg.Payload)\n\t\tif err != nil {\n\t\t\tpeer.Infoln(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := self.add(envelope); err != nil {\n\t\t\t// TODO Punish peer here. Invalid envelope.\n\t\t\tpeer.Infoln(err)\n\t\t}\n\t\twpeer.addKnown(envelope)\n\t}\n}",
"func (a *Adapter) handleMessage() {\n\tfor {\n\t\t_, input, err := a.Conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not read message! %s\", err)\n\t\t\tbreak\n\t\t}\n\n\t\t// Decodes input message\n\t\terr, meta, data := lib.DecodeMessage(&input)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Could not decode incoming message! %s\", err)\n\t\t}\n\n\t\tif glob.V_LOG_IO_MSG {\n\t\t\tlog.Infof(\"Message received! \\nMeta: %s \\nData: %s\", meta, data)\n\t\t}\n\n\t\tgo a.TraverseCBs(meta, data)\n\t}\n}",
"func (s *SocketModeAdapter) onDirectMessage(event *slackevents.MessageEvent, info *adapter.Info) *adapter.ProviderEvent {\n\treturn s.wrapEvent(\n\t\tadapter.EventDirectMessage,\n\t\tinfo,\n\t\t&adapter.DirectMessageEvent{\n\t\t\tChannelID: event.Channel,\n\t\t\tText: ScrubMarkdown(event.Text),\n\t\t\tUserID: event.User,\n\t\t},\n\t)\n}",
"func (r *MetricReceiver) handleMessage(addr net.Addr, msg []byte) {\n\tbuf := bytes.NewBuffer(msg)\n\tfor {\n\t\tline, readerr := buf.ReadBytes('\\n')\n\n\t\t// protocol does not require line to end in \\n, if EOF use received line if valid\n\t\tif readerr != nil && readerr != io.EOF {\n\t\t\tr.handleError(fmt.Errorf(\"error reading message from %s: %s\", addr, readerr))\n\t\t\treturn\n\t\t} else if readerr != io.EOF {\n\t\t\t// remove newline, only if not EOF\n\t\t\tif len(line) > 0 {\n\t\t\t\tline = line[:len(line)-1]\n\t\t\t}\n\t\t}\n\n\t\t// Only process lines with more than one character\n\t\tif len(line) > 1 {\n\t\t\tmetric, err := parseLine(line)\n\t\t\tif err != nil {\n\t\t\t\tr.handleError(fmt.Errorf(\"error parsing line %q from %s: %s\", line, addr, err))\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tgo r.Handler.HandleMetric(metric)\n\t\t}\n\n\t\tif readerr == io.EOF {\n\t\t\t// if was EOF, finished handling\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (w Wrapper) OnReadOutMessages(f ReadOutMessagesHandler) {\n\tw.longpoll.EventNew(7, func(i []interface{}) error {\n\t\tvar event ReadOutMessages\n\t\tif err := event.parse(i); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tf(event)\n\n\t\treturn nil\n\t})\n}",
"func OnSubMessageReceived(client MQTT.Client, message MQTT.Message) {\n\terr := json.Unmarshal(message.Payload(), &deviceTwinResult)\n\tif err != nil {\n\t\tglog.Error(\"Error in unmarshalling: \", err)\n\t}\n}",
"func (this *IoHandlerImp) MessageReceived(filter *IoFilter, obj BaseObject) {\n}",
"func (n *NetImpl) eventHandler(handler interface{}, params ...interface{}) {\n\tcallback := handler.(func(_ *peering.RecvEvent))\n\trecvEvent := params[0].(*peering.RecvEvent)\n\tcallback(recvEvent)\n}",
"func (d *distEventBus) processReceivedMsg(ctx context.Context, msg *pubsub.Message) {\n\tdefer msg.Ack()\n\twrappers, data, ignore, err := d.decodeMsg(msg)\n\tif err != nil {\n\t\tsklog.Errorf(\"Error decoding message: %s\", err)\n\t\treturn\n\t}\n\n\t// If this was flagged to ignore then we are done.\n\tif ignore {\n\t\treturn\n\t}\n\n\t// Publish the events locally if it hasn't been sent by this instance.\n\tfor _, wrapper := range wrappers {\n\t\tif wrapper.Sender != d.clientID {\n\t\t\td.localEventBus.Publish(wrapper.ChannelID, data, true)\n\t\t}\n\t}\n}",
"func (mgr *RoomManager) Message(s *session.Session, msg *UserMessage) error {\n\tif !s.HasKey(roomIDKey) {\n\t\treturn fmt.Errorf(\"not join room yet\")\n\t}\n\troom := s.Value(roomIDKey).(*Room)\n\treturn room.group.Broadcast(\"onMessage\", msg)\n}",
"func (i *IncomingSocket) OnBinMessage(belowNexus nexusHelper.Nexus, msg []byte) {\n\ti.onBinMessage(belowNexus, msg)\n}",
"func (bot *Bot) messageListener(message events.EventMessage) {\n\t// Increase lines count for all announcements.\n\tfor k := range bot.lastURLAnnouncedLinesPassed {\n\t\tbot.lastURLAnnouncedLinesPassed[k] += 1\n\t\t// After 100 lines pass, forget it ever happened.\n\t\tif bot.lastURLAnnouncedLinesPassed[k] > 100 {\n\t\t\tdelete(bot.lastURLAnnouncedLinesPassed, k)\n\t\t\tdelete(bot.lastURLAnnouncedTime, k)\n\t\t}\n\t}\n\n\t// Handles the commands.\n\tif message.AtBot {\n\t\tbot.handleBotCommand(&message)\n\t}\n}",
"func (s Session) receiveHandler() {\n\tdecoder := json.NewDecoder(s.gameTCPConn)\n\tfor {\n\t\tvar gMsg GameMessage\n\t\terr := decoder.Decode(&gMsg)\n\t\tif err != nil {\n\t\t\tlog.Print(err)\n\t\t}\n\t\t//check event\n\t\tswitch gMsg.Event {\n\t\t\tcase \"created\":\n\t\t\ts.Data <- gMsg\n\t\t\tcase \"gamestart\":\n\t\t\ts.LobbyHost.Send <- gMsg\n\t\t\tcase \"gameterminate\":\n\t\t\ts.Data <- gMsg\n\t\t\tcase \"msgplayer\":\n\t\t\tif gMsg.Player == 0 {\n\t\t\t\tuser := s.PlayerMap[gMsg.Player].(HostUser)\n\t\t\t\tuser.Send <- gMsg\n\t\t\t} else {\n\t\t\t\tuser := s.PlayerMap[gMsg.Player].(AnonUser)\n\t\t\t\tuser.Send <- gMsg\n\t\t\t}\n\t\t\tcase \"msgall\":\n\t\t\ts.LobbyHost.Send <- gMsg\n\t\t}\n\t}\n}",
"func (s *Socket) handleMessagesIn() {\n\tfor {\n\t\tm := <-s.messagesIn\n\t\tfmt.Printf(\"Receiving message: %v\", m)\n\t\tswitch m.MessageType {\n\t\tcase PLACE_ORDER:\n\t\t\ts.placeOrder(m.Payload)\n\t\tcase CANCEL_ORDER:\n\t\t\ts.cancelOrder(m.Payload)\n\t\tcase SIGNED_DATA:\n\t\t\ts.executeOrder(m.Payload)\n\t\tcase DONE:\n\t\tdefault:\n\t\t\tpanic(\"Unknown message type\")\n\t\t}\n\t}\n}",
"func (p *Pipeline) OnStructMessage(c ws.Speaker, msg *ws.StructMessage) {\n\tswitch msg.Type {\n\tcase \"store\":\n\t\tvar flows []*flow.Flow\n\t\tif err := json.Unmarshal(msg.Obj, &flows); err != nil {\n\t\t\tlogging.GetLogger().Error(\"Failed to unmarshal flows: \", err)\n\t\t\treturn\n\t\t}\n\n\t\tp.Storer.StoreFlows(flows)\n\tdefault:\n\t\tlogging.GetLogger().Error(\"Unknown message type: \", msg.Type)\n\t}\n}",
"func onNewSocketConnection(s socket.Socket) {\n\t// Don't accept any new socket connections,\n\t// if the application is currently shutting down.\n\tif isShuttdingDown {\n\t\ts.Close()\n\t\treturn\n\t}\n\n\t// Create a new socket session\n\tss := &socketSession{\n\t\tsocketConn: s,\n\t\tsession: nil,\n\t\ttoken: newRandomToken(),\n\t\tstream: stream.New(),\n\n\t\tpingCount: 0,\n\t\tpingTimer: time.NewTimer(pingPeriod),\n\n\t\tstopWriteLoop: make(chan struct{}),\n\t}\n\n\t// Set the socket event functions\n\ts.OnClose(ss.onClose)\n\ts.OnRead(ss.onRead)\n\n\t// Start the goroutine for writing messages to the client\n\tgo ss.writeLoop()\n}",
"func SocketEvent(w http.ResponseWriter, r *http.Request) {\n\t// if r.Header.Get(\"Origin\") != \"http://\"+r.Host {\n\t// \thttp.Error(w, \"Origin not allowed\", 403)\n\t// \tlog.Errorf(\"%s Refused\", r.RemoteAddr)\n\t// \treturn\n\t// }\n\tvar eventLogs = make(chan *nats.Msg, 100)\n\n\tconn, err := websocket.Upgrade(w, r, w.Header(), 1024, 1024)\n\tif err != nil {\n\t\thttp.Error(w, \"Could not open websocket connection\", http.StatusBadRequest)\n\t}\n\n\tif nc.Conn == nil || !(*nc.Conn).IsConnected() {\n\t\t//\t\thttp.Error(w, \"No message bus available\", http.StatusBadRequest)\n\t\tconn.Close()\n\t\treturn\n\t}\n\t// nc.Subscribe(\">\", func(msg *nats.Msg) {\n\t// \teventLogs <- string(msg.Data)\n\t// })\n\tu4, err := uuid.NewV4()\n\tif err != nil {\n\t\tfmt.Println(\"error:\", err)\n\t\treturn\n\t}\n\tvar currentQueue = u4.String()\n\tvar sub *nats.Subscription\n\n\tsub, err = (*nc.Conn).ChanSubscribe(currentQueue, eventLogs)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to subscribe to %s : %s\", currentQueue, err)\n\t}\n\tdefer func(sub **nats.Subscription) {\n\t\terr := (*sub).Unsubscribe()\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to unsubcribe from : %s\", err)\n\t\t}\n\t\terr = backend.DeleteHubRule(currentQueue)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to delete rule %s : %s\", currentQueue, err)\n\t\t}\n\t}(&sub)\n\n\tgo echo(conn, eventLogs)\n\tfor {\n\t\t_, message, err := conn.ReadMessage()\n\t\tif err != nil {\n\t\t\tlog.Println(\"read:\", err)\n\t\t\tbreak\n\t\t}\n\t\t// if string(message) == currentQueue {\n\t\t// \tcontinue\n\t\t// }\n\t\t// if currentQueue == \">\" && string(message) == \"\" {\n\t\t// \tcontinue\n\t\t// }\n\t\t// if string(message) == \"\" {\n\t\t// \tcurrentQueue = \">\"\n\t\t// } else {\n\t\t// \tcurrentQueue = string(message)\n\t\t// }\n\t\t//\n\t\t// err = sub.Unsubscribe()\n\t\t// if err != nil {\n\t\t// \tlog.Errorf(\"Unable to unsubscribe from %s\", currentQueue)\n\t\t// }\n\t\t// sub, err = (*nc.Conn).ChanSubscribe(currentQueue, eventLogs)\n\t\t// if err != nil {\n\t\t// \tlog.Errorf(\"Unable to subscribe from %s\", currentQueue)\n\t\t// }\n\t\tr := lib.HubRule{\n\t\t\tName: currentQueue,\n\t\t\tQueue: \".*\",\n\t\t\tCondition: string(message),\n\t\t\tAction: fmt.Sprintf(\"copy(event,'%s')\", currentQueue),\n\t\t}\n\t\tjsonstr, err := json.Marshal(r)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to marshal %+v : %s\", r, err)\n\t\t}\n\t\terr = backend.SetHubRule(currentQueue, string(jsonstr))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Unable to set rule %s : %s\", string(jsonstr), err)\n\t\t}\n\t}\n}"
] | [
"0.7234711",
"0.7101964",
"0.7077346",
"0.70616764",
"0.6944995",
"0.6751046",
"0.66729075",
"0.66678685",
"0.6633395",
"0.66270655",
"0.65172863",
"0.6511483",
"0.65082186",
"0.65010315",
"0.65000105",
"0.64327663",
"0.64258385",
"0.6350663",
"0.6326081",
"0.62677497",
"0.6266969",
"0.62225324",
"0.62187827",
"0.62150985",
"0.6192441",
"0.618574",
"0.6177542",
"0.6146008",
"0.6091445",
"0.606105",
"0.6030589",
"0.596296",
"0.59030485",
"0.5889788",
"0.58889335",
"0.58722174",
"0.58606",
"0.5856766",
"0.57992536",
"0.57974803",
"0.57849854",
"0.5781176",
"0.5746621",
"0.5737964",
"0.5712297",
"0.5704011",
"0.5688029",
"0.56717235",
"0.5623429",
"0.5620743",
"0.5601565",
"0.5594931",
"0.5580329",
"0.5553539",
"0.55461264",
"0.55440146",
"0.5533195",
"0.5513577",
"0.5506116",
"0.5483408",
"0.5461863",
"0.54605573",
"0.5458708",
"0.54530776",
"0.54253167",
"0.5403513",
"0.5402523",
"0.5402396",
"0.5402244",
"0.5388895",
"0.5373232",
"0.535919",
"0.53583616",
"0.5352833",
"0.53500044",
"0.5342763",
"0.53329754",
"0.53157973",
"0.53148866",
"0.53052175",
"0.53045565",
"0.5303681",
"0.5298947",
"0.5290266",
"0.52877045",
"0.52827126",
"0.52799803",
"0.5278952",
"0.52718616",
"0.52643526",
"0.5263897",
"0.5254772",
"0.5254695",
"0.5249656",
"0.5244904",
"0.52405757",
"0.52173156",
"0.5214566",
"0.5203657",
"0.51871395"
] | 0.55480933 | 54 |
String returns the string representation of a command. | func (c *Command) String() string {
// TODO: criteria
if c.Next != nil {
return c.Executer.String() + ", " + c.Next.String()
}
return c.Executer.String()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (s Command) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (cmd *command) String() string {\n\tif len(cmd.params) == 0 {\n\t\treturn fmt.Sprintf(\"(%s)\", cmd.name)\n\t}\n\n\tparams := make([]string, len(cmd.params))\n\tfor i, param := range cmd.params {\n\t\tswitch concrete := param.(type) {\n\t\tcase int:\n\t\t\tparams[i] = fmt.Sprintf(\"%d\", concrete)\n\t\tcase float64:\n\t\t\tparams[i] = fmt.Sprintf(\"%0.2f\", concrete)\n\t\tcase string:\n\t\t\tparams[i] = concrete\n\t\tcase *command:\n\t\t\tparams[i] = concrete.String()\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"Unexpected type: %T\", concrete))\n\t\t}\n\t}\n\treturn fmt.Sprintf(\"(%s %s)\", cmd.name, strings.Join(params, \" \"))\n}",
"func (cmd *Command) String() string {\n\treturn fmt.Sprintf(\"ID:%s,Op:%d,Key:%s,Value:%s,Resp:%s\", cmd.ID, cmd.Op, cmd.Key, cmd.Value, cmd.Resp)\n}",
"func (cmd *Command) String() string {\n\tstr := \"\"\n\tif cmd.c.Process != nil {\n\t\tstr += fmt.Sprintf(\"(PID %v) \", cmd.c.Process.Pid)\n\t}\n\tif cmd.pgid >= 0 {\n\t\tstr += fmt.Sprintf(\"(PGID %v) \", cmd.pgid)\n\t}\n\tstr += shellquote.Join(cmd.c.Args...)\n\treturn str\n}",
"func (c *Command) String() string {\n\tif len(c.Params) > 0 {\n\t\treturn fmt.Sprintf(\"%s %s\", c.Name, string(bytes.Join(c.Params, byteSpace)))\n\t}\n\treturn string(c.Name)\n}",
"func (c Command) String() string {\n\tvar cmdStr strings.Builder\n\tcmdStr.WriteString(cmdString + c.Command)\n\n\tfirst := true\n\tfor key, val := range c.Properties {\n\t\tif first {\n\t\t\tfirst = false\n\t\t\tcmdStr.WriteString(\" \")\n\t\t} else {\n\t\t\tcmdStr.WriteString(\",\")\n\t\t}\n\n\t\tcmdStr.WriteString(fmt.Sprintf(\"%s=%s\", key, escape(val)))\n\t}\n\n\tcmdStr.WriteString(cmdString)\n\tcmdStr.WriteString(escapeData(c.Message))\n\treturn cmdStr.String()\n}",
"func (c Command) String() string {\n\tsanitize := func(i interface{}) string {\n\t\tvar str string\n\t\tswitch s := i.(type) {\n\t\tcase *regexp.Regexp:\n\t\t\tif s == nil {\n\t\t\t\treturn \"nil\"\n\t\t\t}\n\t\t\tstr = s.String()\n\t\tcase string:\n\t\t\tstr = s\n\t\t}\n\t\treturn strings.Replace(strings.Replace(str, \"\\r\", \"\\\\r\", -1), \"\\n\", \"\\\\n\", -1)\n\t}\n\treturn fmt.Sprintf(\"%s: %v Prototype:%q CommandRegexp:%q Expect:%q Error:%q\", c.Name, c.Timeout, sanitize(c.Prototype), sanitize(c.CommandRegexp), sanitize(c.Response), sanitize(c.Error))\n}",
"func (c *Command) String() string {\n\treturn strings.Join(c.StringSlice(), \" \")\n}",
"func (c Command) String() string {\n\tswitch c {\n\tcase SERVER:\n\t\treturn \"server\"\n\tcase ASSETS:\n\t\treturn \"assets\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}",
"func (cmd *Cmd) String() string {\n\treturn fmt.Sprintf(\"%s %s\", cmd.Name, strings.Join(cmd.Args, \" \"))\n}",
"func (s CommandInvocation) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (n *Cmd) String() string {\n\tw := new(strings.Builder)\n\tw.WriteString(\"•\")\n\t// Write the Cmd's name.\n\tw.WriteString(n.NodeValue.String())\n\t// Begin the context\n\tw.WriteString(\"[\")\n\t// Write the flags.\n\tif len(n.Flags) > 0 {\n\t\tw.WriteString(\"<\")\n\t\tw.WriteString(strings.Join(n.Flags, \",\"))\n\t\tw.WriteString(\">\")\n\t}\n\t// Write the Arguments\n\tif n.Anonymous {\n\t\tfor _, nl := range n.ArgList {\n\t\t\tw.WriteString(fmt.Sprintf(\"{%s}\", nl.String()))\n\t\t}\n\t\t// if len(n.ArgList) == 0 {\n\t\t// \tw.WriteString(\"{}\")\n\t\t// }\n\t} else {\n\t\t// Not anonymous. We sort the map to guarantee the order for unit\n\t\t// tests.\n\t\tvar keys []string\n\t\tfor k := range n.ArgMap {\n\t\t\tkeys = append(keys, k)\n\t\t}\n\t\tsort.Strings(keys)\n\t\tfor _, k := range keys {\n\t\t\tv := n.ArgMap[k]\n\t\t\tw.WriteString(fmt.Sprintf(\"%s={%s}\", k, v.String()))\n\t\t}\n\t}\n\tw.WriteString(\"]\")\n\treturn w.String()\n}",
"func (c Cmd) String() string {\n\tcommands := map[Cmd]string{\n\t\tcmdCalendars: \"calendars\",\n\t\tcmdClear: \"clear\",\n\t\tcmdConfig: \"config\",\n\t\tcmdDates: \"dates\",\n\t\tcmdEvents: \"events\",\n\t\tcmdOpen: \"open\",\n\t\tcmdServer: \"server\",\n\t\tcmdToggle: \"toggle\",\n\t\tcmdUpdateCalendars: \"updateCalendars\",\n\t\tcmdUpdateEvents: \"updateEvents\",\n\t\tcmdUpdateIcons: \"updateIcons\",\n\t\tcmdUpdateWorkflow: \"updateWorkflow\",\n\t}\n\treturn commands[c]\n}",
"func (av AttributeCommand) String() string {\n\tswitch av {\n\tcase AttributeCommandExecute:\n\t\treturn \"execute\"\n\tcase AttributeCommandClose:\n\t\treturn \"close\"\n\tcase AttributeCommandFetch:\n\t\treturn \"fetch\"\n\tcase AttributeCommandPrepare:\n\t\treturn \"prepare\"\n\tcase AttributeCommandReset:\n\t\treturn \"reset\"\n\tcase AttributeCommandSendLongData:\n\t\treturn \"send_long_data\"\n\t}\n\treturn \"\"\n}",
"func (c CommentCommand) String() string {\n\treturn fmt.Sprintf(\"command=%q verbose=%t dir=%q workspace=%q project=%q policyset=%q, clear-policy-approval=%t, flags=%q\", c.Name.String(), c.Verbose, c.RepoRelDir, c.Workspace, c.ProjectName, c.PolicySet, c.ClearPolicyApproval, strings.Join(c.Flags, \",\"))\n}",
"func (s ExecuteCommandOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s SendCommandOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s SendCommandOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s GetCommandInvocationOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s ListCommandsOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s ExecuteCommandInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (cmd *StatusCmdReal) String() string {\n\treturn cmd.statusCmd.String()\n}",
"func (c Command) ToString() string {\r\n\tbuf := new(bytes.Buffer)\r\n\tfor _, arg := range c {\r\n\t\tbuf.Write(arg)\r\n\t\tbuf.WriteString(\" \")\r\n\t}\r\n\r\n\treturn strings.TrimSpace(buf.String())\r\n}",
"func (s *ShellTask) String() string {\n\treturn s.cmd\n}",
"func (c *CurlCommand) String() string {\n\treturn strings.Join(*c, \" \")\n}",
"func (cr CommandReply) String() string {\r\n\tpanic(\"not implemented\")\r\n}",
"func (s CancelCommandOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s CancelCommandOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (execution *Execution) String() string {\n\treturn fmt.Sprintf(`%q`, execution.command.GetArgs())\n}",
"func (c cmdType) String() string {\n\tswitch c {\n\tcase statusCmd:\n\t\treturn \"status\"\n\tcase restartCmd:\n\t\treturn \"restart\"\n\tcase setCreds:\n\t\treturn \"set-credentials\"\n\t}\n\treturn \"\"\n}",
"func (s ListCommandsInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (dc DesignedConfig) String() string {\n\treturn fmt.Sprintf(\"%s\", dc.Command)\n}",
"func (s CommandPlugin) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s ExecuteQueryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s SendCommandInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s SendCommandInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s GetCommandInvocationInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (c Command) CmdString() string {\r\n\treturn strings.ToLower(util.SliceToString(c[0]))\r\n}",
"func (ca CmdAction) String() string {\n\treturn string(ca)\n}",
"func (c Commands) String() (r string) {\n\tfor _, val := range c {\n\t\tr += fmt.Sprintf(\"%s\\n\", val.String())\n\t}\n\treturn\n}",
"func (s ListCommandInvocationsOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func commandsToString(cmds []redis.Cmder) string {\n\tvar b bytes.Buffer\n\tfor idx, cmd := range cmds {\n\t\tif idx > 0 {\n\t\t\tb.WriteString(\"\\n\")\n\t\t}\n\t\tb.WriteString(cmd.String())\n\t}\n\treturn b.String()\n}",
"func (p *Path) String() string {\n\tsb := strings.Builder{}\n\tfor i := 0; i < len(p.d); {\n\t\tcmd := p.d[i]\n\t\tswitch cmd {\n\t\tcase moveToCmd:\n\t\t\tfmt.Fprintf(&sb, \"M%g %g\", p.d[i+1], p.d[i+2])\n\t\tcase lineToCmd:\n\t\t\tfmt.Fprintf(&sb, \"L%g %g\", p.d[i+1], p.d[i+2])\n\t\tcase quadToCmd:\n\t\t\tfmt.Fprintf(&sb, \"Q%g %g %g %g\", p.d[i+1], p.d[i+2], p.d[i+3], p.d[i+4])\n\t\tcase cubeToCmd:\n\t\t\tfmt.Fprintf(&sb, \"C%g %g %g %g %g %g\", p.d[i+1], p.d[i+2], p.d[i+3], p.d[i+4], p.d[i+5], p.d[i+6])\n\t\tcase arcToCmd:\n\t\t\trot := p.d[i+3] * 180.0 / math.Pi\n\t\t\tlargeArc, sweep := fromArcFlags(p.d[i+4])\n\t\t\tsLargeArc := \"0\"\n\t\t\tif largeArc {\n\t\t\t\tsLargeArc = \"1\"\n\t\t\t}\n\t\t\tsSweep := \"0\"\n\t\t\tif sweep {\n\t\t\t\tsSweep = \"1\"\n\t\t\t}\n\t\t\tfmt.Fprintf(&sb, \"A%g %g %g %s %s %g %g\", p.d[i+1], p.d[i+2], rot, sLargeArc, sSweep, p.d[i+5], p.d[i+6])\n\t\tcase closeCmd:\n\t\t\tfmt.Fprintf(&sb, \"z\")\n\t\t}\n\t\ti += cmdLen(cmd)\n\t}\n\treturn sb.String()\n}",
"func (s ExecuteCommandConfiguration) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (msg *Message) Command() string {\n\treturn wire.CmdObject\n}",
"func (ins *Instruction) String() string {\n\treturn ins.Operation + \" \" + ins.Argument\n}",
"func (t MessageType) String() string {\n\tswitch t {\n\tcase AckCmdID:\n\t\treturn \"Ack\"\n\tcase PingCmdID:\n\t\treturn \"Ping\"\n\tcase SecretRequestCmdID:\n\t\treturn \"SecretRequest\"\n\tcase UnlockCmdID:\n\t\treturn \"Unlock\"\n\tcase DirectTransferCmdID:\n\t\treturn \"DirectTransfer\"\n\tcase MediatedTransferCmdID:\n\t\treturn \"MediatedTransfer\"\n\tcase AnnounceDisposedTransferCmdID:\n\t\treturn \"AnnounceDisposed\"\n\tcase AnnounceDisposedTransferResponseCmdID:\n\t\treturn \"AnnounceDisposedResponse\"\n\tcase RevealSecretCmdID:\n\t\treturn \"RevealSecret\"\n\tcase RemoveExpiredLockCmdID:\n\t\treturn \"RemoveExpiredHashlock\"\n\tcase SettleRequestCmdID:\n\t\treturn \"SettleRequest\"\n\tcase SettleResponseCmdID:\n\t\treturn \"SettleResponse\"\n\tcase WithdrawRequestCmdID:\n\t\treturn \"WithdrawRequest\"\n\tcase WithdrawResponseCmdID:\n\t\treturn \"WithdrawResponse\"\n\tdefault:\n\t\treturn \"<unknown>\"\n\t}\n}",
"func (s DeleteMultiplexProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (e *Engine) String() string {\n\ts := \"\"\n\tfor _, c := range e.commands {\n\t\ts += string(c)\n\t}\n\treturn s\n}",
"func (oc OpCode) String() string {\n\tswitch oc {\n\tcase OpReply:\n\t\treturn \"OP_REPLY\"\n\tcase OpUpdate:\n\t\treturn \"OP_UPDATE\"\n\tcase OpInsert:\n\t\treturn \"OP_INSERT\"\n\tcase OpQuery:\n\t\treturn \"OP_QUERY\"\n\tcase OpGetMore:\n\t\treturn \"OP_GET_MORE\"\n\tcase OpDelete:\n\t\treturn \"OP_DELETE\"\n\tcase OpKillCursors:\n\t\treturn \"OP_KILL_CURSORS\"\n\tcase OpCommand:\n\t\treturn \"OP_COMMAND\"\n\tcase OpCommandReply:\n\t\treturn \"OP_COMMANDREPLY\"\n\tcase OpCompressed:\n\t\treturn \"OP_COMPRESSED\"\n\tcase OpMsg:\n\t\treturn \"OP_MSG\"\n\tdefault:\n\t\treturn \"<invalid opcode>\"\n\t}\n}",
"func (s DeleteProgramOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s CommandFilter) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteMethodOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s ExecuteSqlOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (m lockCmdMessage) String() string {\n\tif m.Mode == nil {\n\t\treturn fmt.Sprintf(\"No object lock configuration is enabled\")\n\t}\n\n\treturn fmt.Sprintf(\"%s mode is enabled for %s\", console.Colorize(\"Mode\", *m.Mode), console.Colorize(\"Validity\", *m.Validity))\n}",
"func (s *Delete) String() (stmt string, args []interface{}, err error) {\n\treturn s.render()\n}",
"func (s DeleteBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (m *Message) String() string {\n\tbuf := &bytes.Buffer{}\n\n\t// Write any IRCv3 tags if they exist in the message\n\tif len(m.Tags) > 0 {\n\t\tbuf.WriteByte('@')\n\t\tbuf.WriteString(m.Tags.String())\n\t\tbuf.WriteByte(' ')\n\t}\n\n\t// Add the prefix if we have one\n\tif m.Prefix != nil && m.Prefix.Name != \"\" {\n\t\tbuf.WriteByte(':')\n\t\tbuf.WriteString(m.Prefix.String())\n\t\tbuf.WriteByte(' ')\n\t}\n\n\t// Add the command since we know we'll always have one\n\tbuf.WriteString(m.Command)\n\n\tif len(m.Params) > 0 {\n\t\targs := m.Params[:len(m.Params)-1]\n\t\ttrailing := m.Params[len(m.Params)-1]\n\n\t\tif len(args) > 0 {\n\t\t\tbuf.WriteByte(' ')\n\t\t\tbuf.WriteString(strings.Join(args, \" \"))\n\t\t}\n\n\t\t// If trailing is zero-length, contains a space or starts with\n\t\t// a : we need to actually specify that it's trailing.\n\t\tif len(trailing) == 0 || strings.ContainsRune(trailing, ' ') || trailing[0] == ':' {\n\t\t\tbuf.WriteString(\" :\")\n\t\t} else {\n\t\t\tbuf.WriteString(\" \")\n\t\t}\n\t\tbuf.WriteString(trailing)\n\t}\n\n\treturn buf.String()\n}",
"func (om OperationMsg) String() string {\n\tout, err := json.Marshal(om)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn string(out)\n}",
"func (s DeleteRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s ExecuteQueryInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s CancelCommandInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s CancelCommandInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (i invocation) String() string {\n\treturn fmt.Sprintf(\"%s %s\", i.name, quotedArgsString(i.finalArgs))\n}",
"func (e *Executor) String() string {\n\treturn fmt.Sprintf(`{\n Build: %s,\n Distribution: %s,\n Host: %s,\n ID: %d,\n Repo: %v,\n Runtime: %s,\n Pipeline: %v,\n}`,\n\t\tstrings.ReplaceAll(e.Build.String(), \" \", \" \"),\n\t\te.GetDistribution(),\n\t\te.GetHost(),\n\t\te.GetID(),\n\t\tstrings.ReplaceAll(e.Repo.String(), \" \", \" \"),\n\t\te.GetRuntime(),\n\t\te.GetPipeline(),\n\t)\n}",
"func (s DeleteCodeRepositoryOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (o Op) String() string {\n\treturn opString[o]\n}",
"func (s BatchExecuteStatementOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s ListCommandInvocationsInput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func formatCommand(command string, args ...interface{}) string {\n\tline := []interface{}{command}\n\tline = append(line, args...)\n\treturn fmt.Sprintln(line...)\n}",
"func (o BigIpLicenseOutput) Command() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *BigIpLicense) pulumi.StringOutput { return v.Command }).(pulumi.StringOutput)\n}",
"func (m *NEGInstr) String() string {\n\treturn fmt.Sprintf(\"\\tNEGS %v, %v\", m.dest, m.arg)\n}",
"func (d ExecDetails) String() string {\n\tparts := make([]string, 0, 6)\n\tif d.ProcessTime > 0 {\n\t\tparts = append(parts, fmt.Sprintf(\"process_time:%v\", d.ProcessTime))\n\t}\n\tif d.WaitTime > 0 {\n\t\tparts = append(parts, fmt.Sprintf(\"wait_time:%v\", d.WaitTime))\n\t}\n\tif d.BackoffTime > 0 {\n\t\tparts = append(parts, fmt.Sprintf(\"backoff_time:%v\", d.BackoffTime))\n\t}\n\tif d.RequestCount > 0 {\n\t\tparts = append(parts, fmt.Sprintf(\"request_count:%d\", d.RequestCount))\n\t}\n\tif d.TotalKeys > 0 {\n\t\tparts = append(parts, fmt.Sprintf(\"total_keys:%d\", d.TotalKeys))\n\t}\n\tif d.ProcessedKeys > 0 {\n\t\tparts = append(parts, fmt.Sprintf(\"processed_keys:%d\", d.ProcessedKeys))\n\t}\n\treturn strings.Join(parts, \" \")\n}",
"func (s DeleteAlgorithmOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (x CInstruction) String() string {\n\treturn string(x)\n}",
"func (s DeleteModelPackageOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s CreateBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (d DeleteObjectsOutput) String() string {\n\treturn helper.Prettify(d)\n}",
"func (s ExecuteScriptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (o SysbenchSpecOutput) Command() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v SysbenchSpec) *string { return v.Command }).(pulumi.StringPtrOutput)\n}",
"func (op *output) String() string {\n\treturn fmt.Sprintf(\"%s:%v\", op.txHash, op.vout)\n}",
"func (op *Operator) String() string {\n\targs := make([]any, len(op.args))\n\tif op.operator == OperatorStr {\n\t\targs[0] = op.args[0]\n\t} else {\n\t\tfor i, arg := range op.args {\n\t\t\targs[i] = toSQLString(arg)\n\t\t}\n\t}\n\treturn fmt.Sprintf(op.format, args...)\n}",
"func (p Packet) String() string {\n\treturn fmt.Sprintf(\"<Packet {Command: %x, Data: %v, Expect: %v}>\", p.Command, p.Data, p.Expect)\n}",
"func (s DeleteDatabaseOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteSyncJobOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteAssessmentRunOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (n *Notification) String() string {\n\tstr := \"[\" + strings.ToUpper(n.Name) + \"]\"\n\n\targs := strings.TrimSpace(n.Arguments)\n\tif args != \"\" {\n\t\tstr += \" \" + args\n\t}\n\n\tcontext := strings.TrimSpace(n.Context)\n\tif context != \"\" {\n\t\tstr += \"\\n\\n\" + context\n\t}\n\n\treturn str\n}",
"func (s DeleteMultiplexOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (r *RequestDelete) String() string {\n\treturn fmt.Sprintf(\"id: %s, type: delete, key: %s\", r.ID, r.Key)\n}",
"func (s DeleteObjectOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteBotVersionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s BatchDetectSyntaxOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeleteConnectionOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (o IscsiInterfaceGetIterRequestQuery) String() string {\n\treturn ToString(reflect.ValueOf(o))\n}",
"func (s DeleteBotAliasOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (n *Node) String() (s string) {\n\treturn fmt.Sprintf(\"\\\"%s\\\" ==> %s/%d %s %s/%d\", n.Original, n.Lref, n.Lval, n.Op, n.Rref, n.Rval)\n}",
"func (s DisassociateLexBotOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (s DeletePromptOutput) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func (m *NOTInstr) String() string {\n\treturn fmt.Sprintf(\"\\tEOR %v, %v, #1\", m.dest, m.arg)\n}",
"func (s DeleteMessageOutput) String() string {\n\treturn awsutil.Prettify(s)\n}"
] | [
"0.82588065",
"0.80942166",
"0.80462706",
"0.79341733",
"0.79099274",
"0.78881663",
"0.77696174",
"0.77659005",
"0.77516574",
"0.76951456",
"0.7572861",
"0.75336504",
"0.7422595",
"0.7410901",
"0.7392648",
"0.7357018",
"0.71632457",
"0.71632457",
"0.716274",
"0.70549613",
"0.70222926",
"0.69917774",
"0.6897801",
"0.68698615",
"0.6823803",
"0.6810143",
"0.6793705",
"0.6793705",
"0.6771687",
"0.67633706",
"0.672188",
"0.6682415",
"0.66218704",
"0.6578419",
"0.6576606",
"0.6576606",
"0.6520872",
"0.6509788",
"0.64986086",
"0.6449316",
"0.6441491",
"0.6424215",
"0.6412923",
"0.63855314",
"0.63749063",
"0.6368946",
"0.63261366",
"0.6262951",
"0.62411636",
"0.6240032",
"0.6224867",
"0.6205263",
"0.6200847",
"0.61785233",
"0.6177699",
"0.61719626",
"0.61682236",
"0.6154517",
"0.6152472",
"0.61461556",
"0.6132684",
"0.61128426",
"0.61128426",
"0.6104843",
"0.60920954",
"0.6085005",
"0.60823995",
"0.608106",
"0.6076704",
"0.6070686",
"0.60239625",
"0.60186803",
"0.6007142",
"0.59930253",
"0.5984945",
"0.59809184",
"0.5978957",
"0.5972062",
"0.5968902",
"0.5965232",
"0.5964106",
"0.5960777",
"0.5959612",
"0.59574723",
"0.5955285",
"0.59550446",
"0.5950792",
"0.59475905",
"0.5943259",
"0.59432304",
"0.59425807",
"0.5937014",
"0.59365606",
"0.59356225",
"0.59321874",
"0.5929221",
"0.5923321",
"0.5921184",
"0.5919029",
"0.59181595"
] | 0.70939344 | 19 |
Run runs a command and any subsequent commands if it's chained. | func (c *Command) Run(ctx context.Context) error {
// TODO: criteria
err := c.Exec(ctx)
if err != nil {
return err
}
if c.Next != nil {
return c.Next.Run(ctx)
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Cmd) Run() error {\n\treturn c.runInnerCommand()\n}",
"func (c *Command) run(cmd, path string, clearStack bool) error {\n\tif c.specialCmd(cmd, path) {\n\t\treturn nil\n\t}\n\tcmds := strings.Split(cmd, \" \")\n\tcommand := strings.ToLower(cmds[0])\n\tgvr, v, err := c.viewMetaFor(command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch command {\n\tcase \"ctx\", \"context\", \"contexts\":\n\t\tif len(cmds) == 2 {\n\t\t\treturn useContext(c.app, cmds[1])\n\t\t}\n\t\treturn c.exec(cmd, gvr, c.componentFor(gvr, path, v), clearStack)\n\tcase \"dir\":\n\t\tif len(cmds) != 2 {\n\t\t\treturn errors.New(\"You must specify a directory\")\n\t\t}\n\t\treturn c.app.dirCmd(cmds[1])\n\tdefault:\n\t\t// checks if Command includes a namespace\n\t\tns := c.app.Config.ActiveNamespace()\n\t\tif len(cmds) == 2 {\n\t\t\tns = cmds[1]\n\t\t}\n\t\tif err := c.app.switchNS(ns); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !c.alias.Check(command) {\n\t\t\treturn fmt.Errorf(\"`%s` Command not found\", cmd)\n\t\t}\n\t\treturn c.exec(cmd, gvr, c.componentFor(gvr, path, v), clearStack)\n\t}\n}",
"func (c Command) Run(args ...string) error {\n\treturn c.builder().Run(args...)\n}",
"func (c *Chain) Run(ctx context.Context, options [][]string, input io.Reader, output io.Writer) error {\n\tvar currentReader = input\n\n\t// cancel everything when something goes wrong\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\tdone := make(chan error, len(c.runnables))\n\n\t// kick of runnables and chain via io.Pipe\n\tfor i := 0; i < len(c.runnables); i++ {\n\t\tvar opts []string\n\t\tif i < len(options) {\n\t\t\topts = options[i]\n\t\t}\n\t\tcurrentReader = runChained(ctx, c.runnables[i], opts, currentReader, done)\n\t}\n\n\t// shovel last output to the output of this runnable\n\tgo func() {\n\t\t_, err := io.Copy(output, currentReader)\n\t\tdone <- err\n\t}()\n\n\t// wait for everything to finish\n\tfor i := 0; i < len(c.runnables)+1; i++ {\n\t\terr := <-done\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (s *Shell) Run(ctx context.Context, args []string) error {\n\tcmds := map[string]*Command{}\n\tfor _, cmd := range s.Commands {\n\t\tcmdNames := append(cmd.Aliases, cmd.Name)\n\t\tfor i := range cmdNames {\n\t\t\tcmds[cmdNames[i]] = cmd\n\t\t}\n\t}\n\n\tif len(args) == 1 && args[0] == \"\" {\n\t\ts.WriteUsage(os.Stdout)\n\t\treturn nil\n\t}\n\n\tcmd := args[0]\n\tvar cleanedArgs []string\n\tif len(args) > 1 {\n\t\tcleanedArgs = args[1:]\n\t}\n\n\tc, ok := cmds[cmd]\n\tif ok {\n\t\treturn c.Do(ctx, s, cleanedArgs)\n\t}\n\treturn s.hasNoSuchCommand(ctx, cmd)\n}",
"func Run() {\n\tflag.Usage = usage\n\tflag.Parse()\n\n\targs := flag.Args()\n\tif len(args) < 1 {\n\t\tusage()\n\t}\n\n\tmutex.Lock()\n\tc, ok := commands[args[0]]\n\tmutex.Unlock()\n\tif !ok || !c.Runnable() {\n\t\tfmt.Fprintf(os.Stderr, \"%s: unknown subcommand %s\\nRun '%s help' for usage.\\n\", Name, args[0], Name)\n\t\tos.Exit(1)\n\t}\n\n\tfs := flag.NewFlagSet(c.Name(), flag.ExitOnError)\n\tfs.Usage = func() { Usage(c) }\n\tc.Register(fs)\n\tfs.Parse(args[1:])\n\terr := c.Run(fs.Args())\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%s: %s: %v\\n\", Name, c.Name(), err)\n\t\tos.Exit(1)\n\t}\n}",
"func (s *replayService) Run(command string) error {\n\tif commands, ok := s.commands.Commands[command]; ok {\n\t\tif commands.Error != \"\" {\n\t\t\treturn errors.New(commands.Error)\n\t\t}\n\t}\n\ts.commands.Next(command)\n\treturn nil\n}",
"func (t *Test) Run() error {\n\tfor _, cmd := range t.cmds {\n\t\t// TODO(fabxc): aggregate command errors, yield diffs for result\n\t\t// comparison errors.\n\t\tif err := t.exec(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func Run() error {\n\treturn command.Execute()\n}",
"func (f CommanderFunc) Run(ctx context.Context, command string, args ...string) error {\n\treturn f(ctx, command, args...)\n}",
"func (b *RunStep) Run(state quantum.StateBag) error {\n\trunner := state.Get(\"runner\").(quantum.Runner)\n\tconn := state.Get(\"conn\").(quantum.AgentConn)\n\toutCh := conn.Logs()\n\tsigCh := conn.Signals()\n\n\tif b.Command == \"\" {\n\t\tcommandRaw, ok := state.GetOk(\"command\")\n\t\tif ok {\n\t\t\tb.Command = commandRaw.(string)\n\t\t}\n\t}\n\n\tlog.Printf(\"Running command: %v\", b.Command)\n\n\terr := runner.Run(b.Command, outCh, sigCh)\n\tif err != nil {\n\t\treturn errors.New(\"Cmd: \" + b.Command + \" failed: \" + err.Error())\n\t}\n\treturn nil\n}",
"func Run() {\n\tcmd.Execute()\n}",
"func (r *simpleRunner) run(cmd *runner.Command, runId runner.RunId, doneCh chan struct{}) {\n\tcheckout, err, checkoutDone := (snapshots.Checkout)(nil), (error)(nil), make(chan struct{}, 1)\n\tgo func() {\n\t\tcheckout, err = r.checkouter.Checkout(cmd.SnapshotId)\n\t\tclose(checkoutDone)\n\t}()\n\n\t// Wait for checkout or cancel\n\tselect {\n\tcase <-doneCh:\n\t\treturn\n\tcase <-checkoutDone:\n\t}\n\tif err != nil {\n\t\tr.updateStatus(runner.ErrorStatus(runId, fmt.Errorf(\"could not checkout: %v\", err)))\n\t\treturn\n\t}\n\tdefer checkout.Release()\n\n\tp, err := r.exec.Exec(execer.Command{\n\t\tArgv: cmd.Argv,\n\t\tDir: checkout.Path(),\n\t})\n\tif err != nil {\n\t\tr.updateStatus(runner.ErrorStatus(runId, fmt.Errorf(\"could not exec: %v\", err)))\n\t\treturn\n\t}\n\n\tr.updateStatus(runner.RunningStatus(runId))\n\n\tprocessCh := make(chan execer.ProcessStatus, 1)\n\tgo func() { processCh <- p.Wait() }()\n\n\t// Wait for process complete or cancel\n\tselect {\n\tcase <-doneCh:\n\t\tp.Abort()\n\t\treturn\n\tcase st := <-processCh:\n\t\tr.updateStatus(makeRunnerStatus(st, runId))\n\t}\n}",
"func (c *Subcommand) Run(flags *flag.FlagSet) error {\n\tif c.runFn != nil {\n\t\treturn c.runFn(flags)\n\t}\n\treturn nil\n}",
"func (m *Manager) Run(command []string) error {\n\topts := buildah.RunOptions{}\n\treturn m.b.Run(command, opts)\n}",
"func (r *MockRunner) Run(cmd *exec.Cmd) error {\n\targs := r.Called(cmd)\n\treturn args.Error(0)\n}",
"func (c *Cmd) Run() error {\n\treturn c.Cmd.Run()\n}",
"func (c *CmdReal) Run() error {\n\treturn c.cmd.Run()\n}",
"func Run(cmd *exec.Cmd) error {\n\treturn DefaultRunner.Run(cmd)\n}",
"func (r RealCommandRunner) Run(command string, args ...string) ([]byte, error) {\n\tout, err := exec.Command(command, args...).CombinedOutput()\n\treturn out, err\n}",
"func (c *Command) Run(ctx *Context) {\n\tc.initialize()\n\n\tif c.ShowHelp == nil {\n\t\tc.ShowHelp = showHelp\n\t}\n\n\t// parse cli arguments\n\tcl := &commandline{\n\t\tflags: c.Flags,\n\t\tcommands: c.Commands,\n\t}\n\tvar err error\n\tif c.SkipFlagParsing {\n\t\tcl.args = ctx.args[1:]\n\t} else {\n\t\terr = cl.parse(ctx.args[1:])\n\t}\n\n\t// build context\n\tnewCtx := &Context{\n\t\tname: ctx.name + \" \" + c.Name,\n\t\tapp: ctx.app,\n\t\tcommand: c,\n\t\tflags: c.Flags,\n\t\tcommands: c.Commands,\n\t\targs: cl.args,\n\t\tparent: ctx,\n\t}\n\n\tif err != nil {\n\t\tnewCtx.ShowError(err)\n\t}\n\n\t// show --help\n\tif newCtx.GetBool(\"help\") {\n\t\tnewCtx.ShowHelpAndExit(0)\n\t}\n\n\t// command not found\n\tif cl.command == nil && len(c.Commands) > 0 && len(cl.args) > 0 {\n\t\tcmd := cl.args[0]\n\t\tif c.OnCommandNotFound != nil {\n\t\t\tc.OnCommandNotFound(newCtx, cmd)\n\t\t} else {\n\t\t\tnewCtx.ShowError(fmt.Errorf(\"no such command: %s\", cmd))\n\t\t}\n\t\treturn\n\t}\n\n\t// run command\n\tif cl.command != nil {\n\t\tcl.command.Run(newCtx)\n\t\treturn\n\t}\n\n\tif c.Action != nil {\n\t\tdefer newCtx.handlePanic()\n\t\tc.Action(newCtx)\n\t} else {\n\t\tnewCtx.ShowHelpAndExit(0)\n\t}\n}",
"func (c *Command) run() {\n\tdefer c.done()\n\tlog.Println(\"Executing \", c.Command)\n\tvar oscmd *exec.Cmd\n\n\tif len(c.parsed) > 1 {\n\t\toscmd = exec.Command(c.parsed[0], c.parsed[1:]...)\n\t} else {\n\t\toscmd = exec.Command(c.parsed[0])\n\t}\n\tif c.session.cwd != \"\" {\n\t\toscmd.Dir = c.session.cwd\n\t}\n\n\tstdout, err := oscmd.StdoutPipe()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\treturn\n\t}\n\tstderr, err := oscmd.StderrPipe()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\treturn\n\t}\n\n\terr = oscmd.Start()\n\tif err != nil {\n\t\tc.push(c.Id, \"console\", err.Error())\n\t\tlog.Println(err)\n\t\treturn\n\t}\n\tc.session.processes[c.Id] = oscmd.Process.Pid\n\n\treader := bufio.NewReader(stdout)\n\treaderErr := bufio.NewReader(stderr)\n\tgo c.readAndPush(readerErr)\n\tc.readAndPush(reader)\n\n\toscmd.Wait()\n}",
"func (cli *CLI) Run(commands ...string) (string, error) {\n\treturn cli.exec(exec.Command(cli.path, commands...))\n}",
"func (cmd *MoveCommand) Run() {\n\tnewSrcPwd := cmdPath(cmd.client.Pwd, cmd.Source)\n\tnewTargetPwd := cmdPath(cmd.client.Pwd, cmd.Target)\n\n\tt := cmd.client.GetType(newSrcPwd)\n\tif t != client.NODE && t != client.LEAF {\n\t\tfmt.Fprintln(cmd.stderr, \"Not a valid source path: \"+newSrcPwd)\n\t\treturn\n\t}\n\n\trunCommandWithTraverseTwoPaths(cmd.client, newSrcPwd, newTargetPwd, cmd.moveSecret)\n\treturn\n}",
"func Run(ch chan bool, cmd *exec.Cmd, dep Target) Target {\n\tannotate := func() error {\n\t\t<-ch\n\t\tInfof(\"run %v\", cmd.Args)\n\t\terr := cmd.Run()\n\t\tch <- true\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"run %v: %v\", cmd.Args, err)\n\t\t}\n\t\treturn err\n\t}\n\ttarget := newTarget(annotate, dep)\n\treturn &target // TODO\n}",
"func run(cmd *exec.Cmd) error {\n\tcmdStr := strings.Join(cmd.Args, \" \")\n\tif cmd.Dir != \"\" {\n\t\tlog.Debug(\"running [%s] in directory %s\", cmdStr, cmd.Dir)\n\t} else {\n\t\tlog.Debug(\"running [%s]\", cmdStr)\n\t}\n\treturn cmd.Run()\n}",
"func Run(cmd *exec.Cmd) (string, error) {\n\tklog.V(4).Infof(\"Executing: %s\", cmd)\n\n\tr, w := io.Pipe()\n\tcmd.Stdout = w\n\tcmd.Stderr = w\n\tbuffer := new(bytes.Buffer)\n\tr2 := io.TeeReader(r, buffer)\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tgo func() {\n\t\tdefer wg.Done()\n\t\tscanner := bufio.NewScanner(r2)\n\t\tfor scanner.Scan() {\n\t\t\tklog.V(5).Infof(\"%s: %s\", cmd.Path, scanner.Text())\n\t\t}\n\t}()\n\terr := cmd.Run()\n\tw.Close()\n\twg.Wait()\n\tklog.V(4).Infof(\"%s terminated, with %d bytes of output and error %v\", cmd.Path, buffer.Len(), err)\n\n\toutput := string(buffer.Bytes())\n\tif err != nil {\n\t\terr = fmt.Errorf(\"command %q failed: %v\\nOutput: %s\", cmd, err, output)\n\t}\n\treturn output, err\n}",
"func (o *Cmd) Run(rl lib.ReactorLog, msg lib.Msg) error {\n\n\tvar args []string\n\n\tif o.argsjson {\n\t\tfor _, parse := range o.args {\n\t\t\tif strings.Contains(parse, \"$.\") {\n\t\t\t\tnewParse := parse\n\t\t\t\tfor _, argValue := range strings.Split(parse, \"$.\") {\n\t\t\t\t\tif argValue == \"\" {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\top, _ := jq.Parse(\".\" + argValue) // create an Op\n\t\t\t\t\tvalue, _ := op.Apply(msg.Body())\n\t\t\t\t\tnewParse = strings.Replace(newParse, \"$.\"+argValue, strings.Trim(string(value), \"\\\"\"), -1)\n\t\t\t\t}\n\t\t\t\targs = append(args, newParse)\n\t\t\t} else {\n\t\t\t\targs = append(args, parse)\n\t\t\t}\n\t\t}\n\t} else {\n\t\targs = o.args\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), maximumCmdTimeLive)\n\tdefer cancel()\n\n\tvar c *exec.Cmd\n\tif len(args) > 0 {\n\t\tc = exec.CommandContext(ctx, o.cmd, args...)\n\t} else {\n\t\tc = exec.CommandContext(ctx, o.cmd)\n\t}\n\n\tc.Stdout = rl\n\tc.Stderr = rl\n\n\trunlog := fmt.Sprintf(\"RUN: %s %s\", o.cmd, strings.Join(args, \" \"))\n\tlog.Println(runlog)\n\trl.WriteStrings(runlog)\n\tif err := c.Run(); err != nil {\n\t\t// This will fail after timeout.\n\t\trl.WriteStrings(fmt.Sprintf(\"ERROR: %s\", err))\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (c *Repeater) Run(cli plugin.CliConnection, args []string) {\n\tif args[0] != c.GetMetadata().Commands[0].Name {\n\t\treturn\n\t}\n\n\tvar firstArg = args[1]\n\n\tvar orgInfos []orgInfo\n\tvar err error\n\n\tvar currentOrg plugin_models.Organization\n\tvar currentSpace plugin_models.Space\n\n\tdefer func() {\n\t\tcli.CliCommand(\"target\", \"-o\", currentOrg.Name, \"-s\", currentSpace.Name)\n\t}()\n\n\tif strings.HasPrefix(firstArg, \"--\") {\n\t\tif len(args) < 3 {\n\t\t\tfmt.Printf(\"You have to tell do-all to do something!\")\n\t\t\tlo.G.Panic(\"You have to tell do-all to do something!\")\n\t\t}\n\n\t\targs = args[2:]\n\n\t\torgInfos, err = c.getAllOrgsAndSpaces(cli, (firstArg == \"--global\"))\n\t} else {\n\t\tif len(args) < 2 {\n\t\t\tfmt.Printf(\"You have to tell do-all to do something!\")\n\t\t\tlo.G.Panic(\"You have to tell do-all to do something!\")\n\t\t}\n\n\t\torgInfos, err = c.getCurrentOrgAndSpace(cli)\n\n\t\targs = args[1:]\n\t}\n\n\tif err != nil {\n\t\tlo.G.Panic(\"PLUGIN ERROR: get apps: \", err)\n\t\treturn\n\t}\n\n\t// capture current target\n\tcurrentOrg, err = cli.GetCurrentOrg()\n\tif err != nil {\n\t\tlo.G.Panic(\"PLUGIN ERROR: get apps: \", err)\n\t\treturn\n\t}\n\n\tcurrentSpace, err = cli.GetCurrentSpace()\n\tif err != nil {\n\t\tlo.G.Panic(\"PLUGIN ERROR: get apps: \", err)\n\t\treturn\n\t}\n\n\tfor _, orgInfo := range orgInfos {\n\t\tfor _, space := range orgInfo.spaces {\n\t\t\tc.runCommands(cli, orgInfo.name, space, args)\n\t\t}\n\t}\n\n}",
"func (c *PushCommand) Run(args []string) int {\n\n\treturn 0\n}",
"func Run(cmd *exec.Cmd, name string, settings ...SettingsFunc) {\n\tr := DefaultRunner()\n\tr.Run(cmd, name, settings...)\n}",
"func (cb *CommandBuilder) Run() CommandProcs {\n\treturn cb.WithPolicy(CmdOnErrContinue).Start().Wait()\n}",
"func (c command) run() error {\n\tc.FlagSet.Parse(flag.Args()[1:])\n\treturn c.f(c.FlagSet)\n}",
"func (c *Command) Run(t *testing.T) {\n\targs := strings.Split(c.Args, \" \")\n\tif output, err := exec.Command(c.Exec, args...).CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"Error executing: '%s' '%s' -err: '%v'\", c.Exec, c.Args, strings.TrimSpace(string(output)))\n\t}\n}",
"func (c *Cmd) Run() error {\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\treturn c.Wait()\n}",
"func (c *Cmd) Run() error {\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\treturn c.Wait()\n}",
"func Run(commands []pakelib.Command, logger *log.Logger) {\n\tcfg := config.New()\n\tfor line, command := range commands {\n\t\terr := command.Execute(cfg, logger)\n\t\tif err != nil {\n\t\t\terrMsg := fmt.Errorf(\"There was an error at line %d: %s\", line+1, err.Error())\n\t\t\tlogger.Println(errMsg.Error())\n\t\t\toutput.Error(errMsg)\n\t\t}\n\t\tcfg.SmartReset()\n\t}\n}",
"func RunWith(\n\tfn func(cmd *cobra.Command, args []string) error,\n) func(cmd *cobra.Command, args []string) {\n\treturn func(cmd *cobra.Command, args []string) {\n\t\tif err := fn(cmd, args); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}",
"func (d *Driver) Run(opts ...Option) error {\n\terr := d.Configure(opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmdArgs, err := d.buildCmdArgs()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := d.execCommand(cmdArgs[0], cmdArgs[1:]...)\n\tif d.opts.debug || d.opts.dryrun {\n\t\t_, ref := d.getCmdSpec()\n\t\tmsg := \"Executing\"\n\t\tif d.opts.dryrun {\n\t\t\tmsg = \"Would execute\"\n\t\t}\n\t\tfmt.Fprintf(os.Stderr, \"%s [%s] -> `%s`...\\n\", msg, ref, cmd)\n\t}\n\n\tif !d.opts.dryrun {\n\t\tcmd.Stdin = os.Stdin\n\t\tcmd.Stdout = d.opts.out\n\t\tcmd.Stderr = os.Stderr\n\n\t\treturn cmd.Run()\n\t}\n\n\treturn nil\n}",
"func (cg *Group) Run() (execError error) {\n\tif len(cg.chains) == 0 {\n\t\treturn nil\n\t}\n\tfor _, op := range cg.chains {\n\t\tif op.mainOperation.segment == sqlSelect {\n\t\t\treturn errors.Errorf(\"cannot query as part of a chain.\")\n\t\t}\n\t}\n\tdb := cg.chains[0].db\n\ttxdb, err := db.BeginTransaction()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"getting transaction to run chain group\")\n\t}\n\tdefer func() {\n\t\tif execError != nil {\n\t\t\terr := db.RollbackTransaction()\n\t\t\texecError = errors.Wrapf(execError,\n\t\t\t\t\"there was a failure running the expression and also rolling back te transaction: %v\",\n\t\t\t\terr)\n\t\t} else {\n\t\t\terr := db.CommitTransaction()\n\t\t\texecError = errors.Wrap(err, \"could not commit the transaction\")\n\t\t}\n\t}()\n\n\tif cg.set != \"\" {\n\t\terr := txdb.Set(cg.set)\n\t\tif err != nil {\n\t\t\treturn errors.Wrapf(err, \"setting %q to the transaction\", cg.set)\n\t\t}\n\t}\n\n\tfor _, op := range cg.chains {\n\t\tquery, args, err := op.Render()\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"rendeding part of chain transaction\")\n\t\t}\n\t\terr = txdb.Exec(query, args...)\n\t\tif err != nil {\n\t\t\treturn errors.Wrap(err, \"error executing query in group\")\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *Reflex) runCommand(name string, stdout chan<- OutMsg) {\n\tcommand := replaceSubSymbol(r.command, r.subSymbol, name)\n\tcmd := exec.Command(command[0], command[1:]...)\n\tr.cmd = cmd\n\n\tif flagSequential {\n\t\tseqCommands.Lock()\n\t}\n\n\ttty, err := pty.Start(cmd)\n\tif err != nil {\n\t\tinfoPrintln(r.id, err)\n\t\treturn\n\t}\n\tr.tty = tty\n\n\tgo func() {\n\t\tscanner := bufio.NewScanner(tty)\n\t\tfor scanner.Scan() {\n\t\t\tstdout <- OutMsg{r.id, scanner.Text()}\n\t\t}\n\t\t// Intentionally ignoring scanner.Err() for now.\n\t\t// Unfortunately, the pty returns a read error when the child dies naturally, so I'm just going to ignore\n\t\t// errors here unless I can find a better way to handle it.\n\t}()\n\n\tr.mu.Lock()\n\tr.running = true\n\tr.mu.Unlock()\n\tgo func() {\n\t\terr := cmd.Wait()\n\t\tif !r.Killed() && err != nil {\n\t\t\tstdout <- OutMsg{r.id, fmt.Sprintf(\"(error exit: %s)\", err)}\n\t\t}\n\t\tr.done <- struct{}{}\n\t\tif flagSequential {\n\t\t\tseqCommands.Unlock()\n\t\t}\n\t}()\n}",
"func (runner *SSHRunner) Run(command string) (string, error) {\n\treturn runner.runSSHCommandFromDriver(command, false)\n}",
"func (c Command) Run(ctx *Context) (err error) {\n\tif !c.SkipFlagParsing {\n\t\tif len(c.Subcommands) > 0 {\n\t\t\treturn c.startApp(ctx)\n\t\t}\n\t}\n\n\tif !c.HideHelp && (HelpFlag != BoolFlag{}) {\n\t\t// append help to flags\n\t\tc.Flags = append(\n\t\t\tc.Flags,\n\t\t\tHelpFlag,\n\t\t)\n\t}\n\n\tif ctx.App.UseShortOptionHandling {\n\t\tc.UseShortOptionHandling = true\n\t}\n\n\tset, err := c.parseFlags(ctx.Args().Tail(), ctx.shellComplete)\n\n\tcontext := NewContext(ctx.App, set, ctx)\n\tcontext.Command = c\n\tif checkCommandCompletions(context, c.Name) {\n\t\treturn nil\n\t}\n\n\tif err != nil {\n\t\tif c.OnUsageError != nil {\n\t\t\terr := c.OnUsageError(context, err, false)\n\t\t\tcontext.App.handleExitCoder(context, err)\n\t\t\treturn err\n\t\t}\n\t\t_, _ = fmt.Fprintln(context.App.Writer, \"Incorrect Usage:\", err.Error())\n\t\t_, _ = fmt.Fprintln(context.App.Writer)\n\t\t_ = ShowCommandHelp(context, c.Name)\n\t\treturn err\n\t}\n\n\tif checkCommandHelp(context, c.Name) {\n\t\treturn nil\n\t}\n\n\tcerr := checkRequiredFlags(c.Flags, context)\n\tif cerr != nil {\n\t\t_ = ShowCommandHelp(context, c.Name)\n\t\treturn cerr\n\t}\n\n\tif c.After != nil {\n\t\tdefer func() {\n\t\t\tafterErr := c.After(context)\n\t\t\tif afterErr != nil {\n\t\t\t\tcontext.App.handleExitCoder(context, err)\n\t\t\t\tif err != nil {\n\t\t\t\t\terr = NewMultiError(err, afterErr)\n\t\t\t\t} else {\n\t\t\t\t\terr = afterErr\n\t\t\t\t}\n\t\t\t}\n\t\t}()\n\t}\n\n\tif c.Before != nil {\n\t\terr = c.Before(context)\n\t\tif err != nil {\n\t\t\tcontext.App.handleExitCoder(context, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif c.Action == nil {\n\t\tc.Action = helpSubcommand.Action\n\t}\n\n\terr = HandleAction(c.Action, context)\n\n\tif err != nil {\n\t\tcontext.App.handleExitCoder(context, err)\n\t}\n\treturn err\n}",
"func (i ios) Run(ctx context.Context, command string) (string, error) {\n\toutput, err := i.RunUntil(ctx, command, i.basePrompt())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\toutput = strings.ReplaceAll(output, \"\\r\\n\", \"\\n\")\n\tlines := strings.Split(output, \"\\n\")\n\tresult := \"\"\n\n\tfor i := 1; i < len(lines)-1; i++ {\n\t\tresult += lines[i] + \"\\n\"\n\t}\n\n\treturn result, nil\n}",
"func Run(dir, command string, flags ...string) error {\n\tcmd := exec.Command(command, flags...)\n\tcmd.Dir = dir\n\terr := cmd.Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to run command: %s %v\", command, err)\n\t}\n\treturn nil\n}",
"func (cmd *Command) Run() error {\n\t// Copied from exec.Cmd#Run\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\treturn cmd.Wait()\n}",
"func (c *addDutRun) Run(a subcommands.Application, args []string, env subcommands.Env) int {\n\tif err := c.innerRun(a, args, env); err != nil {\n\t\tPrintError(a.GetErr(), err)\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func RunCommand(command ...string) {\n\tif len(command) > 1 {\n\t\tswitch command[0] {\n\t\tcase \"run\":\n\t\t\trun(command[1:])\n\t\tcase \"build\":\n\t\t\tbuild(command[1:])\n\t\tcase \"test\":\n\t\t\ttest(command[1:])\n\t\tcase \"help\":\n\t\t\tdisplayHelp(command[1])\n\t\t}\n\t} else {\n\t\tdisplayHelp()\n\t}\n}",
"func (cmd *Cmd) Run() error {\n\tif isWindows {\n\t\treturn cmd.Spawn()\n\t}\n\treturn cmd.Exec()\n}",
"func (c *InstallCommand) Run() error {\n\tcontext, err := c.Settings.Config.GetCurrentContext()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpkg, err := ioutil.ReadFile(c.ChaincodePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := resmgmt.InstallCCRequest{\n\t\tName: c.ChaincodeName,\n\t\tPath: c.ChaincodePath,\n\t\tVersion: c.ChaincodeVersion,\n\t\tPackage: &resource.CCPackage{\n\t\t\tType: peer.ChaincodeSpec_GOLANG,\n\t\t\tCode: pkg,\n\t\t},\n\t}\n\n\toptions := []resmgmt.RequestOption{\n\t\tresmgmt.WithTargetEndpoints(context.Peers...),\n\t\tresmgmt.WithRetry(retry.DefaultResMgmtOpts),\n\t}\n\n\tif _, err := c.ResourceManagement.InstallCC(req, options...); err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Fprintf(c.Settings.Streams.Out, \"successfully installed chaincode '%s'\\n\", c.ChaincodeName)\n\n\treturn nil\n}",
"func (cmd *FigletCommand) Run(args ...string) {\n\tchannel, err := cmd.Channel(cmd.line)\n\tif err != nil {\n\t\tcmd.bot.LogError(\"FigletCommand.Run()\", err)\n\t\treturn\n\t}\n\n\tif len(args) <= 0 {\n\t\tcmd.bot.Log.WithField(\"args\", args).Debug(\"FigletCommand.Run(): No args\")\n\t\treturn\n\t}\n\n\tphrase := args[0]\n\tif phrase == \"\" {\n\t\tcmd.bot.Log.Debug(\"FigletCommand.Run(): No phrase\")\n\t\treturn\n\t}\n\n\tif output, err := exec.Command(figletPath, phrase).Output(); err != nil {\n\t\tcmd.bot.LogError(\"FigletCommand.Run()\", err)\n\t} else {\n\t\tfor _, line := range strings.Split(string(output), \"\\n\") {\n\t\t\tcmd.bot.Msg(cmd.conn, channel, line)\n\t\t}\n\t}\n}",
"func RunCMD(cmds []string, done func()) chan bool {\n\tif len(cmds) < 0 {\n\t\tpanic(\"commands list cant be empty\")\n\t}\n\n\tvar relunch = make(chan bool)\n\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif err := recover(); err != nil {\n\t\t\t\tlog.Printf(\"cmdRun.Error: %+s\", err)\n\t\t\t}\n\t\t}()\n\n\tcmdloop:\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase do, ok := <-relunch:\n\n\t\t\t\tif !ok {\n\t\t\t\t\tbreak cmdloop\n\t\t\t\t}\n\n\t\t\t\tif !do {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tfor _, cox := range cmds {\n\n\t\t\t\t\tcmd := strings.Split(cox, \" \")\n\n\t\t\t\t\tif len(cmd) <= 1 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\n\t\t\t\t\tcmdo := exec.Command(cmd[0], cmd[1:]...)\n\t\t\t\t\tcmdo.Stdout = os.Stdout\n\t\t\t\t\tcmdo.Stderr = os.Stderr\n\n\t\t\t\t\tif err := cmdo.Start(); err != nil {\n\t\t\t\t\t\tfmt.Printf(\"---> Error executing command: %s -> %s\\n\", cmd, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif done != nil {\n\t\t\t\t\tdone()\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t}()\n\treturn relunch\n}",
"func (proxy *Command) Run() error {\n\tif Enable {\n\t\tstartTime := time.Now()\n\t\tdefer proxy.measure(startTime)\n\t}\n\treturn proxy.Original.Run()\n}",
"func Run(command string, args []string) error {\n\n\targs = deleteEmptyFields(args)\n\tlog.WithFields(log.Fields{\n\t\t\"command\": strings.Join(append([]string{command}, args...), \" \"),\n\t}).Info(\"execute command\")\n\n\tcmd := exec.Command(command, args...)\n\tvar stderr bytes.Buffer\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = &stderr\n\n\tstart := time.Now()\n\terr := cmd.Run()\n\telapsed := time.Since(start)\n\n\tif err != nil && elapsed < time.Second {\n\t\terrStr := stderr.String()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"command\": command,\n\t\t\t\"args\": args,\n\t\t}).Error(errStr)\n\t}\n\n\treturn err\n\n}",
"func (c *Command) run(args []string) error {\n\terr := c.Fn(args)\n\tif cerr, ok := err.(ArgError); ok {\n\t\treturn fmt.Errorf(\"%v\\nusage: %v %v\", cerr, c.Name, c.Usage)\n\t}\n\n\treturn err\n}",
"func (cmd *Cmd) Run() error {\n\tvar stderr bytes.Buffer\n\tcmd.cmd.Stderr = &stderr\n\terr := cmd.cmd.Run()\n\tif err != nil {\n\t\tswitch err.(type) {\n\t\tcase *exec.ExitError:\n\t\t\tfmt.Fprintf(os.Stderr, \"%s\\n\", stderr.String())\n\t\t\treturn err\n\t\tdefault:\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (*CmdRunner) Run(cmd *exec.Cmd) error {\n\tfmt.Println(strings.Join(cmd.Args, \" \"))\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn cmd.Wait()\n}",
"func (runner execRunner) Run(name string, args ...string) error {\n\tcmd := exec.Command(name, args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}",
"func (t *SimpleChaincode) Run(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tfmt.Println(\"run is running \" + function)\n\treturn t.Invoke(stub, function, args)\n}",
"func Run() int {\n\tflag.Parse()\n\tctx := context.Background()\n\tres := subcommands.Execute(ctx)\n\treturn int(res)\n}",
"func (r *OC) Run(arg ...string) error {\n\treturn r.run(os.Stdout, arg...)\n}",
"func (b *Builder) Run() error {\n\tdefer b.Cleanup()\n\tlogrus.Debug(b.Options)\n\n\tfor _, s := range b.steps {\n\t\terr := s()\n\t\tif err != nil {\n\t\t\treturn errors.WithStack(err)\n\t\t}\n\t\tos.Chdir(b.Root)\n\t}\n\treturn nil\n}",
"func (s *Stream) Run(cmd string) error {\n\t// First parse the entire command string\n\tcm, er := parser.ParseString(cmd)\n\tif er != nil {\n\t\treturn er\n\t}\n\t//spew.Dump(cm)\n\t// So now we run these commands on the file\n\tfi, er := interpreter.New(s.file)\n\tif er != nil {\n\t\treturn er\n\t}\n\treturn fi.Run(cm)\n}",
"func Run() error {\n\terr := Make()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"make: %w\", err)\n\t}\n\n\tbaseDir, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getwd: %w\", err)\n\t}\n\tdefer func() {\n\t\tos.Chdir(baseDir)\n\t}()\n\terr = os.Chdir(\"../bin/\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"chdir ../bin: %w\", err)\n\t}\n\t_, err = os.Stat(\"eqx\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stat eqx: %w\", err)\n\t}\n\terr = sh.Run(\"eqx\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"run eqx: %w\", err)\n\t}\n\treturn nil\n}",
"func (r Runner) Run(commandAndArgs ...string) (string, error) {\n\tif len(commandAndArgs) == 0 {\n\t\treturn \"\", fmt.Errorf(\"No command provided.\")\n\t}\n\toutput, err := exec.Command(commandAndArgs[0], commandAndArgs[1:]...).CombinedOutput()\n\toutputString := string(output)\n\tif err != nil {\n\t\terrorString := fmt.Sprintf(\"%s\", err)\n\t\tif outputString != \"\" {\n\t\t\terrorString = fmt.Sprintf(\"%s, details: %s\", errorString, outputString)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"Failed to execute command %s: %s\", commandAndArgs, errorString)\n\t}\n\treturn outputString, nil\n}",
"func (ui *UI) Run(\n\t// This function will be called by Run() to find the implemenation for a command name.\n\t// You can use the provided ui.DynamicCommandLookup(impl), or you can generate a\n\t// completely type-safe UI, and use ui.staticCommandLookup.\n\tgetCommandMethod func(commandName string) (impl func(), found bool),\n\tcommandNames []string,\n) {\n\tunknown := make([]string, 0)\n\tqueue := make([]func(), 0, len(commandNames))\n\tfor _, n := range commandNames {\n\t\tfn, found := getCommandMethod(n)\n\t\tif !found {\n\t\t\t// special case for \"help\": provide help on any other commands given and\n\t\t\t// do nothing else\n\t\t\tif n == \"help\" {\n\t\t\t\tqueue = []func(){ui.HelpFor(commandNames)}\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tunknown = append(unknown, n)\n\t\t\tcontinue\n\t\t}\n\t\tqueue = append(queue, fn)\n\t}\n\n\tif len(unknown) > 0 {\n\t\tpanic(fmt.Sprintf(\"Unknown commands: %v\", unknown))\n\t}\n\n\tfor _, fn := range queue {\n\t\tfn()\n\t}\n}",
"func (r *Runner) Run() {\n\tif err := os.MkdirAll(r.Dir, 0700); err != nil {\n\t\tr.setError(err)\n\t\treturn\n\t}\n\tdefer r.Cleanup()\n\tif flags.Verbose {\n\t\tfmt.Printf(\"Run %s %s\\n\", r.Binary, strings.Join(r.Args, \" \"))\n\t}\n\n\tvar stdout, stderr bytes.Buffer\n\tr.Cmd.Dir = r.Dir\n\tr.Cmd.Stdout = &stdout\n\tr.Cmd.Stderr = &stderr\n\n\tdefer func() {\n\t\tr.Stdout = stdout.Bytes()\n\t\tr.Stderr = stderr.Bytes()\n\t\tif !r.Preserve || r.Error() == nil {\n\t\t\treturn\n\t\t}\n\t\tif err := writefile(filepath.Join(r.Dir, \"stdout\"), r.Stdout); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t\tif err := writefile(filepath.Join(r.Dir, \"stderr\"), r.Stderr); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t}()\n\n\tr.mu.Lock()\n\t// r.err will not be nil if we tried to kill ourselves off\n\tif r.err == nil {\n\t\tr.err = r.Cmd.Start()\n\t\tif r.err == nil {\n\t\t\tgo func() {\n\t\t\t\tr.errCh <- r.Cmd.Wait()\n\t\t\t\tclose(r.errCh)\n\t\t\t}()\n\t\t} else {\n\t\t\tclose(r.errCh)\n\t\t}\n\t}\n\terr := r.err\n\tr.mu.Unlock()\n\tif err != nil {\n\t\treturn\n\t}\n\tt := time.NewTimer(r.Timeout)\n\tdefer t.Stop()\n\tr.Wait(t, killSignals)\n}",
"func Run(logger log.Logger, streams cmd.IOStreams, args []string) error {\n\t// NOTE: we handle the quiet flag here so we can fully silence cobra\n\tif checkQuiet(args) {\n\t\t// if we are in quiet mode, we want to suppress all status output\n\t\t// only streams.Out should be written to (program output)\n\t\tlogger = log.NoopLogger{}\n\t\tstreams.ErrOut = io.Discard\n\t}\n\t// actually run the command\n\tc := kind.NewCommand(logger, streams)\n\tc.SetArgs(args)\n\tif err := c.Execute(); err != nil {\n\t\tlogError(logger, err)\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (p *Proxy) Run(command string, args ...string) error {\n\tlog.WithFields(log.Fields{\n\t\t\"command\": command,\n\t\t\"args\": args,\n\t}).Debug(\"exec\")\n\n\tcmd := exec.Command(command, args...)\n\tcmd.Env = append(os.Environ(), p.functionEnvVars()...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Dir = p.Dir\n\n\treturn cmd.Run()\n}",
"func (b *Builder) Run(args ...string) error {\n\trun, err := b.exec(args...)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn run.Run()\n}",
"func Run(ctx context.Context, args []string) {\n\tvar binary string\n\tif len(args) == 0 {\n\t\tbinary = Build(ctx, []string{\".\"}, \"\")\n\t} else {\n\t\tbinary, args = args[0], args[1:]\n\t}\n\tcmd := exec.CommandContext(ctx, binary, args...)\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tmust.Nil(cmd.Run())\n\tmust.Nil(os.Remove(binary))\n}",
"func RunMultipleCommands(commands []entity.Command) error {\n\tvar dir string\n\tfor _, command := range commands {\n\t\tlookPath, err := exec.LookPath(command.Name)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"installing %s is in your future\", command.Name)\n\t\t\treturn err\n\t\t}\n\t\tcmd := exec.Command(lookPath, command.Args...)\n\t\tif strings.Contains(command.Name, \"cd\") {\n\t\t\tdir = command.Args[0]\n\t\t} else {\n\t\t\tcmd.Dir = dir\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tvar out bytes.Buffer\n\t\t\tcmd.Stdout = &out\n\t\t\tlog.Printf(\"command: %s, %q\\r\\n %s\", command.Name, command.Args, out.String())\n\t\t}\n\t}\n\treturn nil\n}",
"func (f *FakeCmdRunner) Run(name string, args ...string) error {\n\treturn f.RunWithOptions(cmd.CommandOpts{}, name, args...)\n}",
"func (c *Tool) Run() ([]byte, error) {\n\tif IsDebug() == true {\n\t\tstart := time.Now()\n\t\tcommands := c.Commands()\n\t\tout, err := c.Command().CombinedOutput()\n\t\tfmt.Println(time.Since(start), commands)\n\t\treturn out, err\n\t}\n\treturn c.Command().CombinedOutput()\n}",
"func (t *SimpleChaincode) Run(stub *shim.ChaincodeStub, function string, args []string) ([]byte, error) {\n\tfmt.Println(\"run is running \" + function)\n\treturn t.Invoke(stub, function, args)\n}",
"func Run() error {\n\tcmd := NewCommand(os.Stdin, os.Stdout, os.Stderr)\n\treturn cmd.Execute()\n}",
"func (c *Commandable) Run() (status *types.CommandStatus, err error) {\n\truncmdlist, err := NewRunCmdListFromSpec(c.Command.Spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tout := runcmdlist.Run()\n\tc.setStatus(out)\n\treturn c.Status, nil\n}",
"func (h *Handler) Run(_ *cli.Context) error {\n\tcmd, err := newCommand(h)\n\tutils.FatalOnErr(err)\n\n\tutils.FatalOnErr(cmd.Run())\n\n\treturn nil\n}",
"func (m *MockRunner) Run(cmd string) ([]byte, error) {\n\targs := m.Called(1)\n\treturn args.Get(0).([]byte), args.Error(1)\n}",
"func (s *Minidump) Run(command string) (result string, err error) {\n\n\t// Check options\n\tif ok, err := s.CheckRequiredOptions(); !ok {\n\t\treturn \"\", err\n\t}\n\n\t// Check session\n\terr = s.GetSession()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcommandList, err := s.Parse()\n\tresult = strings.Join(commandList, \" \")\n\n\treturn result, nil\n}",
"func (t testCommand) Run() error {\n\tif t.shouldFail {\n\t\treturn errors.New(\"I AM ERROR\")\n\t}\n\treturn nil\n}",
"func (c *Cmd) Run(opts ...RunOption) error {\n\tif err := c.Start(); err != nil {\n\t\treturn err\n\t}\n\n\terr := c.Wait(opts...)\n\treturn err\n}",
"func (client *Client) Run() {\n\tret, val, msg := client.executeCommand()\n\tlog.Printf(\"Execute command result: %v\", ret)\n\tlog.Printf(\"Execute command value: %v\", val)\n\tlog.Printf(\"Execute command message: %v\", msg)\n}",
"func Run(opts ...cli.Option) error {\n\treturn cmd.Run(opts...)\n}",
"func (cmd *Command) Do(ctx context.Context, shell *Shell, args []string) (err error) {\n\tc := &Context{ctx: ctx, shell: shell, args: args}\n\tif cmd.Before != nil {\n\t\terr = cmd.Before(c)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\tif cmd.Action != nil {\n\t\terr = cmd.Action(c)\n\t}\n\treturn err\n}",
"func Run(cmd *exec.Cmd) (*RunResult, error) {\n\trr := &RunResult{Args: cmd.Args}\n\n\tvar outb, errb bytes.Buffer\n\tcmd.Stdout, rr.Stdout = &outb, &outb\n\tcmd.Stderr, rr.Stderr = &errb, &errb\n\n\tstart := time.Now()\n\tklog.V(1).Infof(\"Running: %s\", cmd)\n\terr := cmd.Run()\n\trr.Duration = time.Since(start)\n\n\tif err != nil {\n\t\tif exitError, ok := err.(*exec.ExitError); ok {\n\t\t\trr.ExitCode = exitError.ExitCode()\n\t\t}\n\t}\n\n\tklog.V(1).Infof(\"Completed: %s (duration: %s, exit code: %d, err: %v)\", cmd, rr.Duration, rr.ExitCode, err)\n\tif len(rr.Stderr.Bytes()) > 0 {\n\t\tklog.V(1).Infof(\"stderr:\\n%s\\n\", rr.Stderr.String())\n\t}\n\n\treturn rr, err\n}",
"func (b *Builder) Run(r *Runner) {\n\tr.Run()\n\tb.mu.Lock()\n\tdelete(b.running, r)\n\tb.mu.Unlock()\n}",
"func (c *TestCommand) Run() error {\n\tlocalPath, err := c.build()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfile, err := os.Open(localPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tftpConn, err := c.config.DialFtp()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ftpConn.Close()\n\n\t_, name := filepath.Split(localPath)\n\tdronePath, err := ftpConn.Upload(file, name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ftpConn.Del(name)\n\n\ttelnetConn, err := c.config.DialTelnet()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer telnetConn.Close()\n\n\tcmd := fmt.Sprintf(\"chmod +x %s && %s\", dronePath, dronePath)\n\treturn telnetConn.Exec(cmd, os.Stdout)\n}",
"func Run() error {\n\trootCmd := newRootCmd()\n\treturn rootCmd.Execute()\n}",
"func (c *Command) Run() (int, error) {\n\n\t// Ensure we always target running instances\n\tc.Targets = append(c.Targets, \"instance-state-name=running\")\n\n\ttargets, err := c.targets()\n\tif err != nil {\n\t\treturn 1, err\n\t}\n\n\t// Randomize and limit the number of instances\n\tinstanceIDs, err := randomTargets(targets, c.TargetLimit)\n\tif err != nil {\n\t\treturn 1, err\n\t}\n\n\t// Split the instanceIDs into batches of 50 items.\n\tbatch := 50\n\tfor i := 0; i < len(instanceIDs); i += batch {\n\t\tj := i + batch\n\t\tif j > len(instanceIDs) {\n\t\t\tj = len(instanceIDs)\n\t\t}\n\n\t\tif err := c.RunCommand(instanceIDs[i:j]); err != nil {\n\t\t\treturn 1, err\n\t\t}\n\n\t}\n\n\treturn exitCode, nil\n}",
"func run(name string, arg ...string) error {\n\tcmd := exec.Command(name, arg...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}",
"func Run() {\n\tif len(os.Args) < 2 {\n\t\tusageExit(errors.New(\"missing command name\"))\n\t}\n\tselected := os.Args[1]\n\tif selected == \"-h\" || selected == \"--help\" || selected == \"-help\" {\n\t\tusageExit(nil)\n\t}\n\tif strings.HasPrefix(selected, \"-\") {\n\t\tusageExit(errors.New(\"command name required as first argument\"))\n\t}\n\tcmd, ok := commands[selected]\n\tif !ok {\n\t\tusageExit(errors.New(\"unknown command '\" + selected + \"'\"))\n\t}\n\n\t// flags.Parse prints help in case of -help. I'm not a fan of this.\n\tcmd.flags.SetOutput(ioutil.Discard)\n\terr := cmd.flags.Parse(os.Args[2:])\n\tcmd.flags.SetOutput(Output)\n\tif err == flag.ErrHelp {\n\t\tcmdUsageExit(cmd.flags, nil)\n\t}\n\tif err != nil {\n\t\tcmdUsageExit(cmd.flags, err)\n\t}\n\terr = cmd.f()\n\tif err != nil {\n\t\tif isArgError(err) {\n\t\t\tcmdUsageExit(cmd.flags, err)\n\t\t}\n\t\tfmt.Fprintf(Output, \"Error running %s command: %+v\", cmd.flags.Name(), err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(0)\n}",
"func (j Jibi) RunCommand(cmd Command, resp chan string) {\n\tif cmd < cmdCPU {\n\t\tj.cpu.RunCommand(cmd, resp)\n\t} else if cmd < cmdGPU {\n\t\tj.gpu.RunCommand(cmd, resp)\n\t} else if cmd < cmdKEYPAD {\n\t\tj.kp.RunCommand(cmd, resp)\n\t} else if cmd < cmdALL {\n\t\tj.cpu.RunCommand(cmd, resp)\n\t\tj.gpu.RunCommand(cmd, resp)\n\t\tj.kp.RunCommand(cmd, resp)\n\t}\n}",
"func (r *Remoter) Run(waitCtx context.Context, cmds []string, stdout io.Writer, stderr io.Writer) error {\n\n\tclt := r.clt\n\tssn, err := clt.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer ssn.Close()\n\n\t// not know why use this\n\t// keep it for someday I know this\n\t//modes := ssh.TerminalModes{\n\t// ssh.ECHO: 0, // disable echoing\n\t// ssh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud\n\t// ssh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud\n\t//}\n\t//_ = modes\n\t//if err := ssn.RequestPty(\"xterm\", 80, 40, modes); err != nil {\n\t// return nil\n\t//}\n\n\tshellInput, err := ssn.StdinPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tssn.Stdout = stdout\n\tssn.Stderr = stderr\n\terr = ssn.Shell()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, cmd := range cmds {\n\t\t_, err = fmt.Fprintf(shellInput, \"%s\\n\", cmd)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// we must have exit to return `Wait()`\n\t_, err = fmt.Fprintf(shellInput, \"exit\\n\")\n\tif err != nil {\n\t\treturn err\n\t}\n\t// all command after exit will not run\n\tnoNeedWait, waitGrp := r.wait(waitCtx)\n\terr = ssn.Wait()\n\tclose(noNeedWait)\n\twaitGrp.Wait()\n\treturn err\n}",
"func (r *Runner) Run(ctx context.Context, node syntax.Node) error {\n\tif !r.didReset {\n\t\tr.Reset()\n\t}\n\tr.err = nil\n\tr.filename = \"\"\n\tswitch x := node.(type) {\n\tcase *syntax.File:\n\t\tr.filename = x.Name\n\t\tr.stmts(ctx, x.StmtList)\n\tcase *syntax.Stmt:\n\t\tr.stmt(ctx, x)\n\tcase syntax.Command:\n\t\tr.cmd(ctx, x)\n\tdefault:\n\t\treturn fmt.Errorf(\"Node can only be File, Stmt, or Command: %T\", x)\n\t}\n\tif r.exit > 0 {\n\t\tr.setErr(ExitStatus(r.exit))\n\t}\n\treturn r.err\n}",
"func (a *BootstrapCommand) Run() (all []string, err error) {\n\tfor _, c := range BootstrapCommands {\n\t\tif c != a {\n\t\t\tvar files []string\n\t\t\tfiles, err = c.Run()\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tall = append(all, files...)\n\t\t}\n\t}\n\n\treturn\n}",
"func (c *LocalCmd) Run() error {\n\treturn runCmd(c.cmd, c.args, c.env, ioutil.Discard, ioutil.Discard)\n}",
"func (m *MiddlewareChain) Run(w http.ResponseWriter, req *http.Request) {\n\tm.chain.ServeHTTP(w, req)\n}",
"func (cr *CommandRunner) Run() {\n\tfor {\n\t\tif cr.cmd == nil {\n\t\t\tif cr.killed {\n\t\t\t\treturn\n\t\t\t}\n\t\t\t// subprocess is not running\n\t\t\tselect {\n\t\t\tcase <-cr.runChan:\n\t\t\t\tlog.Debug(\"Received on runChan, starting subprocess\")\n\t\t\t\tcr.createSubprocess()\n\t\t\tcase <-cr.killChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\t// subprocess is running\n\t\t\tselect {\n\t\t\tcase <-cr.runChan:\n\t\t\t\tcontinue\n\t\t\tcase <-cr.killChan:\n\t\t\t\t// using -pid here due to setpgid\n\t\t\t\tif !cr.killed {\n\t\t\t\t\tlog.Debug(\"Sending SIGTERM\")\n\t\t\t\t\tsyscall.Kill(-cr.cmd.Process.Pid, syscall.SIGTERM)\n\t\t\t\t\tcr.killed = true\n\t\t\t\t} else {\n\t\t\t\t\tlog.Debug(\"Sending SIGKILL\")\n\t\t\t\t\tsyscall.Kill(-cr.cmd.Process.Pid, syscall.SIGKILL)\n\t\t\t\t}\n\t\t\tcase <-cr.ticker.C:\n\t\t\t\tcr.wait()\n\t\t\t}\n\t\t}\n\t}\n}",
"func Run(cmd *exec.Cmd) (string, error) {\n\treturn DefaultExecutor.Run(cmd)\n}"
] | [
"0.6471542",
"0.6431098",
"0.6414838",
"0.6394",
"0.6344191",
"0.63309324",
"0.6237898",
"0.62331253",
"0.61902803",
"0.6167945",
"0.6162895",
"0.6149439",
"0.6135483",
"0.6120912",
"0.6120514",
"0.61096513",
"0.6101045",
"0.6068022",
"0.6066729",
"0.6057943",
"0.602746",
"0.60109323",
"0.59939516",
"0.59859955",
"0.59852064",
"0.59541315",
"0.59330475",
"0.5916792",
"0.58711743",
"0.5871142",
"0.5866493",
"0.5850601",
"0.584505",
"0.58322453",
"0.5827397",
"0.5827397",
"0.58236563",
"0.57998043",
"0.5790571",
"0.57904285",
"0.5787022",
"0.5780909",
"0.5726633",
"0.5725403",
"0.57242715",
"0.57242167",
"0.5720108",
"0.571426",
"0.5700008",
"0.569322",
"0.5691684",
"0.56866103",
"0.5672691",
"0.5668122",
"0.56627214",
"0.56548405",
"0.5643489",
"0.5642197",
"0.5637128",
"0.56233764",
"0.56213623",
"0.5620013",
"0.5619941",
"0.56094563",
"0.56070316",
"0.5606608",
"0.56055146",
"0.5605031",
"0.55989647",
"0.5597178",
"0.5596797",
"0.5595121",
"0.5587262",
"0.55795044",
"0.55749595",
"0.557418",
"0.5572997",
"0.55406344",
"0.5540428",
"0.5534464",
"0.5533903",
"0.55334795",
"0.55243355",
"0.5523401",
"0.55226076",
"0.55151737",
"0.55148613",
"0.55130553",
"0.550374",
"0.55036193",
"0.55019546",
"0.55018157",
"0.54997945",
"0.5496081",
"0.54896307",
"0.54857993",
"0.5473216",
"0.5460835",
"0.545666",
"0.5455523"
] | 0.72497594 | 0 |
cmdParse parses a command string into a command structure. | func cmdParse(commandStr string, config *Config) (*Command, error) {
// TODO: criteria
lexer := lex(commandStr)
defer lexer.drain()
cmdToken := lexer.nextItem()
if cmdToken.typ != itemString {
return nil, fmt.Errorf("expected string, got token '%s'", cmdToken.val)
}
// TODO: chained commands
command := &Command{}
fn, ok := cmdParseTable[cmdToken.val]
if !ok {
return nil, fmt.Errorf("command '%s' not implemented", cmdToken.val)
}
cmd, err := fn(lexer, config)
if err != nil {
return nil, err
}
command.Executer = cmd
return command, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *Parser) Parse(str string) (Command, []string, error) {\n\tvar cmd Command\n\targs := p.extractArgs(str)\n\n\tswitch strings.ToUpper(args[0]) {\n\tcase \"HELP\":\n\t\tcmd = &Help{parser: p}\n\tcase \"DEL\":\n\t\tcmd = &Del{strg: p.strg}\n\tcase \"EXPIRE\":\n\t\tcmd = &Expire{clck: p.clck, strg: p.strg}\n\tcase \"GET\":\n\t\tcmd = &Get{strg: p.strg}\n\tcase \"SET\":\n\t\tcmd = &Set{strg: p.strg}\n\tcase \"STRLEN\":\n\t\tcmd = &Strlen{strg: p.strg}\n\tcase \"GETBIT\":\n\t\tcmd = &GetBit{strg: p.strg}\n\tcase \"SETBIT\":\n\t\tcmd = &SetBit{strg: p.strg}\n\tcase \"HGET\":\n\t\tcmd = &Hget{strg: p.strg}\n\tcase \"HKEYS\":\n\t\tcmd = &Hkeys{strg: p.strg}\n\tcase \"HSET\":\n\t\tcmd = &Hset{strg: p.strg}\n\tcase \"HVALS\":\n\t\tcmd = &Hvals{strg: p.strg}\n\tcase \"KEYS\":\n\t\tcmd = &Keys{strg: p.strg}\n\tcase \"LINDEX\":\n\t\tcmd = &Lindex{strg: p.strg}\n\tcase \"LLEN\":\n\t\tcmd = &Llen{strg: p.strg}\n\tcase \"LPOP\":\n\t\tcmd = &Lpop{strg: p.strg}\n\tcase \"LPUSH\":\n\t\tcmd = &Lpush{strg: p.strg}\n\tcase \"RPUSH\":\n\t\tcmd = &Rpush{strg: p.strg}\n\tcase \"RPOP\":\n\t\tcmd = &Rpop{strg: p.strg}\n\tcase \"LRANGE\":\n\t\tcmd = &Lrange{strg: p.strg}\n\tcase \"LREM\":\n\t\tcmd = &Lrem{strg: p.strg}\n\tcase \"TTL\":\n\t\tcmd = &TTL{strg: p.strg, clck: p.clck}\n\tcase \"TYPE\":\n\t\tcmd = &Type{strg: p.strg}\n\tcase \"PING\":\n\t\tcmd = &Ping{}\n\tdefault:\n\t\treturn nil, nil, ErrCommandNotFound\n\t}\n\n\treturn cmd, args[1:], nil\n}",
"func ParseCmd(buffer []byte) Command {\n\tcmd := Command{Cmd: buffer[0], Params: make([]string, 0)}\n\n\ti := 1\n\tfor {\n\t\tif len(buffer) < i+4 {\n\t\t\tbreak\n\t\t}\n\n\t\targlen := bytesToUint32(buffer[i], buffer[i+1], buffer[i+2], buffer[i+3])\n\n\t\tif len(buffer) < i+4+int(arglen) {\n\t\t\tbreak\n\t\t}\n\n\t\tcmd.Params = append(cmd.Params, string(buffer[i+4:i+4+int(arglen)]))\n\n\t\ti += int(arglen) + 4\n\t}\n\n\treturn cmd\n}",
"func ParseCommand(cmdStr string) (Command, error) {\n\treCommand, _ := regexp.Compile(\"(/?[\\\\w\\\\.;@,!@#$&^-_=*\\\\+]+)\")\n\tmatch := reCommand.FindAllStringSubmatch(cmdStr, -1)\n\n\tcmd := Command{}\n\tif len(match) == 0 {\n\t\treturn cmd, fmt.Errorf(\"Unknown command: %v\", cmdStr)\n\t}\n\n\tcmd.Command = match[0][0]\n\tswitch cmd.Command {\n\tcase \"/reg\":\n\t\tcmd.Args = make([]string, 2, 2)\n\t\tcmd.Args[0] = match[1][0]\n\t\tcmd.Args[1] = match[2][0]\n\tcase \"/receipt\":\n\t\tcmd.Args = make([]string, 0, 1)\n\t\tif len(match) > 1 {\n\t\t\tcmd.Args = append(cmd.Args, match[1][0])\n\t\t}\n\tcase \"/notify\":\n\t\tcmd.Args = make([]string, 0, 1)\n\t\tif len(match) > 1 {\n\t\t\tcmd.Args = append(cmd.Args, match[1][0])\n\t\t}\n\tcase \"/get\":\n\t\tcmd.Args = make([]string, 0, 1)\n\t\tif len(match) > 1 {\n\t\t\tcmd.Args = append(cmd.Args, match[1][0])\n\t\t}\n\tcase \"/help\":\n\t\tcmd.Args = make([]string, 0, 0)\n\tdefault:\n\t\treturn cmd, fmt.Errorf(\"Unknown command: %v\", cmd.Command)\n\t}\n\n\treturn cmd, nil\n}",
"func (p *Parser) Parse(str string) (Command, []string, error) {\n\tvar cmd Command\n\ttrimPrefixStr := strings.TrimSpace(str)\n\tswitch {\n\tcase strings.HasPrefix(strings.ToLower(trimPrefixStr), \"flow_run\"):\n\t\tcmd = &FlowRunCmd{[]byte(str), p.IStorage}\n\t\treturn cmd, nil, nil\n\tcase strings.HasPrefix(strings.ToLower(trimPrefixStr), \"flow\"):\n\t\tcmd = &FlowCmd{[]byte(str), p.IStorage}\n\t\treturn cmd, nil, nil\n\tcase strings.HasPrefix(strings.ToLower(trimPrefixStr), \"action\"):\n\t\tcmd = &ActionCmd{[]byte(str), p.IStorage}\n\t\treturn cmd, nil, nil\n\t}\n\n\targs := p.extractArgs(trimPrefixStr)\n\tif len(args) == 0 {\n\t\treturn nil, nil, ErrCommandNotFound\n\t}\n\n\tswitch strings.ToLower(args[0]) {\n\tcase \"list\":\n\t\tcmd = &List{p.IStorage}\n\tcase \"get\":\n\t\tcmd = &Get{p.IStorage}\n\tcase \"del\":\n\t\tcmd = &Del{p.IStorage}\n\tcase \"help\":\n\t\tcmd = &Help{}\n\tdefault:\n\t\treturn nil, nil, ErrCommandNotFound\n\t}\n\n\treturn cmd, args[1:], nil\n}",
"func ParseCommand(line string) *Command {\n\twords := strings.Split(line, \" \")\n\tif len(words) < 2 {\n\t\t// FIXME error\n\t\treturn nil\n\t}\n\n\ttag := strings.ToUpper(words[0])\n\tcommand := strings.ToUpper(words[1])\n\targs := strings.Join(words[2:len(words)], \" \")\n\n\treturn &Command{\n\t\ttag: tag,\n\t\tcommand: command,\n\t\targs: args,\n\t\torig: line,\n\t}\n}",
"func ParseCommand(s string) (Command, error) {\n\tcmd, s, err := scanCommand(s)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif s != \"\" {\n\t\treturn nil, fmt.Errorf(\"extraneous input %q after command\", s)\n\t}\n\treturn cmd, nil\n}",
"func ParseCommand(exp string) []*exec.Cmd {\n\trawCommands := mapTrim(strings.Split(exp, \"|\"), strings.TrimSpace)\n\tcommands := make([]*exec.Cmd, len(rawCommands))\n\tfor i, cmd := range rawCommands {\n\t\targs := mapTrim(split(cmd), strings.TrimSpace, trimQuotes)\n\t\tcommands[i] = exec.Command(args[0], args[1:]...)\n\t}\n\treturn commands\n}",
"func ParseCmd(l []byte, lenght int) (string, int) {\n\tline := string(l[:lenght])\n\tline = strings.ToLower(line)\n\tline = strings.TrimSpace(line)\n\tcmds := strings.Split(line, \" \")\n\n\tfmt.Println(cmds)\n\n\tif len(cmds) == 0 {\n\t\tfmt.Println(\"Qui0...\" + cmds[0])\n\t\treturn StrInvalidCommand, InvalidCommand\n\t}\n\n\tcommand, ok := Table[strings.TrimSpace(cmds[0])]\n\tif !ok {\n\t\tfmt.Println(\"Qui...\" + cmds[0])\n\t\treturn StrCommandNotFound, CommandNotFound\n\t}\n\n\treturn command(cmds[1:])\n}",
"func (eCmd *Exec) Parse(args []string) error {\n\tif len(args) < 2 {\n\t\treturn errors.New(\"must specify a target container and command\")\n\t}\n\n\ttargetContainer, err := strconv.Atoi(args[0])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"target container must be a number: %s\", args[0])\n\t}\n\n\teCmd.targetContainer = targetContainer\n\teCmd.command = strings.Join(args[1:], \" \")\n\treturn nil\n}",
"func ParseCommand(line String) (cmd string, args CommandList, nArg uint32){\n\n\targs = make(CommandList)\n\t//Process line byte by byte and every time a space is met, populate either cmd or args\n\t//Count characters in line and number of tokens\n\tvar i uint32\n\ti = 0\n\tvar k uint32\n\tk = 0\n\tvar t uint32\n\n\tnArg = 0\n\tf := 0\n\n\tfor i<4 {\n\t\targs[i] = \"\"\n\t\ti++\n\t}\n\n\ti = 0\n\n\tfor i<uint32(len(line)) && k<5 && line[i] != '\\n' {\n\n\t\tt = i\n\t\tvar tmp string\n\t\ttmp = \"\"\n\n\t\tfor line[t] == ' ' {\n\t\t\tt++\n\t\t}\n\n\t\tfor t<uint32(len(line)) && line[t] != ' ' && line[t] != '\\n' {\n\t\t\ttmp = fmt.Sprintf(\"%s%c\", tmp, line[t])\n\t\t\tt++\n\t\t}\n\n\t\tif k == 0 {\n\n\t\t\tcmd = fmt.Sprintf(\"%s\", tmp)\n\n\t\t} else {\n\n\t\t\tif tmp == \"<\" || tmp == \">\" {\n\t\t\t\tf = 1\n\t\t\t}\n\t\t\tif tmp != \"\" {\n\t\t\t\tif f == 0 {\n\t\t\t\t\tnArg++\n\t\t\t\t}\n\t\t\t}\n\t\t\targs[k-1] = fmt.Sprintf(\"%s\", tmp)\n\t\t}\n\n\t\tk++\n\t\ti = t\n\n\t}\n\n\n\treturn\n\n}",
"func parseCommand(cmd string) (string, string, bool) {\n\n\tindexOfOpenTag := strings.Index(cmd, \"<\")\n\tindexOfColon := strings.Index(cmd, \":\")\n\tindexOfCloseTag := strings.Index(cmd, \">\")\n\tindexOfSpace := strings.Index(cmd, \" \")\n\tcountOpenTags := strings.Count(cmd, \"<\")\n\tcountCloseTags := strings.Count(cmd, \">\")\n\tcountColons := strings.Count(cmd, \":\")\n\tcountOpenSpaces := strings.Count(cmd, \" \")\n\tconditions := (indexOfOpenTag != -1) && (indexOfCloseTag != -1) && (indexOfColon != -1) && (indexOfSpace == -1) && (countOpenTags == 1) && (countCloseTags == 1) && (countColons == 1) && (countOpenSpaces == 0)\n\n\tarrangement := indexOfOpenTag < indexOfColon && indexOfColon < indexOfCloseTag\n\n\tvalidSyntax := conditions && arrangement\n\n\tif !validSyntax {\n\t\treturn \"\", \"\", false\n\t}\n\tcommandName := cmd[1:indexOfColon]\n\tcommandVal := cmd[1+indexOfColon : indexOfCloseTag]\n\n\treturn commandName, commandVal, validSyntax\n\n}",
"func (client *Client) Parse(cmd string) (string, string) {\n\tpattern := regexp.MustCompile(`\\s+`)\n\tcmd = strings.TrimSpace(cmd)\n\ttmp := strings.Split(pattern.ReplaceAllString(cmd, \" \"), \" \")\n\tif len(tmp) > 1 {\n\t\treturn strings.TrimSpace(tmp[0]), strings.TrimSpace(tmp[1])\n\t}\n\treturn strings.TrimSpace(tmp[0]), \"\"\n}",
"func ParseCommand(commandString string) PathCommand {\n\n\tswitch commandString {\n\tcase \"M\":\n\t\treturn MoveToAbsolute\n\tcase \"m\":\n\t\treturn MoveToRelative\n\tcase \"Z\", \"z\":\n\t\treturn ClosePath\n\tcase \"L\":\n\t\treturn LineToAbsolute\n\tcase \"l\":\n\t\treturn LineToRelative\n\tdefault:\n\t\treturn NotAValidCommand\n\t}\n\tpanic(\"Not reachable\")\n}",
"func ParseCommand(args []string) {\n SetProcess(args[0])\n SetConfig(config.Process(args[0]))\n opts := commandOpts(args)\n\n if message, ok := cfg.Valid(); ok {\n\n name := args[1]\n daemonCmd, daemonOk := DaemonizedCommands()[name]\n infoCmd, infoOk := InfoCommands()[name]\n interactiveCmd, interactiveOk := InteractiveCommands()[name]\n\n switch {\n case daemonOk:\n daemonCmd.run(opts...)\n case infoOk:\n infoCmd.run(opts...)\n case interactiveOk:\n interactiveCmd.run(opts...)\n default:\n logger.Log(fmt.Sprintf(\"Running Command: (%v) doesn't exists\\n\", args[1]))\n }\n } else {\n logger.Log(message)\n }\n\n}",
"func parse(invocation string, verbose bool) (*command, error) {\n\tif len(strings.TrimSpace(invocation)) == 0 {\n\t\treturn &command{}, fmt.Errorf(\"Empty strings are not valid commands.\")\n\t}\n\n\tp := newParser(invocation, verbose)\n\tcmd := p.command(p.Scan())\n\tif len(p.errors) == 0 && p.tok != scanner.EOF {\n\t\tp.parseError(\"EOF\")\n\t}\n\tif len(p.errors) > 0 {\n\t\treterrs := make([]string, len(p.errors))\n\t\tfor i, err := range p.errors {\n\t\t\treterrs[i] = err.Error()\n\t\t}\n\t\treturn cmd, e(strings.Join(reterrs, \"\\n\"))\n\t}\n\treturn cmd, nil\n}",
"func ParseCommand(text string) (Command, error) {\n\tvar parts = strings.Split(text, \" \")\n\tif len(parts) != 2 {\n\t\treturn Command{\"\", \"\"}, errors.New(\"Cannot recognize command. Please follow format of \\\"Action Payload\\\"\")\n\t}\n\treturn Command{parts[0], parts[1]}, nil\n}",
"func ParseCmd(fullCmd string) (string, []string, error) {\n\tvals := re.FindAllString(fullCmd, -1)\n\n\tif len(vals) == 0 {\n\t\treturn \"\", nil, &noCommandSpecified{}\n\t}\n\tcmd := vals[0]\n\tcmd, err := exec.LookPath(cmd)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\tif !filepath.IsAbs(cmd) {\n\t\tcmd, err = filepath.Abs(cmd)\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t}\n\n\targs := vals[1:]\n\t// remove single quotes from args, since they can confuse exec\n\tfor i, arg := range args {\n\t\targs[i] = strings.Trim(arg, \"'\")\n\t}\n\n\treturn cmd, args, nil\n}",
"func parseCmd() *Cmd {\n\n\tvar cmd = &Cmd{}\n\tflag.BoolVar(&cmd.helpFlag, \"help\", false, \"print help message\")\n\tflag.StringVar(&cmd.jsOutOption, \"jsOutOption\", `C:\\myWorkingDirectory\\Networking\\ipam\\src\\Sources\\IPAM\\WebUI\\Dashboard\\out`, \"jsOutOption\")\n\tflag.StringVar(&cmd.sfAppOption, \"sfAppOption\", `C:\\myWorkingDirectory\\Networking\\ipam\\src\\Sources\\IPAM\\WebUI\\Dashboard\\out`, \"sfApplicationOption\")\n\tflag.StringVar(&cmd.operation, \"op\", \"test\", \"test|copy\")\n\tflag.Parse()\n\n\targs := flag.Args()\n\tif len(args) > 0 {\n\t\tcmd.fileName = args[0]\n\t\tcmd.args = args[1:]\n\t}\n\n\treturn cmd\n}",
"func Parse(raw []byte) (Command, error) {\n\trd := Reader{buf: raw, end: len(raw)}\n\tvar leftover int\n\tcmds, err := rd.readCommands(&leftover)\n\tif err != nil {\n\t\treturn Command{}, err\n\t}\n\tif leftover > 0 {\n\t\treturn Command{}, errTooMuchData\n\t}\n\treturn cmds[0], nil\n\n}",
"func (cp *CmdExit) Parse(argString string) error {\n\tcp.Command.Parse(argString)\n\treturn nil\n}",
"func parseCommand(tokens []token, idx int) (PipelineNode, int) {\n\tswitch tokens[idx].tokenType {\n\tcase plus_token:\n\t\tpanic(\"Unexpected plus\")\n\tcase minus_token:\n\t\tpanic(\"Unexpected minus\")\n\tcase pipe_token:\n\t\tpanic(\"Unexpected pipe\")\n\tcase string_literal_token:\n\t\tpanic(\"expected: command\")\n\t}\n\n\tcmd := Command{Command: tokens[idx].tokenValue}\n\n\tfor {\n\t\tidx = idx + 1\n\t\tif idx >= len(tokens) {\n\t\t\tbreak\n\t\t}\n\n\t\tct := tokens[idx]\n\t\tif ct.tokenType == identifier_token || ct.tokenType == string_literal_token {\n\t\t\tcmd.Arguments = append(cmd.Arguments, ct.tokenValue)\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn cmd, idx\n}",
"func parseCommand(commandMap stringmap.StringMap, registry *feature.Registry, m *discordgo.MessageCreate) (*model.Command, error) {\n\tcontent := m.Content\n\tif !strings.HasPrefix(content, \"?\") {\n\t\treturn &model.Command{\n\t\t\tType: model.CommandTypeNone,\n\t\t}, nil\n\t}\n\tsplitContent := strings.Split(content, \" \")\n\n\t// Parse builtins.\n\tif parser := registry.GetParserByName(splitContent[0]); parser != nil {\n\t\tcommand, err := parser.Parse(splitContent, m)\n\t\tcommand.OriginalName = splitContent[0]\n\t\treturn command, err\n\t}\n\n\t// See if it's a custom command.\n\thas, err := commandMap.Has(splitContent[0][1:])\n\tif err != nil {\n\t\tlog.Info(\"Error doing custom parsing\", err)\n\t\treturn nil, err\n\t}\n\tif has {\n\t\treturn registry.FallbackParser.Parse(splitContent, m)\n\t}\n\n\t// No such command!\n\treturn &model.Command{\n\t\tType: model.CommandTypeUnrecognized,\n\t}, nil\n}",
"func ParseCmd(line string) (*ParsedLine, error) {\n\tres := &ParsedLine{}\n\n\t// We're going to upper-case this, which may explode on us if this\n\t// is UTF-8 or anything that smells like it.\n\tif !isall7bit([]byte(line)) {\n\t\treturn nil, ErrCmdContainsNonASCII\n\t}\n\n\t// Trim trailing space from the line, because some confused people\n\t// send eg 'RSET ' or 'QUIT '. Probably other people put trailing\n\t// spaces on other commands. This is probably not completely okay\n\t// by the RFCs, but my view is 'real clients trump RFCs'.\n\tline = strings.TrimRightFunc(line, unicode.IsSpace)\n\n\t// Search in the command table for the prefix that matches. If\n\t// it's not found, this is definitely not a good command.\n\t// We search on an upper-case version of the line to make my life\n\t// much easier.\n\tfound := -1\n\tupper := strings.ToUpper(line)\n\tfor i := range smtpCommand {\n\t\tif strings.HasPrefix(upper, smtpCommand[i].text) {\n\t\t\tfound = i\n\t\t\tbreak\n\t\t}\n\t}\n\tif found == -1 {\n\t\treturn nil, ErrCmdUnrecognized\n\t}\n\n\t// Validate that we've ended at a word boundary, either a space or\n\t// ':'. If we don't, this is not a valid match. Note that we now\n\t// work with the original-case line, not the upper-case version.\n\tcmd := smtpCommand[found]\n\tllen := len(line)\n\tclen := len(cmd.text)\n\tif !(llen == clen || line[clen] == ' ' || line[clen] == ':') {\n\t\treturn nil, ErrCmdUnrecognized\n\t}\n\n\t// This is a real command, so we must now perform real argument\n\t// extraction and validation. At this point any remaining errors\n\t// are command argument errors, so we set the command type in our\n\t// result.\n\tres.Cmd = cmd.cmd\n\tswitch cmd.argtype {\n\tcase noArg:\n\t\tif llen != clen {\n\t\t\treturn nil, ErrCmdHasNoArg\n\t\t}\n\tcase mustArg:\n\t\tif llen <= clen+1 {\n\t\t\treturn nil, ErrCmdRequiresArg\n\t\t}\n\t\t// FIXME: (corpix) Probably we should fail here\n\t\t// if you have some spaces before your arguments\n\t\t// then just 'sorry, your problem'.\n\n\t\t// Even if there are nominal characters they could be\n\t\t// all whitespace. Although we've trimmed trailing\n\t\t// whitespace before now, there could be whitespace\n\t\t// *before* the argument and we want to trim it too.\n\t\tt := strings.TrimSpace(line[clen+1:])\n\t\tif len(t) == 0 {\n\t\t\treturn nil, ErrCmdRequiresArg\n\t\t}\n\t\tres.Arg = t\n\tcase oneOrTwoArgs:\n\t\t// This implicitly allows 'a b c', with 'b c' becoming\n\t\t// the Params value.\n\t\t// TODO: is this desirable? Is this allowed by the AUTH RFC?\n\t\tparts := strings.SplitN(line, \" \", 3)\n\t\tswitch len(parts) {\n\t\tcase 1:\n\t\t\treturn nil, ErrCmdRequiresArgs\n\t\tcase 2:\n\t\t\tres.Arg = parts[1]\n\t\tcase 3:\n\t\t\tres.Arg = parts[1]\n\t\t\tres.Params = parts[2]\n\t\t}\n\tcase canArg:\n\t\t// get rid of whitespace between command and the argument.\n\t\tif llen > clen+1 {\n\t\t\tres.Arg = strings.TrimSpace(line[clen+1:])\n\t\t}\n\tcase colonAddress:\n\t\t// FIXME: (corpix) this function should be cleaned.\n\t\t// Before cleaning it up I should decide what RFC features\n\t\t// is important(most commonly used) and what should be removed\n\t\t// to keep code as simple as possible.\n\n\t\tvar idx int\n\t\t// Minimum llen is clen + ':<>', three characters\n\t\tif llen < clen+3 {\n\t\t\treturn nil, ErrCmdRequiresAddress\n\t\t}\n\t\t// We explicitly check for '>' at the end of the string\n\t\t// to accept (at this point) 'MAIL FROM:<<...>>'. 
This will\n\t\t// fail if people also supply ESMTP parameters, of course.\n\t\t// Such is life.\n\t\t// TODO: reject them here? Maybe it's simpler.\n\t\t// BUG: this is imperfect because in theory I think you\n\t\t// can embed a quoted '>' inside a valid address and so\n\t\t// fool us. But I'm not putting a full RFC whatever address\n\t\t// parser in here, thanks, so we'll reject those.\n\t\tif line[llen-1] == '>' {\n\t\t\tidx = llen - 1\n\t\t} else {\n\t\t\tidx = strings.IndexByte(line, '>')\n\t\t\tif idx != -1 {\n\t\t\t\tif len(line) >= idx+1 && line[idx+1] != ' ' {\n\t\t\t\t\treturn nil, ErrCmdImpropperArgFmt\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// NOTE: the RFC is explicit that eg 'MAIL FROM: <addr...>'\n\t\t// is not valid, ie there cannot be a space between the : and\n\t\t// the '<'. Normally we'd refuse to accept it, but a few too\n\t\t// many things invalidly generate it.\n\t\tif line[clen] != ':' || idx == -1 {\n\t\t\treturn nil, ErrCmdImpropperArgFmt\n\t\t}\n\t\tspos := clen + 1\n\t\tif line[spos] == ' ' {\n\t\t\tspos++\n\t\t}\n\t\tif line[spos] != '<' {\n\t\t\treturn nil, ErrCmdImpropperArgFmt\n\t\t}\n\t\tres.Arg = line[spos+1 : idx]\n\t\t// As a side effect of this we generously allow trailing\n\t\t// whitespace after RCPT TO and MAIL FROM. You're welcome.\n\t\tres.Params = strings.TrimSpace(line[idx+1 : llen])\n\t}\n\n\treturn res, nil\n}",
"func ParseCommand(message string) (string, string) {\n\tprefix := regexp.MustCompile(`^/(.*)$`)\n\t// every message should have the prefix now\n\tif prefix.MatchString(message) {\n\t\tfmt.Println(\"command prefix matched\")\n\t\tmessage = prefix.ReplaceAllString(message, \"$1\")\n\t}\n\t// check create member\n\thello := regexp.MustCompile(`^hello$`)\n\tif hello.MatchString(message) {\n\t\tfmt.Println(\"hello matched\")\n\t\treturn \"hello\", \"\"\n\t}\n\t// check create member\n\tcreate_member := regexp.MustCompile(`^create member[\\s]*(.*)$`)\n\tif create_member.MatchString(message) {\n\t\tfmt.Println(\"create member matched\")\n\t\tmessage = create_member.ReplaceAllString(message, \"$1\")\n\t\treturn \"create_member\", message\n\t}\n\n\treturn \"invalid\", message\n}",
"func Parse(input io.Reader) (commands []*Command, err error) {\n\tscanner := bufio.NewScanner(input)\n\n\tvar currentToken token\n\tscanner.Split(tokenize(¤tToken))\n\n\tvar tokens []token\n\tfor scanner.Scan() {\n\t\ttokens = append(tokens, currentToken)\n\n\t\tif numTokens := len(tokens); numTokens > 1 {\n\t\t\tprevToken := tokens[numTokens-2]\n\t\t\tif mergedToken := prevToken.Merge(currentToken); mergedToken != nil {\n\t\t\t\ttokens[numTokens-2] = mergedToken\n\t\t\t\ttokens = tokens[:numTokens-1]\n\t\t\t}\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tbeginning := true\n\tvar currentCommand *Command\n\tfor _, token := range tokens {\n\t\tif token.Type() == tokenTypeWhitespace {\n\t\t\tcontinue // Ignore whitespace tokens.\n\t\t}\n\n\t\tif token.Type() == tokenTypeNewline {\n\t\t\tif !beginning { // handle leading newlines.\n\t\t\t\t// Newline signals the end of a command.\n\t\t\t\tcommands = append(commands, currentCommand)\n\t\t\t\tcurrentCommand = nil\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tif token.Type() == tokenTypeHeredoc {\n\t\t\tif currentCommand == nil {\n\t\t\t\treturn nil, errors.New(\"unexpected heredoc\")\n\t\t\t}\n\n\t\t\tcurrentCommand.Heredoc = token.Value()\n\n\t\t\t// Heredoc also signals the end of a command.\n\t\t\tcommands = append(commands, currentCommand)\n\t\t\tcurrentCommand = nil\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif token.Type() != tokenTypeArg {\n\t\t\treturn nil, errors.New(\"unexpected token\")\n\t\t}\n\n\t\tbeginning = false\n\t\t// Append arg to current command.\n\t\tif currentCommand == nil {\n\t\t\tcurrentCommand = &Command{}\n\t\t}\n\t\tcurrentCommand.Args = append(currentCommand.Args, token.Value())\n\t}\n\n\tif currentCommand != nil {\n\t\t// Handles case with no trailing newline.\n\t\tcommands = append(commands, currentCommand)\n\t}\n\n\treturn commands, nil\n}",
"func (fr *Frame) ParseCmd(str string) (zwords []T, s string) {\n\ts = str\n\t//- log.Printf(\"< ParseCmd < %#v\\n\", s)\n\tzwords = make([]T, 0, 4)\n\tvar c uint8\n\n\t// skip space or ;\n\ti := 0\n\tn := len(s)\n\tfor i < n {\n\t\tc = s[i]\n\t\tif !WhiteOrSemi(s[i]) {\n\t\t\tbreak\n\t\t}\n\t\ti++\n\t}\n\ts = s[i:]\n\nLoop:\n\tfor len(s) > 0 {\n\t\t// found non-white\n\t\tswitch s[0] {\n\t\tcase ']':\n\t\t\tbreak Loop\n\t\tcase '{':\n\t\t\tnewresult, rest := fr.ParseCurly(s)\n\t\t\tzwords = append(zwords, newresult)\n\t\t\ts = rest\n\t\tcase '[':\n\t\t\tnewresult, rest := fr.ParseSquare(s)\n\t\t\tzwords = append(zwords, newresult)\n\t\t\ts = rest\n\t\tcase '\"':\n\t\t\tnewresult, rest := fr.ParseQuote(s)\n\t\t\tzwords = append(zwords, newresult)\n\t\t\ts = rest\n\t\tdefault:\n\t\t\tnewresult, rest := fr.ParseWord(s)\n\t\t\tzwords = append(zwords, newresult)\n\t\t\ts = rest\n\t\t}\n\n\t\t// skip white\n\t\tn = len(s)\n\t\ti = 0\n\tSkip:\n\t\tfor i < n {\n\t\t\tswitch s[i] {\n\t\t\tcase ' ', '\\t', '\\r':\n\t\t\t\ti++\n\t\t\t\tcontinue Skip\n\t\t\tcase ';', '\\n':\n\t\t\t\tbreak Skip\n\t\t\tdefault:\n\t\t\t\tbreak Skip\n\t\t\t}\n\t\t}\n\t\ts = s[i:]\n\t\tif len(s) == 0 {\n\t\t\tbreak Loop // end of string\n\t\t}\n\t\tc = s[0]\n\t\tif c == ';' || c == '\\n' {\n\t\t\ts = s[1:] // Omit the semicolon or newline\n\t\t\tbreak Loop // end of cmd\n\t\t}\n\t} // End Loop\n\treturn\n}",
"func ParseCommand() (*Command, []string) {\n\targs := pflag.Args()\n\tif len(args) < 1 {\n\t\tfatal(\"No command supplied\\n\")\n\t}\n\n\tcmd := strings.ToLower(args[0])\n\targs = args[1:]\n\n\t// Find the command doing a prefix match\n\tvar found = make([]*Command, 0, 1)\n\tvar command *Command\n\tfor i := range Commands {\n\t\ttrialCommand := &Commands[i]\n\t\t// exact command name found - use that\n\t\tif trialCommand.Name == cmd {\n\t\t\tcommand = trialCommand\n\t\t\tbreak\n\t\t} else if strings.HasPrefix(trialCommand.Name, cmd) {\n\t\t\tfound = append(found, trialCommand)\n\t\t}\n\t}\n\tif command == nil {\n\t\tswitch len(found) {\n\t\tcase 0:\n\t\t\tfs.Stats.Error()\n\t\t\tlog.Fatalf(\"Unknown command %q\", cmd)\n\t\tcase 1:\n\t\t\tcommand = found[0]\n\t\tdefault:\n\t\t\tfs.Stats.Error()\n\t\t\tvar names []string\n\t\t\tfor _, cmd := range found {\n\t\t\t\tnames = append(names, `\"`+cmd.Name+`\"`)\n\t\t\t}\n\t\t\tlog.Fatalf(\"Not unique - matches multiple commands: %s\", strings.Join(names, \", \"))\n\t\t}\n\t}\n\tif command.Run == nil {\n\t\tsyntaxError()\n\t}\n\tcommand.checkArgs(args)\n\treturn command, args\n}",
"func (f *Factory) ParseCommand() *CommandInfo {\n\tvar (\n\t\targv = len(os.Args)\n\t\tcommandArgs []string\n\t\tcommand ICommand\n\t\tok bool\n\t)\n\n\tif argv > 1 {\n\t\tf.CommandName = os.Args[1]\n\t}\n\tif argv > 2 {\n\t\tcommandArgs = os.Args[2:]\n\t}\n\n\tcommand, ok = f.Commands[f.CommandName]\n\tif ok {\n\t\tf.Command = command\n\t} else {\n\t\tf.CommandName = f.Name\n\t\tcommand = f\n\t}\n\n\tcommand.SetFlags()\n\tflag.CommandLine.Parse(commandArgs)\n\n\tif f.Help {\n\t\tf.Run()\n\t}\n\treturn &CommandInfo{\n\t\tCommand: command,\n\t\tCommandName: f.CommandName,\n\t}\n}",
"func (p *CustomParser) Parse(splitContent []string, m *discordgo.MessageCreate) (*model.Command, error) {\n\t// TODO(jake): Drop this and external hash check, handle missing commands solely in execute.\n\thas, err := p.commandMap.Has(splitContent[0][1:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !has {\n\t\tlog.Fatal(\"parseCustom called with non-custom command\", errors.New(\"wat\"))\n\t}\n\treturn &model.Command{\n\t\tType: model.CommandTypeCustom,\n\t\tCustom: &model.CustomData{\n\t\t\tCall: splitContent[0][1:],\n\t\t\tArgs: strings.Join(splitContent[1:], \" \"),\n\t\t},\n\t}, nil\n}",
"func (pCmd *Ps) Parse(args []string) error {\n\treturn nil\n}",
"func (pCmd *Ps) Parse(args []string) error {\n\treturn nil\n}",
"func parseCommand(dst io.ReaderFrom, cmd *exec.Cmd) error {\n\tout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn errCmd(err, cmd)\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\treturn errCmd(err, cmd)\n\t}\n\n\t_, err = dst.ReadFrom(out)\n\tif err != nil {\n\t\treturn errCmd(err, cmd)\n\t}\n\n\treturn errCmd(cmd.Wait(), cmd)\n}",
"func parseCommand(command string) []string {\n\tparsedCommand := []string{}\n\n\t// remove the tabs in between the command.\n\tcommand = strings.Replace(command, Tab, CommandSeparator, -1)\n\n\t// remove the empty string\n\tfor _, s := range strings.Split(command, CommandSeparator) {\n\t\tif s != \"\" {\n\t\t\tparsedCommand = append(parsedCommand, s)\n\t\t}\n\t}\n\n\treturn parsedCommand\n}",
"func (t *Test) parse(input string) error {\n\tlines := getLines(input)\n\tvar err error\n\t// Scan for steps line by line.\n\tfor i := 0; i < len(lines); i++ {\n\t\tl := lines[i]\n\t\tif len(l) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tvar cmd testCommand\n\n\t\tswitch c := strings.ToLower(patSpace.Split(l, 2)[0]); {\n\t\tcase c == \"clear\":\n\t\t\tcmd = &clearCmd{}\n\t\tcase c == \"load\":\n\t\t\ti, cmd, err = t.parseLoad(lines, i)\n\t\tcase strings.HasPrefix(c, \"eval\"):\n\t\t\ti, cmd, err = t.parseEval(lines, i)\n\t\tdefault:\n\t\t\treturn raise(i, \"invalid command %q\", l)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tt.cmds = append(t.cmds, cmd)\n\t}\n\treturn nil\n}",
"func ParseCmdPack() {\n\t// check args number\n\tif len(os.Args) == 2 {\n\t\tpackCmd.Usage()\n\t\tos.Exit(1)\n\t}\n\t// parse command pack\n\terr := packCmd.Parse(os.Args[2:])\n\tif err != nil {\n\t\tlog.Println(\"Error Parse Pack Command.\")\n\t\tos.Exit(1)\n\t}\n\t// handle command parameters\n\terr = handleCmdPack(packSrc, packDest, packType)\n\tif err != nil {\n\t\tfmt.Print(\"\\n\")\n\t\tfmt.Println(\"Pack failure:\", err)\n\t\tos.Exit(1)\n\t}\n\tfmt.Print(\"\\n\")\n\tfmt.Println(\"Pack success.\")\n}",
"func parseCommand(instrToolFlagSet *instrumentationToolFlagSet, args []string) (commandExecutionFunc, int, error) {\n\tcmdIdPos := parseFlagsUntilFirstNonOptionArg(instrToolFlagSet, args)\n\tif cmdIdPos == -1 {\n\t\treturn nil, cmdIdPos, errors.New(\"unexpected arguments\")\n\t}\n\tcmdId := args[cmdIdPos]\n\targs = args[cmdIdPos:]\n\tcmdId, err := parseCommandID(cmdId)\n\tif err != nil {\n\t\treturn nil, cmdIdPos, err\n\t}\n\n\tif commandParser, exists := commandParserMap[cmdId]; exists {\n\t\tcmd, err := commandParser(args)\n\t\treturn cmd, cmdIdPos, err\n\t} else {\n\t\treturn nil, cmdIdPos, nil\n\t}\n}",
"func Parse(cmd interface{}, opts ...parseOpt) {\n\topts = append([]parseOpt{Program(filepath.Base(os.Args[0]))}, opts...)\n\tp, err := newParser(cmd, opts...)\n\tif err == nil {\n\t\terr = p.parse(os.Args[1:])\n\t}\n\tif err == ErrDefaultHelp {\n\t\tp.printUsage(os.Stderr)\n\t\tos.Exit(0)\n\t}\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"tagflag: %s\\n\", err)\n\t\tif _, ok := err.(userError); ok {\n\t\t\tos.Exit(2)\n\t\t}\n\t\tos.Exit(1)\n\t}\n}",
"func parse(filename string, lineno int, line string) (cmd *command, err error) {\n\tcmd = &command{file: filename, line: lineno}\n\tvar (\n\t\trawArg []argFragment // text fragments of current arg so far (need to add line[start:i])\n\t\tstart = -1 // if >= 0, position where current arg text chunk starts\n\t\tquoted = false // currently processing quoted text\n\t)\n\n\tflushArg := func() error {\n\t\tif len(rawArg) == 0 {\n\t\t\treturn nil // Nothing to flush.\n\t\t}\n\t\tdefer func() { rawArg = nil }()\n\n\t\tif cmd.name == \"\" && len(rawArg) == 1 && !rawArg[0].quoted {\n\t\t\targ := rawArg[0].s\n\n\t\t\t// Command prefix ! means negate the expectations about this command:\n\t\t\t// go command should fail, match should not be found, etc.\n\t\t\t// Prefix ? means allow either success or failure.\n\t\t\tswitch want := expectedStatus(arg); want {\n\t\t\tcase failure, successOrFailure:\n\t\t\t\tif cmd.want != \"\" {\n\t\t\t\t\treturn errors.New(\"duplicated '!' or '?' token\")\n\t\t\t\t}\n\t\t\t\tcmd.want = want\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Command prefix [cond] means only run this command if cond is satisfied.\n\t\t\tif strings.HasPrefix(arg, \"[\") && strings.HasSuffix(arg, \"]\") {\n\t\t\t\twant := true\n\t\t\t\targ = strings.TrimSpace(arg[1 : len(arg)-1])\n\t\t\t\tif strings.HasPrefix(arg, \"!\") {\n\t\t\t\t\twant = false\n\t\t\t\t\targ = strings.TrimSpace(arg[1:])\n\t\t\t\t}\n\t\t\t\tif arg == \"\" {\n\t\t\t\t\treturn errors.New(\"empty condition\")\n\t\t\t\t}\n\t\t\t\tcmd.conds = append(cmd.conds, condition{want: want, tag: arg})\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif arg == \"\" {\n\t\t\t\treturn errors.New(\"empty command\")\n\t\t\t}\n\t\t\tcmd.name = arg\n\t\t\treturn nil\n\t\t}\n\n\t\tcmd.rawArgs = append(cmd.rawArgs, rawArg)\n\t\treturn nil\n\t}\n\n\tfor i := 0; ; i++ {\n\t\tif !quoted && (i >= len(line) || strings.ContainsRune(argSepChars, rune(line[i]))) {\n\t\t\t// Found arg-separating space.\n\t\t\tif start >= 0 {\n\t\t\t\trawArg = append(rawArg, argFragment{s: line[start:i], quoted: false})\n\t\t\t\tstart = -1\n\t\t\t}\n\t\t\tif err := flushArg(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif i >= len(line) || line[i] == '#' {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\tif i >= len(line) {\n\t\t\treturn nil, errors.New(\"unterminated quoted argument\")\n\t\t}\n\t\tif line[i] == '\\'' {\n\t\t\tif !quoted {\n\t\t\t\t// starting a quoted chunk\n\t\t\t\tif start >= 0 {\n\t\t\t\t\trawArg = append(rawArg, argFragment{s: line[start:i], quoted: false})\n\t\t\t\t}\n\t\t\t\tstart = i + 1\n\t\t\t\tquoted = true\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// 'foo''bar' means foo'bar, like in rc shell and Pascal.\n\t\t\tif i+1 < len(line) && line[i+1] == '\\'' {\n\t\t\t\trawArg = append(rawArg, argFragment{s: line[start:i], quoted: true})\n\t\t\t\tstart = i + 1\n\t\t\t\ti++ // skip over second ' before next iteration\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// ending a quoted chunk\n\t\t\trawArg = append(rawArg, argFragment{s: line[start:i], quoted: true})\n\t\t\tstart = i + 1\n\t\t\tquoted = false\n\t\t\tcontinue\n\t\t}\n\t\t// found character worth saving; make sure we're saving\n\t\tif start < 0 {\n\t\t\tstart = i\n\t\t}\n\t}\n\n\tif cmd.name == \"\" {\n\t\tif cmd.want != \"\" || len(cmd.conds) > 0 || len(cmd.rawArgs) > 0 || cmd.background {\n\t\t\t// The line contains a command prefix or suffix, but no actual command.\n\t\t\treturn nil, errors.New(\"missing command\")\n\t\t}\n\n\t\t// The line is blank, or contains only a comment.\n\t\treturn nil, nil\n\t}\n\n\tif n := 
len(cmd.rawArgs); n > 0 {\n\t\tlast := cmd.rawArgs[n-1]\n\t\tif len(last) == 1 && !last[0].quoted && last[0].s == \"&\" {\n\t\t\tcmd.background = true\n\t\t\tcmd.rawArgs = cmd.rawArgs[:n-1]\n\t\t}\n\t}\n\treturn cmd, nil\n}",
"func (o *Options) Parse() error {\n\terr := o.cmd.Execute()\n\treturn err\n}",
"func handleParse(h Handler, content string) ([]string, error) {\n\tvar (\n\t\tp CmdParser\n\t\tok bool\n\t)\n\tif p, ok = h.(CmdParser); !ok {\n\t\treturn cmdParserDefault(content), nil\n\t}\n\n\treturn p.Parse(content)\n}",
"func (c *Command) Parse(args []string) error {\n\treturn c.flagSet.Parse(args)\n}",
"func ParseMessage(line string) (*Message, error) {\n\t// Trim the line and make sure we have data\n\tline = strings.TrimRight(line, \"\\r\\n\")\n\tif len(line) == 0 {\n\t\treturn nil, ErrZeroLengthMessage\n\t}\n\n\tc := &Message{\n\t\tTags: Tags{},\n\t\tPrefix: &Prefix{},\n\t}\n\n\tif line[0] == '@' {\n\t\tloc := strings.Index(line, \" \")\n\t\tif loc == -1 {\n\t\t\treturn nil, ErrMissingDataAfterTags\n\t\t}\n\n\t\tc.Tags = ParseTags(line[1:loc])\n\t\tline = line[loc+1:]\n\t}\n\n\tif line[0] == ':' {\n\t\tloc := strings.Index(line, \" \")\n\t\tif loc == -1 {\n\t\t\treturn nil, ErrMissingDataAfterPrefix\n\t\t}\n\n\t\t// Parse the identity, if there was one\n\t\tc.Prefix = ParsePrefix(line[1:loc])\n\t\tline = line[loc+1:]\n\t}\n\n\t// Split out the trailing then the rest of the args. Because\n\t// we expect there to be at least one result as an arg (the\n\t// command) we don't need to special case the trailing arg and\n\t// can just attempt a split on \" :\"\n\tsplit := strings.SplitN(line, \" :\", 2)\n\tc.Params = strings.FieldsFunc(split[0], func(r rune) bool {\n\t\treturn r == ' '\n\t})\n\n\t// If there are no args, we need to bail because we need at\n\t// least the command.\n\tif len(c.Params) == 0 {\n\t\treturn nil, ErrMissingCommand\n\t}\n\n\t// If we had a trailing arg, append it to the other args\n\tif len(split) == 2 {\n\t\tc.Params = append(c.Params, split[1])\n\t}\n\n\t// Because of how it's parsed, the Command will show up as the\n\t// first arg.\n\tc.Command = strings.ToUpper(c.Params[0])\n\tc.Params = c.Params[1:]\n\n\t// If there are no params, set it to nil, to make writing tests and other\n\t// things simpler.\n\tif len(c.Params) == 0 {\n\t\tc.Params = nil\n\t}\n\n\treturn c, nil\n}",
"func SlashCommandParse(r *http.Request) (s SlashCommand, err error) {\n\tif err = r.ParseForm(); err != nil {\n\t\treturn s, err\n\t}\n\ts.Token = r.PostForm.Get(\"token\")\n\ts.TeamID = r.PostForm.Get(\"team_id\")\n\ts.TeamDomain = r.PostForm.Get(\"team_domain\")\n\ts.EnterpriseID = r.PostForm.Get(\"enterprise_id\")\n\ts.EnterpriseName = r.PostForm.Get(\"enterprise_name\")\n\ts.ChannelID = r.PostForm.Get(\"channel_id\")\n\ts.ChannelName = r.PostForm.Get(\"channel_name\")\n\ts.UserID = r.PostForm.Get(\"user_id\")\n\ts.UserName = r.PostForm.Get(\"user_name\")\n\ts.Command = r.PostForm.Get(\"command\")\n\ts.Text = r.PostForm.Get(\"text\")\n\ts.ResponseURL = r.PostForm.Get(\"response_url\")\n\ts.TriggerID = r.PostForm.Get(\"trigger_id\")\n\treturn s, nil\n}",
"func parseCommand(cmdLine string) (param string) {\n\tvar result = map[string]map[string]string{}\n\tjson.Unmarshal([]byte(cmdLine), &result)\n\n\t// Print the data type of result variable\n\tswitch {\n\tcase result[\"query\"][\"region\"] != \"\":\n\t\tparam = result[\"query\"][\"region\"]\n\tcase result[\"query\"][\"date\"] != \"\":\n\t\tparam = result[\"query\"][\"date\"]\n\tdefault:\n\t\tparam = \"\"\n\t}\n\n\treturn\n}",
"func CommandParser() {\n\tinput := bufio.NewReader(os.Stdin)\n\tfor {\n\t\tline, _, err := input.ReadLine()\n\t\tif err != nil {\n\t\t\tHandleStopCommand([]string{}, nil)\n\t\t}\n\t\tif string(line) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tHandleCommand(string(line), nil)\n\t}\n}",
"func (cmd *MoveCommand) Parse(args []string) (success bool) {\n\tif len(args) == 3 {\n\t\tcmd.Source = args[1]\n\t\tcmd.Target = args[2]\n\t\tsuccess = true\n\t} else {\n\t\tfmt.Println(\"Usage:\\nmv <from> <to>\")\n\t}\n\treturn success\n}",
"func parseCommand(in []byte) []string {\n\tout := make([]string, 0)\n\tvar buf []byte\n\n\tskipwhite := true\n\tquotes := false\n\tfor _, b := range in {\n\t\t// Quoted things\n\t\tif quotes && b != '\"' {\n\t\t\tbuf = append(buf, b)\n\t\t\tcontinue\n\t\t}\n\t\tif b == '\"' {\n\t\t\tquotes = !quotes\n\t\t\tout = append(out, string(buf))\n\t\t\tbuf = nil\n\t\t\tcontinue\n\t\t}\n\n\t\t// White space\n\t\tif skipwhite && (b == ' ' || b == '\\t') {\n\t\t\tcontinue\n\t\t}\n\t\tif b == ' ' || b == '\\t' {\n\t\t\tskipwhite = true\n\t\t\tcontinue\n\t\t}\n\t\tif skipwhite {\n\t\t\tskipwhite = false\n\t\t\tif len(buf) > 0 {\n\t\t\t\tout = append(out, string(buf))\n\t\t\t}\n\t\t\tbuf = nil\n\t\t}\n\n\t\t// Everything else\n\t\tbuf = append(buf, b)\n\t}\n\tif len(buf) > 0 {\n\t\tout = append(out, string(buf))\n\t}\n\treturn out\n}",
"func ParseCommandInput(input string) (command string, args []string) {\n\tfields := strings.Fields(input)\n\tif len(fields) == 0 {\n\t\treturn \"\", nil\n\t}\n\tcommand = strings.ToUpper(fields[0])\n\n\tif len(fields) > 1 {\n\t\targs = fields[1:]\n\t} else {\n\t\targs = []string{}\n\t}\n\n\treturn\n}",
"func parseInput(message string) Command {\n res := standardInputMessageRegex.FindAllStringSubmatch(message, -1)\n if (len(res) == 1) {\n // there is a command\n return Command {\n Command: res[0][1],\n Body: res[0][2],\n }\n } else {\n return Command {\n Body: util.Decode(message),\n }\n }\n}",
"func (mp *MessageProcessor) Parse(message string) (*PaintCommand, error) {\n\n\t// Regular expression to match a valid command\n\tre, _ := regexp.Compile(\"^\\\\(([0-9]?[0-9]?[0-9]?),([0-9]?[0-9]?[0-9]?),([a-fA-F0-9]{2})([a-fA-F0-9]{2})([a-fA-F0-9]{2})\\\\)$\")\n\tcommandFields := re.FindStringSubmatch(message)\n\n\t// If the regex doesnt match, return immediately\n\tif commandFields == nil {\n\t\treturn nil, errors.New(\"Message does not match command regular expression\")\n\t}\n\n\t// Parse out every field\n\t// Skip commandFields[0] because that will be the entire match\n\tx, xErr := strconv.ParseUint(commandFields[1], 10, 16)\n\ty, yErr := strconv.ParseUint(commandFields[2], 10, 16)\n\tr, rErr := strconv.ParseUint(commandFields[3], 16, 8)\n\tg, gErr := strconv.ParseUint(commandFields[4], 16, 8)\n\tb, bErr := strconv.ParseUint(commandFields[5], 16, 8)\n\n\t// See if any errors occurred during field parsing\n\tif xErr != nil || yErr != nil || rErr != nil || gErr != nil || bErr != nil {\n\t\treturn nil, errors.New(\"Error converting field string to integer\")\n\t}\n\n\t// Check if the parsed X and Y are out of the 800x600 boundaries\n\tif !(x < 800) {\n\t\treturn nil, errors.New(\"X coordinate must be in [0,799]\")\n\t} else if !(y < 600) {\n\t\treturn nil, errors.New(\"Y coordinate must be in [0,599]\")\n\n\t}\n\n\t// Create a PaintCommand and return it\n\treturn &PaintCommand{\n\t\tX: uint16(x),\n\t\tY: uint16(y),\n\t\tRed: uint8(r),\n\t\tBlue: uint8(b),\n\t\tGreen: uint8(g),\n\t}, nil\n}",
"func parseCommand() *command {\n\tstartParam := \"1 minute ago\"\n\tendParam := \"now\"\n\tstreamParam := \"\"\n\n\tcommand := &command{limit: 50, tail: false}\n\n\tflag.StringVar(&command.profile, \"profile\", \"\", \"AWS credential profile to use.\")\n\tflag.StringVar(&command.region, \"region\", \"us-east-1\", \"AWS region to request logs from\")\n\tflag.StringVar(&command.logGroupName, \"group\", \"\", \"Log group name to read from\")\n\tflag.StringVar(&streamParam, \"streams\", \"\", \"List of streams, comma separated.\")\n\tflag.StringVar(&command.filter, \"filter\", \"\", \"Filter pattern to apply\")\n\tflag.StringVar(&startParam, \"start\", \"1 minute ago\", \"The RFC3339 time that log events should start from\")\n\tflag.StringVar(&endParam, \"end\", \"now\", \"The RFC3339 time that log events should end\")\n\tflag.UintVar(&command.abv, \"abv\", 10, \"Abbreviate streams names to N characters\")\n\tflag.Parse()\n\n\tif command.help {\n\t\tusage()\n\t}\n\n\tif command.region == \"\" || command.logGroupName == \"\" {\n\t\tusage()\n\t}\n\n\tstartTime, err := parseTime(startParam)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not parse start time.\\n\")\n\t\tfmt.Println()\n\t\tusage()\n\t}\n\tcommand.start = startTime\n\n\tendTime, err := parseTime(endParam)\n\tif err != nil {\n\t\tfmt.Printf(\"Could not parse end time.\\n\")\n\t\tfmt.Println()\n\t\tusage()\n\t}\n\tcommand.end = endTime\n\n\tif streamParam != \"\" {\n\t\tcommand.streams = strings.Split(streamParam, \",\")\n\t}\n\n\treturn command\n}",
"func (c *Help) Parse(\n\tctx context.Context,\n\targs []string,\n\tflags map[string]string,\n) error {\n\tif len(args) == 0 {\n\t\tc.Command = NewHelp()\n\t} else {\n\t\tif r, ok := cli.Registrar[cli.CmdName(args[0])]; !ok {\n\t\t\tc.Command = NewHelp()\n\t\t} else {\n\t\t\tc.Command = r()\n\t\t}\n\t}\n\treturn nil\n}",
"func (d *commandSyncDb) Parse(args []string) {\n\tvar name string\n\n\tflagSet := flag.NewFlagSet(\"orm command: syncdb\", flag.ExitOnError)\n\tflagSet.StringVar(&name, \"db\", \"default\", \"DataBase alias name\")\n\tflagSet.BoolVar(&d.force, \"force\", false, \"drop tables before create\")\n\tflagSet.BoolVar(&d.verbose, \"v\", false, \"verbose info\")\n\tflagSet.Parse(args)\n\n\td.al = getDbAlias(name)\n}",
"func (g *GenerateKeySubcommand) Parse(arguments []string) error {\n\terr := cmd.ParseFlagsWithConfig(g.flagSet, arguments, DefaultConfigPath, ServiceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = ValidateClientID(g)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func Parse() error {\n\treturn CommandLine.Parse(os.Args[1:])\n}",
"func (c *Commands) parse(\n\targs []string,\n\tparentFlagMap FlagMap,\n\tskipFlagMaps bool,\n) (\n\tcmds []*Command,\n\tflagsMap FlagMap,\n\trest []string,\n\terr error,\n) {\n\tvar fgs []FlagMap\n\tcur := c\n\n\tfor len(args) > 0 && cur != nil {\n\t\t// Extract the command name from the arguments.\n\t\tname := args[0]\n\n\t\t// Try to find the command.\n\t\tcmd := cur.Get(name)\n\t\tif cmd == nil {\n\t\t\tbreak\n\t\t}\n\n\t\targs = args[1:]\n\t\tcmds = append(cmds, cmd)\n\t\tcur = &cmd.commands\n\n\t\t// Parse the command flags.\n\t\tfg := make(FlagMap)\n\t\targs, err = cmd.flags.parse(args, fg)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tif !skipFlagMaps {\n\t\t\tfgs = append(fgs, fg)\n\t\t}\n\t}\n\n\tif !skipFlagMaps {\n\t\t// Merge all the flag maps without default values.\n\t\tflagsMap = make(FlagMap)\n\t\tfor i := len(fgs) - 1; i >= 0; i-- {\n\t\t\tflagsMap.copyMissingValues(fgs[i], false)\n\t\t}\n\t\tflagsMap.copyMissingValues(parentFlagMap, false)\n\n\t\t// Now include default values. This will ensure, that default values have\n\t\t// lower rank.\n\t\tfor i := len(fgs) - 1; i >= 0; i-- {\n\t\t\tflagsMap.copyMissingValues(fgs[i], true)\n\t\t}\n\t\tflagsMap.copyMissingValues(parentFlagMap, true)\n\t}\n\n\trest = args\n\treturn\n}",
"func Parse(writer io.Writer) (*flags.Parser, error) {\n\tcmdOpts := new(CmdOptions)\n\tparser := flags.NewParser(cmdOpts, flags.PrintErrors)\n\tvar err error\n\tif nonOptionArgs, err = parser.Parse(); err != nil {\n\t\tif !flags.WroteHelp(err) && !cmdOpts.NoHelpMessage {\n\t\t\tparser.WriteHelp(writer)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tif cmdOpts.Start.File != \"\" {\n\t\tif _, err := os.Stat(cmdOpts.Start.File); os.IsNotExist(err) {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\t//non-option arguments\n\tif len(nonOptionArgs) > 0 && cmdOpts.Connection.PgURL == \"\" {\n\t\tcmdOpts.Connection.PgURL = nonOptionArgs[0]\n\t}\n\treturn parser, nil\n}",
"func SlashCommandParse(r *fasthttp.Request) (s SlashCommand, err error) {\n\ts.Token = util.ByteToString(r.PostArgs().Peek(\"token\"))\n\ts.TeamID = util.ByteToString(r.PostArgs().Peek(\"team_id\"))\n\ts.TeamDomain = util.ByteToString(r.PostArgs().Peek(\"team_domain\"))\n\ts.EnterpriseID = util.ByteToString(r.PostArgs().Peek(\"enterprise_id\"))\n\ts.EnterpriseName = util.ByteToString(r.PostArgs().Peek(\"enterprise_name\"))\n\ts.ChannelID = util.ByteToString(r.PostArgs().Peek(\"channel_id\"))\n\ts.ChannelName = util.ByteToString(r.PostArgs().Peek(\"channel_name\"))\n\ts.UserID = util.ByteToString(r.PostArgs().Peek(\"user_id\"))\n\ts.UserName = util.ByteToString(r.PostArgs().Peek(\"user_name\"))\n\ts.Command = util.ByteToString(r.PostArgs().Peek(\"command\"))\n\ts.Text = util.ByteToString(r.PostArgs().Peek(\"text\"))\n\ts.ResponseURL = util.ByteToString(r.PostArgs().Peek(\"response_url\"))\n\ts.TriggerID = util.ByteToString(r.PostArgs().Peek(\"trigger_id\"))\n\ts.APIAppID = util.ByteToString(r.PostArgs().Peek(\"api_app_id\"))\n\treturn s, nil\n}",
"func (crc *CmdGetRegNumWithColour) Parse(argString string) error {\n\t\tcrc.Command.Parse(argString)\n\t\tcrc.Color = crc.Args[0]\n\t\treturn nil\n\t}",
"func Parse() (*CmdOptions, error) {\n\tcmdOpts := new(CmdOptions)\n\tparser := flags.NewParser(cmdOpts, flags.PrintErrors)\n\tvar err error\n\tif _, err = parser.Parse(); err != nil {\n\t\tif !flags.WroteHelp(err) {\n\t\t\tparser.WriteHelp(os.Stdout)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn cmdOpts, nil\n}",
"func ParseCliCommand(args []string) (err error, options []string, command string, subcommand string, parameters map[string][]string) {\n\targCount := len(args)\n\tpos := 1\n\n\t// Options\n\toptions = make([]string, 0)\n\tfor _, val := range args[pos:] {\n\t\tif !utils.IsFlag(val) {\n\t\t\tbreak\n\t\t}\n\t\toptions = append(options, utils.GetFlag(val))\n\t\tpos++\n\t}\n\n\t// Command\n\tif pos >= argCount {\n\t\terr = errors.New(\"command is required\")\n\t\treturn\n\t}\n\tcommand = strings.ToLower(args[pos])\n\tpos++\n\n\t//subcommand\n\tif pos >= argCount {\n\t\treturn\n\t}\n\tsubcommand = strings.ToLower(args[pos])\n\tpos++\n\n\t// Parameters\n\tif pos >= argCount {\n\t\treturn\n\t}\n\tparameters = make(map[string][]string)\n\tvar parameterName string\n\tfor _, val := range args[2:] {\n\t\tif utils.IsFlag(val) {\n\t\t\tparameterName = utils.GetFlag(val)\n\t\t\tif parameterName == \"\" {\n\t\t\t\t// aws cli doesn't valid this\n\t\t\t\terr = fmt.Errorf(\"input contains parameter with no name\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif _, exists := parameters[parameterName]; exists {\n\t\t\t\t// aws cli doesn't valid this\n\t\t\t\terr = fmt.Errorf(\"duplicate parameter %v\", parameterName)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tparameters[parameterName] = make([]string, 0)\n\t\t} else {\n\t\t\tparameters[parameterName] = append(parameters[parameterName], val)\n\t\t}\n\t}\n\treturn\n}",
"func parse(cmdlineReader io.Reader) CmdLine {\n\traw, err := ioutil.ReadAll(cmdlineReader)\n\tline := CmdLine{}\n\tif err != nil {\n\t\tlog.Printf(\"Can't read command line: %v\", err)\n\t\tline.Err = err\n\t\tline.Raw = \"\"\n\t} else {\n\t\tline.Raw = strings.TrimRight(string(raw), \"\\n\")\n\t\tline.AsMap = parseToMap(line.Raw)\n\t}\n\treturn line\n}",
"func (m *modUdpPingClient) Parse() error {\n\n\tlistCommand := flag.NewFlagSet(\"\", flag.ExitOnError)\n\tflagVerbose := listCommand.Bool(\"verbose\", false, \"verbose output\")\n\tflagPrintConfig := listCommand.Bool(\"print-config\", false, \"print default config\")\n\n\targcount := 3\n\tlistCommand.Parse(os.Args[argcount:])\n\tif *flagVerbose {\n\t\tm.verbose = true\n\t\tfmt.Fprintf(os.Stderr, \"verbose: enabled\\n\")\n\t\targcount += 1\n\t}\n\tif *flagPrintConfig {\n\t\tconfig := m.configJsonify()\n\t\tfmt.Fprintf(os.Stderr, \"default config:\\n\")\n\t\tfmt.Fprintf(os.Stdout, \"%s\\n\", config)\n\t\tos.Exit(0)\n\t}\n\n\t// parse arguments\n\tfor _, word := range os.Args[argcount:] {\n\t\tok := valid_option(word)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"not a valid option: %q, must be key=val\", word)\n\t\t}\n\t\tkv := strings.Split(word, \"=\")\n\t\tm.cli_args[kv[0]] = kv[1]\n\t}\n\n\treturn nil\n}",
"func NewCmdFromStr(input string) (*Command, error) {\n\tinputSplitArr := strings.Fields(input) // input = \"mkdir a/b\"\n\n\tif len(inputSplitArr) > 2 {\n\t\treturn nil, fmt.Errorf(invalidInputErr)\n\t}\n\tcmdInstruction := inputSplitArr[0]\n\tif err := validateInstruction(cmdInstruction); err != nil { // validate instruction\n\t\treturn nil, err\n\t}\n\tcmd := &Command{\n\t\tInstruction: cmdInstruction, // --> \"mkdir\"\n\t}\n\n\tif len(inputSplitArr) > 1 {\n\t\tcmd.Parameters = inputSplitArr[1] // --> \"a/b\"\n\t}\n\n\treturn cmd, nil\n}",
"func parseGitCommand(sshcmd string) (command, name string, err error) {\n\t// The following regex validates the git command, which is in the form:\n\t// <git-command> [<namespace>/]<name>\n\t// with namespace being optional. If a namespace is used, we validate it\n\t// according to the following:\n\t// - a namespace is optional\n\t// - a namespace contains only alphanumerics, underlines, @´s, -´s, +´s\n\t// and periods but it does not start with a period (.)\n\t// - one and exactly one slash (/) separates namespace and the actual name\n\tr, err := regexp.Compile(`(git-[a-z-]+) '/?([\\w-+@][\\w-+.@]*/)?([\\w-]+)\\.git'`)\n\t// r, err := regexp.Compile(`git-(upload|receive)-pack '/?([\\w-+@][\\w-+.@]*/)?([\\w-]+)\\.git'`)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tm := r.FindStringSubmatch(sshcmd)\n\tif len(m) != 4 {\n\t\treturn \"\", \"\", errors.New(\"You've tried to execute some weird command, I'm deliberately denying you to do that, get over it.\")\n\t}\n\treturn m[1], m[2] + m[3], nil\n}",
"func ParseCommandDir(path string) (ParsedCommands, error) {\n\tfs := token.NewFileSet()\n\tpkgs, err := parser.ParseDir(fs, path, nil, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfv := &commandVisitor{}\n\tfor _, pkg := range pkgs {\n\t\tast.Walk(fv, pkg)\n\t}\n\n\treturn fv.parsedCommands, nil\n}",
"func (app *App) Parse(arguments []string) error {\n\t// reset the requested command\n\tapp.invoked = nil\n\n\tif arguments == nil || len(arguments) == 0 {\n\t\treturn app.usageFailf(\"no arguments\")\n\t}\n\tout := app.Output()\n\n\tinitFlagSet := func(name, usage string) *flag.FlagSet {\n\t\tfs := flag.NewFlagSet(name, app.ErrorHandling)\n\t\tfs.SetOutput(out)\n\t\tfs.Usage = func() {\n\t\t\tfmt.Fprintln(out, usage)\n\t\t}\n\t\treturn fs\n\t}\n\n\tcmdName := arguments[0]\n\tif strings.HasPrefix(cmdName, \"-\") {\n\t\t// TODO: make app like command interface and use (cmd *Command) FlagSet\n\t\tfs := initFlagSet(\"\", app.Usage)\n\t\treturn fs.Parse(arguments)\n\t}\n\n\tcmd := app.findCommandByName(cmdName)\n\tif cmd == nil {\n\t\treturn app.usageFailf(\"unknown command %q\", cmdName)\n\t}\n\n\tfs := cmd.FlagSet(out)\n\n\terr := fs.Parse(arguments[1:])\n\n\tif err == nil {\n\t\t// save the requested command\n\t\tapp.invoked = cmd\n\t}\n\treturn err\n}",
"func (crc *CmdGetRegNumWithColour) Parse(argString string) error {\n\tcrc.Command.Parse(argString)\n\tcrc.Color = crc.Args[0]\n\treturn nil\n}",
"func (c *RunCommand) GetParsedCmd() (string, []string, error) {\n\tcmdStr := os.ExpandEnv(c.GetCmdString())\n\treturn cmdParse(cmdStr)\n}",
"func UnmarshalCommand(data []byte) (Command, error) {\n\tvar cmd Command\n\n\t// 2 unknown bytes preceding, ignoring right now\n\tslug := string(data[2:6])\n\tswitch slug {\n\tcase \"InCm\":\n\t\tcmd = new(cmds.IncmCmd)\n\tcase \"_ver\":\n\t\tcmd = new(cmds.VerCmd)\n\tcase \"_pin\":\n\t\tcmd = new(cmds.PinCmd)\n\tcase \"Warn\":\n\t\tcmd = new(cmds.WarnCmd)\n\tcase \"_top\":\n\t\tcmd = new(cmds.TopCmd)\n\tcase \"_MeC\":\n\t\tcmd = new(cmds.MecCmd)\n\tcase \"_mpl\":\n\t\tcmd = new(cmds.MplCmd)\n\tcase \"_MvC\":\n\t\tcmd = new(cmds.MvcCmd)\n\tcase \"_SSC\":\n\t\tcmd = new(cmds.SscCmd)\n\tcase \"_TlC\":\n\t\tcmd = new(cmds.TlcCmd)\n\tcase \"_MAC\":\n\t\tcmd = new(cmds.MacCmd)\n\tcase \"Powr\":\n\t\tcmd = new(cmds.PowrCmd)\n\tcase \"VidM\":\n\t\tcmd = new(cmds.VidmCmd)\n\tcase \"InPr\":\n\t\tcmd = new(cmds.InprCmd)\n\tcase \"PrgI\":\n\t\tcmd = new(cmds.PrgiCmd)\n\tcase \"PrvI\":\n\t\tcmd = new(cmds.PrviCmd)\n\tcase \"AuxS\":\n\t\tcmd = new(cmds.AuxsCmd)\n\tcase \"MPCE\":\n\t\tcmd = new(cmds.MpceCmd)\n\tcase \"MPfe\":\n\t\tcmd = new(cmds.MpfeCmd)\n\tcase \"TlIn\":\n\t\tcmd = make(cmds.TlinCmd)\n\tcase \"TlSr\":\n\t\tcmd = make(cmds.TlsrCmd)\n\tcase \"Time\":\n\t\tcmd = new(cmds.TimeCmd)\n\tdefault:\n\t\t// unknown command (yet)\n\t\tcmd = cmds.NewUnknownCommand(slug)\n\t}\n\n\treturn cmd, cmd.UnmarshalBinary(data[6:])\n}",
"func (p *Parser) Parse(msg string) (err error) {\n\n\tif list := re.Operation.FindStringSubmatch(msg); list != nil {\n\t\terr = p.handleOperation(list[1:])\n\t} else if list := re.Begin.FindStringSubmatch(msg); list != nil {\n\t\terr = p.handleBegin(list[1:])\n\t} else if list := re.Commit.FindStringSubmatch(msg); list != nil {\n\t\terr = p.handleCommit(list[1:])\n\t} else {\n\t\terr = errors.Wrapf(ErrInvalidMessage, \"message: %s\", msg)\n\t}\n\n\treturn errors.WithStack(err)\n}",
"func ParseRequest(msg []byte) (btcjson.Cmd, *btcjson.Error) {\n\tcmd, err := btcjson.ParseMarshaledCmd(msg)\n\tif err != nil || cmd.Id() == nil {\n\t\treturn cmd, &btcjson.ErrInvalidRequest\n\t}\n\treturn cmd, nil\n}",
"func (cm *Cmd) parsePlugCmd() {\n\n\tif len(os.Args) < 3 {\n\t\tfmt.Println(\"not found [Plug-in name]\")\n\t\tfmt.Println(\"go-sniffer [device] [plug] [plug's params(optional)]\")\n\t\tos.Exit(1)\n\t}\n\n\tcm.Device = os.Args[1]\n\tplugName := os.Args[2]\n\tplugParams := os.Args[3:]\n\tcm.plugHandle.SetOption(plugName, plugParams)\n}",
"func (cgs *CmdGetSlotNumWithRegNum) Parse(argString string) error {\n\tcgs.Command.Parse(argString)\n\tcgs.RegistrationNumber = cgs.Args[0]\n\treturn nil\n}",
"func parseModCommand(dpack DataPackage) {\n\tdebug(\"Parsing inbound mod command: \" + dpack.Message)\n}",
"func parseMany(invocation string, verbose bool) ([]*command, error) {\n\tif len(strings.TrimSpace(invocation)) == 0 {\n\t\treturn nil, fmt.Errorf(\"Empty strings are not valid commands.\")\n\t}\n\n\tp := newParser(invocation, verbose)\n\n\tcmds := make([]*command, 0)\n\tp.Scan()\n\tfor len(p.errors) == 0 && p.tok != scanner.EOF {\n\t\tcmds = append(cmds, p.command(p.tok))\n\t}\n\tif len(p.errors) > 0 {\n\t\treterrs := make([]string, len(p.errors))\n\t\tfor i, err := range p.errors {\n\t\t\treterrs[i] = err.Error()\n\t\t}\n\t\treturn cmds, e(strings.Join(reterrs, \"\\n\"))\n\t}\n\treturn cmds, nil\n}",
"func (cm *Cmd) parseInternalCmd() {\n\n\targ := string(os.Args[1])\n\tcmd := strings.Trim(arg, InternalCmdPrefix)\n\n\tswitch cmd {\n\tcase InternalCmdHelp:\n\t\tcm.printHelpMessage()\n\t\tbreak\n\tcase InternalCmdEnv:\n\t\tfmt.Println(\"External plug-in path : \" + cm.plugHandle.dir)\n\t\tbreak\n\tcase InternalCmdList:\n\t\tcm.plugHandle.PrintList()\n\t\tbreak\n\tcase InternalCmdVer:\n\t\tfmt.Println(cxt.Version)\n\t\tbreak\n\tcase InternalDevice:\n\t\tcm.printDevice()\n\t\tbreak\n\t}\n\tos.Exit(1)\n}",
"func (p *Parser) ParseString(str string, logger *log.Logger) ([]pakelib.Command, error) {\n\tvar commands []pakelib.Command\n\tsilentLogger := log.New(ioutil.Discard, \"\", log.LstdFlags)\n\tlines := strings.Split(str, \"\\n\")\n\tfor linenum, line := range lines {\n\t\tcommand, err := p.ParseLine(line, silentLogger)\n\t\tif err != nil {\n\t\t\terrMsg := fmt.Errorf(\"An error occured on line %d: %s\", linenum+1, err.Error())\n\t\t\tlogger.Println(errMsg.Error())\n\t\t\treturn []pakelib.Command{}, errMsg\n\t\t}\n\t\tcommands = append(commands, command)\n\t}\n\treturn commands, nil\n}",
"func FormatCommand(s string, t int) (Command, error) {\n\traw := make(map[string]Command)\n\terr := json.Unmarshal([]byte(s), &raw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd, ok := raw[\"action\"]\n\tif ok != true {\n\t\treturn nil, errors.New(\"no command specified\")\n\t}\n\tswitch cmd {\n\tcase cmdAction.MOVE:\n\t\tres := moveCommand{}\n\t\terr = json.Unmarshal([]byte(s), &res)\n\t\tres.Team = t\n\t\treturn res, err\n\tcase cmdAction.ABILITY:\n\t\tres := abilityCommand{}\n\t\terr = json.Unmarshal([]byte(s), &res)\n\t\tres.Team = t\n\t\treturn res, err\n\tcase cmdAction.ATTACK:\n\t\tres := attackCommand{}\n\t\terr = json.Unmarshal([]byte(s), &res)\n\t\tres.Team = t\n\t\treturn res, err\n\tcase cmdAction.ENDTURN:\n\t\tres := endTurnCommand{}\n\t\terr = json.Unmarshal([]byte(s), &res)\n\t\tres.Team = t\n\t\treturn res, err\n\tcase cmdAction.FORFEIT:\n\t\tprintln(\"I am definitely telling you it is forfeit\")\n\t\tres := forfeitCommand{}\n\t\terr = json.Unmarshal([]byte(s), &res)\n\t\tres.Team = t\n\t\treturn res, err\n\tdefault:\n\t\treturn nil, errors.New(\"unrecognized command\")\n\t}\n}",
"func ParseFlags(cmd string) (Flags, error) {\n\tgroups := clusterNameFlagRegex.FindAllStringSubmatch(cmd, -1)\n\tcmd, clusterName := extractParam(cmd, groups)\n\n\tcmd, filter, err := extractFilterParam(cmd)\n\tif err != nil {\n\t\treturn Flags{}, err\n\t}\n\ttokenized, err := shellwords.Parse(cmd)\n\tif err != nil {\n\t\treturn Flags{}, errors.New(cantParseCmd)\n\t}\n\treturn Flags{\n\t\tCleanCmd: cmd,\n\t\tFilter: filter,\n\t\tClusterName: clusterName,\n\t\tTokenizedCmd: tokenized,\n\t}, nil\n}",
"func Parse() {\n\tCommandLine.Complete()\n\tCommandLine.Parse(os.Args[1:])\n}",
"func (self *Opts) Parse() error {\n nextLiteral := false\n for i, arg := range self.AllArgs {\n if !strings.HasPrefix(arg, \"-\") || nextLiteral {\n nextLiteral = false\n if lcmd := self.LastCommand(); lcmd != nil && lcmd.ExpectedArgs() > 0 {\n lcmd.AppendArg(arg, i)\n } else {\n self.AppendExtraArg(arg, i)\n }\n continue\n }\n if arg == \"-\" { // bare \"-\" escapes the next argument\n nextLiteral = true\n continue\n }\n if arg == \"--\" { // stop Commands, rest go to extra args\n extrasIndex := i + 1\n for k, arg := range self.AllArgs[extrasIndex:] {\n self.AppendExtraArg(arg, k + extrasIndex)\n }\n break\n }\n var cmd, cmdArg string\n var spec CommandSpec\n prefix := \"-\"\n if strings.HasPrefix(arg, \"--\") {\n prefix = \"--\"\n kv := strings.SplitN(arg[2:], \"=\", 2)\n cmd = kv[0]\n spec = self.MenuWord(cmd)\n if len(kv) == 2 {\n cmdArg = kv[1]\n }\n } else {\n cmdRune, size := utf8.DecodeRuneInString(arg[1:])\n cmd = string(cmdRune)\n spec = self.MenuLetter(cmdRune)\n cmdArg = arg[1 + size:]\n }\n self.AppendCommand(cmd, i, spec)\n if spec == nil {\n return fmt.Errorf(\"unknown option: %s in: %s\", self.LastCommand(), arg)\n }\n if cmdArg == \"\" {\n continue\n }\n if lcmd := self.LastCommand(); lcmd.ExpectedArgs() > 0 {\n lcmd.AppendArg(cmdArg, i)\n continue\n }\n if prefix == \"--\" {\n return fmt.Errorf(\"--%s takes no values, but saw %s\", cmd, arg)\n }\n // series of single-letter flags\n for _, flag := range cmdArg {\n spec = self.MenuLetter(flag)\n self.AppendCommand(string(flag), i, spec)\n if spec == nil {\n return fmt.Errorf(\"unknown option: %s in: %s\", self.LastCommand(), arg)\n }\n }\n }\n for _, opt := range self.Commands {\n if opt.ArgsRequired() {\n return fmt.Errorf(\"%s is missing a value\", opt)\n }\n }\n return nil\n}",
"func Parse(s string, flags syntax.Flags,) (*syntax.Regexp, error)",
"func Parse(stderr, stdout io.Writer, args []string) (inv Invocation, cmd Command, err error) {\n\tinv.Stdout = stdout\n\tfs := flag.FlagSet{}\n\tfs.SetOutput(stdout)\n\n\t// options flags\n\n\tfs.BoolVar(&inv.Force, \"f\", false, \"force recreation of compiled magefile\")\n\tfs.BoolVar(&inv.Debug, \"debug\", mg.Debug(), \"turn on debug messages\")\n\tfs.BoolVar(&inv.Verbose, \"v\", mg.Verbose(), \"show verbose output when running mage targets\")\n\tfs.BoolVar(&inv.Help, \"h\", false, \"show this help\")\n\tfs.DurationVar(&inv.Timeout, \"t\", 0, \"timeout in duration parsable format (e.g. 5m30s)\")\n\tfs.BoolVar(&inv.Keep, \"keep\", false, \"keep intermediate mage files around after running\")\n\tfs.StringVar(&inv.Dir, \"d\", \"\", \"directory to read magefiles from\")\n\tfs.StringVar(&inv.WorkDir, \"w\", \"\", \"working directory where magefiles will run\")\n\tfs.StringVar(&inv.GoCmd, \"gocmd\", mg.GoCmd(), \"use the given go binary to compile the output\")\n\tfs.StringVar(&inv.GOOS, \"goos\", \"\", \"set GOOS for binary produced with -compile\")\n\tfs.StringVar(&inv.GOARCH, \"goarch\", \"\", \"set GOARCH for binary produced with -compile\")\n\tfs.StringVar(&inv.Ldflags, \"ldflags\", \"\", \"set ldflags for binary produced with -compile\")\n\n\t// commands below\n\n\tfs.BoolVar(&inv.List, \"l\", false, \"list mage targets in this directory\")\n\tvar showVersion bool\n\tfs.BoolVar(&showVersion, \"version\", false, \"show version info for the mage binary\")\n\tvar mageInit bool\n\tfs.BoolVar(&mageInit, \"init\", false, \"create a starting template if no mage files exist\")\n\tvar clean bool\n\tfs.BoolVar(&clean, \"clean\", false, \"clean out old generated binaries from CACHE_DIR\")\n\tvar compileOutPath string\n\tfs.StringVar(&compileOutPath, \"compile\", \"\", \"output a static binary to the given path\")\n\n\tfs.Usage = func() {\n\t\tfmt.Fprint(stdout, `\nmage [options] [target]\n\nMage is a make-like command runner. See https://magefile.org for full docs.\n\nCommands:\n -clean clean out old generated binaries from CACHE_DIR\n -compile <string>\n output a static binary to the given path\n -h show this help\n -init create a starting template if no mage files exist\n -l list mage targets in this directory\n -version show version info for the mage binary\n\nOptions:\n -d <string> \n directory to read magefiles from (default \".\" or \"magefiles\" if exists)\n -debug turn on debug messages\n -f force recreation of compiled magefile\n -goarch sets the GOARCH for the binary created by -compile (default: current arch)\n -gocmd <string>\n\t\t use the given go binary to compile the output (default: \"go\")\n -goos sets the GOOS for the binary created by -compile (default: current OS)\n -ldflags sets the ldflags for the binary created by -compile (default: \"\")\n -h show description of a target\n -keep keep intermediate mage files around after running\n -t <string>\n timeout in duration parsable format (e.g. 
5m30s)\n -v show verbose output when running mage targets\n -w <string>\n working directory where magefiles will run (default -d value)\n`[1:])\n\t}\n\terr = fs.Parse(args)\n\tif err == flag.ErrHelp {\n\t\t// parse will have already called fs.Usage()\n\t\treturn inv, cmd, err\n\t}\n\tif err == nil && inv.Help && len(fs.Args()) == 0 {\n\t\tfs.Usage()\n\t\t// tell upstream, to just exit\n\t\treturn inv, cmd, flag.ErrHelp\n\t}\n\n\tnumCommands := 0\n\tswitch {\n\tcase mageInit:\n\t\tnumCommands++\n\t\tcmd = Init\n\tcase compileOutPath != \"\":\n\t\tnumCommands++\n\t\tcmd = CompileStatic\n\t\tinv.CompileOut = compileOutPath\n\t\tinv.Force = true\n\tcase showVersion:\n\t\tnumCommands++\n\t\tcmd = Version\n\tcase clean:\n\t\tnumCommands++\n\t\tcmd = Clean\n\t\tif fs.NArg() > 0 {\n\t\t\t// Temporary dupe of below check until we refactor the other commands to use this check\n\t\t\treturn inv, cmd, errors.New(\"-h, -init, -clean, -compile and -version cannot be used simultaneously\")\n\t\t}\n\t}\n\tif inv.Help {\n\t\tnumCommands++\n\t}\n\n\tif inv.Debug {\n\t\tdebug.SetOutput(stderr)\n\t}\n\n\tinv.CacheDir = mg.CacheDir()\n\n\tif numCommands > 1 {\n\t\tdebug.Printf(\"%d commands defined\", numCommands)\n\t\treturn inv, cmd, errors.New(\"-h, -init, -clean, -compile and -version cannot be used simultaneously\")\n\t}\n\n\tif cmd != CompileStatic && (inv.GOARCH != \"\" || inv.GOOS != \"\") {\n\t\treturn inv, cmd, errors.New(\"-goos and -goarch only apply when running with -compile\")\n\t}\n\n\tinv.Args = fs.Args()\n\tif inv.Help && len(inv.Args) > 1 {\n\t\treturn inv, cmd, errors.New(\"-h can only show help for a single target\")\n\t}\n\n\tif len(inv.Args) > 0 && cmd != None {\n\t\treturn inv, cmd, fmt.Errorf(\"unexpected arguments to command: %q\", inv.Args)\n\t}\n\tinv.HashFast = mg.HashFast()\n\treturn inv, cmd, err\n}",
"func (cmd *GetUserListCmd) Parse(args []string) {\n\tcmd.flagSet.Parse(args)\n}",
"func (p *parser) parse(cmdOutput string) []string {\n\tp.ips = []string{}\n\tp.scanner = bufio.NewScanner(strings.NewReader(cmdOutput))\n\tfor p.scanner.Scan() {\n\t\tline := p.scanner.Text()\n\t\tline = strings.TrimLeft(line, \" \\t\")\n\t\tp.parseLine(line)\n\t}\n\treturn p.ips\n}",
"func Parse() (commandLine CommandLine) {\n\tflag.StringVar(&commandLine.Log, \"log\", \"info\", \"log level (trace|debug|info|warn|error|fatal)\")\n\tflag.StringVar(&commandLine.ConfigFolder, \"config\", \"./\", \"config file directory path\")\n\tflag.Parse()\n\treturn\n}",
"func (rs *Comparators) Parse(fields []interface{}) error {\n\tvar available []string\n\tswitch len(fields) {\n\tcase 2:\n\t\tcmps, ok := fields[1].([]interface{})\n\t\tif !ok {\n\t\t\treturn errors.New(\"Second argment must be a list\")\n\t\t}\n\t\tfor _, cmp := range cmps {\n\t\t\ts, ok := cmp.(string)\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"Comparator ID must be a string\")\n\t\t\t}\n\n\t\t\tavailable = append(available, s)\n\t\t}\n\n\t\tfallthrough\n\tcase 1:\n\t\tcmp, ok := fields[0].(string)\n\t\tif !ok {\n\t\t\treturn errors.New(\"Comparator ID must be a string\")\n\t\t}\n\n\t\trs.Active = cmp\n\t\trs.Matched = available\n\tcase 0:\n\t\treturn errors.New(\"At least one argument is required for the COMPARATOR command\")\n\tdefault:\n\t\treturn errors.New(\"Too many arguments for the COMPARATOR command\")\n\t}\n\n\treturn nil\n}",
"func ParseCmdRequest(b []byte) (*CmdRequest, error) {\n\tif len(b) < 7 {\n\t\treturn nil, errors.New(\"short cmd request\")\n\t}\n\tif b[0] != Version5 {\n\t\treturn nil, errors.New(\"unexpected protocol version\")\n\t}\n\tif Command(b[1]) != CmdConnect {\n\t\treturn nil, errors.New(\"unexpected command\")\n\t}\n\tif b[2] != 0 {\n\t\treturn nil, errors.New(\"non-zero reserved field\")\n\t}\n\treq := &CmdRequest{Version: int(b[0]), Cmd: Command(b[1])}\n\tl := 2\n\toff := 4\n\tswitch b[3] {\n\tcase AddrTypeIPv4:\n\t\tl += net.IPv4len\n\t\treq.Addr.IP = make(net.IP, net.IPv4len)\n\tcase AddrTypeIPv6:\n\t\tl += net.IPv6len\n\t\treq.Addr.IP = make(net.IP, net.IPv6len)\n\tcase AddrTypeFQDN:\n\t\tl += int(b[4])\n\t\toff = 5\n\tdefault:\n\t\treturn nil, errors.New(\"unknown address type\")\n\t}\n\tif len(b[off:]) < l {\n\t\treturn nil, errors.New(\"short cmd request\")\n\t}\n\tif req.Addr.IP != nil {\n\t\tcopy(req.Addr.IP, b[off:])\n\t} else {\n\t\treq.Addr.Name = string(b[off : off+l-2])\n\t}\n\treq.Addr.Port = int(b[off+l-2])<<8 | int(b[off+l-1])\n\treturn req, nil\n}",
"func (o *DrainOptions) Parse(cmd *cobra.Command) error {\n\tsettings.Bind(cmd.Flags()) // needs to be run inside the command and before any viper usage for flags to be visible\n\n\tif debug {\n\t\tlog.Debug().Msgf(\"All keys: %+v\", viper.AllKeys())\n\t\tlog.Debug().Msgf(\"All settings: %+v\", viper.AllSettings())\n\t\tcmd.Flags().VisitAll(func(flag *pflag.Flag) {\n\t\t\tlog.Debug().Msgf(\"'%s' -> flag: '%+v' | setting: '%+v'\", flag.Name, flag.Value, viper.Get(flag.Name))\n\t\t})\n\t\tlog.Debug().Msgf(\"Settings: %+v\", *o)\n\t}\n\n\tif err := settings.Parse(o.Kubernetes); err != nil {\n\t\treturn err\n\t}\n\tif err := settings.Parse(o.Drainer); err != nil {\n\t\treturn err\n\t}\n\tlog.Debug().Msgf(\"Parsed settings: %+v\", *o)\n\treturn nil\n}",
"func (s *Server) parseSlashCommand(r *http.Request) (*SlashCommand, error) {\n\n\tsc := &SlashCommand{}\n\n\t// parse data from the HTTP request\n\terr := r.ParseForm()\n\tif err != nil {\n\t\treturn sc, err\n\t}\n\n\t// load form data into a SlashCommand struct\n\tsc.ChannelID = r.Form.Get(\"channel_id\")\n\tsc.ChannelName = r.Form.Get(\"channel_name\")\n\tsc.Command = r.Form.Get(\"command\")\n\tsc.TeamDomain = r.Form.Get(\"team_domain\")\n\tsc.TeamID = r.Form.Get(\"team_id\")\n\tsc.Text = r.Form.Get(\"text\")\n\tsc.Token = r.Form.Get(\"token\")\n\tsc.UserID = r.Form.Get(\"user_id\")\n\tsc.UserName = r.Form.Get(\"user_name\")\n\n\treturn sc, nil\n}",
"func extractCommand(body []byte) (cmd Command, args string) {\n\tcmdPos := bytes.Index(append([]byte{'\\n'}, body...), []byte(\"\\n\"+commandPrefix))\n\tif cmdPos == -1 {\n\t\tcmd = CmdNone\n\t\treturn\n\t}\n\tcmdPos += len(commandPrefix)\n\tfor cmdPos < len(body) && body[cmdPos] == ' ' {\n\t\tcmdPos++\n\t}\n\tcmdEnd := bytes.IndexByte(body[cmdPos:], '\\n')\n\tif cmdEnd == -1 {\n\t\tcmdEnd = len(body) - cmdPos\n\t}\n\tif cmdEnd1 := bytes.IndexByte(body[cmdPos:], '\\r'); cmdEnd1 != -1 && cmdEnd1 < cmdEnd {\n\t\tcmdEnd = cmdEnd1\n\t}\n\tif cmdEnd1 := bytes.IndexByte(body[cmdPos:], ' '); cmdEnd1 != -1 && cmdEnd1 < cmdEnd {\n\t\tcmdEnd = cmdEnd1\n\t}\n\tswitch string(body[cmdPos : cmdPos+cmdEnd]) {\n\tdefault:\n\t\tcmd = CmdUnknown\n\tcase \"\":\n\t\tcmd = CmdNone\n\tcase \"upstream\":\n\t\tcmd = CmdUpstream\n\tcase \"fix\", \"fix:\":\n\t\tcmd = CmdFix\n\tcase \"dup\", \"dup:\":\n\t\tcmd = CmdDup\n\tcase \"undup\":\n\t\tcmd = CmdUnDup\n\tcase \"test\", \"test:\":\n\t\tcmd = CmdTest\n\tcase \"invalid\":\n\t\tcmd = CmdInvalid\n\tcase \"uncc\", \"uncc:\":\n\t\tcmd = CmdUnCC\n\tcase \"test_5_arg_cmd\":\n\t\tcmd = cmdTest5\n\t}\n\t// Some email clients split text emails at 80 columns are the transformation is irrevesible.\n\t// We try hard to restore what was there before.\n\t// For \"test:\" command we know that there must be 2 tokens without spaces.\n\t// For \"fix:\"/\"dup:\" we need a whole non-empty line of text.\n\tswitch cmd {\n\tcase CmdTest:\n\t\targs = extractArgsTokens(body[cmdPos+cmdEnd:], 2)\n\tcase cmdTest5:\n\t\targs = extractArgsTokens(body[cmdPos+cmdEnd:], 5)\n\tcase CmdFix, CmdDup:\n\t\targs = extractArgsLine(body[cmdPos+cmdEnd:])\n\tcase CmdUnknown:\n\t\targs = extractArgsLine(body[cmdPos:])\n\t}\n\treturn\n}",
"func ParseMessage(message string) (Message, error) {\n\tif len(message) > 0 {\n\t\ttags := make(map[string]string)\n\t\tvar command string\n\t\tvar source string\n\t\tvar parameters []string\n\n\t\targs := strings.Fields(message)\n\t\tfor i := 0; i < len(args); i++ {\n\t\t\t// TODO: Define a lookup source for commands\n\t\t\tif args[i] == \"CAP\" || args[i] == \"PRIVMSG\" || args[i] == \"PING\" {\n\t\t\t\tcommand = args[i]\n\t\t\t\tparameters = args[i+1:]\n\t\t\t\tbreak\n\t\t\t}\n\t\t\t// Handle tags\n\t\t\tif args[i][0] == '@' {\n\t\t\t\trawTags := strings.Split(args[i][1:len(args[i])], \";\")\n\t\t\t\tfor _, tag := range rawTags {\n\t\t\t\t\tkeyValuePair := strings.Split(tag, \"=\")\n\t\t\t\t\tif len(keyValuePair) == 2 {\n\t\t\t\t\t\ttags[keyValuePair[0]] = keyValuePair[1]\n\t\t\t\t\t} else {\n\t\t\t\t\t\treturn Message{}, errors.New(\"invalid tag format\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Handle source\n\t\t\tif args[i][0] == ':' {\n\t\t\t\tsource = args[i][1:len(args[i])]\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn Message{\n\t\t\tTags: tags,\n\t\t\tSource: source,\n\t\t\tCommand: command,\n\t\t\tParameters: parameters,\n\t\t}, nil\n\t}\n\treturn Message{}, errors.New(\"empty message\")\n}",
"func (p *parser) command(tok rune) *command {\n\tswitch tok {\n\tcase '(':\n\t\tcmd := p.command(p.Scan())\n\t\tp.demands(')')\n\t\treturn cmd\n\tcase scanner.Ident:\n\t\tcmd := &command{\n\t\t\tname: p.TokenText(),\n\t\t\tparams: p.params(),\n\t\t}\n\t\treturn cmd\n\t}\n\tp.parseError(\"command\")\n\treturn &command{}\n}",
"func (p *Parser) Parse(args []string) (remainder []string) {\n\n\tif p.ParseHelp && !p.helpAdded {\n\t\tdescription := p.HelpOptDescription\n\t\tif description == \"\" {\n\t\t\tdescription = \"Show this help and exit\"\n\t\t}\n\t\tif p.HideHelpOpt {\n\t\t\tp.Hidden()\n\t\t}\n\t\tp.Flags(\"-h\", \"--help\").Bool(description)\n\t\tp.helpAdded = true\n\t}\n\n\tif p.ParseVersion && !p.versionAdded {\n\t\tdescription := p.VersionOptDescription\n\t\tif description == \"\" {\n\t\t\tdescription = \"Show the version and exit\"\n\t\t}\n\t\tif p.HideVersionOpt {\n\t\t\tp.Hidden()\n\t\t}\n\t\tp.Flags(\"-v\", \"--version\").Bool(description)\n\t\tp.versionAdded = true\n\t}\n\n\targLength := len(args) - 1\n\tcomplete, words, compWord, prefix := getCompletionData()\n\n\t// Command-line auto-completion support.\n\tif complete {\n\n\t\tseenLong := []string{}\n\t\tseenShort := []string{}\n\n\t\tsubcommands, err := shlex.Split(args[0])\n\t\tif err != nil {\n\t\t\tprocess.Exit(1)\n\t\t}\n\n\t\twords = words[len(subcommands):]\n\t\tcompWord -= len(subcommands)\n\n\t\targWords := []string{}\n\t\tskipNext := false\n\t\toptCount := 0\n\n\t\tfor _, word := range words {\n\t\t\tif skipNext {\n\t\t\t\tskipNext = false\n\t\t\t\toptCount += 1\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif strings.HasPrefix(word, \"--\") && word != \"--\" {\n\t\t\t\top, ok := p.long2options[word]\n\t\t\t\tif ok {\n\t\t\t\t\tseenLong = append(seenLong, op.longFlag)\n\t\t\t\t\tseenShort = append(seenShort, op.shortFlag)\n\t\t\t\t\tif op.label != \"\" {\n\t\t\t\t\t\tskipNext = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\toptCount += 1\n\t\t\t} else if strings.HasPrefix(word, \"-\") && !(word == \"-\" || word == \"--\") {\n\t\t\t\top, ok := p.short2options[word]\n\t\t\t\tif ok {\n\t\t\t\t\tseenLong = append(seenLong, op.longFlag)\n\t\t\t\t\tseenShort = append(seenShort, op.shortFlag)\n\t\t\t\t\tif op.label != \"\" {\n\t\t\t\t\t\tskipNext = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\toptCount += 1\n\t\t\t} else {\n\t\t\t\targWords = append(argWords, word)\n\t\t\t\tif p.haltFlagParsing {\n\t\t\t\t\tif p.haltFlagParsingString != \"\" {\n\t\t\t\t\t\tif word == p.haltFlagParsingString {\n\t\t\t\t\t\t\tprocess.Exit(1)\n\t\t\t\t\t\t}\n\t\t\t\t\t} else if (compWord - optCount) == p.haltFlagParsingN {\n\t\t\t\t\t\tprocess.Exit(1)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Pass to the shell completion if the previous word was a flag\n\t\t// expecting some parameter.\n\t\tif compWord >= 1 {\n\t\t\tvar completer Completer\n\t\t\tprev := words[compWord-1]\n\t\t\tif prev != \"--\" && prev != \"-\" {\n\t\t\t\tif strings.HasPrefix(prev, \"--\") {\n\t\t\t\t\top, ok := p.long2options[prev]\n\t\t\t\t\tif ok && op.label != \"\" {\n\t\t\t\t\t\tif op.completer == nil {\n\t\t\t\t\t\t\tprocess.Exit(1)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tcompleter = op.completer\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t} else if strings.HasPrefix(prev, \"-\") {\n\t\t\t\t\top, ok := p.short2options[prev]\n\t\t\t\t\tif ok && op.label != \"\" {\n\t\t\t\t\t\tif op.completer == nil {\n\t\t\t\t\t\t\tprocess.Exit(1)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tcompleter = op.completer\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif completer != nil {\n\t\t\t\tcompletions := make([]string, 0)\n\t\t\t\tfor _, item := range completer.Complete(argWords, compWord) {\n\t\t\t\t\tif strings.HasPrefix(item, prefix) {\n\t\t\t\t\t\tcompletions = append(completions, item)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tfmt.Print(strings.Join(completions, \" \"))\n\t\t\t\tprocess.Exit(1)\n\t\t\t}\n\t\t}\n\n\t\tcompletions := 
make([]string, 0)\n\n\t\tif p.Completer != nil {\n\t\t\tfor _, item := range p.Completer.Complete(argWords, compWord-optCount) {\n\t\t\t\tif strings.HasPrefix(item, prefix) {\n\t\t\t\t\tcompletions = append(completions, item)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor flag, op := range p.long2options {\n\t\t\tif !(contains(seenLong, op.longFlag) || contains(seenShort, op.shortFlag) || op.hidden) {\n\t\t\t\tif strings.HasPrefix(flag, prefix) {\n\t\t\t\t\tcompletions = append(completions, flag)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor flag, op := range p.short2options {\n\t\t\tif !(contains(seenLong, op.longFlag) || contains(seenShort, op.shortFlag) || op.hidden) {\n\t\t\t\tif strings.HasPrefix(flag, prefix) {\n\t\t\t\t\tcompletions = append(completions, flag)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfmt.Print(strings.Join(completions, \" \"))\n\t\tprocess.Exit(1)\n\n\t}\n\n\tif argLength == 0 {\n\t\treturn\n\t}\n\n\tvar op *option\n\tvar ok bool\n\n\tidx := 1\n\taddNext := false\n\n\tfor {\n\t\targ := args[idx]\n\t\tnoOpt := true\n\t\tif addNext {\n\t\t\tremainder = append(remainder, arg)\n\t\t\tif idx == argLength {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tidx += 1\n\t\t\tcontinue\n\t\t} else if strings.HasPrefix(arg, \"--\") && arg != \"--\" {\n\t\t\top, ok = p.long2options[arg]\n\t\t\tif ok {\n\t\t\t\tnoOpt = false\n\t\t\t}\n\t\t} else if strings.HasPrefix(arg, \"-\") && !(arg == \"-\" || arg == \"--\") {\n\t\t\top, ok = p.short2options[arg]\n\t\t\tif ok {\n\t\t\t\tnoOpt = false\n\t\t\t}\n\t\t} else {\n\t\t\tremainder = append(remainder, arg)\n\t\t\tif p.haltFlagParsing {\n\t\t\t\tif arg == p.haltFlagParsingString {\n\t\t\t\t\taddNext = true\n\t\t\t\t} else if len(remainder) == p.haltFlagParsingN {\n\t\t\t\t\taddNext = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif idx == argLength {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tidx += 1\n\t\t\tcontinue\n\t\t}\n\t\tif noOpt {\n\t\t\texit(\"%s: no such option: %s\", args[0], arg)\n\t\t}\n\t\tif op.label != \"\" {\n\t\t\tif idx == argLength {\n\t\t\t\texit(\"%s: %s option requires an argument\", args[0], arg)\n\t\t\t}\n\t\t}\n\t\tif op.valueType == boolValue {\n\t\t\tif op.longFlag == \"--help\" && p.ParseHelp {\n\t\t\t\tp.PrintUsage()\n\t\t\t\tprocess.Exit(1)\n\t\t\t} else if op.longFlag == \"--version\" && p.ParseVersion {\n\t\t\t\tfmt.Printf(\"%s\\n\", p.version())\n\t\t\t\tprocess.Exit(0)\n\t\t\t}\n\t\t\tv := op.value.(*bool)\n\t\t\t*v = true\n\t\t\top.defined = true\n\t\t\tidx += 1\n\t\t} else if op.valueType == durationValue {\n\t\t\tif idx == argLength {\n\t\t\t\texit(\"%s: no value specified for %s\", args[0], arg)\n\t\t\t}\n\t\t\tdurationValue, err := time.ParseDuration(args[idx+1])\n\t\t\tif err != nil {\n\t\t\t\texit(\"%s: couldn't convert %s value '%s' to a duration\", args[0], arg, args[idx+1])\n\t\t\t}\n\t\t\tv := op.value.(*time.Duration)\n\t\t\t*v = durationValue\n\t\t\top.defined = true\n\t\t\tidx += 2\n\t\t} else if op.valueType == stringValue {\n\t\t\tif idx == argLength {\n\t\t\t\texit(\"%s: no value specified for %s\", args[0], arg)\n\t\t\t}\n\t\t\tv := op.value.(*string)\n\t\t\t*v = args[idx+1]\n\t\t\top.defined = true\n\t\t\tidx += 2\n\t\t} else if op.valueType == intValue {\n\t\t\tif idx == argLength {\n\t\t\t\texit(\"%s: no value specified for %s\", args[0], arg)\n\t\t\t}\n\t\t\tintValue, err := strconv.Atoi(args[idx+1])\n\t\t\tif err != nil {\n\t\t\t\texit(\"%s: couldn't convert %s value '%s' to an integer\", args[0], arg, args[idx+1])\n\t\t\t}\n\t\t\tv := op.value.(*int)\n\t\t\t*v = intValue\n\t\t\top.defined = true\n\t\t\tidx += 2\n\t\t} else if op.valueType == 
floatValue {\n\t\t\tif idx == argLength {\n\t\t\t\texit(\"%s: no value specified for %s\", args[0], arg)\n\t\t\t}\n\t\t\tfloatValue, err := strconv.ParseFloat(args[idx+1], 64)\n\t\t\tif err != nil {\n\t\t\t\texit(\"%s: couldn't convert %s value '%s' to a float\", args[0], arg, args[idx+1])\n\t\t\t}\n\t\t\tv := op.value.(*float64)\n\t\t\t*v = floatValue\n\t\t\top.defined = true\n\t\t\tidx += 2\n\t\t}\n\t\tif idx > argLength {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tfor _, op := range p.options {\n\t\tif op.required && !op.defined {\n\t\t\texit(\"%s: required: %s\", args[0], op)\n\t\t}\n\t}\n\n\treturn\n\n}",
"func parseCommand(message string) Command {\n res := chatServerResponseRegex.FindAllStringSubmatch(message, -1)\n if (len(res) == 1) {\n // we've got a match\n return Command {\n Command: util.Decode(res[0][1]),\n Username: util.Decode(res[0][2]),\n Body: util.Decode(res[0][3]),\n }\n } else {\n // it's irritating that I can't return a nil value here - must be something I'm missing\n return Command{}\n }\n}",
"func Parse() {\n\tflag.Parse()\n}",
"func readCommand(args []string) (*Command, error) {\n\tvar cmd, subcmd *Command\n\tvar ok bool\n\tif len(args) == 0 {\n\t\t// No command passed in: Print usage.\n\t\treturn &Command{\n\t\t\tCmd: func(cmd *Command) error { return Usage(nil) },\n\t\t}, nil\n\t}\n\tvar name = args[0]\n\tcmd, ok = Commands[name]\n\tif !ok {\n\t\t// Command not found: Print usage.\n\t\treturn &Command{\n\t\t\tCmd: func(cmd *Command) error { return Usage(nil) },\n\t\t}, nil\n\t}\n\t// command found. Remove it from the argument list.\n\targs = args[1:]\n\n\tif len(cmd.children) == 0 {\n\t\treturn cmdWithFlagsChecked(cmd, args)\n\t}\n\n\t// len (cmd.children > 0)\n\n\tif len(args) == 0 {\n\t\t// Subcommands exist but none was not found in args.\n\t\t// If no main cmd is defined, return an error.\n\t\tif cmd.Cmd == nil {\n\t\t\treturn wrongOrMissingSubcommand(cmd)\n\t\t}\n\t}\n\n\t// len (cmd.children > 0) && len(args) > 0\n\n\tvar subname = args[0]\n\tsubcmd, ok = cmd.children[subname]\n\tif ok {\n\t\t// subcommand found.\n\t\targs = args[1:]\n\t\tcmd = subcmd\n\t} else {\n\t\t// no subcommand passed in, so cmd should have a Cmd to execute\n\t\treturn wrongOrMissingSubcommand(cmd)\n\t}\n\n\treturn cmdWithFlagsChecked(cmd, args)\n}",
"func ParsePacket(msg string) (Packet, error) {\n\tvar start int\n\treturn readPacket(bufio.NewReader(bytes.NewBufferString(msg)), &start, len(msg))\n}",
"func Parse(reader io.Reader) (*Script, error) {\n\tlogrus.Info(\"Parsing script file\")\n\n\tlineScanner := bufio.NewScanner(reader)\n\tlineScanner.Split(bufio.ScanLines)\n\tvar script Script\n\tscript.Preambles = make(map[string][]Command)\n\tline := 1\n\tfor lineScanner.Scan() {\n\t\ttext := strings.TrimSpace(lineScanner.Text())\n\t\tif text == \"\" || text[0] == '#' {\n\t\t\tline++\n\t\t\tcontinue\n\t\t}\n\t\tlogrus.Debugf(\"Parsing [%d: %s]\", line, text)\n\n\t\t// split DIRECTIVE [ARGS] after first space(s)\n\t\tvar cmdName, rawArgs string\n\t\ttokens := spaceSep.Split(text, 2)\n\t\tif len(tokens) == 2 {\n\t\t\trawArgs = tokens[1]\n\t\t}\n\t\tcmdName = tokens[0]\n\n\t\tif !Cmds[cmdName].Supported {\n\t\t\treturn nil, fmt.Errorf(\"line %d: %s unsupported\", line, cmdName)\n\t\t}\n\n\t\tswitch cmdName {\n\t\tcase CmdAs:\n\t\t\tcmd, err := NewAsCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Preambles[CmdAs] = []Command{cmd} // save only last AS instruction\n\t\tcase CmdEnv:\n\t\t\tcmd, err := NewEnvCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Preambles[CmdEnv] = append(script.Preambles[CmdEnv], cmd)\n\t\tcase CmdFrom:\n\t\t\tcmd, err := NewFromCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Preambles[CmdFrom] = []Command{cmd} // saves only last FROM\n\t\tcase CmdKubeConfig:\n\t\t\tcmd, err := NewKubeConfigCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Preambles[CmdKubeConfig] = []Command{cmd}\n\t\tcase CmdAuthConfig:\n\t\t\tcmd, err := NewAuthConfigCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Preambles[CmdAuthConfig] = []Command{cmd}\n\t\tcase CmdOutput:\n\t\t\tcmd, err := NewOutputCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Preambles[CmdOutput] = []Command{cmd}\n\t\tcase CmdWorkDir:\n\t\t\tcmd, err := NewWorkdirCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Preambles[CmdWorkDir] = []Command{cmd}\n\t\tcase CmdCapture:\n\t\t\tcmd, err := NewCaptureCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Actions = append(script.Actions, cmd)\n\t\tcase CmdCopy:\n\t\t\tcmd, err := NewCopyCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Actions = append(script.Actions, cmd)\n\t\tcase CmdRun:\n\t\t\tcmd, err := NewRunCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Actions = append(script.Actions, cmd)\n\t\tcase CmdKubeGet:\n\t\t\tcmd, err := NewKubeGetCommand(line, rawArgs)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tscript.Actions = append(script.Actions, cmd)\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"%s not supported\", cmdName)\n\t\t}\n\t\tlogrus.Debugf(\"%s parsed OK\", cmdName)\n\t\tline++\n\t}\n\tlogrus.Debug(\"Done parsing\")\n\treturn enforceDefaults(&script)\n}"
] | [
"0.76060694",
"0.7605101",
"0.75365144",
"0.73861027",
"0.71764857",
"0.71486765",
"0.71374834",
"0.7126098",
"0.70438343",
"0.7025327",
"0.702413",
"0.7009736",
"0.69934446",
"0.6967141",
"0.69124764",
"0.6912338",
"0.6868991",
"0.686784",
"0.6845462",
"0.6681955",
"0.6670775",
"0.6601739",
"0.6555076",
"0.6534537",
"0.64369214",
"0.6432527",
"0.64137983",
"0.6349242",
"0.6316524",
"0.6287986",
"0.6287986",
"0.6267622",
"0.6263046",
"0.6253235",
"0.6228169",
"0.62250215",
"0.62194425",
"0.6172338",
"0.616511",
"0.6130466",
"0.6095536",
"0.6094492",
"0.6077733",
"0.60666925",
"0.60360676",
"0.6028156",
"0.60202366",
"0.6013062",
"0.60065305",
"0.6006448",
"0.6001978",
"0.59738755",
"0.59710747",
"0.5970244",
"0.59691244",
"0.5961451",
"0.59451026",
"0.59425205",
"0.59349847",
"0.59241724",
"0.59201294",
"0.5916752",
"0.59070605",
"0.5894953",
"0.589467",
"0.58859676",
"0.58842623",
"0.5880487",
"0.58677197",
"0.5834815",
"0.580223",
"0.57992613",
"0.5774209",
"0.5760631",
"0.57593745",
"0.5735736",
"0.5724053",
"0.57099485",
"0.56976485",
"0.56751025",
"0.5654177",
"0.5647683",
"0.56388164",
"0.5612865",
"0.5598994",
"0.5594894",
"0.5577226",
"0.5573348",
"0.5555301",
"0.55161476",
"0.5506206",
"0.55050737",
"0.5497154",
"0.54710585",
"0.54478663",
"0.542425",
"0.5417528",
"0.5409529",
"0.53894645",
"0.53887403"
] | 0.8199973 | 0 |
applyResourcesToAllContainers applies consistent resource requests for all containers and all init containers in the pod | func (c *Cluster) applyResourcesToAllContainers(spec *v1.PodSpec, resources v1.ResourceRequirements) {
for i := range spec.InitContainers {
spec.InitContainers[i].Resources = resources
}
for i := range spec.Containers {
spec.Containers[i].Resources = resources
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func newContainerResources(m *influxdatav1alpha1.Influxdb) corev1.ResourceRequirements {\n\tresources := corev1.ResourceRequirements{}\n\tif m.Spec.Pod != nil {\n\t\tresources = m.Spec.Pod.Resources\n\t}\n\treturn resources\n}",
"func initContainers(pod corev1.Pod, secretKey, secretFormat string, parameters WhSvrParameters) (patch []patchOperation) {\n\tinitContainers := []corev1.Container{}\n\n\tsecretInjectorContainer := corev1.Container{\n\t\tImage: parameters.Image,\n\t\tName: \"secrets-injector\",\n\t\tCommand: []string{parameters.Command},\n\t\tArgs: []string{parameters.CommandArg, secretKey, parameters.SecretVolume, secretFormat},\n\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\tcorev1.VolumeMount{\n\t\t\t\tName: \"secrets\",\n\t\t\t\tMountPath: parameters.SecretVolume,\n\t\t\t},\n\t\t},\n\t}\n\n\tinitContainers = append(initContainers, secretInjectorContainer)\n\n\tvar initOp string\n\tif len(pod.Spec.InitContainers) != 0 {\n\t\tinitContainers = append(initContainers, pod.Spec.InitContainers...)\n\t\tinitOp = \"replace\"\n\t} else {\n\t\tinitOp = \"add\"\n\t}\n\n\tglog.V(4).Infof(\"Patch operation %s\", initOp)\n\n\tpatch = append(patch, patchOperation{\n\t\tOp: initOp,\n\t\tPath: \"/spec/initContainers\",\n\t\tValue: initContainers,\n\t})\n\n\treturn\n}",
"func WaitForResources(objects object.K8sObjects, opts *kubectlcmd.Options) error {\n\tif opts.DryRun {\n\t\tlogAndPrint(\"Not waiting for resources ready in dry run mode.\")\n\t\treturn nil\n\t}\n\n\tcs, err := kubernetes.NewForConfig(k8sRESTConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"k8s client error: %s\", err)\n\t}\n\n\terrPoll := wait.Poll(2*time.Second, opts.WaitTimeout, func() (bool, error) {\n\t\tpods := []v1.Pod{}\n\t\tservices := []v1.Service{}\n\t\tdeployments := []deployment{}\n\t\tnamespaces := []v1.Namespace{}\n\n\t\tfor _, o := range objects {\n\t\t\tkind := o.GroupVersionKind().Kind\n\t\t\tswitch kind {\n\t\t\tcase \"Namespace\":\n\t\t\t\tnamespace, err := cs.CoreV1().Namespaces().Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tnamespaces = append(namespaces, *namespace)\n\t\t\tcase \"Pod\":\n\t\t\t\tpod, err := cs.CoreV1().Pods(o.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tpods = append(pods, *pod)\n\t\t\tcase \"ReplicationController\":\n\t\t\t\trc, err := cs.CoreV1().ReplicationControllers(o.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tlist, err := getPods(cs, rc.Namespace, rc.Spec.Selector)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tpods = append(pods, list...)\n\t\t\tcase \"Deployment\":\n\t\t\t\tcurrentDeployment, err := cs.AppsV1().Deployments(o.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\t_, _, newReplicaSet, err := kubectlutil.GetAllReplicaSets(currentDeployment, cs.AppsV1())\n\t\t\t\tif err != nil || newReplicaSet == nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tnewDeployment := deployment{\n\t\t\t\t\tnewReplicaSet,\n\t\t\t\t\tcurrentDeployment,\n\t\t\t\t}\n\t\t\t\tdeployments = append(deployments, newDeployment)\n\t\t\tcase \"DaemonSet\":\n\t\t\t\tds, err := cs.AppsV1().DaemonSets(o.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tlist, err := getPods(cs, ds.Namespace, ds.Spec.Selector.MatchLabels)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tpods = append(pods, list...)\n\t\t\tcase \"StatefulSet\":\n\t\t\t\tsts, err := cs.AppsV1().StatefulSets(o.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tlist, err := getPods(cs, sts.Namespace, sts.Spec.Selector.MatchLabels)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tpods = append(pods, list...)\n\t\t\tcase \"ReplicaSet\":\n\t\t\t\trs, err := cs.AppsV1().ReplicaSets(o.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tlist, err := getPods(cs, rs.Namespace, rs.Spec.Selector.MatchLabels)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tpods = append(pods, list...)\n\t\t\tcase \"Service\":\n\t\t\t\tsvc, err := cs.CoreV1().Services(o.Namespace).Get(o.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tservices = append(services, *svc)\n\t\t\t}\n\t\t}\n\t\tisReady := namespacesReady(namespaces) && podsReady(pods) && deploymentsReady(deployments) && servicesReady(services)\n\t\tif !isReady {\n\t\t\tlogAndPrint(\"Waiting for resources ready with timeout of %v\", 
opts.WaitTimeout)\n\t\t}\n\t\treturn isReady, nil\n\t})\n\n\tif errPoll != nil {\n\t\tlogAndPrint(\"Failed to wait for resources ready: %v\", errPoll)\n\t\treturn fmt.Errorf(\"failed to wait for resources ready: %s\", errPoll)\n\t}\n\treturn nil\n}",
"func (initializer *initializer) applyRecomendedResources(pod *v1.Pod, recommendation *apimock.Recommendation, policy apimock.ResourcesPolicy) {\n\tfor _, container := range pod.Spec.Containers {\n\t\tcontainerRecommendation := getRecommendationForContainer(recommendation, container)\n\t\tif containerRecommendation == nil {\n\t\t\tcontinue\n\t\t}\n\t\tcontainerPolicy := getContainerPolicy(container.Name, &policy)\n\t\tapplyVPAPolicy(containerRecommendation, containerPolicy)\n\t\tfor resource, recommended := range containerRecommendation.Resources {\n\t\t\trequested, exists := container.Resources.Requests[resource]\n\t\t\tif exists {\n\t\t\t\t// overwriting existing resource spec\n\t\t\t\tglog.V(2).Infof(\"updating resources request for pod %v container %v resource %v old value: %v new value: %v\",\n\t\t\t\t\tpod.Name, container.Name, resource, requested, recommended)\n\t\t\t} else {\n\t\t\t\t// adding new resource spec\n\t\t\t\tglog.V(2).Infof(\"updating resources request for pod %v container %v resource %v old value: none new value: %v\",\n\t\t\t\t\tpod.Name, container.Name, resource, recommended)\n\t\t\t}\n\n\t\t\tcontainer.Resources.Requests[resource] = recommended\n\t\t}\n\t}\n\n}",
"func initializeResources(ctx context.Context, f *framework.Framework, protocols []v1.Protocol, ports []int32) *kubeManager {\n\tk8s, err := initializeCluster(ctx, f, protocols, ports)\n\tframework.ExpectNoError(err, \"unable to initialize resources\")\n\treturn k8s\n}",
"func allResources(container *api.Container) map[api.ResourceName]bool {\n\tresources := map[api.ResourceName]bool{}\n\tfor _, resource := range supportedComputeResources {\n\t\tresources[resource] = false\n\t}\n\tfor resource := range container.Resources.Requests {\n\t\tresources[resource] = true\n\t}\n\tfor resource := range container.Resources.Limits {\n\t\tresources[resource] = true\n\t}\n\treturn resources\n}",
"func (f *RemoteRuntime) UpdateContainerResources(ctx context.Context, req *kubeapi.UpdateContainerResourcesRequest) (*kubeapi.UpdateContainerResourcesResponse, error) {\n\terr := f.RuntimeService.UpdateContainerResources(ctx, req.ContainerId, &kubeapi.ContainerResources{Linux: req.Linux})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &kubeapi.UpdateContainerResourcesResponse{}, nil\n}",
"func allResources(container *v1.Container) map[v1.ResourceName]bool {\n\tresources := map[v1.ResourceName]bool{}\n\tfor _, resource := range supportedQoSComputeResources.List() {\n\t\tresources[v1.ResourceName(resource)] = false\n\t}\n\tfor resource := range container.Resources.Requests {\n\t\tresources[resource] = true\n\t}\n\tfor resource := range container.Resources.Limits {\n\t\tresources[resource] = true\n\t}\n\treturn resources\n}",
"func (p podWebHook) getInitContainers() []corev1.Container {\n\tcmd := fmt.Sprintf(\"cp /usr/local/bin/%s %s\", injectorExecutable, p.injectorDir)\n\n\tcontainer := corev1.Container{\n\t\tName: \"copy-azurekeyvault-env\",\n\t\tImage: viper.GetString(\"azurekeyvault_env_image\"),\n\t\tImagePullPolicy: corev1.PullPolicy(viper.GetString(\"webhook_container_image_pull_policy\")),\n\t\tCommand: []string{\"sh\", \"-c\", cmd},\n\t\tSecurityContext: &corev1.SecurityContext{\n\t\t\tCapabilities: &corev1.Capabilities{\n\t\t\t\tDrop: []corev1.Capability{\"ALL\"},\n\t\t\t},\n\t\t\tReadOnlyRootFilesystem: &[]bool{viper.GetBool(\"webhook_container_security_context_read_only\")}[0],\n\t\t\tRunAsNonRoot: &[]bool{viper.GetBool(\"webhook_container_security_context_non_root\")}[0],\n\t\t\tPrivileged: &[]bool{viper.GetBool(\"webhook_container_security_context_privileged\")}[0],\n\t\t},\n\t\tVolumeMounts: []corev1.VolumeMount{\n\t\t\t{\n\t\t\t\tName: initContainerVolumeName,\n\t\t\t\tMountPath: p.injectorDir,\n\t\t\t},\n\t\t},\n\t}\n\tif viper.IsSet(\"webhook_container_security_context_user_uid\") {\n\t\tcontainer.SecurityContext.RunAsUser = &[]int64{viper.GetInt64(\"webhook_container_security_context_user_uid\")}[0]\n\t}\n\tif viper.IsSet(\"webhook_container_security_context_group_gid\") {\n\t\tcontainer.SecurityContext.RunAsGroup = &[]int64{viper.GetInt64(\"webhook_container_security_context_group_gid\")}[0]\n\t}\n\n\treturn []corev1.Container{container}\n}",
"func (b *Botanist) DeployContainerRuntimeResources(ctx context.Context) error {\n\tfns := []flow.TaskFn{}\n\tfor _, worker := range b.Shoot.Info.Spec.Provider.Workers {\n\t\tif worker.CRI != nil {\n\t\t\tfor _, containerRuntime := range worker.CRI.ContainerRuntimes {\n\t\t\t\tcr := containerRuntime\n\t\t\t\tworkerName := worker.Name\n\t\t\t\ttoApply := extensionsv1alpha1.ContainerRuntime{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: getContainerRuntimeKey(cr.Type, workerName),\n\t\t\t\t\t\tNamespace: b.Shoot.SeedNamespace,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tfns = append(fns, func(ctx context.Context) error {\n\t\t\t\t\t_, err := controllerutil.CreateOrUpdate(ctx, b.K8sSeedClient.Client(), &toApply, func() error {\n\t\t\t\t\t\tmetav1.SetMetaDataAnnotation(&toApply.ObjectMeta, v1beta1constants.GardenerOperation, v1beta1constants.GardenerOperationReconcile)\n\t\t\t\t\t\ttoApply.Spec.BinaryPath = extensionsv1alpha1.ContainerDRuntimeContainersBinFolder\n\t\t\t\t\t\ttoApply.Spec.Type = cr.Type\n\t\t\t\t\t\tif cr.ProviderConfig != nil {\n\t\t\t\t\t\t\ttoApply.Spec.ProviderConfig = &cr.ProviderConfig.RawExtension\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttoApply.Spec.WorkerPool.Name = workerName\n\t\t\t\t\t\ttoApply.Spec.WorkerPool.Selector.MatchLabels = map[string]string{gardencorev1beta1constants.LabelWorkerPool: workerName, gardencorev1beta1constants.LabelWorkerPoolDeprecated: workerName}\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t})\n\t\t\t\t\treturn err\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\treturn flow.Parallel(fns...)(ctx)\n}",
"func ApplyMachineResources(ctx context.Context, c client.Client) error {\n\tfns := make([]flow.TaskFn, 0, len(machineCRDs))\n\n\tfor _, crd := range machineCRDs {\n\t\tobj := &apiextensionsv1beta1.CustomResourceDefinition{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: crd.Name,\n\t\t\t},\n\t\t}\n\t\tspec := crd.Spec.DeepCopy()\n\n\t\tfns = append(fns, func(ctx context.Context) error {\n\t\t\t_, err := controllerutil.CreateOrUpdate(ctx, c, obj, func() error {\n\t\t\t\tobj.Labels = utils.MergeStringMaps(obj.Labels, deletionProtectionLabels)\n\t\t\t\tobj.Spec = *spec\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\treturn err\n\t\t})\n\t}\n\n\treturn flow.Parallel(fns...)(ctx)\n}",
"func findContainers(managedResource *hclConfigs.Resource, jsonBody jsonObj, hclBody *hclsyntax.Body) (containers []output.ContainerDetails, initContainers []output.ContainerDetails) {\n\tif isKuberneteResource(managedResource) {\n\t\tcontainers, initContainers = extractContainerImagesFromk8sResources(managedResource, hclBody)\n\t} else if isAzureConatinerResource(managedResource) {\n\t\tcontainers = fetchContainersFromAzureResource(jsonBody)\n\t} else if isAwsConatinerResource(managedResource) {\n\t\tcontainers = fetchContainersFromAwsResource(jsonBody, hclBody, managedResource.DeclRange.Filename)\n\t}\n\treturn\n}",
"func (r *podResourceRecommender) estimateContainerResources(s *model.AggregateContainerState,\n\tcustomClient *kubernetes.Clientset, containerName string) RecommendedContainerResources {\n\n\t// fmt.Println(\"Container Name:\", containerName)\t\n\tif (containerName == \"pwitter-front\" || containerName == \"azure-vote-front\") {\n\t\t// custom metrics\n\t\tvar metrics MetricValueList\n\t\tmetricName := \"response_time\"\n\t\terr := getMetrics(customClient, &metrics, metricName)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Cannot get metric %s from Prometheus. Reason: %+v\", metricName, err)\n\t\t}\n\t\tresponse_time := parseValue(metrics.Items[0].Value)\n\t\t// fmt.Println(\"Response time:\", response_time)\n\t\t\n\t\tmetricName = \"response_count\"\n\t\terr = getMetrics(customClient, &metrics, metricName)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"Cannot get metric %s from Prometheus. Reason: %+v\", metricName, err)\n\t\t}\n\t\tresponse_count := parseValue(metrics.Items[0].Value)\n\t\t// fmt.Println(\"Response count:\", response_count)\n\n\t\trequests := response_count - old_count\n\t\told_count = response_count // new count\n\t\trespTime := response_time\n\t\n\t\treq := float64(requests / (*control_replicasNum)) // active requests + queue of requests\n\t\trt := respTime // mean of the response times\n\t\terror := (*control_sla) - rt\n\t\tke := ((*control_a)-1)/((*control_pNom)-1)*error\n\t\tui := uiOld+(1-(*control_pNom))*ke\n\t\tut := ui+ke\n\t\n\t\ttargetCore := req*(ut-(*control_a1Nom)-1000.0*(*control_a2Nom))/(1000.0*(*control_a3Nom)*((*control_a1Nom)-ut))\n\t\n\t\tapproxCore := 0.0\n\t\tif error < 0 {\n\t\t\tapproxCore = *control_coreMax\n\t\t} else {\n\t\t\tapproxCore = math.Min(math.Max(math.Abs(targetCore), *podMinCPUMillicores/1000.0), *control_coreMax)\n\t\t}\n\t\t\n\t\tapproxUt := ((1000.0*(*control_a2Nom)+(*control_a1Nom))*req+1000.0*(*control_a1Nom)*(*control_a3Nom)*approxCore)/(req+1000.0*(*control_a3Nom)*approxCore)\n\t\tuiOld = approxUt-ke\n\n\t\t// fmt.Println(\n\t\t// \t\"== Controller debug ==\",\n\t\t// \t\"\\nRequests:\", req,\n\t\t// \t\"\\nResponse time:\", rt,\n\t\t// \t\" s\\nerror:\", error,\n\t\t// \t\"\\nke:\", ke,\n\t\t// \t\"\\nui:\", ui,\n\t\t// \t\"\\nut:\", ut,\n\t\t// \t\"\\ntargetCore:\", targetCore,\n\t\t// \t\"\\napproxCore:\", approxCore,\n\t\t// \t\"\\napproxUt:\", approxUt,\n\t\t// \t\"\\nuiOld:\", uiOld)\n\t\t\n\t\tfmt.Printf(\"%.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f, %.3f\\n\",\n\t\t\treq, rt, error, ke, ui, ut, targetCore, approxCore, approxUt, uiOld)\n\n\t\treturn RecommendedContainerResources{\n\t\t\tTarget: model.Resources{\n\t\t\t\tmodel.ResourceCPU: model.CPUAmountFromCores(approxCore),\n\t\t\t\tmodel.ResourceMemory: model.MemoryAmountFromBytes(*control_memory*1024*1024),\n\t\t\t},\n\t\t\tLowerBound: model.Resources{\n\t\t\t\tmodel.ResourceCPU: model.CPUAmountFromCores(*podMinCPUMillicores/1000.0),\n\t\t\t\tmodel.ResourceMemory: model.MemoryAmountFromBytes(*control_memory*1024*1024),\n\t\t\t},\n\t\t\tUpperBound: model.Resources{\n\t\t\t\tmodel.ResourceCPU: model.CPUAmountFromCores(*control_coreMax),\n\t\t\t\tmodel.ResourceMemory: model.MemoryAmountFromBytes(*control_memory*1024*1024),\n\t\t\t},\n\t\t}\n\t} else {\n\t\treturn RecommendedContainerResources{\n\t\t\tr.targetEstimator.GetResourceEstimation(s),\n\t\t\tr.lowerBoundEstimator.GetResourceEstimation(s),\n\t\t\tr.upperBoundEstimator.GetResourceEstimation(s),\n\t\t}\n\t}\n\t\n}",
"func withLimits(p *st.PodWrapper, resMap map[string]string, initContainer bool) *st.PodWrapper {\n\tif len(resMap) == 0 {\n\t\treturn p\n\t}\n\n\tres := v1.ResourceList{}\n\tfor k, v := range resMap {\n\t\tres[v1.ResourceName(k)] = resource.MustParse(v)\n\t}\n\n\tvar containers *[]v1.Container\n\tvar cntName string\n\tif initContainer {\n\t\tcontainers = &p.Obj().Spec.InitContainers\n\t\tcntName = \"initcnt\"\n\t} else {\n\t\tcontainers = &p.Obj().Spec.Containers\n\t\tcntName = \"cnt\"\n\t}\n\n\t*containers = append(*containers, v1.Container{\n\t\tName: fmt.Sprintf(\"%s-%d\", cntName, len(*containers)+1),\n\t\tImage: imageutils.GetPauseImageName(),\n\t\tResources: v1.ResourceRequirements{\n\t\t\tLimits: res,\n\t\t},\n\t})\n\n\treturn p\n}",
"func (s *Server) UpdateContainerResources(ctx context.Context, req *pb.UpdateContainerResourcesRequest) (resp *pb.UpdateContainerResourcesResponse, err error) {\n\tc, err := s.GetContainerFromShortID(req.GetContainerId())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstate := c.State()\n\tif !(state.Status == oci.ContainerStateRunning || state.Status == oci.ContainerStateCreated) {\n\t\treturn nil, fmt.Errorf(\"container %s is not running or created state: %s\", c.ID(), state.Status)\n\t}\n\n\tresources := toOCIResources(req.GetLinux())\n\tif err := s.Runtime().UpdateContainer(c, resources); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &pb.UpdateContainerResourcesResponse{}, nil\n}",
"func (p *DockerPod) UpdateResources(id string, resource *schedTypes.TaskResources) error {\n\tvar exist bool\n\tvar conTask *container.BcsContainerTask\n\n\tfor _, con := range p.conTasks {\n\t\tif con.RuntimeConf.ID == id {\n\t\t\texist = true\n\t\t\tconTask = con\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif !exist {\n\t\treturn fmt.Errorf(\"container id %s is invalid\", id)\n\t}\n\n\terr := p.conClient.UpdateResources(id, resource)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconTask.RuntimeConf.Resource.Cpus = *resource.ReqCpu\n\tconTask.RuntimeConf.Resource.Mem = *resource.ReqMem\n\treturn nil\n}",
"func PodRequestsAndLimits(pod *v1.Pod) (reqs, limits v1.ResourceList) {\n\treqs, limits = v1.ResourceList{}, v1.ResourceList{}\n\tfor _, container := range pod.Spec.Containers {\n\t\taddResourceList(reqs, container.Resources.Requests)\n\t\taddResourceList(limits, container.Resources.Limits)\n\t}\n\t// init containers define the minimum of any resource\n\tfor _, container := range pod.Spec.InitContainers {\n\t\tmaxResourceList(reqs, container.Resources.Requests)\n\t\tmaxResourceList(limits, container.Resources.Limits)\n\t}\n\treturn\n}",
"func (r *ContainerizedWorkloadReconciler) cleanupResources(ctx context.Context,\n\tworkload *oamv1alpha2.ContainerizedWorkload, deployUID, serviceUID *types.UID) error {\n\tlog := r.Log.WithValues(\"gc deployment\", workload.Name)\n\tvar deploy appsv1.Deployment\n\tvar service corev1.Service\n\tfor _, res := range workload.Status.Resources {\n\t\tuid := res.UID\n\t\tif res.Kind == KindDeployment {\n\t\t\tif uid != *deployUID {\n\t\t\t\tlog.Info(\"Found an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t\tdn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, dn, &deploy); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &deploy); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned deployment\", \"deployment UID\", *deployUID, \"orphaned UID\", uid)\n\t\t\t}\n\t\t} else if res.Kind == KindService {\n\t\t\tif uid != *serviceUID {\n\t\t\t\tlog.Info(\"Found an orphaned service\", \"orphaned UID\", uid)\n\t\t\t\tsn := client.ObjectKey{Name: res.Name, Namespace: workload.Namespace}\n\t\t\t\tif err := r.Get(ctx, sn, &service); err != nil {\n\t\t\t\t\tif apierrors.IsNotFound(err) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := r.Delete(ctx, &service); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Removed an orphaned service\", \"orphaned UID\", uid)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func DeployResources(testRunner base.ClusterTestRunner) error {\n\t// Deploys a static set of resources\n\tlog.Printf(\"Deploying resources\")\n\n\tpub, _ := testRunner.GetPublicContext(1)\n\tprv, _ := testRunner.GetPrivateContext(1)\n\n\t// Deploys the same set of resources against both clusters\n\t// resources will have index (1 or 2), depending on the\n\t// cluster they are being deployed to\n\tfor i, cluster := range []*client.VanClient{pub.VanClient, prv.VanClient} {\n\t\tclusterIdx := i + 1\n\n\t\t// Annotations (optional) to deployment and services\n\t\tdepAnnotations := map[string]string{}\n\t\tstatefulSetAnnotations := map[string]string{}\n\t\tdaemonSetAnnotations := map[string]string{}\n\t\tsvcNoTargetAnnotations := map[string]string{}\n\t\tsvcTargetAnnotations := map[string]string{}\n\t\tpopulateAnnotations(clusterIdx, depAnnotations, svcNoTargetAnnotations, svcTargetAnnotations,\n\t\t\tstatefulSetAnnotations, daemonSetAnnotations)\n\n\t\t// Create a service without annotations to be taken by Skupper as a deployment will be annotated with this service address\n\t\tif _, err := createService(cluster, fmt.Sprintf(\"nginx-%d-dep-not-owned\", clusterIdx), map[string]string{}); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// One single deployment will be created (for the nginx http server)\n\t\tif _, err := createDeployment(cluster, depAnnotations); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := createStatefulSet(cluster, statefulSetAnnotations); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif _, err := createDaemonSet(cluster, daemonSetAnnotations); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Now create two services. One that does not have a target address,\n\t\t// and another that provides a target address.\n\t\tif _, err := createService(cluster, fmt.Sprintf(\"nginx-%d-svc-exp-notarget\", clusterIdx), svcNoTargetAnnotations); err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// This service with the target should not be exposed (only the target service will be)\n\t\tif _, err := createService(cluster, fmt.Sprintf(\"nginx-%d-svc-target\", clusterIdx), svcTargetAnnotations); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Wait for pods to be running\n\tfor _, cluster := range []*client.VanClient{pub.VanClient, prv.VanClient} {\n\t\tlog.Printf(\"waiting on pods to be running on %s\", cluster.Namespace)\n\t\t// Get all pod names\n\t\tpodList, err := cluster.KubeClient.CoreV1().Pods(cluster.Namespace).List(context.TODO(), metav1.ListOptions{})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif len(podList.Items) == 0 {\n\t\t\treturn fmt.Errorf(\"no pods running\")\n\t\t}\n\n\t\tfor _, pod := range podList.Items {\n\t\t\t_, err := kube.WaitForPodStatus(cluster.Namespace, cluster.KubeClient, pod.Name, corev1.PodRunning, timeout, interval)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func ComputePodResourceRequest(pod *v1.Pod) *Resource {\n\tresource := &Resource{}\n\tfor _, container := range pod.Spec.Containers {\n\t\tresource.Add(container.Resources.Requests)\n\t}\n\n\t// take max_resource(sum_pod, any_init_container)\n\tfor _, container := range pod.Spec.InitContainers {\n\t\tresource.SetMaxResource(container.Resources.Requests)\n\t}\n\n\t// If Overhead is being utilized, add to the total requests for the pod\n\tif pod.Spec.Overhead != nil {\n\t\tresource.Add(pod.Spec.Overhead)\n\t}\n\n\treturn resource\n}",
"func (pc *BasicECSPodCreator) translateContainerResources(containers []*ecs.Container, defs []cocoa.ECSContainerDefinition) []cocoa.ECSContainerResources {\n\tvar resources []cocoa.ECSContainerResources\n\n\tfor _, container := range containers {\n\t\tif container == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tname := utility.FromStringPtr(container.Name)\n\t\tres := cocoa.NewECSContainerResources().\n\t\t\tSetContainerID(utility.FromStringPtr(container.ContainerArn)).\n\t\t\tSetName(name).\n\t\t\tSetSecrets(pc.translateContainerSecrets(defs))\n\t\tresources = append(resources, *res)\n\t}\n\n\treturn resources\n}",
"func UpdateAllDynamicResources(client client.Client, log logr.Logger, scheme *runtime.Scheme, cache *helpers.ResourceCache) (ctrl.Result, error) {\n\t// Clear the watched roles cache maps since we're about to recreate them anyway - gets rid of anything we used to care about but no longer need\n\tcache.WatchedRoles = map[types.NamespacedName]bool{}\n\tcache.WatchedClusterRoles = map[types.NamespacedName]bool{}\n\n\tdynamicRoleList := &rbacv1alpha1.DynamicRoleList{}\n\terr := client.List(context.TODO(), dynamicRoleList)\n\tif err != nil {\n\t\tlog.Error(err, \"could not list Dynamic Roles\")\n\t\treturn reconcile.Result{}, err\n\t}\n\tfor _, dynamicRole := range dynamicRoleList.Items {\n\t\t_, err := ReconcileDynamicRole(&dynamicRole, client, scheme, log, cache)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\tdynamicClusterRoleList := &rbacv1alpha1.DynamicClusterRoleList{}\n\terr = client.List(context.TODO(), dynamicClusterRoleList)\n\tif err != nil {\n\t\tlog.Error(err, \"could not list Dynamic Cluster Roles\")\n\t\treturn reconcile.Result{}, err\n\t}\n\tfor _, dynamicClusterRole := range dynamicClusterRoleList.Items {\n\t\t_, err := ReconcileDynamicClusterRole(&dynamicClusterRole, client, scheme, log, cache)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\tlog.Info(\"All computed roles have been reconciled\")\n\treturn reconcile.Result{}, nil\n}",
"func (m *Manifest) jobsToInitContainers(igName string, jobs []Job, namespace string) ([]v1.Container, error) {\n\tinitContainers := []v1.Container{}\n\n\t// one init container for each release, for copying specs\n\tdoneReleases := map[string]bool{}\n\tfor _, job := range jobs {\n\t\tif _, ok := doneReleases[job.Release]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tdoneReleases[job.Release] = true\n\t\treleaseImage, err := m.GetReleaseImage(igName, job.Name)\n\t\tif err != nil {\n\t\t\treturn []v1.Container{}, err\n\t\t}\n\n\t\tinContainerReleasePath := filepath.Join(\"/var/vcap/rendering/jobs-src/\", job.Release)\n\t\tinitContainers = append(initContainers, v1.Container{\n\t\t\tName: fmt.Sprintf(\"spec-copier-%s\", job.Name),\n\t\t\tImage: releaseImage,\n\t\t\tVolumeMounts: []v1.VolumeMount{\n\t\t\t\t{\n\t\t\t\t\tName: \"rendering-data\",\n\t\t\t\t\tMountPath: \"/var/vcap/rendering\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tCommand: []string{\"bash\", \"-c\", fmt.Sprintf(`mkdir -p \"%s\" && cp -ar /var/vcap/jobs-src/* \"%s\"`, inContainerReleasePath, inContainerReleasePath)},\n\t\t})\n\t}\n\n\t_, resolvedPropertiesSecretName := m.CalculateEJobOutputSecretPrefixAndName(DeploymentSecretTypeInstanceGroupResolvedProperties, igName)\n\tvolumeMounts := []v1.VolumeMount{\n\t\t{\n\t\t\tName: \"rendering-data\",\n\t\t\tMountPath: \"/var/vcap/rendering\",\n\t\t},\n\t\t{\n\t\t\tName: \"jobs-dir\",\n\t\t\tMountPath: \"/var/vcap/jobs\",\n\t\t},\n\t\t{\n\t\t\tName: generateVolumeName(resolvedPropertiesSecretName),\n\t\t\tMountPath: fmt.Sprintf(\"/var/run/secrets/resolved-properties/%s\", igName),\n\t\t\tReadOnly: true,\n\t\t},\n\t}\n\n\tinitContainers = append(initContainers, v1.Container{\n\t\tName: fmt.Sprintf(\"renderer-%s\", igName),\n\t\tImage: GetOperatorDockerImage(),\n\t\tVolumeMounts: volumeMounts,\n\t\tEnv: []v1.EnvVar{\n\t\t\t{\n\t\t\t\tName: \"INSTANCE_GROUP_NAME\",\n\t\t\t\tValue: igName,\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"MANIFEST_PATH\",\n\t\t\t\tValue: fmt.Sprintf(\"/var/run/secrets/resolved-properties/%s/properties.yaml\", igName),\n\t\t\t},\n\t\t\t{\n\t\t\t\tName: \"JOBS_DIR\",\n\t\t\t\tValue: \"/var/vcap/rendering\",\n\t\t\t},\n\t\t},\n\t\tCommand: []string{\"/bin/sh\"},\n\t\tArgs: []string{\"-c\", `cf-operator template-render`},\n\t})\n\n\treturn initContainers, nil\n}",
"func (c *ClusterResourceSet) AddAllResources() error {\n\n\tif err := c.spec.HasSufficientSubnets(); err != nil {\n\t\treturn err\n\t}\n\n\tvpcResource, err := c.vpcResourceSet.AddResources()\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error adding VPC resources\")\n\t}\n\n\tc.vpcResourceSet.AddOutputs()\n\tclusterSG := c.addResourcesForSecurityGroups(vpcResource)\n\n\tif privateCluster := c.spec.PrivateCluster; privateCluster.Enabled {\n\t\tvpcEndpointResourceSet := NewVPCEndpointResourceSet(c.provider, c.rs, c.spec, vpcResource.VPC, vpcResource.SubnetDetails.Private, clusterSG.ClusterSharedNode)\n\n\t\tif err := vpcEndpointResourceSet.AddResources(); err != nil {\n\t\t\treturn errors.Wrap(err, \"error adding resources for VPC endpoints\")\n\t\t}\n\t}\n\n\tc.addResourcesForIAM()\n\tc.addResourcesForControlPlane(vpcResource.SubnetDetails)\n\n\tif len(c.spec.FargateProfiles) > 0 {\n\t\tc.addResourcesForFargate()\n\t}\n\n\tc.rs.defineOutput(outputs.ClusterStackName, gfn.RefStackName, false, func(v string) error {\n\t\tif c.spec.Status == nil {\n\t\t\tc.spec.Status = &api.ClusterStatus{}\n\t\t}\n\t\tc.spec.Status.StackName = v\n\t\treturn nil\n\t})\n\n\tc.Template().Mappings[servicePrincipalPartitionMapName] = servicePrincipalPartitionMappings\n\n\tc.rs.template.Description = fmt.Sprintf(\n\t\t\"%s (dedicated VPC: %v, dedicated IAM: %v) %s\",\n\t\tclusterTemplateDescription,\n\t\tc.spec.VPC.ID == \"\",\n\t\tc.rs.withIAM,\n\t\ttemplateDescriptionSuffix,\n\t)\n\n\treturn nil\n}",
"func ApplyAll(manifests name.ManifestMap, version pkgversion.Version, opts *kubectlcmd.Options) (CompositeOutput, error) {\n\tlog.Infof(\"Preparing manifests for these components:\")\n\tfor c := range manifests {\n\t\tlog.Infof(\"- %s\", c)\n\t}\n\tlog.Infof(\"Component dependencies tree: \\n%s\", installTreeString())\n\tif err := InitK8SRestClient(opts.Kubeconfig, opts.Context); err != nil {\n\t\treturn nil, err\n\t}\n\treturn applyRecursive(manifests, version, opts)\n}",
"func (c *clusterCache) startMissingWatches() error {\n\tapis, err := c.kubectl.GetAPIResources(c.config, true, c.settings.ResourcesFilter)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclient, err := c.kubectl.NewDynamicClient(c.config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclientset, err := kubernetes.NewForConfig(c.config)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnamespacedResources := make(map[schema.GroupKind]bool)\n\tfor i := range apis {\n\t\tapi := apis[i]\n\t\tnamespacedResources[api.GroupKind] = api.Meta.Namespaced\n\t\tif _, ok := c.apisMeta[api.GroupKind]; !ok {\n\t\t\tctx, cancel := context.WithCancel(context.Background())\n\t\t\tc.apisMeta[api.GroupKind] = &apiMeta{namespaced: api.Meta.Namespaced, watchCancel: cancel}\n\n\t\t\terr := c.processApi(client, api, func(resClient dynamic.ResourceInterface, ns string) error {\n\t\t\t\tresourceVersion, err := c.loadInitialState(ctx, api, resClient, ns)\n\t\t\t\tif err != nil && c.isRestrictedResource(err) {\n\t\t\t\t\tkeep := false\n\t\t\t\t\tif c.respectRBAC == RespectRbacStrict {\n\t\t\t\t\t\tk, permErr := c.checkPermission(ctx, clientset.AuthorizationV1().SelfSubjectAccessReviews(), api)\n\t\t\t\t\t\tif permErr != nil {\n\t\t\t\t\t\t\treturn fmt.Errorf(\"failed to check permissions for resource %s: %w, original error=%v\", api.GroupKind.String(), permErr, err.Error())\n\t\t\t\t\t\t}\n\t\t\t\t\t\tkeep = k\n\t\t\t\t\t}\n\t\t\t\t\t// if we are not allowed to list the resource, remove it from the watch list\n\t\t\t\t\tif !keep {\n\t\t\t\t\t\tdelete(c.apisMeta, api.GroupKind)\n\t\t\t\t\t\tdelete(namespacedResources, api.GroupKind)\n\t\t\t\t\t\treturn nil\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tgo c.watchEvents(ctx, api, resClient, ns, resourceVersion)\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tc.namespacedResources = namespacedResources\n\treturn nil\n}",
"func (r *remoteRuntimeService) UpdateContainerResources(ctx context.Context, containerID string, resources *runtimeapi.ContainerResources) (err error) {\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] UpdateContainerResources\", \"containerID\", containerID, \"timeout\", r.timeout)\n\tctx, cancel := context.WithTimeout(ctx, r.timeout)\n\tdefer cancel()\n\n\tif _, err := r.runtimeClient.UpdateContainerResources(ctx, &runtimeapi.UpdateContainerResourcesRequest{\n\t\tContainerId: containerID,\n\t\tLinux: resources.GetLinux(),\n\t\tWindows: resources.GetWindows(),\n\t}); err != nil {\n\t\tklog.ErrorS(err, \"UpdateContainerResources from runtime service failed\", \"containerID\", containerID)\n\t\treturn err\n\t}\n\tklog.V(10).InfoS(\"[RemoteRuntimeService] UpdateContainerResources Response\", \"containerID\", containerID)\n\n\treturn nil\n}",
"func (h *HelmReconciler) PruneResources(gvks []schema.GroupVersionKind, all bool, namespace string) error {\n\tallErrors := []error{}\n\townerLabels := h.customizer.PruningDetails().GetOwnerLabels()\n\townerAnnotations := h.customizer.PruningDetails().GetOwnerAnnotations()\n\tfor _, gvk := range gvks {\n\t\tobjects := &unstructured.UnstructuredList{}\n\t\tobjects.SetGroupVersionKind(gvk)\n\t\terr := h.client.List(context.TODO(), objects, client.MatchingLabels(ownerLabels), client.InNamespace(namespace))\n\t\tif err != nil {\n\t\t\t// we only want to retrieve resources clusters\n\t\t\tlog.Warnf(\"retrieving resources to prune type %s: %s not found\", gvk.String(), err)\n\t\t\tcontinue\n\t\t}\n\tobjectLoop:\n\t\tfor _, object := range objects.Items {\n\t\t\tannotations := object.GetAnnotations()\n\t\t\tfor ownerKey, ownerValue := range ownerAnnotations {\n\t\t\t\t// we only want to delete objects that contain the annotations\n\t\t\t\t// if we're not pruning all objects, we only want to prune those whose annotation value does not match what is expected\n\t\t\t\tif value, ok := annotations[ownerKey]; !ok || (!all && value == ownerValue) {\n\t\t\t\t\tcontinue objectLoop\n\t\t\t\t}\n\t\t\t}\n\t\t\terr = h.client.Delete(context.TODO(), &object, client.PropagationPolicy(metav1.DeletePropagationBackground))\n\t\t\tif err == nil {\n\t\t\t\tif listenerErr := h.customizer.Listener().ResourceDeleted(&object); listenerErr != nil {\n\t\t\t\t\tlog.Errorf(\"error calling listener: %s\", err)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif listenerErr := h.customizer.Listener().ResourceError(&object, err); listenerErr != nil {\n\t\t\t\t\tlog.Errorf(\"error calling listener: %s\", err)\n\t\t\t\t}\n\t\t\t\tallErrors = append(allErrors, err)\n\t\t\t}\n\t\t}\n\t}\n\treturn utilerrors.NewAggregate(allErrors)\n}",
"func containerResources(j *v1alpha1.Jira) v1.ResourceRequirements {\n\tresources := v1.ResourceRequirements{}\n\tif j.Spec.Pod != nil {\n\t\tresources = j.Spec.Pod.Resources\n\t}\n\treturn resources\n}",
"func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, error) {\n\tpods := make(map[kubetypes.UID]*kubecontainer.Pod)\n\tsandboxes, err := m.getKubeletSandboxes(all)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range sandboxes {\n\t\ts := sandboxes[i]\n\t\tif s.Metadata == nil {\n\t\t\tklog.V(4).Infof(\"Sandbox does not have metadata: %+v\", s)\n\t\t\tcontinue\n\t\t}\n\t\tpodUID := kubetypes.UID(s.Metadata.Uid)\n\t\tif _, ok := pods[podUID]; !ok {\n\t\t\tpods[podUID] = &kubecontainer.Pod{\n\t\t\t\tID: podUID,\n\t\t\t\tName: s.Metadata.Name,\n\t\t\t\tNamespace: s.Metadata.Namespace,\n\t\t\t\tTenant: s.Metadata.Tenant,\n\t\t\t}\n\t\t}\n\t\tp := pods[podUID]\n\t\tconverted, err := m.sandboxToKubeContainer(s)\n\t\tif err != nil {\n\t\t\tklog.V(4).Infof(\"Convert %q sandbox %v of pod %q failed: %v\", m.runtimeName, s, podUID, err)\n\t\t\tcontinue\n\t\t}\n\t\tp.Sandboxes = append(p.Sandboxes, converted)\n\t}\n\n\tcontainers, err := m.getKubeletContainers(all)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range containers {\n\t\tc := containers[i]\n\t\tif c.Metadata == nil {\n\t\t\tklog.V(4).Infof(\"Container does not have metadata: %+v\", c)\n\t\t\tcontinue\n\t\t}\n\n\t\tlabelledInfo := getContainerInfoFromLabels(c.Labels)\n\t\tpod, found := pods[labelledInfo.PodUID]\n\t\tif !found {\n\t\t\tpod = &kubecontainer.Pod{\n\t\t\t\tID: labelledInfo.PodUID,\n\t\t\t\tName: labelledInfo.PodName,\n\t\t\t\tNamespace: labelledInfo.PodNamespace,\n\t\t\t\tTenant: labelledInfo.PodTenant,\n\t\t\t}\n\t\t\tpods[labelledInfo.PodUID] = pod\n\t\t}\n\n\t\tconverted, err := m.toKubeContainer(c)\n\t\tif err != nil {\n\t\t\tklog.V(4).Infof(\"Convert %s container %v of pod %q failed: %v\", m.runtimeName, c, labelledInfo.PodUID, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tpod.Containers = append(pod.Containers, converted)\n\t}\n\n\t// Convert map to list.\n\tvar result []*kubecontainer.Pod\n\tfor _, pod := range pods {\n\t\tresult = append(result, pod)\n\t}\n\n\treturn result, nil\n}",
"func (r *Reconciler) SetDesiredCoreApp() {\n\tr.CoreApp.Spec.Template.Labels[\"noobaa-core\"] = r.Request.Name\n\tr.CoreApp.Spec.Template.Labels[\"noobaa-mgmt\"] = r.Request.Name\n\tr.CoreApp.Spec.Template.Labels[\"noobaa-s3\"] = r.Request.Name\n\tr.CoreApp.Spec.Selector.MatchLabels[\"noobaa-core\"] = r.Request.Name\n\tr.CoreApp.Spec.ServiceName = r.ServiceMgmt.Name\n\n\tpodSpec := &r.CoreApp.Spec.Template.Spec\n\tpodSpec.ServiceAccountName = \"noobaa-operator\" // TODO do we use the same SA?\n\tfor i := range podSpec.InitContainers {\n\t\tc := &podSpec.InitContainers[i]\n\t\tif c.Name == \"init-mongo\" {\n\t\t\tc.Image = r.NooBaa.Status.ActualImage\n\t\t}\n\t}\n\tfor i := range podSpec.Containers {\n\t\tc := &podSpec.Containers[i]\n\t\tif c.Name == \"noobaa-server\" {\n\t\t\tc.Image = r.NooBaa.Status.ActualImage\n\t\t\tfor j := range c.Env {\n\t\t\t\tif c.Env[j].Name == \"AGENT_PROFILE\" {\n\t\t\t\t\tc.Env[j].Value = fmt.Sprintf(`{ \"image\": \"%s\" }`, r.NooBaa.Status.ActualImage)\n\t\t\t\t}\n\t\t\t}\n\t\t\tif r.NooBaa.Spec.CoreResources != nil {\n\t\t\t\tc.Resources = *r.NooBaa.Spec.CoreResources\n\t\t\t}\n\t\t} else if c.Name == \"mongodb\" {\n\t\t\tif r.NooBaa.Spec.MongoImage == nil {\n\t\t\t\tc.Image = options.MongoImage\n\t\t\t} else {\n\t\t\t\tc.Image = *r.NooBaa.Spec.MongoImage\n\t\t\t}\n\t\t\tif r.NooBaa.Spec.MongoResources != nil {\n\t\t\t\tc.Resources = *r.NooBaa.Spec.MongoResources\n\t\t\t}\n\t\t}\n\t}\n\tif r.NooBaa.Spec.ImagePullSecret == nil {\n\t\tpodSpec.ImagePullSecrets =\n\t\t\t[]corev1.LocalObjectReference{}\n\t} else {\n\t\tpodSpec.ImagePullSecrets =\n\t\t\t[]corev1.LocalObjectReference{*r.NooBaa.Spec.ImagePullSecret}\n\t}\n\tfor i := range r.CoreApp.Spec.VolumeClaimTemplates {\n\t\tpvc := &r.CoreApp.Spec.VolumeClaimTemplates[i]\n\t\tpvc.Spec.StorageClassName = r.NooBaa.Spec.StorageClassName\n\n\t\t// TODO we want to own the PVC's by NooBaa system but get errors on openshift:\n\t\t// Warning FailedCreate 56s statefulset-controller\n\t\t// create Pod noobaa-core-0 in StatefulSet noobaa-core failed error:\n\t\t// Failed to create PVC mongo-datadir-noobaa-core-0:\n\t\t// persistentvolumeclaims \"mongo-datadir-noobaa-core-0\" is forbidden:\n\t\t// cannot set blockOwnerDeletion if an ownerReference refers to a resource\n\t\t// you can't set finalizers on: , <nil>, ...\n\n\t\t// r.Own(pvc)\n\t}\n}",
"func (a KubectlLayerApplier) GetResources(ctx context.Context, layer layers.Layer) (resources []kraanv1alpha1.Resource, err error) {\n\tlogging.TraceCall(a.getLog(layer))\n\tdefer logging.TraceExit(a.getLog(layer))\n\n\tsourceHrs, clusterHrs, err := a.GetSourceAndClusterHelmReleases(ctx, layer)\n\tif err != nil {\n\t\treturn nil, errors.WithMessagef(err, \"%s - failed to get helm releases\", logging.CallerStr(logging.Me))\n\t}\n\n\tfor key, source := range sourceHrs {\n\t\tresource := kraanv1alpha1.Resource{\n\t\t\tNamespace: source.GetNamespace(),\n\t\t\tName: source.GetName(),\n\t\t\tKind: \"helmreleases.helm.toolkit.fluxcd.io\",\n\t\t\tLastTransitionTime: metav1.Now(),\n\t\t\tStatus: \"Unknown\",\n\t\t}\n\t\thr, ok := clusterHrs[key]\n\t\tif ok {\n\t\t\ta.logDebug(\"HelmRelease in AddonsLayer source directory and on cluster\", layer, logging.GetObjKindNamespaceName(source)...)\n\t\t\tresources = append(resources, a.getResourceInfo(layer, resource, hr.Status.Conditions))\n\t\t} else {\n\t\t\t// this resource exists in the source directory but not on the cluster\n\t\t\ta.logDebug(\"HelmRelease in AddonsLayer source directory but not on cluster\", layer, logging.GetObjKindNamespaceName(source)...)\n\t\t\tresource.Status = kraanv1alpha1.NotDeployed\n\t\t\tresources = append(resources, resource)\n\t\t}\n\t}\n\n\tfor key, hr := range clusterHrs {\n\t\tresource := kraanv1alpha1.Resource{\n\t\t\tNamespace: hr.GetNamespace(),\n\t\t\tName: hr.GetName(),\n\t\t\tKind: \"helmreleases.helm.toolkit.fluxcd.io\",\n\t\t\tLastTransitionTime: metav1.Now(),\n\t\t\tStatus: \"Unknown\",\n\t\t}\n\t\t_, ok := sourceHrs[key]\n\t\tif !ok {\n\t\t\ta.logDebug(\"HelmRelease not in AddonsLayer source directory but on cluster\", layer, \"name\", clusterHrs[key])\n\t\t\tresources = append(resources, a.getResourceInfo(layer, resource, hr.Status.Conditions))\n\t\t}\n\t}\n\treturn resources, err\n}",
"func RequestsForPods(pods ...*v1.Pod) v1.ResourceList {\n\tresources := []v1.ResourceList{}\n\tfor _, pod := range pods {\n\t\tfor _, container := range pod.Spec.Containers {\n\t\t\tresources = append(resources, container.Resources.Requests)\n\t\t}\n\t}\n\treturn Merge(resources...)\n}",
"func computeAppResources(isolators types.Isolators) (appResources, error) {\n\tres := appResources{}\n\tvar err error\n\n\twithIsolator := func(name string, f func() error) error {\n\t\tok, err := cgroup.IsIsolatorSupported(name)\n\t\tif err != nil {\n\t\t\treturn errwrap.Wrapf(\"could not check for isolator \"+name, err)\n\t\t}\n\n\t\tif !ok {\n\t\t\tfmt.Fprintf(os.Stderr, \"warning: resource/%s isolator set but support disabled in the kernel, skipping\\n\", name)\n\t\t\treturn nil\n\t\t}\n\n\t\treturn f()\n\t}\n\n\tfor _, isolator := range isolators {\n\t\tif err != nil {\n\t\t\treturn res, err\n\t\t}\n\n\t\tswitch v := isolator.Value().(type) {\n\t\tcase *types.ResourceMemory:\n\t\t\terr = withIsolator(\"memory\", func() error {\n\t\t\t\tif v.Limit() == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\n\t\t\t\tval := uint64(v.Limit().Value())\n\t\t\t\tres.MemoryLimit = &val\n\t\t\t\treturn nil\n\t\t\t})\n\t\tcase *types.ResourceCPU:\n\t\t\terr = withIsolator(\"cpu\", func() error {\n\t\t\t\tif v.Limit() == nil {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tif v.Limit().Value() > MaxMilliValue {\n\t\t\t\t\treturn fmt.Errorf(\"cpu limit exceeds the maximum millivalue: %v\", v.Limit().String())\n\t\t\t\t}\n\n\t\t\t\tval := uint64(v.Limit().MilliValue() / 10)\n\t\t\t\tres.CPUQuota = &val\n\t\t\t\treturn nil\n\t\t\t})\n\t\tcase *types.LinuxCPUShares:\n\t\t\terr = withIsolator(\"cpu\", func() error {\n\t\t\t\tval := uint64(*v)\n\t\t\t\tres.LinuxCPUShares = &val\n\t\t\t\treturn nil\n\t\t\t})\n\t\tcase *types.LinuxOOMScoreAdj:\n\t\t\tval := int(*v)\n\t\t\tres.LinuxOOMScoreAdjust = &val\n\t\t}\n\t}\n\n\treturn res, err\n}",
"func (m *kubeGenericRuntimeManager) computePodActions(ctx context.Context, pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {\n\tklog.V(5).InfoS(\"Syncing Pod\", \"pod\", klog.KObj(pod))\n\n\tcreatePodSandbox, attempt, sandboxID := runtimeutil.PodSandboxChanged(pod, podStatus)\n\tchanges := podActions{\n\t\tKillPod: createPodSandbox,\n\t\tCreateSandbox: createPodSandbox,\n\t\tSandboxID: sandboxID,\n\t\tAttempt: attempt,\n\t\tContainersToStart: []int{},\n\t\tContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),\n\t}\n\n\t// If we need to (re-)create the pod sandbox, everything will need to be\n\t// killed and recreated, and init containers should be purged.\n\tif createPodSandbox {\n\t\tif !shouldRestartOnFailure(pod) && attempt != 0 && len(podStatus.ContainerStatuses) != 0 {\n\t\t\t// Should not restart the pod, just return.\n\t\t\t// we should not create a sandbox, and just kill the pod if it is already done.\n\t\t\t// if all containers are done and should not be started, there is no need to create a new sandbox.\n\t\t\t// this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.\n\t\t\t//\n\t\t\t// If ContainerStatuses is empty, we assume that we've never\n\t\t\t// successfully created any containers. In this case, we should\n\t\t\t// retry creating the sandbox.\n\t\t\tchanges.CreateSandbox = false\n\t\t\treturn changes\n\t\t}\n\n\t\t// Get the containers to start, excluding the ones that succeeded if RestartPolicy is OnFailure.\n\t\tvar containersToStart []int\n\t\tfor idx, c := range pod.Spec.Containers {\n\t\t\tif pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure && containerSucceeded(&c, podStatus) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontainersToStart = append(containersToStart, idx)\n\t\t}\n\n\t\t// If there is any regular container, it means all init containers have\n\t\t// been initialized.\n\t\thasInitialized := hasAnyRegularContainerCreated(pod, podStatus)\n\t\t// We should not create a sandbox, and just kill the pod if initialization\n\t\t// is done and there is no container to start.\n\t\tif hasInitialized && len(containersToStart) == 0 {\n\t\t\tchanges.CreateSandbox = false\n\t\t\treturn changes\n\t\t}\n\n\t\t// If we are creating a pod sandbox, we should restart from the initial\n\t\t// state.\n\t\tif len(pod.Spec.InitContainers) != 0 {\n\t\t\t// Pod has init containers, return the first one.\n\t\t\tchanges.InitContainersToStart = []int{0}\n\t\t\treturn changes\n\t\t}\n\t\tchanges.ContainersToStart = containersToStart\n\t\treturn changes\n\t}\n\n\t// Ephemeral containers may be started even if initialization is not yet complete.\n\tfor i := range pod.Spec.EphemeralContainers {\n\t\tc := (*v1.Container)(&pod.Spec.EphemeralContainers[i].EphemeralContainerCommon)\n\n\t\t// Ephemeral Containers are never restarted\n\t\tif podStatus.FindContainerStatusByName(c.Name) == nil {\n\t\t\tchanges.EphemeralContainersToStart = append(changes.EphemeralContainersToStart, i)\n\t\t}\n\t}\n\n\thasInitialized := m.computeInitContainerActions(pod, podStatus, &changes)\n\tif changes.KillPod || !hasInitialized {\n\t\t// Initialization failed or still in progress. 
Skip inspecting non-init\n\t\t// containers.\n\t\treturn changes\n\t}\n\n\tif isInPlacePodVerticalScalingAllowed(pod) {\n\t\tchanges.ContainersToUpdate = make(map[v1.ResourceName][]containerToUpdateInfo)\n\t\tlatestPodStatus, err := m.GetPodStatus(ctx, podStatus.ID, pod.Name, pod.Namespace)\n\t\tif err == nil {\n\t\t\tpodStatus = latestPodStatus\n\t\t}\n\t}\n\n\t// Number of running containers to keep.\n\tkeepCount := 0\n\t// check the status of containers.\n\tfor idx, container := range pod.Spec.Containers {\n\t\tcontainerStatus := podStatus.FindContainerStatusByName(container.Name)\n\n\t\t// Call internal container post-stop lifecycle hook for any non-running container so that any\n\t\t// allocated cpus are released immediately. If the container is restarted, cpus will be re-allocated\n\t\t// to it.\n\t\tif containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {\n\t\t\t\tklog.ErrorS(err, \"Internal container post-stop lifecycle hook failed for container in pod with error\",\n\t\t\t\t\t\"containerName\", container.Name, \"pod\", klog.KObj(pod))\n\t\t\t}\n\t\t}\n\n\t\t// If container does not exist, or is not running, check whether we\n\t\t// need to restart it.\n\t\tif containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {\n\t\t\t\tklog.V(3).InfoS(\"Container of pod is not in the desired state and shall be started\", \"containerName\", container.Name, \"pod\", klog.KObj(pod))\n\t\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t\t\tif containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateUnknown {\n\t\t\t\t\t// If container is in unknown state, we don't know whether it\n\t\t\t\t\t// is actually running or not, always try killing it before\n\t\t\t\t\t// restart to avoid having 2 running instances of the same container.\n\t\t\t\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\t\t\t\tname: containerStatus.Name,\n\t\t\t\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Container is in %q state, try killing it before restart\",\n\t\t\t\t\t\t\tcontainerStatus.State),\n\t\t\t\t\t\treason: reasonUnknown,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// The container is running, but kill the container if any of the following condition is met.\n\t\tvar message string\n\t\tvar reason containerKillReason\n\t\trestart := shouldRestartOnFailure(pod)\n\t\t// Do not restart if only the Resources field has changed with InPlacePodVerticalScaling enabled\n\t\tif _, _, changed := containerChanged(&container, containerStatus); changed &&\n\t\t\t(!isInPlacePodVerticalScalingAllowed(pod) ||\n\t\t\t\tkubecontainer.HashContainerWithoutResources(&container) != containerStatus.HashWithoutResources) {\n\t\t\tmessage = fmt.Sprintf(\"Container %s definition changed\", container.Name)\n\t\t\t// Restart regardless of the restart policy because the container\n\t\t\t// spec changed.\n\t\t\trestart = true\n\t\t} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {\n\t\t\t// If the container failed the liveness probe, we should kill it.\n\t\t\tmessage = fmt.Sprintf(\"Container %s failed liveness probe\", container.Name)\n\t\t\treason = reasonLivenessProbe\n\t\t} else if startup, found := 
m.startupManager.Get(containerStatus.ID); found && startup == proberesults.Failure {\n\t\t\t// If the container failed the startup probe, we should kill it.\n\t\t\tmessage = fmt.Sprintf(\"Container %s failed startup probe\", container.Name)\n\t\t\treason = reasonStartupProbe\n\t\t} else if isInPlacePodVerticalScalingAllowed(pod) && !m.computePodResizeAction(pod, idx, containerStatus, &changes) {\n\t\t\t// computePodResizeAction updates 'changes' if resize policy requires restarting this container\n\t\t\tcontinue\n\t\t} else {\n\t\t\t// Keep the container.\n\t\t\tkeepCount++\n\t\t\tcontinue\n\t\t}\n\n\t\t// We need to kill the container, but if we also want to restart the\n\t\t// container afterwards, make the intent clear in the message. Also do\n\t\t// not kill the entire pod since we expect container to be running eventually.\n\t\tif restart {\n\t\t\tmessage = fmt.Sprintf(\"%s, will be restarted\", message)\n\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t}\n\n\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\tname: containerStatus.Name,\n\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\tmessage: message,\n\t\t\treason: reason,\n\t\t}\n\t\tklog.V(2).InfoS(\"Message for Container of pod\", \"containerName\", container.Name, \"containerStatusID\", containerStatus.ID, \"pod\", klog.KObj(pod), \"containerMessage\", message)\n\t}\n\n\tif keepCount == 0 && len(changes.ContainersToStart) == 0 {\n\t\tchanges.KillPod = true\n\t\t// To prevent the restartable init containers to keep pod alive, we should\n\t\t// not restart them.\n\t\tchanges.InitContainersToStart = nil\n\t}\n\n\treturn changes\n}",
"func (m *kubeGenericRuntimeManager) GetPods(ctx context.Context, all bool) ([]*kubecontainer.Pod, error) {\n\tpods := make(map[kubetypes.UID]*kubecontainer.Pod)\n\tsandboxes, err := m.getKubeletSandboxes(ctx, all)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range sandboxes {\n\t\ts := sandboxes[i]\n\t\tif s.Metadata == nil {\n\t\t\tklog.V(4).InfoS(\"Sandbox does not have metadata\", \"sandbox\", s)\n\t\t\tcontinue\n\t\t}\n\t\tpodUID := kubetypes.UID(s.Metadata.Uid)\n\t\tif _, ok := pods[podUID]; !ok {\n\t\t\tpods[podUID] = &kubecontainer.Pod{\n\t\t\t\tID: podUID,\n\t\t\t\tName: s.Metadata.Name,\n\t\t\t\tNamespace: s.Metadata.Namespace,\n\t\t\t}\n\t\t}\n\t\tp := pods[podUID]\n\t\tconverted, err := m.sandboxToKubeContainer(s)\n\t\tif err != nil {\n\t\t\tklog.V(4).InfoS(\"Convert sandbox of pod failed\", \"runtimeName\", m.runtimeName, \"sandbox\", s, \"podUID\", podUID, \"err\", err)\n\t\t\tcontinue\n\t\t}\n\t\tp.Sandboxes = append(p.Sandboxes, converted)\n\t\tp.CreatedAt = uint64(s.GetCreatedAt())\n\t}\n\n\tcontainers, err := m.getKubeletContainers(ctx, all)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := range containers {\n\t\tc := containers[i]\n\t\tif c.Metadata == nil {\n\t\t\tklog.V(4).InfoS(\"Container does not have metadata\", \"container\", c)\n\t\t\tcontinue\n\t\t}\n\n\t\tlabelledInfo := getContainerInfoFromLabels(c.Labels)\n\t\tpod, found := pods[labelledInfo.PodUID]\n\t\tif !found {\n\t\t\tpod = &kubecontainer.Pod{\n\t\t\t\tID: labelledInfo.PodUID,\n\t\t\t\tName: labelledInfo.PodName,\n\t\t\t\tNamespace: labelledInfo.PodNamespace,\n\t\t\t}\n\t\t\tpods[labelledInfo.PodUID] = pod\n\t\t}\n\n\t\tconverted, err := m.toKubeContainer(c)\n\t\tif err != nil {\n\t\t\tklog.V(4).InfoS(\"Convert container of pod failed\", \"runtimeName\", m.runtimeName, \"container\", c, \"podUID\", labelledInfo.PodUID, \"err\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tpod.Containers = append(pod.Containers, converted)\n\t}\n\n\t// Convert map to list.\n\tvar result []*kubecontainer.Pod\n\tfor _, pod := range pods {\n\t\tresult = append(result, pod)\n\t}\n\n\t// There are scenarios where multiple pods are running in parallel having\n\t// the same name, because one of them have not been fully terminated yet.\n\t// To avoid unexpected behavior on container name based search (for example\n\t// by calling *Kubelet.findContainer() without specifying a pod ID), we now\n\t// return the list of pods ordered by their creation time.\n\tsort.SliceStable(result, func(i, j int) bool {\n\t\treturn result[i].CreatedAt > result[j].CreatedAt\n\t})\n\tklog.V(4).InfoS(\"Retrieved pods from runtime\", \"all\", all)\n\treturn result, nil\n}",
"func ApplyMachineResourcesForConfig(ctx context.Context, config *rest.Config) error {\n\tc, err := client.New(config, client.Options{Scheme: apiextensionsScheme})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn ApplyMachineResources(ctx, c)\n}",
"func TestSystemResourceRequests(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tt.Parallel()\n\t_, ns := minikubetestenv.AcquireCluster(t)\n\tkubeClient := tu.GetKubeClient(t)\n\n\t// Expected resource requests for pachyderm system pods:\n\tdefaultLocalMem := map[string]string{\n\t\t\"pachd\": \"512M\",\n\t\t\"etcd\": \"512M\",\n\t}\n\tdefaultLocalCPU := map[string]string{\n\t\t\"pachd\": \"250m\",\n\t\t\"etcd\": \"250m\",\n\t}\n\tdefaultCloudMem := map[string]string{\n\t\t\"pachd\": \"3G\",\n\t\t\"etcd\": \"2G\",\n\t}\n\tdefaultCloudCPU := map[string]string{\n\t\t\"pachd\": \"1\",\n\t\t\"etcd\": \"1\",\n\t}\n\t// Get Pod info for 'app' from k8s\n\tvar c v1.Container\n\tfor _, app := range []string{\"pachd\", \"etcd\"} {\n\t\terr := backoff.Retry(func() error {\n\t\t\tpodList, err := kubeClient.CoreV1().Pods(ns).List(\n\t\t\t\tcontext.Background(),\n\t\t\t\tmetav1.ListOptions{\n\t\t\t\t\tLabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector(\n\t\t\t\t\t\tmap[string]string{\"app\": app, \"suite\": \"pachyderm\"},\n\t\t\t\t\t)),\n\t\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.EnsureStack(err)\n\t\t\t}\n\t\t\tif len(podList.Items) < 1 {\n\t\t\t\treturn errors.Errorf(\"could not find pod for %s\", app) // retry\n\t\t\t}\n\t\t\tc = podList.Items[0].Spec.Containers[0]\n\t\t\treturn nil\n\t\t}, backoff.NewTestingBackOff())\n\t\trequire.NoError(t, err)\n\n\t\t// Make sure the pod's container has resource requests\n\t\tcpu, ok := c.Resources.Requests[v1.ResourceCPU]\n\t\trequire.True(t, ok, \"could not get CPU request for \"+app)\n\t\trequire.True(t, cpu.String() == defaultLocalCPU[app] ||\n\t\t\tcpu.String() == defaultCloudCPU[app])\n\t\tmem, ok := c.Resources.Requests[v1.ResourceMemory]\n\t\trequire.True(t, ok, \"could not get memory request for \"+app)\n\t\trequire.True(t, mem.String() == defaultLocalMem[app] ||\n\t\t\tmem.String() == defaultCloudMem[app])\n\t}\n}",
"func (o *ClusterUninstaller) discoverCloudControllerResources(ctx context.Context) error {\n\to.Logger.Debugf(\"Discovering cloud controller resources\")\n\terrs := []error{}\n\n\t// Instance group related items\n\tinstanceGroups, err := o.listCloudControllerInstanceGroups(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tinstallerInstanceGroups, err := o.listInstanceGroups(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\tclusterInstanceGroups := append(instanceGroups, installerInstanceGroups...)\n\tif len(clusterInstanceGroups) != 0 {\n\t\tbackends, err := o.listCloudControllerBackendServices(ctx, clusterInstanceGroups)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tfor _, backend := range backends {\n\t\t\to.Logger.Debugf(\"Discovering cloud controller resources for %s\", backend.name)\n\t\t\terr := o.discoverCloudControllerLoadBalancerResources(ctx, backend.name)\n\t\t\tif err != nil {\n\t\t\t\terrs = append(errs, err)\n\t\t\t}\n\t\t}\n\t\to.insertPendingItems(\"backendservice\", backends)\n\t}\n\to.insertPendingItems(\"instancegroup\", instanceGroups)\n\n\t// Get a list of known cluster instances\n\tinstances, err := o.listInstances(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Target pool related items\n\tpools, err := o.listCloudControllerTargetPools(ctx, instances)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, pool := range pools {\n\t\to.Logger.Debugf(\"Discovering cloud controller resources for %s\", pool.name)\n\t\terr := o.discoverCloudControllerLoadBalancerResources(ctx, pool.name)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\to.insertPendingItems(\"targetpool\", pools)\n\n\t// cloudControllerUID related items\n\tif len(o.cloudControllerUID) > 0 {\n\t\t// Discover Cloud Controller health checks: k8s-cloudControllerUID-node\n\t\tfilter := fmt.Sprintf(\"name eq \\\"k8s-%s-node\\\"\", o.cloudControllerUID)\n\t\tfound, err := o.listHealthChecksWithFilter(ctx, \"items(name),nextPageToken\", filter, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.insertPendingItems(\"healthcheck\", found)\n\n\t\t// Discover Cloud Controller http health checks: k8s-cloudControllerUID-node\n\t\tfound, err = o.listHTTPHealthChecksWithFilter(ctx, \"items(name),nextPageToken\", filter, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.insertPendingItems(\"httphealthcheck\", found)\n\n\t\t// Discover Cloud Controller firewall rules: k8s-cloudControllerUID-node-hc, k8s-cloudControllerUID-node-http-hc\n\t\tfilter = fmt.Sprintf(\"name eq \\\"k8s-%s-node-hc\\\"\", o.cloudControllerUID)\n\t\tfound, err = o.listFirewallsWithFilter(ctx, \"items(name),nextPageToken\", filter, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.insertPendingItems(\"firewall\", found)\n\n\t\tfilter = fmt.Sprintf(\"name eq \\\"k8s-%s-node-http-hc\\\"\", o.cloudControllerUID)\n\t\tfound, err = o.listFirewallsWithFilter(ctx, \"items(name),nextPageToken\", filter, nil)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\to.insertPendingItems(\"firewall\", found)\n\t}\n\n\treturn aggregateError(errs, 0)\n}",
"func DeployResourceConfigs(\n\tctx context.Context,\n\tc client.Client,\n\tnamespace string,\n\tclusterType ClusterType,\n\tmanagedResourceName string,\n\tregistry *managedresources.Registry,\n\tallResources ResourceConfigs,\n) error {\n\tif clusterType == ClusterTypeSeed {\n\t\tfor _, r := range allResources {\n\t\t\tif r.MutateFn != nil {\n\t\t\t\tr.MutateFn()\n\t\t\t}\n\t\t\tif err := registry.Add(r.Obj); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn managedresources.CreateForSeed(ctx, c, namespace, managedResourceName, false, registry.SerializedObjects())\n\t}\n\n\tfor _, r := range allResources {\n\t\tswitch r.Class {\n\t\tcase Application:\n\t\t\tif r.MutateFn != nil {\n\t\t\t\tr.MutateFn()\n\t\t\t}\n\t\t\tif err := registry.Add(r.Obj); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase Runtime:\n\t\t\tif _, err := controllerutils.GetAndCreateOrMergePatch(ctx, c, r.Obj, func() error {\n\t\t\t\tif r.MutateFn != nil {\n\t\t\t\t\tr.MutateFn()\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn managedresources.CreateForShoot(ctx, c, namespace, managedResourceName, managedresources.LabelValueGardener, false, registry.SerializedObjects())\n}",
"func (router *Router) getResources(w http.ResponseWriter, r *http.Request) {\n\tclusterNames := r.URL.Query()[\"cluster\"]\n\tnamespaces := r.URL.Query()[\"namespace\"]\n\tname := r.URL.Query().Get(\"name\")\n\tresource := r.URL.Query().Get(\"resource\")\n\tpath := r.URL.Query().Get(\"path\")\n\tparamName := r.URL.Query().Get(\"paramName\")\n\tparam := r.URL.Query().Get(\"param\")\n\n\tlog.WithFields(logrus.Fields{\"clusters\": clusterNames, \"namespaces\": namespaces, \"name\": name, \"resource\": resource, \"path\": path, \"paramName\": paramName, \"param\": param}).Tracef(\"getResources\")\n\n\tvar resources []Resources\n\n\t// Loop through all the given cluster names and get for each provided name the cluster interface. After that we\n\t// check if the resource was provided via the forbidden resources list.\n\tfor _, clusterName := range clusterNames {\n\t\tcluster := router.clusters.GetCluster(clusterName)\n\t\tif cluster == nil {\n\t\t\terrresponse.Render(w, r, nil, http.StatusBadRequest, \"Invalid cluster name\")\n\t\t\treturn\n\t\t}\n\n\t\tif router.isForbidden(resource) {\n\t\t\terrresponse.Render(w, r, nil, http.StatusForbidden, fmt.Sprintf(\"Access for resource %s is forbidding\", resource))\n\t\t\treturn\n\t\t}\n\n\t\t// If the namespaces slice is nil, we retrieve the resource for all namespaces. If a list of namespaces was\n\t\t// provided we loop through all the namespaces and return the resources for these namespaces. All results are\n\t\t// added to the resources slice, which is then returned by the api.\n\t\tif namespaces == nil {\n\t\t\tlist, err := cluster.GetResources(r.Context(), \"\", name, path, resource, paramName, param)\n\t\t\tif err != nil {\n\t\t\t\terrresponse.Render(w, r, err, http.StatusBadRequest, \"Could not get resources\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar tmpResources map[string]interface{}\n\t\t\terr = json.Unmarshal(list, &tmpResources)\n\t\t\tif err != nil {\n\t\t\t\terrresponse.Render(w, r, err, http.StatusInternalServerError, \"Could not unmarshal resources\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresources = append(resources, Resources{\n\t\t\t\tCluster: clusterName,\n\t\t\t\tNamespace: \"\",\n\t\t\t\tResources: tmpResources,\n\t\t\t})\n\t\t} else {\n\t\t\tfor _, namespace := range namespaces {\n\t\t\t\tlist, err := cluster.GetResources(r.Context(), namespace, name, path, resource, paramName, param)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrresponse.Render(w, r, err, http.StatusBadRequest, \"Could not get resources\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tvar tmpResources map[string]interface{}\n\t\t\t\terr = json.Unmarshal(list, &tmpResources)\n\t\t\t\tif err != nil {\n\t\t\t\t\terrresponse.Render(w, r, err, http.StatusInternalServerError, \"Could not unmarshal resources\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tresources = append(resources, Resources{\n\t\t\t\t\tCluster: clusterName,\n\t\t\t\t\tNamespace: namespace,\n\t\t\t\t\tResources: tmpResources,\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n\n\tlog.WithFields(logrus.Fields{\"count\": len(resources)}).Tracef(\"getResources\")\n\trender.JSON(w, r, resources)\n}",
"func (ds *DevicesScheduler) TakePodResources(podInfo *types.PodInfo, nodeInfo *types.NodeInfo) error {\n\tfor index, d := range ds.Devices {\n\t\terr := d.TakePodResources(nodeInfo, podInfo, ds.RunGroupScheduler[index])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func runReplicatedPodOnEachNode(ctx context.Context, f *framework.Framework, nodes []v1.Node, namespace string, podsPerNode int, id string, labels map[string]string, memRequest int64) error {\n\tginkgo.By(\"Run a pod on each node\")\n\tfor _, node := range nodes {\n\t\terr := makeNodeUnschedulable(ctx, f.ClientSet, &node)\n\n\t\tn := node\n\t\tginkgo.DeferCleanup(makeNodeSchedulable, f.ClientSet, &n, false)\n\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tconfig := &testutils.RCConfig{\n\t\tClient: f.ClientSet,\n\t\tName: id,\n\t\tNamespace: namespace,\n\t\tTimeout: defaultTimeout,\n\t\tImage: imageutils.GetPauseImageName(),\n\t\tReplicas: 0,\n\t\tLabels: labels,\n\t\tMemRequest: memRequest,\n\t}\n\terr := e2erc.RunRC(ctx, *config)\n\tif err != nil {\n\t\treturn err\n\t}\n\trc, err := f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i, node := range nodes {\n\t\terr = makeNodeSchedulable(ctx, f.ClientSet, &node, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Update replicas count, to create new pods that will be allocated on node\n\t\t// (we retry 409 errors in case rc reference got out of sync)\n\t\tfor j := 0; j < 3; j++ {\n\t\t\t*rc.Spec.Replicas = int32((i + 1) * podsPerNode)\n\t\t\trc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Update(ctx, rc, metav1.UpdateOptions{})\n\t\t\tif err == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif !apierrors.IsConflict(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tklog.Warningf(\"Got 409 conflict when trying to scale RC, retries left: %v\", 3-j)\n\t\t\trc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{})\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\terr = wait.PollImmediate(5*time.Second, podTimeout, func() (bool, error) {\n\t\t\trc, err = f.ClientSet.CoreV1().ReplicationControllers(namespace).Get(ctx, id, metav1.GetOptions{})\n\t\t\tif err != nil || rc.Status.ReadyReplicas < int32((i+1)*podsPerNode) {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to coerce RC into spawning a pod on node %s within timeout\", node.Name)\n\t\t}\n\t\terr = makeNodeUnschedulable(ctx, f.ClientSet, &node)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func collectCRDResources(discovery discovery.DiscoveryInterface) ([]*metav1.APIResourceList, error) {\n\tresources, err := discovery.ServerResources()\n\tcrdResources := []*metav1.APIResourceList{}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, res := range resources {\n\t\tgv, err := schema.ParseGroupVersion(res.GroupVersion)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif gv.Group != \"apiextensions.k8s.io\" {\n\t\t\tcontinue\n\t\t}\n\t\temptyAPIResourceList := metav1.APIResourceList{\n\t\t\tGroupVersion: res.GroupVersion,\n\t\t}\n\t\temptyAPIResourceList.APIResources = findCRDGVRs(res.APIResources)\n\t\tcrdResources = append(crdResources, &emptyAPIResourceList)\n\t}\n\n\treturn crdResources, nil\n}",
"func CleanKubernetesResources(ctx context.Context, l logr.Logger, c client.Client, requirements labels.Requirements) error {\n\tops := utilclient.DefaultCleanOps()\n\treturn flow.Parallel(\n\t\tcleanResourceFn(l, ops, c, &batchv1beta1.CronJobList{}, \"CronJob\", false, addAdditionalListOptions(botanist.CronJobCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &appsv1.DaemonSetList{}, \"DaemonSet\", false, addAdditionalListOptions(botanist.DaemonSetCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &appsv1.DeploymentList{}, \"Deployment\", false, addAdditionalListOptions(botanist.DeploymentCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &batchv1.JobList{}, \"Job\", false, addAdditionalListOptions(botanist.JobCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &corev1.PodList{}, \"Pod\", false, addAdditionalListOptions(botanist.PodCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &appsv1.ReplicaSetList{}, \"ReplicaSet\", false, addAdditionalListOptions(botanist.ReplicaSetCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &corev1.ReplicationControllerList{}, \"ReplicationController\", false, addAdditionalListOptions(botanist.ReplicationControllerCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &appsv1.StatefulSetList{}, \"StatefulSet\", false, addAdditionalListOptions(botanist.StatefulSetCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &corev1.PersistentVolumeClaimList{}, \"PVC\", false, addAdditionalListOptions(botanist.PersistentVolumeClaimCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &extensionsv1beta1.IngressList{}, \"Ingress\", false, addAdditionalListOptions(botanist.IngressCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &corev1.ServiceList{}, \"Service\", false, addAdditionalListOptions(botanist.ServiceCleanOption, requirements)),\n\t\tcleanResourceFn(l, ops, c, &corev1.NamespaceList{}, \"Namespace\", false, NamespaceCleanOption),\n\t)(ctx)\n}",
"func ensureMultiClusterResources(ctx context.Context, flags flags, clientMap map[string]kubernetes.Interface) error {\n\tif flags.cleanup {\n\t\tif err := performCleanup(ctx, clientMap, flags); err != nil {\n\t\t\treturn xerrors.Errorf(\"failed performing cleanup of resources: %w\", err)\n\t\t}\n\t}\n\n\tif err := ensureAllClusterNamespacesExist(ctx, clientMap, flags); err != nil {\n\t\treturn xerrors.Errorf(\"failed ensuring namespaces: %w\", err)\n\t}\n\tfmt.Println(\"Ensured namespaces exist in all clusters.\")\n\n\tif err := createServiceAccountsAndRoles(ctx, clientMap, flags); err != nil {\n\t\treturn xerrors.Errorf(\"failed creating service accounts and roles in all clusters: %w\", err)\n\t}\n\tfmt.Println(\"Ensured ServiceAccounts and Roles.\")\n\n\tsecrets, err := getAllWorkerClusterServiceAccountSecretTokens(ctx, clientMap, flags)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"failed to get service account secret tokens: %w\", err)\n\t}\n\n\tif len(secrets) != len(flags.memberClusters) {\n\t\treturn xerrors.Errorf(\"required %d serviceaccount tokens but found only %d\\n\", len(flags.memberClusters), len(secrets))\n\t}\n\n\tkubeConfig, err := createKubeConfigFromServiceAccountTokens(secrets, flags)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"failed to create kube config from service account tokens: %w\", err)\n\t}\n\n\tkubeConfigBytes, err := yaml.Marshal(kubeConfig)\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"failed to marshal kubeconfig: %w\", err)\n\t}\n\n\tcentralClusterClient := clientMap[flags.centralCluster]\n\tif err != nil {\n\t\treturn xerrors.Errorf(\"failed to get central cluster clientset: %w\", err)\n\t}\n\n\tif err := createKubeConfigSecret(ctx, centralClusterClient, kubeConfigBytes, flags); err != nil {\n\t\treturn xerrors.Errorf(\"failed creating KubeConfig secret: %w\", err)\n\t}\n\n\tif flags.sourceCluster != \"\" {\n\t\tif err := setupDatabaseRoles(ctx, clientMap, flags); err != nil {\n\t\t\treturn xerrors.Errorf(\"failed setting up database roles: %w\", err)\n\t\t}\n\t\tfmt.Println(\"Ensured database Roles in member clusters.\")\n\t} else if flags.installDatabaseRoles {\n\t\tif err := installDatabaseRoles(ctx, clientMap, flags); err != nil {\n\t\t\treturn xerrors.Errorf(\"failed installing database roles: %w\", err)\n\t\t}\n\t\tfmt.Println(\"Ensured database Roles in member clusters.\")\n\t}\n\n\treturn nil\n}",
"func buildNormalizedNodeResourceMap(pods []*core_v1.Pod, nodes []*core_v1.Node) nodeResourceMap { // nolint: gocyclo\n\tnrm := nodeResourceMap{}\n\n\tfor _, n := range nodes {\n\t\tnrm[n.ObjectMeta.Name] = allocatedNodeResources{node: n}\n\t}\n\n\t// We sum the total allocated resources on every node from our list of pods.\n\t// Some strategies may wish to price pods based on their fraction of allocated\n\t// node resources, rather than the total resources available on a node. This\n\t// may punish lone pods that are initially scheduled onto large nodes, but this\n\t// may be desirable as it rightfully punishes applications that may cause\n\t// frequent node turnover.\n\tfor _, p := range pods {\n\t\tnr, ok := nrm[p.Spec.NodeName]\n\t\tif !ok {\n\t\t\tlog.Log.Warnw(\"unexpected missing node from NodeMap\", zap.String(\"nodeName\", p.Spec.NodeName))\n\t\t\tcontinue\n\t\t}\n\t\tnr.cpuUsed += sumPodResource(p, core_v1.ResourceCPU)\n\t\tnr.memoryUsed += sumPodResource(p, core_v1.ResourceMemory)\n\t\tnr.gpuUsed += sumPodResource(p, ResourceGPU)\n\t\tnrm[p.Spec.NodeName] = nr\n\t}\n\n\tfor k, v := range nrm {\n\t\tc := v.node.Status.Capacity.Cpu()\n\t\tif c != nil {\n\t\t\tv.cpuAvailable = c.MilliValue()\n\t\t}\n\n\t\tm := v.node.Status.Capacity.Memory()\n\t\tif m != nil {\n\t\t\tv.memoryAvailable = m.Value()\n\t\t}\n\n\t\tg := gpuCapacity(&v.node.Status.Capacity)\n\t\tif g != nil {\n\t\t\tv.gpuAvailable = g.Value()\n\t\t}\n\n\t\t// The ratio of cpuUsed / cpuAvailable is used for proportional scaling of\n\t\t// resources to \"normalize\" pod resource utilization to a full node. If\n\t\t// cpuUsed is 0 because the pods that are running have not made resource\n\t\t// requests, there's a possible divide by 0 in calling code so we default to\n\t\t// setting cpuUsed to cpuAvailable.\n\t\tif v.cpuUsed == 0 {\n\t\t\tv.cpuUsed = v.cpuAvailable\n\t\t}\n\n\t\tif v.memoryUsed == 0 {\n\t\t\tv.memoryUsed = v.memoryAvailable\n\t\t}\n\n\t\tif v.gpuUsed == 0 {\n\t\t\tv.gpuUsed = v.gpuAvailable\n\t\t}\n\n\t\tnrm[k] = v\n\t}\n\n\treturn nrm\n}",
"func UpdateResources(clientset kubernetes.Interface, restConfig *rest.Config, cluster *crv1.Pgcluster) error {\n\t// get a list of all of the instance deployments for the cluster\n\tdeployments, err := operator.GetInstanceDeployments(clientset, cluster)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// iterate through each PostgreSQL instance deployment and update the\n\t// resource values for the database container\n\t//\n\t// NOTE: a future version (near future) will first try to detect the primary\n\t// so that all the replicas are updated first, and then the primary gets the\n\t// update\n\tfor _, deployment := range deployments.Items {\n\t\t// first, initialize the requests/limits resource to empty Resource Lists\n\t\tdeployment.Spec.Template.Spec.Containers[0].Resources.Requests = v1.ResourceList{}\n\t\tdeployment.Spec.Template.Spec.Containers[0].Resources.Limits = v1.ResourceList{}\n\n\t\t// now, simply deep copy the values from the CRD\n\t\tif cluster.Spec.Resources != nil {\n\t\t\tdeployment.Spec.Template.Spec.Containers[0].Resources.Requests = cluster.Spec.Resources.DeepCopy()\n\t\t}\n\n\t\tif cluster.Spec.Limits != nil {\n\t\t\tdeployment.Spec.Template.Spec.Containers[0].Resources.Limits = cluster.Spec.Limits.DeepCopy()\n\t\t}\n\n\t\t// Before applying the update, we want to explicitly stop PostgreSQL on each\n\t\t// instance. This prevents PostgreSQL from having to boot up in crash\n\t\t// recovery mode.\n\t\t//\n\t\t// If an error is returned, we only issue a warning\n\t\tif err := stopPostgreSQLInstance(clientset, restConfig, deployment); err != nil {\n\t\t\tlog.Warn(err)\n\t\t}\n\n\t\t// update the deployment with the new values\n\t\tif _, err := clientset.AppsV1().Deployments(deployment.Namespace).Update(&deployment); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (r *ReconcileHyperConverged) getAllResources(cr *hcov1alpha1.HyperConverged, request reconcile.Request) []runtime.Object {\n\treturn []runtime.Object{\n\t\tnewKubeVirtConfigForCR(cr, request.Namespace),\n\t\tnewKubeVirtForCR(cr, request.Namespace),\n\t\tnewCDIForCR(cr, UndefinedNamespace),\n\t\tnewNetworkAddonsForCR(cr, UndefinedNamespace),\n\t\tnewKubeVirtCommonTemplateBundleForCR(cr, OpenshiftNamespace),\n\t\tnewKubeVirtNodeLabellerBundleForCR(cr, request.Namespace),\n\t\tnewKubeVirtTemplateValidatorForCR(cr, request.Namespace),\n\t}\n}",
"func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {\n\tresources := AllCRDs()\n\n\tns := Namespace(o.Namespace)\n\tif err := appendUnstructured(resources, ns); err != nil {\n\t\tfmt.Printf(\"error appending Namespace %s: %s\\n\", ns.GetName(), err.Error())\n\t}\n\n\tserviceAccountName := defaultServiceAccountName\n\tif o.ServiceAccountName == \"\" {\n\t\tcrb := ClusterRoleBinding(o.Namespace)\n\t\tif err := appendUnstructured(resources, crb); err != nil {\n\t\t\tfmt.Printf(\"error appending ClusterRoleBinding %s: %s\\n\", crb.GetName(), err.Error())\n\t\t}\n\t\tsa := ServiceAccount(o.Namespace, o.ServiceAccountAnnotations)\n\t\tif err := appendUnstructured(resources, sa); err != nil {\n\t\t\tfmt.Printf(\"error appending ServiceAccount %s: %s\\n\", sa.GetName(), err.Error())\n\t\t}\n\t} else {\n\t\tserviceAccountName = o.ServiceAccountName\n\t}\n\n\tif o.SecretData != nil {\n\t\tsec := Secret(o.Namespace, o.SecretData)\n\t\tif err := appendUnstructured(resources, sec); err != nil {\n\t\t\tfmt.Printf(\"error appending Secret %s: %s\\n\", sec.GetName(), err.Error())\n\t\t}\n\t}\n\n\tif !o.NoDefaultBackupLocation {\n\t\tbsl := BackupStorageLocation(o.Namespace, o.ProviderName, o.Bucket, o.Prefix, o.BSLConfig, o.CACertData)\n\t\tif err := appendUnstructured(resources, bsl); err != nil {\n\t\t\tfmt.Printf(\"error appending BackupStorageLocation %s: %s\\n\", bsl.GetName(), err.Error())\n\t\t}\n\t}\n\n\t// A snapshot location may not be desirable for users relying on pod volume backup/restore\n\tif o.UseVolumeSnapshots {\n\t\tvsl := VolumeSnapshotLocation(o.Namespace, o.ProviderName, o.VSLConfig)\n\t\tif err := appendUnstructured(resources, vsl); err != nil {\n\t\t\tfmt.Printf(\"error appending VolumeSnapshotLocation %s: %s\\n\", vsl.GetName(), err.Error())\n\t\t}\n\t}\n\n\tsecretPresent := o.SecretData != nil\n\n\tdeployOpts := []podTemplateOption{\n\t\tWithAnnotations(o.PodAnnotations),\n\t\tWithLabels(o.PodLabels),\n\t\tWithImage(o.Image),\n\t\tWithResources(o.VeleroPodResources),\n\t\tWithSecret(secretPresent),\n\t\tWithDefaultRepoMaintenanceFrequency(o.DefaultRepoMaintenanceFrequency),\n\t\tWithServiceAccountName(serviceAccountName),\n\t\tWithGarbageCollectionFrequency(o.GarbageCollectionFrequency),\n\t\tWithUploaderType(o.UploaderType),\n\t}\n\n\tif len(o.Features) > 0 {\n\t\tdeployOpts = append(deployOpts, WithFeatures(o.Features))\n\t}\n\n\tif o.RestoreOnly {\n\t\tdeployOpts = append(deployOpts, WithRestoreOnly())\n\t}\n\n\tif len(o.Plugins) > 0 {\n\t\tdeployOpts = append(deployOpts, WithPlugins(o.Plugins))\n\t}\n\n\tif o.DefaultVolumesToFsBackup {\n\t\tdeployOpts = append(deployOpts, WithDefaultVolumesToFsBackup())\n\t}\n\n\tdeploy := Deployment(o.Namespace, deployOpts...)\n\n\tif err := appendUnstructured(resources, deploy); err != nil {\n\t\tfmt.Printf(\"error appending Deployment %s: %s\\n\", deploy.GetName(), err.Error())\n\t}\n\n\tif o.UseNodeAgent {\n\t\tdsOpts := []podTemplateOption{\n\t\t\tWithAnnotations(o.PodAnnotations),\n\t\t\tWithLabels(o.PodLabels),\n\t\t\tWithImage(o.Image),\n\t\t\tWithResources(o.NodeAgentPodResources),\n\t\t\tWithSecret(secretPresent),\n\t\t\tWithServiceAccountName(serviceAccountName),\n\t\t}\n\t\tif len(o.Features) > 0 {\n\t\t\tdsOpts = append(dsOpts, WithFeatures(o.Features))\n\t\t}\n\t\tds := DaemonSet(o.Namespace, dsOpts...)\n\t\tif err := appendUnstructured(resources, ds); err != nil {\n\t\t\tfmt.Printf(\"error appending DaemonSet %s: %s\\n\", ds.GetName(), err.Error())\n\t\t}\n\t}\n\n\treturn resources\n}",
"func PodFitsResourcesPredicate(pod *v1.Pod, podRequest *schedulernodeinfo.Resource, ignoredExtendedResources sets.String, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {\n\tnode := nodeInfo.Node()\n\tif node == nil {\n\t\treturn false, nil, fmt.Errorf(\"node not found\")\n\t}\n\n\tvar predicateFails []PredicateFailureReason\n\tallowedPodNumber := nodeInfo.AllowedPodNumber()\n\tif len(nodeInfo.Pods())+1 > allowedPodNumber {\n\t\tpredicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourcePods, 1, int64(len(nodeInfo.Pods())), int64(allowedPodNumber)))\n\t}\n\n\tif ignoredExtendedResources == nil {\n\t\tignoredExtendedResources = sets.NewString()\n\t}\n\n\tif podRequest == nil {\n\t\tpodRequest = GetResourceRequest(pod)\n\t}\n\tif podRequest.MilliCPU == 0 &&\n\t\tpodRequest.Memory == 0 &&\n\t\tpodRequest.EphemeralStorage == 0 &&\n\t\tlen(podRequest.ScalarResources) == 0 {\n\t\treturn len(predicateFails) == 0, predicateFails, nil\n\t}\n\n\tallocatable := nodeInfo.AllocatableResource()\n\tif allocatable.MilliCPU < podRequest.MilliCPU+nodeInfo.RequestedResource().MilliCPU {\n\t\tpredicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceCPU, podRequest.MilliCPU, nodeInfo.RequestedResource().MilliCPU, allocatable.MilliCPU))\n\t}\n\tif allocatable.Memory < podRequest.Memory+nodeInfo.RequestedResource().Memory {\n\t\tpredicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceMemory, podRequest.Memory, nodeInfo.RequestedResource().Memory, allocatable.Memory))\n\t}\n\tif allocatable.EphemeralStorage < podRequest.EphemeralStorage+nodeInfo.RequestedResource().EphemeralStorage {\n\t\tpredicateFails = append(predicateFails, NewInsufficientResourceError(v1.ResourceEphemeralStorage, podRequest.EphemeralStorage, nodeInfo.RequestedResource().EphemeralStorage, allocatable.EphemeralStorage))\n\t}\n\n\tfor rName, rQuant := range podRequest.ScalarResources {\n\t\tif v1helper.IsExtendedResourceName(rName) {\n\t\t\t// If this resource is one of the extended resources that should be\n\t\t\t// ignored, we will skip checking it.\n\t\t\tif ignoredExtendedResources.Has(string(rName)) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tif allocatable.ScalarResources[rName] < rQuant+nodeInfo.RequestedResource().ScalarResources[rName] {\n\t\t\tpredicateFails = append(predicateFails, NewInsufficientResourceError(rName, podRequest.ScalarResources[rName], nodeInfo.RequestedResource().ScalarResources[rName], allocatable.ScalarResources[rName]))\n\t\t}\n\t}\n\n\tif klog.V(10) && len(predicateFails) == 0 {\n\t\t// We explicitly don't do klog.V(10).Infof() to avoid computing all the parameters if this is\n\t\t// not logged. There is visible performance gain from it.\n\t\tklog.Infof(\"Schedule Pod %+v on Node %+v is allowed, Node is running only %v out of %v Pods.\",\n\t\t\tpodName(pod), node.Name, len(nodeInfo.Pods()), allowedPodNumber)\n\t}\n\treturn len(predicateFails) == 0, predicateFails, nil\n}",
"func (g *VpcGenerator) InitResources() error {\n\tclient, err := g.LoadClientFromProfile()\n\tif err != nil {\n\t\treturn err\n\t}\n\tremaining := 1\n\tpageNumber := 1\n\tpageSize := 10\n\n\tallVpcs := make([]vpc.Vpc, 0)\n\n\tfor remaining > 0 {\n\t\traw, err := client.WithVpcClient(func(vpcClient *vpc.Client) (interface{}, error) {\n\t\t\trequest := vpc.CreateDescribeVpcsRequest()\n\t\t\trequest.RegionId = client.RegionID\n\t\t\trequest.PageSize = requests.NewInteger(pageSize)\n\t\t\trequest.PageNumber = requests.NewInteger(pageNumber)\n\t\t\treturn vpcClient.DescribeVpcs(request)\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresponse := raw.(*vpc.DescribeVpcsResponse)\n\t\tallVpcs = append(allVpcs, response.Vpcs.Vpc...)\n\t\tremaining = response.TotalCount - pageNumber*pageSize\n\t\tpageNumber++\n\t}\n\n\tfor _, Vpc := range allVpcs {\n\t\tresource := resourceFromVpcResponse(Vpc)\n\t\tg.Resources = append(g.Resources, resource)\n\t}\n\n\treturn nil\n}",
"func (p *DockerPod) Finit() error {\n\tfor _, task := range p.conTasks {\n\t\tfor _, ex := range task.ExtendedResources {\n\t\t\tif err := p.resourceManager.ReleaseExtendedResources(ex.Name, task.TaskId); err != nil {\n\t\t\t\t// do not break\n\t\t\t\tlogs.Errorf(\"release extended resources %v failed, err %s\", ex, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\tif p.netTask != nil {\n\t\tfor _, ex := range p.netTask.ExtendedResources {\n\t\t\tif err := p.resourceManager.ReleaseExtendedResources(ex.Name, p.netTask.TaskId); err != nil {\n\t\t\t\t// do not break\n\t\t\t\tlogs.Errorf(\"release extended resources %v failed, err %s\", ex, err.Error())\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (r *ReconcileKogitoApp) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tlog.Info(\"Reconciling KogitoApp\")\n\n\t// Fetch the KogitoApp instance\n\tinstance := &v1alpha1.KogitoApp{}\n\tif exists, err := kubernetes.ResourceC(r.client).FetchWithKey(request.NamespacedName, instance); err != nil {\n\t\treturn reconcile.Result{}, err\n\t} else if !exists {\n\t\treturn reconcile.Result{}, nil\n\t}\n\n\t// Set some CR defaults\n\tif len(instance.Spec.Name) == 0 {\n\t\tinstance.Spec.Name = instance.Name\n\t}\n\tif instance.Spec.Runtime != v1alpha1.SpringbootRuntimeType {\n\t\tinstance.Spec.Runtime = v1alpha1.QuarkusRuntimeType\n\t}\n\n\tlog.Infof(\"Checking if all resources for '%s' are created\", instance.Spec.Name)\n\t// create resources in the cluster that do not exist\n\tkogitoInv, err := builder.BuildOrFetchObjects(&builder.Context{\n\t\tClient: r.client,\n\t\tKogitoApp: instance,\n\t\tPreCreate: func(object meta.ResourceObject) error {\n\t\t\tif object != nil {\n\t\t\t\tlog.Debugf(\"Setting controller reference pre create for '%s' kind '%s'\", object.GetName(), object.GetObjectKind().GroupVersionKind().Kind)\n\t\t\t\treturn controllerutil.SetControllerReference(instance, object, r.scheme)\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// ensure builds\n\tlog.Infof(\"Checking if build for '%s' is finished\", instance.Spec.Name)\n\tif imageExists, err := r.ensureApplicationImageExists(kogitoInv, instance); err != nil {\n\t\treturn reconcile.Result{}, err\n\t} else if !imageExists {\n\t\t// let's wait for the build to finish\n\t\tif status.SetProvisioning(instance) {\n\t\t\treturn r.UpdateObj(instance)\n\t\t}\n\t\tlog.Infof(\"Build for '%s' still running\", instance.Spec.Name)\n\t\treturn reconcile.Result{RequeueAfter: time.Duration(30) * time.Second}, nil\n\t}\n\n\t// checks for dc updates\n\tif kogitoInv.DeploymentConfig != nil {\n\t\tif dcUpdated, err := r.updateDeploymentConfigs(instance, *kogitoInv.DeploymentConfig); err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t} else if dcUpdated && status.SetProvisioning(instance) {\n\t\t\treturn r.UpdateObj(instance)\n\t\t}\n\t}\n\n\t// Setting route to the status\n\tif kogitoInv.Service != nil {\n\t\tif serviceRoute := r.GetRouteHost(*kogitoInv.Route, instance); serviceRoute != \"\" {\n\t\t\tinstance.Status.Route = fmt.Sprintf(\"http://%s\", serviceRoute)\n\t\t}\n\t}\n\n\t/*\n\n\t\tbcUpdated, err := r.updateBuildConfigs(instance, buildConfig)\n\t\tif err != nil {\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t\tif bcUpdated && status.SetProvisioning(instance) {\n\t\t\treturn r.UpdateObj(instance)\n\t\t}\n\t*/\n\n\t// Fetch the cached KogitoApp instance\n\tcachedInstance := &v1alpha1.KogitoApp{}\n\terr = r.cache.Get(context.TODO(), request.NamespacedName, cachedInstance)\n\tif err != nil {\n\t\tif errors.IsNotFound(err) {\n\t\t\t// Request object not found, could have been deleted after reconcile request.\n\t\t\t// Owned objects are automatically garbage collected. 
For additional cleanup logic use finalizers.\n\t\t\t// Return and don't requeue\n\t\t\treturn reconcile.Result{}, nil\n\t\t}\n\t\t// Error reading the object - requeue the request.\n\t\tr.setFailedStatus(instance, v1alpha1.UnknownReason, err)\n\t\treturn reconcile.Result{}, err\n\t}\n\n\t// Update CR if needed\n\tif r.hasSpecChanges(instance, cachedInstance) {\n\t\tif status.SetProvisioning(instance) && instance.ResourceVersion == cachedInstance.ResourceVersion {\n\t\t\treturn r.UpdateObj(instance)\n\t\t}\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\tif r.hasStatusChanges(instance, cachedInstance) {\n\t\tif instance.ResourceVersion == cachedInstance.ResourceVersion {\n\t\t\treturn r.UpdateObj(instance)\n\t\t}\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\tif status.SetDeployed(instance) {\n\t\tif instance.ResourceVersion == cachedInstance.ResourceVersion {\n\t\t\treturn r.UpdateObj(instance)\n\t\t}\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\n\tlog.Infof(\"Reconcile for '%s' successfully finished\", instance.Spec.Name)\n\treturn reconcile.Result{}, nil\n}",
"func (c *Client) WaitForResources(timeout time.Duration, resources []*manifest.MappingResult) error {\n\treturn wait.Poll(5*time.Second, timeout, func() (bool, error) {\n\t\tstatefulSets := []appsv1.StatefulSet{}\n\t\tdeployments := []deployment{}\n\t\tfor _, r := range resources {\n\t\t\tswitch r.Metadata.Kind {\n\t\t\tcase \"ConfigMap\":\n\t\t\tcase \"Service\":\n\t\t\tcase \"ReplicationController\":\n\t\t\tcase \"Pod\":\n\t\t\tcase \"Deployment\":\n\t\t\t\tcurrentDeployment, err := c.clientset.AppsV1().Deployments(r.Metadata.ObjectMeta.Namespace).Get(context.TODO(), r.Metadata.ObjectMeta.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\t// Find RS associated with deployment\n\t\t\t\tnewReplicaSet, err := c.getNewReplicaSet(currentDeployment)\n\t\t\t\tif err != nil || newReplicaSet == nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tnewDeployment := deployment{\n\t\t\t\t\tnewReplicaSet,\n\t\t\t\t\tcurrentDeployment,\n\t\t\t\t}\n\t\t\t\tdeployments = append(deployments, newDeployment)\n\t\t\tcase \"StatefulSet\":\n\t\t\t\tsf, err := c.clientset.AppsV1().StatefulSets(r.Metadata.ObjectMeta.Namespace).Get(context.TODO(), r.Metadata.ObjectMeta.Name, metav1.GetOptions{})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, err\n\t\t\t\t}\n\t\t\t\tstatefulSets = append(statefulSets, *sf)\n\t\t\t}\n\t\t}\n\t\tisReady := c.statefulSetsReady(statefulSets) && c.deploymentsReady(deployments)\n\t\treturn isReady, nil\n\t})\n}",
"func (s *Synk) initialize(\n\tctx context.Context,\n\topts *ApplyOptions,\n\tresources ...*unstructured.Unstructured,\n) (*apps.ResourceSet, []*unstructured.Unstructured, error) {\n\t// Cleanup and sort resources.\n\tresources = filter(resources, func(r *unstructured.Unstructured) bool {\n\t\treturn !reflect.DeepEqual(*r, unstructured.Unstructured{}) && !isTestResource(r)\n\t})\n\tsortResources(resources)\n\n\tcrds, regulars := separateCRDsFromResources(resources)\n\n\tif err := s.populateNamespaces(ctx, opts.Namespace, crds, regulars...); err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"set default namespaces\")\n\t}\n\t// TODO: consider putting this and other validation as a step after initialize\n\t// so we can give validation errors in batch in the ResourceSet status.\n\tif opts.EnforceNamespace {\n\t\tfor _, r := range regulars {\n\t\t\tif ns := r.GetNamespace(); ns != \"\" && ns != opts.Namespace && ns != \"kube-system\" {\n\t\t\t\treturn nil, nil, errors.Errorf(\"invalid namespace %q on %q, expected %q or \\\"kube-system\\\"\", ns, resourceKey(r), opts.Namespace)\n\t\t\t}\n\t\t}\n\t}\n\n\t// Initialize and create next ResourceSet.\n\tvar err error\n\topts.version, err = s.next(ctx, opts.name)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"get next ResourceSet version\")\n\t}\n\n\tvar rs apps.ResourceSet\n\trs.Name = resourceSetName(opts.name, opts.version)\n\trs.Labels = map[string]string{\"name\": opts.name}\n\n\tgroupedResources := map[schema.GroupVersionKind][]apps.ResourceRef{}\n\tfor _, r := range resources {\n\t\tgvk := r.GroupVersionKind()\n\t\tgroupedResources[gvk] = append(groupedResources[gvk], apps.ResourceRef{\n\t\t\tNamespace: r.GetNamespace(),\n\t\t\tName: r.GetName(),\n\t\t})\n\t}\n\tfor gvk, res := range groupedResources {\n\t\trs.Spec.Resources = append(rs.Spec.Resources, apps.ResourceSetSpecGroup{\n\t\t\tGroup: gvk.Group,\n\t\t\tVersion: gvk.Version,\n\t\t\tKind: gvk.Kind,\n\t\t\tItems: res,\n\t\t})\n\t}\n\tsort.Slice(rs.Spec.Resources, func(i, j int) bool {\n\t\treturn lessResourceSetSpecGroup(&rs.Spec.Resources[i], &rs.Spec.Resources[j])\n\t})\n\n\trs.Status = apps.ResourceSetStatus{\n\t\tPhase: apps.ResourceSetPhasePending,\n\t\tStartedAt: metav1.Now(),\n\t}\n\tif err := s.createResourceSet(ctx, &rs); err != nil {\n\t\treturn nil, nil, errors.Wrapf(err, \"create resources object %q\", rs.Name)\n\t}\n\n\treturn &rs, resources, nil\n}",
"func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {\n\tklog.V(5).Infof(\"Syncing Pod %q: %+v\", format.Pod(pod), pod)\n\tklog.V(5).Infof(\"podstatus %v\", podStatus)\n\tif podStatus.SandboxStatuses != nil {\n\t\tklog.V(5).Infof(\"pod sandbox length %v\", len(podStatus.SandboxStatuses))\n\t\tfor _, sb := range podStatus.SandboxStatuses {\n\t\t\tklog.V(5).Infof(\"pod sandbox status %v\", sb)\n\t\t}\n\t}\n\n\tcreatePodSandbox, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)\n\tchanges := podActions{\n\t\tKillPod: createPodSandbox,\n\t\tCreateSandbox: createPodSandbox,\n\t\tSandboxID: sandboxID,\n\t\tAttempt: attempt,\n\t\tContainersToStart: []int{},\n\t\tContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),\n\t\tContainersToUpdate: make(map[string][]containerToUpdateInfo),\n\t\tContainersToRestart: []int{},\n\t}\n\n\t// If we need to (re-)create the pod sandbox, everything will need to be\n\t// killed and recreated, and init containers should be purged.\n\tif createPodSandbox {\n\t\tif !shouldRestartOnFailure(pod) && attempt != 0 {\n\t\t\t// Should not restart the pod, just return.\n\t\t\t// we should not create a sandbox for a pod if it is already done.\n\t\t\t// if all containers are done and should not be started, there is no need to create a new sandbox.\n\t\t\t// this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.\n\t\t\tchanges.CreateSandbox = false\n\t\t\treturn changes\n\t\t}\n\t\tif len(pod.Spec.InitContainers) != 0 {\n\t\t\t// Pod has init containers, return the first one.\n\t\t\tchanges.NextInitContainerToStart = &pod.Spec.InitContainers[0]\n\t\t\treturn changes\n\t\t}\n\t\t// Start all containers by default but exclude the ones that succeeded if\n\t\t// RestartPolicy is OnFailure.\n\t\tfor idx, c := range pod.Spec.Containers {\n\t\t\tif containerSucceeded(&c, podStatus) && pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t}\n\t\treturn changes\n\t}\n\n\t// Check initialization progress.\n\tinitLastStatus, next, done := findNextInitContainerToRun(pod, podStatus)\n\tif !done {\n\t\tif next != nil {\n\t\t\tinitFailed := initLastStatus != nil && isInitContainerFailed(initLastStatus)\n\t\t\tif initFailed && !shouldRestartOnFailure(pod) {\n\t\t\t\tchanges.KillPod = true\n\t\t\t} else {\n\t\t\t\t// Always try to stop containers in unknown state first.\n\t\t\t\tif initLastStatus != nil && initLastStatus.State == kubecontainer.ContainerStateUnknown {\n\t\t\t\t\tchanges.ContainersToKill[initLastStatus.ID] = containerToKillInfo{\n\t\t\t\t\t\tname: next.Name,\n\t\t\t\t\t\tcontainer: next,\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Init container is in %q state, try killing it before restart\",\n\t\t\t\t\t\t\tinitLastStatus.State),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tchanges.NextInitContainerToStart = next\n\t\t\t}\n\t\t}\n\t\t// Initialization failed or still in progress. Skip inspecting non-init\n\t\t// containers.\n\t\treturn changes\n\t}\n\n\t// Number of running containers to keep.\n\tkeepCount := 0\n\n\t// check the status of containers.\n\tfor idx, container := range pod.Spec.Containers {\n\t\tcontainerStatus := podStatus.FindContainerStatusByName(container.Name)\n\n\t\t// Call internal container post-stop lifecycle hook for any non-running container so that any\n\t\t// allocated cpus are released immediately. 
If the container is restarted, cpus will be re-allocated\n\t\t// to it.\n\t\tif containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {\n\t\t\t\tklog.Errorf(\"internal container post-stop lifecycle hook failed for container %v in pod %v with error %v\",\n\t\t\t\t\tcontainer.Name, pod.Name, err)\n\t\t\t}\n\t\t}\n\n\t\t// If container does not exist, or is not running, check whether we\n\t\t// need to restart it.\n\t\tif containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {\n\t\t\tif kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {\n\t\t\t\tmessage := fmt.Sprintf(\"Container %+v is dead, but RestartPolicy says that we should restart it.\", container)\n\t\t\t\tklog.V(3).Infof(message)\n\t\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t\t\tif containerStatus != nil && containerStatus.State == kubecontainer.ContainerStateUnknown {\n\t\t\t\t\t// If container is in unknown state, we don't know whether it\n\t\t\t\t\t// is actually running or not, always try killing it before\n\t\t\t\t\t// restart to avoid having 2 running instances of the same container.\n\t\t\t\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\t\t\t\tname: containerStatus.Name,\n\t\t\t\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Container is in %q state, try killing it before restart\", containerStatus.State),\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\t// The container is running, but kill the container if any of the following condition is met.\n\t\tvar message string\n\t\trestart := shouldRestartOnFailure(pod)\n\t\tif _, _, changed := containerChanged(&container, containerStatus); changed {\n\t\t\tmessage = fmt.Sprintf(\"Container %s definition changed\", container.Name)\n\t\t\t// Restart regardless of the restart policy because the container\n\t\t\t// spec changed.\n\t\t\trestart = true\n\t\t} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {\n\t\t\t// If the container failed the liveness probe, we should kill it.\n\t\t\tmessage = fmt.Sprintf(\"Container %s failed liveness probe\", container.Name)\n\t\t} else {\n\t\t\tif utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {\n\t\t\t\tkeepCount++\n\t\t\t\tapiContainerStatuses := pod.Status.ContainerStatuses\n\t\t\t\tif pod.Spec.VirtualMachine != nil && pod.Status.VirtualMachineStatus != nil {\n\t\t\t\t\tvar vmContainerState v1.ContainerState\n\t\t\t\t\tif pod.Status.VirtualMachineStatus.State == v1.VmActive {\n\t\t\t\t\t\tvmContainerState = v1.ContainerState{Running: &v1.ContainerStateRunning{StartedAt: *pod.Status.StartTime}}\n\t\t\t\t\t}\n\t\t\t\t\tvmContainerId := kubecontainer.BuildContainerID(containerStatus.ID.Type, pod.Status.VirtualMachineStatus.VirtualMachineId)\n\t\t\t\t\tapiContainerStatuses = []v1.ContainerStatus{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: pod.Status.VirtualMachineStatus.Name,\n\t\t\t\t\t\t\tContainerID: vmContainerId.String(),\n\t\t\t\t\t\t\tState: vmContainerState,\n\t\t\t\t\t\t\tReady: pod.Status.VirtualMachineStatus.Ready,\n\t\t\t\t\t\t\tRestartCount: pod.Status.VirtualMachineStatus.RestartCount,\n\t\t\t\t\t\t\tImage: pod.Status.VirtualMachineStatus.Image,\n\t\t\t\t\t\t\tImageID: pod.Status.VirtualMachineStatus.ImageId,\n\t\t\t\t\t\t\tResources: 
pod.Status.VirtualMachineStatus.Resources,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif container.Resources.Limits == nil || len(apiContainerStatuses) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tapiContainerStatus, exists := podutil.GetContainerStatus(apiContainerStatuses, container.Name)\n\t\t\t\tif !exists || apiContainerStatus.State.Running == nil ||\n\t\t\t\t\tcontainerStatus.State != kubecontainer.ContainerStateRunning ||\n\t\t\t\t\tcontainerStatus.ID.String() != apiContainerStatus.ContainerID ||\n\t\t\t\t\tlen(diff.ObjectDiff(container.Resources.Requests, container.ResourcesAllocated)) != 0 ||\n\t\t\t\t\tlen(diff.ObjectDiff(apiContainerStatus.Resources, container.Resources)) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// If runtime status resources is available from CRI or previous update, compare with it.\n\t\t\t\tif len(diff.ObjectDiff(containerStatus.Resources, container.Resources)) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tresizePolicy := make(map[v1.ResourceName]v1.ContainerResizePolicy)\n\t\t\t\tfor _, pol := range container.ResizePolicy {\n\t\t\t\t\tresizePolicy[pol.ResourceName] = pol.Policy\n\t\t\t\t}\n\t\t\t\tdetermineContainerResize := func(rName v1.ResourceName, specValue, statusValue int64) (bool, bool) {\n\t\t\t\t\tif specValue == statusValue {\n\t\t\t\t\t\treturn false, false\n\t\t\t\t\t}\n\t\t\t\t\tif resizePolicy[rName] == v1.RestartContainer {\n\t\t\t\t\t\treturn true, true\n\t\t\t\t\t}\n\t\t\t\t\treturn true, false\n\t\t\t\t}\n\t\t\t\tmarkContainerForUpdate := func(rName string, specValue, statusValue int64) {\n\t\t\t\t\tcUpdateInfo := containerToUpdateInfo{\n\t\t\t\t\t\tapiContainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tapiContainerStatus: &apiContainerStatus,\n\t\t\t\t\t\tkubeContainerStatus: containerStatus,\n\t\t\t\t\t}\n\t\t\t\t\t// Container updates are ordered so that resource decreases are applied before increases\n\t\t\t\t\tswitch {\n\t\t\t\t\tcase specValue > statusValue: // append\n\t\t\t\t\t\tchanges.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], cUpdateInfo)\n\t\t\t\t\tcase specValue < statusValue: // prepend\n\t\t\t\t\t\tchanges.ContainersToUpdate[rName] = append(changes.ContainersToUpdate[rName], containerToUpdateInfo{})\n\t\t\t\t\t\tcopy(changes.ContainersToUpdate[rName][1:], changes.ContainersToUpdate[rName])\n\t\t\t\t\t\tchanges.ContainersToUpdate[rName][0] = cUpdateInfo\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tspecLim := container.Resources.Limits\n\t\t\t\tspecReq := container.Resources.Requests\n\t\t\t\tstatusLim := apiContainerStatus.Resources.Limits\n\t\t\t\tstatusReq := apiContainerStatus.Resources.Requests\n\t\t\t\t// Runtime container status resources, if set, takes precedence.\n\t\t\t\tif containerStatus.Resources.Limits != nil {\n\t\t\t\t\tstatusLim = containerStatus.Resources.Limits\n\t\t\t\t}\n\t\t\t\tif containerStatus.Resources.Requests != nil {\n\t\t\t\t\tstatusReq = containerStatus.Resources.Requests\n\t\t\t\t}\n\t\t\t\tresizeMemLim, restartMemLim := determineContainerResize(v1.ResourceMemory, specLim.Memory().Value(), statusLim.Memory().Value())\n\t\t\t\tresizeCPUReq, restartCPUReq := determineContainerResize(v1.ResourceCPU, specReq.Cpu().MilliValue(), statusReq.Cpu().MilliValue())\n\t\t\t\tresizeCPULim, restartCPULim := determineContainerResize(v1.ResourceCPU, specLim.Cpu().MilliValue(), statusLim.Cpu().MilliValue())\n\t\t\t\tif restartMemLim || restartCPULim || restartCPUReq {\n\t\t\t\t\t// resize policy requires this container to restart\n\t\t\t\t\tchanges.ContainersToKill[containerStatus.ID] 
= containerToKillInfo{\n\t\t\t\t\t\tname: containerStatus.Name,\n\t\t\t\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\t\t\t\tmessage: fmt.Sprintf(\"Container %s resize requires restart\", container.Name),\n\t\t\t\t\t}\n\t\t\t\t\tchanges.ContainersToRestart = append(changes.ContainersToRestart, idx)\n\t\t\t\t\tkeepCount--\n\t\t\t\t} else {\n\t\t\t\t\tif resizeMemLim {\n\t\t\t\t\t\tmarkContainerForUpdate(memLimit, specLim.Memory().Value(), statusLim.Memory().Value())\n\t\t\t\t\t}\n\t\t\t\t\tif resizeCPUReq {\n\t\t\t\t\t\tmarkContainerForUpdate(cpuRequest, specReq.Cpu().MilliValue(), statusReq.Cpu().MilliValue())\n\t\t\t\t\t}\n\t\t\t\t\tif resizeCPULim {\n\t\t\t\t\t\tmarkContainerForUpdate(cpuLimit, specLim.Cpu().MilliValue(), statusLim.Cpu().MilliValue())\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t// Keep the container.\n\t\t\tkeepCount++\n\t\t\tcontinue\n\t\t}\n\n\t\t// We need to kill the container, but if we also want to restart the\n\t\t// container afterwards, make the intent clear in the message. Also do\n\t\t// not kill the entire pod since we expect container to be running eventually.\n\t\tif restart {\n\t\t\tmessage = fmt.Sprintf(\"%s, will be restarted\", message)\n\t\t\tchanges.ContainersToStart = append(changes.ContainersToStart, idx)\n\t\t}\n\n\t\tchanges.ContainersToKill[containerStatus.ID] = containerToKillInfo{\n\t\t\tname: containerStatus.Name,\n\t\t\tcontainer: &pod.Spec.Containers[idx],\n\t\t\tmessage: message,\n\t\t}\n\t\tklog.V(2).Infof(\"Container %q (%q) of pod %s: %s\", container.Name, containerStatus.ID, format.Pod(pod), message)\n\t}\n\n\tif keepCount == 0 && len(changes.ContainersToStart) == 0 {\n\t\tchanges.KillPod = true\n\t\tif utilfeature.DefaultFeatureGate.Enabled(features.InPlacePodVerticalScaling) {\n\t\t\tif len(changes.ContainersToRestart) != 0 {\n\t\t\t\tchanges.KillPod = false\n\t\t\t}\n\t\t}\n\t}\n\n\t// always attempts to identify hotplug nic based on pod spec & pod status (got from runtime)\n\tif m.canHotplugNIC(pod, podStatus) {\n\t\tif len(podStatus.SandboxStatuses) > 0 && podStatus.SandboxStatuses[0].GetNetwork() != nil {\n\t\t\tnicsToAttach, nicsToDetach := computeNICHotplugs(pod.Spec.Nics, podStatus.SandboxStatuses[0].GetNetwork().GetNics())\n\t\t\tif len(nicsToAttach) > 0 {\n\t\t\t\tchanges.Hotplugs.NICsToAttach = nicsToAttach\n\t\t\t}\n\t\t\tif len(nicsToDetach) > 0 {\n\t\t\t\tchanges.Hotplugs.NICsToDetach = nicsToDetach\n\t\t\t}\n\t\t}\n\t}\n\n\treturn changes\n}",
"func GenerateContainers(vmi *v1.VirtualMachineInstance, podVolumeName string, podVolumeMountDir string) []kubev1.Container {\n\tvar containers []kubev1.Container\n\n\tinitialDelaySeconds := 2\n\ttimeoutSeconds := 5\n\tperiodSeconds := 5\n\tsuccessThreshold := 2\n\tfailureThreshold := 5\n\n\t// Make VirtualMachineInstance Image Wrapper Containers\n\tfor _, volume := range vmi.Spec.Volumes {\n\t\tif volume.ContainerDisk != nil {\n\n\t\t\tvolumeMountDir := generateVolumeMountDir(vmi, volume.Name)\n\t\t\tdiskContainerName := fmt.Sprintf(\"volume%s\", volume.Name)\n\t\t\tdiskContainerImage := volume.ContainerDisk.Image\n\t\t\tresources := kubev1.ResourceRequirements{}\n\t\t\tif vmi.IsCPUDedicated() {\n\t\t\t\tresources.Limits = make(kubev1.ResourceList)\n\t\t\t\t// TODO(vladikr): adjust the correct cpu/mem values - this is mainly needed to allow QemuImg to run correctly\n\t\t\t\tresources.Limits[kubev1.ResourceCPU] = resource.MustParse(\"200m\")\n\t\t\t\t// k8s minimum memory reservation is linuxMinMemory = 4194304\n\t\t\t\tresources.Limits[kubev1.ResourceMemory] = resource.MustParse(\"64M\")\n\t\t\t}\n\t\t\tcontainers = append(containers, kubev1.Container{\n\t\t\t\tName: diskContainerName,\n\t\t\t\tImage: diskContainerImage,\n\t\t\t\tImagePullPolicy: kubev1.PullIfNotPresent,\n\t\t\t\tCommand: []string{\"/entry-point.sh\"},\n\t\t\t\tEnv: []kubev1.EnvVar{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"COPY_PATH\",\n\t\t\t\t\t\tValue: volumeMountDir + \"/\" + filePrefix,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"IMAGE_PATH\",\n\t\t\t\t\t\tValue: volume.ContainerDisk.Path,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVolumeMounts: []kubev1.VolumeMount{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: podVolumeName,\n\t\t\t\t\t\tMountPath: podVolumeMountDir,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tResources: resources,\n\n\t\t\t\t// The readiness probes ensure the volume coversion and copy finished\n\t\t\t\t// before the container is marked as \"Ready: True\"\n\t\t\t\tReadinessProbe: &kubev1.Probe{\n\t\t\t\t\tHandler: kubev1.Handler{\n\t\t\t\t\t\tExec: &kubev1.ExecAction{\n\t\t\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\t\t\"cat\",\n\t\t\t\t\t\t\t\t\"/tmp/healthy\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tInitialDelaySeconds: int32(initialDelaySeconds),\n\t\t\t\t\tPeriodSeconds: int32(periodSeconds),\n\t\t\t\t\tTimeoutSeconds: int32(timeoutSeconds),\n\t\t\t\t\tSuccessThreshold: int32(successThreshold),\n\t\t\t\t\tFailureThreshold: int32(failureThreshold),\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\treturn containers\n}",
"func (m *ManagedNodeGroupResourceSet) AddAllResources() error {\n\tm.resourceSet.template.Description = fmt.Sprintf(\n\t\t\"%s (SSH access: %v) %s\",\n\t\t\"EKS Managed Nodes\",\n\t\tapi.IsEnabled(m.nodeGroup.SSH.Allow),\n\t\t\"[created by eksctl]\")\n\n\tm.template.Mappings[servicePrincipalPartitionMapName] = servicePrincipalPartitionMappings\n\n\tvar nodeRole *gfnt.Value\n\tif m.nodeGroup.IAM.InstanceRoleARN == \"\" {\n\t\tif err := createRole(m.resourceSet, m.nodeGroup.IAM, true); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tnodeRole = gfnt.MakeFnGetAttString(cfnIAMInstanceRoleName, \"Arn\")\n\t} else {\n\t\tnodeRole = gfnt.NewString(m.nodeGroup.IAM.InstanceRoleARN)\n\t}\n\n\tsubnets, err := AssignSubnets(m.nodeGroup.AvailabilityZones, m.clusterStackName, m.clusterConfig, m.nodeGroup.PrivateNetworking)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tscalingConfig := gfneks.Nodegroup_ScalingConfig{}\n\tif m.nodeGroup.MinSize != nil {\n\t\tscalingConfig.MinSize = gfnt.NewInteger(*m.nodeGroup.MinSize)\n\t}\n\tif m.nodeGroup.MaxSize != nil {\n\t\tscalingConfig.MaxSize = gfnt.NewInteger(*m.nodeGroup.MaxSize)\n\t}\n\tif m.nodeGroup.DesiredCapacity != nil {\n\t\tscalingConfig.DesiredSize = gfnt.NewInteger(*m.nodeGroup.DesiredCapacity)\n\t}\n\tmanagedResource := &gfneks.Nodegroup{\n\t\tClusterName: gfnt.NewString(m.clusterConfig.Metadata.Name),\n\t\tNodegroupName: gfnt.NewString(m.nodeGroup.Name),\n\t\tScalingConfig: &scalingConfig,\n\t\tSubnets: subnets,\n\t\t// Currently the API supports specifying only one instance type\n\t\tInstanceTypes: gfnt.NewStringSlice(m.nodeGroup.InstanceType),\n\t\tAmiType: gfnt.NewString(getAMIType(m.nodeGroup.InstanceType)),\n\t\tNodeRole: nodeRole,\n\t\tLabels: m.nodeGroup.Labels,\n\t\tTags: m.nodeGroup.Tags,\n\t}\n\n\tif api.IsEnabled(m.nodeGroup.SSH.Allow) {\n\t\tmanagedResource.RemoteAccess = &gfneks.Nodegroup_RemoteAccess{\n\t\t\tEc2SshKey: gfnt.NewString(*m.nodeGroup.SSH.PublicKeyName),\n\t\t\tSourceSecurityGroups: gfnt.NewStringSlice(m.nodeGroup.SSH.SourceSecurityGroupIDs...),\n\t\t}\n\t}\n\tif m.nodeGroup.VolumeSize != nil {\n\t\tmanagedResource.DiskSize = gfnt.NewInteger(*m.nodeGroup.VolumeSize)\n\t}\n\n\tm.newResource(\"ManagedNodeGroup\", managedResource)\n\n\treturn nil\n}",
"func CreateChaosResource(testsDetails *types.TestDetails, fileData []byte, namespace string, clients environment.ClientSets) error {\n\n\tdecoder := yamlutil.NewYAMLOrJSONDecoder(bytes.NewReader(fileData), 100)\n\n\t// for loop to install all the resouces\n\tfor {\n\t\t//runtime defines conversions between generic types and structs to map query strings to struct objects.\n\t\tvar rawObj runtime.RawExtension\n\t\tif err = decoder.Decode(&rawObj); err != nil {\n\t\t\t// if the object is null, successfully installed all manifest\n\t\t\tif rawObj.Raw == nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\n\t\t// NewDecodingSerializer adds YAML decoding support to a serializer that supports JSON.\n\t\tobj, gvk, _ := yaml.NewDecodingSerializer(unstructured.UnstructuredJSONScheme).Decode(rawObj.Raw, nil, nil)\n\t\tunstructuredMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tunstructuredObj := &unstructured.Unstructured{Object: unstructuredMap}\n\n\t\t// GetAPIGroupResources uses the provided discovery client to gather\n\t\t// discovery information and populate a slice of APIGroupResources.\n\t\tgr, err := restmapper.GetAPIGroupResources(clients.KubeClient.DiscoveryClient)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tmapper := restmapper.NewDiscoveryRESTMapper(gr)\n\n\t\t// RESTMapping returns a struct representing the resource path and conversion interfaces a\n\t\t// RESTClient should use to operate on the provided group/kind in order of versions.\n\t\tmapping, err := mapper.RESTMapping(gvk.GroupKind(), gvk.Version)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t//ResourceInterface is an API interface to a specific resource under a dynamic client\n\t\tvar dri dynamic.ResourceInterface\n\t\tif mapping.Scope.Name() == meta.RESTScopeNameNamespace {\n\t\t\tunstructuredObj.SetNamespace(namespace)\n\t\t\tdri = clients.DynamicClient.Resource(mapping.Resource).Namespace(unstructuredObj.GetNamespace())\n\t\t} else {\n\t\t\tdri = clients.DynamicClient.Resource(mapping.Resource)\n\t\t}\n\n\t\t// Create Chaos Resource using dynamic resource interface\n\t\tif _, err := dri.Create(unstructuredObj, v1.CreateOptions{}); err != nil {\n\t\t\tif !k8serrors.IsAlreadyExists(err) {\n\t\t\t\treturn err\n\t\t\t} else {\n\t\t\t\tif unstructuredObj.GetKind() == \"ChaosEngine\" {\n\t\t\t\t\treturn UpdateEngine(testsDetails, clients)\n\t\t\t\t} else if unstructuredObj.GetKind() == \"ChaosExperiment\" {\n\t\t\t\t\treturn UpdateExperiment(testsDetails, clients)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (r *RPC) ResourceAll(c context.Context, a *struct{}, res *[]*model.Resource) (err error) {\n\t*res = r.s.ResourceAll(c)\n\treturn\n}",
"func (et *emulatorTest) populateResources(ctx context.Context, c *Client, resources []storage_v1_tests.Resource) {\n\tfor _, resource := range resources {\n\t\tswitch resource {\n\t\tcase storage_v1_tests.Resource_BUCKET:\n\t\t\tbkt := c.Bucket(bucketIDs.New())\n\t\t\tif err := bkt.Create(ctx, projectID, &BucketAttrs{}); err != nil {\n\t\t\t\tet.Fatalf(\"creating bucket: %v\", err)\n\t\t\t}\n\t\t\tattrs, err := bkt.Attrs(ctx)\n\t\t\tif err != nil {\n\t\t\t\tet.Fatalf(\"getting bucket attrs: %v\", err)\n\t\t\t}\n\t\t\tet.resources.bucket = attrs\n\t\tcase storage_v1_tests.Resource_OBJECT:\n\t\t\t// Assumes bucket has been populated first.\n\t\t\tobj := c.Bucket(et.resources.bucket.Name).Object(objectIDs.New())\n\t\t\tw := obj.NewWriter(ctx)\n\t\t\tif _, err := w.Write(randomBytesToWrite); err != nil {\n\t\t\t\tet.Fatalf(\"writing object: %v\", err)\n\t\t\t}\n\t\t\tif err := w.Close(); err != nil {\n\t\t\t\tet.Fatalf(\"closing object: %v\", err)\n\t\t\t}\n\t\t\tattrs, err := obj.Attrs(ctx)\n\t\t\tif err != nil {\n\t\t\t\tet.Fatalf(\"getting object attrs: %v\", err)\n\t\t\t}\n\t\t\tet.resources.object = attrs\n\t\tcase storage_v1_tests.Resource_NOTIFICATION:\n\t\t\t// Assumes bucket has been populated first.\n\t\t\tn, err := c.Bucket(et.resources.bucket.Name).AddNotification(ctx, &Notification{\n\t\t\t\tTopicProjectID: projectID,\n\t\t\t\tTopicID: notificationIDs.New(),\n\t\t\t\tPayloadFormat: JSONPayload,\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tet.Fatalf(\"adding notification: %v\", err)\n\t\t\t}\n\t\t\tet.resources.notification = n\n\t\tcase storage_v1_tests.Resource_HMAC_KEY:\n\t\t\tkey, err := c.CreateHMACKey(ctx, projectID, serviceAccountEmail)\n\t\t\tif err != nil {\n\t\t\t\tet.Fatalf(\"creating HMAC key: %v\", err)\n\t\t\t}\n\t\t\tet.resources.hmacKey = key\n\t\t}\n\t}\n}",
"func (m *masterService) allocateContainers() {\n\tdefMap := m.db.ListDefinitions()\n\tcontMap := m.db.ListContainers()\n\tnodeMap := m.db.ListNodes()\n\n\t//\n\t// definition -> container map\n\t//\n\tdefContMapList := make(map[string][]*model.Container)\n\tfor _, cont := range contMap {\n\t\tconts := defContMapList[cont.DefinitionName]\n\t\tconts = append(conts, cont)\n\t\tdefContMapList[cont.DefinitionName] = conts\n\t}\n\n\t//\n\t// node -> container map\n\t//\n\tnodeContMap := make(map[string][]*model.Container)\n\tfor nodeName := range nodeMap {\n\t\tconts := make([]*model.Container, 0)\n\t\tfor _, cont := range contMap {\n\t\t\tif cont.NodeName == nodeName {\n\t\t\t\tconts = append(conts, cont)\n\t\t\t}\n\t\t}\n\t\tnodeContMap[nodeName] = conts\n\t}\n\n\t//\n\t// todo\n\t//\n\tfor k, def := range defMap {\n\t\tconts := defContMapList[k]\n\t\tn := len(conts)\n\t\tif def.Count < n {\n\t\t\t// deallocate some containers for definition\n\t\t\tdiff := n - def.Count\n\t\t\tlog.Info(\"Adjusting container count (%d delta)\", diff)\n\t\t\tfor i := 0; i < diff; i++ {\n\t\t\t\tidx := rand.Intn(len(conts))\n\t\t\t\tcont := conts[idx]\n\t\t\t\tconts = append(conts[:idx], conts[idx+1:]...)\n\t\t\t\tlog.Info(\"Deleting container id %s/%s\", cont.ContainerID, cont.Name)\n\t\t\t\tm.db.DeleteContainer(cont.ContainerID)\n\t\t\t}\n\t\t} else if def.Count > n {\n\t\t\t// allocate more containers for definition\n\t\t\tdiff := def.Count - n\n\t\t\tlog.Info(\"Adjusting container count (%d delta)\", diff)\n\t\t\tfor i := 0; i < diff; i++ {\n\t\t\t\tc := &model.Container{}\n\t\t\t\tc.Name = fmt.Sprintf(\"%s-%d\", def.Name, m.db.NextAutoIncrement(\"inc.container\", def.Name))\n\t\t\t\tc.DefinitionName = def.Name\n\n\t\t\t\t//\n\t\t\t\t// find node with least numbers of containers\n\t\t\t\t//\n\t\t\t\tcurrentN := 999999999\n\t\t\t\tvar currentNodeName string\n\t\t\t\tlog.Debug(\"nodeContMap: %s\", nodeContMap)\n\t\t\t\tfor nodeName, contSlice := range nodeContMap {\n\t\t\t\t\tn := len(contSlice)\n\t\t\t\t\tlog.Debug(\"checking node for number of containers (%d) less than %d\", n, currentN)\n\t\t\t\t\tif currentN > n {\n\t\t\t\t\t\tcurrentNodeName = nodeName\n\t\t\t\t\t\tcurrentN = n\n\t\t\t\t\t}\n\n\t\t\t\t}\n\t\t\t\tif currentNodeName == \"\" {\n\t\t\t\t\tlog.Warn(\"Not able to create node %s...no nodes available!\", c.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tc.NodeName = currentNodeName\n\n\t\t\t\t//\n\t\t\t\tc.Image = def.Image\n\t\t\t\tc.Running = false\n\t\t\t\tc.HTTPPort = def.HTTPPort\n\t\t\t\t// generate a mapping nodeHttpPort -> httpPort\n\t\t\t\tif c.HTTPPort > 0 {\n\t\t\t\t\tc.NodeHTTPPort = minHTTPPort + m.db.NextAutoIncrement(\"http.port\", \"http.port\")\n\t\t\t\t}\n\t\t\t\tlog.Info(\"Creating container id %s/%s\", c.ContainerID, c.Name)\n\t\t\t\tif err := m.db.SaveContainer(c); err != nil {\n\t\t\t\t\tlog.Error(\"Error saving container %s\", c.Name)\n\t\t\t\t} else {\n\t\t\t\t\tnodeContMap[c.NodeName] = append(nodeContMap[c.NodeName], c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func PodRequestsAndLimits(\n\tpod *corev1.Pod) (reqs map[corev1.ResourceName]resource.Quantity, limits map[corev1.ResourceName]resource.Quantity) {\n\treqs, limits = map[corev1.ResourceName]resource.Quantity{}, map[corev1.ResourceName]resource.Quantity{}\n\tfor _, container := range pod.Spec.Containers {\n\t\tfor name, quantity := range container.Resources.Requests {\n\t\t\tif value, ok := reqs[name]; !ok {\n\t\t\t\treqs[name] = quantity.DeepCopy()\n\t\t\t} else {\n\t\t\t\tvalue.Add(quantity)\n\t\t\t\treqs[name] = value\n\t\t\t}\n\t\t}\n\t\tfor name, quantity := range container.Resources.Limits {\n\t\t\tif value, ok := limits[name]; !ok {\n\t\t\t\tlimits[name] = quantity.DeepCopy()\n\t\t\t} else {\n\t\t\t\tvalue.Add(quantity)\n\t\t\t\tlimits[name] = value\n\t\t\t}\n\t\t}\n\t}\n\t// init containers define the minimum of any resource\n\tfor _, container := range pod.Spec.InitContainers {\n\t\tfor name, quantity := range container.Resources.Requests {\n\t\t\tvalue, ok := reqs[name]\n\t\t\tif !ok {\n\t\t\t\treqs[name] = quantity.DeepCopy()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif quantity.Cmp(value) > 0 {\n\t\t\t\treqs[name] = quantity.DeepCopy()\n\t\t\t}\n\t\t}\n\t\tfor name, quantity := range container.Resources.Limits {\n\t\t\tvalue, ok := limits[name]\n\t\t\tif !ok {\n\t\t\t\tlimits[name] = quantity.DeepCopy()\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif quantity.Cmp(value) > 0 {\n\t\t\t\tlimits[name] = quantity.DeepCopy()\n\t\t\t}\n\t\t}\n\t}\n\treturn\n}",
"func (p *DockerPod) Init() error {\n\tp.status = container.PodStatus_INIT\n\tp.message = \"Pod is initing\"\n\n\tenvHost := container.BcsKV{\n\t\tKey: \"BCS_CONTAINER_IP\",\n\t\tValue: util.GetIPAddress(),\n\t}\n\tp.netTask.Env = append(p.netTask.Env, envHost)\n\t//assignment for environments\n\tcontainer.EnvOperCopy(p.netTask)\n\n\tcleanExtendedResourceFunc := func() {\n\t\tif p.netTask != nil {\n\t\t\tfor _, ex := range p.netTask.ExtendedResources {\n\t\t\t\tif err := p.resourceManager.ReleaseExtendedResources(ex.Name, p.netTask.TaskId); err != nil {\n\t\t\t\t\t// do not break\n\t\t\t\t\tlogs.Errorf(\"release extended resources %v failed, err %s\", ex, err.Error())\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tvar extendedErr error\n\t//if task contains extended resources, need connect device plugin to allocate resources\n\tfor _, ex := range p.netTask.ExtendedResources {\n\t\tlogs.Infof(\"task %s contains extended resource %s, then allocate it\", p.netTask.TaskId, ex.Name)\n\t\tenvs, err := p.resourceManager.ApplyExtendedResources(ex, p.netTask.TaskId)\n\t\tif err != nil {\n\t\t\tlogs.Errorf(\"apply extended resource failed, err %s\", err.Error())\n\t\t\textendedErr = err\n\t\t\tbreak\n\t\t}\n\t\tlogs.Infof(\"add env %v for task %s\", envs, p.netTask.TaskId)\n\n\t\t//append response docker envs to task.envs\n\t\tfor k, v := range envs {\n\t\t\tkv := container.BcsKV{\n\t\t\t\tKey: k,\n\t\t\t\tValue: v,\n\t\t\t}\n\t\t\tp.netTask.Env = append(p.netTask.Env, kv)\n\t\t}\n\t}\n\t//if allocate extended resource failed, then return and exit\n\tif extendedErr != nil {\n\t\tlogs.Errorf(extendedErr.Error())\n\t\tp.status = container.PodStatus_FAILED\n\t\tp.message = extendedErr.Error()\n\t\tcleanExtendedResourceFunc()\n\t\treturn extendedErr\n\t}\n\n\t//fix(developerJim): all containers in pod can not create PortMappings separately,\n\t// so we need to copy all PortMappings from other containers to Network\n\t// container, network container applies all PortMappings with docker\n\tp.copyPortMappings()\n\n\t//step: creating network container\n\tvar createErr error\n\tif p.netTask.RuntimeConf, createErr = p.conClient.CreateContainer(p.netTask.Name, p.netTask); createErr != nil {\n\t\tlogs.Errorf(\"DockerPod init failed in Creating master container. 
err: %s\\n\", createErr.Error())\n\t\tp.status = container.PodStatus_FAILED\n\t\tp.message = createErr.Error()\n\t\tcleanExtendedResourceFunc()\n\t\treturn createErr\n\t}\n\tp.netTask.RuntimeConf.Status = container.ContainerStatus_CREATED\n\tp.netTask.RuntimeConf.Message = \"container is created\"\n\tp.netTask.RuntimeConf.Resource = p.netTask.Resource\n\n\tlogs.Infof(\"task %s cpu %f memory %f\", p.netTask.Name, p.netTask.Resource.Cpus, p.netTask.Resource.Mem)\n\n\t//setting preStart\n\tif p.events != nil && p.events.PreStart != nil {\n\t\tif preErr := p.events.PreStart(p.netTask); preErr != nil {\n\t\t\tp.conClient.RemoveContainer(p.netTask.Name, true)\n\t\t\tp.netTask.RuntimeConf.Status = container.ContainerStatus_EXITED\n\t\t\tp.netTask.RuntimeConf.Message = \"container PreSetting failed: \" + preErr.Error()\n\t\t\tp.status = container.PodStatus_FAILED\n\t\t\tp.message = preErr.Error()\n\t\t\tcleanExtendedResourceFunc()\n\t\t\treturn preErr\n\t\t}\n\t}\n\n\tif err := p.conClient.StartContainer(p.netTask.RuntimeConf.ID); err != nil {\n\t\tlogs.Errorln(\"DockerPod init failed in Starting master container, err: \", err.Error())\n\t\tp.conClient.RemoveContainer(p.netTask.RuntimeConf.ID, true)\n\t\tp.status = container.PodStatus_FAILED\n\t\tp.message = err.Error()\n\t\tp.netTask.RuntimeConf.Status = container.ContainerStatus_EXITED\n\t\tp.netTask.RuntimeConf.Message = \"container start failed: \" + err.Error()\n\t\tcleanExtendedResourceFunc()\n\t\treturn err\n\t}\n\t//todo(developerJim): is it useful to check status? or just waiting for containerMonitor\n\tinfo, conErr := p.conClient.InspectContainer(p.netTask.RuntimeConf.ID)\n\tif conErr != nil {\n\t\tlogs.Errorln(\"DockerPod init failed in inspecting master container, err: \", conErr.Error())\n\t\tp.status = container.PodStatus_FAILED\n\t\tp.message = conErr.Error()\n\t\tcleanExtendedResourceFunc()\n\t\treturn conErr\n\t}\n\tif info.Status != container.ContainerStatus_RUNNING {\n\t\tlogs.Errorf(\"DockerPod init stage failed, inspectContainer %s, but %s needed\\n\", info.Status, container.ContainerStatus_RUNNING)\n\t\tp.status = container.PodStatus_FAILED\n\t\tp.message = \"docker pod master container init failed\"\n\t\tcleanExtendedResourceFunc()\n\t\treturn fmt.Errorf(\"docker pod master container init failed\")\n\t}\n\tp.cnmIPAddr = info.IPAddress\n\tp.netTask.RuntimeConf.Message = \"container is starting\"\n\tp.netTask.RuntimeConf.NodeAddress = util.GetIPAddress()\n\tp.netTask.RuntimeConf.IPAddress = info.IPAddress\n\tp.netTask.RuntimeConf.NetworkMode = info.NetworkMode\n\tp.runningContainer[p.netTask.RuntimeConf.Name] = p.netTask.RuntimeConf\n\tlogs.Infof(\"DockerPod treat container [%s] net container, ip: %s\\n\", p.netTask.RuntimeConf.Name, p.cnmIPAddr)\n\treturn nil\n}",
"func (cm *Runc) refreshAll() {\n\tlist, err := ioutil.ReadDir(cm.opts.root)\n\truncFailOnErr(err)\n\n\tfor _, i := range list {\n\t\tif i.IsDir() {\n\t\t\tname := i.Name()\n\t\t\t// attempt to load\n\t\t\tlibc := cm.GetLibc(name)\n\t\t\tif libc == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t_ = cm.MustGet(i.Name()) // ensure container exists\n\t\t}\n\t}\n\n\t// queue all existing containers for refresh\n\tfor id, _ := range cm.containers {\n\t\tcm.needsRefresh <- id\n\t}\n\tlog.Debugf(\"queued %d containers for refresh\", len(cm.containers))\n}",
"func (c *container) AddUnifiedResourcesFromAnnotations(annotationsMap map[string]string) error {\n\tif c.config == nil || c.config.Labels == nil {\n\t\treturn nil\n\t}\n\tcontainerName := c.config.Labels[kubeletTypes.KubernetesContainerNameLabel]\n\tif containerName == \"\" {\n\t\treturn nil\n\t}\n\n\tannotationKey := fmt.Sprintf(\"%s.%s\", crioann.UnifiedCgroupAnnotation, containerName)\n\tannotation := annotationsMap[annotationKey]\n\tif annotation == \"\" {\n\t\treturn nil\n\t}\n\n\tif c.spec.Config.Linux == nil {\n\t\tc.spec.Config.Linux = &rspec.Linux{}\n\t}\n\tif c.spec.Config.Linux.Resources == nil {\n\t\tc.spec.Config.Linux.Resources = &rspec.LinuxResources{}\n\t}\n\tif c.spec.Config.Linux.Resources.Unified == nil {\n\t\tc.spec.Config.Linux.Resources.Unified = make(map[string]string)\n\t}\n\tfor _, r := range strings.Split(annotation, \";\") {\n\t\tparts := strings.SplitN(r, \"=\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn fmt.Errorf(\"invalid annotation %q\", crioann.UnifiedCgroupAnnotation)\n\t\t}\n\t\td, err := b64.StdEncoding.DecodeString(parts[1])\n\t\t// if the value is not specified in base64, then use its raw value.\n\t\tv := \"\"\n\t\tif err == nil {\n\t\t\tv = string(d)\n\t\t} else {\n\t\t\tv = parts[1]\n\t\t}\n\t\tc.spec.Config.Linux.Resources.Unified[parts[0]] = v\n\t}\n\n\treturn nil\n}",
"func BuildResources(namespace string, args []string) ([]*pb.Resource, error) {\n\tswitch len(args) {\n\tcase 0:\n\t\treturn nil, errors.New(\"No resource arguments provided\")\n\tcase 1:\n\t\treturn parseResources(namespace, \"\", args)\n\tdefault:\n\t\tif res, err := k8s.CanonicalResourceNameFromFriendlyName(args[0]); err == nil && res != k8s.All {\n\t\t\t// --namespace my-ns deploy foo1 foo2 ...\n\t\t\treturn parseResources(namespace, args[0], args[1:])\n\t\t}\n\n\t\treturn parseResources(namespace, \"\", args)\n\t}\n}",
"func (m *podManager) processCNIRequests() {\n\tfor request := range m.requests {\n\t\tresult := m.processRequest(request)\n\t\trequest.Result <- result\n\t}\n\tpanic(\"stopped processing CNI pod requests!\")\n}",
"func WaitForPodsRunningReady(c clientset.Interface, ns string, minPods, allowedNotReadyPods int32, timeout time.Duration, ignoreLabels map[string]string) error {\n\tif minPods == -1 || allowedNotReadyPods == -1 {\n\t\treturn nil\n\t}\n\n\tignoreSelector := labels.SelectorFromSet(map[string]string{})\n\tstart := time.Now()\n\tframework.Logf(\"Waiting up to %v for all pods (need at least %d) in namespace '%s' to be running and ready\",\n\t\ttimeout, minPods, ns)\n\tvar ignoreNotReady bool\n\tbadPods := []v1.Pod{}\n\tdesiredPods := 0\n\tnotReady := int32(0)\n\tvar lastAPIError error\n\n\tif wait.PollImmediate(poll, timeout, func() (bool, error) {\n\t\t// We get the new list of pods, replication controllers, and\n\t\t// replica sets in every iteration because more pods come\n\t\t// online during startup and we want to ensure they are also\n\t\t// checked.\n\t\treplicas, replicaOk := int32(0), int32(0)\n\t\t// Clear API error from the last attempt in case the following calls succeed.\n\t\tlastAPIError = nil\n\n\t\trcList, err := c.CoreV1().ReplicationControllers(ns).List(context.TODO(), metav1.ListOptions{})\n\t\tlastAPIError = err\n\t\tif err != nil {\n\t\t\treturn handleWaitingAPIError(err, false, \"listing replication controllers in namespace %s\", ns)\n\t\t}\n\t\tfor _, rc := range rcList.Items {\n\t\t\treplicas += *rc.Spec.Replicas\n\t\t\treplicaOk += rc.Status.ReadyReplicas\n\t\t}\n\n\t\trsList, err := c.AppsV1().ReplicaSets(ns).List(context.TODO(), metav1.ListOptions{})\n\t\tlastAPIError = err\n\t\tif err != nil {\n\t\t\treturn handleWaitingAPIError(err, false, \"listing replication sets in namespace %s\", ns)\n\t\t}\n\t\tfor _, rs := range rsList.Items {\n\t\t\treplicas += *rs.Spec.Replicas\n\t\t\treplicaOk += rs.Status.ReadyReplicas\n\t\t}\n\n\t\tpodList, err := c.CoreV1().Pods(ns).List(context.TODO(), metav1.ListOptions{})\n\t\tlastAPIError = err\n\t\tif err != nil {\n\t\t\treturn handleWaitingAPIError(err, false, \"listing pods in namespace %s\", ns)\n\t\t}\n\t\tnOk := int32(0)\n\t\tnotReady = int32(0)\n\t\tbadPods = []v1.Pod{}\n\t\tdesiredPods = len(podList.Items)\n\t\tfor _, pod := range podList.Items {\n\t\t\tif len(ignoreLabels) != 0 && ignoreSelector.Matches(labels.Set(pod.Labels)) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tres, err := testutils.PodRunningReady(&pod)\n\t\t\tswitch {\n\t\t\tcase res && err == nil:\n\t\t\t\tnOk++\n\t\t\tcase pod.Status.Phase == v1.PodSucceeded:\n\t\t\t\tframework.Logf(\"The status of Pod %s is Succeeded, skipping waiting\", pod.ObjectMeta.Name)\n\t\t\t\t// it doesn't make sense to wait for this pod\n\t\t\t\tcontinue\n\t\t\tcase pod.Status.Phase != v1.PodFailed:\n\t\t\t\tframework.Logf(\"The status of Pod %s is %s (Ready = false), waiting for it to be either Running (with Ready = true) or Failed\", pod.ObjectMeta.Name, pod.Status.Phase)\n\t\t\t\tnotReady++\n\t\t\t\tbadPods = append(badPods, pod)\n\t\t\tdefault:\n\t\t\t\tif metav1.GetControllerOf(&pod) == nil {\n\t\t\t\t\tframework.Logf(\"Pod %s is Failed, but it's not controlled by a controller\", pod.ObjectMeta.Name)\n\t\t\t\t\tbadPods = append(badPods, pod)\n\t\t\t\t}\n\t\t\t\t// ignore failed pods that are controlled by some controller\n\t\t\t}\n\t\t}\n\n\t\tframework.Logf(\"%d / %d pods in namespace '%s' are running and ready (%d seconds elapsed)\",\n\t\t\tnOk, len(podList.Items), ns, int(time.Since(start).Seconds()))\n\t\tframework.Logf(\"expected %d pod replicas in namespace '%s', %d are Running and Ready.\", replicas, ns, replicaOk)\n\n\t\tif replicaOk == replicas && nOk >= minPods && len(badPods) 
== 0 {\n\t\t\treturn true, nil\n\t\t}\n\t\tignoreNotReady = (notReady <= allowedNotReadyPods)\n\t\tLogPodStates(badPods)\n\t\treturn false, nil\n\t}) != nil {\n\t\tif !ignoreNotReady {\n\t\t\treturn errorBadPodsStates(badPods, desiredPods, ns, \"RUNNING and READY\", timeout, lastAPIError)\n\t\t}\n\t\tframework.Logf(\"Number of not-ready pods (%d) is below the allowed threshold (%d).\", notReady, allowedNotReadyPods)\n\t}\n\treturn nil\n}",
"func generateResourceList(mgr manager.Manager, s *releasev1.HelmRelease) (kube.ResourceList, error) {\n\tchartDir, err := downloadChart(mgr.GetClient(), s)\n\tif err != nil {\n\t\tklog.Error(err, \" - Failed to download the chart\")\n\t\treturn nil, err\n\t}\n\n\tvar values map[string]interface{}\n\n\treqBodyBytes := new(bytes.Buffer)\n\n\terr = json.NewEncoder(reqBodyBytes).Encode(s.Spec)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = yaml.Unmarshal(reqBodyBytes.Bytes(), &values)\n\tif err != nil {\n\t\tklog.Error(err, \" - Failed to Unmarshal the spec \", s.Spec)\n\t\treturn nil, err\n\t}\n\n\tklog.V(3).Info(\"ChartDir: \", chartDir)\n\n\tchart, err := loader.LoadDir(chartDir)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load chart dir: %w\", err)\n\t}\n\n\trcg, err := newRESTClientGetter(mgr, s.Namespace)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get REST client getter from manager: %w\", err)\n\t}\n\n\tkubeClient := kube.New(rcg)\n\n\tactionConfig := &action.Configuration{}\n\tif err := actionConfig.Init(rcg, s.GetNamespace(), \"secret\", func(_ string, _ ...interface{}) {}); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to initialized actionConfig: %w\", err)\n\t}\n\n\tinstall := action.NewInstall(actionConfig)\n\tinstall.ReleaseName = s.Name\n\tinstall.Namespace = s.Namespace\n\tinstall.DryRun = true\n\tinstall.ClientOnly = true\n\tinstall.Replace = true\n\n\trelease, err := install.Run(chart, values)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresources, err := kubeClient.Build(bytes.NewBufferString(release.Manifest), false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to build kubernetes objects from release manifest: %w\", err)\n\t}\n\n\treturn resources, nil\n}",
"func (handler *ResourceHandler) handlePostResources(w http.ResponseWriter, r *http.Request) {\n\tvar req postResourcesRequest\n\tif err := json.NewDecoder(r.Body).Decode(&req); err != nil {\n\t\thttperror.WriteErrorResponse(w, ErrInvalidJSON, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\t_, err := govalidator.ValidateStruct(req)\n\tif err != nil {\n\t\thttperror.WriteErrorResponse(w, ErrInvalidRequestFormat, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\tvar resourceControlType portainer.ResourceControlType\n\tswitch req.Type {\n\tcase \"container\":\n\t\tresourceControlType = portainer.ContainerResourceControl\n\tcase \"service\":\n\t\tresourceControlType = portainer.ServiceResourceControl\n\tcase \"volume\":\n\t\tresourceControlType = portainer.VolumeResourceControl\n\tdefault:\n\t\thttperror.WriteErrorResponse(w, portainer.ErrInvalidResourceControlType, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\tif len(req.Users) == 0 && len(req.Teams) == 0 && !req.AdministratorsOnly {\n\t\thttperror.WriteErrorResponse(w, ErrInvalidRequestFormat, http.StatusBadRequest, handler.Logger)\n\t\treturn\n\t}\n\n\trc, err := handler.ResourceControlService.ResourceControlByResourceID(req.ResourceID)\n\tif err != nil && err != portainer.ErrResourceControlNotFound {\n\t\thttperror.WriteErrorResponse(w, err, http.StatusInternalServerError, handler.Logger)\n\t\treturn\n\t}\n\tif rc != nil {\n\t\thttperror.WriteErrorResponse(w, portainer.ErrResourceControlAlreadyExists, http.StatusConflict, handler.Logger)\n\t\treturn\n\t}\n\n\tvar userAccesses = make([]portainer.UserResourceAccess, 0)\n\tfor _, v := range req.Users {\n\t\tuserAccess := portainer.UserResourceAccess{\n\t\t\tUserID: portainer.UserID(v),\n\t\t\tAccessLevel: portainer.ReadWriteAccessLevel,\n\t\t}\n\t\tuserAccesses = append(userAccesses, userAccess)\n\t}\n\n\tvar teamAccesses = make([]portainer.TeamResourceAccess, 0)\n\tfor _, v := range req.Teams {\n\t\tteamAccess := portainer.TeamResourceAccess{\n\t\t\tTeamID: portainer.TeamID(v),\n\t\t\tAccessLevel: portainer.ReadWriteAccessLevel,\n\t\t}\n\t\tteamAccesses = append(teamAccesses, teamAccess)\n\t}\n\n\tresourceControl := portainer.ResourceControl{\n\t\tResourceID: req.ResourceID,\n\t\tSubResourceIDs: req.SubResourceIDs,\n\t\tType: resourceControlType,\n\t\tAdministratorsOnly: req.AdministratorsOnly,\n\t\tUserAccesses: userAccesses,\n\t\tTeamAccesses: teamAccesses,\n\t}\n\n\tsecurityContext, err := security.RetrieveRestrictedRequestContext(r)\n\tif err != nil {\n\t\thttperror.WriteErrorResponse(w, err, http.StatusInternalServerError, handler.Logger)\n\t\treturn\n\t}\n\n\tif !security.AuthorizedResourceControlCreation(&resourceControl, securityContext) {\n\t\thttperror.WriteErrorResponse(w, portainer.ErrResourceAccessDenied, http.StatusForbidden, handler.Logger)\n\t\treturn\n\t}\n\n\terr = handler.ResourceControlService.CreateResourceControl(&resourceControl)\n\tif err != nil {\n\t\thttperror.WriteErrorResponse(w, err, http.StatusInternalServerError, handler.Logger)\n\t\treturn\n\t}\n\n\treturn\n}",
"func ProtoToRunServiceTemplateContainersResources(p *runpb.RunServiceTemplateContainersResources) *run.ServiceTemplateContainersResources {\n\tif p == nil {\n\t\treturn nil\n\t}\n\tobj := &run.ServiceTemplateContainersResources{\n\t\tCpuIdle: dcl.Bool(p.GetCpuIdle()),\n\t}\n\treturn obj\n}",
"func getRequestedResources(container corev1.Container) (map[string]int64, error) {\n\tfor _, v := range container.Env {\n\t\tif strings.HasPrefix(v.Name, \"FPGA_REGION\") || strings.HasPrefix(v.Name, \"FPGA_AFU\") {\n\t\t\treturn nil, errors.Errorf(\"environment variable '%s' is not allowed\", v.Name)\n\t\t}\n\t}\n\n\t// Container may happen to have Requests, but not Limits. Check Requests first,\n\t// then in the next loop iterate over Limits.\n\tfor resourceName, resourceQuantity := range container.Resources.Requests {\n\t\trname := strings.ToLower(string(resourceName))\n\t\tif !strings.HasPrefix(rname, namespace) {\n\t\t\t// Skip non-FPGA resources in Requests.\n\t\t\tcontinue\n\t\t}\n\n\t\tif container.Resources.Limits[resourceName] != resourceQuantity {\n\t\t\treturn nil, errors.Errorf(\n\t\t\t\t\"'limits' and 'requests' for %q must be equal as extended resources cannot be overcommitted\",\n\t\t\t\trname)\n\t\t}\n\t}\n\n\tresources := make(map[string]int64)\n\tfor resourceName, resourceQuantity := range container.Resources.Limits {\n\t\trname := strings.ToLower(string(resourceName))\n\t\tif !strings.HasPrefix(rname, namespace) {\n\t\t\t// Skip non-FPGA resources in Limits.\n\t\t\tcontinue\n\t\t}\n\n\t\tif container.Resources.Requests[resourceName] != resourceQuantity {\n\t\t\treturn nil, errors.Errorf(\n\t\t\t\t\"'limits' and 'requests' for %q must be equal as extended resources cannot be overcommitted\",\n\t\t\t\trname)\n\t\t}\n\n\t\tquantity, ok := resourceQuantity.AsInt64()\n\t\tif !ok {\n\t\t\treturn nil, errors.Errorf(\"resource quantity isn't of integral type for %q\", rname)\n\t\t}\n\n\t\tresources[rname] = quantity\n\t}\n\n\treturn resources, nil\n}",
"func collectPreferredResources(discovery discovery.DiscoveryInterface) ([]*metav1.APIResourceList, error) {\n\tresources, err := discovery.ServerPreferredNamespacedResources()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, res := range resources {\n\t\tres.APIResources = excludeSubresources(res.APIResources)\n\t\t// Some resources appear not to have permissions to list, need to exclude those.\n\t\tres.APIResources = listAllowed(res.APIResources)\n\t}\n\n\treturn resources, nil\n}",
"func (r *ReconcileKogitoInfra) createRequiredResources(instance *v1alpha1.KogitoInfra) (resources map[reflect.Type][]resource.KubernetesResource, err error) {\n\tresourcesInfinispan, err := infinispan.CreateRequiredResources(instance, r.client)\n\tif err != nil {\n\t\treturn\n\t}\n\tresourcesKafka, err := kafka.CreateRequiredResources(instance)\n\tif err != nil {\n\t\treturn\n\t}\n\tresourcesKeycloak, err := keycloak.CreateRequiredResources(instance)\n\tif err != nil {\n\t\treturn\n\t}\n\tresources = make(map[reflect.Type][]resource.KubernetesResource, len(resourcesInfinispan)+len(resourcesKafka)+len(resourcesKeycloak))\n\tmergeResourceMaps(resources, resourcesKafka, resourcesInfinispan, resourcesKeycloak)\n\treturn\n}",
"func (ds *DevicesScheduler) ReturnPodResources(podInfo *types.PodInfo, nodeInfo *types.NodeInfo) error {\n\tfor index, d := range ds.Devices {\n\t\terr := d.ReturnPodResources(nodeInfo, podInfo, ds.RunGroupScheduler[index])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func updateApp(c *corev1.Container, sfs api.StatefulApp, podSpec *api.PodSpec, cr *api.PerconaXtraDBCluster) (corev1.Container, error) {\n\tres, err := sfs.Resources(podSpec.Resources)\n\tif err != nil {\n\t\treturn *c, fmt.Errorf(\"create resources error: %v\", err)\n\t}\n\n\tif c == nil {\n\t\tappC := sfs.AppContainer(podSpec, cr.Spec.SecretsName)\n\t\tappC.Resources = res\n\t\treturn appC, nil\n\t}\n\n\tif !reflect.DeepEqual(c.Resources, res) {\n\t\tc.Resources = res\n\t}\n\tc.Image = podSpec.Image\n\n\treturn *c, nil\n}",
"func (k *proxy) ListResources(ctx context.Context, labels map[string]string, namespaces ...string) ([]unstructured.Unstructured, error) {\n\tcs, err := k.newClientSet()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc, err := k.NewClient()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Get all the API resources in the cluster.\n\tresourceListBackoff := newReadBackoff()\n\tvar resourceList []*metav1.APIResourceList\n\tif err := retryWithExponentialBackoff(resourceListBackoff, func() error {\n\t\tresourceList, err = cs.Discovery().ServerPreferredResources()\n\t\treturn err\n\t}); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to list api resources\")\n\t}\n\n\t// Exclude from discovery the objects from the cert-manager/provider's CRDs.\n\t// Those objects are not part of the components, and they will eventually be removed when removing the CRD definition.\n\tcrdsToExclude := sets.Set[string]{}\n\n\tcrdList := &apiextensionsv1.CustomResourceDefinitionList{}\n\tif err := retryWithExponentialBackoff(newReadBackoff(), func() error {\n\t\treturn c.List(ctx, crdList)\n\t}); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to list CRDs\")\n\t}\n\tfor _, crd := range crdList.Items {\n\t\tcomponent, isCoreComponent := labels[clusterctlv1.ClusterctlCoreLabel]\n\t\t_, isProviderResource := crd.Labels[clusterv1.ProviderNameLabel]\n\t\tif (isCoreComponent && component == clusterctlv1.ClusterctlCoreLabelCertManagerValue) || isProviderResource {\n\t\t\tfor _, version := range crd.Spec.Versions {\n\t\t\t\tcrdsToExclude.Insert(metav1.GroupVersionKind{\n\t\t\t\t\tGroup: crd.Spec.Group,\n\t\t\t\t\tVersion: version.Name,\n\t\t\t\t\tKind: crd.Spec.Names.Kind,\n\t\t\t\t}.String())\n\t\t\t}\n\t\t}\n\t}\n\n\t// Select resources with list and delete methods (list is required by this method, delete by the callers of this method)\n\tresourceList = discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{\"list\", \"delete\"}}, resourceList)\n\n\tvar ret []unstructured.Unstructured\n\tfor _, resourceGroup := range resourceList {\n\t\tfor _, resourceKind := range resourceGroup.APIResources {\n\t\t\t// Discard the resourceKind that exists in two api groups (we are excluding one of the two groups arbitrarily).\n\t\t\tif resourceGroup.GroupVersion == \"extensions/v1beta1\" &&\n\t\t\t\t(resourceKind.Name == \"daemonsets\" || resourceKind.Name == \"deployments\" || resourceKind.Name == \"replicasets\" || resourceKind.Name == \"networkpolicies\" || resourceKind.Name == \"ingresses\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Continue if the resource is an excluded CRD.\n\t\t\tgv, err := schema.ParseGroupVersion(resourceGroup.GroupVersion)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to parse GroupVersion\")\n\t\t\t}\n\t\t\tif crdsToExclude.Has(metav1.GroupVersionKind{\n\t\t\t\tGroup: gv.Group,\n\t\t\t\tVersion: gv.Version,\n\t\t\t\tKind: resourceKind.Kind,\n\t\t\t}.String()) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// List all the object instances of this resourceKind with the given labels\n\t\t\tif resourceKind.Namespaced {\n\t\t\t\tfor _, namespace := range namespaces {\n\t\t\t\t\tobjList, err := listObjByGVK(ctx, c, resourceGroup.GroupVersion, resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels), client.InNamespace(namespace)})\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\t\t\t\t\tret = append(ret, objList.Items...)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tobjList, err := listObjByGVK(ctx, c, resourceGroup.GroupVersion, 
resourceKind.Kind, []client.ListOption{client.MatchingLabels(labels)})\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tret = append(ret, objList.Items...)\n\t\t\t}\n\t\t}\n\t}\n\treturn ret, nil\n}",
"func (sp *StackPackage) applyResourceAnnotations(crdPath string, crd *apiextensions.CustomResourceDefinition) {\n\t// TODO(displague) which pattern should associate *resource.yaml to their matching *crd.yaml files?\n\t// * resource.yaml contain \"id=_kind_\" (or gvk)\n\t// * limit one-crd per path\n\t// * file names match their CRD: [_group_]/[_kind_.[_version_.]]{resource,crd}.yaml\n\tresourcePathsOrdered := orderStackResourceKeys(sp.Resources)\n\tfor _, resourcePath := range resourcePathsOrdered {\n\t\tdir := filepath.Dir(resourcePath)\n\t\tresource := sp.Resources[resourcePath]\n\t\tif strings.HasPrefix(crdPath, dir) && strings.EqualFold(resource.ID, crd.Spec.Names.Kind) {\n\t\t\tcrd.ObjectMeta.Annotations[annotationResourceTitle] = resource.Title\n\t\t\tcrd.ObjectMeta.Annotations[annotationResourceTitlePlural] = resource.TitlePlural\n\t\t\tcrd.ObjectMeta.Annotations[annotationResourceCategory] = resource.Category\n\t\t\tcrd.ObjectMeta.Annotations[annotationResourceDescription] = resource.Description\n\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func toOCIResources(r *pb.LinuxContainerResources) *rspec.LinuxResources {\n\tvar swap int64\n\tmemory := r.GetMemoryLimitInBytes()\n\tif cgroupHasMemorySwap() {\n\t\tswap = memory\n\t}\n\treturn &rspec.LinuxResources{\n\t\tCPU: &rspec.LinuxCPU{\n\t\t\tShares: proto.Uint64(uint64(r.GetCpuShares())),\n\t\t\tQuota: proto.Int64(r.GetCpuQuota()),\n\t\t\tPeriod: proto.Uint64(uint64(r.GetCpuPeriod())),\n\t\t\tCpus: r.GetCpusetCpus(),\n\t\t\tMems: r.GetCpusetMems(),\n\t\t},\n\t\tMemory: &rspec.LinuxMemory{\n\t\t\tLimit: proto.Int64(memory),\n\t\t\tSwap: proto.Int64(swap),\n\t\t},\n\t\t// TODO(runcom): OOMScoreAdj is missing\n\t}\n}",
"func (sp *schemaPuller) PullCRDs(context context.Context, resourceNames ...string) (map[schema.GroupResource]*apiextensionsv1.CustomResourceDefinition, error) {\n\tcrds := map[schema.GroupResource]*apiextensionsv1.CustomResourceDefinition{}\n\t_, apiResourcesLists, err := sp.discoveryClient.ServerGroupsAndResources()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpullAllResources := len(resourceNames) == 0\n\tresourcesToPull := sets.NewString(resourceNames...)\n\n\tapiResourceNames := map[schema.GroupVersion]sets.String{}\n\tfor _, apiResourcesList := range apiResourcesLists {\n\t\tgv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tapiResourceNames[gv] = sets.NewString()\n\t\tfor _, apiResource := range apiResourcesList.APIResources {\n\t\t\tapiResourceNames[gv].Insert(apiResource.Name)\n\t\t}\n\n\t}\n\n\tapiResourcesLists, err = sp.discoveryClient.ServerPreferredResources()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, apiResourcesList := range apiResourcesLists {\n\t\tgv, err := schema.ParseGroupVersion(apiResourcesList.GroupVersion)\n\t\tif err != nil {\n\t\t\tklog.Errorf(\"skipping discovery due to error parsing GroupVersion %s: %v\", apiResourcesList.GroupVersion, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfor _, apiResource := range apiResourcesList.APIResources {\n\t\t\tgroupResource := schema.GroupResource{\n\t\t\t\tGroup: gv.Group,\n\t\t\t\tResource: apiResource.Name,\n\t\t\t}\n\t\t\tif !pullAllResources && !resourcesToPull.Has(groupResource.String()) && !resourcesToPull.Has(apiResource.Name) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif genericcontrolplanescheme.Scheme.IsGroupRegistered(gv.Group) && !genericcontrolplanescheme.Scheme.IsVersionRegistered(gv) {\n\t\t\t\tklog.Warningf(\"ignoring an apiVersion since it is part of the core KCP resources, but not compatible with KCP version: %s\", gv.String())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tgvk := gv.WithKind(apiResource.Kind)\n\t\t\tif genericcontrolplanescheme.Scheme.Recognizes(gvk) || extensionsapiserver.Scheme.Recognizes(gvk) {\n\t\t\t\tklog.Infof(\"ignoring a resource since it is part of the core KCP resources: %s (%s)\", apiResource.Name, gvk.String())\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcrdName := apiResource.Name\n\t\t\tif gv.Group == \"\" {\n\t\t\t\tcrdName = crdName + \".core\"\n\t\t\t} else {\n\t\t\t\tcrdName = crdName + \".\" + gv.Group\n\t\t\t}\n\n\t\t\tvar resourceScope apiextensionsv1.ResourceScope\n\t\t\tif apiResource.Namespaced {\n\t\t\t\tresourceScope = apiextensionsv1.NamespaceScoped\n\t\t\t} else {\n\t\t\t\tresourceScope = apiextensionsv1.ClusterScoped\n\t\t\t}\n\n\t\t\tklog.Infof(\"processing discovery for resource %s (%s)\", apiResource.Name, crdName)\n\t\t\tvar schemaProps apiextensionsv1.JSONSchemaProps\n\t\t\tvar additionalPrinterColumns []apiextensionsv1.CustomResourceColumnDefinition\n\t\t\tcrd, err := sp.crdClient.CustomResourceDefinitions().Get(context, crdName, metav1.GetOptions{})\n\t\t\tif err == nil {\n\t\t\t\tif apihelpers.IsCRDConditionTrue(crd, apiextensionsv1.NonStructuralSchema) {\n\t\t\t\t\tklog.Warningf(\"non-structural schema for resource %s (%s): the resources will not be validated\", apiResource.Name, gvk.String())\n\t\t\t\t\tschemaProps = apiextensionsv1.JSONSchemaProps{\n\t\t\t\t\t\tType: \"object\",\n\t\t\t\t\t\tXPreserveUnknownFields: boolPtr(true),\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tvar versionFound bool\n\t\t\t\t\tfor _, version := range crd.Spec.Versions {\n\t\t\t\t\t\tif version.Name == gv.Version 
{\n\t\t\t\t\t\t\tschemaProps = *version.Schema.OpenAPIV3Schema\n\t\t\t\t\t\t\tadditionalPrinterColumns = version.AdditionalPrinterColumns\n\t\t\t\t\t\t\tversionFound = true\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\tif !versionFound {\n\t\t\t\t\t\tklog.Errorf(\"expected version not found in CRD %s: %s\", crdName, gv.Version)\n\t\t\t\t\t\tschemaProps = apiextensionsv1.JSONSchemaProps{\n\t\t\t\t\t\t\tType: \"object\",\n\t\t\t\t\t\t\tXPreserveUnknownFields: boolPtr(true),\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tif !errors.IsNotFound(err) {\n\t\t\t\t\tklog.Errorf(\"error looking up CRD for %s: %v\", crdName, err)\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t\tprotoSchema := sp.models[gvk]\n\t\t\t\tif protoSchema == nil {\n\t\t\t\t\tklog.Infof(\"ignoring a resource that has no OpenAPI Schema: %s (%s)\", apiResource.Name, gvk.String())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tswaggerSpecDefinitionName := protoSchema.GetPath().String()\n\n\t\t\t\tvar errors []error\n\t\t\t\tconverter := &SchemaConverter{\n\t\t\t\t\tschemaProps: &schemaProps,\n\t\t\t\t\tschemaName: swaggerSpecDefinitionName,\n\t\t\t\t\tvisited: sets.NewString(),\n\t\t\t\t\terrors: &errors,\n\t\t\t\t}\n\t\t\t\tprotoSchema.Accept(converter)\n\t\t\t\tif len(*converter.errors) > 0 {\n\t\t\t\t\tklog.Errorf(\"error during the OpenAPI schema import of resource %s (%s) : %v\", apiResource.Name, gvk.String(), *converter.errors)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\thasSubResource := func(subResource string) bool {\n\t\t\t\tgroupResourceNames := apiResourceNames[gv]\n\t\t\t\tif groupResourceNames != nil {\n\t\t\t\t\treturn groupResourceNames.Has(apiResource.Name + \"/\" + subResource)\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tstatusSubResource := &apiextensionsv1.CustomResourceSubresourceStatus{}\n\t\t\tif !hasSubResource(\"status\") {\n\t\t\t\tstatusSubResource = nil\n\t\t\t}\n\n\t\t\tscaleSubResource := &apiextensionsv1.CustomResourceSubresourceScale{\n\t\t\t\tSpecReplicasPath: \".spec.replicas\",\n\t\t\t\tStatusReplicasPath: \".status.replicas\",\n\t\t\t}\n\t\t\tif !hasSubResource(\"scale\") {\n\t\t\t\tscaleSubResource = nil\n\t\t\t}\n\n\t\t\tcrd = &apiextensionsv1.CustomResourceDefinition{\n\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\tKind: \"CustomResourceDefinition\",\n\t\t\t\t\tAPIVersion: \"apiextensions.k8s.io/v1\",\n\t\t\t\t},\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName: crdName,\n\t\t\t\t\tLabels: map[string]string{},\n\t\t\t\t\tAnnotations: map[string]string{},\n\t\t\t\t},\n\t\t\t\tSpec: apiextensionsv1.CustomResourceDefinitionSpec{\n\t\t\t\t\tGroup: gv.Group,\n\t\t\t\t\tVersions: []apiextensionsv1.CustomResourceDefinitionVersion{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: gv.Version,\n\t\t\t\t\t\t\tSchema: &apiextensionsv1.CustomResourceValidation{\n\t\t\t\t\t\t\t\tOpenAPIV3Schema: &schemaProps,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSubresources: &apiextensionsv1.CustomResourceSubresources{\n\t\t\t\t\t\t\t\tStatus: statusSubResource,\n\t\t\t\t\t\t\t\tScale: scaleSubResource,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tServed: true,\n\t\t\t\t\t\t\tStorage: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tScope: resourceScope,\n\t\t\t\t\tNames: apiextensionsv1.CustomResourceDefinitionNames{\n\t\t\t\t\t\tPlural: apiResource.Name,\n\t\t\t\t\t\tKind: apiResource.Kind,\n\t\t\t\t\t\tCategories: apiResource.Categories,\n\t\t\t\t\t\tShortNames: apiResource.ShortNames,\n\t\t\t\t\t\tSingular: apiResource.SingularName,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\tif len(additionalPrinterColumns) 
!= 0 {\n\t\t\t\tcrd.Spec.Versions[0].AdditionalPrinterColumns = additionalPrinterColumns\n\t\t\t}\n\t\t\tapiextensionsv1.SetDefaults_CustomResourceDefinition(crd)\n\n\t\t\t// In Kubernetes, to make it clear to the API consumer that APIs in *.k8s.io or *.kubernetes.io domains\n\t\t\t// should be following all quality standards of core Kubernetes, CRDs under these domains\n\t\t\t// are expected to go through the API Review process and so must link the API review approval PR\n\t\t\t// in an `api-approved.kubernetes.io` annotation.\n\t\t\t// Without this annotation, a CRD under the *.k8s.io or *.kubernetes.io domains is rejected by the API server\n\t\t\t//\n\t\t\t// Of course here we're simply adding already-known resources of existing physical clusters as CRDs in KCP.\n\t\t\t// But to please this Kubernetes approval requirement, let's add the required annotation in imported CRDs\n\t\t\t// with one of the KCP PRs that hacked Kubernetes CRD support for KCP.\n\t\t\tif apihelpers.IsProtectedCommunityGroup(gv.Group) {\n\t\t\t\tcrd.ObjectMeta.Annotations[\"api-approved.kubernetes.io\"] = \"https://github.com/kcp-dev/kubernetes/pull/4\"\n\t\t\t}\n\t\t\tcrds[groupResource] = crd\n\t\t}\n\t}\n\treturn crds, nil\n}",
"func RunServiceTemplateContainersResourcesToProto(o *run.ServiceTemplateContainersResources) *runpb.RunServiceTemplateContainersResources {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &runpb.RunServiceTemplateContainersResources{}\n\tp.SetCpuIdle(dcl.ValueOrEmptyBool(o.CpuIdle))\n\tmLimits := make(map[string]string, len(o.Limits))\n\tfor k, r := range o.Limits {\n\t\tmLimits[k] = r\n\t}\n\tp.SetLimits(mLimits)\n\treturn p\n}",
"func (g *RoleGenerator) InitResources() error {\n\tclient := g.Args[\"mackerelClient\"].(*mackerel.Client)\n\n\tservices, err := client.FindServices()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, service := range services {\n\t\troles, err := client.FindRoles(service.Name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tg.Resources = append(g.Resources, g.createResources(service.Name, roles)...)\n\t}\n\treturn nil\n}",
"func TestPopulateResources(t *testing.T) {\n\ttestName := \"TestPopulateResources\"\n\n\tbeforeTest()\n\t// kinds to check for status\n\tvar kindsToCheckStatus = map[string]bool{}\n\n\tvar files = []string{\n\t\tKappnavConfigFile,\n\t\tCrdApplication,\n\t\tappBookinfo,\n\t\tappDetails,\n\t\tdeploymentDetailsV1,\n\t\tserviceDetails,\n\t\tingressBookinfo,\n\t\tappProductpage,\n\t\tnetworkpolicyProductpage,\n\t\tdeploymentProcuctpageV1,\n\t\tserviceProductpage,\n\t\tappRatings,\n\t\tdeploymentRatingsV1,\n\t\tserviceRatings,\n\t\tappReviews,\n\t\tnetworkpolicyReviews,\n\t\tdeploymentReviewsV1,\n\t\tdeploymentReviewsV2,\n\t\tdeploymentReviewsV3,\n\t\tserviceReview,\n\t\tcrdFoo,\n\t\tfooExample,\n\t\tappFoo,\n\t\tkappnavCRFile,\n\t}\n\n\titeration0IDs, err := readResourceIDs(files)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t/* Iteration 0: all normal */\n\ttestActions := newTestActions(testName, kindsToCheckStatus)\n\tvar emptyIDs = []resourceID{}\n\ttestActions.addIteration(iteration0IDs, emptyIDs)\n\n\t/* iteration 1: clean up */\n\ttestActions.addIteration(emptyIDs, emptyIDs)\n\n\t/* create a watcher that populates all resources */\n\tclusterWatcher, err := createClusterWatcher(iteration0IDs, testActions, StatusFailureRate)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer clusterWatcher.shutDown()\n\n\t// ensure we can find each resource\n\tfor _, res := range iteration0IDs {\n\t\texists, _ := resourceExists(clusterWatcher, res)\n\t\tif !exists {\n\t\t\tt.Fatal(fmt.Errorf(\"can't find resource for %s\\n,\", res.fileName))\n\t\t}\n\t}\n\n\terr = testActions.transitionAll()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n}",
"func setupResource(ctx context.Context, c *Container, s *specs.Spec) error {\n\tif s.Linux.Resources == nil {\n\t\ts.Linux.Resources = &specs.LinuxResources{}\n\t}\n\n\t// start to setup cpu and memory cgroup\n\tsetupCPU(ctx, c.HostConfig.Resources, s)\n\tsetupMemory(ctx, c.HostConfig.Resources, s)\n\n\t// start to setup blkio cgroup\n\tif err := setupBlkio(ctx, c.HostConfig.Resources, s); err != nil {\n\t\treturn err\n\t}\n\n\t// start to setup device cgroup\n\tif err := setupDevices(ctx, c, s); err != nil {\n\t\treturn err\n\t}\n\n\t// start to setup pids limit\n\ts.Linux.Resources.Pids = &specs.LinuxPids{\n\t\tLimit: c.HostConfig.PidsLimit,\n\t}\n\n\treturn nil\n}",
"func computeClusterResources(nodes map[string]*repository.KubeNode) map[metrics.ResourceType]*repository.KubeDiscoveredResource {\n\t// sum the capacities of the node resources\n\tcomputeResources := make(map[metrics.ResourceType]float64)\n\tfor _, node := range nodes {\n\t\tnodeActive := util.NodeIsReady(node.Node) && util.NodeIsSchedulable(node.Node)\n\t\tif nodeActive {\n\t\t\t// Iterate over all ready and schedulable compute resource types\n\t\t\tfor _, rt := range metrics.KubeComputeResourceTypes {\n\t\t\t\t// get the compute resource if it exists\n\t\t\t\tnodeResource, exists := node.ComputeResources[rt]\n\t\t\t\tif !exists {\n\t\t\t\t\tglog.Errorf(\"Missing %s resource in node %s\", rt, node.Name)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\t// add the capacity to the cluster compute resource map\n\t\t\t\tcomputeCap, exists := computeResources[rt]\n\t\t\t\tif !exists {\n\t\t\t\t\tcomputeCap = nodeResource.Capacity\n\t\t\t\t} else {\n\t\t\t\t\tcomputeCap = computeCap + nodeResource.Capacity\n\t\t\t\t}\n\t\t\t\tcomputeResources[rt] = computeCap\n\t\t\t}\n\t\t}\n\t}\n\n\t// create KubeDiscoveredResource object for each compute resource type\n\tclusterResources := make(map[metrics.ResourceType]*repository.KubeDiscoveredResource)\n\tfor _, rt := range metrics.KubeComputeResourceTypes {\n\t\tcapacity := computeResources[rt]\n\t\tr := &repository.KubeDiscoveredResource{\n\t\t\tType: rt,\n\t\t\tCapacity: capacity,\n\t\t}\n\t\tclusterResources[rt] = r\n\t}\n\treturn clusterResources\n}",
"func PodImages(pod v1.Pod) []string {\n\timages := []string{}\n\tfor _, ic := range pod.Spec.InitContainers {\n\t\timages = append(images, ic.Image)\n\t}\n\tfor _, c := range pod.Spec.Containers {\n\t\timages = append(images, c.Image)\n\t}\n\treturn images\n}",
"func (c *DSControl) collectPods(ctx context.Context, daemonSet *appsv1.DaemonSet) (map[string]v1.Pod, error) {\n\tvar labels map[string]string\n\tif daemonSet.Spec.Selector != nil {\n\t\tlabels = daemonSet.Spec.Selector.MatchLabels\n\t}\n\tpods, err := CollectPods(ctx, daemonSet.Namespace, labels, c.FieldLogger, c.Client, func(ref metav1.OwnerReference) bool {\n\t\treturn ref.Kind == KindDaemonSet && ref.UID == daemonSet.UID\n\t})\n\treturn pods, trace.Wrap(err)\n}",
"func RegisterAllInjectors(ctx context.Context, mgr ctrl.Manager, namespace string, watchCerts bool) error {\n\t// TODO: refactor\n\tsds := &secretDataSource{\n\t\tclient: mgr.GetClient(),\n\t}\n\tcds := &certificateDataSource{\n\t\tclient: mgr.GetClient(),\n\t}\n\tcfg := mgr.GetConfig()\n\tcaBundle, err := dataFromSliceOrFile(cfg.CAData, cfg.CAFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\tkds := &kubeconfigDataSource{\n\t\tapiserverCABundle: caBundle,\n\t}\n\t// Registers a c/r controller for each of APIService, CustomResourceDefinition, Mutating/ValidatingWebhookConfiguration\n\t// TODO: add a flag to allow users to configure which of these controllers should be registered\n\tfor _, setup := range injectorSetups {\n\t\tlog := ctrl.Log.WithValues(\"kind\", setup.resourceName)\n\t\tlog.Info(\"Registering a reconciler for injectable\")\n\t\tr := &genericInjectReconciler{\n\t\t\tinjector: setup.injector,\n\t\t\tnamespace: namespace,\n\t\t\tresourceName: setup.resourceName,\n\t\t\tlog: log,\n\t\t\tClient: mgr.GetClient(),\n\t\t\t// TODO: refactor\n\t\t\tsources: []caDataSource{\n\t\t\t\tsds,\n\t\t\t\tcds,\n\t\t\t\tkds,\n\t\t\t},\n\t\t}\n\n\t\t// Index injectable with a new field. If the injectable's CA is\n\t\t// to be sourced from a Secret, the field's value will be the\n\t\t// namespaced name of the Secret.\n\t\t// This field can then be used as a field selector when listing injectables of this type.\n\t\tsecretTyp := setup.injector.NewTarget().AsObject()\n\t\tif err := mgr.GetFieldIndexer().IndexField(ctx, secretTyp, injectFromSecretPath, injectableCAFromSecretIndexer); err != nil {\n\t\t\terr := fmt.Errorf(\"error making injectable indexable by inject-ca-from-secret annotation: %w\", err)\n\t\t\treturn err\n\t\t}\n\t\tpredicates := predicate.Funcs{\n\t\t\tUpdateFunc: func(e event.UpdateEvent) bool {\n\t\t\t\treturn hasInjectableAnnotation(e.ObjectNew)\n\t\t\t},\n\t\t\tCreateFunc: func(e event.CreateEvent) bool {\n\t\t\t\treturn hasInjectableAnnotation(e.Object)\n\t\t\t},\n\t\t\tDeleteFunc: func(e event.DeleteEvent) bool {\n\t\t\t\treturn hasInjectableAnnotation(e.Object)\n\t\t\t},\n\t\t}\n\n\t\tb := ctrl.NewControllerManagedBy(mgr).\n\t\t\tFor(setup.objType,\n\t\t\t\t// We watch all CRDs,\n\t\t\t\t// Validating/MutatingWebhookConfigurations,\n\t\t\t\t// APIServices because the only way how to tell\n\t\t\t\t// if an object is an injectable is from\n\t\t\t\t// annotation value and this cannot be used to\n\t\t\t\t// filter List/Watch. The earliest point where\n\t\t\t\t// we can use the annotation to filter\n\t\t\t\t// injectables is here where we define which\n\t\t\t\t// objects' events should trigger a reconcile.\n\t\t\t\tbuilder.WithPredicates(predicates)).\n\t\t\tWatches(&source.Kind{Type: new(corev1.Secret)}, handler.EnqueueRequestsFromMapFunc((&secretForInjectableMapper{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tlog: log,\n\t\t\t\tsecretToInjectable: buildSecretToInjectableFunc(setup.listType, setup.resourceName),\n\t\t\t}).Map))\n\t\tif watchCerts {\n\t\t\t// Index injectable with a new field. 
If the injectable's CA is\n\t\t\t// to be sourced from a Certificate's Secret, the field's value will be the\n\t\t\t// namespaced name of the Certificate.\n\t\t\t// This field can then be used as a field selector when listing injectables of this type.\n\t\t\tcertTyp := setup.injector.NewTarget().AsObject()\n\t\t\tif err := mgr.GetFieldIndexer().IndexField(ctx, certTyp, injectFromPath, injectableCAFromIndexer); err != nil {\n\t\t\t\terr := fmt.Errorf(\"error making injectable indexable by inject-ca-from path: %w\", err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tb.Watches(&source.Kind{Type: new(corev1.Secret)}, handler.EnqueueRequestsFromMapFunc((&secretForCertificateMapper{\n\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\tlog: log,\n\t\t\t\tcertificateToInjectable: buildCertToInjectableFunc(setup.listType, setup.resourceName),\n\t\t\t}).Map)).\n\t\t\t\tWatches(&source.Kind{Type: new(cmapi.Certificate)},\n\t\t\t\t\thandler.EnqueueRequestsFromMapFunc((&certMapper{\n\t\t\t\t\t\tClient: mgr.GetClient(),\n\t\t\t\t\t\tlog: log,\n\t\t\t\t\t\ttoInjectable: buildCertToInjectableFunc(setup.listType, setup.resourceName),\n\t\t\t\t\t}).Map))\n\t\t}\n\t\terr := b.Complete(r)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"error registering controller for %s: %w\", setup.objType.GetName(), err)\n\t\t}\n\t}\n\treturn nil\n}",
"func GetAllResources(targetRegions []string, excludeAfter time.Time, resourceTypes []string, configObj config.Config) (*AwsAccountResources, error) {\n\taccount := AwsAccountResources{\n\t\tResources: make(map[string]AwsRegionResource),\n\t}\n\n\tcount := 1\n\ttotalRegions := len(targetRegions)\n\tvar resourcesCache = map[string]map[string][]*string{}\n\n\tfor _, region := range targetRegions {\n\t\tlogging.Logger.Infof(\"Checking region [%d/%d]: %s\", count, totalRegions, region)\n\n\t\tsession, err := session.NewSession(&awsgo.Config{\n\t\t\tRegion: awsgo.String(region)},\n\t\t)\n\n\t\tif err != nil {\n\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t}\n\n\t\tresourcesInRegion := AwsRegionResource{}\n\n\t\t// The order in which resources are nuked is important\n\t\t// because of dependencies between resources\n\n\t\t// ASG Names\n\t\tasGroups := ASGroups{}\n\t\tif IsNukeable(asGroups.ResourceName(), resourceTypes) {\n\t\t\tgroupNames, err := getAllAutoScalingGroups(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(groupNames) > 0 {\n\t\t\t\tasGroups.GroupNames = awsgo.StringValueSlice(groupNames)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, asGroups)\n\t\t\t}\n\t\t}\n\t\t// End ASG Names\n\n\t\t// Launch Configuration Names\n\t\tconfigs := LaunchConfigs{}\n\t\tif IsNukeable(configs.ResourceName(), resourceTypes) {\n\t\t\tconfigNames, err := getAllLaunchConfigurations(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(configNames) > 0 {\n\t\t\t\tconfigs.LaunchConfigurationNames = awsgo.StringValueSlice(configNames)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, configs)\n\t\t\t}\n\t\t}\n\t\t// End Launch Configuration Names\n\n\t\t// LoadBalancer Names\n\t\tloadBalancers := LoadBalancers{}\n\t\tif IsNukeable(loadBalancers.ResourceName(), resourceTypes) {\n\t\t\telbNames, err := getAllElbInstances(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(elbNames) > 0 {\n\t\t\t\tloadBalancers.Names = awsgo.StringValueSlice(elbNames)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, loadBalancers)\n\t\t\t}\n\t\t}\n\t\t// End LoadBalancer Names\n\n\t\t// LoadBalancerV2 Arns\n\t\tloadBalancersV2 := LoadBalancersV2{}\n\t\tif IsNukeable(loadBalancersV2.ResourceName(), resourceTypes) {\n\t\t\telbv2Arns, err := getAllElbv2Instances(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(elbv2Arns) > 0 {\n\t\t\t\tloadBalancersV2.Arns = awsgo.StringValueSlice(elbv2Arns)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, loadBalancersV2)\n\t\t\t}\n\t\t}\n\t\t// End LoadBalancerV2 Arns\n\n\t\t// EC2 Instances\n\t\tec2Instances := EC2Instances{}\n\t\tif IsNukeable(ec2Instances.ResourceName(), resourceTypes) {\n\t\t\tinstanceIds, err := getAllEc2Instances(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(instanceIds) > 0 {\n\t\t\t\tec2Instances.InstanceIds = awsgo.StringValueSlice(instanceIds)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, ec2Instances)\n\t\t\t}\n\t\t}\n\t\t// End EC2 Instances\n\n\t\t// EBS Volumes\n\t\tebsVolumes := EBSVolumes{}\n\t\tif IsNukeable(ebsVolumes.ResourceName(), resourceTypes) 
{\n\t\t\tvolumeIds, err := getAllEbsVolumes(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(volumeIds) > 0 {\n\t\t\t\tebsVolumes.VolumeIds = awsgo.StringValueSlice(volumeIds)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, ebsVolumes)\n\t\t\t}\n\t\t}\n\t\t// End EBS Volumes\n\n\t\t// EIP Addresses\n\t\teipAddresses := EIPAddresses{}\n\t\tif IsNukeable(eipAddresses.ResourceName(), resourceTypes) {\n\t\t\tallocationIds, err := getAllEIPAddresses(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(allocationIds) > 0 {\n\t\t\t\teipAddresses.AllocationIds = awsgo.StringValueSlice(allocationIds)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, eipAddresses)\n\t\t\t}\n\t\t}\n\t\t// End EIP Addresses\n\n\t\t// AMIs\n\t\tamis := AMIs{}\n\t\tif IsNukeable(amis.ResourceName(), resourceTypes) {\n\t\t\timageIds, err := getAllAMIs(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(imageIds) > 0 {\n\t\t\t\tamis.ImageIds = awsgo.StringValueSlice(imageIds)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, amis)\n\t\t\t}\n\t\t}\n\t\t// End AMIs\n\n\t\t// Snapshots\n\t\tsnapshots := Snapshots{}\n\t\tif IsNukeable(snapshots.ResourceName(), resourceTypes) {\n\t\t\tsnapshotIds, err := getAllSnapshots(session, region, excludeAfter)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(snapshotIds) > 0 {\n\t\t\t\tsnapshots.SnapshotIds = awsgo.StringValueSlice(snapshotIds)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, snapshots)\n\t\t\t}\n\t\t}\n\t\t// End Snapshots\n\n\t\t// ECS resources\n\t\tecsServices := ECSServices{}\n\t\tif IsNukeable(ecsServices.ResourceName(), resourceTypes) {\n\t\t\tclusterArns, err := getAllEcsClusters(session)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\t\t\tif len(clusterArns) > 0 {\n\t\t\t\tserviceArns, serviceClusterMap, err := getAllEcsServices(session, clusterArns, excludeAfter)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t\t}\n\t\t\t\tecsServices.Services = awsgo.StringValueSlice(serviceArns)\n\t\t\t\tecsServices.ServiceClusterMap = serviceClusterMap\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, ecsServices)\n\t\t\t}\n\t\t}\n\t\t// End ECS resources\n\n\t\t// EKS resources\n\t\teksClusters := EKSClusters{}\n\t\tif IsNukeable(eksClusters.ResourceName(), resourceTypes) {\n\t\t\tif eksSupportedRegion(region) {\n\t\t\t\teksClusterNames, err := getAllEksClusters(session, excludeAfter)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t\t}\n\t\t\t\tif len(eksClusterNames) > 0 {\n\t\t\t\t\teksClusters.Clusters = awsgo.StringValueSlice(eksClusterNames)\n\t\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, eksClusters)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t// End EKS resources\n\n\t\t// RDS DB Instances\n\t\tdbInstances := DBInstances{}\n\t\tif IsNukeable(dbInstances.ResourceName(), resourceTypes) {\n\t\t\tinstanceNames, err := getAllRdsInstances(session, excludeAfter)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\n\t\t\tif len(instanceNames) > 0 {\n\t\t\t\tdbInstances.InstanceNames = 
awsgo.StringValueSlice(instanceNames)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, dbInstances)\n\t\t\t}\n\t\t}\n\t\t// End RDS DB Instances\n\n\t\t// RDS DB Clusters\n\t\t// These reference the Aurora Clusters, for the use it's the same resource (rds), but AWS\n\t\t// has different abstractions for each.\n\t\tdbClusters := DBClusters{}\n\t\tif IsNukeable(dbClusters.ResourceName(), resourceTypes) {\n\t\t\tclustersNames, err := getAllRdsClusters(session, excludeAfter)\n\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t}\n\n\t\t\tif len(clustersNames) > 0 {\n\t\t\t\tdbClusters.InstanceNames = awsgo.StringValueSlice(clustersNames)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, dbClusters)\n\t\t\t}\n\t\t}\n\t\t// End RDS DB Clusters\n\n\t\t// S3 Buckets\n\t\ts3Buckets := S3Buckets{}\n\t\tif IsNukeable(s3Buckets.ResourceName(), resourceTypes) {\n\t\t\tvar bucketNamesPerRegion map[string][]*string\n\n\t\t\t// AWS S3 buckets list operation lists all buckets irrespective of regions.\n\t\t\t// For each bucket we have to make a separate call to find the bucket region.\n\t\t\t// Hence for x buckets and a total of y target regions - we need to make:\n\t\t\t// (x + 1) * y calls i.e. 1 call to list all x buckets, x calls to find out\n\t\t\t// each bucket's region and repeat the process for each of the y regions.\n\n\t\t\t// getAllS3Buckets returns a map of regions to buckets and we call it only once -\n\t\t\t// thereby reducing total calls from (x + 1) * y to only (x + 1) for the first region -\n\t\t\t// followed by a cache lookup for rest of the regions.\n\n\t\t\t// Cache lookup to check if we already obtained bucket names per region\n\t\t\tbucketNamesPerRegion, ok := resourcesCache[\"S3\"]\n\n\t\t\tif !ok {\n\t\t\t\tbucketNamesPerRegion, err = getAllS3Buckets(session, excludeAfter, targetRegions, \"\", s3Buckets.MaxConcurrentGetSize(), configObj)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, errors.WithStackTrace(err)\n\t\t\t\t}\n\n\t\t\t\tresourcesCache[\"S3\"] = make(map[string][]*string)\n\n\t\t\t\tfor bucketRegion, _ := range bucketNamesPerRegion {\n\t\t\t\t\tresourcesCache[\"S3\"][bucketRegion] = bucketNamesPerRegion[bucketRegion]\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbucketNames, ok := resourcesCache[\"S3\"][region]\n\n\t\t\tif ok && len(bucketNames) > 0 {\n\t\t\t\ts3Buckets.Names = aws.StringValueSlice(bucketNames)\n\t\t\t\tresourcesInRegion.Resources = append(resourcesInRegion.Resources, s3Buckets)\n\t\t\t}\n\t\t}\n\t\t// End S3 Buckets\n\n\t\tif len(resourcesInRegion.Resources) > 0 {\n\t\t\taccount.Resources[region] = resourcesInRegion\n\t\t}\n\t\tcount++\n\t}\n\n\treturn &account, nil\n}",
"func CreateAllResources(args *FactoryArgs) ([]runtime.Object, error) {\n\tvar resources []runtime.Object\n\tfor group := range factoryFunctions {\n\t\trs, err := CreateResourceGroup(group, args)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresources = append(resources, rs...)\n\t}\n\treturn resources, nil\n}",
"func CreateAllResources(args *FactoryArgs) ([]runtime.Object, error) {\n\tvar resources []runtime.Object\n\tfor group := range factoryFunctions {\n\t\trs, err := CreateResourceGroup(group, args)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tresources = append(resources, rs...)\n\t}\n\treturn resources, nil\n}",
"func All(c client.ContainerAPIClient) ([]*Server, error) {\n\tcontainers, err := c.ContainerList(\n\t\tcontext.Background(),\n\t\tdocker.ContainerListOptions{All: true},\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"listing docker containers: %s\", err)\n\t}\n\n\tnames := make([]string, len(containers))\n\tfor i, c := range containers {\n\t\tnames[i] = strings.Replace(c.Names[0], \"/\", \"\", 1)\n\t}\n\n\tservers := make([]*Server, 0)\n\n\tfor _, n := range names {\n\t\ts, err := Get(c, n)\n\t\tif err != nil {\n\t\t\tif errors.Is(err, &NotCraftError{}) || !s.IsRunning() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn nil, fmt.Errorf(\"creating client for container '%s': %s\", n, err)\n\t\t}\n\n\t\tservers = append(servers, s)\n\t}\n\n\treturn servers, nil\n}",
"func (rt *WithPod) SetDefaults() {\n\tfor idx, v := range rt.Spec.Template.Spec.Volumes {\n\t\t// TODO(mattmoor): ProjectedVolumeSource\n\t\tif v.VolumeSource.ConfigMap == nil {\n\t\t\tcontinue\n\t\t}\n\t\trt.Spec.Template.Spec.Volumes[idx].VolumeSource.ConfigMap.LocalObjectReference.Name =\n\t\t\tFreezeConfigMap(rt.Namespace, v.VolumeSource.ConfigMap.LocalObjectReference.Name)\n\t}\n\tfor _, c := range rt.Spec.Template.Spec.InitContainers {\n\t\tfor idx, env := range c.Env {\n\t\t\tif env.ValueFrom == nil || env.ValueFrom.ConfigMapKeyRef == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Env[idx].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name =\n\t\t\t\tFreezeConfigMap(rt.Namespace, env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name)\n\t\t}\n\t}\n\tfor _, c := range rt.Spec.Template.Spec.Containers {\n\t\tfor idx, env := range c.Env {\n\t\t\tif env.ValueFrom == nil || env.ValueFrom.ConfigMapKeyRef == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tc.Env[idx].ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name =\n\t\t\t\tFreezeConfigMap(rt.Namespace, env.ValueFrom.ConfigMapKeyRef.LocalObjectReference.Name)\n\t\t}\n\t}\n}",
"func (a *Client) StartDeploymentResourceInstancesAll(params *StartDeploymentResourceInstancesAllParams, authInfo runtime.ClientAuthInfoWriter) (*StartDeploymentResourceInstancesAllAccepted, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewStartDeploymentResourceInstancesAllParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"start-deployment-resource-instances-all\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/deployments/{deployment_id}/{resource_kind}/{ref_id}/instances/_start\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &StartDeploymentResourceInstancesAllReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*StartDeploymentResourceInstancesAllAccepted)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for start-deployment-resource-instances-all: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func createAllPodsLW(cl *client.Client) *listWatch {\n\treturn &listWatch{\n\t\tclient: cl,\n\t\tfieldSelector: labels.Everything(),\n\t\tresource: \"pods\",\n\t}\n}",
"func (b *Botanist) DeleteAllContainerRuntimeResources(ctx context.Context) error {\n\treturn b.deleteContainerRuntimeResources(ctx, sets.NewString())\n}",
"func (cm *Docker) refreshAllContainers() {\n\tallContainers, err := cm.client.ContainerList(cm.currentContext, types.ContainerListOptions{All: true})\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Refreshing all containers:%s\", err))\n\t}\n\n\tfor _, i := range allContainers {\n\t\tinsp := cm.inspectContainer(i.ID)\n\t\tc := cm.MustGetContainer(i.ID)\n\t\tc.SetMeta(\"name\", shortName(insp.Name))\n\t\tc.SetState(insp.State.Status)\n\t\tcm.needsRefreshContainers <- insp.ID\n\t}\n}",
"func Init(myIp string, namespace string, listOptions metav1.ListOptions, f NotifyFunc, client *kubernetes.Clientset, debugMode bool) ([]string, error) {\n\n\tlibConfig.debugMode = debugMode\n\n\t// Fetch initial pods from API\n\tinitialPods, err := getInitialPods(client, namespace, listOptions, myIp)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"PodWatch: could not get initial pod list: %v\", err)\n\t}\n\n\tif len(initialPods) <= 0 {\n\t\treturn nil, errors.New(\"PodWatch: no pods detected, not even self\")\n\t}\n\tpodIps := initialPods.Keys()\n\n\t// Start monitoring for pod transitions, to keep pool up to date\n\tgo monitorPodState(client, namespace, listOptions, myIp, initialPods, f)\n\n\treturn podIps, nil\n}"
] | [
"0.6328543",
"0.6126641",
"0.6088365",
"0.6029017",
"0.60047114",
"0.5906855",
"0.5852475",
"0.57923305",
"0.56798255",
"0.5666807",
"0.5631742",
"0.5615351",
"0.55859125",
"0.55045414",
"0.54977345",
"0.54548454",
"0.5432338",
"0.5423368",
"0.5408374",
"0.5392601",
"0.53862274",
"0.5384044",
"0.537387",
"0.5368495",
"0.53585625",
"0.5347617",
"0.53416586",
"0.53279674",
"0.53263927",
"0.5306047",
"0.5297723",
"0.52864",
"0.5281167",
"0.52694315",
"0.5252825",
"0.5240756",
"0.5221295",
"0.52208745",
"0.5170022",
"0.5159863",
"0.515914",
"0.5156081",
"0.51422465",
"0.51412237",
"0.51399547",
"0.51390743",
"0.5136302",
"0.5134178",
"0.51210827",
"0.51152563",
"0.5102775",
"0.5091233",
"0.5084772",
"0.50790584",
"0.50559807",
"0.5055329",
"0.5051862",
"0.5048717",
"0.5048275",
"0.50409687",
"0.50350344",
"0.5033621",
"0.503054",
"0.5026201",
"0.5022346",
"0.5018557",
"0.5014284",
"0.50112486",
"0.5001826",
"0.4990151",
"0.4987842",
"0.49605334",
"0.49544722",
"0.49476993",
"0.49458838",
"0.49338424",
"0.49325454",
"0.49320185",
"0.49264184",
"0.49256998",
"0.49241343",
"0.4917221",
"0.49171862",
"0.491048",
"0.48890752",
"0.48862228",
"0.48789194",
"0.48787227",
"0.48783785",
"0.4877623",
"0.48776108",
"0.4871389",
"0.4871389",
"0.48699355",
"0.48675665",
"0.48623326",
"0.48536286",
"0.48461565",
"0.48426285",
"0.48418376"
] | 0.83383745 | 0 |
Drone's LEDs should stop blinking for 10s | func TestBind(t *testing.T) {
driver := NewDriver("192.168.0.1:50000")
driver.onError = func(err error) {
t.Error("fail", err)
}
driver.Start()
time.Sleep(time.Second * 10)
driver.Halt()
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (sh *stmHelper) blinkMaintenanceLED(ctx context.Context) {\n\tdefer sh.ClearDisplay()\n\tfor {\n\t\tsh.setLED(stm.LED1, yellow)\n\t\ttime.Sleep(time.Second)\n\t\tsh.setLED(stm.LED1, off)\n\n\t\tsh.setLED(stm.LED2, yellow)\n\t\ttime.Sleep(time.Second)\n\t\tsh.setLED(stm.LED2, off)\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn\n\t\tdefault:\n\t\t}\n\t}\n}",
"func Blink(led LEDColor, interval time.Duration, count int) {\n\tticker := time.NewTicker(interval)\n\tdefer ticker.Stop()\n\tfor i := 0; i < count; i++ {\n\t\t<-ticker.C\n\t\tSetLEDColor(led)\n\t\t<-ticker.C\n\t\tSetLEDColor(NONE)\n\t}\n}",
"func blinkErr(led *GPIO) {\n\tled.Set(false)\n\ttime.Sleep(50 * time.Millisecond)\n\tled.Set(true)\n}",
"func LEDController() {\n\tfor true {\n\t\tfor _,led := range ledList {\n\t\t\tif led.QueuedState != led.State {\n\t \tled.State = led.QueuedState\n\t\t\t switch led.State {\n\t\t\t case LEDBLINK:\n\t\t\t go LEDLight(LEDBLINK, led)\n\t\t\t case LEDON: \n\t\t\t \tLEDLight(LEDON, led)\n\t\t\t default:\n\t\t\t LEDLight(LEDOFF, led)\n\t\t\t }\n\t\t\t}\n\t\t} \n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n}",
"func (r *RunningLED) Blink() {\n\n\tr.ledOne.blink(r.currentState)\n\tr.ledTwo.blink(r.currentState)\n\tr.ledThree.blink(r.currentState)\n\n\tr.currentState++\n\tif r.currentState > 8 {\n\t\tr.currentState = 0\n\t}\n\n}",
"func blinkOK(led *GPIO) {\n\tled.Set(true)\n\ttime.Sleep(10 * time.Millisecond)\n\tled.Set(false)\n}",
"func LEDLight(State int, led *Led) {\n\tif State == LEDON {\n\t\t// Leave the LED on.\n log.Info(\"Led set to ON!\")\n\t\treturn\n\t}\n\tif State == LEDOFF {\n\t\t// Leave the LED off.\n log.Info(\"Led set to OFF!\")\n\t\treturn \n\t}\n\tfor true {\n\t\tif led.State != LEDBLINK {\n\t\t\treturn \n\t\t}\n\t\tlog.Info(\"Blinking!\")\n\t\ttime.Sleep(1 * time.Second)\n\t}\n}",
"func (r *RunningLED) TurnOff() {\n\tr.ledOne.l.Low()\n\tr.ledTwo.l.Low()\n\tr.ledThree.l.Low()\n}",
"func TestDvLIRClient_Blink(t *testing.T) {\n\tip := viper.GetString(\"IPAddress\")\n\tpw := viper.GetString(\"Password\")\n\n\tdvlirClient, err := NewDvLIRClient(ip, pw)\n\tif !assert.NoError(t, err, \"Error while creating Api client\") {\n\t\treturn\n\t}\n\n\terr = dvlirClient.Login()\n\tif !assert.NoError(t, err, \"Error during Login\") {\n\t\treturn\n\t}\n\n\tnum, err := dvlirClient.Blink(500, 10)\n\tif !assert.NoError(t, err, \"Error during Blink request\") {\n\t\treturn\n\t}\n\tif !assert.Equal(t, 123, num, \"Device didn't return correct return value\") {\n\t\treturn\n\t}\n\n\tfmt.Println(num)\n\n\tdefer func() {\n\t\terr = dvlirClient.Logout()\n\t\tif !assert.NoError(t, err, \"Error during Logout\") {\n\t\t\treturn\n\t\t}\n\t}()\n}",
"func controlAppLEDStatus() {\n\tvar appled extend.AppLED\n\tif err := appled.Prepare(sys.GetBusManagerCfg().Model); err != nil {\n\t\tbuslog.LOG.Warningf(\"prepare app led failed, errmsg: %v\", err)\n\t\treturn\n\t}\n\tdefer appled.CleanUp()\n\n\tstatus := 0\n\n\t// loop\n\tfor {\n\t\t// toggle status\n\t\tstatus = status ^ 1\n\n\t\t// sleep for a moment, interval set by mqtt connect/disconnect handler\n\t\ttime.Sleep(time.Millisecond * time.Duration(LEDSetInterval))\n\n\t\tif err := appled.SetLEDStatus(status); err != nil {\n\t\t\t// log.Printf(\"set appled %v\", err)\n\t\t}\n\t}\n}",
"func (t *TrafficLightTTY) Blink() {\n\tt.send([]byte(BLINK))\n}",
"func (r *RunningLED) Exploding() {\n\tr.ledOne.l.Set(!r.ledOne.l.Get())\n\tr.ledTwo.l.Set(!r.ledTwo.l.Get())\n\tr.ledThree.l.Set(!r.ledThree.l.Get())\n}",
"func off(bl blinkt.Blinkt) {\n\tbl.Clear()\n\tbl.Show()\n}",
"func off(bl blinkt.Blinkt) {\n\tbl.Clear()\n\tbl.Show()\n}",
"func (d *Dots) Stop() {\n\td.c <- true\n}",
"func (s *Servo) LED() (bool, error) {\n\tv, err := s.getRegister(reg.Led)\n\treturn utils.IntToBool(v), err\n}",
"func (e *Engine) startBrd() {\n\tticker := time.NewTicker(broadcastInterval)\n\tfor {\n\t\tselect {\n\t\tcase <-e.brdStop:\n\t\t\tticker.Stop()\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\te.broadcastChanges()\n\t\t}\n\t}\n}",
"func ToggleLight(launchNumber int64, out *portmidi.Stream, signal chan struct{}) error {\n\tfor {\n\t\tselect {\n\t\t// Make sure the light is on when we finish.\n\t\tcase <-signal:\n\t\t\tmidiInfo := apc20.IntToMidiMessageDown[launchNumber]\n\t\t\tif err := out.WriteShort(midiInfo.Status, midiInfo.Data1, midiInfo.Data2); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"could not write short to midi device\")\n\t\t\t}\n\t\t\treturn nil\n\t\t// Run flash the red light.\n\t\tdefault:\n\t\t\tmidiInfo := apc20.IntToMidiMessageDown[launchNumber]\n\t\t\tif err := out.WriteShort(midiInfo.Status, midiInfo.Data1, 3); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"could not write short to midi device\")\n\t\t\t}\n\t\t\ttime.Sleep(250 * time.Millisecond)\n\t\t\tmidiInfo = apc20.IntToMidiMessageUp[launchNumber]\n\t\t\tif err := out.WriteShort(midiInfo.Status, midiInfo.Data1, 0); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"could not write short to midi device\")\n\t\t\t}\n\t\t\ttime.Sleep(250 * time.Millisecond)\n\t\t}\n\t}\n}",
"func main () {\n\tfor timer := 20; timer >= 0; timer-- {\n\t\tif timer == 0 {\n\t\t\tfmt.Println(\"Bboooomm!!!!\")\n\t\t\tbreak\n\t\t}\n\tfmt.Println(timer)\n\ttime.Sleep(1 * time.Second)\n\t}\n}",
"func (tc *TermColor) Blink() *TermColor {\n\ttc.settingsCount++\n\ttc.blink = true\n\treturn tc\n}",
"func BrightnessPulse() {\n\tfor {\n\t\tfor i := 255; i >= 0; i-- {\n\t\t\ts := strconv.Itoa(i)\n\t\t\tBrightnessFileHandler(s)\n\t\t\ttime.Sleep(25 * time.Millisecond)\n\t\t}\n\t\tfor i := 1; i <= 255; i++ {\n\t\t\ts := strconv.Itoa(i)\n\t\t\tBrightnessFileHandler(s)\n\t\t\ttime.Sleep(25 * time.Millisecond)\n\t\t}\n\t}\n}",
"func (buzzer *ActiveBuzzer) StopTone() {\n\tbuzzer.pin.Low()\n}",
"func SetLights(states AllStates, id string) {\n\tfor floor := 0; floor < NFLOORS; floor++ { //loop through and checks all\n\t\televio.SetButtonLamp(elevio.BT_Cab, floor, states.States[id].CabRequests[floor])\n\t\televio.SetButtonLamp(elevio.BT_HallUp, floor, states.HallRequests[floor][elevio.BT_HallUp])\n\t\televio.SetButtonLamp(elevio.BT_HallDown, floor, states.HallRequests[floor][elevio.BT_HallDown])\n\t}\n\n}",
"func (d *Device) startOutputEnableTimer() {\n\tcount := d.brightness << d.colorBit\n\tfor i := uint32(0); i < count; i++ {\n\t\td.oe.Low()\n\t}\n\td.oe.High()\n}",
"func TextViewBlink() {\n\tfor {\n\t\tTextViewBlinkMu.Lock()\n\t\tif TextViewBlinker == nil {\n\t\t\tTextViewBlinkMu.Unlock()\n\t\t\treturn // shutdown..\n\t\t}\n\t\tTextViewBlinkMu.Unlock()\n\t\t<-TextViewBlinker.C\n\t\tTextViewBlinkMu.Lock()\n\t\tif BlinkingTextView == nil || BlinkingTextView.This() == nil {\n\t\t\tTextViewBlinkMu.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tif BlinkingTextView.IsDestroyed() || BlinkingTextView.IsDeleted() {\n\t\t\tBlinkingTextView = nil\n\t\t\tTextViewBlinkMu.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\ttv := BlinkingTextView\n\t\tif tv.Viewport == nil || !tv.HasFocus() || !tv.IsFocusActive() || !tv.This().(gi.Node2D).IsVisible() {\n\t\t\tBlinkingTextView = nil\n\t\t\tTextViewBlinkMu.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\twin := tv.ParentWindow()\n\t\tif win == nil || win.IsResizing() || win.IsClosed() || !win.IsWindowInFocus() {\n\t\t\tTextViewBlinkMu.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\tif win.IsUpdating() {\n\t\t\tTextViewBlinkMu.Unlock()\n\t\t\tcontinue\n\t\t}\n\t\ttv.BlinkOn = !tv.BlinkOn\n\t\ttv.RenderCursor(tv.BlinkOn)\n\t\tTextViewBlinkMu.Unlock()\n\t}\n}",
"func DisableLights() error {\n\treturn writeSysfsValue(\"state\", \"0\")\n}",
"func (d *DV4Mini) GreenLedOff() {\n\t// []byte{ADFGREENLED, 0x01, 0x00}\n\td.sendCmd([]byte{ADFGREENLED, 0x00})\n}",
"func (b *Blinker) Start() {\n\tb.stop = make(chan struct{})\n\tgo b.blink()\n}",
"func (device *ServoBrick) DisableStatusLED() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionDisableStatusLED), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func setAllLights(elevState cost.AssignedOrderInformation, ID string) {\n\tfor floor := 0; floor < FLOORS; floor++ {\n\t\televio.SetButtonLamp(elevio.BT_Cab, floor, elevState.States[ID].CabRequests[floor])\n\t\tfor button := elevio.BT_HallUp; button < elevio.BT_Cab; button++ {\n\t\t\televio.SetButtonLamp(button, floor, elevState.HallRequests[floor][button])\n\t\t}\n\t}\n}",
"func (device *SilentStepperBrick) DisableStatusLED() (err error) {\n\tvar buf bytes.Buffer\n\n\tresultBytes, err := device.device.Set(uint8(FunctionDisableStatusLED), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func Blink() tea.Msg {\n\treturn initialBlinkMsg{}\n}",
"func Blink(arg interface{}) Value {\n\treturn SlowBlink(arg)\n}",
"func Break(t time.Duration) string {\n\tbuf := bytes.Buffer{}\n\tfor ; t > 10*time.Second; t -= 10 * time.Second {\n\t\tbuf.WriteString(fmt.Sprintf(`<break time=\"%s\"/>`, 10*time.Second))\n\t}\n\tbuf.WriteString(fmt.Sprintf(`<break time=\"%s\"/>`, t))\n\treturn buf.String()\n}",
"func (d *Display) BacklightOff() error {\n\t_, err := d.port.Write([]byte(BacklightOff))\n\treturn err\n}",
"func (p *Puck) LED2Reset(name ...string) error {\n\tcmd := []byte(\"LED2.reset();\\n\")\n\terr := p.command(name, cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (p *Puck) LED2Set(name ...string) error {\n\tcmd := []byte(\"LED2.set();\\n\")\n\terr := p.command(name, cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *SwitchTicker) Stop() {\n\tc.slowTicker.Stop()\n\tc.fastTicker.Stop()\n}",
"func BacklightOff(lcd *device.Lcd) {\n\tm.Lock()\n\terr := lcd.BacklightOff()\n\tm.Unlock()\n\tif err != nil {\n\t\tlg.Fatalf(\"BacklightOff: %s\", err)\n\t}\n\treturn\n}",
"func Sleep(){\n\titwoc, err := i2c.NewI2C(pca9685.I2CAddress,1)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tpwm := pca9685.Context{Debug: true}\n\tpwm.PWMServoDriver(1,itwoc)\n\tpwm.Begin()\n\tpwm.SetOscillatorFrequency(27000000)\n\tpwm.SetPWMFrequency(ServoFreq)\n\ttime.Sleep(time.Millisecond *10)\n\tpwm.Sleep()\n\tpwm.Bus.Close()\n}",
"func RapidBlink(arg interface{}) Value {\n\tif val, ok := arg.(Value); ok {\n\t\treturn val.RapidBlink()\n\t}\n\treturn value{value: arg, color: RapidBlinkFm}\n}",
"func Bottomline(str string, d time.Duration) {\n\tconfig := &rgbmatrix.DefaultConfig\n\tconfig.Rows = *rows\n\tconfig.Cols = *cols\n\tconfig.Parallel = *parallel\n\tconfig.ChainLength = *chain\n\tconfig.Brightness = *brightness\n\tconfig.HardwareMapping = *hardware_mapping\n\tconfig.ShowRefreshRate = *show_refresh\n\tconfig.InverseColors = *inverse_colors\n\tconfig.DisableHardwarePulsing = *disable_hardware_pulsing\n\tconfig.PWMLSBNanoseconds = 200\n\n\tm, err := rgbmatrix.NewRGBLedMatrix(config)\n\tfatal(err)\n\n\tc := rgbmatrix.NewCanvas(m)\n\tdefer c.Close()\n\taddLabel(c, 0, 13+16, str)\n\tc.Render()\n\ttime.Sleep(d)\n}",
"func initI2C() {\n\tlog.Info(\"Starting I2C init\")\n\terr := ioexp.InitChip()\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\t//Beep test\n\tlog.Debug(\"Beep called\")\n\tfor ii := 0; ii < 3; ii++ {\n\n\t\terr = rpigpio.BeepOn()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(50 * time.Millisecond)\n\t\terr = rpigpio.BeepOff()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\n\t//testing LEDs\n\tconst blinkTime = 200\n\tlog.Debug(\"Blinking LEDs\")\n\tfor ii := 0; ii < 2; ii++ {\n\n\t\tlog.Debug(\"Yellow\")\n\t\terr := ioexp.WritePin(ioexp.YellowLed, true)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(blinkTime * time.Millisecond)\n\t\terr = ioexp.WritePin(ioexp.YellowLed, false)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debug(\"Red\")\n\t\terr = ioexp.WritePin(ioexp.RedLed, true)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(blinkTime * time.Millisecond)\n\t\terr = ioexp.WritePin(ioexp.RedLed, false)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debug(\"Green\")\n\t\terr = ioexp.WritePin(ioexp.GreenLed, true)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(blinkTime * time.Millisecond)\n\t\terr = ioexp.WritePin(ioexp.GreenLed, false)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\n\t\tlog.Debug(\"Blue\")\n\t\terr = ioexp.WritePin(ioexp.BlueLed, true)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t\ttime.Sleep(blinkTime * time.Millisecond)\n\t\terr = ioexp.WritePin(ioexp.BlueLed, false)\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func BacklightOn(lcd *device.Lcd) {\n\tm.Lock()\n\terr := lcd.BacklightOn()\n\tm.Unlock()\n\tif err != nil {\n\t\tlg.Fatalf(\"BacklightOn: %s\", err)\n\t}\n\treturn\n}",
"func (ld *LEDraw) DrawLED(num int) {\n\tled := LEData[num]\n\tfor _, seg := range led {\n\t\tld.DrawSeg(seg)\n\t}\n}",
"func (d *Display) CursorBlinkOff() error {\n\t_, err := d.port.Write([]byte(CursorBlinkOff))\n\treturn err\n}",
"func Blink(attrs []htmlgo.Attribute, children ...HTML) HTML {\n\treturn &htmlgo.Tree{Tag: \"blink\", Attributes: attrs, Children: children}\n}",
"func (this *LedManager) SetLed(which int, period, interval time.Duration) {\n\n\tif which >= len(this.periods) {\n\t\treturn\n\t}\n\n\tif this.periods[which] != period {\n\t\tthis.periods[which] = period\n\t}\n\n\tif this.intervals[which] != interval {\n\t\tthis.intervals[which] = interval\n\n\t\tif t := this.intervalTickers[which]; t != nil {\n\t\t\tt.Stop()\n\t\t}\n\t\tif interval == 0 || period == 0 {\n\t\t\tdevice.GpioSetValue(this.ledsFD[which], LED_OFF)\n\t\t} else if interval < 0 || period < 0 {\n\t\t\tdevice.GpioSetValue(this.ledsFD[which], LED_ON)\n\t\t} else {\n\t\t\tthis.intervalTickers[which] = time.NewTicker(interval + period)\n\t\t}\n\n\t}\n\n}",
"func (s HueServer) Blink(ctx context.Context, in *hue.LightsRequest) (*hue.LightsResponse, error) {\n\tlog.WithField(\"message\", in).\n\t\tWithField(\"call\", \"Blink\").Print(\"Incoming gRPC call\")\n\tgg := groups.New(hueBridge.Bridge, hueBridge.Username)\n\tg, err := gg.GetGroup(int(in.GetGroup()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toldState := g.Action\n\n\tbrightness := uint8(255 * in.GetBrightnessPercent())\n\tgg.SetGroupState(g.ID, lights.State{On: true, Bri: brightness, Alert: \"lselect\"})\n\tgg.SetGroupState(g.ID, oldState)\n\treturn &hue.LightsResponse{Success: true}, nil\n}",
"func (d *Display) BacklightOn(time byte) error {\n\tcommand := []byte(BacklightOn)\n\tcommand = append(command, time)\n\t_, err := d.port.Write([]byte(command))\n\treturn err\n}",
"func (buzzer *ActiveBuzzer) ToggleTone() {\n\tif buzzer.pin.Read() == 1 {\n\t\tbuzzer.pin.Low()\n\t} else {\n\t\tbuzzer.pin.High()\n\t}\n}",
"func InitLEDs(gpioPinRed, gpioPinGreen, gpioPinBlue uint64) {\n\tvar setupError error\n\tredLEDPin, setupError = gpio.SetupGPIOOutputPort(gpioPinRed)\n\tif setupError != nil {\n\t\tlog.Fatalf(\"Error setting up pin 7: %s\\n\", setupError)\n\t}\n\tgreenLEDPin, setupError = gpio.SetupGPIOOutputPort(gpioPinGreen)\n\tif setupError != nil {\n\t\tlog.Fatalf(\"Error setting up pin 8: %s\\n\", setupError)\n\t}\n\tblueLEDPin, setupError = gpio.SetupGPIOOutputPort(gpioPinBlue)\n\tif setupError != nil {\n\t\tlog.Fatalf(\"Error setting up pin 25: %s\\n\", setupError)\n\t}\n}",
"func (y *Yeelight) Off() string {\n\treturn y.SetPower(\"off\", \"smooth\", \"1000\")\n}",
"func (r *Rele) SwitchOff() {\n\tpin := rpio.Pin(r.Pin)\n\tpin.High()\n\tif pin.Read() == rpio.High {\n\t\tr.On = false\n\t} else {\n\t\tr.On = true\n\t}\n}",
"func (module *ScreensaverModule) Loop(srv *Server) {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tmodule.Tick(srv)\n\t\t}\n\t}\n}",
"func BreakPound() {\n\tfmt.Println(color.HiBlueString(\"#########\"))\n}",
"func (c *Controller) Loop() error {\n\ttimer.Delay(500)\n\tserial.Println(\" Write PIN 2 -> HIGH\")\n\tdigital.Write(2, digital.High)\n\ttimer.Delay(500)\n\tdigital.Write(2, digital.Low)\n\tserial.Println(\" Write PIN 2 -> LOW\")\n\treturn nil\n}",
"func (dt *discoveryTool) stop() {\n\tclose(dt.done)\n\n\t//Shutdown timer\n\ttimer := time.NewTimer(time.Second * 3)\n\tdefer timer.Stop()\nL:\n\tfor { //Unblock go routine by reading from dt.dataChan\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tbreak L\n\t\tcase <-dt.dataChan:\n\t\t}\n\t}\n\n\tdt.wg.Wait()\n}",
"func DisableClock() {\n\trcc.RCC.FMCEN().AtomicClear()\n}",
"func (t *TrafficLightTTY) Red() {\n\tt.send([]byte(RED))\n}",
"func (s *status) stopping() error { return s.set(\"stopping\", \"STOPPING=1\") }",
"func TestLight(t *testing.T) {\n\t_ = lp.Reset()\n\n\tx, y := uint8(0), uint8(8)\n\n\tif err := lp.Light(x, y, launchpad.Color{\n\t\tGreen: launchpad.Full,\n\t\tRed: launchpad.Off,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttime.Sleep(500 * time.Millisecond)\n\n\tif err := lp.Light(x, y, launchpad.Color{\n\t\tGreen: launchpad.Off,\n\t\tRed: launchpad.Full,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttime.Sleep(500 * time.Millisecond)\n\n\tif err := lp.Light(x, y, launchpad.Color{\n\t\tGreen: launchpad.Full,\n\t\tRed: launchpad.Full,\n\t}); err != nil {\n\t\tt.Fatal(err)\n\t}\n\ttime.Sleep(500 * time.Millisecond)\n\n\t_ = lp.Reset()\n}",
"func light(action string) error {\n\tvar cmd *exec.Cmd\n\n\tswitch action {\n\tcase \"on\":\n\t\tcmd = exec.Command(\"sh\", \"-c\", \"echo none > /sys/class/leds/led0/trigger; echo 0 > /sys/class/leds/led0/brightness\")\n\tcase \"off\":\n\t\tcmd = exec.Command(\"sh\", \"-c\", \"echo none > /sys/class/leds/led0/trigger; echo 1 > /sys/class/leds/led0/brightness\")\n\tcase \"heartbeat\":\n\t\tcmd = exec.Command(\"sh\", \"-c\", \"echo heartbeat > /sys/class/leds/led0/trigger\")\n\tcase \"default\":\n\t\tcmd = exec.Command(\"sh\", \"-c\", \"echo mmc0 > /sys/class/leds/led0/trigger\")\n\tdefault:\n\t\treturn rpiMsg(fmt.Sprintf(\"Unknown parameter %s for LIGHT\", action))\n\t}\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn Log(fmt.Sprintf(\"Error while trying to LIGHT %s :\", action), err)\n\t}\n\n\treturn nil\n}",
"func Restart(start, pwdn gpio.PinIO) {\n\tif err := pwdn.Out(gpio.Low); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttime.Sleep(500 * time.Millisecond)\n\n\tpwdn.Out(gpio.High)\n\n\tif err := start.Out(gpio.Low); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\ttime.Sleep(2 * time.Second)\n\n}",
"func (s *Servo) SetLED(state bool) error {\n\treturn s.setRegister(reg.Led, utils.BoolToInt(state))\n}",
"func (p *Puck) LED1Set(name ...string) error {\n\tcmd := []byte(\"LED1.set();\\n\")\n\terr := p.command(name, cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func main() {\n\n\t// iterates over the values as they arrive evey 500ms\n\tticker := time.NewTicker(500 * time.Millisecond)\n\tgo func() {\n\t\tfor t := range ticker.C {\n\t\t\tfmt.Println(\"Tick at\", t)\n\t\t}\n\t}()\n\n\ttime.Sleep(1600 * time.Millisecond)\n\t// tickers can be stopped like timers. once a ticker is stopped, it won't receive any more values on its channel.\n\t// here, it's stopped after 1600ms, so the ticker should tick 3 times before stopping\n\tticker.Stop()\n\tfmt.Println(\"Ticker stopped\")\n}",
"func (p *Puck) LED1Reset(name ...string) error {\n\tcmd := []byte(\"LED1.reset();\\n\")\n\terr := p.command(name, cmd)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func Blink(txt string) string {\n\treturn Render(blink, normal, txt)\n}",
"func (rf *Raft) Kill() {\n\t// Your code here, if desired.\n\trf.mu.Lock()\n\trf.transitionState(Stop)\n\trf.mu.Unlock()\n\tsleep(2000)\n}",
"func startThermalCam() {\n\tfor {\n\t\tgrid = amg.ReadPixels()\n\t\ttime.Sleep(time.Duration(*refresh) * time.Millisecond)\n\t}\n}",
"func ledHandler(w http.ResponseWriter, r *http.Request) {\n\trequest := &ledStatusRequset{}\n\tjson.NewDecoder(r.Body).Decode(request)\n\ttoSend := \"off\"\n\tif request.Status {\n\t\ttoSend = \"on\"\n\t}\n\tpublish(1, topicLED, toSend)\n\thttpJSON(w, httpMessageReturn{Message: \"OK\"}, http.StatusOK, nil)\n}",
"func (d *Display) CursorBlinkOn() error {\n\t_, err := d.port.Write([]byte(CursorBlinkOn))\n\treturn err\n}",
"func Break() {\n\tfmt.Println(color.HiMagentaString(\"-------\"))\n}",
"func lesson56(){\n\ttick := time.Tick(100 * time.Microsecond)\n\tboom := time.After(500 * time.Microsecond)\n\t\n\tfor {\n\t\tselect {\n\t\t\tcase <- tick:\n\t\t\t\tfmt.Println(\"tick.\")\n\t\t\tcase <- boom:\n\t\t\t\tfmt.Println(\"BOOM!\")\n\t\t\t\t//break ここでbreakしてもforからは抜けない\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tfmt.Println(\" .\")\n\t\t\t\ttime.Sleep(50 * time.Microsecond)\n\t\t}\n\t}\n}",
"func (d *BH1750FVI) Run() {\n\tgo func() {\n\t\td.quit = make(chan bool)\n\n\t\ttimer := time.Tick(time.Duration(d.Poll) * time.Millisecond)\n\n\t\tvar lighting float64\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase d.lightingReadings <- lighting:\n\t\t\tcase <-timer:\n\t\t\t\tl, err := d.measureLighting()\n\t\t\t\tif err == nil {\n\t\t\t\t\tlighting = l\n\t\t\t\t}\n\t\t\t\tif err == nil && d.lightingReadings == nil {\n\t\t\t\t\td.lightingReadings = make(chan float64)\n\t\t\t\t}\n\t\t\tcase <-d.quit:\n\t\t\t\td.lightingReadings = nil\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\treturn\n}",
"func (wd *Watchdog) loop() {\n\tvar t0 int64\nidle:\n\tt0 = <-wd.resets\n\tt0 = wd.pump(t0)\nloop:\n\tt0 = t0 - time.Now().UnixNano()\n\ttime.Sleep(time.Duration(t0))\n\tnow := time.Now().UnixNano()\n\tt0 = wd.pump(now)\n\tif t0 == now {\n\t\twd.timeouts <- true\n\t\tgoto idle\n\t}\n\tgoto loop\n}",
"func (m *Model) blinkCmd() tea.Cmd {\n\tif m.cursorMode != CursorBlink {\n\t\treturn nil\n\t}\n\n\tif m.blinkCtx != nil && m.blinkCtx.cancel != nil {\n\t\tm.blinkCtx.cancel()\n\t}\n\n\tctx, cancel := context.WithTimeout(m.blinkCtx.ctx, m.BlinkSpeed)\n\tm.blinkCtx.cancel = cancel\n\n\tm.blinkTag++\n\n\treturn func() tea.Msg {\n\t\tdefer cancel()\n\t\t<-ctx.Done()\n\t\tif ctx.Err() == context.DeadlineExceeded {\n\t\t\treturn blinkMsg{id: m.id, tag: m.blinkTag}\n\t\t}\n\t\treturn blinkCanceled{}\n\t}\n}",
"func (r *RoverDriver) Stop() {\n r.commands <- stop\n}",
"func hallCallLightDriver() {\n\tfor {\n\t\tvar lights [store.NumFloors][2]bool\n\t\tallElevators := store.GetAll()\n\n\t\tfor _, elevator := range allElevators {\n\t\t\tfor _, hallCall := range elevator.GetAllHallCalls() {\n\t\t\t\tif hallCall.Direction == elevators.DirectionUp {\n\t\t\t\t\tlights[hallCall.Floor][1] = true\n\t\t\t\t} else if hallCall.Direction == elevators.DirectionDown {\n\t\t\t\t\tlights[hallCall.Floor][0] = true\n\t\t\t\t} else if hallCall.Direction == elevators.DirectionBoth {\n\t\t\t\t\tlights[hallCall.Floor][1] = true\n\t\t\t\t\tlights[hallCall.Floor][0] = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor floor, value := range lights {\n\t\t\televio.SetButtonLamp(elevio.BT_HallDown, floor, value[0])\n\t\t\televio.SetButtonLamp(elevio.BT_HallUp, floor, value[1])\n\t\t}\n\n\t\t<-store.ShouldRecalculateHCLightsChannel\n\t}\n}",
"func (j *Joint) Breakoff() {\n\tif j.broken == 1 {\n\t\treturn\n\t}\n\tif atomic.CompareAndSwapInt32(&j.broken, 0, 1) {\n\t\tclose(j.breakC)\n\t\tclose(j.reloadC)\n\t}\n}",
"func (rhost *rhostData) stop(tcd *trudp.ChannelData) {\n\trhost.reconnect(tcd)\n}",
"func (rf *Raft) Kill() {\n\t// Your code here, if desired.\n\tfor idx := range rf.clients {\n\t\tif idx != rf.me {\n\t\t\trf.clients[idx].Stop()\n\t\t}\n\t}\n\tDebugPrint(\"Kill Raft %d, fail rpc: %d\\n\", rf.me, rf.failCount)\n\t//for ts := 1; atomic.LoadInt32(&rf.stop) != 2 && ts < 20; ts ++ {\n\t//\ttime.Sleep(1000 * time.Millisecond)\n\t//}\n\trf.mu.Lock()\n\trf.stop = true\n\trf.mu.Unlock()\n\trf.stopChan <- true\n\tDebugPrint(\"Kill Raft %d\\n\", rf.me)\n}",
"func (b *BatchDelayNode) Shutdown() {\n\tb.running.Off()\n}",
"func (i2c I2C) stop() {\n\t// Send stop condition.\n\tavr.TWCR.Set(avr.TWCR_TWEN | avr.TWCR_TWINT | avr.TWCR_TWSTO)\n\n\t// Wait for stop condition to be executed on bus.\n\tfor !avr.TWCR.HasBits(avr.TWCR_TWSTO) {\n\t}\n}",
"func SlowBlink(arg interface{}) Value {\n\tif val, ok := arg.(Value); ok {\n\t\treturn val.SlowBlink()\n\t}\n\treturn value{value: arg, color: SlowBlinkFm}\n}",
"func (rf *Raft) Kill() {\n\tatomic.StoreInt32(&rf.dead, 1)\n\t// Your code here, if desired.\n\t//Once.Do(func() {\n\t//\tlog.Println(\n\t//\t\tatomic.LoadInt32(&AppendEntriesCounts),\n\t//\t\tatomic.LoadInt32(&AppendEntriesFailed),\n\t//\t\tatomic.LoadInt32(&StartsCounts),\n\t//\t\tatomic.LoadInt32(&BroadcastAppendCounts),\n\t//\n\t//\t)\n\t//})\n}",
"func flash_red(){\n // Set up options.\n options := serial.OpenOptions{\n PortName: \"/dev/ttyUSB0\",\n BaudRate: 19200,\n DataBits: 8,\n StopBits: 1,\n MinimumReadSize: 4,\n }\n\n // Open the port.\n port, err := serial.Open(options)\n if err != nil {\n log.Fatalf(\"serial.Open: %v\", err)\n }\n\n // Make sure to close it later.\n defer port.Close()\n\n // Write 2 bytes to the port.\n b := []byte(\"A<\")\n n, err := port.Write(b)\n if err != nil {\n log.Fatalf(\"port.Write: %v\", err)\n }\n\n fmt.Println(\"Wrote\", n, \"bytes.\")\n\n}",
"func (device *ServoBrick) Disable(servoNum uint8) (err error) {\n\tvar buf bytes.Buffer\n\tbinary.Write(&buf, binary.LittleEndian, servoNum)\n\n\tresultBytes, err := device.device.Set(uint8(FunctionDisable), buf.Bytes())\n\tif err != nil {\n\t\treturn err\n\t}\n\tif len(resultBytes) > 0 {\n\t\tvar header PacketHeader\n\n\t\theader.FillFromBytes(resultBytes)\n\n\t\tif header.Length != 8 {\n\t\t\treturn fmt.Errorf(\"Received packet of unexpected size %d, instead of %d\", header.Length, 8)\n\t\t}\n\n\t\tif header.ErrorCode != 0 {\n\t\t\treturn DeviceError(header.ErrorCode)\n\t\t}\n\n\t\tbytes.NewBuffer(resultBytes[8:])\n\n\t}\n\n\treturn nil\n}",
"func (tapi *TadiranAPI) Off() error {\n\ttapi.currentState = OFF\n\treturn tapi.send(\"POWER_OFF\")\n}",
"func delay(v int) {\n\tfor i := 0; i < 684000; i++ {\n\t\tfor j := 0; j < v; j++ {\n\t\t}\n\t}\n}",
"func main () {\n\tledList = []*Led{}\n var err error\n log, err = logger.New(\"cracker\", 1, os.Stdout)\n if err != nil {\n panic(err) \n }\n dbh = new(DBHandler)\n dbh.Init()\n defer dbh.Close()\n jh = new(JohnHandler)\n jh.Init()\n \n /* Start a new Session with JohnHandler */\n run := jh.Start(&Wpa{id:111, name:\"Keykey\", bssid:\"AA:BB:CC:DD:EE:FF\"})\n dbh.StoreRun(run)\n\n /* Check if there are any unfinnished runs. */\n listruns := dbh.GetAllNotDoneRuns()\n for _,run := range listruns {\n \n }\n \n\n /* Scan the given folders */\n // dbh.StoreWordlist(&Wordlist{id:0, name:\"rockyou.txt\", size:\"143MB\", avg_run:30012313})\n // dbh.GetAllWpa()\n // ScanUpdate()\n\n /* Adding Leds to a list */\n led := &Led{Name: \"internet_access\", Port: \"GPIO14\", State: LEDOFF, QueuedState: LEDBLINK}\n ledList = append(ledList, led)\n\n /* !TEST! LED code !TEST! */\n // go LEDController()\n\t// log.Info(\"Sleeping...\")\n\t// time.Sleep(10 * time.Second)\n // log.Info(\"Changing Led-State to ON\")\n // led.QueuedState = LEDON\n // time.Sleep(4 * time.Second)\n}",
"func (m *Milight) Off() error {\n\tcmd := []byte{0x31, 0x00, 0x00, 0x00, 0x03, 0x04, 0x00, 0x00, 0x00}\n\treturn m.sendCommand(cmd)\n}",
"func cooldownArduino(energy float32, serialPort io.ReadWriteCloser) error {\n\treturn sendArduinoCommand('c', energy, serialPort)\n}",
"func (d *Dev) SetBlink(freq BlinkFrequency) error {\n\tif _, err := d.dev.Write([]byte{displaySetup | displayOn | byte(freq)}); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func Backoff(try int) {\n\tnf := math.Pow(2, float64(try))\n\tnf = math.Max(1, nf)\n\tnf = math.Min(nf, 16)\n\tr := rand.Int31n(int32(nf))\n\td := time.Duration(r) * time.Second\n\ttime.Sleep(d)\n}",
"func cmdPoweroff() {\n\tswitch state := status(B2D.VM); state {\n\tcase vmUnregistered:\n\t\tlog.Fatalf(\"%s is not registered.\", B2D.VM)\n\tcase vmRunning:\n\t\tif err := vbm(\"controlvm\", B2D.VM, \"poweroff\"); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\tdefault:\n\t\tlog.Printf(\"%s is not running.\", B2D.VM)\n\t}\n}",
"func timerSleep(ticks uint32) {\n\ttimerWakeup.Set(0)\n\n\t// CK_INT = APB1 x2 = 84mhz\n\t// prescale counter down from 84mhz to 10khz aka 0.1 ms frequency.\n\tstm32.TIM3.PSC.Set(84000000/10000 - 1) // 8399\n\n\t// set duty aka duration\n\tarr := (ticks / 100) - 1 // convert from microseconds to 0.1 ms\n\tif arr == 0 {\n\t\tarr = 1 // avoid blocking\n\t}\n\tstm32.TIM3.ARR.Set(arr)\n\n\t// Enable the hardware interrupt.\n\tstm32.TIM3.DIER.SetBits(stm32.TIM_DIER_UIE)\n\n\t// Enable the timer.\n\tstm32.TIM3.CR1.SetBits(stm32.TIM_CR1_CEN)\n\n\t// wait till timer wakes up\n\tfor timerWakeup.Get() == 0 {\n\t\tarm.Asm(\"wfi\")\n\t}\n}",
"func (c *lscChoco) Stop() {\n\tc.status.State = state.STANDBY\n\tfor i := range c.sensors {\n\t\tc.sensors[i].SetState(state.STANDBY)\n\t}\n}",
"func InfiniteRainbow() {\n\tcolors := make([]string, 0, 6)\n\t// generate range of rainbow values\n\tfor i := 0; i <= 255; i++ {\n\t\tc := RGBColor{255, i, 0}\n\t\tcolors = append(colors, c.GetColorInHex())\n\t}\n\n\tfor i := 255; i >= 0; i-- {\n\t\tc := RGBColor{i, 255, 0}\n\t\tcolors = append(colors, c.GetColorInHex())\n\t}\n\n\tfor i := 0; i <= 255; i++ {\n\t\tc := RGBColor{0, 255, i}\n\t\tcolors = append(colors, c.GetColorInHex())\n\t}\n\n\tfor i := 255; i >= 0; i-- {\n\t\tc := RGBColor{0, i, 255}\n\t\tcolors = append(colors, c.GetColorInHex())\n\t}\n\n\tfor i := 0; i <= 255; i++ {\n\t\tc := RGBColor{i, 0, 255}\n\t\tcolors = append(colors, c.GetColorInHex())\n\t}\n\n\tfor i := 255; i >= 0; i-- {\n\t\tc := RGBColor{255, 0, i}\n\t\tcolors = append(colors, c.GetColorInHex())\n\t}\n\n\tfor {\n\t\tfor _, c := range colors {\n\t\t\tColorFileHandler(c)\n\t\t\ttime.Sleep(time.Nanosecond)\n\t\t}\n\t}\n}",
"func (tw *TimingWheel) Stop() {\n\ttw.ticker.Stop()\n}"
] | [
"0.7167407",
"0.706318",
"0.6571996",
"0.65486085",
"0.6354301",
"0.62981844",
"0.61297184",
"0.5831457",
"0.5776189",
"0.57494134",
"0.56868124",
"0.54084325",
"0.5395688",
"0.5395688",
"0.5353163",
"0.53415465",
"0.5334647",
"0.5312089",
"0.5297992",
"0.52786934",
"0.5264421",
"0.5199337",
"0.518527",
"0.5176387",
"0.5161079",
"0.5145648",
"0.51430076",
"0.5140853",
"0.51233584",
"0.51067007",
"0.5097651",
"0.50944406",
"0.50921905",
"0.50837797",
"0.5082971",
"0.5063497",
"0.50252616",
"0.50175405",
"0.50173306",
"0.50170964",
"0.5015577",
"0.5011917",
"0.5008746",
"0.50059676",
"0.49935925",
"0.49927282",
"0.49893174",
"0.49687403",
"0.49407",
"0.4921647",
"0.4919036",
"0.49135184",
"0.49096325",
"0.49083",
"0.4901109",
"0.4887017",
"0.48732793",
"0.48608705",
"0.48493582",
"0.48450762",
"0.48346135",
"0.48280647",
"0.48257118",
"0.48160407",
"0.4805714",
"0.48012385",
"0.47952983",
"0.479156",
"0.47913778",
"0.47909853",
"0.4788237",
"0.47778726",
"0.47721368",
"0.47602472",
"0.4759103",
"0.47539565",
"0.4753405",
"0.47457618",
"0.47417784",
"0.47386134",
"0.47355783",
"0.47266474",
"0.47165754",
"0.47121084",
"0.47113344",
"0.47081047",
"0.4703512",
"0.46885437",
"0.46862042",
"0.46769628",
"0.46562356",
"0.46503523",
"0.46496344",
"0.46495542",
"0.4643961",
"0.4642412",
"0.4641246",
"0.46362868",
"0.46220508",
"0.46188685",
"0.46153525"
] | 0.0 | -1 |
Replacer: copies file whilst replacing a given string Example (not compiled): go run replacer.go /path/to/big/input.txt replaced.txt password '' Reads input.txt, copies it to the PWD, replacing all occurrences of "password" with There is an optional unsafe flag to use bytetostring conversion via unsafe pointers, too: ./replace unsafe in.log out.log search replace convert read buffer to string | func bufferToString(buffer *bytes.Buffer, unsafePtr *bool) string {
defer buffer.Reset()//ensure buffer is reset
if !*unsafePtr {
return buffer.String()
}
bb := buffer.Bytes()
s := *(*string)(unsafe.Pointer(&bb))
return s
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func main() {\n\tswitch len(os.Args) {\n\tcase 2:\n\t\treplacer.Produce(os.Args[1])\n\tcase 3:\n\t\treplacer.Produce(os.Args[2])\n\tdefault:\n\t\tpanic(\"Bad usage. Pass 1 or 2 arguments. The last one should be path to file, estimated arguments will be ignored.\")\n\t}\n}",
"func Replace_strings(inFile string, kvMap map[string]string) (string, error) {\n\n\t// read original file contents into buffer\n\tbuffer, err := ioutil.ReadFile(inFile)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcontents := string(buffer)\n\n\t// replace strings using a map\n\t// keys k are the old values being replaced\n\t// values v are the values replacing the coresponding keys\n\tfor k, v := range kvMap {\n\t\tcontents = strings.Replace(contents, k, v, -1)\n\t}\n\n\treturn contents, nil\n}",
"func replace(src, dst string) error {\n\tkernel32, err := syscall.LoadLibrary(\"kernel32.dll\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer syscall.FreeLibrary(kernel32)\n\tmoveFileExUnicode, err := syscall.GetProcAddress(kernel32, \"MoveFileExW\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrcString, err := syscall.UTF16PtrFromString(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdstString, err := syscall.UTF16PtrFromString(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsrcPtr := uintptr(unsafe.Pointer(srcString))\n\tdstPtr := uintptr(unsafe.Pointer(dstString))\n\n\tMOVEFILE_REPLACE_EXISTING := 0x1\n\tflag := uintptr(MOVEFILE_REPLACE_EXISTING)\n\n\t_, _, callErr := syscall.Syscall(uintptr(moveFileExUnicode), 3, srcPtr, dstPtr, flag)\n\tif callErr != 0 {\n\t\treturn callErr\n\t}\n\n\treturn nil\n}",
"func replaceFile(filePath, lines string) {\n\tbytesToWrite := []byte(lines) //data written\n\terr := ioutil.WriteFile(filePath, bytesToWrite, 0644) //filename, byte array (binary representation), and 0644 which represents permission number. (0-777) //will create a new text file if that text file does not exist yet\n\tif isError(err) {\n\t\tfmt.Println(\"Error Writing to file:\", filePath, \"=\", err)\n\t\treturn\n\t}\n}",
"func searchAndReplace(file, search, replace string, c chan string) {\n\ts := regexp.MustCompile(search)\n\n\tb, err := ioutil.ReadFile(file)\n\tif err != nil {\n\t\tc <- \"\"\n\t\treturn\n\t}\n\n\tcontents := s.ReplaceAll(b, []byte(replace))\n\n\tif string(contents) != string(b) {\n\t\terr := ioutil.WriteFile(file, contents, 0644)\n\t\tif err != nil {\n\t\t\tc <- \"\"\n\t\t\treturn\n\t\t}\n\n\t\tc <- fmt.Sprintf(\"\\tReplacements in %s\", file)\n\t\treturn\n\t}\n\n\tc <- \"\"\n\treturn\n}",
"func main() {\n\t// func Replace(s, old, new string, n int) string\n\tfmt.Println(strings.Replace(\"aaaa\", \"a\", \"b\", 2)) // 2 indicates how many times to do the replacement\n\t// => \"bbaa\"\n\n}",
"func Do(file, pattern, value string) error {\n\tpath, err := filepath.Abs(file)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"file path\")\n\t}\n\tcontent, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"not found\")\n\t}\n\tre, err := regexp.Compile(pattern)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"regexp\")\n\t}\n\tres := re.ReplaceAllString(string(content), value)\n\treturn ioutil.WriteFile(path, []byte(res), os.ModeAppend)\n}",
"func Replace(filePath string, replacements ...PlaceholderReplacement) error {\n\tcontent, err := ReadFile(filePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, replacement := range replacements {\n\t\tcontent = strings.ReplaceAll(content, replacement.PlaceholderValue, replacement.DesiredValue)\n\t}\n\n\treturn ioutil.WriteFile(filePath, []byte(content), 0666)\n}",
"func replace_string(src []byte, what string, to string) []byte {\n\tvar result = []byte{}\n\twhat_b := []byte(what)\n\tto_b := []byte(to)\n\ti := bytes.Index(src, what_b)\n\tj := 0\n\tfor i >= 0 {\n\t\ti = i + j\n\t\tresult = append(result, src[j:i]...)\n\t\tresult = append(result, to_b...)\n\t\tif i+len(what_b) < len(src) {\n\t\t\tj = i + len(what_b)\n\t\t\ti = bytes.Index(src[i+len(what_b):], what_b)\n\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\n\t}\n\tresult = append(result, src[j:]...)\n\treturn result\n}",
"func RewriteFile(input string, output string) {\n\tlog.Printf(\"Opening %s\\n\", input)\n\tinfile, _ := os.Open(input)\n\tlog.Printf(\"Opening %s\\n\", output)\n\toutfile, _ := os.OpenFile(output, os.O_RDWR|os.O_APPEND, 0660)\n\n\tdefer infile.Close()\n\tdefer outfile.Close()\n\n\tlog.Println(\"Rewriting\")\n\tfor {\n\t\tb := make([]byte, 1024)\n\t\tread, _ := infile.Read(b)\n\t\t\n\t\toutfile.Write(b[:read])\n\n\t\tif read < 1024 {\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func main() {\n scanner := bufio.NewScanner(os.Stdin)\n scanner.Buffer(make([]byte, 1000000), 1000000)\n\n scanner.Scan()\n X := scanner.Text()\n // _ = X // to avoid unused error\n scanner.Scan()\n Y := scanner.Text()\n // _ = Y // to avoid unused error\n\n replaceTargets := []string{}\n replacedChars := []string{}\n for i := 0; i < len(X); i++ {\n x := string(X[i])\n y := string(Y[i])\n\n if x != y && !contains(replaceTargets, x) {\n replaceTargets = append(replaceTargets, x)\n replacedChars = append(replacedChars, y)\n }\n }\n\n replaceResult := X\n for i := 0; i < len(replaceResult); i++ {\n c := string(replaceResult[i])\n if contains(replaceTargets, c) {\n index := indexOf(replaceTargets, c)\n replaceResult = replaceResult[0:i] + replacedChars[index] + replaceResult[i+1:]\n }\n }\n\n // fmt.Fprintln(os.Stderr, \"Debug messages...\")\n // fmt.Println(\"anwser\")// Write answer to stdout\n if X == Y {\n fmt.Println(\"NONE\")\n } else if replaceResult == Y {\n for i := 0; i < len(replaceTargets); i++ {\n fmt.Println(replaceTargets[i] + \"->\" + replacedChars[i])\n }\n } else {\n fmt.Println(\"CAN'T\")\n }\n}",
"func replaceFileHash(input, filePath string, hv hashVar) (string, error) {\n\tfor i := range hv.submatches {\n\t\th := hv.values[i]\n\n\t\thashValue, err := getHash(filePath, h.hashFn)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tinput = h.regex.ReplaceAllString(input, hashValue)\n\t}\n\n\treturn input, nil\n}",
"func NewStreamReplacer(r io.Reader, patternReplacementPairs ...[]byte) io.Reader {\n\trepl := NewBytesReplacer(patternReplacementPairs...)\n\tmaxLen := repl.MaxPatternLen() - 1\n\tif maxLen == 0 {\n\t\tmaxLen = 1\n\t}\n\tpr, pw := io.Pipe()\n\tgo func() {\n\t\tn := 4096\n\t\tfor n < maxLen {\n\t\t\tn <<= 1\n\t\t}\n\t\tscratch := make([]byte, n)\n\t\tfor {\n\t\t\tn, readErr := io.ReadAtLeast(r, scratch, maxLen)\n\t\t\tif n == 0 && readErr == nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tvar writeErr error\n\t\t\tif n > 0 {\n\t\t\t\tscratch = repl.Replace(scratch[:n])\n\t\t\t\tif readErr != nil {\n\t\t\t\t\tn, writeErr = pw.Write(scratch)\n\t\t\t\t} else {\n\t\t\t\t\tn, writeErr = pw.Write(scratch[:len(scratch)-maxLen])\n\t\t\t\t}\n\t\t\t\tscratch = scratch[n:]\n\t\t\t}\n\t\t\tif readErr != nil {\n\t\t\t\tif len(scratch) > 0 {\n\t\t\t\t\t_, _ = pw.Write(scratch)\n\t\t\t\t}\n\t\t\t\tpw.CloseWithError(readErr)\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif writeErr != nil {\n\t\t\t\tpw.CloseWithError(writeErr)\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\treturn pr\n}",
"func replaceFunction(from, to uintptr) (original []byte) {\n\tjumpData := jmpToFunctionValue(to)\n\tf := rawMemoryAccess(from, len(jumpData))\n\toriginal = make([]byte, len(f))\n\tcopy(original, f)\n\n\tcopyToLocation(from, jumpData)\n\treturn\n}",
"func overrideFile(fileName string, err error, contents []byte, perm os.FileMode, vfn verify.VerifyFn) error {\n\ttimeNano := time.Now().UnixNano()\n\t// write the new contents to a temporary file\n\tnewFileName := fmt.Sprintf(\"%s.tmp%d\", fileName, timeNano)\n\tlog.Printf(\"Writing contents to temporary file %s...\\n\", newFileName)\n\terr = ioutil.WriteFile(newFileName, contents, perm)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to write new file %s, err: %v\\n\", newFileName, err)\n\t\treturn err\n\t}\n\n\t// Sanity check the new file against the existing one\n\tif vfn != nil {\n\t\terr = vfn(fileName, newFileName)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"invalid content: %q, error: %v\", newFileName, err)\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// move the contents of the old file to a backup file\n\tbakFileName := fmt.Sprintf(\"%s.bak%d\", fileName, timeNano)\n\tlog.Printf(\"Renaming original file %s to backup file %s...\\n\", fileName, bakFileName)\n\terr = os.Rename(fileName, bakFileName)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to rename file %s to %s, err: %v\\n\", fileName, bakFileName, err)\n\t\treturn err\n\t}\n\t// move the new contents to the original location\n\tlog.Printf(\"Renaming temporary file %s to requested file %s...\\n\", newFileName, fileName)\n\terr = os.Rename(newFileName, fileName)\n\tif err != nil {\n\t\tlog.Printf(\"Unable to rename file %s to %s, err: %v\\n\", newFileName, fileName, err)\n\t\t// before returning try to restore the original file\n\t\tos.Rename(bakFileName, fileName)\n\t\treturn err\n\t}\n\t// remove the temporary backup file\n\tlog.Printf(\"Removing backup file %s...\\n\", bakFileName)\n\tos.Remove(bakFileName)\n\treturn nil\n}",
"func StringReplace(src, rep, sub string) (n string) {\n\t// make sure the src has the char we want to replace. \n\tif strings.Count(src, rep) > 0 {\n\t\trunes := src // convert to utf-8 runes. \n\t\tfor i := 0; i < len(runes); i++ {\n\t\t\tl := string(runes[i]) // grab our rune and convert back to string. \n\t\t\tif l == rep {\n\t\t\t\tn += sub\n\t\t\t} else {\n\t\t\t\tn += l\n\t\t\t}\n\t\t}\n\t\treturn n\n\t}\n\treturn src\n}",
"func rewrite(name string, t transform.Transformer) error {\n\t// open original file\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\t// create temp file\n\tpattern := filepath.Base(name) + \"-temp-*\"\n\ttmp, err := ioutil.TempFile(filepath.Dir(name), pattern)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer os.Remove(tmp.Name())\n\tdefer tmp.Close()\n\t// replace while copying from f to tmp\n\tif _, err := io.Copy(tmp, transform.NewReader(f, t)); err != nil {\n\t\treturn err\n\t}\n\t// make sure the tmp file was successfully written to\n\tif err := tmp.Close(); err != nil {\n\t\treturn err\n\t}\n\t// close the file we're reading from\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\t// overwrite the original file with the temp file\n\treturn os.Rename(tmp.Name(), name)\n}",
"func (tf *transformer) writeSourceFile(basename, obfuscated string, content []byte) (string, error) {\n\t// Uncomment for some quick debugging. Do not delete.\n\t// fmt.Fprintf(os.Stderr, \"\\n-- %s/%s --\\n%s\", curPkg.ImportPath, basename, content)\n\n\tif flagDebugDir != \"\" {\n\t\tpkgDir := filepath.Join(flagDebugDir, filepath.FromSlash(tf.curPkg.ImportPath))\n\t\tif err := os.MkdirAll(pkgDir, 0o755); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdstPath := filepath.Join(pkgDir, basename)\n\t\tif err := os.WriteFile(dstPath, content, 0o666); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\t// We use the obfuscated import path to hold the temporary files.\n\t// Assembly files do not support line directives to set positions,\n\t// so the only way to not leak the import path is to replace it.\n\tpkgDir := filepath.Join(sharedTempDir, tf.curPkg.obfuscatedImportPath())\n\tif err := os.MkdirAll(pkgDir, 0o777); err != nil {\n\t\treturn \"\", err\n\t}\n\tdstPath := filepath.Join(pkgDir, obfuscated)\n\tif err := writeFileExclusive(dstPath, content); err != nil {\n\t\treturn \"\", err\n\t}\n\treturn dstPath, nil\n}",
"func main() {\n\t// param\n\tif len(os.Args) < 2 {\n\t\tprintln(\"VERSION\", VERSION, \": call\", filepath.Base(os.Args[0]), \"<root path>\")\n\t\tos.Exit(1)\n\t}\n\tvar root = os.Args[1]\n\n\t// check changes\n\thash := changeHash(root)\n\tb, err := ioutil.ReadFile(filepath.Join(root, outDivs))\n\tif err == nil && strings.Contains(string(b), hash) {\n\t\tos.Exit(0) // no changes -> exit\n\t}\n\n\t// scan root\n\trows := new(strings.Builder)\n\tdivs := new(strings.Builder)\n\tfor _, m := range Scan(root) {\n\t\trows.WriteString(m.TableRow(maxImageSize))\n\t\tdivs.WriteString(m.Div(maxImageSize))\n\t}\n\n\t// ----------------------------------------------\n\n\t// read template\n\tb, err = ioutil.ReadFile(templateRows)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// replace\n\thtml := strings.ReplaceAll(string(b), templateHash, hash)\n\thtml = strings.ReplaceAll(html, insertRows, rows.String())\n\t// write file\n\terr = ioutil.WriteFile(filepath.Join(root, outRows), []byte(html), 0600)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// ----------------------------------------------\n\n\t// read template\n\tb, err = ioutil.ReadFile(templateDivs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t// replace\n\thtml = strings.ReplaceAll(string(b), templateHash, hash)\n\thtml = strings.ReplaceAll(html, insertDivs, divs.String())\n\t// write file\n\terr = ioutil.WriteFile(filepath.Join(root, outDivs), []byte(html), 0600)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func InReplace(text, allowed []byte, c byte) []byte {\n\tif len(text) == 0 {\n\t\treturn nil\n\t}\n\n\tvar found bool\n\tfor x := 0; x < len(text); x++ {\n\t\tfound = false\n\t\tfor y := 0; y < len(allowed); y++ {\n\t\t\tif text[x] == allowed[y] {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\ttext[x] = c\n\t\t}\n\t}\n\treturn text\n}",
"func CustomSanitize(dumpFile string, regex string, replacement []byte) error {\n\tre := regexp.MustCompile(regex)\n\tdata, err := ioutil.ReadFile(dumpFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsanitized := re.ReplaceAll(data, replacement)\n\n\treturn ioutil.WriteFile(dumpFile, sanitized, 0644)\n\n}",
"func ResetString2File(path, content string) error {\n\tfile, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0664)\n\tif err != nil {\n\t\tglog.Errorf(\"Can not open file: %s\", path)\n\t\treturn err\n\t}\n\tdefer file.Close()\n\n\tfile.WriteString(content)\n\treturn nil\n}",
"func main() {\n\tapp := cli.NewApp()\n\tapp.Version = \"0.0.1\"\n\tapp.Name = \"sub\"\n\tapp.Usage = \"A command-line tool for substituting patterns from a stream.\"\n\tapp.UsageText = \"sub [pattern] [replacement]\"\n\tapp.Author = \"Kevin Cantwell\"\n\tapp.Email = \"[email protected]\"\n\tapp.Action = func(c *cli.Context) error {\n\t\tpattern := c.Args().Get(0)\n\t\treplacement := []byte(c.Args().Get(1))\n\n\t\tregex, err := regexp.Compile(pattern)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tscanner := bufio.NewScanner(os.Stdin)\n\t\tfor scanner.Scan() {\n\t\t\tmatches := regex.FindAllSubmatch(scanner.Bytes(), -1)\n\t\t\tfor _, submatches := range matches {\n\t\t\t\treplaced, err := replace(replacement, submatches)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tos.Stdout.Write(replaced)\n\t\t\t\tos.Stdout.Write([]byte(\"\\n\"))\n\t\t\t}\n\t\t}\n\t\tif err := scanner.Err(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treturn nil\n\t}\n\tif err := app.Run(os.Args); err != nil {\n\t\texit(err.Error(), 1)\n\t}\n}",
"func reflinkFile(src, dst string, fallback bool) error {\n\ts, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.Close()\n\n\t// generate temporary file for output\n\ttmp, err := ioutil.TempFile(filepath.Dir(dst), \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// copy to temp file\n\terr = reflinkInternal(tmp, s)\n\n\t// if reflink failed but we allow fallback, first attempt using copyFileRange (will actually clone bytes on some filesystems)\n\tif (err != nil) && fallback {\n\t\tvar st fs.FileInfo\n\t\tst, err = s.Stat()\n\t\tif err == nil {\n\t\t\t_, err = copyFileRange(tmp, s, 0, 0, st.Size())\n\t\t}\n\t}\n\n\t// if everything failed and we fallback, attempt io.Copy\n\tif (err != nil) && fallback {\n\t\t// reflink failed but fallback enabled, perform a normal copy instead\n\t\t_, err = io.Copy(tmp, s)\n\t}\n\ttmp.Close() // we're not writing to this anymore\n\n\t// if an error happened, remove temp file and signal error\n\tif err != nil {\n\t\tos.Remove(tmp.Name())\n\t\treturn err\n\t}\n\n\t// keep src file mode if possible\n\tif st, err := s.Stat(); err == nil {\n\t\ttmp.Chmod(st.Mode())\n\t}\n\n\t// replace dst file\n\terr = os.Rename(tmp.Name(), dst)\n\tif err != nil {\n\t\t// failed to rename (dst is not writable?)\n\t\tos.Remove(tmp.Name())\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func ReplaceStringInDirectoryFiles(filesPath string, oldString string, newString string) error {\n\tvar (\n\t\terr error\n\t\tfileDescriptors []os.FileInfo\n\t)\n\n\tif fileDescriptors, err = ioutil.ReadDir(filesPath); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, fileDescriptor := range fileDescriptors {\n\t\tif !fileDescriptor.IsDir() && strings.HasSuffix(fileDescriptor.Name(), \".sql\") {\n\n\t\t\tfileContent, err := ioutil.ReadFile(filesPath + \"/\" + fileDescriptor.Name())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tnewContent := strings.Replace(\n\t\t\t\tstring(fileContent),\n\t\t\t\toldString,\n\t\t\t\tnewString,\n\t\t\t\t-1,\n\t\t\t)\n\n\t\t\terr = ioutil.WriteFile(filesPath+\"/\"+fileDescriptor.Name(), []byte(newContent), 0)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func replaceUserdataFile(machineName, machineOS, hostname string, userdataContent, customScriptContent []byte, newUserDataFile *os.File) error {\n\tvar err error\n\tvar encodedData string\n\tcf := make(map[interface{}]interface{})\n\n\tswitch {\n\tcase bytes.HasPrefix(userdataContent, []byte(\"#!\")):\n\t\t// The user provided a script file, so the customInstallScript contents is appended to user script\n\t\t// and added to the \"runcmd\" section so modified user data is always in cloud config format.\n\n\t\t// Remove the shebang\n\t\tuserdataContent = regexp.MustCompile(`^#!.*\\n`).ReplaceAll(userdataContent, nil)\n\n\t\tencodedData, err = gzipEncode(bytes.Join([][]byte{userdataContent, customScriptContent}, []byte(\"\\n\\n\")))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\tcase bytes.HasPrefix(userdataContent, []byte(\"#cloud-config\")):\n\t\t// The user provided a cloud-config file, so the customInstallScript context is added to the\n\t\t// \"runcmd\" section of the YAML.\n\t\tif err := yaml.Unmarshal(userdataContent, &cf); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tencodedData, err = gzipEncode(customScriptContent)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\tdefault:\n\t\treturn fmt.Errorf(\"existing userdata file does not begin with '#!' or '#cloud-config'\")\n\t}\n\n\treturn writeCloudConfig(machineName, encodedData, machineOS, hostname, cf, newUserDataFile)\n}",
"func (re *RegexpStd) ReplaceAllString(src, repl string) string {\n\trep, err := re.p.Replace(src, repl, 0, -1)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn src\n\t}\n\treturn rep\n}",
"func handleFile(config *Config, fileName string) {\n\tlog.Println(\"Edit\", fileName)\n\tnew_content, err_sed := execCmdWithOutput(config.SedCMD, \"-e\", \"s/\"+config.ReplaceFrom+\"/\"+config.ReplaceTo+\"/g\", fileName)\n\tif err_sed != nil {\n\t\tlog.Panic(err_sed)\n\t\treturn\n\t}\n\n\tfile, err := os.OpenFile(fileName, os.O_WRONLY, 0666)\n\tif err != nil {\n\t\tlog.Fatal(\"File cannot opened\", fileName)\n\t}\n\n\tfile.Truncate(0)\n\tfile.WriteString(new_content)\n\tfile.Close()\n}",
"func DoReplacements(input string, mapping MappingFunc) interface{} {\n\tvar buf strings.Builder\n\tcheckpoint := 0\n\tfor cursor := 0; cursor < len(input); cursor++ {\n\t\tif input[cursor] == operator && cursor+1 < len(input) {\n\t\t\t// Copy the portion of the input string since the last\n\t\t\t// checkpoint into the buffer\n\t\t\tbuf.WriteString(input[checkpoint:cursor])\n\n\t\t\t// Attempt to read the variable name as defined by the\n\t\t\t// syntax from the input string\n\t\t\tread, isVar, advance := tryReadVariableName(input[cursor+1:])\n\n\t\t\tif isVar {\n\t\t\t\t// We were able to read a variable name correctly;\n\t\t\t\t// apply the mapping to the variable name and copy the\n\t\t\t\t// bytes into the buffer\n\t\t\t\tmapped := mapping(read)\n\t\t\t\tif input == syntaxWrap(read) {\n\t\t\t\t\t// Preserve the type of variable\n\t\t\t\t\treturn mapped\n\t\t\t\t}\n\n\t\t\t\t// Variable is used in a middle of a string\n\t\t\t\tbuf.WriteString(fmt.Sprintf(\"%v\", mapped))\n\t\t\t} else {\n\t\t\t\t// Not a variable name; copy the read bytes into the buffer\n\t\t\t\tbuf.WriteString(read)\n\t\t\t}\n\n\t\t\t// Advance the cursor in the input string to account for\n\t\t\t// bytes consumed to read the variable name expression\n\t\t\tcursor += advance\n\n\t\t\t// Advance the checkpoint in the input string\n\t\t\tcheckpoint = cursor + 1\n\t\t}\n\t}\n\n\t// Return the buffer and any remaining unwritten bytes in the\n\t// input string.\n\treturn buf.String() + input[checkpoint:]\n}",
"func (re *RegexpStd) ReplaceAll(src, repl []byte) []byte {\n\t// n := 2\n\t// if bytes.IndexByte(repl, '$') >= 0 {\n\t// \tn = 2 * (re.numSubexp + 1)\n\t// }\n\t// srepl := \"\"\n\t// b := re.replaceAll(src, \"\", n, func(dst []byte, match []int) []byte {\n\t// \tif len(srepl) != len(repl) {\n\t// \t\tsrepl = string(repl)\n\t// \t}\n\t// \treturn re.expand(dst, srepl, src, \"\", match)\n\t// })\n\t// return b\n\tpanic(\"\")\n}",
"func Replacer(old, new string) NameMapper {\n\treturn func(s string) string {\n\t\treturn strings.ReplaceAll(s, old, new)\n\t}\n}",
"func main() {\n\tm1 := \"Hello word\"\n\t//m2 := \"word\"\n\tfmt.Println(strings.ReplaceAll(m1, \"Hello\", \"My\"))\n}",
"func modify(fid string) (error) {\n var (\n justCopy bool = false\n looking4SOBInsertionPoint bool = false\n looking4SubjectLine bool = true\n looking4BuglinkInsertionPoint bool = false\n subjectLine bool = false\n cveInsertionPoint bool = false\n buglinkInsertionPoint bool = false\n sobInsertionPoint bool = false\n existingCVEs []string\n existingBugIds []string\n existingAcks []string\n existingSobs []string\n existingCps []string\n existingBps []string\n buglinkBaseUrl string = \"http://bugs.launchpad.net/bugs/\"\n cpRC = regexp.MustCompile(\"cherry picked from commit ([0-9a-zA-Z]+)\")\n bpRC = regexp.MustCompile(\"backported from commit ([0-9a-zA-Z]+) upstream\")\n )\n\n // Open the input file, return the error if there is one.\n //\n inputFile, err := os.Open(fid)\n if err != nil {\n return err\n }\n defer inputFile.Close()\n\n // Open the temp. file, return the error if there is one.\n //\n dst, err := ioutil.TempFile(\"./\", \"mp__\")\n if err != nil {\n return err\n }\n defer dst.Close()\n\n scanner := bufio.NewScanner(inputFile)\n for scanner.Scan() {\n line := scanner.Text()\n\n if justCopy {\n dst.WriteString(line)\n dst.WriteString(\"\\n\")\n continue\n }\n\n // If we are looking for the Sob insertion point then we've handled\n // all the other cases and we just need to find the sob section.\n //\n if looking4SOBInsertionPoint {\n looking4SOBInsertionPoint = true\n if line == \"---\" {\n sobInsertionPoint = true\n looking4SOBInsertionPoint = false\n } else {\n if strings.Contains(line, \"Acked-by:\") {\n id := strings.Replace(line, \"Acked-by:\", \"\", -1)\n if !hasString(existingAcks, id) {\n existingAcks = append(existingAcks, id)\n }\n }\n\n if strings.Contains(line, \"Signed-off-by:\") {\n id := strings.Replace(line, \"Signed-off-by:\", \"\", -1)\n if !hasString(existingSobs, id) {\n existingSobs = append(existingSobs, id)\n }\n }\n\n if strings.Contains(line, \"cherry picked\") {\n result := cpRC.FindStringSubmatch(line)\n existingCps = append(existingCps, result[1])\n }\n\n if strings.Contains(line, \"backported from\") {\n result := bpRC.FindStringSubmatch(line)\n existingBps = append(existingBps, result[1])\n }\n }\n }\n\n if sobInsertionPoint {\n dst.WriteString(sobBlock(existingAcks, existingSobs, existingCps, existingBps))\n sobInsertionPoint = false\n justCopy = true\n }\n\n // After the first blank line after the subject line is where we\n // want to insert our CVE lines if we need to insert any.\n //\n if cveInsertionPoint {\n cveInsertionPoint = true\n if strings.Contains(line, \"CVE-\") {\n cve := strings.Replace(line, \"CVE-\", \"\", -1)\n existingCVEs = append(existingCVEs, cve)\n } else {\n // Add the CVE id here.\n //\n if args.CVE != \"\" {\n if !hasString(existingCVEs, args.CVE) {\n dst.WriteString(\"CVE-\")\n dst.WriteString(args.CVE)\n dst.WriteString(\"\\n\")\n dst.WriteString(\"\\n\") // One blank line after the CVE line (this assumes there is only one CVE)\n }\n }\n cveInsertionPoint = false\n looking4BuglinkInsertionPoint = true\n\n // We don't know at this point if we are going to insert a Buglink\n // so we can't write out the current line of text.\n }\n }\n\n // After the first blank line after the CVE lines is where the Buglinks are to be\n // inserted.\n //\n if looking4BuglinkInsertionPoint {\n if line != \"\" {\n looking4BuglinkInsertionPoint = false\n buglinkInsertionPoint = true\n }\n }\n\n if buglinkInsertionPoint {\n buglinkInsertionPoint = true\n // Just like the CVEs we skip past any existing BugLink lines and build a list of existing\n 
// buglinks so we don't duplicate any.\n //\n if strings.Contains(line, \"BugLink:\") {\n s := strings.Split(line, \"/\")\n id := s[len(s)-1]\n existingBugIds = append(existingBugIds, id)\n } else {\n if len(args.Bugs) > 0 {\n for _, id := range args.Bugs {\n if !hasString(existingBugIds, id) {\n dst.WriteString(fmt.Sprintf(\"BugLink: %s%s\\n\", buglinkBaseUrl, id))\n }\n }\n dst.WriteString(\"\\n\") // One blank line after the BugLink line\n }\n buglinkInsertionPoint = false\n looking4SOBInsertionPoint = true\n }\n }\n\n // Once we've found the subject line, we look for the first blank line after it.\n //\n if subjectLine {\n if line == \"\" {\n cveInsertionPoint = true\n subjectLine = false\n }\n }\n\n // All modificatins that we make are made after the subject line, therefore that's\n // the first thing we look for.\n //\n if looking4SubjectLine {\n if strings.Contains(line, \"Subject:\") {\n subjectLine = true\n looking4SubjectLine = false\n }\n }\n\n dst.WriteString(line)\n dst.WriteString(\"\\n\")\n }\n\n // If the scanner encountered an error, return it.\n //\n if err := scanner.Err(); err != nil {\n return err\n }\n\n os.Rename(dst.Name(), inputFile.Name())\n return nil\n}",
"func overrideFile(filename, newcontent string) {\n\tfile, fileErr := os.Create(filename) //open given file\n\tif fileErr != nil {\n\t\tfmt.Println(fileErr)\n\t}\n\tfile.WriteString(newcontent) //write the new content to the file\n\tfile.Close()\n\treturn\n}",
"func replaceDemo(src string, sub string, str string, i int) string {\n\treturn strings.Replace(src, sub, str, i)\n}",
"func Template(tempName string, templatePath string, replacings ...Replacement) *os.File {\n\treplacedFile, err := ioutil.TempFile(\"/tmp\", tempName+\"-*.yaml\")\n\tExpect(err).ToNot(HaveOccurred())\n\n\ttemplateContent, err := ioutil.ReadFile(templatePath)\n\tExpect(err).ToNot(HaveOccurred())\n\n\treplacedStr := \"\"\n\tfor _, rep := range replacings {\n\t\tcontent := \"\"\n\t\tif replacedStr == \"\" {\n\t\t\tcontent = string(templateContent)\n\t\t} else {\n\t\t\tcontent = replacedStr\n\t\t}\n\t\treplacedStr = strings.ReplaceAll(content, rep.Old, rep.New)\n\t}\n\n\terr = ioutil.WriteFile(replacedFile.Name(), []byte(replacedStr), 0644)\n\tExpect(err).ToNot(HaveOccurred())\n\n\treturn replacedFile\n}",
"func TestReplacerWithWriter(t *testing.T) {\n\tnewReader := func(orig io.Reader, tf transform.Transformer) io.Reader {\n\t\tr, w := io.Pipe()\n\t\tgo func() {\n\t\t\t_, err := io.Copy(transform.NewWriter(w, tf), orig)\n\t\t\tw.CloseWithError(err)\n\t\t}()\n\t\treturn r\n\t}\n\ttestReplacerWithReader(t, newReader)\n}",
"func ReplaceAll(old, new string) MapFunc {\n\treturn func(s string) string { return strings.ReplaceAll(s, old, new) }\n}",
"func ReplaceLines(data map[string]string)(err error){\n sourceDownload := map[string]map[string]string{}\n sourceDownload[\"ruleset\"] = map[string]string{}\n sourceDownload[\"ruleset\"][\"sourceDownload\"] = \"\"\n sourceDownload,err = GetConf(sourceDownload)\n pathDownloaded := sourceDownload[\"ruleset\"][\"sourceDownload\"]\n if err != nil {\n logs.Error(\"ReplaceLines error loading data from main.conf: \"+ err.Error())\n return err\n }\n \n //split path \n splitPath := strings.Split(data[\"path\"], \"/\")\n pathSelected := splitPath[len(splitPath)-2]\n\n saved := false\n rulesFile, err := os.Create(\"_creating-new-file.txt\")\n defer rulesFile.Close()\n var validID = regexp.MustCompile(`sid:(\\d+);`)\n\n newFileDownloaded, err := os.Open(pathDownloaded + pathSelected + \"/rules/\" + \"drop.rules\")\n\n scanner := bufio.NewScanner(newFileDownloaded)\n for scanner.Scan() {\n for x := range data{\n sid := validID.FindStringSubmatch(scanner.Text())\n if (sid != nil) && (sid[1] == string(x)) {\n if data[x] == \"N/A\"{\n saved = true\n continue\n }else{\n _, err = rulesFile.WriteString(string(data[x])) \n _, err = rulesFile.WriteString(\"\\n\") \n saved = true\n continue\n }\n }\n }\n if !saved{\n _, err = rulesFile.WriteString(scanner.Text())\n _, err = rulesFile.WriteString(\"\\n\") \n }\n saved = false\n }\n\n input, err := ioutil.ReadFile(\"_creating-new-file.txt\")\n err = ioutil.WriteFile(\"rules/drop.rules\", input, 0644)\n\n _ = os.Remove(\"_creating-new-file.txt\")\n\n if err != nil {\n logs.Error(\"ReplaceLines error writting new lines: \"+ err.Error())\n return err\n }\n return nil\n}",
"func TestReplace(t *testing.T) {\n\tdb, err := Open(db_filename, \"c\")\n\tdefer db.Close()\n\tdefer os.Remove(db_filename)\n\n\tif err != nil {\n\t\tt.Error(\"Couldn't create database\")\n\t}\n\n\terr = db.Insert(\"foo\", \"bar\")\n\terr = db.Replace(\"foo\", \"biz\")\n\tkey, err := db.Fetch(\"foo\")\n\tif err != nil || key != \"biz\" {\n\t\tt.Error(\"Replace didn't update key correctly\")\n\t}\n}",
"func replace(s string) string {\n\treturn matches[s]\n}",
"func writeInplace(filename string, contents io.Reader) error {\n\ttempname := filename + \".tmp\"\n\toutput, err := os.Create(tempname)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = io.Copy(output, contents)\n\tif err == nil {\n\t\terr = output.Sync()\n\t}\n\n\tif st, err := os.Stat(filename); err == nil {\n\t\tlogInformationalError(output.Chmod(st.Mode()))\n\n\t\tif os.Getuid() == 0 {\n\t\t\tif ust, ok := st.Sys().(*syscall.Stat_t); ok {\n\t\t\t\tlogInformationalError(output.Chown(int(ust.Uid), int(ust.Gid)))\n\t\t\t}\n\t\t}\n\t} else {\n\t\tlogInformationalError(err)\n\t}\n\n\tlogInformationalError(output.Close())\n\n\tif err != nil {\n\t\tlogInformationalError(os.Remove(tempname))\n\t\treturn fmt.Errorf(\"error while writing output: %v\", err)\n\t}\n\n\terr = os.Rename(tempname, filename)\n\tif err != nil {\n\t\tlogInformationalError(os.Remove(tempname))\n\t\treturn fmt.Errorf(\"error while renaming temporary file to destination file: %v\", err)\n\t}\n\n\treturn nil\n}",
"func ReplaceFile(fs afero.Fs, filename string, contents []byte, newPerms os.FileMode) (rerr error) {\n\texists, err := afero.Exists(fs, filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unable to determine if file %q exists: %v\", filename, err)\n\t}\n\n\t// Create a temp file first.\n\tr := rand.New(rand.NewSource(time.Now().UnixNano()))\n\tbFilename := fmt.Sprintf(\"redpanda-%v\", r.Int())\n\ttemp := filepath.Join(filepath.Dir(filename), bFilename)\n\n\t// If the directory does not exist, create it. We do not preserve perms\n\t// if not-exist because there are no perms to preserve.\n\tif err := fs.MkdirAll(filepath.Dir(filename), 0o755); err != nil {\n\t\treturn err\n\t}\n\n\terr = afero.WriteFile(fs, temp, contents, newPerms)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing to temporary file: %v\", err)\n\t}\n\tdefer func() {\n\t\tif rerr != nil {\n\t\t\tif removeErr := fs.Remove(temp); removeErr != nil {\n\t\t\t\trerr = fmt.Errorf(\"%s, unable to remove temp file: %v\", rerr, removeErr)\n\t\t\t} else {\n\t\t\t\trerr = fmt.Errorf(\"%s, temp file removed from disk\", rerr)\n\t\t\t}\n\t\t}\n\t}()\n\n\t// If we are replacing an existing file, we try to preserve the original\n\t// file ownership.\n\tif exists {\n\t\tstat, err := fs.Stat(filename)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to stat existing file: %v\", err)\n\t\t}\n\n\t\terr = fs.Chmod(temp, stat.Mode())\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"unable to chmod temp config file: %v\", err)\n\t\t}\n\n\t\terr = PreserveUnixOwnership(fs, stat, temp)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = fs.Rename(temp, filename)\n\treturn err\n}",
"func Replace(s, old, new string, n int) string {return s}",
"func (c *Seaweed) Replace(fileID string, newContent io.Reader, fileName string, size int64, collection, ttl string, deleteFirst bool) (err error) {\n\tfp := NewFilePartFromReader(ioutil.NopCloser(newContent), fileName, size)\n\tfp.Collection, fp.TTL = collection, ttl\n\tfp.FileID = fileID\n\terr = c.ReplaceFilePart(fp, deleteFirst)\n\treturn\n}",
"func (re *RegexpStd) ReplaceAllLiteralString(src, repl string) string {\n\t// return string(re.ReplaceAll(nil, src, 2, func(dst []byte, match []int) []byte {\n\t// \treturn append(dst, repl...)\n\t// }))\n\tpanic(\"\")\n}",
"func (p *commitloggerParser) doReplace() error {\n\tf, err := os.Open(p.path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmetered := diskio.NewMeteredReader(f, p.metrics.TrackStartupReadWALDiskIO)\n\tp.reader = bufio.NewReaderSize(metered, 1*1024*1024)\n\n\t// errUnexpectedLength indicates that we could not read the commit log to the\n\t// end, for example because the last element on the log was corrupt.\n\tvar errUnexpectedLength error\n\n\tfor {\n\t\tvar commitType CommitType\n\n\t\terr := binary.Read(p.reader, binary.LittleEndian, &commitType)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\terrUnexpectedLength = errors.Wrap(err, \"read commit type\")\n\t\t\tbreak\n\t\t}\n\n\t\tif CommitTypeReplace.Is(commitType) {\n\t\t\tif err := p.parseReplaceNode(); err != nil {\n\t\t\t\terrUnexpectedLength = errors.Wrap(err, \"read replace node\")\n\t\t\t\tbreak\n\t\t\t}\n\t\t} else {\n\t\t\tf.Close()\n\t\t\treturn errors.Errorf(\"found a %s commit on a replace bucket\", commitType.String())\n\t\t}\n\t}\n\n\tfor _, node := range p.replaceCache {\n\t\tvar opts []SecondaryKeyOption\n\t\tif p.memtable.secondaryIndices > 0 {\n\t\t\tfor i, secKey := range node.secondaryKeys {\n\t\t\t\topts = append(opts, WithSecondaryKey(i, secKey))\n\t\t\t}\n\t\t}\n\t\tif node.tombstone {\n\t\t\tp.memtable.setTombstone(node.primaryKey, opts...)\n\t\t} else {\n\t\t\tp.memtable.put(node.primaryKey, node.value, opts...)\n\t\t}\n\t}\n\n\tif errUnexpectedLength != nil {\n\t\tf.Close()\n\t\treturn errUnexpectedLength\n\t}\n\n\treturn f.Close()\n}",
"func prime_the_change_buffer(){\nchange_buffer= nil\n\n\n/*21:*/\n\n\n//line gocommon.w:182\n\nfor true{\nchange_line++\nif err:=input_ln(change_file);err!=nil{\nreturn\n}\nif len(buffer)<2{\ncontinue\n}\nif buffer[0]!='@'{\ncontinue\n}\nif unicode.IsUpper(buffer[1]){\nbuffer[1]= unicode.ToLower(buffer[1])\n}\nif buffer[1]=='x'{\nbreak\n}\nif buffer[1]=='y'||buffer[1]=='z'||buffer[1]=='i'{\nloc= 2\nerr_print(\"! Missing @x in change file\")\n\n}\n}\n\n\n\n/*:21*/\n\n\n//line gocommon.w:170\n\n\n\n/*22:*/\n\n\n//line gocommon.w:209\n\nfor true{\nchange_line++\nif err:=input_ln(change_file);err!=nil{\nerr_print(\"! Change file ended after @x\")\n\nreturn\n}\nif len(buffer)!=0{\nbreak\n}\n}\n\n\n\n/*:22*/\n\n\n//line gocommon.w:171\n\n\n\n/*23:*/\n\n\n//line gocommon.w:222\n\n{\nchange_buffer= buffer\nbuffer= nil\n}\n\n\n\n/*:23*/\n\n\n//line gocommon.w:172\n\n}",
"func RewriteFile(name string, replace ReplaceFunc) error {\n\n\t// create an empty fileset.\n\tfset := token.NewFileSet()\n\n\t// parse the .go file.\n\t// we are parsing the entire file with comments, so we don't lose anything\n\t// if we need to write it back out.\n\tf, err := parser.ParseFile(fset, name, nil, parser.ParseComments)\n\tif err != nil {\n\t\te := err.Error()\n\t\tmsg := \"expected 'package', found 'EOF'\"\n\t\tif e[len(e)-len(msg):] == msg {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\t// iterate through the import paths. if a change occurs update bool.\n\tchange := false\n\tfor _, i := range f.Imports {\n\n\t\t// unquote the import path value.\n\t\tpath, err := strconv.Unquote(i.Path.Value)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// replace the value using the replace function\n\t\tpath, err = replace(name, path)\n\t\tif err != nil {\n\t\t\tif err == ErrSkip {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\ti.Path.Value = strconv.Quote(path)\n\t\tchange = true\n\t}\n\n\tfor _, cg := range f.Comments {\n\t\tfor _, c := range cg.List {\n\t\t\tif strings.HasPrefix(c.Text, \"// import \\\"\") {\n\n\t\t\t\t// trim off extra comment stuff\n\t\t\t\tctext := c.Text\n\t\t\t\tctext = strings.TrimPrefix(ctext, \"// import\")\n\t\t\t\tctext = strings.TrimSpace(ctext)\n\n\t\t\t\t// unquote the comment import path value\n\t\t\t\tctext, err := strconv.Unquote(ctext)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\t// match the comment import path with the given replacement map\n\t\t\t\tctext, err = replace(name, ctext)\n\t\t\t\tif err != nil {\n\t\t\t\t\tif err == ErrSkip {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tc.Text = \"// import \" + strconv.Quote(ctext)\n\t\t\t\tchange = true\n\t\t\t}\n\t\t}\n\t}\n\n\t// if no change occured, then we don't need to write to disk, just return.\n\tif !change {\n\t\treturn nil\n\t}\n\n\t// create a temporary file, this easily avoids conflicts.\n\ttemp := name + \".temp\"\n\tw, err := os.Create(temp)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer w.Close()\n\n\t// write changes to .temp file, and include proper formatting.\n\terr = (&printer.Config{Mode: printer.TabIndent | printer.UseSpaces, Tabwidth: 8}).Fprint(w, fset, f)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// close the writer\n\terr = w.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// rename the .temp to .go\n\treturn os.Rename(temp, name)\n}",
"func ReplaceFirst(re *regexp.Regexp, src, repl []byte) []byte {\n\tif m := re.FindSubmatchIndex(src); m != nil {\n\t\tout := make([]byte, m[0])\n\t\tcopy(out, src[0:m[0]])\n\t\tout = re.Expand(out, repl, src, m)\n\t\tif m[1] < len(src) {\n\t\t\tout = append(out, src[m[1]:]...)\n\t\t}\n\t\treturn out\n\t}\n\tout := make([]byte, len(src))\n\tcopy(out, src)\n\treturn out\n}",
"func ChangeUserPasswordFile(inFile, outFile string, pwOld, pwNew string, conf *model.Configuration) (err error) {\n\tif conf == nil {\n\t\treturn errors.New(\"pdfcpu: missing configuration for change user password\")\n\t}\n\n\tconf.Cmd = model.CHANGEUPW\n\tconf.UserPW = pwOld\n\tconf.UserPWNew = &pwNew\n\n\tvar f1, f2 *os.File\n\n\tif f1, err = os.Open(inFile); err != nil {\n\t\treturn err\n\t}\n\n\ttmpFile := inFile + \".tmp\"\n\tif outFile != \"\" && inFile != outFile {\n\t\ttmpFile = outFile\n\t\tlog.CLI.Printf(\"writing %s...\\n\", outFile)\n\t} else {\n\t\tlog.CLI.Printf(\"writing %s...\\n\", inFile)\n\t}\n\n\tif f2, err = os.Create(tmpFile); err != nil {\n\t\tf1.Close()\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tf2.Close()\n\t\t\tf1.Close()\n\t\t\tif outFile == \"\" || inFile == outFile {\n\t\t\t\tos.Remove(tmpFile)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif err = f2.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = f1.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif outFile == \"\" || inFile == outFile {\n\t\t\terr = os.Rename(tmpFile, inFile)\n\t\t}\n\t}()\n\n\treturn ChangeUserPassword(f1, f2, pwOld, pwNew, conf)\n}",
"func main() {\n patFile, err := ioutil.ReadFile(\"patterns.txt\")\n if err != nil {\n log.Fatal(err)\n }\n textFile, err := ioutil.ReadFile(\"text.txt\")\n if err != nil {\n log.Fatal(err)\n }\n patterns := strings.Split(string(patFile), \" \")\n fmt.Printf(\"\\nRunning: Set Backward Oracle Matching algorithm.\\n\\n\")\n if debugMode==true { \n fmt.Printf(\"Searching for %d patterns/words:\\n\",len(patterns))\n }\n for i := 0; i < len(patterns); i++ {\n if (len(patterns[i]) > len(textFile)) {\n log.Fatal(\"There is a pattern that is longer than text! Pattern number:\", i+1)\n }\n if debugMode==true { \n fmt.Printf(\"%q \", patterns[i])\n }\n }\n if debugMode==true { \n fmt.Printf(\"\\n\\nIn text (%d chars long): \\n%q\\n\\n\",len(textFile), textFile)\n }\n sbom(string(textFile), patterns)\n}",
"func ReplaceInContent(vbytes []byte, replaceWith string, index int) (old, new string, loc []int, newcontents []byte, err error) {\n\treturn replace(vbytes, replaceWith, \"\", index)\n}",
"func Substitute(str string, mapping map[string]string) string {\n\tfor key, val := range mapping {\n\t\tstr = strings.Replace(str, key, val, -1)\n\t}\n\treturn str\n}",
"func main() {\n\tcensor := &Censor{\n\t\tos.Stdin,\n\t\t[]string{\"fuck\", \"shit\"},\n\t\t\"@#!\",\n\t}\n\t_, _ = io.Copy(os.Stdout, censor)\n}",
"func (re *RegexpStd) ReplaceAllFunc(src []byte, repl func([]byte) []byte) []byte {\n\t// return re.replaceAll(src, \"\", 2, func(dst []byte, match []int) []byte {\n\t// \treturn append(dst, repl(src[match[0]:match[1]])...)\n\t// })\n\tpanic(\"\")\n}",
"func Replace(old, new string, n int) MapFunc {\n\treturn func(s string) string { return strings.Replace(s, old, new, n) }\n}",
"func (that *StrAnyMap) Replace(data map[string]interface{}) {\n\tthat.mu.Lock()\n\tthat.data = data\n\tthat.mu.Unlock()\n}",
"func copyPackage(dirSrc, dirDst, search, replace string) {\n\terr := filepath.Walk(dirSrc, func(file string, info os.FileInfo, err error) error {\n\t\tbase := filepath.Base(file)\n\t\tif err != nil || info.IsDir() ||\n\t\t\t!strings.HasSuffix(base, \".go\") ||\n\t\t\tstrings.HasSuffix(base, \"_test.go\") ||\n\t\t\t// Don't process subdirectories.\n\t\t\tfilepath.Dir(file) != dirSrc {\n\t\t\treturn nil\n\t\t}\n\t\tb, err := os.ReadFile(file)\n\t\tif err != nil || bytes.Contains(b, []byte(\"\\n// +build ignore\")) {\n\t\t\treturn err\n\t\t}\n\t\t// Fix paths.\n\t\tb = bytes.Replace(b, []byte(search), []byte(replace), -1)\n\t\tb = bytes.Replace(b, []byte(\"internal/export\"), []byte(\"\"), -1)\n\t\t// Remove go:generate lines.\n\t\tb = goGenRE.ReplaceAllLiteral(b, nil)\n\t\tcomment := \"// Code generated by running \\\"go generate\\\" in golang.org/x/text. DO NOT EDIT.\\n\\n\"\n\t\tif !bytes.HasPrefix(b, []byte(comment)) {\n\t\t\tb = append([]byte(comment), b...)\n\t\t}\n\t\tif b, err = format.Source(b); err != nil {\n\t\t\tfmt.Println(\"Failed to format file:\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tfile = filepath.Join(dirDst, base)\n\t\tvprintf(\"=== COPY %s\\n\", file)\n\t\treturn os.WriteFile(file, b, 0666)\n\t})\n\tif err != nil {\n\t\tfmt.Println(\"Copying exported files failed:\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func reset_input(){\nloc= 0\nfile= file[:0]\n\n\n/*30:*/\n\n\n//line gocommon.w:371\n\nif wf,err:=os.Open(file_name[0]);err!=nil{\nfile_name[0]= alt_file_name\nif wf,err= os.Open(file_name[0]);err!=nil{\nfatal(\"! Cannot open input file \",file_name[0])\n\n}else{\nfile= append(file,bufio.NewReader(wf))\n}\n}else{\nfile= append(file,bufio.NewReader(wf))\n}\nif cf,err:=os.Open(change_file_name);err!=nil{\nfatal(\"! Cannot open change file \",change_file_name)\n\n}else{\nchange_file= bufio.NewReader(cf)\n}\n\n\n\n/*:30*/\n\n\n//line gocommon.w:356\n\ninclude_depth= 0\nline= line[:0]\nline= append(line,0)\nchange_line= 0\nchange_depth= include_depth\nchanging= true\nprime_the_change_buffer()\nchanging= !changing\nloc= 0\ninput_has_ended= false\n}",
"func (t *testRunner) writeString(file, data string) {\n\tt.Helper()\n\n\tnewf, err := os.CreateTemp(t.dir, \"\")\n\trequire.NoError(t, err)\n\n\t_, err = newf.WriteString(data)\n\trequire.NoError(t, err)\n\trequire.NoError(t, newf.Close())\n\n\terr = os.Rename(newf.Name(), file)\n\trequire.NoError(t, err)\n}",
"func (g *TemplateGenerator) Replace(args []string) ([]string, error) {\n\tcompiled := make([]string, len(args))\n\tfor i, a := range args {\n\t\tstr, err := g.replaceArgument(a)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to compile argument at position %d\", i)\n\t\t}\n\t\tcompiled[i] = str\n\t}\n\n\treturn compiled, nil\n}",
"func Replace(d Ploop, p *ReplaceParam) error {\n\tvar a C.struct_ploop_replace_param\n\n\ta.file = C.CString(p.file)\n\tdefer cfree(a.file)\n\n\tif p.uuid != \"\" {\n\t\ta.guid = C.CString(p.uuid)\n\t\tdefer cfree(a.guid)\n\t} else if p.curFile != \"\" {\n\t\ta.cur_file = C.CString(p.curFile)\n\t\tdefer cfree(a.cur_file)\n\t} else {\n\t\ta.level = C.int(p.level)\n\t}\n\n\ta.flags = C.int(p.flags)\n\n\tret := C.ploop_replace_image(d.d, &a)\n\n\treturn mkerr(ret)\n}",
"func replace() {\n\tfor k, v := range replacements {\n\t\t//Get all the indexes for a Key\n\t\tindexes := allIndiciesForString(k, molecule)\n\t\tfor _, i := range indexes {\n\t\t\t//Save the head up to the index\n\t\t\thead := molecule[:i]\n\t\t\t//Save the tail from the index + lenght of the searched key\n\t\t\ttail := molecule[i+len(k):]\n\n\t\t\t//Create a string for all the replacement possbilities\n\t\t\tfor _, com := range v {\n\t\t\t\tnewMol := head + com + tail\n\t\t\t\tif !arrayutils.ContainsString(combinations, newMol) {\n\t\t\t\t\tcombinations = append(combinations, newMol)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func RewriteFileContents(filename, instruction, content string) error {\n\ttext, err := ioutil.ReadFile(filename)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error in getting contents from the file, %v\", err)\n\t}\n\n\texistingContent := string(text)\n\n\tmodifiedContent, err := appendContent(existingContent, instruction, content)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = ioutil.WriteFile(filename, []byte(modifiedContent), defaultPermission)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error writing modified contents to file, %v\", err)\n\t}\n\treturn nil\n}",
"func Reencrypt(filepath string, singleLine, disableValidation bool) error {\n\tplaintext, err := PlainText(filepath)\n\tcheck(err)\n\terr = CipherText(plaintext, filepath, singleLine, disableValidation)\n\treturn err\n}",
"func InsertStringToFile(path, str string, index int) error {\n\tfmt.Printf(\"%#v \\n\", dblines)\n\tlines := dblines\n\t/*\n\tif err != nil {\n\t\treturn err\n\t}\n\t*/\n\tvar inserted bool = false\n\tfileContent := \"\"\n\tfor i, line := range lines {\n\t\tif i == index {\n\t\t\tfileContent += str\n\t\t\tinserted = true\n\t\t}\n\t\tfileContent += line\n\t\tfileContent += \"\\n\"\n\t}\n\tif !inserted{\n\t\tfileContent += str\n\t}\n\n\tioutil.WriteFile(path, []byte(fileContent), 0644)\n\tvar er error\n\tdblines, er = File2lines(config_dbpath)\n\treturn er\n}",
"func DoTheThing() {\n\tmapDef := `#################################################################################\n#...#.......#....a..#...........#..e....#.....#...#...#...........#.............#\n#.#.#.#####.#.#####.#.#######.###.###.#.#.###.#.#.###.#.#########.#.###.#######.#\n#.#.#.#.#...#.#.K...#...#...#.....#.#.#.#.#.....#.#...#t......#...#.#...#.......#\n#.###B#.#.#.#.#.#######.###.#######.#.###.#######.#.#########.#.#####.###.#######\n#.#q..#.#.#.#.#...#.....#...#.......#...#...#.#...#.........#.#.......#.#.#.....#\n#.#.###.#.#.#.###.#.#####.#.#.#####.###.###.#.#.#####.#######.#########.#.###.#.#\n#...#...#.#.#...#.#.......#...#.....#...#...#.........#.....#.......#.#...#m..#.#\n#.#####.#.#####.#.#######.#########.#.###F#############.###.###.###.#.#.###.#####\n#...#...#.......#.......#.#......h#.#...#.#.....#.......#.....#.#...#.#.#.......#\n###.#.###########.#####.#.#.#####.#####.#.#.###.#.#####.#####.#.#.###G#.###.###.#\n#.#.#.......#...#...#...#.#...J.#.#.....#.#...#.#.....#.#...#.#.#.#...#...#...#.#\n#.#.#####.###.#.###.#####.#####.#.#.###.#.###.#.#####.#.#.#.#.###.#.#.###.#####S#\n#w#...#...#...#...#...#...#...#.#...#...#.#...#.....#.#.#.#.#.....#.#...#.......#\n#.###.#.###.#####.###.#.#####.#.#####.###.#.###.###.#.###.#.#######.###.#######.#\n#...#...#...#.....#...#.#...#.#...#.#.#.#.#.#.#.#...#.....#...#...#.#.#.........#\n#.#####.#.###.#####.###.#.#.#.###.#.#.#.#.#.#.#.###.#########.#.#.#.#.###########\n#.......#...#.....#.#.....#...#.#.#...#.#...#.#...#.....#...#.#.#.#.....#.......#\n#C#########.#####.#.#.#######.#.#.###.#.#.###.###.#####.#.###.#.#######.###.###.#\n#.#.......#.#.#...#...#...#.....#...#...#.......#.#.....#.#...#.....#.#...#...#.#\n#.###.#.#.#.#.#.###.###.#.#########.###.#####.###.#.#####.#.#####.#.#.###.###.###\n#...#.#.#.#.#.#.#y..#...#...#.....#.#...#.....#...#.#.....#...#...#.....#...#...#\n###.###.#I#.#.#.###########.#.###.#.#####.#####.###.#.###.#####.###########.#.#.#\n#...#...#.#.#.#...........#...#...#.#...#...#...#.#...#...#.....#.........#.#.#.#\n#.###.###.#.#.#####.###########.###.#.#.#####.###.#.#####.###.###.#######.#.###.#\n#.....#...#...#...#.............#.....#.#.....#...#.#...#.....#...#.....#...#...#\n#####.#######.#.#.###############.#####.#.#######.#.#.#.#####.#.###.###.#####.###\n#.....#.....#...#.#...#.............#...#.......#...#.#.#.....#.#.#.#.#.........#\n#.#####.###.#####.#.###.###########.#.###.#####.#####.#.#######.#.#.#.#########.#\n#...#.#.#.#.......#...........#...#.#...#.....#.......#...#.....#...#.....#...#.#\n###.#.#.#.###############.#####.#.#.###.#####.###########.#.#######.###.###.#.#.#\n#...#.#...#.......#.....#.#.....#.#...#.#.....#...#.....#...#.....#.#...#...#...#\n#.###.###.#.#####.#.###.###.#####.#####.#.#####.###.###.#####.###.#.#.###.#####.#\n#.#.....#.#...#.....#...#...#.....#.....#...#.......#...#.....#.#.#...#...#.....#\n#.###L#.#.#.#.#######.###.###.#####.###.###.#.#######.#.###.###.#.###.#.###.#####\n#o..#.#.#.#.#.....#.#.#...#.#...#...#.#.#.#.#.....#...#.....#.#...#...#.#.......#\n###.###.#.#######.#.#.#.#.#.###.#.###.#.#.#.#######.#########.#.#######.#######.#\n#.#...#.#.......#.#.#.#.#.#.#...#.#...#.#.#.#...#...#.........#.#.....#.......#.#\n#.###.#.#######.#.#.#.###.#.#.###.#.###.#.#.#.#.#.###.#######.#.#.###.#######.#.#\n#.............#...#.......#.......#...........#...#.........#.....#...........#.#\n#######################################.@.#######################################\n#.....#.....#.#.........#.#...#.....#...........#..u........#...#.......#.....Q.#\n#.#.###.#.#.#.#.#######.#.#.#.#.#.
###.#.#.#.###.#.#########.###.#.#.###.#######.#\n#.#.#...#.#.#.#.....#...#.R.#.#.#.....#.#.#...#...#.......#r....#.#...#.........#\n#.#.#.###.#.#.#####.###.#####.#.#######.#.###.#######.###.#####.#.###.#####.#####\n#.#...#...#.......#...#.....#.#.#.#...#.#.#.#...#...#.#...#.....#b#.#...#...#..c#\n#.#####.#############.#####.#.#.#.#.#.#.#.#.###.#.#.#.#.###.#####.#.###.#####.#.#\n#p#...V.#..x#.......#...#...#.....#.#...#.#...#.#.#...#...#.#.#...#...#.....#.#.#\n###.#####.#.#.#####.###.#.###U#####.###.#.#.#.#.#.#######.#.#.#.###.#.#####.#.#.#\n#...#.....#.....#...#...#.#.#.#.....#.#.#...#.#.#.#...#.#.#...#...#.#.....#...#.#\n#.###.###########.###.###.#.#.#.#####.#.#.#####.#.#.#.#.#.###.###.#.#####.#####.#\n#.......#.......#...#.#.#.#...#...#.....#.#...#.#.#.#.#...#...#.#.#.....#...#.#.#\n#.#####.#.#####.###.#.#.#.#.#####.#######.#.#.#.#.#H#.###.#.###.#.#.###.###.#.#.#\n#.#...#.#.#...#.....#.#...#.....#...#...#.#.#...#.#.#...#.#.....#.#...#.#.#...#.#\n#.#.#.###.#.#######.#.###.#########.#.#.###.#####.#.###.###.#####.#####.#.###.#.#\n#.#.#.....#...#.....#...#.#.......#...#.#...#.....#.#.#.....#...#.......#.....#.#\n#.#.#########.#.#######.#.#.#####.#####.#.###.#.###.#.#######.#.#########.#####.#\n#.#.........#.#.#.#.....#...#...#.....#.#...#.#.#.........#...#...#.....#...#...#\n#.#########.#.#.#.#.#########.#.#####.#.#.#.#.###.#########.###.###.#.#####.#.#.#\n#.#.......#.#.#...#.#.......#.#.....#.#.#.#.#.....#.#.........#.#...#.#.....#.#.#\n#.#######.#.#.###.#.###.#####.#####.#.#.#.#.###.###.#.#########.#.###.#.#####.#.#\n#......z..#.#.#...#...#.....#.#.....#.#.#.#...#.....#.......#.#.#.#.#...#...#.#.#\n#########.#.#.#.#######.###.#.#.###.#.#.#.###.###########.###.#.#.#.#######.#.#.#\n#.#.......#.#...#.....#.#.#.#.#.#...#.#.#.#.#.............#...#.#.......#...#.#.#\n#.#.#######.#.###.#.#.#.#.#.#.#.#.###.#.#.#.###############.###.#######.#.###.#.#\n#...#.......#...#.#.#.#...#...#.#...#.#.#....j#...#...#....d#...#...#...#...#.#.#\n#.###.#########.###.#.###.#####.#####.#.#####.#.###.#.#.#######.#.#.#.#####.#.#.#\n#...#.#.......#.#...#...#...#.#.....#...#...#.#.....#.#.Z.#.....#.#...#.....#.#.#\n#D###.#####.###.#.#####.###.#.#####.###.###.#.#######.###.###.###.#######.###.#.#\n#.#...#...#..n#...#.N.#.#...#.....#.....#...#.....#...#.#...#.....#.....#.#...#.#\n###.###.#.###.#######.#.#.#####.#.#######.#######.#.#.#.###.#.#####.###.#.#.###W#\n#...#...#.............#.#.....#.#.....M.#.#.......#.#...#...#.......#.....#...#.#\n#.###.#.###############.#####.###.#####.#.#.#######.###.#.#######.#####.#####.#.#\n#.#.P.#...#...#.......#.#...#.....#...#.#.#...#.....#...#...#.#...#...#.#.....#i#\n#.#######.#.#.#.#####.#.#.#.#######.#.#.#.###.###.###.#####.#.#.###.#.###.#####.#\n#.......#...#.......#...#.#.......#.#...#...#....f#.E.#...#.#.....#.#.#...#.O.#.#\n#.#####.#################.###.#####.#####.#########.#####.#.#######X#.#.###.#.###\n#.#.#...#...#...T...#.A.#...#..k..#.#...#.#...#...#.#...#.#.....#...#.#.....#...#\n#.#.#.###.#.#.#####.#.#.###.#####.#.#.#.#.#.#.#.#.#.#.#.#.#####.#.###.#########.#\n#...#.....#.......#...#.........#.Y...#.#..s#...#..g..#.......#v..#............l#\n#################################################################################`\n\n\tlines := strings.Split(mapDef, \"\\n\")\n\n\t//find points of interest\n\tvar (\n\t\torigin pointOfInterest\n\t)\n\tkeys := make([]*pointOfInterest, 0)\n\tkeyRegexp = regexp.MustCompile(\"[a-z]\")\n\tdoorRegexp = regexp.MustCompile(\"[A-Z]\")\n\tentranceRegexp = regexp.MustCompile(\"@\")\n\trequiredKeys := 0\n\n\tfor i, line := range lines 
{\n\t\tindexOfAt := strings.IndexRune(line, '@')\n\t\tif indexOfAt > 0 {\n\t\t\torigin = pointOfInterest{name: \"entrance\", coords: coordinate{x: indexOfAt, y: i}}\n\t\t}\n\n\t\tkeysIndexes := keyRegexp.FindAllStringIndex(line, -1)\n\t\tif keysIndexes != nil {\n\t\t\tfor _, index := range keysIndexes {\n\t\t\t\tpoi := pointOfInterest{name: line[index[0]:index[1]], coords: coordinate{x: index[0], y: i}, key: rune(line[index[0]])}\n\t\t\t\trequiredKeys = requiredKeys | (1 << (poi.key - 'a'))\n\t\t\t\tkeys = append(keys, &poi)\n\t\t\t}\n\t\t}\n\t}\n\n\t// for _, line := range lines {\n\t// \tfmt.Println(line)\n\t// }\n\n\torigin.neighbours = scoreDestinations(origin, lines, keys)\n\tfor _, key := range keys {\n\t\t(*key).neighbours = scoreDestinations(*key, lines, keys)\n\t}\n\n\toriginState := state{distance: 0, keys: 0, poi: []*pointOfInterest{&origin}, currentKeys: make([]rune, 1)}\n\twinningState := dijkstra(originState, requiredKeys)\n\tfmt.Printf(\"Shortest Path: %d\\n\", winningState.distance)\n\n\tmapDef = `#################################################################################\n#...#.......#....a..#...........#..e....#.....#...#...#...........#.............#\n#.#.#.#####.#.#####.#.#######.###.###.#.#.###.#.#.###.#.#########.#.###.#######.#\n#.#.#.#.#...#.#.K...#...#...#.....#.#.#.#.#.....#.#...#t......#...#.#...#.......#\n#.###B#.#.#.#.#.#######.###.#######.#.###.#######.#.#########.#.#####.###.#######\n#.#q..#.#.#.#.#...#.....#...#.......#...#...#.#...#.........#.#.......#.#.#.....#\n#.#.###.#.#.#.###.#.#####.#.#.#####.###.###.#.#.#####.#######.#########.#.###.#.#\n#...#...#.#.#...#.#.......#...#.....#...#...#.........#.....#.......#.#...#m..#.#\n#.#####.#.#####.#.#######.#########.#.###F#############.###.###.###.#.#.###.#####\n#...#...#.......#.......#.#......h#.#...#.#.....#.......#.....#.#...#.#.#.......#\n###.#.###########.#####.#.#.#####.#####.#.#.###.#.#####.#####.#.#.###G#.###.###.#\n#.#.#.......#...#...#...#.#...J.#.#.....#.#...#.#.....#.#...#.#.#.#...#...#...#.#\n#.#.#####.###.#.###.#####.#####.#.#.###.#.###.#.#####.#.#.#.#.###.#.#.###.#####S#\n#w#...#...#...#...#...#...#...#.#...#...#.#...#.....#.#.#.#.#.....#.#...#.......#\n#.###.#.###.#####.###.#.#####.#.#####.###.#.###.###.#.###.#.#######.###.#######.#\n#...#...#...#.....#...#.#...#.#...#.#.#.#.#.#.#.#...#.....#...#...#.#.#.........#\n#.#####.#.###.#####.###.#.#.#.###.#.#.#.#.#.#.#.###.#########.#.#.#.#.###########\n#.......#...#.....#.#.....#...#.#.#...#.#...#.#...#.....#...#.#.#.#.....#.......#\n#C#########.#####.#.#.#######.#.#.###.#.#.###.###.#####.#.###.#.#######.###.###.#\n#.#.......#.#.#...#...#...#.....#...#...#.......#.#.....#.#...#.....#.#...#...#.#\n#.###.#.#.#.#.#.###.###.#.#########.###.#####.###.#.#####.#.#####.#.#.###.###.###\n#...#.#.#.#.#.#.#y..#...#...#.....#.#...#.....#...#.#.....#...#...#.....#...#...#\n###.###.#I#.#.#.###########.#.###.#.#####.#####.###.#.###.#####.###########.#.#.#\n#...#...#.#.#.#...........#...#...#.#...#...#...#.#...#...#.....#.........#.#.#.#\n#.###.###.#.#.#####.###########.###.#.#.#####.###.#.#####.###.###.#######.#.###.#\n#.....#...#...#...#.............#.....#.#.....#...#.#...#.....#...#.....#...#...#\n#####.#######.#.#.###############.#####.#.#######.#.#.#.#####.#.###.###.#####.###\n#.....#.....#...#.#...#.............#...#.......#...#.#.#.....#.#.#.#.#.........#\n#.#####.###.#####.#.###.###########.#.###.#####.#####.#.#######.#.#.#.#########.#\n#...#.#.#.#.......#...........#...#.#...#.....#.......#...#.....#...#.....#...#.#\n###.#.#.#.###############.#####.#.#.
###.#####.###########.#.#######.###.###.#.#.#\n#...#.#...#.......#.....#.#.....#.#...#.#.....#...#.....#...#.....#.#...#...#...#\n#.###.###.#.#####.#.###.###.#####.#####.#.#####.###.###.#####.###.#.#.###.#####.#\n#.#.....#.#...#.....#...#...#.....#.....#...#.......#...#.....#.#.#...#...#.....#\n#.###L#.#.#.#.#######.###.###.#####.###.###.#.#######.#.###.###.#.###.#.###.#####\n#o..#.#.#.#.#.....#.#.#...#.#...#...#.#.#.#.#.....#...#.....#.#...#...#.#.......#\n###.###.#.#######.#.#.#.#.#.###.#.###.#.#.#.#######.#########.#.#######.#######.#\n#.#...#.#.......#.#.#.#.#.#.#...#.#...#.#.#.#...#...#.........#.#.....#.......#.#\n#.###.#.#######.#.#.#.###.#.#.###.#.###.#.#.#.#.#.###.#######.#.#.###.#######.#.#\n#.............#...#.......#.......#....@#@....#...#.........#.....#...........#.#\n#################################################################################\n#.....#.....#.#.........#.#...#.....#..@#@......#..u........#...#.......#.....Q.#\n#.#.###.#.#.#.#.#######.#.#.#.#.#.###.#.#.#.###.#.#########.###.#.#.###.#######.#\n#.#.#...#.#.#.#.....#...#.R.#.#.#.....#.#.#...#...#.......#r....#.#...#.........#\n#.#.#.###.#.#.#####.###.#####.#.#######.#.###.#######.###.#####.#.###.#####.#####\n#.#...#...#.......#...#.....#.#.#.#...#.#.#.#...#...#.#...#.....#b#.#...#...#..c#\n#.#####.#############.#####.#.#.#.#.#.#.#.#.###.#.#.#.#.###.#####.#.###.#####.#.#\n#p#...V.#..x#.......#...#...#.....#.#...#.#...#.#.#...#...#.#.#...#...#.....#.#.#\n###.#####.#.#.#####.###.#.###U#####.###.#.#.#.#.#.#######.#.#.#.###.#.#####.#.#.#\n#...#.....#.....#...#...#.#.#.#.....#.#.#...#.#.#.#...#.#.#...#...#.#.....#...#.#\n#.###.###########.###.###.#.#.#.#####.#.#.#####.#.#.#.#.#.###.###.#.#####.#####.#\n#.......#.......#...#.#.#.#...#...#.....#.#...#.#.#.#.#...#...#.#.#.....#...#.#.#\n#.#####.#.#####.###.#.#.#.#.#####.#######.#.#.#.#.#H#.###.#.###.#.#.###.###.#.#.#\n#.#...#.#.#...#.....#.#...#.....#...#...#.#.#...#.#.#...#.#.....#.#...#.#.#...#.#\n#.#.#.###.#.#######.#.###.#########.#.#.###.#####.#.###.###.#####.#####.#.###.#.#\n#.#.#.....#...#.....#...#.#.......#...#.#...#.....#.#.#.....#...#.......#.....#.#\n#.#.#########.#.#######.#.#.#####.#####.#.###.#.###.#.#######.#.#########.#####.#\n#.#.........#.#.#.#.....#...#...#.....#.#...#.#.#.........#...#...#.....#...#...#\n#.#########.#.#.#.#.#########.#.#####.#.#.#.#.###.#########.###.###.#.#####.#.#.#\n#.#.......#.#.#...#.#.......#.#.....#.#.#.#.#.....#.#.........#.#...#.#.....#.#.#\n#.#######.#.#.###.#.###.#####.#####.#.#.#.#.###.###.#.#########.#.###.#.#####.#.#\n#......z..#.#.#...#...#.....#.#.....#.#.#.#...#.....#.......#.#.#.#.#...#...#.#.#\n#########.#.#.#.#######.###.#.#.###.#.#.#.###.###########.###.#.#.#.#######.#.#.#\n#.#.......#.#...#.....#.#.#.#.#.#...#.#.#.#.#.............#...#.#.......#...#.#.#\n#.#.#######.#.###.#.#.#.#.#.#.#.#.###.#.#.#.###############.###.#######.#.###.#.#\n#...#.......#...#.#.#.#...#...#.#...#.#.#....j#...#...#....d#...#...#...#...#.#.#\n#.###.#########.###.#.###.#####.#####.#.#####.#.###.#.#.#######.#.#.#.#####.#.#.#\n#...#.#.......#.#...#...#...#.#.....#...#...#.#.....#.#.Z.#.....#.#...#.....#.#.#\n#D###.#####.###.#.#####.###.#.#####.###.###.#.#######.###.###.###.#######.###.#.#\n#.#...#...#..n#...#.N.#.#...#.....#.....#...#.....#...#.#...#.....#.....#.#...#.#\n###.###.#.###.#######.#.#.#####.#.#######.#######.#.#.#.###.#.#####.###.#.#.###W#\n#...#...#.............#.#.....#.#.....M.#.#.......#.#...#...#.......#.....#...#.#\n#.###.#.###############.#####.###.#####.#.#.#######.###.#.#######.#####.#####.#.#\n#.#.P.#...#...#.......
#.#...#.....#...#.#.#...#.....#...#...#.#...#...#.#.....#i#\n#.#######.#.#.#.#####.#.#.#.#######.#.#.#.###.###.###.#####.#.#.###.#.###.#####.#\n#.......#...#.......#...#.#.......#.#...#...#....f#.E.#...#.#.....#.#.#...#.O.#.#\n#.#####.#################.###.#####.#####.#########.#####.#.#######X#.#.###.#.###\n#.#.#...#...#...T...#.A.#...#..k..#.#...#.#...#...#.#...#.#.....#...#.#.....#...#\n#.#.#.###.#.#.#####.#.#.###.#####.#.#.#.#.#.#.#.#.#.#.#.#.#####.#.###.#########.#\n#...#.....#.......#...#.........#.Y...#.#..s#...#..g..#.......#v..#............l#\n#################################################################################`\n\n\tlines = strings.Split(mapDef, \"\\n\")\n\n\torigins := make([]*pointOfInterest, 0)\n\tkeys = make([]*pointOfInterest, 0)\n\tkeyRegexp = regexp.MustCompile(\"[a-z]\")\n\tdoorRegexp = regexp.MustCompile(\"[A-Z]\")\n\trequiredKeys = 0\n\n\tfor i, line := range lines {\n\t\tentrancesIndexes := entranceRegexp.FindAllStringIndex(line, -1)\n\t\tif entrancesIndexes != nil {\n\t\t\tfor _, index := range entrancesIndexes {\n\t\t\t\tpoi := pointOfInterest{name: line[index[0]:index[1]], coords: coordinate{x: index[0], y: i}, key: '@'}\n\t\t\t\torigins = append(origins, &poi)\n\t\t\t}\n\t\t}\n\n\t\tkeysIndexes := keyRegexp.FindAllStringIndex(line, -1)\n\t\tif keysIndexes != nil {\n\t\t\tfor _, index := range keysIndexes {\n\t\t\t\tpoi := pointOfInterest{name: line[index[0]:index[1]], coords: coordinate{x: index[0], y: i}, key: rune(line[index[0]])}\n\t\t\t\trequiredKeys = requiredKeys | (1 << (poi.key - 'a'))\n\t\t\t\tkeys = append(keys, &poi)\n\t\t\t}\n\t\t}\n\t}\n\n\tfor _, o := range origins {\n\t\to.neighbours = scoreDestinations(*o, lines, keys)\n\t}\n\tfor _, key := range keys {\n\t\tkey.neighbours = scoreDestinations(*key, lines, keys)\n\t}\n\n\toriginState = state{poi: origins, currentKeys: make([]rune, len(origins))}\n\twinningState = dijkstra(originState, requiredKeys)\n\tfmt.Printf(\"Shortest Path: %d\\n\", winningState.distance)\n\n}",
"func ExampleReplaceTable() {\n\tt := ReplaceStringTable{\n\t\t\"Hello\", \"Hi\",\n\t\t\"World\", \"Gophers\",\n\t}\n\tr := transform.NewReader(strings.NewReader(\"Hello, World\"), ReplaceAll(t))\n\tio.Copy(os.Stdout, r)\n\t// Output: Hi, Gophers\n}",
"func main() {\n\n\tfileName := \"./temp/tempfile.txt\"\n\twriteToFile(fileName)\n\treadFromFile(fileName)\n\tpwd()\n\twriteManyStringToFile(\"./temp/lgfile.txt\", 10)\n\tprtFileStat(fileName)\n\n}",
"func (ed *Editor) Replace(s string) {\n\ted.initTransformation()\n\ted.putString(s)\n\ted.commitTransformation()\n\ted.autoscroll()\n\ted.dirty = true\n}",
"func (s stringSource) ReplaceRunes(byteOffset, runeCount int64, str string) {\n}",
"func Replace(fstTmpl *fasttemplate.Template, replaceMap map[string]string, allowUnresolved bool, prefixFilter string) (string, error) {\n\tvar unresolvedErr error\n\treplacedTmpl := fstTmpl.ExecuteFuncString(func(w io.Writer, tag string) (int, error) {\n\t\tif !strings.HasPrefix(tag, prefixFilter) {\n\t\t\treturn w.Write([]byte(fmt.Sprintf(\"{{%s}}\", tag)))\n\t\t}\n\t\treplacement, ok := replaceMap[tag]\n\t\tif !ok {\n\t\t\tif allowUnresolved {\n\t\t\t\t// just write the same string back\n\t\t\t\treturn w.Write([]byte(fmt.Sprintf(\"{{%s}}\", tag)))\n\t\t\t}\n\t\t\tunresolvedErr = errors.Errorf(errors.CodeBadRequest, \"failed to resolve {{%s}}\", tag)\n\t\t\treturn 0, nil\n\t\t}\n\t\t// The following escapes any special characters (e.g. newlines, tabs, etc...)\n\t\t// in preparation for substitution\n\t\treplacement = strconv.Quote(replacement)\n\t\treplacement = replacement[1 : len(replacement)-1]\n\t\treturn w.Write([]byte(replacement))\n\t})\n\tif unresolvedErr != nil {\n\t\treturn \"\", unresolvedErr\n\t}\n\treturn replacedTmpl, nil\n}",
"func (d *DataPacket) replace(startIndex int, replacement []byte) {\n\td.data = append(d.data[:startIndex],\n\t\tappend(replacement, d.data[len(replacement)+startIndex:]...)...)\n}",
"func TestPathRewrite(t *testing.T) {\n\tw := httptest.NewRecorder()\n\trecordRequest := NewResponseRecorder(w)\n\treader := strings.NewReader(`{\"username\": \"dennis\"}`)\n\n\trequest, err := http.NewRequest(\"POST\", \"http://getcaddy.com/index.php?key=value\", reader)\n\tif err != nil {\n\t\tt.Fatalf(\"Request Formation Failed: %s\\n\", err.Error())\n\t}\n\turlCopy := *request.URL\n\turlCopy.Path = \"a/custom/path.php\"\n\tctx := context.WithValue(request.Context(), OriginalURLCtxKey, urlCopy)\n\trequest = request.WithContext(ctx)\n\n\trepl := NewReplacer(request, recordRequest, \"\")\n\n\tif got, want := repl.Replace(\"This path is '{path}'\"), \"This path is 'a/custom/path.php'\"; got != want {\n\t\tt.Errorf(\"{path} replacement failed; got '%s', want '%s'\", got, want)\n\t}\n\n\tif got, want := repl.Replace(\"This path is {rewrite_path}\"), \"This path is /index.php\"; got != want {\n\t\tt.Errorf(\"{rewrite_path} replacement failed; got '%s', want '%s'\", got, want)\n\t}\n\tif got, want := repl.Replace(\"This path is '{uri}'\"), \"This path is 'a/custom/path.php?key=value'\"; got != want {\n\t\tt.Errorf(\"{uri} replacement failed; got '%s', want '%s'\", got, want)\n\t}\n\n\tif got, want := repl.Replace(\"This path is {rewrite_uri}\"), \"This path is /index.php?key=value\"; got != want {\n\t\tt.Errorf(\"{rewrite_uri} replacement failed; got '%s', want '%s'\", got, want)\n\t}\n\n}",
"func makeArgsReplacer(args []string) *caddy.Replacer {\n\trepl := caddy.NewEmptyReplacer()\n\trepl.Map(func(key string) (any, bool) {\n\t\t// TODO: Remove the deprecated {args.*} placeholder\n\t\t// support at some point in the future\n\t\tif matches := argsRegexpIndexDeprecated.FindStringSubmatch(key); len(matches) > 0 {\n\t\t\t// What's matched may be a substring of the key\n\t\t\tif matches[0] != key {\n\t\t\t\treturn nil, false\n\t\t\t}\n\n\t\t\tvalue, err := strconv.Atoi(matches[1])\n\t\t\tif err != nil {\n\t\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\t\"Placeholder {args.\" + matches[1] + \"} has an invalid index\")\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tif value >= len(args) {\n\t\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\t\"Placeholder {args.\" + matches[1] + \"} index is out of bounds, only \" + strconv.Itoa(len(args)) + \" argument(s) exist\")\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\"Placeholder {args.\" + matches[1] + \"} deprecated, use {args[\" + matches[1] + \"]} instead\")\n\t\t\treturn args[value], true\n\t\t}\n\n\t\t// Handle args[*] form\n\t\tif matches := argsRegexpIndex.FindStringSubmatch(key); len(matches) > 0 {\n\t\t\t// What's matched may be a substring of the key\n\t\t\tif matches[0] != key {\n\t\t\t\treturn nil, false\n\t\t\t}\n\n\t\t\tif strings.Contains(matches[1], \":\") {\n\t\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\t\"Variadic placeholder {args[\" + matches[1] + \"]} must be a token on its own\")\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tvalue, err := strconv.Atoi(matches[1])\n\t\t\tif err != nil {\n\t\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\t\"Placeholder {args[\" + matches[1] + \"]} has an invalid index\")\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\tif value >= len(args) {\n\t\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\t\"Placeholder {args[\" + matches[1] + \"]} index is out of bounds, only \" + strconv.Itoa(len(args)) + \" argument(s) exist\")\n\t\t\t\treturn nil, false\n\t\t\t}\n\t\t\treturn args[value], true\n\t\t}\n\n\t\t// Not an args placeholder, ignore\n\t\treturn nil, false\n\t})\n\treturn repl\n}",
"func sanitiseBINDFileInput(s string) string {\n\t// Remove SOA records.\n\tsoaRe := regexp.MustCompile(`(?m)[\\r\\n]+^.*IN\\s+SOA.*$`)\n\ts = soaRe.ReplaceAllString(s, \"\")\n\n\t// Remove all comments.\n\tcommentRe := regexp.MustCompile(`(?m)[\\r\\n]+^.*;;.*$`)\n\ts = commentRe.ReplaceAllString(s, \"\")\n\n\t// Swap all the tabs to spaces.\n\tr := strings.NewReplacer(\n\t\t\"\\t\", \" \",\n\t\t\"\\n\\n\", \"\\n\",\n\t)\n\ts = r.Replace(s)\n\ts = strings.TrimSpace(s)\n\n\treturn s\n}",
"func replace(s split, save func(string)) {\n\tif len(s.R) > 0 {\n\t\tfor _, b := range letters {\n\t\t\tc := string(b)\n\t\t\tsave(s.L + c + s.R[1:])\n\t\t}\n\t}\n}",
"func raceSafeCopy(old, new string) error {\n\toldInfo, err := os.Stat(old)\n\tif err != nil {\n\t\treturn err\n\t}\n\tnewInfo, err := os.Stat(new)\n\tif err == nil && newInfo.Size() == oldInfo.Size() {\n\t\treturn nil\n\t}\n\tdata, err := os.ReadFile(old)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// The module cache has unwritable directories by default.\n\t// Restore the user write bit in the directory so we can create\n\t// the new go.mod file. We clear it again at the end on a\n\t// best-effort basis (ignoring failures).\n\tdir := filepath.Dir(old)\n\tinfo, err := os.Stat(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := os.Chmod(dir, info.Mode()|writeBits); err != nil {\n\t\treturn err\n\t}\n\tdefer os.Chmod(dir, info.Mode())\n\t// Note: create the file writable, so that a racing go command\n\t// doesn't get an error before we store the actual data.\n\tf, err := os.OpenFile(new, os.O_CREATE|os.O_WRONLY, writeBits&^0o111)\n\tif err != nil {\n\t\t// If OpenFile failed because a racing go command completed our work\n\t\t// (and then OpenFile failed because the directory or file is now read-only),\n\t\t// count that as a success.\n\t\tif size(old) == size(new) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\tdefer os.Chmod(new, oldInfo.Mode())\n\tif _, err := f.Write(data); err != nil {\n\t\tf.Close()\n\t\treturn err\n\t}\n\treturn f.Close()\n}",
"func TestReplacer_Transform(t *testing.T) {\n\tdata := []struct {\n\t\t// input\n\t\told, new []byte\n\t\tdst, src []byte\n\t\tatEOF bool\n\n\t\t// expected\n\t\tnDst, nSrc int\n\t\thasErr bool\n\t\texpected []byte\n\t}{\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(\"abcdefgabcd\"),\n\t\t\tatEOF: true,\n\t\t\tnDst: 11,\n\t\t\tnSrc: 11,\n\t\t\texpected: []byte(`ABCdefgABCd`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(\"abcdefgabcd\"),\n\t\t\tatEOF: false,\n\t\t\tnDst: 11,\n\t\t\tnSrc: 11,\n\t\t\texpected: []byte(`ABCdefgABCd`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(\"abcdefgabca\"),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 11,\n\t\t\texpected: []byte(`ABCdefgABC`),\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(\"abcdefgabca\"),\n\t\t\tatEOF: true,\n\t\t\tnDst: 11,\n\t\t\tnSrc: 11,\n\t\t\texpected: []byte(`ABCdefgABCa`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(\"abcdefabca\"),\n\t\t\tatEOF: false,\n\t\t\tnDst: 9,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`ABCdefABC`),\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(\"abcdefabca\"),\n\t\t\tatEOF: true,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`ABCdefABCa`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(\"abcdefgabc\"),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`ABCdefgABC`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 2),\n\t\t\tsrc: []byte(`abc`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 2,\n\t\t\tnSrc: 3,\n\t\t\texpected: []byte(`AB`),\n\t\t\thasErr: true,\n\t\t},\n\t\t{ // 8\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 2),\n\t\t\tsrc: []byte(`abc`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 2,\n\t\t\tnSrc: 3,\n\t\t\texpected: []byte(`AB`),\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 2),\n\t\t\tsrc: []byte(`ab`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 0,\n\t\t\tnSrc: 2,\n\t\t\texpected: []byte(``),\n\t\t\thasErr: true,\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 2),\n\t\t\tsrc: []byte(`ab`),\n\t\t\tatEOF: true,\n\t\t\tnDst: 2,\n\t\t\tnSrc: 2,\n\t\t\texpected: []byte(`ab`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`abc`),\n\t\t\tnew: []byte(`ABC`),\n\t\t\tdst: make([]byte, 2),\n\t\t\tsrc: []byte(`xxxabc`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 2,\n\t\t\tnSrc: 2,\n\t\t\texpected: []byte(`xx`),\n\t\t\thasErr: true,\n\t\t},\n\t\t// tests for nil and empty bytes\n\t\t{\n\t\t\told: nil,\n\t\t\tnew: nil,\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t{\n\t\t\told: nil,\n\t\t\tnew: []byte(`abc`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`123`),\n\t\t\tnew: nil,\n\t\t\tdst: make([]byte, 
100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 7,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0456789`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`12a`),\n\t\t\tnew: nil,\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t{\n\t\t\told: []byte{},\n\t\t\tnew: []byte{},\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t{\n\t\t\told: []byte{},\n\t\t\tnew: []byte(`abc`),\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`123`),\n\t\t\tnew: []byte{},\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 7,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0456789`),\n\t\t},\n\t\t{\n\t\t\told: []byte(`12a`),\n\t\t\tnew: []byte{},\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t{\n\t\t\told: []byte{},\n\t\t\tnew: nil,\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t{\n\t\t\told: nil,\n\t\t\tnew: []byte{},\n\t\t\tdst: make([]byte, 100),\n\t\t\tsrc: []byte(`0123456789`),\n\t\t\tatEOF: false,\n\t\t\tnDst: 10,\n\t\t\tnSrc: 10,\n\t\t\texpected: []byte(`0123456789`),\n\t\t},\n\t\t// -- end of tests for nil and empty bytes\n\t}\n\n\tfor i, d := range data {\n\t\tnDst, nSrc, err := NewReplacer(d.old, d.new, nil).Transform(d.dst, d.src, d.atEOF)\n\t\tswitch {\n\t\tcase d.hasErr && err == nil:\n\t\t\tt.Errorf(\"data[%d] must occur an error but not occured\", i)\n\t\t\tcontinue\n\t\tcase !d.hasErr && err != nil:\n\t\t\tt.Errorf(\"data[%d] must not occur an error but error occured: %v\", i, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif nDst != d.nDst {\n\t\t\tt.Errorf(\"data[%d]'s expected nDst is %d but %d\", i, d.nDst, nDst)\n\t\t} else if bytes.Compare(d.dst[:nDst], d.expected) != 0 {\n\t\t\tt.Errorf(\"data[%d]'s expected dst is %v but %v\", i, d.expected, d.dst[:nDst])\n\t\t}\n\n\t\tif nSrc != d.nSrc {\n\t\t\tt.Errorf(\"data[%d]'s expected nSrc is %d but %d\", i, d.nSrc, nSrc)\n\t\t}\n\t}\n}",
"func ChangeOwnerPasswordFile(inFile, outFile string, pwOld, pwNew string, conf *model.Configuration) (err error) {\n\tif conf == nil {\n\t\treturn errors.New(\"pdfcpu: missing configuration for change owner password\")\n\t}\n\tconf.Cmd = model.CHANGEOPW\n\tconf.OwnerPW = pwOld\n\tconf.OwnerPWNew = &pwNew\n\n\tvar f1, f2 *os.File\n\n\tif f1, err = os.Open(inFile); err != nil {\n\t\treturn err\n\t}\n\n\ttmpFile := inFile + \".tmp\"\n\tif outFile != \"\" && inFile != outFile {\n\t\ttmpFile = outFile\n\t\tlog.CLI.Printf(\"writing %s...\\n\", outFile)\n\t} else {\n\t\tlog.CLI.Printf(\"writing %s...\\n\", inFile)\n\t}\n\n\tif f2, err = os.Create(tmpFile); err != nil {\n\t\treturn err\n\t}\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tf2.Close()\n\t\t\tf1.Close()\n\t\t\tif outFile == \"\" || inFile == outFile {\n\t\t\t\tos.Remove(tmpFile)\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t\tif err = f2.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif err = f1.Close(); err != nil {\n\t\t\treturn\n\t\t}\n\t\tif outFile == \"\" || inFile == outFile {\n\t\t\terr = os.Rename(tmpFile, inFile)\n\t\t}\n\t}()\n\n\treturn ChangeOwnerPassword(f1, f2, pwOld, pwNew, conf)\n}",
"func (cc *computer) replaceInstruction(ind int, instr instruction) {\n\tif ind < 0 || ind >= len(cc.instructions) {\n\t\treturn\n\t}\n\tcc.instructions[ind] = instr\n}",
"func replace(n, replacement int, pos uint) int {\n\ti1 := n & MASKARRAY[pos]\n\tmask2 := replacement << (4 * pos)\n\treturn (i1 ^ mask2)\n}",
"func Replace(haystack, needle, gold string) string {\n\treturn strings.Replace(haystack, needle, gold, -1)\n}",
"func overwriteFileWith(oldPath string, newPath string) {\n\tdata, err1 := ioutil.ReadFile(newPath) // create the new data ([]byte) from reading the new file\n\tif err1 != nil {\n\t\tprint(\"error returned in ReadFile\")\n\t\treturn\n\t}\n\n\terr2 := ioutil.WriteFile(oldPath, data, 644) // write new data over the old file\n\tif err2 != nil {\n\t\tprint(\"error returned in WriteFile\")\n\t\treturn\n\t}\n}",
"func textReplace(data string) string {\n\tfor k, v := range textReplaceMap {\n\t\tdata = strings.Replace(data, k, v, -1)\n\t}\n\treturn data\n}",
"func (br BytesReplacer) Replace(p []byte) []byte {\n\tfor _, pair := range br {\n\t\tp = bytes.Replace(p, pair[0], pair[1], -1)\n\t}\n\treturn p\n}",
"func replace(input, target, replacement string) string {\n\tif idx := strings.LastIndex(input, target); idx >= 0 {\n\t\treturn input[0:idx] + replacement\n\t}\n\n\treturn input\n}",
"func ReplaceAllMapped(original string, toReplace map[string]string) string {\n\tvar (\n\t\tfilledInDocument = original\n\t\tkeys = MapKeys(toReplace)\n\t)\n\n\tsort.SliceStable(keys, func(i, j int) bool {\n\t\treturn len(keys[i]) > len(keys[j])\n\t})\n\n\tfor _, key := range keys {\n\t\tfilledInDocument = strings.ReplaceAll(filledInDocument, key, toReplace[key])\n\t}\n\treturn filledInDocument\n}",
"func replace(vbytes []byte, replace, part string, index int) (old, new string, loc []int, newcontents []byte, err error) {\n\tre := regexp.MustCompile(semverMatcher)\n\tif index == 0 {\n\t\tloc = re.FindIndex(vbytes)\n\t} else {\n\t\tlocs := re.FindAllIndex(vbytes, -1)\n\t\tif locs == nil {\n\t\t\treturn \"\", \"\", nil, nil, fmt.Errorf(\"Did not find semantic version\")\n\t\t}\n\t\tlocsLen := len(locs)\n\t\tif index >= locsLen {\n\t\t\treturn \"\", \"\", nil, nil, fmt.Errorf(\"semver index to replace out of range. Found %v, want %v\", locsLen, index)\n\t\t}\n\t\tif index < 0 {\n\t\t\tloc = locs[locsLen+index]\n\t\t} else {\n\t\t\tloc = locs[index]\n\t\t}\n\t}\n\t// fmt.Println(loc)\n\tif loc == nil {\n\t\treturn \"\", \"\", nil, nil, fmt.Errorf(\"Did not find semantic version\")\n\t}\n\tvs := string(vbytes[loc[0]:loc[1]])\n\n\tif replace == \"\" {\n\t\tv := semver.New(vs)\n\t\tswitch part {\n\t\tcase \"major\":\n\t\t\tv.BumpMajor()\n\t\tcase \"minor\":\n\t\t\tv.BumpMinor()\n\t\tdefault:\n\t\t\tv.BumpPatch()\n\t\t}\n\t\treplace = v.String()\n\t}\n\n\tlen1 := loc[1] - loc[0]\n\tadditionalBytes := len(replace) - len1\n\t// Create and fill an extended buffer\n\tb := make([]byte, len(vbytes)+additionalBytes)\n\tcopy(b[:loc[0]], vbytes[:loc[0]])\n\tcopy(b[loc[0]:loc[1]+additionalBytes], replace)\n\tcopy(b[loc[1]+additionalBytes:], vbytes[loc[1]:])\n\t// fmt.Printf(\"writing: '%v'\", string(b))\n\n\treturn vs, replace, loc, b, nil\n}",
"func includefile(filename string) string {\n\tdata, err := ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\treturn \"\"\n\t}\n\treturn codemap.Replace(string(data))\n}",
"func Update(file, content string, args ...interface{}) error {\n\tpermission := uint32(0644)\n\n\tif len(args) > 0 {\n\t\tpermission = args[0].(uint32)\n\t}\n\n\tf, err := os.OpenFile(file, os.O_APPEND|os.O_WRONLY, os.FileMode(permission))\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := f.WriteString(content); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func reifyNewSite(goProjectPath, projectPath string) error {\n\tfiles, err := collectFiles(projectPath, []string{\".go\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// For each go file within project, make sure the refs are to the new site,\n\t// not to the template site\n\trelGoProjectPath := projectPathRelative(goProjectPath)\n\trelProjectPath := projectPathRelative(projectPath)\n\tfor _, f := range files {\n\t\t// Load the file, if it contains refs to goprojectpath, replace them with relative project path imports\n\t\tdata, err := ioutil.ReadFile(f)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Substitutions - consider reifying instead if it is any more complex\n\t\tfileString := string(data)\n\t\tif strings.Contains(fileString, relGoProjectPath) {\n\t\t\tfileString = strings.Replace(fileString, relGoProjectPath, relProjectPath, -1)\n\t\t}\n\n\t\terr = ioutil.WriteFile(f, []byte(fileString), permissions)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\treturn nil\n}",
"func (e *Editor) queryReplace() {\n\te.Searchtext = e.getInput(\"Query replace: \")\n\tif len(e.Searchtext) < 1 {\n\t\treturn\n\t}\n\te.Replace = e.getInput(\"With: \")\n\tslen := len(e.Searchtext)\n\tbp := e.CurrentBuffer\n\topoint := bp.Point\n\tlpoint := -1\n\task := true\n\t/* build query replace question string */\n\tquestion := fmt.Sprintf(\"Replace '%s' with '%s' ? \", e.Searchtext, e.Replace)\n\t/* scan through the file, from point */\n\tnumsub := 0\nouter:\n\tfor {\n\t\tfound := bp.searchForward(bp.Point, e.Searchtext)\n\t\t/* if not found set the point to the last point of replacement, or where we started */\n\t\tif found == -1 {\n\t\t\tif lpoint == -1 {\n\t\t\t\tbp.SetPoint(opoint)\n\t\t\t} else {\n\t\t\t\tbp.SetPoint(lpoint)\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t\tbp.SetPoint(found)\n\t\t/* search_forward places point at end of search, move to start of search */\n\t\tfor k := 0; k < slen; k++ {\n\t\t\tbp.PointPrevious()\n\t\t}\n\t\te.Display(e.CurrentWindow, true)\n\n\t\tif ask == true {\n\t\t\tanswer := e.getInput(question)\n\n\t\tinner:\n\t\t\tfor {\n\t\t\t\te.Display(e.CurrentWindow, true)\n\t\t\t\tresp := []rune(answer)\n\t\t\t\tc := ' '\n\t\t\t\tif len(resp) > 0 {\n\t\t\t\t\tc = resp[0]\n\t\t\t\t}\n\t\t\t\tswitch c {\n\t\t\t\tcase 'y': /* yes, substitute */\n\t\t\t\t\tbreak inner\n\t\t\t\tcase 'n': /* no, find next */\n\t\t\t\t\tbp.SetPoint(found) /* set to end of search string */\n\t\t\t\tcase '!': /* yes/stop asking, do the lot */\n\t\t\t\t\task = false\n\t\t\t\t\tbreak inner\n\t\t\t\t//case 0x1B: /* esc */\n\t\t\t\t//flushinp() /* discard any escape sequence without writing in buffer */\n\t\t\t\tcase 'q': /* controlled exit */\n\t\t\t\t\tbreak outer\n\t\t\t\tdefault: /* help me */\n\t\t\t\t\tanswer = e.getInput(\"(y)es, (n)o, (!)do the rest, (q)uit: \")\n\t\t\t\t\t//continue inner\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tfor k := 0; k < slen; k++ { // delete found search text\n\t\t\tbp.Delete()\n\t\t}\n\t\tbp.Insert(e.Replace) // qed\n\t\tlpoint = bp.Point\n\t\tnumsub++\n\t}\n\te.msg(\"%d substitutions\", numsub)\n}",
"func testPasswd(t *testing.T, dir string, extraArgs ...string) {\n\t// Change password using \"-extpass\"\n\targs := []string{\"-q\", \"-passwd\", \"-extpass\", \"echo test\"}\n\targs = append(args, extraArgs...)\n\targs = append(args, dir)\n\tcmd := exec.Command(test_helpers.GocryptfsBinary, args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Run()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t// Change password using stdin\n\targs = []string{\"-q\", \"-passwd\"}\n\targs = append(args, extraArgs...)\n\targs = append(args, dir)\n\tcmd = exec.Command(test_helpers.GocryptfsBinary, args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tp, err := cmd.StdinPipe()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmd.Start()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\t// Old password\n\tp.Write([]byte(\"test\\n\"))\n\t// New password\n\tp.Write([]byte(\"newpasswd\\n\"))\n\tp.Close()\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n}",
"func makeFileString(baseDir, fileName string, s string) string {\n\treturn makeFile(baseDir, fileName, []byte(s))\n}",
"func (re *RegexpStd) ReplaceAllStringFunc(src string, repl func(string) string) string {\n\trep, err := re.p.ReplaceFunc(src, makeRepFunc(repl), 0, -1)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn src\n\t}\n\treturn rep\n}",
"func ReplaceTokens(file *[]byte, fmap *map[string] string) *[]byte {\n\tfor k, v := range *fmap {\n\t\t//fmt.Printf(\"k:%v v:%v\\n\", k, v)\n\n\t\tarr := ReplaceToken(k, v, file)\n\t\tfmt.Printf(\"arr:\\n%s\\n\", arr)\n\t\tfile = arr\n\t}\n\treturn file\n}",
"func main() {\n\tvar inputGzPath string\n\tvar outputGoPath string\n\tflag.StringVar(&inputGzPath, \"inputGzPath\", \"build/GeoLite2-Country.mmdb.gz\", \"input gz file path\")\n\tflag.StringVar(&outputGoPath, \"outputGoPath\", \"buildData.go\", \"output go file path\")\n\tflag.Parse()\n\tgzContent, err := ioutil.ReadFile(inputGzPath)\n\tif err != nil {\n\t\tfmt.Println(\"read input file\", err)\n\t\treturn\n\t}\n\tr := bytes.NewReader(gzContent)\n\tgzReader, err := gzip.NewReader(r)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tuncompressData, err := ioutil.ReadAll(gzReader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t//writeGoFileWithAsm(uncompressData)\n\tmustWriteFile(outputGoPath,getGoFileContentByteStringV2(uncompressData))\n\t/*\n\terr = ioutil.WriteFile(outputGoPath, getGoFileContentByteStringV2(content), os.FileMode(0644))\n\tif err != nil {\n\t\tfmt.Println(\"write output file\", err)\n\t\treturn\n\t}*/\n\treturn\n}",
"func (s service) EncryptPassword(src string) string {\n\tbytes, _ := bcrypt.GenerateFromPassword([]byte(s.getRawPasswordWithSalt(src)), bcrypt.MinCost)\n\treturn string(bytes)\n}",
"func OverwriteString(filename, s string) error {\n\treturn Overwrite(filename, []byte(s))\n}"
] | [
"0.593332",
"0.58920455",
"0.58307135",
"0.5759988",
"0.5640343",
"0.56176907",
"0.54462165",
"0.5417814",
"0.5408237",
"0.53750044",
"0.5279094",
"0.5179909",
"0.51240623",
"0.51058346",
"0.50940335",
"0.50249094",
"0.50169015",
"0.5014492",
"0.49995518",
"0.4928472",
"0.49246618",
"0.49210894",
"0.49135163",
"0.4886205",
"0.48830825",
"0.48688754",
"0.48688218",
"0.4861009",
"0.48269793",
"0.48057628",
"0.47885564",
"0.47746065",
"0.47739744",
"0.47624117",
"0.47502857",
"0.47439015",
"0.47412547",
"0.47367838",
"0.47215736",
"0.47206885",
"0.4711192",
"0.47093213",
"0.47013137",
"0.46904844",
"0.46799693",
"0.46778065",
"0.4676544",
"0.4658415",
"0.4656853",
"0.46484828",
"0.46430868",
"0.46422306",
"0.4638013",
"0.46307883",
"0.4616707",
"0.46080437",
"0.45993638",
"0.45977345",
"0.45743266",
"0.45631608",
"0.45617768",
"0.45595765",
"0.45552897",
"0.45542437",
"0.45526895",
"0.45449975",
"0.45436952",
"0.45393306",
"0.4537",
"0.45336926",
"0.45262924",
"0.45240918",
"0.45223245",
"0.4521891",
"0.45209283",
"0.45193955",
"0.45126143",
"0.45121711",
"0.45103142",
"0.44967273",
"0.44803262",
"0.4477527",
"0.44754702",
"0.44565168",
"0.4438197",
"0.4426767",
"0.44150776",
"0.44121435",
"0.44068775",
"0.4394067",
"0.4389026",
"0.43867427",
"0.43855062",
"0.4381707",
"0.43794447",
"0.4376392",
"0.4370844",
"0.43700725",
"0.43675372",
"0.43650064",
"0.43572855"
] | 0.0 | -1 |
help, or invalid flag | func usage() {
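	// usage prints the program's invocation syntax, the registered flags, and a note about the -unsafe flag.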
fmt.Printf("Usage of %s:\n", os.Args[0])
fmt.Println(" [-unsafe] inputfile outfile search-string replace-string")
fmt.Println("\nAvailable options:")
flag.PrintDefaults()
fmt.Printf("\nThe -unsafe flag can increase performance. The performance gain is, however, marginal\n")
fmt.Println("and is generally not worth the added risks, hence the name: unsafe")
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Help(name string, writer io.Writer) string {\n\t_, err := parseFlags(name, []string{\"-h\"}, writer)\n\t// Error is always present because -h is passed\n\treturn err.Error()\n}",
"func (flags *Flags) Help() string {\n\treturn \"\"\n}",
"func (e *BadSpotError) IsShowAllHelp() {}",
"func failWithHelp(messageFormat string, args ...interface{}) {\n\tconst usageFormat = `Usage: %s <line>\\n`\n\n\tfmt.Fprintf(os.Stderr, messageFormat, args...)\n\tfmt.Fprintf(os.Stderr, usageFormat, os.Args[0])\n\n\tos.Exit(1)\n}",
"func (c *RunCommand) Help() string {\n\thelpText := `\nsource_fileをコンパイル後、problem_noで指定された番号の問題のテストを実行する\n\nUsage:\n\tgoyuki run problem_no source_file\n\nOptions:\n\t-language=lang, -l\t\t実行する言語を指定します (デフォルト 拡張子から判別)\n\t-validater=validater, -V テストの一致方法を指定します (デフォルト diff validater)\n\t-verbose, -vb\t\tコンパイル時、実行時の標準出力、標準エラー出力を表示する\n\t-place=n, -p\t\t\t出力される数値を小数点以下n桁に丸める (float validater時のみ) (0<=n<=15)\n\n\n`\n\treturn strings.TrimSpace(helpText)\n}",
"func show_happiness()bool{\nreturn flags['h']/* should lack of errors be announced? */\n}",
"func flagHelp() string {\n\tvar bd strings.Builder\n\n\tw := tabwriter.NewWriter(&bd, 3, 10, helpTabWidth, ' ', 0)\n\n\tfmt.Fprintf(w, \"Flags:\\n\")\n\tvar count int\n\tflag.VisitAll(func(f *flag.Flag) {\n\t\tcount++\n\t\tif f.DefValue == \"\" {\n\t\t\tfmt.Fprintf(w, \"\\t-%v\\t%v\\n\", f.Name, f.Usage)\n\t\t} else {\n\t\t\tfmt.Fprintf(w, \"\\t-%v\\t%v\\t(%v)\\n\", f.Name, f.Usage, f.DefValue)\n\t\t}\n\t})\n\tif count == 0 {\n\t\treturn \"\\n\"\n\t}\n\n\tw.Flush()\n\n\treturn bd.String()\n}",
"func ShouldGiveHelp(arg string) bool {\n\treturn isOneOf(arg, HelpArgs)\n}",
"func flagUsage (){\n\tusageText := \"This program is an example cli tool \\n\" +\n\t\t\"Usage: \\n\" +\n\t\t\"ArgsSub command [arguments] \\n\" +\n\t\t\"The commands are \\n\" +\n\t\t\"uppercase uppercase a string \\n\" +\n\t\t\"lowercase lowercase a string \\n\" +\n\t\t\"use \\\"ArgsSubTest.txt [command] -- help \\\" for more information about a command \"\n\n\tfmt.Println(os.Stderr, \"%s \\n\", usageText)\n}",
"func showUsage(message string, args ...interface{}) {\n\tflag.PrintDefaults()\n\tif message != \"\" {\n\t\tfmt.Printf(\"\\n[error] \"+message+\"\\n\", args...)\n\t\tos.Exit(1)\n\t}\n\n\tos.Exit(0)\n}",
"func (h *validateCommand) Help() int {\n\tutil.WriteString(h.out, `validate input against the dgo type system\n\nUsage:\n dgo validate [flags]\n\nAliases:\n v, val, valid\n\nFlags:\n -i, --input Relative or absolute path to a yaml file containing input to validate\n -s, --spec Relative or absolute path to a yaml or dgo file with the parameter definitions\n\nGlobal Flags:\n -v, --verbose Be verbose in output\n`)\n\treturn 0\n}",
"func (fv *Enum) Help() string {\n\tif fv.CaseSensitive {\n\t\treturn fmt.Sprintf(\"one of %v (case-sensitive)\", fv.Choices)\n\t}\n\treturn fmt.Sprintf(\"one of %v\", fv.Choices)\n}",
"func (fv *EnumSet) Help() string {\n\tif fv.CaseSensitive {\n\t\treturn fmt.Sprintf(\"one of %v (case-sensitive)\", fv.Choices)\n\t}\n\treturn fmt.Sprintf(\"one of %v\", fv.Choices)\n}",
"func verify(err error, str interface{}) {\n\tif err != nil {\n\t\tfmt.Printf(\"Error retrieving flag for %s \\n\", str)\n\t\tos.Exit(1)\n\t}\n}",
"func (c *Completer) Help(ctx *Context, content IContent, line interface{}, index int) (interface{}, bool) {\n\treturn nil, false\n}",
"func TestHelpFlagInHelp(t *testing.T) {\n\tparentCmd := &Command{Use: \"parent\", Run: func(*Command, []string) {}}\n\n\tchildCmd := &Command{Use: \"child\", Run: func(*Command, []string) {}}\n\tparentCmd.AddCommand(childCmd)\n\n\toutput, err := executeCommand(parentCmd, \"help\", \"child\")\n\tif err != nil {\n\t\tt.Errorf(\"Unexpected error: %v\", err)\n\t}\n\n\tcheckStringContains(t, output, \"[flags]\")\n}",
"func (a API) HelpChk() (isNew bool) {\n\tselect {\n\tcase o := <-a.Ch.(chan HelpRes):\n\t\tif o.Err != nil {\n\t\t\ta.Result = o.Err\n\t\t} else {\n\t\t\ta.Result = o.Res\n\t\t}\n\t\tisNew = true\n\tdefault:\n\t}\n\treturn\n}",
"func (fv *Enums) Help() string {\n\tif fv.CaseSensitive {\n\t\treturn fmt.Sprintf(\"one of %v (case-sensitive)\", fv.Choices)\n\t}\n\treturn fmt.Sprintf(\"one of %v\", fv.Choices)\n}",
"func TestHelpFlagInHelp(t *testing.T) {\n\toutput := new(bytes.Buffer)\n\tparent := &Command{Use: \"parent\", Run: func(*Command, []string) {}}\n\tparent.SetOutput(output)\n\n\tchild := &Command{Use: \"child\", Run: func(*Command, []string) {}}\n\tparent.AddCommand(child)\n\n\tparent.SetArgs([]string{\"help\", \"child\"})\n\terr := parent.Execute()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tif !strings.Contains(output.String(), \"[flags]\") {\n\t\tt.Errorf(\"\\nExpecting to contain: %v\\nGot: %v\", \"[flags]\", output.String())\n\t}\n}",
"func pflagFlagErrMsgFlag(errMsg string) string {\n\tflagText := strings.TrimPrefix(errMsg, \"no such flag \")\n\tflagText = strings.TrimPrefix(flagText, \"unknown flag: \")\n\tflagText = strings.TrimPrefix(flagText, \"bad flag syntax: \")\n\t// unknown shorthand flag: 'x' in -x\n\tflagText = strings.TrimPrefix(flagText, \"unknown shorthand flag: \")\n\n\tif flagText == errMsg {\n\t\treturn \"\"\n\t}\n\n\tshorthandSplit := strings.Split(flagText, \"' in \")\n\tif len(shorthandSplit) > 1 {\n\t\tflagText = shorthandSplit[1]\n\t}\n\n\treturn flagText\n}",
"func help() {\n\tfmt.Println(\"\\n--------------Command--------------\")\n\tfmt.Println(\"1. status\")\n\tfmt.Println(\"2. input [tipe identitas: string] [nomor identitas: integer]\")\n\tfmt.Println(\"3. leave [nomor loker: integer]\")\n\tfmt.Println(\"4. find [nomor identitas: integer]\")\n\tfmt.Println(\"5. search [tipe identitas: string]\")\n\tfmt.Println(\"6. exit\")\n\tfmt.Println(\"--------------End Command--------------\\n\")\n}",
"func (v *ValidateCmd) Help() string {\n\treturn `\nChecks the currently logged in account and verifies its ability to access TFE API. \nIf successful, emits a notification to macOS with the name of the user. \n`\n}",
"func addHelpFlag(name string, fs *pflag.FlagSet) {\n\tfs.BoolP(flagHelp, flagHelpShorthand, false, fmt.Sprintf(\"Help for %s.\", name))\n}",
"func (a *SLCommand) Help(command string) string {\n\tvar help string\n\n\tglobal := `\n -username \"...\" Sofleyer Username (env: IMAGES_SL_USERNAME)\n -api-key \"...\" Softlayer API Key (env: IMAGES_SL_API_KEY)\n`\n\tswitch command {\n\tcase \"modify\":\n\t\thelp = newModifyFlags().helpMsg\n\tcase \"list\":\n\t\thelp = newListFlags().helpMsg\n\tcase \"delete\":\n\t\thelp = newDeleteFlags().helpMsg\n\tcase \"copy\":\n\t\thelp = newCopyFlags().helpMsg\n\tdefault:\n\t\treturn \"no help found for command \" + command\n\t}\n\n\thelp += global\n\treturn help\n}",
"func (cli *CLIInstance) HelpFlags(flags *flag.FlagSet) string {\n\tvar data [][]string\n\tfirstWidth := 0\n\tflags.VisitAll(func(f *flag.Flag) {\n\t\tn := \" -\" + f.Name\n\t\tu := strings.TrimSpace(f.Usage)\n\t\tif u != \"\" && u[0] == '|' {\n\t\t\ts := strings.SplitN(u, \"|\", 3)\n\t\t\tif len(s) == 3 {\n\t\t\t\tn += \" \" + strings.TrimSpace(s[1])\n\t\t\t\tu = strings.TrimSpace(s[2])\n\t\t\t}\n\t\t}\n\t\tif len(n) > firstWidth {\n\t\t\tfirstWidth = len(n)\n\t\t}\n\t\tdata = append(data, []string{n, u})\n\t})\n\topts := brimtext.NewDefaultAlignOptions()\n\topts.Widths = []int{0, brimtext.GetTTYWidth() - firstWidth - 2}\n\treturn brimtext.Align(data, opts)\n}",
"func Usage(set *flag.FlagSet, hzRule ...HorizontalRule) {\n\texe := filepath.Base(ExecutablePath())\n\tif IsHorizontalRuleInList(hzRule, LeadingHorizontalRule) {\n\t\tfmt.Fprintln(os.Stderr, \"--\")\n\t}\n\tfor _, ln := range []string{\n\t\t`usage:`,\n\t\t` ` + exe + ` [options] [args ...]`,\n\t\t``,\n\t\t`options:`,\n\t\t` -v Display version information`,\n\t\t` -V Display change history`,\n\t\t` -n Do not output a trailing newline`,\n\t\t` -e Enable interpretation of backslash escapes`,\n\t\t` -E Disable interpretation of backslash escapes (default true)`,\n\t\t` Accepted for compatibility with echo, but this flag is ignored.`,\n\t\t` -f fmt Output string according to fmt (implies -e)`,\n\t\t` See formatting section below for details.`,\n\t\t``,\n\t\t`formatting:`,\n\t\t` The fmt argument given to flag -f may contain special placeholder symbols`,\n\t\t` of the form \"{N}\", meaning the N'th command-line argument. The indexing does`,\n\t\t` not include flags or their arguments; it is strictly the N'th argument that`,\n\t\t` would be printed to the standard error stream if no flags were given at all.`,\n\t\t``,\n\t\t` Additionally, the -f flag enables interpretation of backslash sequences as`,\n\t\t` if the -e flag was given.`,\n\t\t``,\n\t\t` The following backslash escape sequences are recognized:`,\n\t\t``,\n\t\t` \\\\ backslash \\x5C`,\n\t\t` \\a alert \\x07 BEL`,\n\t\t` \\b backspace \\x08 BS`,\n\t\t` \\c produce no further output`,\n\t\t` \\e escape \\x1B ESC`,\n\t\t` \\f form feed \\x0C FF`,\n\t\t` \\n new line \\x0A LF`,\n\t\t` \\r carriage return \\x0D CR`,\n\t\t` \\t horizontal tab \\x09 TAB`,\n\t\t` \\v vertical tab \\x0B VT`,\n\t\t` \\0NNN byte with octal value NNN 1 to 3 digits`,\n\t\t` \\xHH byte with hexadecimal value HH 1 to 2 digits`,\n\t} {\n\t\tfmt.Fprintln(os.Stderr, ln)\n\t}\n\tif IsHorizontalRuleInList(hzRule, TrailingHorizontalRule) {\n\t\tfmt.Fprintln(os.Stderr, \"--\")\n\t}\n}",
"func Help(args []string, cmds []*command.Command) int {\n\tvar (\n\t\tcmd string\n\t)\n\tif len(args) >= 3 {\n\t\tcmd = args[2]\n\t}\n\t// Prints the help if the command exist.\n\tfor _, c := range cmds {\n\t\tif c.Name() == cmd {\n\t\t\treturn c.Usage()\n\t\t}\n\t}\n\tif cmd == \"\" {\n\t\tfmt.Fprintf(os.Stderr, \"missing help command. Usage:\\n\\n\\t$ bw help [command]\\n\\nAvailable help commands\\n\\n\")\n\t\tvar usage []string\n\t\tfor _, c := range cmds {\n\t\t\tname := c.Name()\n\t\t\tfor i := len(name); i < 12; i++ {\n\t\t\t\tname += \" \"\n\t\t\t}\n\t\t\tusage = append(usage, fmt.Sprintf(\"\\t%s\\t- %s\\n\", name, c.Short))\n\t\t}\n\t\tsort.Strings(usage)\n\t\tfor _, u := range usage {\n\t\t\tfmt.Fprint(os.Stderr, u)\n\t\t}\n\t\tfmt.Fprintln(os.Stderr, \"\")\n\t\treturn 0\n\t}\n\tfmt.Fprintf(os.Stderr, \"help command %q not recognized. Usage:\\n\\n\\t$ bw help\\n\\n\", cmd)\n\treturn 2\n}",
"func (c *Command) Help() string {\n\t// Some commands with subcommands (kv/snapshot) call this without initializing\n\t// any flags first, so exit early to avoid a panic\n\tif c.flagSet == nil {\n\t\treturn \"\"\n\t}\n\treturn c.helpFlagsFor(c.flagSet)\n}",
"func usage() {\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}",
"func flagUsage(f *flag.Flag) (string, string, string) {\n\targ, usage := flag.UnquoteUsage(f)\n\tif f.DefValue == \"\" {\n\t\treturn arg, usage, \"\"\n\t} else {\n\t\tif arg == \"string\" {\n\t\t\treturn arg, usage, fmt.Sprintf(\"(default %q)\", f.DefValue)\n\t\t} else if arg == \"\" { // Boolean value\n\t\t\treturn arg, usage, \"\"\n\t\t} else {\n\t\t\treturn arg, usage, fmt.Sprintf(\"(default %v)\", f.DefValue)\n\t\t}\n\t}\n}",
"func showCmdUsage(cmd *RunCmd) {\n\tvar shell = \"\"\n\t//noinspection GoBoolExpressions\n\tif config.ShowCmdShells {\n\t\tshell = fmt.Sprintf(\" (%s)\", cmd.Shell())\n\t}\n\tif !cmd.EnableHelp() {\n\t\tfmt.Fprintf(config.ErrOut, \"%s%s: No help available.\\n\", cmd.Name, shell)\n\t\treturn\n\t}\n\t// Usages\n\t//\n\tfor i, usage := range cmd.Config.Usages {\n\t\tor := \"or\"\n\t\tif i == 0 {\n\t\t\tfmt.Fprintf(config.ErrOut, \"Usage:\\n\")\n\t\t\tor = \" \" // 2 spaces\n\t\t}\n\t\tpad := strings.Repeat(\" \", len(cmd.Name)-1)\n\t\tif usage[0] == '(' {\n\t\t\tfmt.Fprintf(config.ErrOut, \" %s %s\\n\", pad, usage)\n\t\t} else {\n\t\t\tfmt.Fprintf(config.ErrOut, \" %s %s %s\\n\", or, cmd.Name, usage)\n\t\t}\n\t}\n\thasHelpShort := false\n\thasHelpLong := false\n\tfor _, opt := range cmd.Config.Opts {\n\t\tif opt.Short == 'h' {\n\t\t\thasHelpShort = true\n\t\t}\n\t\tif opt.Long == \"help\" {\n\t\t\thasHelpLong = true\n\t\t}\n\t}\n\t// Options\n\t//\n\tif len(cmd.Config.Opts) > 0 {\n\t\tfmt.Fprintln(config.ErrOut, \"Options:\")\n\t\tif !hasHelpShort || !hasHelpLong {\n\t\t\tswitch {\n\t\t\tcase !hasHelpShort && hasHelpLong:\n\t\t\t\tfmt.Fprintln(config.ErrOut, \" -h\")\n\t\t\tcase hasHelpShort && !hasHelpLong:\n\t\t\t\tfmt.Fprintln(config.ErrOut, \" --help\")\n\t\t\tdefault:\n\t\t\t\tfmt.Fprintln(config.ErrOut, \" -h, --help\")\n\t\t\t}\n\t\t\tfmt.Fprintln(config.ErrOut, \" Show full help screen\")\n\t\t}\n\t}\n\tfor _, opt := range cmd.Config.Opts {\n\t\tb := &strings.Builder{}\n\t\tb.WriteString(\" \")\n\t\tif opt.Short != 0 {\n\t\t\tb.WriteRune('-')\n\t\t\tb.WriteRune(opt.Short)\n\t\t}\n\t\tif opt.Long != \"\" {\n\t\t\tif opt.Short != 0 {\n\t\t\t\tb.WriteString(\", \")\n\t\t\t}\n\t\t\tb.WriteString(\"--\")\n\t\t\tb.WriteString(opt.Long)\n\t\t}\n\t\tif opt.Value != \"\" {\n\t\t\tb.WriteRune(' ')\n\t\t\tb.WriteRune('<')\n\t\t\tb.WriteString(opt.Value)\n\t\t\tb.WriteRune('>')\n\t\t}\n\t\tif opt.Desc != \"\" {\n\t\t\tif opt.Short != 0 && opt.Long == \"\" && opt.Value == \"\" {\n\t\t\t\tb.WriteString(\" \")\n\t\t\t} else {\n\t\t\t\tb.WriteString(\"\\n \")\n\t\t\t}\n\t\t\tb.WriteString(opt.Desc)\n\t\t}\n\t\tfmt.Fprintln(config.ErrOut, b.String())\n\t}\n}",
"func printHelp() {\n fmt.Printf(\"Usage: %s [flags] <in> <out>\\n\\n\", os.Args[0])\n flag.PrintDefaults()\n}",
"func usageMessage() {\n\tfmt.Printf(\"Help for %s\\n\", GetVersionString())\n\tfmt.Println(\" --Help Prints this help message.\")\n\tfmt.Println(\" --Version Prints the version number of this utility.\")\n\tfmt.Println(\" --Licence Prints the copyright licence this utility is release under.\")\n\tfmt.Println(\" --Problem <number> Specifies problem ID to evaluate.\")\n\tfmt.Println(\" --AllProblems Evaluates all problems for which there is code (overrides --Problem)\")\n\tfmt.Println(\" --Concurrent Sovlves problems concurrently (instead of the sequential default)\")\n\tos.Exit(0)\n}",
"func\thelp(argv []string, pwd string) error {\n\tif len(argv) != 1 {\n\t\treturn fmt.Errorf(\"usage: todo help [argument]\")\n\t}\n\n\tfor index, _ := range help_function {\n\t\tif help_function[index].Name == argv[0] {\n\t\t\treturn help_function[index].Call()\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"todo: unknown help topic \\\"%s\\\"\", argv[0])\n}",
"func buildOptionOrFlagHelpName(name, alias string) (styled, unstyled string) {\n\tstyled = customo.Format(\"--\"+name, customo.AttrBold)\n\tunstyled = \"--\" + name\n\n\tif alias != \"\" {\n\t\tstyled += \", \" + customo.Format(\"-\"+alias, customo.AttrBold)\n\t\tunstyled += \", -\" + alias\n\t}\n\n\treturn styled, unstyled\n}",
"func printHelp(parser *flags.Parser) {\n\tparser.WriteHelp(os.Stderr)\n\tos.Exit(0)\n}",
"func Help(cmdInfo CommandInfo) {\n\tif len(cmdInfo.CmdOps) == 1 {\n\t\t// When user only writes: ?help\n\t\tprettyPrintHelp(\n\t\t\t\"Error\",\n\t\t\t\"You must query a valid command.\",\n\t\t\tformat(\n\t\t\t\tcreateFields(\"EXAMPLE\", cmdInfo.Prefix+\"help search\", true),\n\t\t\t),\n\t\t\tcmdInfo,\n\t\t\t14886454,\n\t\t)\n\t\treturn\n\t}\n\tfull := strings.Join(cmdInfo.CmdOps[1:], \" \")\n\tif !find(full, cmdInfo) {\n\t\tprettyPrintHelp(\n\t\t\tfull,\n\t\t\t\"Command Not Found\",\n\t\t\tformat(\n\t\t\t\tcreateFields(\"To List All Commands:\", cmdInfo.Prefix+\"list\", true),\n\t\t\t),\n\t\t\tcmdInfo,\n\t\t\t14886454,\n\t\t)\n\t\treturn\n\t}\n\t// Valid commands\n\tswitch full {\n\tcase \"search\":\n\t\tprettyPrintHelp(\n\t\t\t\"Search\",\n\t\t\t\"Search will look up an item from New Horizon's bug and fish database.\",\n\t\t\tformat(\n\t\t\t\tcreateFields(\"EXAMPLE\", cmdInfo.Prefix+\"search emperor butterfly\", true),\n\t\t\t\tcreateFields(\"EXAMPLE\", cmdInfo.Prefix+\"search north bug\", true),\n\t\t\t),\n\t\t\tcmdInfo,\n\t\t\t9410425,\n\t\t)\n\tcase \"list\":\n\t\tprettyPrintHelp(\n\t\t\t\"List\",\n\t\t\t\"List will show all commands the bot understands.\",\n\t\t\tformat(\n\t\t\t\tcreateFields(\"EXAMPLE\", cmdInfo.Prefix+\"list\", true),\n\t\t\t),\n\t\t\tcmdInfo,\n\t\t\t9410425,\n\t\t)\n\tcase \"pong\":\n\t\tprettyPrintHelp(\n\t\t\t\"Pong\",\n\t\t\t\"Playing with pong.\",\n\t\t\tformat(\n\t\t\t\tcreateFields(\"EXAMPLE\", cmdInfo.Prefix+\"pong\", true),\n\t\t\t),\n\t\t\tcmdInfo,\n\t\t\t9410425,\n\t\t)\n\t}\n}",
"func help() {\r\n fmt.Printf(\"ORIGAMI\\n\")\r\n fmt.Printf(\"\\tA web app that checks the toner levels of printers at the Elizabethtown College campus.\\n\\n\")\r\n fmt.Printf(\"USAGE\\n\")\r\n fmt.Printf(\"\\tUsage: origami [-f filepath | -h]\\n\\n\")\r\n fmt.Printf(\"OPTIONS\\n\")\r\n fmt.Printf(\"\\t-f: specify the filepath of the config file (\\\"./origami.conf\\\" by default)\\n\")\r\n fmt.Printf(\"\\t-h: this menu\\n\\n\")\r\n fmt.Printf(\"AUTHOR\\n\")\r\n fmt.Printf(\"\\tRory Dudley (aka pinecat: https://github.com/pinecat/origamiv2)\\n\\n\")\r\n fmt.Printf(\"EOF\\n\")\r\n}",
"func MustHelp(name, text string) {\n\tstd.MustHelp(name, text)\n}",
"func IsExtendedHelp(input string) bool {\n\tpattern := \"^[1-9][0-9]*\\\\?$\"\n\tmatch, e := regexp.Match(pattern, []byte(input))\n\tpanicNonNil(e)\n\treturn match\n}",
"func HelpMessageCallback() {\n fmt.Fprintf(flag.CommandLine.Output(),\n \"ReadIcal by [email protected]\\n\\n\")\n fmt.Fprintf(flag.CommandLine.Output(), \"Usage of %s:\\n\", os.Args[0])\n flag.PrintDefaults()\n}",
"func validateFlags() error {\n\t// if username == \"\" {\n\t// \treturn fmt.Errorf(\"username is required\")\n\t// }\n\n\tif host == \"\" {\n\t\treturn fmt.Errorf(\"host is required\")\n\t}\n\n\treturn nil\n}",
"func showHelp(s string) {\n var commands = setHelpCommands()\n fmt.Println(\"gobash> showing help\")\n switch s {\n case \"all\", \"help\", \"h\", \"?\":\n color.Green(\"%v\\n\", commands[\"help\"])\n color.Green(\"%v\\n\", commands[\"cd\"])\n color.Green(\"%v\\n\", commands[\"ls\"])\n color.Green(\"%v\\n\", commands[\"exec\"])\n color.Green(\"%v\\n\", commands[\"exit\"])\n case \"cd\":\n color.Green(\"%v\\n\", commands[\"cd\"])\n case \"ls\":\n color.Green(\"%v\\n\", commands[\"ls\"])\n case \"exec\":\n color.Green(\"%v\\n\", commands[\"exec\"])\n case \"exit\":\n color.Green(\"%v\\n\", commands[\"exit\"])\n default:\n color.Red(\"help command unrecognized\")\n }\n}",
"func TestHelp(t *testing.T) {\n\ttable := []testCase{\n\t\t{\"HELP 'select'\", true, \"HELP 'select'\"},\n\t}\n\n\tRunTest(t, table, false)\n}",
"func usage() {\n\tfmt.Fprintf(os.Stderr, \"Usage: %s [flags] [EZ Key] [access.log]\\n\", os.Args[0])\n\tfmt.Fprintf(os.Stderr, \"\\nFlags:\\n\")\n\tflag.PrintDefaults()\n\tos.Exit(1)\n}",
"func expectFlag(flag, value string) {\n\tif value == \"\" {\n\t\texit.WithMessage(fmt.Sprintf(\"You must specify the %q flag\", flag))\n\t}\n}",
"func usageAndExit(msg string) {\n\tif msg != \"\" {\n\t\tfmt.Fprintf(os.Stderr, msg)\n\t\tfmt.Fprintf(os.Stderr, \"\\n\\n\")\n\t}\n\tflag.Usage()\n\tos.Exit(1)\n}",
"func (fv *Ints) Help() string {\n\tvar base, bitSize string\n\tif fv.Base != 0 {\n\t\tbase = fmt.Sprintf(\"base %d \", fv.Base)\n\t}\n\tif fv.BitSize != 0 {\n\t\tbitSize = fmt.Sprintf(\"%d-bit \", fv.BitSize)\n\t}\n\tif base != \"\" || bitSize != \"\" {\n\t\treturn fmt.Sprintf(\"a %s%sinteger\", bitSize, base)\n\t}\n\treturn \"an integer\"\n}",
"func (h *Aesthetic) Help(short bool) string {\n\tif short {\n\t\treturn \"a e s t h e t i c\"\n\t} else {\n\t\treturn fmt.Sprintf(\"Usage: `%vaesthetic aesthetic`\\n\\nReturn: a e s t h e t i c\", celexacreams.Prefix)\n\t}\n}",
"func TestHelpCommand(t *testing.T) {\n\n\t// Run a blank command\n\toutput := executeCommand(\"help\")\n\n\t// We should a complete usage / help dump\n\trequire.Nil(t, executeError, \"there should not have been an error: \", executeError)\n\trequire.Contains(t, output,\n\t\t\"mafia token-code [flags]\",\n\t\t\"Expected usage display\")\n}",
"func showHelp(exitStatus int) {\n\tfmt.Print(help.Help)\n\n\tos.Exit(exitStatus)\n}",
"func PrintHelp() {\n fs := setupFlags(&options{})\n fs.Usage()\n}",
"func parseShortAndLongFlag(short rune, long string) error {\n\n\tswitch short {\n\tcase utf8.RuneError:\n\t\treturn fmt.Errorf(\"cannot use flag with invalid rune: %q\", short)\n\tcase '-':\n\t\treturn fmt.Errorf(\"cannot use hyphen as a flag: %q\", short)\n\t}\n\n\tswitch {\n\tcase long == \"\":\n\t\treturn errors.New(\"cannot use empty flag string\")\n\tcase strings.HasPrefix(long, \"-\"):\n\t\treturn fmt.Errorf(\"cannot use flag that starts with a hyphen: %q\", long)\n\t}\n\n\treturn redefinition(short, long)\n}",
"func addHelpCommandFlag(usage string, fs *pflag.FlagSet) {\n\tfs.BoolP(flagHelp, flagHelpShorthand, false,\n\t\tfmt.Sprintf(\"Help for the %s command.\", color.GreenString(strings.Split(usage, \" \")[0])))\n}",
"func usage(err error) {\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Error: %v\\n\\n\", err)\n\t}\n\tfmt.Fprintf(os.Stderr, \"Usage%s:\\n\", os.Args[0])\n\tflag.PrintDefaults()\n\tos.Exit(2)\n}",
"func validateProgramArgs() {\n\tif *version {\n\t\tshowVersion()\n\t}\n\n\tif *helpUsage {\n\t\tusage(0)\n\t}\n\n\tif *helpFull {\n\t\tshowHelp(0)\n\t}\n\n\tif *LocalFolderPath == \"\" ||\n\t\t(*dpUsername != \"\" && *dpRestURL == \"\" && *dpSomaURL == \"\") {\n\t\tusage(1)\n\t}\n\n}",
"func validateArgs() {\n\tif *inputEndpoint == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n}",
"func (c *Subcommand) Help(flags *flag.FlagSet) {\n\tfmt.Printf(\"%s\\n\\n%s\\n\\n\", c.shortHelp, c.longHelp)\n\tflags.PrintDefaults()\n}",
"func isVaildCmd(c string) bool {\n\tif len(c) == 0 || c[0:1] == \"-\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func usage() {\n\tfmt.Fprintf(os.Stderr, \"%s [OPTIONS],,,\\n\", os.Args[0])\n\tflag.PrintDefaults()\n}",
"func scHelp() error {\n\tif verbose {\n\t\tfmt.Printf(\"\\n*** https://github.com/rokath/trice ***\\n\\n\")\n\t\tfmt.Printf(\"If a non-multi parameter is used more than one times the last value wins.\\n\")\n\t}\n\tcage.Enable()\n\tdefer cage.Disable()\n\n\tfmt.Println(\"syntax: 'trice sub-command' [params]\")\n\tvar ok bool\n\tx := []selector{\n\t\t{allHelp || displayServerHelp, displayServerInfo},\n\t\t{allHelp || helpHelp, helpInfo},\n\t\t{allHelp || logHelp, logInfo},\n\t\t{allHelp || refreshHelp, refreshInfo},\n\t\t{allHelp || renewHelp, renewInfo},\n\t\t{allHelp || scanHelp, scanInfo},\n\t\t{allHelp || shutdownHelp, shutdownInfo},\n\t\t{allHelp || versionHelp, versionInfo},\n\t\t{allHelp || updateHelp, updateInfo},\n\t\t{allHelp || zeroIDsHelp, zeroIDsInfo},\n\t}\n\tfor _, z := range x {\n\t\tif z.flag {\n\t\t\tmsg.FatalOnErr(z.info())\n\t\t\tok = true\n\t\t}\n\t}\n\tif !ok {\n\t\tfmt.Println(\"example 'trice h -help': Print help for help.\")\n\t}\n\treturn nil\n}",
"func (p *BeersPlugin) help(c bot.Connector, kind bot.Kind, message msg.Message, args ...any) bool {\n\tmsg := \"Beers: imbibe by using either beers +=,=,++ or with the !imbibe/drink \" +\n\t\t\"commands. I'll keep a count of how many beers you've had and then if you want \" +\n\t\t\"to reset, just !puke it all up!\"\n\tp.b.Send(c, bot.Message, message.Channel, msg)\n\treturn true\n}",
"func isOptionShort(inString string) (bool, []string) {\r\n\tvar rx *regexp.Regexp = regexp.MustCompile(`(?i)^(-)?([!?a-z0-9]+)$`)\r\n\tm := rx.FindAllStringSubmatch(inString, -1)\r\n\tif nil == m { return false, nil }\r\n\tif 1 == len(m[0][2]) {\r\n\t\treturn true, []string{m[0][2]}\r\n\t}\r\n\r\n\tres := make([]string, 0)\r\n\t// If necessary, split a compound into its components (ex: \"-vh\" => \"-v -h\").\r\n\tfor _, c := range m[0][2] {\r\n\t\tres = append(res, fmt.Sprintf(\"%c\", c))\r\n\t}\r\n\treturn true, res\r\n}",
"func (argv *argT) AutoHelp() bool {\n\treturn argv.Help\n}",
"func (argv *argT) AutoHelp() bool {\n\treturn argv.Help\n}",
"func setupUsage(fs *flag.FlagSet) {\n printNonEmpty := func(s string) {\n if s != \"\" {\n fmt.Fprintf(os.Stderr, \"%s\\n\", s)\n }\n }\n tmpUsage := fs.Usage\n fs.Usage = func() {\n printNonEmpty(CommandLineHelpUsage)\n tmpUsage()\n printNonEmpty(CommandLineHelpFooter)\n }\n}",
"func usage() {\r\n\r\n\r\n\t// get only the file name from the absolute path in os.Args[0]\r\n\t_, file := filepath.Split(os.Args[0])\r\n\r\n fmt.Fprintf(os.Stderr, \"\\nBasic Usage: %s IP_Adderess:TCP_Port\\n\\n\", file )\r\n fmt.Fprintf(os.Stderr, \"Advanced Flag Usage: %s Flags:\\n\", file )\r\n flag.PrintDefaults()\r\n fmt.Fprintf(os.Stderr, \"\\n\")\r\n os.Exit(2)\r\n}",
"func generateHelpText(commands Commands, publicOnly bool) (s string) {\n\tcmdNames := []string{}\n\tfor name := range commands {\n\t\tif commands[name].Type == \"text\" && commands[name].Message == \"\" {\n\t\t\t// Ignore empty-message commands\n\t\t\tcontinue\n\t\t}\n\t\tcmdNames = append(cmdNames, name)\n\t}\n\tsort.Strings(cmdNames)\n\tfor i := 0; i < len(cmdNames); i++ {\n\t\tname := cmdNames[i]\n\t\tcommand := commands[cmdNames[i]]\n\t\tif (publicOnly && !command.IsPublic) || (!publicOnly && command.IsAdminOnly) {\n\t\t\tcontinue\n\t\t}\n\t\ts += fmt.Sprintf(\"!%s %s\\n\", name, command.ArgumentsText)\n\t}\n\ts += \"\\n<argument> => required\"\n\ts += \"\\n[argument] => optional\"\n\ts += \"\\n{argument} => indicates exact value\"\n\ts += \"\\n\\nDefaults where applicable:\\n - Base ticker => ETH,\\n - Quote ticker => Halo\\n\" +\n\t\t\" - Address(es) => first/all item(s) saved on address book, if available\"\n\treturn\n}",
"func checkWatchSyntax(ctx *cli.Context) {\n\tif len(ctx.Args()) != 1 {\n\t\tcli.ShowCommandHelpAndExit(ctx, \"watch\", 1) // last argument is exit code\n\t}\n}",
"func Help() int {\n\tfmt.Print(helpMessage)\n\treturn 0\n}",
"func printCLIHelp() {\n\tfmt.Println(\"Valid Commands:\")\n\tfmt.Println(\"(if not joined) join intro\")\n\tfmt.Println(\"(if not joined) join [port_number]\")\n\tfmt.Println(\"(if joined) leave\")\n\tfmt.Println(\"(if joined) members\")\n\tfmt.Println(\"(if joined) id\")\n\tfmt.Println(\"(if joined) gossip\")\n\tfmt.Println(\"(if joined) all-to-all\")\n\tfmt.Println(\"(if joined) put [filepath]\")\n\tfmt.Println(\"(if joined) get [filename]\")\n\tfmt.Println(\"(if joined) delete [filename]\")\n\tfmt.Println(\"(if joined) ls [filename]\")\n\tfmt.Println(\"(if joined) store\")\n\tfmt.Println(\"(all scenarios) exit\")\n}",
"func help(cfg cli.Config) {\n\tfmt.Printf(`\n usage:\n scan ip/host [option]\n options:\n -p port-range or port number Specified range or port number (default is %s)\n -c TCP connect scan (default is TCP SYN scan)\n -4 Force IPv4\n -6 Force IPv6\n example:\n scan 8.8.8.8 -p 53\n scan www.google.com -p 1-500\n scan freebsd.org -6\n\t`,\n\t\tcfg.Scan.Port)\n}",
"func verifyFlags(opt *options, fs *flag.FlagSet) {\n args := fs.Args()\n if len(args) > 0 {\n opt.Root = args[0]\n args = args[1:]\n }\n if len(args) > 0 {\n patterns := make([]string, len(args))\n for i := range args {\n patterns[i] = fmt.Sprintf(\"(%s)\", args[i])\n }\n opt.SpecPattern = strings.Join(patterns, \"|\")\n }\n}",
"func (c *Command) helpFlagsFor(f *flag.FlagSet) string {\n\thttpFlagsClient := c.httpFlagsClient(nil)\n\thttpFlagsServer := c.httpFlagsServer(nil)\n\n\tvar out bytes.Buffer\n\n\tfirstHTTP := true\n\tif c.hasClientHTTP() {\n\t\tif firstHTTP {\n\t\t\tprintTitle(&out, \"HTTP API Options\")\n\t\t\tfirstHTTP = false\n\t\t}\n\t\thttpFlagsClient.VisitAll(func(f *flag.Flag) {\n\t\t\tprintFlag(&out, f)\n\t\t})\n\t}\n\tif c.hasServerHTTP() {\n\t\tif firstHTTP {\n\t\t\tprintTitle(&out, \"HTTP API Options\")\n\t\t\tfirstHTTP = false\n\t\t}\n\t\thttpFlagsServer.VisitAll(func(f *flag.Flag) {\n\t\t\tprintFlag(&out, f)\n\t\t})\n\t}\n\n\tfirstCommand := true\n\tf.VisitAll(func(f *flag.Flag) {\n\t\t// Skip HTTP flags as they will be grouped separately\n\t\tif flagContains(httpFlagsClient, f) || flagContains(httpFlagsServer, f) || flagContains(c.hidden, f) {\n\t\t\treturn\n\t\t}\n\t\tif firstCommand {\n\t\t\tprintTitle(&out, \"Command Options\")\n\t\t\tfirstCommand = false\n\t\t}\n\t\tprintFlag(&out, f)\n\t})\n\n\treturn strings.TrimRight(out.String(), \"\\n\")\n}",
"func askOpt() bool {\n\tvar userOpt string\n\tfmt.Print(\"\\nWould you like to add more? \\n('y' or 'Y'): \")\n\tfmt.Scan(&userOpt)\n\tfmt.Print(\"\\n\")\n\tif userOpt == \"y\" || userOpt == \"Y\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func hints(s string) *cli.Hint {\n\tif s == \"hello\" {\n\t\t// string, color, bold\n\t\treturn &cli.Hint{\" World\", 35, false}\n\t}\n\treturn nil\n}",
"func help() {\n fmt.Fprintf(os.Stderr,\"\\nUSAGE: %s -sstart_page -eend_page [ -f | -llines_per_page ]\" +\n\t\" [ -ddest ] [ in_filename ]\\n\", progname)\n}",
"func ShowHelp() {\n\tfmt.Printf(\"%v\\n\", helpText)\n}",
"func validateArgs() {\n\tif *optionsEndpoint == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\tif *inputEndpoint == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n\tif *outputEndpoint == \"\" {\n\t\tflag.Usage()\n\t\tos.Exit(1)\n\t}\n}",
"func printAndExit(errorMsg string, printUsage bool) {\n\tif errorMsg != \"\" {\n\t\tfmt.Println(\"Error: \" + errorMsg)\n\t}\n\tif printUsage {\n\t\tfmt.Println()\n\t\tflag.Usage()\n\t}\n\tos.Exit(1)\n}",
"func help() {\n\tlog.Infoln(\"#: the number of the peer you want to connect to\")\n\tlog.Infoln(\"r: refresh peer list\")\n\tlog.Infoln(\"q: quit pcp\")\n\tlog.Infoln(\"?: this help message\")\n}",
"func checkArgs(c *cli.Context) (rtn int) {\n rtn = 0\n if c.NArg() < 3 {\n color.Red(\"Wrong Input.\")\n color.Red(\"Use mo sqlite3 <dbFullName> <UserToChange> <PasswordToSet>\")\n color.Red(\"Example: mo sqlite3 USER.DB admin 111111 \")\n rtn = 1\n return\n }\n\n _, err := os.Stat(c.Args().First())\n if err != nil {\n if os.IsNotExist(err) {\n color.Red(\"File %s does not exist.\", c.Args().First())\n rtn = 2\n return\n }\n }\n return\n\n}",
"func validateCommandSemanticsAndGenerateOutput(args ...string) (bool, string) {\n\tnewArgs := args\n\tif newArgs[0] == \"sudo\" && newArgs[1] == \"--preserve-env\" {\n\t\tnewArgs = newArgs[2:]\n\t}\n\n\terrMsg := fmt.Sprintf(\"Can't execute command '%v' when the guestfish status is %s\", strings.Join(args, \" \"), currentGuestfishStatus)\n\n\tif newArgs[1] == \"--listen\" {\n\t\tif Stopped == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Added\n\t\t\treturn true, \"GUESTFISH_PID=4513; export GUESTFISH_PID\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"run\" {\n\t\tif Added == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Started\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"findfs-label\" {\n\t\tif Started == currentGuestfishStatus {\n\t\t\treturn true, \"/dev/sda1\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"mount\" {\n\t\tif Started == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Mounted\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"mkdir-p\" {\n\t\tif Mounted == currentGuestfishStatus {\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"upload\" {\n\t\tif Mounted == currentGuestfishStatus {\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"umount-all\" {\n\t\tif Mounted == currentGuestfishStatus {\n\t\t\tcurrentGuestfishStatus = Started\n\t\t\treturn true, \"\"\n\t\t}\n\t\treturn false, errMsg\n\t}\n\n\tif newArgs[3] == \"exit\" {\n\t\tif Stopped < currentGuestfishStatus {\n\t\t\treturn true, \"\"\n\t\t}\n\n\t\treturn false, errMsg\n\t}\n\treturn true, \"\"\n}",
"func (c *Ping) Help() string {\n\treturn `Usage: PING [message] Returns PONG if no argument is provided, otherwise return a copy of the argument as a bulk.`\n}",
"func (c VerifyCmd) Help() string {\n\thelpText := `\nUsage: gtm verify <version-constraint>\n\n Check if gtm satisfies a Semantic Version 2.0 constraint.\n`\n\treturn strings.TrimSpace(helpText)\n}",
"func checkFlags() {\n\t// file flag is required\n\tif *filename == \"\" {\n\t\tlog.Fatalf(\"file is required\")\n\t}\n\n\tif *server == \"\" {\n\t\tlog.Fatalf(\"server is required\")\n\t}\n}",
"func usage() {\n\tfmt.Printf(\"%s\", helpString)\n}",
"func checkStringFlagReplaceWithUtilVersion(name string, arg string, compulsory bool) (exists bool) {\n\tvar hasArg bool\n\n\tif arg != \"\" {\n\t\texists = true\n\t}\n\n\t// Try to detect missing flag argument.\n\t// If an argument is another flag, argument has not been provided.\n\tif exists && !strings.HasPrefix(arg, \"-\") {\n\t\t// Option expecting an argument but has been followed by another flag.\n\t\thasArg = true\n\t}\n\t/*\n\t\twhere(fmt.Sprintf(\"-%s compulsory = %t\", name, compulsory))\n\t\twhere(fmt.Sprintf(\"-%s exists = %t\", name, exists))\n\t\twhere(fmt.Sprintf(\"-%s hasArg = %t\", name, hasArg))\n\t\twhere(fmt.Sprintf(\"-%s value = %s\", name, arg))\n\t*/\n\n\tif compulsory && !exists {\n\t\tfmt.Fprintf(os.Stderr, \"compulsory flag: -%s\\n\", name)\n\t\tprintUsage()\n\t\tos.Exit(2)\n\t}\n\n\tif exists && !hasArg {\n\t\tfmt.Fprintf(os.Stderr, \"flag -%s needs a valid argument (not: %s)\\n\", name, arg)\n\t\tprintUsage()\n\t\tos.Exit(3)\n\t}\n\n\treturn\n}",
"func (c *Authorize) Help(\n\tctx context.Context,\n) {\n\tout.Normf(\"\\nUsage: \")\n\tout.Boldf(\"warp authorize <username_or_token>\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\" Grants write access to a client of the current warp.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Errof(\" Be extra careful!\")\n\tout.Normf(\" Please make sure that the user you are granting write\\n\")\n\tout.Normf(\" access to is who you think they are. An attacker could take over your machine\\n\")\n\tout.Normf(\" in a split second with write access to one of your warps.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\" If the username of a user is ambiguous (multiple users connnected with the\\n\")\n\tout.Normf(\" same username), you must use the associated user token, as returned by the\\n\")\n\tout.Boldf(\" state\")\n\tout.Normf(\" command.\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Arguments:\\n\")\n\tout.Boldf(\" username_or_token\\n\")\n\tout.Normf(\" The username or token of a connected user.\\n\")\n\tout.Valuf(\" guest_JpJP50EIas9cOfwo goofy\\n\")\n\tout.Normf(\"\\n\")\n\tout.Normf(\"Examples:\\n\")\n\tout.Valuf(\" warp authorize goofy\\n\")\n\tout.Valuf(\" warp authorize guest_JpJP50EIas9cOfwo\\n\")\n\tout.Normf(\"\\n\")\n}",
"func (c Help) HandleHelp(ctx *multiplexer.Context) bool {\n\tctx.ChannelSend(\"Are you sure _you_ don't need help?\")\n\treturn true\n}",
"func (b *BotCommands) help(m *tb.Message) {\n\tb.Bot.Reply(m, `\n\tHello, So You Want To Search [Nyaa](https://nyaa.si/) huh? Let me help you with that. So, Here are the commands that you can use right now -\n\n\t/latest _<page no which should be a number>_\n\t*This command fetches you the latest animes available on nyaa on the page as certified.*\n\tNote - If no digit is specified, I fetch the 1 Page Results For You ~ UwU ~.\n\n\t/anime _<search term which can contain whitespaces>_\n\t*This command searches for the search term and fetches all the first page results for you.*\n\tNote - Remember, The term should atleast match for what you are looking for.\n\n\t`)\n}",
"func (o *Options) Usage() {\n\to.flags.Usage()\n}",
"func displayHelp(subcommand ...string) {\n\tswitch subcommand[0] {\n\tcase \"\":\n\t\tfmt.Println(MainHelp)\n\tcase \"run\":\n\t\tfmt.Println(RunHelp)\n\tcase \"build\":\n\t\tfmt.Println(BuildHelp)\n\tcase \"test\":\n\t\tfmt.Println(TestHelp)\n\tcase \"deps\":\n\t\tfmt.Println(DepsHelp)\n\tdefault:\n\t\tfmt.Println(MainHelp)\n\t}\n}",
"func checkNoArguments(_ *cobra.Command, args []string) error {\n\tif len(args) > 0 {\n\t\treturn errors.New(\"this command doesn't support any arguments\")\n\t}\n\n\treturn nil\n}",
"func PrintHelp() {\n\tfmt.Printf(\"[+] Author: brax (https://github.com/braaaax/gfz)\\n\")\n\tfmt.Printf(\"\\nUsage: gfz [options] <url>\\n\")\n\tfmt.Printf(\"Keyword: FUZZ, ..., FUZnZ wherever you put these keywords gfuzz will replace them with the values of the specified payload.\\n\\n\")\n\tfmt.Printf(\"Options:\\n\")\n\tfmt.Println(\"-h/--help : This help.\")\n\tfmt.Println(\"-w wordlist : Specify a wordlist file (alias for -z file,wordlist).\")\n\tfmt.Println(\"-z file/range/list,PAYLOAD : Where PAYLOAD is FILENAME or 1-10 or \\\"-\\\" separated sequence.\")\n\tfmt.Println(\"--hc/hl/hw/hh N[,N]+ : Hide responses with the specified code, lines, words, or chars.\")\n\tfmt.Println(\"--sc/sl/sw/sh N[,N]]+ : Show responses with the specified code, lines, words, or chars.\")\n\tfmt.Println(\"-t N : Specify the number of concurrent connections (10 default).\")\n\tfmt.Println(\"--post : Specify POST request method.\")\n\tfmt.Println(\"--post-form key=FUZZ : Specify form value eg key=value.\")\n\t// fmt.Println(\"--post-multipart file.FUZZ : Fuzz filename for file uploads.\")\n\tfmt.Println(\"-p IP:PORT : Specify proxy.\") // TODO: need better cmdline parse for two URLs\n\tfmt.Println(\"-b COOKIE : Specify cookie.\")\n\tfmt.Println(\"-ua USERAGENT : Specify user agent.\")\n\tfmt.Println(\"--password PASSWORD : Specify password for basic web auth.\")\n\tfmt.Println(\"--username USERNAME : Specify username.\")\n\tfmt.Println(\"--no-follow : Don't follow HTTP(S) redirections.\")\n\tfmt.Println(\"--no-color : Monotone output. (use for windows\")\n\tfmt.Println(\"--print-body : Print response body to stdout.\")\n\tfmt.Println(\"-k : Strict TLS connections (skip verify=false opposite of curl).\")\n\tfmt.Println(\"-q : No output.\")\n\tfmt.Println(\"-H : Add headers. (e.g. Key:Value)\")\n\tfmt.Printf(\"\\n\")\n\tfmt.Println(\"Examples: gfz -w users.txt -w pass.txt --sc 200 http://www.site.com/log.asp?user=FUZZ&pass=FUZ2Z\")\n\tfmt.Println(\" gfz -z file,default/common.txt -z list,-.php http://somesite.com/FUZZFUZ2Z\")\n\tfmt.Println(\" gfz -t 32 -w somelist.txt https://someTLSsite.com/FUZZ\")\n\tfmt.Println(\" gfz --print-body --sc 200 --post-form \\\"name=FUZZ\\\" -z file,somelist.txt http://somesite.com/form\")\n\tfmt.Println(\" gfz --post -b mycookie -ua normalbrowser --username admin --password FUZZ -z list,admin-password http://somesite.com\")\n}",
"func isOption(inString string) bool {\r\n\tvar rx *regexp.Regexp = regexp.MustCompile(`^--?`)\r\n\treturn rx.MatchString(inString)\r\n}",
"func usage(writer io.Writer, cfg *CmdConfig) {\n\tflags := flagSet(\"<global options help>\", cfg)\n\tflags.SetOutput(writer)\n\tflags.PrintDefaults()\n}",
"func (fv *EnumSetCSV) Help() string {\n\tseparator := \",\"\n\tif fv.Separator != \"\" {\n\t\tseparator = fv.Separator\n\t}\n\tif fv.CaseSensitive {\n\t\treturn fmt.Sprintf(\"%q-separated list of values from %v (case-sensitive)\", separator, fv.Choices)\n\t}\n\treturn fmt.Sprintf(\"%q-separated list of values from %v\", separator, fv.Choices)\n}",
"func (c *Commands) FlagDemo(m *discordgo.MessageCreate, f *arguments.Flag) error {\n\tvar fs = arguments.NewFlagSet()\n\n\topt := fs.Bool(\"opt\", false, \"\")\n\tstr := fs.String(\"str\", \"\", \"\")\n\n\tif err := f.With(fs); err != nil {\n\t\treturn errors.Wrap(err, \"Invalid flags\")\n\t}\n\n\targs := fs.Args()\n\n\treturn c.Context.Send(m.ChannelID, fmt.Sprintf(\n\t\t`opt: %v, str: \"%s\", args: %v`,\n\t\t*opt, *str, args),\n\t)\n}",
"func overrideFlagUsageText(f *flag.FlagSet) {\n\n\tf.Usage = func() {\n\t\tfmt.Println(\"\\nCross-Origin Resource Sharing Interrogator (CORSI) v\" + version + \" by Superhac\")\n\t\tfmt.Println(\"Usage: cori [OPTION]... [url]\")\n\t\tflag.PrintDefaults()\n\t}\n}"
] | [
"0.6840775",
"0.66086334",
"0.65521866",
"0.6406479",
"0.6251193",
"0.6246326",
"0.62193525",
"0.6139682",
"0.61359525",
"0.6126167",
"0.609346",
"0.60887957",
"0.60883915",
"0.6087776",
"0.60725486",
"0.60713327",
"0.60685796",
"0.6034294",
"0.60335374",
"0.6030243",
"0.60168606",
"0.59630805",
"0.5959379",
"0.5936697",
"0.5930347",
"0.59040135",
"0.5902092",
"0.5899907",
"0.5873371",
"0.5870853",
"0.58648485",
"0.58623075",
"0.5858375",
"0.58525413",
"0.58519906",
"0.58473533",
"0.583587",
"0.5835516",
"0.58317345",
"0.58277476",
"0.58045876",
"0.57997215",
"0.57931256",
"0.57929516",
"0.57905555",
"0.5781855",
"0.5769444",
"0.57614213",
"0.5759189",
"0.5756969",
"0.57556194",
"0.57302684",
"0.57217234",
"0.5719126",
"0.57185626",
"0.5705286",
"0.57047033",
"0.5696092",
"0.5690381",
"0.56897086",
"0.5684691",
"0.5682931",
"0.5664892",
"0.56586564",
"0.56586564",
"0.5655342",
"0.5654294",
"0.56494695",
"0.56451553",
"0.5631688",
"0.5611064",
"0.5605391",
"0.55927444",
"0.55906886",
"0.55862993",
"0.5582766",
"0.557061",
"0.5570101",
"0.5569739",
"0.5566449",
"0.5554257",
"0.554802",
"0.5545619",
"0.5539224",
"0.5534935",
"0.5534001",
"0.5526001",
"0.55131704",
"0.5510545",
"0.5509422",
"0.55092883",
"0.5508957",
"0.55046993",
"0.55039483",
"0.5492344",
"0.5490744",
"0.5490264",
"0.54893124",
"0.5487741",
"0.5486549"
] | 0.5896184 | 28 |
open files taken from argv | func openFiles(argv []string) (*os.File, *os.File) {
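	// Open the input file (argv[0]) for reading and create the output file (argv[1]); panics on either failure.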
inFile, err := os.Open(argv[0])
if err != nil {
panic(err)
}
outFile, err := os.Create(argv[1])
if err != nil {
		inFile.Close() // close the already-opened input file before panicking
panic(err)
}
return inFile, outFile
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func readfile(fileCount int) {\n\n for i := 0; i < fileCount; i++ {\n\n f := flag.Arg(i)\n\n if !fileexists(f) {\n fmt.Println(f, \"Could not be found... skipping\")\n continue\n }\n\n r, e := os.Open(f)\n\n if e != nil {\n fmt.Println(\"Could not open file \" + f, \"... skipping\")\n continue\n }\n\n readinput(r, f)\n }\n}",
"func main() {\n\tif len(os.Args) < 2 {\n\t\tcat(os.Stdin)\n\t\treturn\n\t}\n\n\tfor i := 1; i < len(os.Args); i++ {\n\t\tf, err := os.Open(os.Args[i])\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcat(f)\n\t\tf.Close()\n\t}\n}",
"func OpenFile(name string, flag int, perm os.FileMode,) (*os.File, error)",
"func (s *perfSuite) openGlob(pattern string) (readers []io.Reader) {\n\tassert := s.NewAssert()\n\n\ts.Pause(func() {\n\t\tglob, err := filepath.Glob(pattern)\n\t\tassert.NoError(err)\n\t\treaders = make([]io.Reader, len(glob))\n\t\tfor i, m := range glob {\n\t\t\tr, err := os.Open(m)\n\t\t\tassert.NoError(err)\n\t\t\treaders[i] = r\n\t\t}\n\t})\n\treturn\n}",
"func openFile(fileName string) (*os.File, error) {\n\n\t// Determine the path to where the application is running.\n\tex, err := os.Executable()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\texPath := path.Dir(ex)\n\n\t// Open the file in the directory where the application is running and the fileName provided.\n\treturn os.Open(fmt.Sprintf(\"%s/%s\", exPath, fileName))\n}",
"func fileopen(name string) {\n\tf, er := os.Open(name)\n\n\t//er will be nil if the file exists else it returns an error object\n\tif er != nil {\n\t\tfmt.Println(er)\n\t\treturn\n\t} else {\n\t\tfmt.Println(\"file opened\", f.Name())\n\t}\n}",
"func (c *Command) open() {\n\tif len(c.parsed) <= 1 {\n\t\treturn\n\t}\n\tc.openFile(c.parsed[1:])\n\tc.done()\n}",
"func (s service) openFile(names *[]person.Names) error {\n\tf, err := os.Open(s.args[0])\n\tif err != nil {\n\t\treturn errors.New(\"error opening the file\")\n\t}\n\tdefer f.Close()\n\n\t// Read in line by line\n\tscanner := bufio.NewScanner(f)\n\tscanner.Split(bufio.ScanLines)\n\n\tfor scanner.Scan() {\n\t\t// send to get decoded\n\t\tp := &person.Names{}\n\t\ts.serializer.Decode(scanner.Bytes(), p)\n\t\t*names = append(*names, *p)\n\t}\n\n\tif len(*names) < 1 {\n\t\treturn errors.New(\"not able to get names from file, is it empty or not formatted correctly?\")\n\t}\n\n\treturn nil\n}",
"func tryOpen(filenames []string) (*os.File, error) {\n\tfor _, v := range filenames {\n\t\tf, err := os.Open(v)\n\t\tif err == nil {\n\t\t\treturn f, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"cannot find file %q\", filenames)\n}",
"func Open(name string) (*os.File, error)",
"func main() {\n\targs := os.Args\n\tif len(args) <= 1 {\n\t\tfmt.Println(\"Please provide a path to a file\")\n\t\tos.Exit(-1)\n\t}\n\tfp := args[1]\n\tbs, err := ioutil.ReadFile(fp)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n\tfmt.Println(string(bs))\n\n}",
"func OpenFileExplorer(pkg string) {\n\tDefaultRunner.Run(cmdOpenFileExplorer[0], AbsPath(pkg))\n}",
"func (localFileDriver) open(file string) (io.ReadCloser, error) {\n\treturn os.Open(file)\n}",
"func openToProcess(filespec *FileProcessSpecs) (*os.File, *os.File, func()) {\n\n\tfiles := map[string]*os.File{\n\t\t\"infile\": os.Stdin,\n\t\t\"outfile\": os.Stdout,\n\t}\n\n\tif filespec.inpath != \"\" {\n\t\tinfile, err := os.Open(filespec.inpath)\n\t\tfiles[\"infile\"] = infile\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tif filespec.outpathtmp {\n\t\toutfile, err := ioutil.TempFile(\"./\", filespec.outpath)\n\t\tfiles[\"outfile\"] = outfile\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tfilespec.outpath = outfile.Name()\n\n\t} else if filespec.outpath != \"\" {\n\t\toutfile, err := os.Create(filespec.outpath)\n\t\tfiles[\"outfile\"] = outfile\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tcloseFunc := func() {\n\t\tfiles[\"infile\"].Close()\n\t\tfiles[\"outfile\"].Close()\n\t}\n\treturn files[\"infile\"], files[\"outfile\"], closeFunc\n}",
"func openTemplateFile() (fp *os.File, err error) {\n\tfp, err = os.Open(*templateArg)\n\treturn\n}",
"func openat(directory int, path string, flags int, mode uint32) (int, error) {\n\treturn unix.Openat(directory, path, flags, mode)\n}",
"func openFile()( lines []string) {\n\tfile, err := os.Open(\"./eng.txt\")\n\tif err != nil {\n\t\tfmt.Print(err)\n\t\treturn nil\n\t}\n\tdefer file.Close()\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tlines = append(lines, scanner.Text())\n\t}\n\treturn\n}",
"func open(path string) {\n\tif err := exec.Command(\"/usr/bin/open\", path).Run(); err != nil {\n\t\tfmt.Printf(\"report.Open err: %x\\n\", err.Error())\n\t}\n}",
"func (c *Command) context() {\n\tfor _, arg := range c.Args {\n\t\tmatches := validFile.FindAllString(arg, -1)\n\t\tc.openFile(matches)\n\t}\n}",
"func (realFS) Open(name string) (File, error) { return os.Open(name) }",
"func (pieceMgr *PieceMgr) openFiles(sessionInfo *TrntSessionInfo) bool {\n\tvar er error\n\tvar fileNames []string\n\n\t// Build a list of file names\n\tif len(sessionInfo.metaInfo.Info.Name) > 0 {\n\t\tfileNames = append(fileNames, sessionInfo.metaInfo.Info.Name)\n\t} else {\n\t\tfor _, fileInfo := range sessionInfo.metaInfo.Info.Files {\n\t\t\tfileNames = append(fileNames, fileInfo.Path[0])\n\t\t}\n\t}\n\n\tif len(fileNames) <= 0 {\n\t\tlog.Println(DebugGetFuncName(), \"No files found\")\n\t\treturn false\n\t}\n\n\t// Open these files\n\tpieceMgr.Files = make([]*os.File, len(fileNames))\n\tfor i, f := range fileNames {\n\t\tif pieceMgr.Files[i], er = os.Open(f); er != nil {\n\t\t\tpieceMgr.Files[i], er = os.Create(f)\n\t\t}\n\t\tif er != nil {\n\t\t\tfor j := 0; j < i; j++ {\n\t\t\t\tpieceMgr.Files[j].Close()\n\t\t\t}\n\t\t\tlog.Println(DebugGetFuncName(), er)\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func open(path string, openmode int, perm uint32) (FileDescriptor, error) {\n\tfdint, err := syscall.Open(path, openmode, perm)\n\treturn FileDescriptor(fdint), err\n}",
"func commandLineFiles(fnames []string) []string {\n if runtime.GOOS == \"windows\" {\n args := make([]string, 0, len(fnames))\n\n for _, fname := range fnames {\n if matches, err := filepath.Glob(fname); err != nil {\n // not a valid pattern\n args = append(args, fname)\n } else if matches != nil {\n // at least one match\n args = append(args, matches...)\n }\n }\n\n return args\n }\n\n // not a Windows OS\n return fnames\n}",
"func openFile(fpath string) (*os.File, os.FileInfo, error) {\n\t// Open file\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// get file info\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// do not serve directory\n\tif fi.IsDir() {\n\t\treturn nil, nil, os.ErrNotExist\n\t}\n\n\treturn f, fi, nil\n}",
"func openFile(filePath string, flags int) *os.File {\n\tf, err := os.OpenFile(filePath, flags, 0644)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error opening file: %s\", err))\n\t}\n\treturn f\n}",
"func (p *Process) OpenFiles() ([]OpenFilesStat, error) {\n\treturn p.OpenFilesWithContext(context.Background())\n}",
"func main() {\n\tinitFlag()\n\tif flag.NArg() < 1 {\n\t\tflag.Usage()\n\t}\n\n\tfor _, arg := range flag.Args() {\n\t\tif err := processFile(arg); err != nil {\n\t\t\tfmt.Fprintln(os.Stderr, err)\n\t\t}\n\t}\n}",
"func getInfile(filename string) *os.File {\n\tf, err := os.Open(filename)\n\tcheck(err)\n\treturn f\n}",
"func (fs serveFiles) Open(name string) (http.File, error) {\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tname = filepath.Join(cwd, filepath.FromSlash(path.Clean(\"/\"+name)))\n\tfor _, fn := range fs.files {\n\t\tfn = filepath.Join(cwd, filepath.FromSlash(path.Clean(\"/\"+fn)))\n\t\tif name == fn {\n\t\t\tf, err := os.Open(fn)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlog.Debugf(\"Serving: %s\", fn)\n\t\t\treturn f, nil\n\t\t}\n\t}\n\treturn nil, fmt.Errorf(\"File %s not found\", name)\n}",
"func (h *Harvester) open() (encoding.Encoding, error) {\n\t// Special handling that \"-\" means to read from standard input\n\tif h.Path == \"-\" {\n\t\treturn h.openStdin()\n\t}\n\treturn h.openFile()\n}",
"func openTestFile(t *testing.T, fname string) io.Reader {\n\tf, err := os.Open(fname)\n\tassert.NoError(t, err, `Couldn't open file \"%s\"`, fname)\n\treturn f\n}",
"func (fs defaultFS) Open(name string) (File, error) { return os.Open(name) }",
"func openStdinOrFile() io.Reader {\n\tvar err error\n\tr := os.Stdin\n\tif len(os.Args) > 1 {\n\t\tr, err = os.Open(os.Args[1])\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn r\n}",
"func openFileAt(realPath string, offset int64, whence int) (fp *os.File, reader *bufio.Reader, err error) {\n\tif fp, err = os.Open(realPath); err != nil {\n\t\treturn\n\t}\n\n\tif _, err = fp.Seek(offset, whence); err != nil {\n\t\terr = errors.New(\"seek file failed: \" + realPath)\n\t\treturn\n\t}\n\n\treader = bufio.NewReaderSize(fp, 16*1024)\n\treturn\n}",
"func input() (io.Reader, error) {\n\targs := flag.Args()\n\tswitch len(args) {\n\tcase 0:\n\t\tfi, _ := os.Stdin.Stat()\n\t\tif (fi.Mode() & os.ModeCharDevice) != 0 {\n\t\t\tbreak\n\t\t}\n\t\treturn os.Stdin, nil\n\tcase 1:\n\t\treturn os.Open(args[0])\n\t}\n\treturn nil, errors.New(\"usage: etod [opts] <file> or cmd | etod\")\n}",
"func openTestFile(name, content string) (file *os.File, dir string, err error) {\n\tdir, err = ioutil.TempDir(\"\", \"pr-test\")\n\tif err != nil {\n\t\treturn nil, dir, err\n\t}\n\n\tfile, err = os.OpenFile(path.Join(dir, name), os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)\n\tif err != nil {\n\t\treturn nil, dir, err\n\t}\n\n\tfmt.Fprint(file, content)\n\n\t// close and re-open the file to keep file.Stat() happy\n\tfile.Close()\n\tfile, err = os.Open(file.Name())\n\tif err != nil {\n\t\treturn nil, dir, err\n\t}\n\n\treturn file, dir, err\n}",
"func Open(filenames []string, appendF func([]byte)) (string, error) {\n\tif len(filenames) == 1 {\n\t\treturn \"\", nil\n\t}\n\tfilename := filenames[1]\n\tfd, er := os.Open(filename)\n\tif er != nil {\n\t\treturn filename, er\n\t}\n\tdefer fd.Close()\n\tfp := bufio.NewReader(fd)\n\n\tvar err error\n\tfor line, err := fp.ReadBytes('\\n'); err == nil; line, err = fp.ReadBytes('\\n') {\n\t\t// Trim trailing newlines and carriage returns\n\t\tfor c := line[len(line)-1]; len(line) > 0 && (c == '\\n' || c == '\\r'); {\n\t\t\tline = line[:len(line)-1]\n\t\t\tif len(line) > 0 {\n\t\t\t\tc = line[len(line)-1]\n\t\t\t}\n\t\t}\n\t\tappendF(line)\n\t}\n\n\tif err != nil && err != io.EOF {\n\t\treturn \"\", err\n\t}\n\n\treturn filename, nil\n}",
"func Open(name *byte, mode, perm int32) int32",
"func OpenFile(path string) (*os.File, error) {\n return OpenFilePerm(path, DEFAULT_PERM)\n}",
"func OpenFileInEditor(filename string) (err error) {\n\teditor := os.Getenv(\"EDITOR\")\n\tif editor == \"\" {\n\t\teditor = \"nvim\"\n\t}\n\texe, err := exec.LookPath(editor)\n\tif err != nil {\n\t\treturn err\n\t}\n\tcmd := exec.Command(exe, filename)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\treturn cmd.Run()\n}",
"func main() {\n\tflag.Parse()\n\tvar files []string\n\tif flag.NArg() == 0 {\n\t\tfiles = append(files, stdinPlaceholder)\n\t}\n\tfor i := 0; i < flag.NArg(); i++ {\n\t\tfiles = append(files, flag.Arg(i))\n\t}\n\n\thandleError(Cat(files, os.Stdout))\n}",
"func open(uri string) error {\n\trun, ok := commands[runtime.GOOS]\n\tif !ok {\n\t\treturn fmt.Errorf(\"don't know how to open things on %s platform\", runtime.GOOS)\n\t}\n\n\tcmd := exec.Command(run, uri)\n\treturn cmd.Start()\n}",
"func (osFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }",
"func open(url string) error {\n\treturn exec.Command(\"open\", url).Start()\n}",
"func OpenFromPaths(obj any, file string, paths []string) error {\n\tfilename, err := dirs.FindFileOnPaths(paths, file)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\t// _, err = toml.DecodeFile(fp, obj)\n\tfp, err := os.Open(filename)\n\tdefer fp.Close()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\treturn Read(obj, bufio.NewReader(fp))\n}",
"func readFiles(ctx context.Context, fs source.FileSource, uris []span.URI) (_ []source.FileHandle, err error) {\n\tfhs := make([]source.FileHandle, len(uris))\n\tfor i, uri := range uris {\n\t\tfhs[i], err = fs.ReadFile(ctx, uri)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn fhs, nil\n}",
"func findSrc() (io.Reader, error) {\n\tif isPiped() {\n\t\treturn os.Stdin, nil\n\t}\n\treturn os.Open(os.Args[1])\n}",
"func FileOpen(f *zip.File,) (io.ReadCloser, error)",
"func (fs osFsEval) Open(path string) (*os.File, error) {\n\treturn os.Open(path)\n}",
"func Open(path string) (Scanner, error) {\n\ttoks, err := lexer.ParseFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &scanner{toks: toks}, nil\n}",
"func openOutputFile() (filepointer *os.File, filename string, err error) {\n\n\tif len(*outputArg) > 0 {\n\t\tfilename = *outputArg\n\n\t} else {\n\t\t// if no cli outputFile provided,\n\t\ttemplateFile, err := os.Open(*templateArg)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", errors.New(\"cannot open templateFile \" + *templateArg)\n\t\t}\n\t\tdefer templateFile.Close()\n\n\t\t// match outputFile pattern, e.g. {{ #output: /tmp/output.txt }}\n\t\toutputFilePattern := regexp.MustCompile(\"{{[ ]{0,}#output: (.+?)}}\")\n\n\t\t// read first line and extract outputFile pattern\n\t\tscannerOutputFile := bufio.NewScanner(templateFile)\n\t\tscannerOutputFile.Scan()\n\t\tfirstLine := scannerOutputFile.Text()\n\t\tfirstLineGroups := outputFilePattern.FindStringSubmatch(firstLine)\n\n\t\t// if successful, use it as *outputArg, otherwise abort\n\t\tif len(firstLineGroups) > 0 && len(firstLineGroups[1]) > 0 {\n\t\t\tfilename = strings.Trim(firstLineGroups[1], \" \")\n\t\t} else {\n\t\t\treturn nil, \"\", errors.New(\"no output file specified, specify either --output argument or within template\")\n\t\t}\n\t}\n\n\tfilepointer, err = os.Create(filename)\n\tif err != nil {\n\t\treturn nil, \"\", errors.New(\"cannot create outputFile \" + filename)\n\t}\n\treturn\n}",
"func (fs *iofs) OpenFile(name string, flag int, perm os.FileMode) (FileManager, error) {\n\tf, err := os.OpenFile(name, flag, perm)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tiof := &IOFile{f}\n\treturn iof, err\n}",
"func OpenFileExeDir(pathSupplement string) (*os.File, error) {\n\ted, err := ExeDir()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfp := filepath.Join(ed, pathSupplement)\n\tfyle, err := os.Open(fp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn fyle, err\n}",
"func (fsOnDisk) Open(name string) (File, error) { return os.Open(name) }",
"func parseArgs(args ...string) (*os.File, error) {\n\tif args != nil { // We only care about args[0], but using ...string allows args to be omitted\n\t\tpath := strings.Replace(strings.TrimSpace(args[0]), \"\\\\\", \"/\", -1)\n\t\t// Creates path to log file if it does not already exist\n\t\tif strings.Contains(path, \"/\") {\n\t\t\tif err := os.MkdirAll(path[0:strings.LastIndex(path, \"/\")], 0777); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t\t// Open log file\n\t\treturn os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0666)\n\t} else {\n\t\treturn os.Stdout, nil\n\t}\n}",
"func (osh *SystemHandler) OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {\n\treturn os.OpenFile(name, flag, perm)\n}",
"func (fo FileOperator) Open(file string) {\n\trelPath := mountedFilePath(fo.MountPoint, file)\n\tcmd := getOpenCmd(relPath)\n\n\tlog.Printf(\"Opening file %s\", relPath)\n\n\tif err := cmd.Run(); err != nil {\n\t\tlog.Printf(\"Error Opening file %s: %s\", relPath, err)\n\t}\n}",
"func (p *Parser) runFromFile(context value.Context, name string) {\n\tfd, err := os.Open(name)\n\tif err != nil {\n\t\tp.errorf(\"%s\", err)\n\t}\n\tp.runFromReader(context, name, fd, true)\n}",
"func (fs *EmbedFs) Open(path string) (file, error) {\n\tpath = filepath.Join(\"/\", path)\n\n\tif !fs.IsFileExist(path) {\n\t\treturn nil, ErrNoExist\n\t}\n\n\treturn &embedFileReader{\n\t\tstart: fs.index[path].offset,\n\t\tlength: fs.index[path].header.Size,\n\t\tsource: fs.origin,\n\t\tname: path,\n\t}, nil\n}",
"func For(proc LineProc) error {\n\tif !flag.Parsed() {\n\t\tflag.Parse()\n\t}\n\targs := flag.Args()\n\tif len(args) == 0 {\n\t\tFileNum = 0\n\t\tFilename = \"\"\n\t\treturn procReader(proc, os.Stdin)\n\t}\n\tfor i, name := range args {\n\t\tFileNum = i + 1\n\t\tif err := procFile(proc, name); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func onFileOpen(filename string) {\n\t//\n}",
"func open(url string) error {\n\treturn exec.Command(\"/usr/bin/open\", url).Start()\n}",
"func newIter(fname string) (*iter, error) {\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &iter{\n\t\tbufio.NewReader(f),\n\t}, nil\n}",
"func Map(f *os.File) (File, error) { return ifile.Open(f) }",
"func OpenFileInEditor(filename string, resolveEditor PreferredEditorResolver) error {\n\t// Get the full executable path for the editor.\n\texecutable, err := exec.LookPath(resolveEditor())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := exec.Command(executable, resolveEditorArguments(executable, filename)...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\treturn cmd.Run()\n}",
"func open(filename string) bool {\n\tvar err error\n\tfiledata, err = ioutil.ReadFile(filename)\n\tif err != nil {\n\t\tfmt.Printf(\"Error reading file %s: %v\\n\", filename, err)\n\t\treturn false\n\t}\n\tfmt.Printf(\"Loaded %d bytes from file %s\\n\", len(filedata), filename)\n\treturn true\n}",
"func openFile(file string, fp func(*os.File) (interface{}, error)) (ret interface{}, err error) {\n\tvar f *os.File\n\tf, err = os.Open(file)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\treturn fp(f)\n}",
"func open(url string) error {\n\tvar cmd string\n\tvar args []string\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tcmd = \"cmd\"\n\t\targs = []string{\"/c\", \"start\"}\n\tcase \"darwin\":\n\t\tcmd = \"open\"\n\tdefault: // \"linux\", \"freebsd\", \"openbsd\", \"netbsd\"\n\t\tcmd = \"xdg-open\"\n\t}\n\targs = append(args, url)\n\treturn exec.Command(cmd, args...).Start()\n}",
"func open(url string) error {\n\tvar cmd string\n\tvar args []string\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tcmd = \"cmd\"\n\t\targs = []string{\"/c\", \"start\"}\n\tcase \"darwin\":\n\t\tcmd = \"open\"\n\tdefault: // \"linux\", \"freebsd\", \"openbsd\", \"netbsd\"\n\t\tcmd = \"xdg-open\"\n\t}\n\targs = append(args, url)\n\treturn exec.Command(cmd, args...).Start()\n}",
"func open(url string) error {\n\tvar cmd string\n\tvar args []string\n\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\tcmd = \"cmd\"\n\t\targs = []string{\"/c\", \"start\"}\n\tcase \"darwin\":\n\t\tcmd = \"open\"\n\tdefault: // \"linux\", \"freebsd\", \"openbsd\", \"netbsd\"\n\t\tcmd = \"xdg-open\"\n\t}\n\targs = append(args, url)\n\treturn exec.Command(cmd, args...).Start()\n}",
"func Open(url string, options *Options) (*File, error) {\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\treturn OpenContext(ctx, url, options)\n}",
"func Open(capsule string) {\n\tFile, _ := ioutil.ReadFile(capsule)\n\tsource = string(File)\n\tload()\n}",
"func execParseFiles(arity int, p *gop.Context) {\n\targs := p.GetArgs(arity)\n\tconv := func(args []interface{}) []string {\n\t\tret := make([]string, len(args))\n\t\tfor i, arg := range args {\n\t\t\tret[i] = arg.(string)\n\t\t}\n\t\treturn ret\n\t}\n\tret, ret1 := template.ParseFiles(conv(args[0:])...)\n\tp.Ret(arity, ret, ret1)\n}",
"func getInputfile(args []string) (string, error) {\n\tvar filename = args[1]\n\n\tmatch, regexpErr := regexp.MatchString(core.RegexpFilename, filename)\n\n\tif regexpErr != nil {\n\t\treturn \"\", regexpErr\n\t}\n\n\tif match == false {\n\t\treturn \"\", errors.New(\"Not a CSV file\")\n\t}\n\n\treturn filename, nil\n}",
"func Open(fpath string, options ...ReadOption) (*Reader, error) {\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := header{}\n\tif err := h.read(f); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &Reader{header: h}\n\tfor _, option := range options {\n\t\toption(r)\n\t}\n\n\tif h.version == 0 {\n\t\tf.Close()\n\n\t\tvoOpts := []file_v0.ReadOption{}\n\t\tif r.interceptor != nil {\n\t\t\tvoOpts = append(voOpts, file_v0.ReadIntercept(r.interceptor))\n\t\t}\n\t\tif r.cacheIndex {\n\t\t\tvoOpts = append(voOpts, file_v0.CacheIndex())\n\t\t}\n\t\tvr, err := file_v0.Open(fpath, voOpts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.file_v0 = vr\n\t\treturn r, nil\n\t}\n\n\tif r.cacheIndex {\n\t\tr.cacheOffsets(f)\n\t}\n\n\tch := make(chan *os.File, 10)\n\tr.readers = ch\n\tch <- f\n\tfor i := 0; i < 9; i++ {\n\t\tf, err := os.Open(fpath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tch <- f\n\t}\n\n\treturn r, nil\n}",
"func (h *fs) Open(filename string) (io.ReadCloser, error) {\n\treturn os.Open(filename)\n}",
"func openWithBackoff(fileName string) (*os.File, error) {\n\tvar (\n\t\tfileNamePtr *uint16\n\t\tfileHandle syscall.Handle\n\t\terr error\n\t)\n\tfileNamePtr, err = syscall.UTF16PtrFromString(fileName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%v: failed to open file: path contains an illegal character\", fileName)\n\t}\n\tfor i := 1; i <= 3; i++ {\n\t\t// We cannot use os.Open(), because we must set FILE_SHARE_ flags to avoid\n\t\t// \"the file is being used by another program\" errors.\n\t\t// Despite its name, CreateFile() will not create a new file if called with the OPEN_EXISTING flag.\n\t\tfileHandle, err = syscall.CreateFile(\n\t\t\tfileNamePtr,\n\t\t\tsyscall.GENERIC_READ,\n\t\t\tuint32(syscall.FILE_SHARE_READ|syscall.FILE_SHARE_WRITE|syscall.FILE_SHARE_DELETE),\n\t\t\tnil,\n\t\t\tsyscall.OPEN_EXISTING,\n\t\t\tsyscall.FILE_ATTRIBUTE_NORMAL,\n\t\t\t0)\n\t\tif err == nil {\n\t\t\treturn os.NewFile(uintptr(fileHandle), fileName), nil\n\t\t}\n\t\ttime.Sleep(time.Duration(i*125) * time.Millisecond)\n\t}\n\n\t// The fileTailer will check if the file exists using os.IsNotExists(err)\n\t// Return an error that can be used with os.IsNotExists().\n\terrno, ok := err.(syscall.Errno)\n\tif ok {\n\t\treturn nil, &os.PathError{\n\t\t\tOp: \"open\",\n\t\t\tPath: fileName,\n\t\t\tErr: errno,\n\t\t}\n\t} else {\n\t\treturn nil, fmt.Errorf(\"%v: failed to open file: %v\", fileName, err)\n\t}\n}",
"func GetOpenFile(file string) (*os.File, error) {\n\tvar fullpath = file\n\tif !filepath.IsAbs(fullpath) {\n\t\tdir, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfullpath = filepath.Join(dir, file)\n\t}\n\treturn os.Open(fullpath)\n}",
"func open(path string, flag int, perm os.FileMode) (*os.File, error) {\n\tif path == \"\" {\n\t\treturn nil, syscall.ERROR_FILE_NOT_FOUND\n\t}\n\n\tpathp, err := syscall.UTF16PtrFromString(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar access uint32\n\tswitch flag {\n\tcase syscall.O_RDONLY:\n\t\taccess = syscall.GENERIC_READ\n\tcase syscall.O_WRONLY:\n\t\taccess = syscall.GENERIC_WRITE\n\tcase syscall.O_RDWR:\n\t\taccess = syscall.GENERIC_READ | syscall.GENERIC_WRITE\n\tcase syscall.O_RDWR | syscall.O_CREAT:\n\t\taccess = syscall.GENERIC_ALL\n\tcase syscall.O_WRONLY | syscall.O_CREAT:\n\t\taccess = syscall.GENERIC_ALL\n\t}\n\n\tif flag&syscall.O_APPEND != 0 {\n\t\taccess &^= syscall.GENERIC_WRITE\n\t\taccess |= syscall.FILE_APPEND_DATA\n\t}\n\n\tvar sa *syscall.SecurityAttributes\n\tif flag&syscall.O_CLOEXEC == 0 {\n\t\tsa = makeInheritSa()\n\t}\n\n\tvar createflag uint32\n\tswitch {\n\tcase flag&(syscall.O_CREAT|syscall.O_EXCL) == (syscall.O_CREAT | syscall.O_EXCL):\n\t\tcreateflag = syscall.CREATE_NEW\n\tcase flag&(syscall.O_CREAT|syscall.O_TRUNC) == (syscall.O_CREAT | syscall.O_TRUNC):\n\t\tcreateflag = syscall.CREATE_ALWAYS\n\tcase flag&syscall.O_CREAT == syscall.O_CREAT:\n\t\tcreateflag = syscall.OPEN_ALWAYS\n\tcase flag&syscall.O_TRUNC == syscall.O_TRUNC:\n\t\tcreateflag = syscall.TRUNCATE_EXISTING\n\tdefault:\n\t\tcreateflag = syscall.OPEN_EXISTING\n\t}\n\n\tshareflag := uint32(syscall.FILE_SHARE_READ | syscall.FILE_SHARE_WRITE | syscall.FILE_SHARE_DELETE)\n\taccessAttr := uint32(syscall.FILE_ATTRIBUTE_NORMAL | 0x80000000)\n\n\tfd, err := syscall.CreateFile(pathp, access, shareflag, sa, createflag, accessAttr, 0)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn os.NewFile(uintptr(fd), path), nil\n}",
"func OpenFile(filename string) *bufio.Scanner {\n\tfile, err := os.Open(filename)\n\tif err != nil {\n\t\tfmt.Println(\"Unable to open file:\", err)\n\t\tos.Exit(1)\n\t}\n\tscanner := bufio.NewScanner(file)\n\treturn scanner\n}",
"func Open(src string) (s *Subtitles, err error) {\n\t// Open the file\n\tvar f *os.File\n\tif f, err = os.Open(src); err != nil {\n\t\terr = errors.Wrapf(err, \"opening %s failed\", src)\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\t// Parse the content\n\tswitch filepath.Ext(src) {\n\tcase \".srt\":\n\t\ts, err = ReadFromSRT(f)\n\tcase \".ttml\":\n\t\ts, err = ReadFromTTML(f)\n\tcase \".vtt\":\n\t\t//s, err = ReadFromVTT(f)\n\tdefault:\n\t\terr = ErrInvalidExtension\n\t}\n\treturn\n}",
"func FilesFromCommand(ctx context.Context) ([]*api.File, error) {\n\tmachineFiles := []*api.File{}\n\n\tlocalFiles, err := parseFiles(ctx, \"file-local\", func(value string, file *api.File) error {\n\t\tcontent, err := os.ReadFile(value)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"could not read file %s: %w\", value, err)\n\t\t}\n\t\trawValue := base64.StdEncoding.EncodeToString(content)\n\t\tfile.RawValue = &rawValue\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn machineFiles, fmt.Errorf(\"failed to read file-local: %w\", err)\n\t}\n\tmachineFiles = append(machineFiles, localFiles...)\n\n\tliteralFiles, err := parseFiles(ctx, \"file-literal\", func(value string, file *api.File) error {\n\t\tencodedValue := base64.StdEncoding.EncodeToString([]byte(value))\n\t\tfile.RawValue = &encodedValue\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn machineFiles, fmt.Errorf(\"failed to read file-literal: %w\", err)\n\t}\n\tmachineFiles = append(machineFiles, literalFiles...)\n\n\tsecretFiles, err := parseFiles(ctx, \"file-secret\", func(value string, file *api.File) error {\n\t\tfile.SecretName = &value\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn machineFiles, fmt.Errorf(\"failed to read file-secret: %w\", err)\n\t}\n\tmachineFiles = append(machineFiles, secretFiles...)\n\n\treturn machineFiles, nil\n}",
"func (fos *fakeOS) Open(name string) (io.ReadCloser, error) {\n\tr, ok := fos.readFiles[name]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"fakeOS: file not found (%s); not implemented.\", name))\n\t}\n\treturn ioutil.NopCloser(r), nil\n}",
"func (fs *OneFile) Open(id string) error {\n\treturn nil\n}",
"func openPaths(currentPath string, paths []string) error {\n\tvar wg sync.WaitGroup\n\terrs := make(chan error, len(paths))\n\tfor _, path := range paths {\n\t\twg.Add(1)\n\t\tgo func(cp, p string) {\n\t\t\tdefer wg.Done()\n\t\t\terr := openPath(cp, p)\n\t\t\tif err != nil {\n\t\t\t\terrs <- err\n\t\t\t}\n\t\t}(currentPath, path)\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(errs)\n\t}()\n\n\tfor i := 0; i < len(paths); i++ {\n\t\tselect {\n\t\tcase err := <-errs:\n\t\t\tif err != nil {\n\t\t\t\tfmt.Println(err)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func OpenBinary(bs []byte, options ...FileOption) (*File, error) {\n\tr := bytes.NewReader(bs)\n\treturn OpenReaderAt(r, int64(r.Len()), options...)\n\n}",
"func parseArgs() {\n\tfirstArgWithDash := 1\n\tfor i := 1; i < len(os.Args); i++ {\n\t\tfirstArgWithDash = i\n\n\t\tif len(os.Args[i]) > 0 && os.Args[i][0] == '-' {\n\t\t\tbreak\n\t\t} else {\n\t\t\tfilePaths = append(filePaths, os.Args[i])\n\t\t}\n\t}\n\n\tflag.CommandLine.Parse(os.Args[firstArgWithDash:])\n}",
"func OpenFile(name string, flag int, perm os.FileMode) (file *os.File, err error) {\n\treturn os.OpenFile(name, syscall.O_DIRECT|flag, perm)\n}",
"func getReader(inputFile string) (io.Reader, error) {\n\tif inputFile == \"stdin\" {\n\t\treturn os.Stdin, nil\n\t}\n\treturn os.Open(inputFile)\n}",
"func OpenInput(path string) (*os.File, error) {\n\tvar input *os.File\n\n\tswitch path {\n\tcase \"-\":\n\t\tinput = os.Stdin\n\tdefault:\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"can't open file\")\n\t\t}\n\n\t\tinput = f\n\t}\n\n\tstat, err := input.Stat()\n\tif err != nil {\n\t\treturn input, fmt.Errorf(\"can't read from file: %v\", err)\n\t}\n\n\tif stat.Size() <= 0 {\n\t\treturn input, fmt.Errorf(\"empty file\")\n\t}\n\n\treturn input, nil\n}",
"func openGo() (*Lasf, error) {\n\treturn Open(in)\n}",
"func main() {\r\n\tif len(os.Args) < 2 {\r\n\t\tfindDuplicates(os.Stdin)\r\n\t} else {\r\n\t\tfilename := os.Args[1]\r\n\t\tfile, err := os.Open(filename)\r\n\t\tif err != nil {\r\n\t\t\tfmt.Printf(\"Error: %v\", err)\r\n\t\t}\r\n\t\tfindDuplicates(file)\r\n\t}\r\n}",
"func (c DirCollector) Open(file string) (http.File, error) {\n\tvf, err := c.GetFile(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata, _ := vf.Data()\n\treturn &httpFile{\n\t\tReader: bytes.NewReader(data),\n\t\tVFile: vf,\n\t}, nil\n}",
"func (a BuildBlock) openScFile() *string {\n\tvar err error\n\tvar f []byte\n\tf, err = ioutil.ReadFile(a.runpath + a.Filename + a.Extension)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Cannot open script file: %s\\n\", err)\n\t\treturn nil\n\t}\n\tfile := string(f)\n\treturn &file\n}",
"func (a *apiFileFS) Open(name string) (http.File, error) {\n\tid, err := strconv.Atoi(strings.TrimPrefix(name, \"/\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tf := finder.FindByID(id)\n\n\treturn os.Open(f.Path)\n}",
"func main() {\n\n/*\n\tCheckArgs(\"<repository-path>\")\n\tpath := os.Args[1]\n\tfmt.Println(path)\n\tr, err := git.PlainOpen(path)\n\tCheckIfError(err)\n\n\tInfo(\"git push\")\n\t// push using default options\n\terr = r.Push(&git.PushOptions{})\n\tCheckIfError(err)\n*/\n\topenFile()\n}",
"func (d Dir) Open(name string) (*os.File, error) {\n\tif d.env == \"\" {\n\t\treturn nil, errors.New(\"xdgdir: Open on zero Dir\")\n\t}\n\tpaths := d.SearchPaths()\n\tif len(paths) == 0 {\n\t\treturn nil, fmt.Errorf(\"xdgdir: open %s: %s is invalid or not set\", name, d.env)\n\t}\n\tvar firstErr error\n\tfor _, p := range paths {\n\t\tf, err := os.Open(filepath.Join(p, name))\n\t\tif err == nil {\n\t\t\treturn f, nil\n\t\t} else if !os.IsNotExist(err) {\n\t\t\tfirstErr = err\n\t\t}\n\t}\n\tif firstErr != nil {\n\t\treturn nil, firstErr\n\t}\n\treturn nil, &os.PathError{\n\t\tOp: \"Open\",\n\t\tPath: filepath.Join(\"$\"+d.env, name),\n\t\tErr: os.ErrNotExist,\n\t}\n}",
"func GetOpenListOfPid(pid int) []*os.File {\n\tvar err error\n\tvar file *os.File\n\tvar filelist []string\n\n\tfds := make([]*os.File, 0, 0)\n\n\tfile, err = os.Open(\"/proc/\" + strconv.Itoa(pid) + \"/fd/\")\n\tif err != nil {\n\t\tLogger.Errlogf(\"ERROR: %s\\n\", err)\n\t\treturn fds\n\t}\n\tdefer file.Close()\n\n\tfilelist, err = file.Readdirnames(1024)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\tLogger.Errlogf(\"read dir end: %s, %v\\n\", err, filelist)\n\t\t} else {\n\t\t\tLogger.Errlogf(\"ERROR: %s\\n\", err)\n\t\t\treturn fds\n\t\t}\n\t}\n\t/*\n\t\trhinofly@rhinofly-Y570:~/data/liteide/build$ ls -l /proc/self/fd/\n\t\ttotal 0\n\t\tlrwx------ 1 rhinofly rhinofly 64 Nov 4 09:47 0 -> /dev/pts/16\n\t\tlrwx------ 1 rhinofly rhinofly 64 Nov 4 09:47 1 -> /dev/pts/16\n\t\tlrwx------ 1 rhinofly rhinofly 64 Nov 4 09:47 2 -> /dev/pts/16\n\t\tlr-x------ 1 rhinofly rhinofly 64 Nov 4 09:47 3 -> /proc/29484/fd\n\t*/\n\ttmpid := strconv.Itoa(int(file.Fd()))\n\tif len(filelist) > 0 {\n\t\tfor idx := range filelist {\n\t\t\t//func NewFile(fd uintptr, name string) *File\n\t\t\tlink, _ := os.Readlink(\"/proc/\" + strconv.Itoa(pid) + \"/fd/\" + filelist[idx])\n\t\t\tif filelist[idx] == tmpid {\n\t\t\t\t//Logger.Errlogf(\"file in %d dir: %d, %v, link %s, is me %v\\n\", pid, idx, filelist[idx], link, file.Name())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//Logger.Errlogf(\"file in %d dir: %d, %v -> %s\\n\", pid, idx, filelist[idx], link)\n\t\t\tfd, err := strconv.Atoi(filelist[idx])\n\t\t\tif err != nil {\n\t\t\t\tLogger.Errlogf(\"strconv.Atoi(%v): %s\\n\", filelist[idx], err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfds = append(fds, os.NewFile(uintptr(fd), link))\n\t\t}\n\t}\n\treturn fds\n}",
"func OpenFile(anchor string) (File, error) {\n\treturn get().OpenFile(anchor)\n}",
"func FileTest() {\n\tstr, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treadText(str + fileName)\n}"
] | [
"0.6357535",
"0.6275072",
"0.6177542",
"0.6041794",
"0.6003741",
"0.59987533",
"0.5993259",
"0.59458804",
"0.58511996",
"0.582843",
"0.5648734",
"0.5641688",
"0.5616907",
"0.56032234",
"0.55901015",
"0.5580287",
"0.5575955",
"0.55691886",
"0.5509945",
"0.5492141",
"0.5474016",
"0.54676336",
"0.5442105",
"0.5429829",
"0.5396132",
"0.5393962",
"0.53776246",
"0.53686434",
"0.5365719",
"0.5343779",
"0.533708",
"0.5333434",
"0.5326344",
"0.5318406",
"0.5313539",
"0.53081584",
"0.5289011",
"0.5279706",
"0.5250399",
"0.5244936",
"0.5243224",
"0.5229361",
"0.52144367",
"0.5201342",
"0.5188991",
"0.51677036",
"0.5161027",
"0.51513094",
"0.5145115",
"0.51442724",
"0.5139605",
"0.51379216",
"0.5120767",
"0.5104404",
"0.5083909",
"0.5066788",
"0.5066657",
"0.50612545",
"0.50593764",
"0.50593",
"0.50409216",
"0.50409013",
"0.50299364",
"0.5013976",
"0.50126004",
"0.50017184",
"0.49966216",
"0.49894255",
"0.49894255",
"0.49894255",
"0.49833328",
"0.4979878",
"0.49700364",
"0.49633524",
"0.4953914",
"0.49490044",
"0.49403378",
"0.49334162",
"0.4923828",
"0.4922035",
"0.4908778",
"0.49069944",
"0.48804095",
"0.48757955",
"0.48727584",
"0.48713458",
"0.48663667",
"0.48573127",
"0.48534042",
"0.48474836",
"0.48456004",
"0.48443362",
"0.48419973",
"0.48336068",
"0.48239458",
"0.48235005",
"0.48216987",
"0.4821018",
"0.48166457",
"0.48099104"
] | 0.6459291 | 0 |
open reader and writer | func openReadWrite(inFile *os.File, outFile *os.File) (*bufio.Reader, *bufio.Writer) {
return bufio.NewReader(inFile), bufio.NewWriter(outFile)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (cmd *baseCommand) writerOpen() error {\n\tif cmd.out == \"\" || cmd.out == \"-\" {\n\t\tif cmd.w == nil {\n\t\t\tcmd.w = os.Stdout\n\t\t}\n\t\treturn nil\n\t}\n\n\tdir, base := filepath.Split(cmd.out)\n\tbase = \".\" + base + \".\"\n\n\tvar err error\n\tif cmd.tmp, err = ioutil.TempFile(dir, base); err == nil {\n\t\tDebugf(\"writing to %s\\n\", cmd.tmp.Name())\n\t\tcmd.w = cmd.tmp\n\n\t\tuid, err := cmd.getUserId()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgid, err := cmd.getGroupId()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err = cmd.tmp.Chmod(cmd.mode); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif uid != -1 || gid != -1 {\n\t\t\tif err = cmd.tmp.Chown(uid, gid); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}",
"func Open(srcFn, inFn, outFn string) (srcReader, inReader io.Reader,\n\toutWriter io.Writer) {\n\tsrcReader = setupSrcReader(srcFn)\n\tinReader = setupInReader(inFn)\n\toutWriter = setupOutReader(outFn)\n\treturn srcReader, inReader, outWriter\n}",
"func OpenReader(name string) (*zip.ReadCloser, error)",
"func (w *RotateWriter) open() error {\n\tinfo, err := os.Stat(w.filename)\n\tif os.IsNotExist(err) {\n\t\tw.fp, err = os.Create(w.filename)\n\t\tw.fsize = int64(0)\n\t\treturn err\n\t}\n\tw.fp, err = os.OpenFile(w.filename, os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn err\n\t}\n\tw.fsize = info.Size()\n\treturn nil\n}",
"func (reader *Reader) Open() (e error) {\n\t// do not let file descriptor leaked\n\te = reader.Close()\n\tif e != nil {\n\t\treturn\n\t}\n\n\te = reader.OpenInput()\n\tif e != nil {\n\t\treturn\n\t}\n\n\te = reader.OpenRejected()\n\n\treturn\n}",
"func open(output string) (io.ReadWriteCloser, error) {\n\treturn os.Create(output)\n}",
"func (fc *FileCache) WriteOpen(meta *Meta, src io.Reader) (item *Item, written int64, err error) {\n\titem, err = fc.Create(meta)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\twritten, err = io.Copy(item.File, src)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t_, err = item.File.Seek(0, io.SeekStart)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn item, written, nil\n}",
"func Open(rw ReaderWriterAt) (*File, error) {\n\th, err := ReadHeader(io.NewSectionReader(rw, 0, 1<<31))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &File{rw: rw, Header: h}, nil\n}",
"func (t *tOps) open(f *tFile) (ch *cache.Handle, err error) {\n\tch = t.cache.Get(0, uint64(f.fd.Num), func() (size int, value cache.Value) {\n\t\tvar r storage.Reader\n\t\tr, err = t.s.stor.Open(f.fd)\n\t\tif err != nil {\n\t\t\treturn 0, nil\n\t\t}\n\n\t\tvar bcache *cache.NamespaceGetter\n\t\tif t.bcache != nil {\n\t\t\tbcache = &cache.NamespaceGetter{Cache: t.bcache, NS: uint64(f.fd.Num)}\n\t\t}\n\n\t\tvar tr *table.Reader\n\t\ttr, err = table.NewReader(r, f.size, f.fd, bcache, t.bpool, t.s.o.Options)\n\t\tif err != nil {\n\t\t\tr.Close()\n\t\t\treturn 0, nil\n\t\t}\n\t\treturn 1, tr\n\n\t})\n\tif ch == nil && err == nil {\n\t\terr = ErrClosed\n\t}\n\treturn\n}",
"func (env *E) Open(f *os.File, w *csv.Writer) (*os.File, *csv.Writer, error) {\n\tif w != nil {\n\t\tw.Flush()\n\t\tw = nil\n\t}\n\tif f != nil {\n\t\tf.Close()\n\t\tf = nil\n\t}\n\n\tsearching := true\n\tid := 1\n\tvar fn string\n\tfor searching {\n\t\te := path.Ext(env.CsvFilename)\n\t\tb := strings.Replace(env.CsvFilename, e, \"\", -1)\n\t\tfn = fmt.Sprintf(\"%s_%03d%s\", b, id, e)\n\t\tfn = path.Join(env.OutDir, fn)\n\t\tif _, err := os.Stat(fn); os.IsNotExist(err) {\n\t\t\tsearching = false\n\t\t} else {\n\t\t\tid++\n\t\t}\n\t}\n\terr := os.MkdirAll(path.Dir(fn), os.ModePerm)\n\tif err != nil {\n\t\treturn f, w, err\n\t}\n\tf, err = os.Create(fn)\n\tif err != nil {\n\t\treturn f, w, err\n\t}\n\tw = csv.NewWriter(f)\n\terr = w.Write(env.Headers)\n\tlog.Printf(\"open: %v\\n\", fn)\n\treturn f, w, err\n}",
"func OpenReader(name string) (*ReadCloser, error) {\n\tf, err := os.Open(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\n\tr := new(ReadCloser)\n\tif err := r.init(f, fi.Size(), false); err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\tr.f = f\n\n\treturn r, nil\n}",
"func openReader(\n\tctx context.Context,\n\tdb *sql.DB,\n\tstoreID uint64,\n\taddr gospel.Address,\n\tlimit *rate.Limiter,\n\tlogger twelf.Logger,\n\topts *options.ReaderOptions,\n) (*Reader, error) {\n\t// Note that runCtx is NOT derived from ctx, which is only used for the\n\t// opening of the reader itself.\n\trunCtx, cancel := context.WithCancel(context.Background())\n\n\taccetableLatency := getAcceptableLatency(opts)\n\n\tr := &Reader{\n\t\tlogger: logger,\n\t\tfacts: make(chan gospel.Fact, getReadBufferSize(opts)),\n\t\tend: make(chan struct{}),\n\t\tdone: make(chan error, 1),\n\t\tctx: runCtx,\n\t\tcancel: cancel,\n\t\taddr: addr,\n\t\tglobalLimit: limit,\n\t\tadaptiveLimit: rate.NewLimiter(rate.Every(accetableLatency), 1),\n\t\tacceptableLatency: accetableLatency,\n\t\tstarvationLatency: getStarvationLatency(opts),\n\t\taverageLatency: ewma.NewMovingAverage(averageLatencyAge),\n\t}\n\n\tif logger.IsDebug() {\n\t\tr.debug = &readerDebug{\n\t\t\topts: opts,\n\t\t\taveragePollRate: metrics.NewRateCounter(),\n\t\t\taverageFactRate: metrics.NewRateCounter(),\n\t\t}\n\t}\n\n\tif err := r.prepareStatement(ctx, db, storeID, opts); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr.logInitialization()\n\n\tgo r.run()\n\n\treturn r, nil\n}",
"func getreader(bn int, vname string) (io.Reader, io.Closer) {\n\tfn := config.BucketPath(bn, sourcedir)\n\tfn = path.Join(fn, fmt.Sprintf(\"%s.bin.sz\", vname))\n\tfid, err := os.Open(fn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\trdr := snappy.NewReader(fid)\n\treturn rdr, fid\n}",
"func (l *Log) writer() {\n\t// Open as O_RDWR (which should get lock) and O_DIRECT.\n\tf, err := os.OpenFile(l.filename, os.O_WRONLY|os.O_APPEND, 0660)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer f.Close()\n\tenc := json.NewEncoder(f)\n\tfor {\n\t\tr, ok := <-l.in\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tif r.m == nil {\n\t\t\tr.err <- fmt.Errorf(\"cannot write nil to wal\")\n\t\t\treturn\n\t\t}\n\t\t// serialize mutation and write to disk\n\t\tif err := enc.Encode(r.m); err != nil {\n\t\t\tr.err <- fmt.Errorf(\"wal encoding: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t\t// sync\n\t\tif err := f.Sync(); err != nil {\n\t\t\tr.err <- fmt.Errorf(\"wal sync: %s\", err.Error())\n\t\t\treturn\n\t\t}\n\t\tr.err <- nil\n\t\t// send to reader\n\t\tif l.closed {\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (index *ind) open() (err error) {\n\tfilename := index.name\n\tif _, err := os.Stat(filename); !os.IsNotExist(err) {\n\t\tindexFile, _ := os.Open(filename)\n\t\td := gob.NewDecoder(indexFile)\n\t\tdecodeErr := d.Decode(&index)\n\t\tif decodeErr != nil {\n\t\t\tpanic(decodeErr)\n\t\t}\n\t\tdefer indexFile.Close()\n\t} else {\n\t\tindex.Storage = map[string][]string{}\n\t\tindex.Domains = map[string]bool{}\n\t}\n\n\treturn err\n}",
"func Open(name *byte, mode, perm int32) int32",
"func (localFileDriver) open(file string) (io.ReadCloser, error) {\n\treturn os.Open(file)\n}",
"func (w *writer) Open(\n\tnamespace ts.ID,\n\tblockSize time.Duration,\n\tshard uint32,\n\tblockStart time.Time,\n) error {\n\tshardDir := ShardDirPath(w.filePathPrefix, namespace, shard)\n\tif err := os.MkdirAll(shardDir, w.newDirectoryMode); err != nil {\n\t\treturn err\n\t}\n\tw.blockSize = blockSize\n\tw.start = blockStart\n\tw.currIdx = 0\n\tw.currOffset = 0\n\tw.checkpointFilePath = filesetPathFromTime(shardDir, blockStart, checkpointFileSuffix)\n\tw.err = nil\n\n\tvar infoFd, indexFd, dataFd, digestFd *os.File\n\tif err := openFiles(\n\t\tw.openWritable,\n\t\tmap[string]**os.File{\n\t\t\tfilesetPathFromTime(shardDir, blockStart, infoFileSuffix): &infoFd,\n\t\t\tfilesetPathFromTime(shardDir, blockStart, indexFileSuffix): &indexFd,\n\t\t\tfilesetPathFromTime(shardDir, blockStart, dataFileSuffix): &dataFd,\n\t\t\tfilesetPathFromTime(shardDir, blockStart, digestFileSuffix): &digestFd,\n\t\t},\n\t); err != nil {\n\t\treturn err\n\t}\n\n\tw.infoFdWithDigest.Reset(infoFd)\n\tw.indexFdWithDigest.Reset(indexFd)\n\tw.dataFdWithDigest.Reset(dataFd)\n\tw.digestFdWithDigestContents.Reset(digestFd)\n\n\treturn nil\n}",
"func Open(addr string) (*bufio.ReadWriter, error) {\n\tlog.Println(\"Dial \" + addr)\n\tconn, err := net.Dial(\"tcp\", addr)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"Dialing \"+addr+\" failed\")\n\t}\n\treturn bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil\n}",
"func (bk *RadosBucket) OpenWrite(key string) (wr ObjectWriter, err error) {\n\treturn nil, errNotImplemented\n}",
"func (r *resource) Open() io.ReadSeeker {\n\treturn bytes.NewReader(r.data)\n}",
"func (osFS) Open(name string) (io.ReadCloser, error) { return os.Open(name) }",
"func (r *regulator) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tr.enter()\n\tdefer r.exit()\n\n\treturn r.StorageDriver.Reader(ctx, path, offset)\n}",
"func (obsr *obsReader) Open(path string) (io.ReadCloser, error) {\n\tbk, key, err := util.ParseBucketAndKey(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse oss bucket and key: %v\", err)\n\t}\n\n\t_, err = obsr.client.HeadBucket(bk)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"OSS: bucket<%s> not found\", bk)\n\t}\n\n\tinput := &obs.GetObjectInput{}\n\tinput.Bucket = bk\n\tinput.Key = key\n\tresp, err := obsr.client.GetObject(input)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\n\treturn resp.Body, nil\n}",
"func (a *Archive) Open(name string) (*Reader, error) {\n\te, err := a.GetFileInfo(name)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsectionReader := io.NewSectionReader(a.reader, e.Offset, e.CompressedSize)\n\treturn &Reader{\n\t\treader: lz4.NewReader(sectionReader),\n\t}, nil\n}",
"func (a Afero) WriteReader(path string, r io.Reader) (err error) {\n\treturn WriteReader(a.Fs, path, r)\n}",
"func (bk *RadosBucket) OpenRead(key string) (rd ObjectReader, err error) {\n\treturn nil, errNotImplemented\n}",
"func (b *Buffer) Open(path string) {\n if util.FileExists(path) {\n\n } else {\n\n }\n\n b.setCursorXOffset()\n b.cursorXPos += b.getLineMetaChars()\n}",
"func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tbaseUrl := d.getBaseUrl(path)\n\n\tinfo, err := d.Bucket.Stat(ctx, path)\n\tif err != nil {\n\t\treturn nil, err;\n\t}\n\n\tif offset > info.Fsize {\n\t\treturn ioutil.NopCloser(bytes.NewReader(nil)), nil\n\t}\n\n\thttpClient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", baseUrl, nil)\n\treq.Header.Add(\"Range\", \"bytes=\"+strconv.FormatInt(offset, 10)+\"-\")\n\tresp, err := httpClient.Do(req)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc,_ := ioutil.ReadAll(resp.Body)\n\n\tfmt.Print(\"content\"+string(c)+\"\\n\")\n\n\treturn resp.Body,err\n}",
"func (p *streamingPuller) open() error {\n\tp.c.L.Lock()\n\tdefer p.c.L.Unlock()\n\tp.openLocked()\n\treturn p.err\n}",
"func (tbz2 *TarBz2) Open(buf *[]byte) error {\n\ttbz2.wrapReader()\n\treturn tbz2.Tar.Open(buf)\n}",
"func NewReader(r io.Reader) io.Reader {\n return reader{r}\n}",
"func (m *InMemoryRepository) Reader(u fyne.URI) (fyne.URIReadCloser, error) {\n\tpath := u.Path()\n\n\tif path == \"\" {\n\t\treturn nil, fmt.Errorf(\"invalid path '%s'\", path)\n\t}\n\n\t_, ok := m.Data[path]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"no such path '%s' in InMemoryRepository\", path)\n\t}\n\n\treturn &nodeReaderWriter{path: path, repo: m}, nil\n}",
"func (fr *fetchResult) Open() (readSeekCloser, error) {\n\tswitch fr.f.ops {\n\tcase fetchOpsResolve:\n\t\tcontent := strings.NewReader(marshalInfo(fr.Version, fr.Time))\n\t\treturn struct {\n\t\t\tio.ReadCloser\n\t\t\tio.Seeker\n\t\t}{nopCloser{content}, content}, nil\n\tcase fetchOpsList:\n\t\tcontent := strings.NewReader(strings.Join(fr.Versions, \"\\n\"))\n\t\treturn struct {\n\t\t\tio.ReadCloser\n\t\t\tio.Seeker\n\t\t}{nopCloser{content}, content}, nil\n\tcase fetchOpsDownloadInfo:\n\t\treturn os.Open(fr.Info)\n\tcase fetchOpsDownloadMod:\n\t\treturn os.Open(fr.GoMod)\n\tcase fetchOpsDownloadZip:\n\t\treturn os.Open(fr.Zip)\n\t}\n\n\treturn nil, errors.New(\"invalid fetch operation\")\n}",
"func (n *dnode) Open(readOnly bool) (Stream, error) {\n\tif readOnly {\n\t\tn.mutex.RLock()\n\t} else {\n\t\tn.mutex.Lock()\n\t}\n\tif !n.IsFile() {\n\t\tif readOnly {\n\t\t\tn.mutex.RUnlock()\n\t\t} else {\n\t\t\tn.mutex.Unlock()\n\t\t}\n\t\treturn nil, ErrIsDir\n\t}\n\n\treturn newDnodeStream(n, readOnly), nil\n}",
"func (h *Harvester) open() (encoding.Encoding, error) {\n\t// Special handling that \"-\" means to read from standard input\n\tif h.Path == \"-\" {\n\t\treturn h.openStdin()\n\t}\n\treturn h.openFile()\n}",
"func NewReader(r io.Reader) *Reader { return &Reader{r: r} }",
"func osOpenAndRead(path string) {\n\tfile, err := os.Open(path)\n\tdefer file.Close()\n\tcheck(err)\n\t//设置读取字节的缓存\n\tb := make([]byte, 20)\n\t//返回值n表示实际读取的字节数\n\tn,err:=file.Read(b)\n\tcheck(err)\n\tlog.Println(string(b),n)\n}",
"func setReaderWriters(d4 *d4S, force bool) bool {\n\t//TODO implement other destination file, fifo unix_socket ...\n\tswitch (*d4).conf.source {\n\tcase \"stdin\":\n\t\t(*d4).src = os.Stdin\n\tcase \"pcap\":\n\t\tf, _ := os.Open(\"capture.pcap\")\n\t\t(*d4).src = f\n\tcase \"d4server\":\n\t\t// Create a new redis connection pool\n\t\t(*d4).redisInputPool = newPool((*d4).conf.redisHost+\":\"+(*d4).conf.redisPort, 16)\n\t\tvar err error\n\t\t(*d4).redisCon, err = (*d4).redisInputPool.Dial()\n\t\tif err != nil {\n\t\t\tlogger.Println(\"Could not connect to d4 Redis\")\n\t\t\treturn false\n\t\t}\n\t\t(*d4).src, err = inputreader.NewLPOPReader(&(*d4).redisCon, (*d4).conf.redisDB, (*d4).conf.redisQueue)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not create d4 Redis Descriptor %q \\n\", err)\n\t\t\treturn false\n\t\t}\n\tcase \"folder\":\n\t\tvar err error\n\t\t(*d4).src, err = inputreader.NewFileWatcherReader((*d4).conf.folderstr, (*d4).json, (*d4).daily, logger)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Could not create File Watcher %q \\n\", err)\n\t\t\treturn false\n\t\t}\n\t}\n\tisn, dstnet := config.IsNet((*d4).conf.destination)\n\tif isn {\n\t\t// We test whether a connection already exist\n\t\t// (case where the reader run out of data)\n\t\t// force forces to reset the connections after\n\t\t// failure to reuse it\n\t\tif _, ok := (*d4).dst.w.(net.Conn); !ok || force {\n\t\t\tif (*d4).tor {\n\t\t\t\tdialer := net.Dialer{\n\t\t\t\t\tTimeout: (*d4).ct,\n\t\t\t\t\tKeepAlive: (*d4).cka,\n\t\t\t\t\tFallbackDelay: 0,\n\t\t\t\t}\n\t\t\t\tdial, err := proxy.SOCKS5(\"tcp\", \"127.0.0.1:9050\", nil, &dialer)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Fatal(err)\n\t\t\t\t}\n\t\t\t\ttlsc := tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t}\n\t\t\t\tif (*d4).cc {\n\t\t\t\t\ttlsc = tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: false,\n\t\t\t\t\t\tRootCAs: &(*d4).ca,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tconn, errc := dial.Dial(\"tcp\", dstnet)\n\t\t\t\tif errc != nil {\n\t\t\t\t\tlogger.Println(errc)\n\t\t\t\t\treturn false\n\t\t\t\t}\n\t\t\t\tif (*d4).ce == true {\n\t\t\t\t\tconn = tls.Client(conn, &tlsc) // use tls\n\t\t\t\t}\n\t\t\t\t(*d4).dst = newD4Writer(conn, (*d4).conf.key)\n\t\t\t} else {\n\t\t\t\tdial := net.Dialer{\n\t\t\t\t\tTimeout: (*d4).ct,\n\t\t\t\t\tKeepAlive: (*d4).cka,\n\t\t\t\t\tFallbackDelay: 0,\n\t\t\t\t}\n\t\t\t\ttlsc := tls.Config{\n\t\t\t\t\tInsecureSkipVerify: true,\n\t\t\t\t}\n\t\t\t\tif (*d4).cc {\n\t\t\t\t\ttlsc = tls.Config{\n\t\t\t\t\t\tInsecureSkipVerify: false,\n\t\t\t\t\t\tRootCAs: &(*d4).ca,\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif (*d4).ce == true {\n\t\t\t\t\tconn, errc := tls.DialWithDialer(&dial, \"tcp\", dstnet, &tlsc)\n\t\t\t\t\tif errc != nil {\n\t\t\t\t\t\tlogger.Println(errc)\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\t(*d4).dst = newD4Writer(conn, (*d4).conf.key)\n\t\t\t\t} else {\n\t\t\t\t\tconn, errc := dial.Dial(\"tcp\", dstnet)\n\t\t\t\t\tif errc != nil {\n\t\t\t\t\t\treturn false\n\t\t\t\t\t}\n\t\t\t\t\t(*d4).dst = newD4Writer(conn, (*d4).conf.key)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tswitch (*d4).conf.destination {\n\t\tcase \"stdout\":\n\t\t\t(*d4).dst = newD4Writer(os.Stdout, (*d4).conf.key)\n\t\tcase \"file\":\n\t\t\tf, _ := os.Create(\"test.txt\")\n\t\t\t(*d4).dst = newD4Writer(f, (*d4).conf.key)\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"No suitable destination found, given :%q\", (*d4).conf.destination))\n\t\t}\n\t}\n\n\t// Create the copy buffer\n\t(*d4).dst.fb = make([]byte, HDR_SIZE+(*d4).conf.snaplen)\n\t(*d4).dst.pb = make([]byte, 
(*d4).conf.snaplen)\n\n\treturn true\n}",
"func (w *RotateWriter) Open() error {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\treturn w.open()\n}",
"func (w *Writer) Close() error {}",
"func osOpenAndWrite(path string,content string){\n\tfile, err := os.Create(path)\n\tcheck(err)\n\tdefer file.Close()\n\tn1,err:=file.Write([]byte(content))\n\tcheck(err)\n\tlog.Println(n1)\n\n\tn2,err:=file.WriteString(\"\\n\")\n\tcheck(err)\n\tlog.Println(n2)\n\n\tn3,err:=file.WriteString(content)\n\tcheck(err)\n\tlog.Println(n3)\n}",
"func Open(rw eio.ReaderWriterAt) (q Qcow2, err error) {\n\tvar qi *qcow2\n\terr = eio.BacktraceWrap(func() {\n\t\tqi = &qcow2{}\n\t\tqi.header = &headerImpl{}\n\t\tqi.header.open(rw)\n\t})\n\treturn qi, err\n}",
"func Open(filename string) (l *Log, err error) {\n\tl = &Log{\n\t\tfilename: filename,\n\t\tin: make(chan (*writeRequest), 1000),\n\t}\n\tf, err := os.OpenFile(l.filename, os.O_RDONLY|os.O_CREATE, 0660)\n\tif err != nil {\n\t\treturn l, err\n\t}\n\tdefer f.Close()\n\tgo l.writer()\n\treturn l, nil\n}",
"func Open(addr string) (*bufio.ReadWriter, error) {\r\n\t// Dial the remote process.\r\n\t// Note that the local port is chosen on the fly. If the local port\r\n\t// must be a specific one, use DialTCP() instead.\r\n\tlog.Println(\"Dial \" + addr)\r\n\tconn, err := net.Dial(\"tcp\", addr)\r\n\tif err != nil {\r\n\t\treturn nil, errors.Wrap(err, \"Dialing \"+addr+\" failed\")\r\n\t}\r\n\treturn bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn)), nil\r\n}",
"func Open(r io.Reader) (*bencodeTorrent, error) {\n\tbto := bencodeTorrent{}\n\terr := bencode.Unmarshal(r, &bto)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &bto, nil\n}",
"func Open(fpath string, options ...ReadOption) (*Reader, error) {\n\tf, err := os.Open(fpath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\th := header{}\n\tif err := h.read(f); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &Reader{header: h}\n\tfor _, option := range options {\n\t\toption(r)\n\t}\n\n\tif h.version == 0 {\n\t\tf.Close()\n\n\t\tvoOpts := []file_v0.ReadOption{}\n\t\tif r.interceptor != nil {\n\t\t\tvoOpts = append(voOpts, file_v0.ReadIntercept(r.interceptor))\n\t\t}\n\t\tif r.cacheIndex {\n\t\t\tvoOpts = append(voOpts, file_v0.CacheIndex())\n\t\t}\n\t\tvr, err := file_v0.Open(fpath, voOpts...)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tr.file_v0 = vr\n\t\treturn r, nil\n\t}\n\n\tif r.cacheIndex {\n\t\tr.cacheOffsets(f)\n\t}\n\n\tch := make(chan *os.File, 10)\n\tr.readers = ch\n\tch <- f\n\tfor i := 0; i < 9; i++ {\n\t\tf, err := os.Open(fpath)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tch <- f\n\t}\n\n\treturn r, nil\n}",
"func (localFileDriver) openAppend(file string) (io.WriteCloser, error) {\n\treturn os.OpenFile(file, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)\n}",
"func (d *driver) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\toutput, err := d.Client.GetObject(&obs.GetObjectInput{\n\t\tGetObjectMetadataInput: obs.GetObjectMetadataInput{\n\t\t\tBucket: d.Bucket,\n\t\t\tKey: d.obsPath(path),\n\t\t},\n\t\tRangeStart: offset,\n\t})\n\n\tif err != nil {\n\t\tif obsErr, ok := err.(obs.ObsError); ok && obsErr.Code == \"InvalidRange\" {\n\t\t\treturn ioutil.NopCloser(bytes.NewReader(nil)), nil\n\t\t}\n\n\t\treturn nil, parseError(path, err)\n\t}\n\treturn output.Body, nil\n}",
"func Open(obj any, filename string) error {\n\tfp, err := os.Open(filename)\n\tdefer fp.Close()\n\tif err != nil {\n\t\tlog.Println(err)\n\t\treturn err\n\t}\n\treturn Read(obj, bufio.NewReader(fp))\n}",
"func Reader(cfg Config) store.Reader {\n\treturn reader{cfg: cfg, openFile: os.Open, reader: csv.NewReader}\n}",
"func Open(file string) (r Records, err error) {\n\tr.db, err = bolt.Open(file, 0600, nil)\n\treturn\n}",
"func getwriter(bn int, vname string) (io.WriteCloser, io.Closer) {\n\tfn := config.BucketPath(bn, targetdir)\n\tfn = path.Join(fn, fmt.Sprintf(\"%s.bin.sz\", vname))\n\tfid, err := os.Create(fn)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twtr := snappy.NewBufferedWriter(fid)\n\treturn wtr, fid\n}",
"func Open(path string) (readerutil.ReaderAtCloser, error) {\n\topenFileMu.Lock()\n\tof := openFiles[path]\n\tif of != nil {\n\t\tof.refCount++\n\t\topenFileMu.Unlock()\n\t\treturn &openFileHandle{false, of}, nil\n\t}\n\topenFileMu.Unlock() // release the lock while we call os.Open\n\n\twinner := false // this goroutine made it into Do's func\n\n\t// Returns an *openFile\n\tresi, err := openerGroup.Do(path, func() (interface{}, error) {\n\t\twinner = true\n\t\tf, err := wkfs.Open(path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tof := &openFile{\n\t\t\tFile: f,\n\t\t\tpath: path,\n\t\t\trefCount: 1,\n\t\t}\n\t\topenFileMu.Lock()\n\t\topenFiles[path] = of\n\t\topenFileMu.Unlock()\n\t\treturn of, nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tof = resi.(*openFile)\n\n\t// If our os.Open was dup-suppressed, we have to increment our\n\t// reference count.\n\tif !winner {\n\t\topenFileMu.Lock()\n\t\tif of.refCount == 0 {\n\t\t\t// Winner already closed it. Try again (rare).\n\t\t\topenFileMu.Unlock()\n\t\t\treturn Open(path)\n\t\t}\n\t\tof.refCount++\n\t\topenFileMu.Unlock()\n\t}\n\treturn &openFileHandle{false, of}, nil\n}",
"func (lm *SimpleManager) NewReader(r io.Reader) *Reader {\n\tlr := NewReader(r)\n\tlm.Manage(lr)\n\treturn lr\n}",
"func NewReader() Reader {\n\treturn reader{}\n}",
"func (h *ReOpen) open() error {\n\topts := []fs.OpenOption{}\n\tvar hashOption *fs.HashesOption\n\tvar rangeOption *fs.RangeOption\n\tfor _, option := range h.options {\n\t\tswitch option := option.(type) {\n\t\tcase *fs.HashesOption:\n\t\t\thashOption = option\n\t\tcase *fs.RangeOption:\n\t\t\trangeOption = option\n\t\tcase *fs.HTTPOption:\n\t\t\topts = append(opts, option)\n\t\tdefault:\n\t\t\tif option.Mandatory() {\n\t\t\t\tfs.Logf(h.src, \"Unsupported mandatory option: %v\", option)\n\t\t\t}\n\t\t}\n\t}\n\tif h.read == 0 {\n\t\tif rangeOption != nil {\n\t\t\topts = append(opts, rangeOption)\n\t\t}\n\t\tif hashOption != nil {\n\t\t\t// put hashOption on if reading from the start, ditch otherwise\n\t\t\topts = append(opts, hashOption)\n\t\t}\n\t} else {\n\t\tif rangeOption != nil {\n\t\t\t// range to the read point\n\t\t\topts = append(opts, &fs.RangeOption{Start: rangeOption.Start + h.read, End: rangeOption.End})\n\t\t} else {\n\t\t\t// seek to the read point\n\t\t\topts = append(opts, &fs.SeekOption{Offset: h.read})\n\t\t}\n\t}\n\th.tries++\n\tif h.tries > h.maxTries {\n\t\th.err = errorTooManyTries\n\t} else {\n\t\th.rc, h.err = h.src.Open(h.ctx, opts...)\n\t}\n\tif h.err != nil {\n\t\tif h.tries > 1 {\n\t\t\tfs.Debugf(h.src, \"Reopen failed after %d bytes read: %v\", h.read, h.err)\n\t\t}\n\t\treturn h.err\n\t}\n\th.opened = true\n\treturn nil\n}",
"func (c *DataChannel) OpenWrite(ctx context.Context, ptransformID string, instID instructionID) io.WriteCloser {\n\treturn c.makeWriter(ctx, clientID{ptransformID: ptransformID, instID: instID})\n}",
"func (base *Base) Reader(ctx context.Context, path string, offset int64) (io.ReadCloser, error) {\n\tctx, done := dcontext.WithTrace(ctx)\n\tdefer done(\"%s.Reader(%q, %d)\", base.Name(), path, offset)\n\n\tif offset < 0 {\n\t\treturn nil, storagedriver.InvalidOffsetError{Path: path, Offset: offset, DriverName: base.StorageDriver.Name()}\n\t}\n\n\tif !storagedriver.PathRegexp.MatchString(path) {\n\t\treturn nil, storagedriver.InvalidPathError{Path: path, DriverName: base.StorageDriver.Name()}\n\t}\n\n\trc, e := base.StorageDriver.Reader(ctx, path, offset)\n\treturn rc, base.setDriverName(e)\n}",
"func (fs *EmbedFs) Open(path string) (file, error) {\n\tpath = filepath.Join(\"/\", path)\n\n\tif !fs.IsFileExist(path) {\n\t\treturn nil, ErrNoExist\n\t}\n\n\treturn &embedFileReader{\n\t\tstart: fs.index[path].offset,\n\t\tlength: fs.index[path].header.Size,\n\t\tsource: fs.origin,\n\t\tname: path,\n\t}, nil\n}",
"func openWriterAndBackup(filename string) io.WriteCloser {\n\tfile, err := os.Create(temporaryName(filename))\n\tif err != nil {\n\t\tlog.Panicf(\"Failed to create temporary save file '%s': %v\", temporaryName(filename), err)\n\t}\n\treturn file\n}",
"func (fsi *fsIOPool) Open(path string) (*lock.RLockedFile, error) {\n\tif err := checkPathLength(path); err != nil {\n\t\treturn nil, err\n\t}\n\n\tfsi.Lock()\n\trlkFile, ok := fsi.lookupToRead(path)\n\tfsi.Unlock()\n\t// Locked path reference doesn't exist, acquire a read lock again on the file.\n\tif !ok {\n\t\t// Open file for reading with read lock.\n\t\tnewRlkFile, err := lock.RLockedOpenFile(path)\n\t\tif err != nil {\n\t\t\tswitch {\n\t\t\tcase os.IsNotExist(err):\n\t\t\t\treturn nil, errFileNotFound\n\t\t\tcase os.IsPermission(err):\n\t\t\t\treturn nil, errFileAccessDenied\n\t\t\tcase isSysErrIsDir(err):\n\t\t\t\treturn nil, errIsNotRegular\n\t\t\tcase isSysErrNotDir(err):\n\t\t\t\treturn nil, errFileAccessDenied\n\t\t\tcase isSysErrPathNotFound(err):\n\t\t\t\treturn nil, errFileNotFound\n\t\t\tdefault:\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\t/// Save new reader on the map.\n\n\t\t// It is possible by this time due to concurrent\n\t\t// i/o we might have another lock present. Lookup\n\t\t// again to check for such a possibility. If no such\n\t\t// file exists save the newly opened fd, if not\n\t\t// reuse the existing fd and close the newly opened\n\t\t// file\n\t\tfsi.Lock()\n\t\trlkFile, ok = fsi.lookupToRead(path)\n\t\tif ok {\n\t\t\t// Close the new fd, since we already seem to have\n\t\t\t// an active reference.\n\t\t\tnewRlkFile.Close()\n\t\t} else {\n\t\t\t// Save the new rlk file.\n\t\t\trlkFile = newRlkFile\n\t\t}\n\n\t\t// Save the new fd on the map.\n\t\tfsi.readersMap[path] = rlkFile\n\t\tfsi.Unlock()\n\n\t}\n\n\t// Success.\n\treturn rlkFile, nil\n}",
"func newReader(filePath string) (*Reader, func(), error) {\n\tf, err := os.Open(filePath)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfz, err := gzip.NewReader(f)\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, nil, err\n\t}\n\n\tcleanup := func() {\n\t\tf.Close()\n\t\tfz.Close()\n\t}\n\treturn &Reader{r: fz}, cleanup, nil\n}",
"func Open(r io.ReaderAt) (*Archive, error) {\n\tmagic := make([]byte, MagicLength)\n\tif num, err := r.ReadAt(magic, 0); err != nil {\n\t\treturn nil, err\n\t} else if num < MagicLength || strings.Compare(string(magic), \"KAR\\x00\") != 0 {\n\t\treturn nil, ErrFileFormat\n\t}\n\n\theaderSizeBytes := make([]byte, HeaderSizeNumberLength)\n\tif num, err := r.ReadAt(headerSizeBytes, MagicLength); err != nil {\n\t\treturn nil, err\n\t} else if num < HeaderSizeNumberLength {\n\t\treturn nil, ErrFileFormat\n\t}\n\n\theaderSize, err := binaryToint64(headerSizeBytes)\n\tif err != nil {\n\t\treturn nil, ErrFileFormat\n\t}\n\n\theaderBytes := make([]byte, headerSize)\n\tif num, err := r.ReadAt(headerBytes, MagicLength+HeaderSizeNumberLength); err != nil {\n\t\treturn nil, err\n\t} else if int64(num) < headerSize {\n\t\treturn nil, ErrFileFormat\n\t}\n\n\tvar header Header\n\tif err := gobDecode(&header, headerBytes); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Archive{\n\t\treader: r,\n\t\theader: header,\n\t}, nil\n}",
"func main() {\n\t// read only (this should return us a non-existence error\n\t// ---------\n\t// file1, err := os.Open(\"example1.txt\")\n\t// handleError(err)\n\t// defer func() { _ = file1.Close() }()\n\n\t// create and return handle for\n\t// ----------------------------\n\t// file2, err := os.Create(\"example2.txt\")\n\t// handleError(err)\n\t// defer func() { _ = file2.Close() }()\n\n\t// os package has a bunch of functions to move&rename (Rename), delete (Remove).\n\t// general file handle with filename, capabilities, file permission bits set\n\t// Capabilities:\n\t// os.O_RDONLY\n\t// os.O_WRONLY\n\t// os.O_RDWR\n\t// os.O_APPEND\n\t// os.O_CREATE\n\t// os.O_TRUNC\n\t// caps are combined with | as they are bitmapped\n\t// usage:\n\t// ------\n\t// file3, err := os.OpenFile(\"example3.txt\", os.O_CREATE|os.O_APPEND|os.O_RDWR, 0666)\n\t// handleError(err)\n\t// defer func() { _ = file3.Close() }()\n\n\n\n\t// reading a file to a byte slice:\n\torwell1984, err := os.Open(\"../assets/1984.html\") // from here\n\thandleError(err) //\n\t// defer func() { _ = orwell1984.Close() }() //\n\tb, err := ioutil.ReadAll(orwell1984) // to here is captured in `b, err = ioutil.ReadFile(<filename>)`\n\thandleError(err)\n\tfmt.Printf(\"%s\", b)\n\tcloseFile(orwell1984)\n\n\t// reading line by line using the bufio.Scanner\n\torwell1984, err = os.Open(\"../assets/1984.html\")\n\thandleError(err)\n\tvar fileReader = bufio.NewScanner(orwell1984)\n\tvar bbCount = 0\n\tvar lines = []string{}\n\tfor fileReader.Scan() {\n\t\tvar line = fileReader.Text()\n\t\tregex, _ := regexp.Compile(\"Big Brother|big brother\")\n\t\tvar bigBrother = regex.MatchString(line)\n\t\tif bigBrother{\n\t\t\tbbCount++\n\t\t\tlines = append(lines, line)\n\t\t}\n\t}\n\tfmt.Println(\"'Big Brother' appears\", bbCount, \"times in\", orwell1984.Name())\n\tout, err := os.OpenFile(\"BBLines.txt\", os.O_CREATE|os.O_RDWR, 0666)\n\thandleError(err)\n\t// writing to file using bufio.Writer\n\tvar scribe = bufio.NewWriter(out)\n\tfor _, line := range lines {\n\t\t_ = scribe.WriteString(line)\n\t}\n\t_ = scribe.Flush()\n\n}",
"func SetReader(r io.Reader) {\n\treader = r\n}",
"func (s *Opener) Open(name string) (f http.File, err error) {\n\tinfo := s.FileInfo\n\tinfo.name = name\n\tinfo.size = int64(len(s.Content))\n\treturn &File{\n\t\tReadSeeker: strings.NewReader(s.Content),\n\t\tinfo: info,\n\t}, nil\n}",
"func (lf *localFile) Reader() (io.ReadCloser, error) {\n\tif lf.matcher != nil && lf.matcher.Gzip {\n\t\t// We've got the gzipped contents cached in gzipped.\n\t\t// Note: we can't use lf.gzipped directly as a Reader, since we it discards\n\t\t// data after it is read, and we may read it more than once.\n\t\treturn io.NopCloser(bytes.NewReader(lf.gzipped.Bytes())), nil\n\t}\n\t// Not expected to fail since we did it successfully earlier in newLocalFile,\n\t// but could happen due to changes in the underlying filesystem.\n\treturn lf.fs.Open(lf.NativePath)\n}",
"func (p *TBufferedReadTransport) Open() error {\n\treturn nil\n}",
"func (s *mockFSServer) Reader(stream proto.FileSystem_ReaderServer) error {\n\tfor {\n\t\t_, err := stream.Recv()\n\t\tif err != nil {\n\t\t\ts.lock.Lock()\n\t\t\tdefer s.lock.Unlock()\n\t\t\ts.readOpen = false\n\t\t\treturn err\n\t\t}\n\n\t\ts.lock.Lock()\n\t\ts.readOpen = true\n\t\ts.lock.Unlock()\n\t}\n}",
"func (ctl *Control) reader() {\n\txl := ctl.xl\n\tdefer func() {\n\t\tif err := recover(); err != nil {\n\t\t\txl.Error(\"panic error: %v\", err)\n\t\t\txl.Error(string(debug.Stack()))\n\t\t}\n\t}()\n\tdefer ctl.readerShutdown.Done()\n\tdefer close(ctl.closedCh)\n\n\tencReader := crypto.NewReader(ctl.conn, []byte(ctl.clientCfg.Token))\n\tfor {\n\t\tm, err := msg.ReadMsg(encReader)\n\t\tif err != nil {\n\t\t\tif err == io.EOF {\n\t\t\t\txl.Debug(\"read from control connection EOF\")\n\t\t\t\treturn\n\t\t\t}\n\t\t\txl.Warn(\"read error: %v\", err)\n\t\t\tctl.conn.Close()\n\t\t\treturn\n\t\t}\n\t\tctl.readCh <- m\n\t}\n}",
"func NewReader(r io.Reader) *csv.Reader",
"func (mcm *MinioChunkManager) Reader(ctx context.Context, filePath string) (FileReader, error) {\n\treader, err := mcm.getMinioObject(ctx, mcm.bucketName, filePath, minio.GetObjectOptions{})\n\tif err != nil {\n\t\tlog.Warn(\"failed to get object\", zap.String(\"bucket\", mcm.bucketName), zap.String(\"path\", filePath), zap.Error(err))\n\t\treturn nil, err\n\t}\n\treturn reader, nil\n}",
"func (c *Chunk) Reader(rootPath *string) (file *os.File, err error) {\n\tvar path string\n\tif path, err = c.Path(rootPath); err != nil {\n\t\treturn\n\t}\n\treturn os.Open(path)\n}",
"func (c *DataChannel) OpenRead(ctx context.Context, ptransformID string, instID instructionID) io.ReadCloser {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tcid := clientID{ptransformID: ptransformID, instID: instID}\n\tif c.readErr != nil {\n\t\tlog.Errorf(ctx, \"opening a reader %v on a closed channel\", cid)\n\t\treturn &errReader{c.readErr}\n\t}\n\treturn c.makeReader(ctx, cid)\n}",
"func readAndWrite() {\n\tfileRead, err := os.Open(\"test.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer fileRead.Close()\n\n\tfmt.Println(fileRead)\n\tfmt.Println(reflect.TypeOf(fileRead)) // *os.File\n\n\tfileOpen, err := os.Create(\"test2.txt\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer fileOpen.Close()\n\n\t// slice를 생성하기 위해 make 내장함수 사용, 두번째 인자로 길이설정\n\tbuff := make([]byte, 30)\n\n\tfor {\n\t\t// file 읽기\n\t\tcnt, err := fileRead.Read(buff)\n\t\tif err == io.EOF {\n\t\t\tbreak // file의 끝까지 다읽었을경우 반복문 break\n\t\t}\n\n\t\t// byte to string\n\t\tstr := string(buff[:cnt])\n\t\t// string to byte\n\t\tnewBuff := []byte(str)\n\n\t\tfmt.Println(str, newBuff)\n\n\t\t// file 쓰기\n\t\t_, err = fileOpen.Write(buff[:cnt])\n\t\tif err != nil {\n\t\t\tpanic(err) // 현재 함수를 즉시 멈추고 현재 함수에 defer 함수들을 모두 실행한 후 즉시 리턴\n\t\t}\n\t}\n}",
"func (p *Book) Open(n string) (io.ReadCloser, error) {\n\treturn p.open(p.filename(n))\n}",
"func Reader() io.Reader {\n\treturn new(reader)\n}",
"func (l *InMemoryLog) Open(path string) error {\n\tl.init()\n\treturn nil\n}",
"func (fs *OneFile) Open(id string) error {\n\treturn nil\n}",
"func (o *ODirectReader) Close() error {\n\tif o.bufp != nil {\n\t\tif o.SmallFile {\n\t\t\tODirectPoolSmall.Put(o.bufp)\n\t\t} else {\n\t\t\tODirectPoolLarge.Put(o.bufp)\n\t\t}\n\t\to.bufp = nil\n\t\to.buf = nil\n\t}\n\to.err = errors.New(\"internal error: ODirectReader Read after Close\")\n\treturn o.File.Close()\n}",
"func (s *ScopedDataManager) OpenRead(ctx context.Context, id exec.StreamID) (io.ReadCloser, error) {\n\tch, err := s.open(ctx, id.Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ch.OpenRead(ctx, id.PtransformID, s.instID), nil\n}",
"func (m *MinTerm) getReadWriter() io.ReadWriter {\n\treturn WindowsReadWriter{r: m.termIn, w: logger.OutputWriterFromFile(m.termOut)}\n}",
"func (c *Command) open() {\n\tif len(c.parsed) <= 1 {\n\t\treturn\n\t}\n\tc.openFile(c.parsed[1:])\n\tc.done()\n}",
"func (r *Reader) Close() error {\n\treturn nil\n}",
"func (r *Reader) Close() error {\n\treturn nil\n}",
"func newReader(r io.Reader) *bufio.Reader {\n\t// TODO(nickng): use sync.Pool to reduce allocation per new connection.\n\treturn bufio.NewReader(r)\n}",
"func (reqParams *ReqParams) doReader() (io.ReadCloser, error) {\n\tresp, err := reqParams.do()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := reqParams.checkResp(resp); err != nil {\n\t\tresp.Body.Close()\n\t\treturn nil, err\n\t}\n\treturn resp.Body, nil\n}",
"func (js *jsonfileSessionRepository) Open(name string) error {\n\tpath := PathFromName(name)\n\n\t// create parent dir\n\terr := os.MkdirAll(filepath.Dir(path), 0700)\n\tif err != nil {\n\t\treturn errors.Errorf(\"cannot create directory for session file '%s': %v\", path, err)\n\t}\n\n\t// create and open locked session file\n\tlFile, err := lockedfile.OpenFile(path, os.O_RDWR|os.O_CREATE, 0600)\n\tif err != nil {\n\t\treturn errors.Errorf(\"cannot open session file '%s': %v\", path, err)\n\t}\n\tjs.lFile = lFile\n\tjs.path = name\n\n\treturn nil\n}",
"func openSnapshot(file string) (reader io.ReadCloser, err error) {\n\treturn os.Open(file)\n}",
"func (h *fs) Open(filename string) (io.ReadCloser, error) {\n\treturn os.Open(filename)\n}",
"func (v *File) OpenReadWrite() (*os.File, error) {\n\treturn v.openWrite(os.O_RDWR)\n}",
"func (o *OS) Open(path string) (ReadSeekCloser, error) {\n\treturn os.Open(path)\n}",
"func (s *ScopedDataManager) OpenWrite(ctx context.Context, id exec.StreamID) (io.WriteCloser, error) {\n\tch, err := s.open(ctx, id.Port)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ch.OpenWrite(ctx, id.PtransformID, s.instID), nil\n}",
"func Open(file string) (*Reader, error) {\n\treader, err := maxminddb.Open(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdbType, err := getDBType(reader)\n\treturn &Reader{reader, dbType}, err\n}",
"func openFile(name string) *os.File {\n\tfile, err := os.OpenFile(name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed opening %s for writing: %s\", name, err)\n\t}\n\treturn file\n}",
"func (p *Path) Open() ([]byte, error) {\n\tbuf, err := ioutil.ReadFile(p.Path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn buf, nil\n}",
"func OpenReaderAt(path string) (*ReaderAt, error) {\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cannot open file %q for reader: %w\", path, err)\n\t}\n\tvar r ReaderAt\n\tr.f = f\n\tr.stopCh = make(chan struct{})\n\tif !*disableMmap {\n\t\tfi, err := f.Stat()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error in stat: %w\", err)\n\t\t}\n\t\tsize := fi.Size()\n\t\tbm := &pageCacheBitmap{\n\t\t\tm: make([]uint64, 1+size/pageSize/64),\n\t\t}\n\t\tr.pageCacheBitmap.Store(bm)\n\t\tr.pageCacheBitmapWG.Add(1)\n\t\tgo func() {\n\t\t\tdefer r.pageCacheBitmapWG.Done()\n\t\t\tpageCacheBitmapCleaner(&r.pageCacheBitmap, r.stopCh)\n\t\t}()\n\n\t\tdata, err := mmapFile(f, size)\n\t\tif err != nil {\n\t\t\tMustClose(f)\n\t\t\treturn nil, fmt.Errorf(\"cannot init reader for %q: %w\", path, err)\n\t\t}\n\t\tr.mmapData = data\n\t}\n\treadersCount.Inc()\n\treturn &r, nil\n}",
"func TarReader(tw *tar.Writer, r io.Reader, path string) error {\n\tbytes, err := ioutil.ReadAll(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn TarBuffer(tw, bytes, path)\n}",
"func write(read io.ReadCloser, filename string) error {\n\tw, err := os.Create(filename)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer w.Close()\n\tdefer read.Close()\n\n\tif _, err := io.Copy(w, read); err != nil {\n\t\treturn fmt.Errorf(\"Error copying %v: %v\", filename, err)\n\t}\n\treturn nil\n}"
] | [
"0.621754",
"0.60433674",
"0.5988976",
"0.5887348",
"0.5861999",
"0.58603525",
"0.5860061",
"0.5679637",
"0.55914617",
"0.5582982",
"0.55580986",
"0.55473787",
"0.5471372",
"0.5468143",
"0.54669327",
"0.5463507",
"0.5439068",
"0.54318225",
"0.5390883",
"0.53740877",
"0.5369977",
"0.5368732",
"0.5366346",
"0.5321005",
"0.5307885",
"0.5292611",
"0.52907896",
"0.52818125",
"0.52599347",
"0.52427197",
"0.51937604",
"0.51830024",
"0.5182684",
"0.51812464",
"0.51812243",
"0.51619565",
"0.5158386",
"0.5154594",
"0.5153357",
"0.51457465",
"0.51396185",
"0.5129166",
"0.5127516",
"0.51172847",
"0.51166236",
"0.51054466",
"0.5101413",
"0.509292",
"0.50914925",
"0.5086804",
"0.5084907",
"0.5062813",
"0.5043216",
"0.5039641",
"0.5033651",
"0.50266206",
"0.5006917",
"0.50007087",
"0.4993682",
"0.49899593",
"0.49888885",
"0.49810374",
"0.49806663",
"0.497968",
"0.49713743",
"0.4969187",
"0.49684423",
"0.49622947",
"0.49617717",
"0.4961339",
"0.496077",
"0.495579",
"0.49541152",
"0.4954011",
"0.49492833",
"0.4944842",
"0.4944785",
"0.49440375",
"0.49427712",
"0.4940428",
"0.4931166",
"0.49302378",
"0.4925343",
"0.4924238",
"0.49236774",
"0.49236774",
"0.49201807",
"0.49158007",
"0.49090764",
"0.49053675",
"0.4905055",
"0.490501",
"0.48970035",
"0.489511",
"0.48947373",
"0.48938513",
"0.48934728",
"0.48897517",
"0.48880485",
"0.48855132"
] | 0.67128825 | 0 |
GetData => makes an HTTP call to a dummy service and returns data or an error | func GetData() (m MyIP, err error) {
	resp, err := http.Get(url)
	if err != nil {
		return m, err
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusBadRequest {
		return m, errors.New("400 Bad Request")
	}
	// Read the body and decode it into the MyIP value.
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return m, err
	}
	err = json.Unmarshal(body, &m)
	return m, err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *DefaultApiController) DataGet(w http.ResponseWriter, r *http.Request) {\n\tresult, err := c.service.DataGet(r.Context())\n\t//If an error occured, encode the error with the status code\n\tif err != nil {\n\t\tEncodeJSONResponse(err.Error(), &result.Code, w)\n\t\treturn\n\t}\n\t//If no error, encode the body and the result code\n\tEncodeJSONResponse(result.Body, &result.Code, w)\n\n}",
"func HTTPServiceGetData(urlPath, payload, contentType string) ([]byte, error) {\n\treturn CheckHTTPResponse(HTTPQuery(CreateServiceHTTPDataRequest(\"GET\", urlPath, payload, contentType)))\n}",
"func GetData(url string, structToFill interface{}) error {\n\tlog.Printf(\"Getting data from URL: %s...\", url)\n\t// Make an HTTP client so we can add custom headers (currently used for adding in the Bearer token for inter-microservice communication)\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = setToken(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif req == nil {\n\t\tfmt.Printf(\"Alert! req is nil!\")\n\t}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Printf(\"Error on request: %s\", err.Error())\n\t\treturn err\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\terrorString, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn errors.New(string(errorString))\n\t}\n\n\terr = json.Unmarshal(b, structToFill)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Done.\")\n\treturn nil\n}",
"func (handlersImpl WeatherHandlersImpl) GetData(w http.ResponseWriter, req *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\n\tctx := req.Context()\n\n\tvars := mux.Vars(req)\n\tcityName := vars[\"cityName\"]\n\tresp, err := handlersImpl.svc.GetData(ctx, cityName)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n}",
"func (t *TestRuntime) GetData(url string) (io.ReadCloser, error) {\n\treturn t.request(\"GET\", url, nil)\n}",
"func TestGetDataFromUrlNon200HttpCode(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(201).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, ioutil.ReadAll)\n\n\tassert.Error(t, err)\n}",
"func (client *RestClient) Get(data interface{}) error {\n\trestTD, ok := data.(*RestTestData)\n\tif !ok {\n\t\tTestLog.Fatalf(\"Fail to convert data to RestTestData\")\n\t}\n\n\turl := client.prefix + restTD.URI\n\tTestLog.Logf(\"GET URI=%s \\n\", url)\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(client.token) > 0 {\n\t\treq.Header.Set(\"Authorization\", \"Bearer \"+client.token)\n\t} else if len(client.userName) > 0 {\n\t\treq.SetBasicAuth(client.userName, client.userPwd)\n\t}\n\n\tresp, err := client.httpClient.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trestTD.actualStatus = resp.StatusCode\n\tTestLog.Logf(\"GET output=%v\", resp)\n\n\tdefer resp.Body.Close()\n\tif restTD.OutputBody != nil {\n\t\tbody, _ := ioutil.ReadAll(resp.Body)\n\t\tTestLog.Logf(\"GET Output body=%s\", body)\n\t\tif err := json.Unmarshal(body, restTD.OutputBody); err != nil {\n\t\t\tTestLog.Logf(\"Fail to unmarshal JSON format, err=%s\", err.Error())\n\t\t\trestTD.OutputBody = string(body)\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn nil\n}",
"func (a *RequestServiceApiService) GetDataExecute(r ApiGetDataRequest) (JsonSignedData, *_nethttp.Response, GenericOpenAPIError) {\n\tvar (\n\t\tlocalVarHTTPMethod = _nethttp.MethodGet\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFormFileName string\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\texecutionError GenericOpenAPIError\n\t\tlocalVarReturnValue JsonSignedData\n\t)\n\n\tlocalBasePath, err := a.client.cfg.ServerURLWithContext(r.ctx, \"RequestServiceApiService.GetData\")\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarPath := localBasePath + \"/requests/{uuid}/data\"\n\tlocalVarPath = strings.Replace(localVarPath, \"{\"+\"uuid\"+\"}\", _neturl.PathEscape(parameterToString(r.uuid, \"\")), -1)\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := _neturl.Values{}\n\tlocalVarFormParams := _neturl.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHTTPContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHTTPContentType := selectHeaderContentType(localVarHTTPContentTypes)\n\tif localVarHTTPContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHTTPContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHTTPHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHTTPHeaderAccept := selectHeaderAccept(localVarHTTPHeaderAccepts)\n\tif localVarHTTPHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHTTPHeaderAccept\n\t}\n\tif r.authorization != nil {\n\t\tlocalVarHeaderParams[\"Authorization\"] = parameterToString(*r.authorization, \"\")\n\t}\n\treq, err := a.client.prepareRequest(r.ctx, localVarPath, localVarHTTPMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFormFileName, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, nil, executionError\n\t}\n\n\tlocalVarHTTPResponse, err := a.client.callAPI(req)\n\tif err != nil || localVarHTTPResponse == nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tlocalVarBody, err := _ioutil.ReadAll(localVarHTTPResponse.Body)\n\tlocalVarHTTPResponse.Body.Close()\n\tlocalVarHTTPResponse.Body = _ioutil.NopCloser(bytes.NewBuffer(localVarBody))\n\tif err != nil {\n\t\texecutionError.error = err.Error()\n\t\treturn localVarReturnValue, localVarHTTPResponse, executionError\n\t}\n\n\tif localVarHTTPResponse.StatusCode >= 300 {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHTTPResponse.Status,\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHTTPResponse.Header.Get(\"Content-Type\"))\n\tif err != nil {\n\t\tnewErr := GenericOpenAPIError{\n\t\t\tbody: localVarBody,\n\t\t\terror: err.Error(),\n\t\t}\n\t\treturn localVarReturnValue, localVarHTTPResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHTTPResponse, executionError\n}",
"func (a *RequestServiceApiService) GetData(ctx _context.Context, uuid string) ApiGetDataRequest {\n\treturn ApiGetDataRequest{\n\t\tApiService: a,\n\t\tctx: ctx,\n\t\tuuid: uuid,\n\t}\n}",
"func TestGetDataFromUrlSuccessful(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"https://example.com\"\n\tapiPath := \"status\"\n\texpectedResponse := \"anything\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(200).\n\t\tBodyString(expectedResponse)\n\n\tactualResponse, err := getDataFromURL(apiUrl+\"/\"+apiPath, ioutil.ReadAll)\n\n\tassert.Equal(t, []byte(expectedResponse), actualResponse)\n\tassert.NoError(t, err)\n}",
"func get(data interface{}) {\n\n\t//GET API for fetching only GGUS alerts.\n\tapiurl := configJSON.CMSMONURL + \"/api/v1/alerts?active=true&silenced=false&inhibited=false&unprocessed=false\"\n\n\treq, err := http.NewRequest(\"GET\", apiurl, nil)\n\treq.Header.Add(\"Accept-Encoding\", \"identity\")\n\treq.Header.Add(\"Accept\", \"application/json\")\n\tif configJSON.Token != \"\" {\n\t\ttoken = TokenManager.ReadToken(configJSON.Token)\n\t\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"bearer %s\", token))\n\t}\n\n\ttimeout := time.Duration(configJSON.httpTimeout) * time.Second\n\tclient := &http.Client{Timeout: timeout}\n\tif token != \"\" {\n\t\trootCAs, err := TokenManager.LoadCAs(configJSON.Verbose)\n\t\tif err != nil {\n\t\t\tlog.Println(\"unable to load CERN ROOT CAs\", err)\n\t\t\treturn\n\t\t}\n\t\ttr, err := TokenManager.Transport(rootCAs, configJSON.Verbose)\n\t\tif err != nil {\n\t\t\tlog.Println(\"unable to initialize HTTP Transport\", err)\n\t\t\treturn\n\t\t}\n\t\tclient = &http.Client{Transport: tr}\n\t}\n\n\tif configJSON.Verbose > 1 {\n\t\tlog.Println(\"URL\", apiurl)\n\t\tdump, err := httputil.DumpRequestOut(req, true)\n\t\tif err == nil {\n\t\t\tlog.Println(\"Request: \", string(dump))\n\t\t}\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer resp.Body.Close()\n\n\tbyteValue, err := io.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\tlog.Printf(\"Unable to read %s JSON Data from AlertManager GET API, error: %v\\n\", service, err)\n\t\treturn\n\t}\n\n\terr = json.Unmarshal(byteValue, &data)\n\tif err != nil {\n\t\tif configJSON.Verbose > 0 {\n\t\t\tlog.Println(string(byteValue))\n\t\t}\n\t\tlog.Fatalf(\"Unable to parse %s JSON Data from AlertManager GET API, error: %v\\n\", service, err)\n\t}\n\n\tif configJSON.Verbose > 1 {\n\t\tdump, err := httputil.DumpResponse(resp, true)\n\t\tif err == nil {\n\t\t\tlog.Println(\"Response: \", string(dump))\n\t\t}\n\t}\n\n}",
"func GetjsonData(url string) (HttpStatusCode int, res []byte, er error) {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlog.Println(\"[E]\", r.(error))\n\t\t}\n\t}()\n\thttpOrhttps := strings.Split(url, \":\")\n\tswitch httpOrhttps[0] {\n\tcase \"https\":\n\t\ttr := &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t\t}\n\t\tclient := &http.Client{Transport: tr}\n\t\tresp, err := client.Get(url)\n\t\tif err != nil {\n\t\t\ter = err\n\t\t\treturn 500, []byte(\"\"), er\n\t\t}\n\n\t\tWritelog(\"D\", \"get url\"+url)\n\t\tHttpStatusCode = resp.StatusCode\n\t\tdefer resp.Body.Close()\n\t\tif HttpStatusCode == 200 {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\ter = err\n\t\t\t\treturn 500, []byte(\"\"), er\n\t\t\t}\n\t\t\tres = body\n\t\t}\n\t\treturn HttpStatusCode, res, nil\n\n\tcase \"http\":\n\t\tresp, err := http.Get(url)\n\t\tif err != nil {\n\t\t\treturn 500, []byte(\"\"), err\n\t\t}\n\t\tWritelog(\"D\", \"get url\"+url)\n\n\t\tHttpStatusCode = resp.StatusCode\n\t\tdefer resp.Body.Close()\n\t\tif HttpStatusCode == 200 {\n\t\t\tbody, err := ioutil.ReadAll(resp.Body)\n\t\t\tif err != nil {\n\t\t\t\treturn 500, []byte(\"\"), err\n\t\t\t}\n\t\t\tres = body\n\t\t}\n\t}\n\treturn HttpStatusCode, res, nil\n\n}",
"func TestGetData(t *testing.T) {\n\n\tgaTemp := new(GAData)\n\n\t// initialise GAData object\n\tgaTemp.Init()\n\n\ttestRequest := GaRequest{\"ga:23949588\",\n\t\t\"2014-01-01\",\n\t\t\"2014-01-02\",\n\t\t\"ga:visits\",\n\t\t\"ga:day\",\n\t\t\"\",\n\t\t\"\",\n\t\t\"\",\n\t\t100,\n\t\t5}\n\n\tresult := gaTemp.GetData(1, &testRequest)\n\tlog.Println(result)\n}",
"func getData() {\n\n\tfor range time.Tick(time.Millisecond * UPDATE_INTERVAL) {\n\t\t// Build the request\n\t\treq, err := http.NewRequest(\"GET\", URL, nil)\n\t\tif err != nil {\n\t\t\tData <- nil\n\t\t\tcontinue\n\t\t}\n\n\t\tclient := &http.Client{}\n\n\t\tresp, err := client.Do(req)\n\t\tif err != nil {\n\t\t\tData <- nil\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp.StatusCode != 200 { // OK\n\t\t\tData <- nil\n\t\t\tcontinue\n\t\t}\n\n\t\tvar response Response\n\t\tdecoder := json.NewDecoder(resp.Body)\n\t\terr = decoder.Decode(&response)\n\t\tData <- &response\n\t\tresp.Body.Close()\n\t}\n\tStop <- true\n}",
"func (c *DefaultApiController) DataDataIdGet(w http.ResponseWriter, r *http.Request) {\n\tparams := mux.Vars(r)\n\tdataId, err := parseInt32Parameter(params[\"dataId\"])\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tresult, err := c.service.DataDataIdGet(r.Context(), dataId)\n\t//If an error occured, encode the error with the status code\n\tif err != nil {\n\t\tEncodeJSONResponse(err.Error(), &result.Code, w)\n\t\treturn\n\t}\n\t//If no error, encode the body and the result code\n\tEncodeJSONResponse(result.Body, &result.Code, w)\n\n}",
"func GetData(base, path string) ([]byte, error) {\n\t// first we're going to make a call using v2 api using tokens\n\tdata, err := GetDataV2(base, path)\n\tif err == nil {\n\t\treturn data, nil\n\t}\n\tlog.Printf(\"failed to obtain metadata using v2 api: %v\\n\", err)\n\t// next, we'll try using v1 api\n\tdata, err = GetDataV1(base, path)\n\tif err != nil {\n\t\tlog.Printf(\"failed to obtain metadata using v1 api: %v\\n\", err)\n\t}\n\treturn data, err\n}",
"func (api *api) doGet(url string) (result []byte, err error) {\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlog.Error(\"this shouldn't happen: %v\", err)\n\t\treturn nil, err\n\t}\n\tresp, err := api.client.Do(req)\n\tif err != nil {\n\t\tlog.Debug(\"got error response for URL %s: %v\", url, err)\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tstatusCode, status := resp.StatusCode, resp.Status\n\tif statusCode != 200 {\n\t\treturn nil, fmt.Errorf(\"IB returned an error: %s: %s\", status, url)\n\t}\n\n\tresult, err = ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error reading response body: %v: %s\", err, url)\n\t}\n\n\tlog.Trace(\"%s : SUCCESS\", url)\n\treturn result, nil\n}",
"func GetData(c *gin.Context) {\r\n\tvar predict []Models.Predict\r\n\terr := Models.GetAllData(&predict)\r\n\tif err != nil {\r\n\t\tc.AbortWithStatus(http.StatusNotFound)\r\n\t} else {\r\n\t\tc.JSON(http.StatusOK, predict)\r\n\t}\r\n}",
"func getXeAPIData(from string, wg *sync.WaitGroup) (model.XEResponse, error) {\n\turl := config.GoDotEnvVariable(\"URL\")\n\tusername := config.GoDotEnvVariable(\"USERNAME\")\n\tpassword := config.GoDotEnvVariable(\"PASSWORD\")\n\txeResponseData := model.XEResponse{}\n\n\treq, requestErr := http.NewRequest(\"GET\", url, nil)\n\tif requestErr != nil {\n\t\treturn xeResponseData, requestErr\n\t}\n\n\tq := req.URL.Query()\n\tq.Add(\"to\", \"*\")\n\tq.Add(\"from\", from)\n\n\treq.URL.RawQuery = q.Encode()\n\treq.SetBasicAuth(username, password)\n\t// resp, clientErr := RequestForAPI(req)\n\t// if clientErr != nil {\n\t// \tlogger.WithField(\"read http response error\", clientErr.Error()).Error(\"read response failed\")\n\t// \treturn xeResponseData, clientErr\n\t// }\n client := &http.Client{}\n\n\tclient = &http.Client{\n\t\tTimeout: time.Second * time.Duration(1500),\n\t}\n\tresp, clientErr := client.Do(req)\n\tif clientErr != nil {\n\t\tlogger.WithField(\"error from api\", clientErr.Error()).Error(\"Get Request Failed\")\n\t\treturn xeResponseData, clientErr\n\t}\n\tif resp.StatusCode != 200 {\n\t\tlogger.WithField(\"error from api\", resp).Error(\"Get Request Failed\")\n\t\treturn xeResponseData, clientErr\n\t}\n\tbodyText, readErr := ioutil.ReadAll(resp.Body)\n\tif readErr != nil {\n\t\tlogger.WithField(\"read http response error\", readErr.Error()).Error(\"read response failed\")\n\t\treturn xeResponseData, readErr\n\t}\n\n\tunmarshalErr := json.Unmarshal(bodyText, &xeResponseData)\n\tif unmarshalErr != nil {\n\t\tlogger.WithField(\"unmarshal error\", unmarshalErr.Error()).Error(\"Unmarshal Failed\")\n\t\treturn xeResponseData, unmarshalErr\n\t}\n\n\tdefer resp.Body.Close()\n\t// logger.WithField(\"Total time taken for get api execution:\", requestElapsed).Info(\"Exec Time\")\n\t// call for update currency rate to all exchange rates\n\treturn xeResponseData, nil\n}",
"func (s APIv1) Do(req *http.Request, data interface{}, checkStatus bool) (rep *http.Response, err error) {\n\tif rep, err = s.client.Do(req); err != nil {\n\t\treturn rep, fmt.Errorf(\"could not execute request: %s\", err)\n\t}\n\tdefer rep.Body.Close()\n\n\t// Detect errors if they've occurred\n\tif checkStatus {\n\t\tif rep.StatusCode < 200 || rep.StatusCode >= 300 {\n\t\t\treturn rep, fmt.Errorf(\"[%d] %s\", rep.StatusCode, rep.Status)\n\t\t}\n\t}\n\n\t// Check the content type to ensure data deserialization is possible\n\tif ct := rep.Header.Get(\"Content-Type\"); ct != \"application/json; charset=utf-8\" {\n\t\treturn rep, fmt.Errorf(\"unexpected content type: %q\", ct)\n\t}\n\n\t// Deserialize the JSON data from the body\n\tif data != nil && rep.StatusCode >= 200 && rep.StatusCode < 300 {\n\t\tif err = json.NewDecoder(rep.Body).Decode(data); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"could not deserialize response data: %s\", err)\n\t\t}\n\t}\n\n\treturn rep, nil\n}",
"func fetchData(client *http.Client, url, format string) ([]byte, error) {\n\tif format == \"json\" {\n\t\tformat = \"application/json\"\n\t} else if format == \"xml\" {\n\t\tformat = \"application/xml\"\n\t}\n\n\t// search in-memory store\n\tif CacheOn {\n\t\tvalue, found := Memory.Get(url)\n\t\tif found {\n\t\t\treturn value.([]byte), nil\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Accept\", format)\n\treq.Header.Set(\"User-Agent\", \"\")\n\n\tr, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\n\tif r.StatusCode >= 400 {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.New(string(body))\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// save data to in-memory store\n\tif CacheOn {\n\t\tMemory.Set(url, data, cache.DefaultExpiration)\n\t}\n\n\treturn data, nil\n}",
"func callService(from string, wg *sync.WaitGroup) {\n\tresponseData, apiErr := getXeAPIData(from, wg)\n\tif apiErr != nil {\n\t\tlogger.WithField(\"get XE API data error:\", apiErr.Error()).Info(\"Get API data failed\")\n\t\tlog.Panic(apiErr)\n\t}\n\t// prepared query values with parametrs\n\tqueryValues := make([]string, 0, len(responseData.To))\n\tqueryParams := make([]interface{}, 0, len(responseData.To)*5)\n\tfor _, toValue := range responseData.To {\n\t\tqueryValues = append(queryValues, \"(?, ?, ?, ?, ?)\")\n\t\tqueryParams = append(queryParams, responseData.Amount, toValue.Mid, responseData.From, toValue.Quotecurrency, responseData.Timestamp)\n\t}\n\t// update responsedata to db\n\tdbErr := db.UpdateResponseData(responseData, queryValues, queryParams)\n\tif dbErr != nil {\n\t\tlogger.WithField(\"update data to database error:\", apiErr.Error()).Info(\"Updating to database failed\")\n\t\tlog.Panic(apiErr)\n\t}\n\twg.Done()\n}",
"func (ctrl *HomeController) GetData(ctx *gin.Context) {\n\thomeLogic := logic.HomeLogic{}\n\thomeLogic.SetCtx(ctx.Request.Context())\n\n\tdata := homeLogic.GetData()\n\n\tctx.JSON(HTTPSuccess, gin.H{\n\t\t\"code\": 0,\n\t\t\"message\": \"ok\",\n\t\t\"data\": data,\n\t})\n}",
"func TestGetDataFromUrlError(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(302).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, ioutil.ReadAll)\n\n\tassert.Error(t, err)\n}",
"func (client ProxyMetricClient) GetData() (data []byte, err error) {\n\tconst generalScopeErr = \"error making a server request to get the metrics from remote endpoint\"\n\thttpClient := &http.Client{}\n\tvar resp *http.Response\n\t{\n\t\tsuccessResponse := false\n\t\tdefer func(startTime time.Time) {\n\t\t\tduration := time.Since(startTime).Seconds()\n\t\t\tlabels := []string{client.jobName, client.instanceName}\n\t\t\tif successResponse {\n\t\t\t\tclient.defFordwaderMetrics.FordwaderResponseDuration.WithLabelValues(labels...).Set(duration)\n\t\t\t}\n\t\t}(time.Now().UTC())\n\t\tif resp, err = httpClient.Do(client.req); err != nil {\n\t\t\tlog.WithFields(log.Fields{\"err\": err, \"req\": client.req}).Errorln(\"no success response\")\n\t\t\terrCause := fmt.Sprintln(\"can not do the request: \", err.Error())\n\t\t\treturn nil, util.ErrorFromThisScope(errCause, generalScopeErr)\n\t\t}\n\t\tif resp.StatusCode != http.StatusOK {\n\t\t\tlog.WithFields(log.Fields{\"status\": resp.Status, \"req\": client.req}).Errorln(\"no success response\")\n\t\t\terrCause := fmt.Sprintf(\"no success response, status %s\", resp.Status)\n\t\t\treturn nil, util.ErrorFromThisScope(errCause, generalScopeErr)\n\t\t}\n\t\tsuccessResponse = true\n\t}\n\tdefer resp.Body.Close()\n\tvar reader io.ReadCloser\n\tvar isGzipContent = false\n\tdefer func() {\n\t\tif isGzipContent {\n\t\t\treader.Close()\n\t\t}\n\t}()\n\tswitch resp.Header.Get(\"Content-Encoding\") {\n\tcase \"gzip\":\n\t\tisGzipContent = true\n\t\treader, err = gzip.NewReader(resp.Body)\n\t\tif err != nil {\n\t\t\terrCause := fmt.Sprintln(\"can not create gzip reader.\", err.Error())\n\t\t\treturn nil, util.ErrorFromThisScope(errCause, generalScopeErr)\n\t\t}\n\t\tdefer reader.Close()\n\tdefault:\n\t\treader = resp.Body\n\t}\n\t// FIXME([email protected]): write an integration test for plain text and compressed content\n\tif data, err = ioutil.ReadAll(reader); err != nil {\n\t\terrCause := fmt.Sprintln(\"can not read the body: \", err.Error())\n\t\treturn nil, util.ErrorFromThisScope(errCause, generalScopeErr)\n\t}\n\treturn data, nil\n}",
"func GetData(w http.ResponseWriter, r *http.Request) {\n\tfrom := r.URL.Query().Get(\"from\")\n\tif from == \"\" {\n\t\tfrom = fmt.Sprintf(\"%d\", time.Now().Add(-10*time.Minute).UnixNano()/1000000000)\n\t}\n\tr.Body.Close()\n\tfromI, err := strconv.ParseInt(from, 10, 64)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"BAD 'from' parameter\"))\n\t\treturn\n\t}\n\twindow := r.URL.Query().Get(\"window\")\n\tif window == \"\" {\n\t\twindow = \"300\"\n\t}\n\twindowI, err := strconv.ParseInt(window, 10, 64)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tw.Write([]byte(\"BAD 'window' parameter\"))\n\t\treturn\n\t}\n\trv, err := qei.GetData(time.Unix(fromI, 0), time.Duration(windowI)*time.Second)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(err.Error()))\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(rv.JsonBytes())\n\tr.Body.Close()\n}",
"func GetServiceData(operation, name string) ([]byte, error) {\n\trequest := LookupInfoRequest{operation, name}\n\tbuffer := make([]byte, PACKET_SIZE)\n\n\taddress, err := GetRegistryAddress()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconnection, err := net.DialTCP(TCP_PROTOCOL, nil, address)\n\tif err != nil {\n\t\tregistryAddress = nil\n\t\taddress, err = GetRegistryAddress()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t\n\t\tconnection, err = net.DialTCP(TCP_PROTOCOL, nil, address)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tdefer connection.Close()\n\n\tconnection.SetReadDeadline(time.Now().Add(time.Second * 4))\n\tbytes, err := json.Marshal(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t_, err = connection.Write(bytes)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlength, err := connection.Read(buffer)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn buffer[:length], nil\n}",
"func GetDataHandler(w http.ResponseWriter, r *http.Request) {\n\n}",
"func GetData(url string) []byte {\n\tlog.Println(\"getting data from URL: \", url)\n\n\tvar body []byte\n\n\t// define client with timeout of 10 seconds\n\tvar netClient = &http.Client{\n\t\tTimeout: time.Second * 10,\n\t}\n\n\tresp, err := netClient.Get(url)\n\tdefer resp.Body.Close()\n\tif err != nil{\n\t\tlog.Println(err)\n\t\treturn []byte(\"\")\n\t}\n\n\t// check for gzip data, unzip if needed\n\tif strings.Contains(url, \".gz\") {\n\t\tlog.Println(\"content encoded with gzip\")\n\t\tbody = GUnzip(resp.Body)\n\t} else {\n\t\tlog.Println(\"content not encoded\")\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tcheck(err)\n\t}\n\n\tlog.Println(\"data received\")\n\treturn body\n}",
"func UpdateGetData(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-type\", \"application/json\")\n\tjsonData, ret := client.ConsultExternalService()\n\tvar apiError model.ApiError\n\tif ret != apiError {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\tjson.NewEncoder(w).Encode(ret)\n\t\tlog.Println(\"[Error] Error consulting external service,\", ret)\n\t} else {\n\t\tupdatefile.UpdateFile(jsonData)\n\t\tw.WriteHeader(http.StatusOK)\n\t\tvar success model.UpdateOk\n\t\tsuccess.Message = \"Sucessfully retrieved more Jokes\"\n\t\tjson.NewEncoder(w).Encode(success)\n\t\tlog.Println(\"[Info] Successfully retrieved more Jokes\")\n\t}\n\n}",
"func TestGetDataAndReturnResponse(t *testing.T) {\n\tdata := getDataAndReturnResponse()\n\tif data.Message != \"hello world\" {\n\t\tt.Errorf(\"Expected string 'hello world' but received: '%s'\", data)\n\t}\n}",
"func GetService(nbmaster string, httpClient *http.Client, jwt string, host string, hostUuid string, serviceName string) {\r\n fmt.Printf(\"\\nGet NetBackup service %s on %s...\\n\\n\", serviceName, host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/admin/hosts/\" + hostUuid + \"/services/\" + serviceName\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Content-Type\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get services\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n resp, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(resp, &obj)\r\n service := obj.(map[string]interface{})[\"data\"].(map[string]interface{})\r\n\r\n fmt.Printf(\"id status\\n\");\r\n fmt.Printf(\"============.=========\\n\");\r\n id := (service)[\"id\"]\r\n status := ((service)[\"attributes\"]).(map[string]interface{})[\"status\"]\r\n\r\n fmt.Printf(\"%-12s %s\\n\", id, status);\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n}",
"func PrometheusGetData(clusterServer, clusterCertificateAuthorityData string, clusterInsecureSkipTLSVerify bool, userClientCertificateData, userClientKeyData, userToken, userUsername, userPassword, proxy string, timeout int64, request string) (string, error) {\n\trestConfig, clientset, err := kube.NewClient(mobile.Platform).GetClient(\"\", clusterServer, clusterCertificateAuthorityData, clusterInsecureSkipTLSVerify, userClientCertificateData, userClientKeyData, userToken, userUsername, userPassword, proxy, timeout)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn shared.PrometheusGetData(restConfig, clientset, request)\n}",
"func runTestGet(t *testing.T, s *Server, textPbPath string,\n\tdatatype gnmipb.GetRequest_DataType, encoding gnmipb.Encoding,\n\twantRetCode codes.Code, wantRespVal interface{}, useModels []*gnmipb.ModelData) {\n\t// Send request\n\tvar pbPath gnmipb.Path\n\tif err := proto.UnmarshalText(textPbPath, &pbPath); err != nil {\n\t\tt.Fatalf(\"error in unmarshaling path: %v\", err)\n\t}\n\treq := &gnmipb.GetRequest{\n\t\tPath: []*gnmipb.Path{&pbPath},\n\t\tType: datatype,\n\t\tEncoding: encoding,\n\t\tUseModels: useModels,\n\t}\n\tt.Log(\"req:\", req)\n\tresp, err := s.Get(context.Background(), req)\n\tt.Log(\"resp:\", resp)\n\n\t// Check return code\n\tif status.Code(err) != wantRetCode {\n\t\tt.Fatalf(\"got return code %v, want %v\", status.Code(err), wantRetCode)\n\t}\n\n\t// Check response value\n\tvar gotVal interface{}\n\tif resp != nil {\n\t\tnotifs := resp.GetNotification()\n\t\tif len(notifs) != 1 {\n\t\t\tt.Fatalf(\"got %d notifications, want 1\", len(notifs))\n\t\t}\n\t\tupdates := notifs[0].GetUpdate()\n\t\tif len(updates) != 1 {\n\t\t\tt.Fatalf(\"got %d updates in the notification, want 1\", len(updates))\n\t\t}\n\t\tval := updates[0].GetVal()\n\t\tif val == nil {\n\t\t\treturn\n\t\t}\n\n\t\tvar jsonbytes []byte\n\t\tswitch {\n\t\tcase val.GetJsonIetfVal() != nil:\n\t\t\tjsonbytes = val.GetJsonIetfVal()\n\t\tcase val.GetJsonVal() != nil:\n\t\t\tjsonbytes = val.GetJsonVal()\n\t\t}\n\n\t\tif len(jsonbytes) == 0 {\n\t\t\tgotVal, err = value.ToScalar(val)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"got: %v, want a scalar value\", gotVal)\n\t\t\t}\n\t\t} else {\n\t\t\t// Unmarshal json data to gotVal container for comparison\n\t\t\tif err := json.Unmarshal(jsonbytes, &gotVal); err != nil {\n\t\t\t\tt.Fatalf(\"error in unmarshaling JSON data to json val: %v\", err)\n\t\t\t}\n\t\t\tvar wantJSONStruct interface{}\n\t\t\tif err := json.Unmarshal([]byte(wantRespVal.(string)), &wantJSONStruct); err != nil {\n\t\t\t\tt.Fatalf(\"error in unmarshaling JSON data to json val: %v\", err)\n\t\t\t}\n\t\t\twantRespVal = wantJSONStruct\n\t\t}\n\t}\n\n\tif !reflect.DeepEqual(gotVal, wantRespVal) {\n\t\tt.Errorf(\"got: %v (%T),\\nwant %v (%T)\", gotVal, gotVal, wantRespVal, wantRespVal)\n\t}\n}",
"func (c *Client) GetData (options DataOptions) (Data, error) {\n\tvar (\n\t\tresp []byte\n\t\terr error\n\t\td Data\n\t\treqOptions = make(map[string]interface{})\n\t\tjsonBody []byte\n\t)\n\t\t\n\n\tjsonBody, err = json.Marshal(options)\n\tif err != nil {\n\t\treturn d, err\n\t}\n\t\n\treqOptions[\"body\"] = bytes.NewReader(jsonBody)\n\n\tresp, err = c.Request(\"POST\", DataEndpoint, reqOptions)\n\tif err != nil {\n\t\treturn d, err\n\t}\n\n\terr = json.Unmarshal(resp, &d)\n\tif err != nil {\n\t\treturn d, err\n\t}\n\treturn d, nil\n}",
"func (c *C2Default) htmlGetData(url string, body []byte) []byte {\n\t//log.Println(\"Sending HTML GET request to url: \", url)\n\tclient := &http.Client{\n\t\tTimeout: 30 * time.Second,\n\t}\n\tvar respBody []byte\n\n\tfor true {\n\t\tif len(c.Key) > 0 && len(body) > 0 {\n\t\t\tbody = c.encryptMessage(body) // Encrypt and then encapsulate the task request\n\t\t}\n\t\tencapbody := append([]byte(c.ApfellID), body...) // Prepend the UUID to the body of the request\n\t\tencbody := base64.StdEncoding.EncodeToString(encapbody) // Base64 the body\n\n\t\treq, err := http.NewRequest(\"GET\", url, bytes.NewBuffer([]byte(encbody)))\n\t\tif err != nil {\n\t\t\t//time.Sleep(time.Duration(c.SleepInterval()) * time.Second)\n\t\t\ttime.Sleep(time.Duration(c.getSleepTime()) * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(c.HostHeader) > 0 {\n\t\t\t//req.Header.Set(\"Host\", c.HostHeader)\n\t\t\treq.Host = c.HostHeader\n\t\t}\n\n\t\treq.Header.Set(\"User-Agent\", c.UserAgent)\n\t\tresp, err := client.Do(req)\n\n\t\tif err != nil {\n\t\t\t//time.Sleep(time.Duration(c.SleepInterval()) * time.Second)\n\t\t\ttime.Sleep(time.Duration(c.getSleepTime()) * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tif resp.StatusCode != 200 {\n\t\t\t//time.Sleep(time.Duration(c.SleepInterval()) * time.Second)\n\t\t\ttime.Sleep(time.Duration(c.getSleepTime()) * time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tdefer resp.Body.Close()\n\n\t\trespBody, _ = ioutil.ReadAll(resp.Body)\n\t\traw, _ := base64.StdEncoding.DecodeString(string(respBody))\n\t\tenc_raw := raw[36:] // Remove the prepended UUID\n\t\tif len(c.Key) != 0 {\n\t\t\tenc_raw = c.decryptMessage(enc_raw)\n\t\t\tif len(enc_raw) == 0 {\n\t\t\t\t//time.Sleep(time.Duration(c.SleepInterval()) * time.Second)\n\t\t\t\ttime.Sleep(time.Duration(c.getSleepTime()) * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\t//log.Printf(\"Raw htmlget response: %s\\n\", string(enc_raw))\n\t\treturn enc_raw\n\t}\n\treturn make([]byte, 0) //shouldn't get here\n\n}",
"func (a *API) Do(data interface{}, out interface{}) (err error) {\n\n\turl := a.GetEndpointURL()\n\tpayload, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tlog.Printf(\"Sending to %s: %s\\n\", url, payload)\n\treq, err := http.NewRequest(\"POST\", url, bytes.NewBuffer(payload))\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tclient := a.GetClient()\n\n\tr, err := client.Do(req)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdefer r.Body.Close()\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Handle BOM encoding.\n\tbody = bytes.TrimPrefix(body, []byte{239, 187, 191})\n\tlog.Printf(\"API response body: %s\\n\", body)\n\n\t// First, see if it's a generic error response. If so, return it.\n\terrResponse := ErrorResponse{}\n\tif err = json.Unmarshal(body, &errResponse); err == nil {\n\t\tif errResponse.Error() != nil {\n\t\t\treturn errResponse.Error()\n\t\t}\n\t}\n\n\terr = json.Unmarshal(body, &out)\n\treturn\n\n}",
"func (d *Module) GetData() interface{} {\n\n\treturn Response{\n\t\tInfo: d.getInfo(),\n\t\tContainers: d.getContainers(),\n\t\tNetworks: d.getNetworks(),\n\t\tVolumes: d.getVolumes(),\n\t\tImages: d.getImages(),\n\t}\n}",
"func TestApiGET(t *testing.T) {\n var response getResponse\n statusCode, err := Get(\"http://dummy.restapiexample.com/api/v1/employees\", &response)\n if err != nil {\n t.Errorf(\"Got err %v while trying rest.Get():\", err)\n return\n }\n if statusCode != 200 {\n t.Errorf(\"Expected status code 200, got %d instead\", statusCode)\n return\n }\n if response.Status != \"success\" {\n t.Errorf(\"Expected response.Status to be success, got %v instead\", response.Status)\n }\n if response.Data[0].Id != \"1\" {\n t.Errorf(\"Expected response.Data[0].Id to be 1, got %s instead\", response.Data[0].Id)\n }\n}",
"func httpDo(opt *httpclientOption) ([]byte, error) {\n\tdomain := opt.cfg.endpointByBalancer()\n\n\tif !strings.HasPrefix(domain, \"http://\") && !strings.HasPrefix(domain, \"https://\") {\n\t\tdomain = \"http://\" + domain\n\t}\n\n\turl := domain + opt.uri\n\n\tc := &http.Client{\n\t\tTimeout: time.Duration(opt.cfg.RetryTimeout) * time.Millisecond,\n\t\tTransport: http.DefaultTransport,\n\t}\n\treq, err := http.NewRequest(opt.method, url, opt.body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to execute http request, service_name: %s, url: %s, error: %s\", opt.serviceName, url, err.Error())\n\t}\n\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tfor k, v := range opt.headers {\n\t\treq.Header.Set(k, v)\n\t}\n\n\tresp, err := c.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"http request failed, service name: %s, url: %s, error: %s\", opt.serviceName, url, err.Error())\n\t}\n\tif resp.StatusCode >= http.StatusBadRequest {\n\t\treturn nil, fmt.Errorf(\"http request failed, service name: %s, url: %s, code: %d, status: %s\", opt.serviceName, url, resp.StatusCode, resp.Status)\n\t}\n\tdefer resp.Body.Close()\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"http request read body failed, service name: %s, url: %s, error: %s\", opt.serviceName, url, err.Error())\n\t}\n\n\treturn b, nil\n}",
"func Test_Get(t *testing.T) {\n\th := NewHttpSend(GetUrlBuild(\"http://192.168.10.76/lotus/lotustest\", map[string]string{\"name\": \"xiaochuan\"}))\n\tbody, err := h.Get()\n\tif err != nil {\n\t\tt.Error(\"请求错误:\", err)\n\t\t//t.Errorf(err.Error())\n\t\tt.Fail()\n\t} else {\n\t\tt.Log(\"正常返回\")\n\n\t\tvar out bytes.Buffer\n\t\terr = json.Indent(&out, body, \"\", \"\\t\")\n\t\tfmt.Println(out.String())\n\t}\n}",
"func GetData(accessToken string, w http.ResponseWriter, r *http.Request) {\n\trequest, err := http.NewRequest(\"GET\", \"https://auth.vatsim.net/api/user\", nil)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\trequest.Header.Add(\"Bearer\", accessToken)\n\trequest.Header.Add(\"accept\", \"application/json\")\n\tclient := http.Client{}\n\tclient.Do(request)\n\n\tdefer request.Body.Close()\n\n\tbody, errReading := ioutil.ReadAll(request.Body)\n\tif errReading != nil {\n\t\tlog.Fatal(errReading)\n\t}\n\n\n\tvar userDetails map[string]interface{}\n\terrJSON := json.Unmarshal(body, &userDetails)\n\tif errJSON != nil {\n\t\tlog.Fatal(errJSON)\n\t}\n\tfmt.Println(userDetails)\n}",
"func httpGet(url string) (string, error) {\n\terrMsg := fmt.Sprintf(\"error fetching data from episodate api for url: %s\", url)\n\tresp, err := http.Get(url)\n\n\tif err != nil {\n\t\terr = errors.Wrapf(err, errMsg)\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\terr = errors.New(fmt.Sprintf(\"%s: Got HTTP StatusCode: %d\", errMsg, resp.StatusCode))\n\t\treturn \"\", err\n\t}\n\n\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\terr = errors.Wrapf(err, errMsg)\n\t\treturn \"\", err\n\t}\n\n\treturn string(bodyBytes), nil\n}",
"func (a *UtilsApiService) Test(ctx context.Context) (TestResponse, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue TestResponse\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/utils/test\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json;charset=UTF-8\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\tif err == nil { \n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 200 {\n\t\t\tvar v TestResponse\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ErrorModel\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"));\n\t\t\t\tif err != nil {\n\t\t\t\t\tnewErr.error = err.Error()\n\t\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t\t}\n\t\t\t\tnewErr.model = v\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func (c *carService) FetchData() {\n\tclient := http.Client{}\n\n\tfmt.Printf(\"Fetching the url %s\", carServiceURL)\n\n\t// Call the external API\n\tresp, _ := client.Get(carServiceURL)\n\tfmt.Println(\"Response\", resp)\n\n\t// Write response to the channel\n\tcarDataChannel <- resp\n\n}",
"func (c *Client) FetchData(ctx context.Context, url string) ([]byte, error) {\n\n\t// Implement semaphores to ensure maximum concurrency threshold.\n\tc.semaphore <- struct{}{}\n\tdefer func() { <-c.semaphore }()\n\n\t// If there is an in-flight request for a unique URL, send response\n\t// from the in-flight request. Else, create the in-flight request.\n\tresponseRaw, err, shared := c.RequestGroup.Do(url, func() (interface{}, error) {\n\t\treturn c.fetchResponse(ctx)\n\t})\n\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tlog.Infof(\"in-flight status : %t\", shared)\n\n\t//time.Sleep(time.Second * 4)\n\n\tresponse := responseRaw.([]byte)\n\n\treturn response, err\n}",
"func (h RestServer) GetData(res http.ResponseWriter, req *http.Request) {\n\twords, ok := req.URL.Query()[\"search\"]\n\n\tif !ok || len(words[0]) < 1 {\n\t\tjs := h.createErrRes(\"Url Param 'search' is missing\")\n\t\tres.WriteHeader(http.StatusNotAcceptable)\n\t\tres.Write(js)\n\t\treturn\n\t}\n\n\tpaginations, ok := req.URL.Query()[\"pagination\"]\n\n\tif !ok || len(paginations[0]) < 1 {\n\t\tjs := h.createErrRes(\"Url Param 'pagination' is missing\")\n\t\tres.WriteHeader(http.StatusNotAcceptable)\n\t\tres.Write(js)\n\t\treturn\n\t}\n\tpageStr := paginations[0]\n\tpageInt, err := strconv.Atoi(pageStr)\n\tif err != nil {\n\t\tjs := h.createErrRes(err.Error())\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write(js)\n\t\treturn\n\t}\n\n\tword := words[0]\n\n\tresAPI, err := GetDataMovie(word, pageStr)\n\tif err != nil {\n\t\tlog := model.LogHistory{SearchWord: word, Pagination: int64(pageInt), Success: false}\n\t\t_, err1 := log.Insert(h.Db)\n\t\tif err1 != nil {\n\t\t\tjs := h.createErrRes(err1.Error())\n\t\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\t\tres.Write(js)\n\t\t\treturn\n\t\t}\n\t\tjs := h.createErrRes(err.Error())\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write(js)\n\t\treturn\n\t}\n\n\tlog := model.LogHistory{SearchWord: word, Pagination: int64(pageInt), Success: true}\n\t_, err1 := log.Insert(h.Db)\n\tif err1 != nil {\n\t\tjs := h.createErrRes(err1.Error())\n\t\tres.WriteHeader(http.StatusInternalServerError)\n\t\tres.Write(js)\n\t}\n\tres.Header().Set(\"Content-Type\", \"application/json\")\n\tres.WriteHeader(http.StatusOK)\n\tres.Write(resAPI)\n}",
"func (c *baseClient) Do(req *http.Request, successV, failureV interface{}) (*http.Response, error) {\n\tresp, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\tif successV != nil {\n\t\t\terr = json.NewDecoder(resp.Body).Decode(successV)\n\t\t}\n\t} else {\n\t\tif failureV != nil {\n\t\t\terr = json.NewDecoder(resp.Body).Decode(failureV)\n\t\t}\n\t}\n\n\treturn resp, err\n}",
"func HttpTest(c *gin.Context) {\n\t// pattern := c.Query(\"service\")\n\t// filter := c.Query(\"method\")\n\t// address := c.Query(\"address\")\n\n\t// send standard http request to backend http://address/service/method content-type:json\n\n\tc.JSON(http.StatusOK, gin.H{\n\t\t\"code\": 1,\n\t\t\"data\": \"implement me\",\n\t})\n}",
"func (c *Client) Get(ctx context.Context, url string, data ...interface{}) (*Response, error) {\n\treturn c.DoRequest(ctx, http.MethodGet, url, data...)\n}",
"func GetChartData(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"content-type\", \"application/json\")\n\n\tvar rr models.ResponseResult\n\tvar user models.User\n\tif ok := user.GetLoggedIn(ctx.Get(r, \"jwtToken\")); !ok {\n\t\trr = models.ResponseResult{\n\t\t\tError: \"Token is invalid!\",\n\t\t\tResult: \"\",\n\t\t}\n\t\t_ = json.NewEncoder(w).Encode(rr)\n\t\treturn\n\t}\n\n\tvar chartDatReq models.ChartDataRequest\n\tbody, _ := ioutil.ReadAll(r.Body)\n\n\t// log.Println(\"-===================> \", string(body))\n\n\terr := json.Unmarshal(body, &chartDatReq)\n\n\tif err != nil {\n\t\trr.Error = err.Error()\n\t\t_ = json.NewEncoder(w).Encode(rr)\n\t\treturn\n\t}\n\n\t// log.Println(chartDatReq)\n\tdataFetchResult := chartDatReq.Get()\n\t_ = json.NewEncoder(w).Encode(dataFetchResult)\n\treturn\n\n}",
"func infrastructure(client *http.Client, url string) ([]byte, error) {\n\treq, err := http.NewRequest(\"GET\", url, nil) // define req as request to the GET function\n\tresp, err := client.Do(req) // do the request\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"client req error: \" + err.Error())\n\t} else { // if there is an error, return nil and the error message\n\t\tresponseBody, err := ioutil.ReadAll(resp.Body) // responseBody will get the body of \"resp\" - the output from the GET request.\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"body read error: \" + err.Error())\n\t\t} else {\n\t\t\treturn responseBody, err\n\t\t}\n\t}\n}",
"func (s *Server) handleGetData(request []byte) {\n\tvar payload serverutil.MsgGetData\n\tif err := getPayload(request, &payload); err != nil {\n\t\tlog.Panic(err)\n\t}\n\taddr := payload.AddrSender.String()\n\tp, _ := s.GetPeer(addr)\n\tp.IncreaseBytesReceived(uint64(len(request)))\n\ts.AddPeer(p)\n\ts.Log(true, fmt.Sprintf(\"GetData kind: %s, with ID:%s received from %s\", payload.Kind, hex.EncodeToString(payload.ID), addr))\n\n\tif payload.Kind == \"block\" {\n\t\t//block\n\t\t//on recupère le block si il existe\n\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\tif block != nil {\n\t\t\t//envoie le block au noeud créateur de la requete\n\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t} else {\n\t\t\tfmt.Println(\"block is nil :( handleGetData\")\n\t\t\tgo func() {\n\t\t\t\tfor {\n\t\t\t\t\ttime.Sleep(time.Millisecond * 50)\n\t\t\t\t\tblock, _ := s.chain.GetBlockByHash(payload.ID)\n\t\t\t\t\tif block != nil {\n\t\t\t\t\t\ts.sendBlock(payload.AddrSender, block)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}()\n\t\t}\n\t} else {\n\t\ttx := mempool.Mempool.GetTx(hex.EncodeToString(payload.ID))\n\t\tif tx != nil {\n\t\t\ts.SendTx(payload.AddrSender, tx)\n\t\t}\n\t}\n}",
"func (i *Instance) doRequest(ctx context.Context, url string) (map[string]interface{}, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf(\"%s%s\", i.address, url), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := i.client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode >= 200 && resp.StatusCode < 300 {\n\t\tvar data map[string]interface{}\n\n\t\terr = json.NewDecoder(resp.Body).Decode(&data)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn data, nil\n\t}\n\n\tvar res ResponseError\n\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(res.Errors) > 0 {\n\t\treturn nil, fmt.Errorf(res.Errors[0].Msg)\n\t}\n\n\treturn nil, fmt.Errorf(\"%v\", res)\n}",
"func (f *FakeInstance) GetUserData(_ context.Context, _ string) (*govultr.UserData, *http.Response, error) {\n\tpanic(\"implement me\")\n}",
"func (q *worldstateQueryProcessor) getData(dbName, querierUserID, key string) (*types.GetDataResponse, error) {\n\tif worldstate.IsSystemDB(dbName) {\n\t\treturn nil, &errors.PermissionErr{\n\t\t\tErrMsg: \"no user can directly read from a system database [\" + dbName + \"]. \" +\n\t\t\t\t\"To read from a system database, use /config, /user, /db rest endpoints instead of /data\",\n\t\t}\n\t}\n\n\thasPerm, err := q.identityQuerier.HasReadAccessOnDataDB(querierUserID, dbName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !hasPerm {\n\t\treturn nil, &errors.PermissionErr{\n\t\t\tErrMsg: \"the user [\" + querierUserID + \"] has no permission to read from database [\" + dbName + \"]\",\n\t\t}\n\t}\n\n\tvalue, metadata, err := q.db.Get(dbName, key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tacl := metadata.GetAccessControl()\n\tif acl != nil {\n\t\tif !acl.ReadUsers[querierUserID] && !acl.ReadWriteUsers[querierUserID] {\n\t\t\treturn nil, &errors.PermissionErr{\n\t\t\t\tErrMsg: \"the user [\" + querierUserID + \"] has no permission to read key [\" + key + \"] from database [\" + dbName + \"]\",\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &types.GetDataResponse{\n\t\tValue: value,\n\t\tMetadata: metadata,\n\t}, nil\n}",
"func (_ERC725 *ERC725Caller) GetData(opts *bind.CallOpts, _key [32]byte) ([]byte, error) {\n\tvar (\n\t\tret0 = new([]byte)\n\t)\n\tout := ret0\n\terr := _ERC725.contract.Call(opts, out, \"getData\", _key)\n\treturn *ret0, err\n}",
"func (dm *Datamuse) Get() (Results, error) {\n\tc := &http.Client{Timeout: 30 * time.Second}\n\n\tresp, err := c.Get(dm.apiURL.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tvar res Results\n\terr = json.NewDecoder(resp.Body).Decode(&res)\n\n\treturn res, err\n}",
"func getDataFromEndpoint(endpoint string) Data {\r\n\t// Setting the type of request and authorization data\r\n\treq, err := http.NewRequest(\"GET\", endpoint, nil)\r\n\ttoken, _ := ioutil.ReadFile(\"accesstoken.txt\")\r\n\tencodedToken := base64.StdEncoding.EncodeToString([]byte(string(token)))\r\n\tbasicAuth := \"Basic \" + encodedToken\r\n\treq.Header.Set(\"Authorization\", basicAuth)\r\n\r\n\t//Executing the request\r\n\tresp, err := http.DefaultClient.Do(req)\r\n\r\n\tif err != nil {\r\n\t\tlog.Fatal(\"Error in the request.\\n[ERRO] -\", err)\r\n\t}\r\n\tdefer resp.Body.Close()\r\n\r\n\t// Getting json data, decoding them from the raw format and storing them at \"record's\" address\r\n\tvar record Data\r\n\tif err := json.NewDecoder(resp.Body).Decode(&record); err != nil {\r\n\t\tlog.Println(err)\r\n\t}\r\n\treturn record\r\n}",
"func Get(url string, data ...interface{}) (*ClientResponse, error) {\n\treturn DoRequest(\"GET\", url, data...)\n}",
"func (c *Client) get(url string, result interface{}) error {\n\treq, err := c.newRequest(url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := c.http.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif err = checkResponse(resp); err != nil {\n\t\treturn err\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = checkResults(b); err != nil {\n\t\treturn err\n\t}\n\n\terr = json.Unmarshal(b, &result)\n\n\treturn err\n}",
"func (s *Service) Get(lat, long float32) (Response, error) {\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tDial: (&net.Dialer{\n\t\t\t\tTimeout: s.Timeout,\n\t\t\t\tKeepAlive: s.Timeout,\n\t\t\t}).Dial,\n\t\t\tTLSHandshakeTimeout: s.Timeout,\n\t\t\tResponseHeaderTimeout: s.Timeout,\n\t\t\tExpectContinueTimeout: s.Timeout,\n\t\t},\n\t}\n\n\tret := Response{}\n\n\tif res, err := client.Get(fmt.Sprintf(s.URLFormat, s.Key, lat, long)); err != nil {\n\t\treturn ret, err\n\t} else if res.StatusCode/100 != 2 {\n\t\treturn ret, fmt.Errorf(\"invalid statuscode from darksky: %d\", res.StatusCode)\n\t} else if b, err := ioutil.ReadAll(res.Body); err != nil {\n\t\treturn ret, err\n\t} else if err := json.Unmarshal(b, &ret); err != nil {\n\t\treturn ret, err\n\t}\n\n\treturn ret, nil\n}",
"func TestGet(t *testing.T) {\n\tConfigure(t.Logf)\n\n\ttestData := \"this is some test data\"\n\n\tts := httptest.NewServer(http.HandlerFunc(\n\t\tfunc(w http.ResponseWriter, r *http.Request) {\n\t\t\tfmt.Fprint(w, testData)\n\t\t}))\n\tdefer ts.Close()\n\n\thttpClient, err := NewHTTPClient(ts.URL)\n\tif err != nil {\n\t\tt.Fatal(\"Could not create http client: \", err)\n\t}\n\n\tdata, err := httpClient.Get(\"/\")\n\tif err != nil {\n\t\tt.Fatal(\"Could not get from server: \", err)\n\t}\n\n\tif string(data) != testData {\n\t\tt.Fatalf(\"Response does not match: got %s, should be: %s\", data, testData)\n\t}\n}",
"func fetchData(url string) (*info, error) {\r\n\r\n\t// Throttle the data request rate\r\n\ttime.Sleep(100 * time.Millisecond)\r\n\r\n\tresp, _ := http.Get(source + url)\r\n\tdefer resp.Body.Close()\r\n\r\n\tresult, err := ioutil.ReadAll(resp.Body)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tvar i info\r\n\terr = json.Unmarshal(result, &i)\r\n\tif err != nil {\r\n\t\t//log.Println(err, err.Error())\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\ttotalRequest++\r\n\t//fmt.Printf(\"%v\\n\\n\", i)\r\n\treturn &i, nil\r\n}",
"func EndpointGETMe(w http.ResponseWriter, r *http.Request) {\n\t// Write the HTTP header for the response\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\tw.WriteHeader(http.StatusOK)\n\n\t// Create the actual data response structs of the API call\n\ttype ReturnData struct {\n\t\tSuccess Success\n\t\tData User\n\t}\n\n\t// Create the response structs\n\tvar success = Success{Success: true, Error: \"\"}\n\tvar data User\n\tvar returnData ReturnData\n\n\t// Process the API call\n\tif r.URL.Query().Get(\"token\") == \"\" {\n\t\tsuccess.Success = false\n\t\tsuccess.Error = \"Invalid API call. 'token' paramater is required.\"\n\t} else if userID, err := gSessionCache.CheckSession(r.URL.Query().Get(\"token\")); err != nil {\n\t\tsuccess.Success = false\n\t\tsuccess.Error = \"Invalid API call. 'token' paramater must be a valid token.\"\n\t} else {\n\t\tdata, _, _ = gUserCache.GetUser(userID)\n\t}\n\n\t// Combine the success and data structs so that they can be returned\n\treturnData.Success = success\n\treturnData.Data = data\n\n\t// Respond with the JSON-encoded return data\n\tif err := json.NewEncoder(w).Encode(returnData); err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (bbc *MockBlueButtonClient) getData(endpoint, patientID string) (string, error) {\n\n\tfData, err := ioutil.ReadFile(\"../shared_files/synthetic_beneficiary_data/\" + endpoint)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tcleanData := strings.Replace(string(fData), \"20000000000001\", patientID, -1)\n\treturn cleanData, err\n}",
"func (t *Task) GetData(d interface{}) error {\n\tdata := t.Data\n\tif data == nil {\n\t\treturn nil\n\t}\n\treturn json.Unmarshal(data, d)\n}",
"func Test_DeviceService_Get_Success(t *testing.T) {\n\th := TestHelper{}\n\trep := new(mocks.IDeviceRepository)\n\trepAuth := new(mocks.IDeviceAuthRepository)\n\ts := h.CreateTestDeviceService(rep, repAuth)\n\n\tip := \"192.168.11.4\"\n\tport := 37777\n\n\texpected := models.Device{\n\t\tIP: ip,\n\t\tPort: port,\n\t}\n\n\trep.On(\"Get\", ip).Return(expected, nil)\n\n\treal, err := s.Get(ip)\n\tassert.NoError(t, err)\n\tassert.Equal(t, real, expected)\n}",
"func getData(client pb.DataClient, filter *pb.DataFilter) {\r\n\t// calling the streaming API\r\n\tstream, err := client.GetData(context.Background(), filter)\r\n\tif err != nil {\r\n\t\tlog.Fatalf(\"Error on get data: %v\", err)\r\n\t}\r\n\tfor {\r\n\t\tdata, err := stream.Recv()\r\n\t\tif err == io.EOF {\r\n\t\t\tbreak\r\n\t\t}\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatalf(\"%v.GetData(_) = _, %v\", client, err)\r\n\t\t}\r\n\t\tlog.Printf(\"Data: %v\", data)\r\n\t}\r\n}",
"func (tc *tclient) get() error {\n\t// 1 -- timeout via context.\n\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\tdefer cancel()\n\n\treq, err := http.NewRequestWithContext(ctx,\n\t\t\"GET\", tc.url+\"/ping\", nil)\n\n\tresp, err := tc.client.Do(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"SUCCESS: '%s'\", string(body))\n\treturn nil\n}",
"func GetDataByID(c *gin.Context) {\r\n\tid := c.Params.ByName(\"id\")\r\n\tvar predict Models.Predict\r\n\terr := Models.GetDataByID(&predict, id)\r\n\tif err != nil {\r\n\t\tc.AbortWithStatus(http.StatusNotFound)\r\n\t} else {\r\n\t\tc.JSON(http.StatusOK, predict)\r\n\t}\r\n}",
"func handleGetData(request []byte, bc *Blockchain) {\n\tvar buff bytes.Buffer\n\tvar payload getdata\n\n\tbuff.Write(request[commandLength:])\n\tdec := gob.NewDecoder(&buff)\n\terr := dec.Decode(&payload)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tif payload.Type == \"block\" {\n\t\tblock, err := bc.GetBlock([]byte(payload.ID))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\tsendBlock(payload.AddrFrom, &block)\n\t}\n\n\tif payload.Type == \"tx\" {\n\t\ttxID := hex.EncodeToString(payload.ID)\n\t\ttx := mempool[txID]\n\n\t\tsendTx(payload.AddrFrom, &tx)\n\t\t// delete(mempool, txID)\n\t}\n}",
"func (s *Test2) Call(reqBody *jModels.RequestBody, r *http.Request) (interface{}, *jModels.Error) {\n\tswitch reqBody.GetMethod() {\n\tcase \"NilArgs\":\n\t\tif reqBody.HasParams() {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInvalidParams, \"That method of service can't has param\", nil)\n\t\t}\n\t\tvar args jModels.NilArgs\n\t\tvar res Test2NilArgsResult\n\t\terr := s.NilArgs(args, &res)\n\t\tif err != nil {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInternalError, \"Internal error\", err.Error())\n\t\t}\n\t\treturn res, nil\n\tcase \"NilResult\":\n\t\tvar args models_7620940177658827552.Test2NilResultArgs\n\t\tif reqBody.HasParams() {\n\t\t\terr := json.Unmarshal(*reqBody.Params, &args)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInvalidParams, \"Can't unmarshal params to args structure'\", err.Error())\n\t\t\t}\n\t\t}\n\t\tvar res jModels.NilResult\n\t\terr := s.NilResult(args, &res)\n\t\tif err != nil {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInternalError, \"Internal error\", err.Error())\n\t\t}\n\t\treturn res, nil\n\tcase \"AnotherPackageResult\":\n\t\tvar args models.NilArgs\n\t\tif reqBody.HasParams() {\n\t\t\terr := json.Unmarshal(*reqBody.Params, &args)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInvalidParams, \"Can't unmarshal params to args structure'\", err.Error())\n\t\t\t}\n\t\t}\n\t\tvar res models.SomeModel\n\t\terr := s.AnotherPackageResult(args, &res)\n\t\tif err != nil {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInternalError, \"Internal error\", err.Error())\n\t\t}\n\t\treturn res, nil\n\tcase \"DoubleStarAnotherResult\":\n\t\tif reqBody.HasParams() {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInvalidParams, \"That method of service can't has param\", nil)\n\t\t}\n\t\tvar args jModels.NilArgs\n\t\tvar res *models.SomeModel\n\t\terr := s.DoubleStarAnotherResult(args, &res)\n\t\tif err != nil {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInternalError, \"Internal error\", err.Error())\n\t\t}\n\t\treturn res, nil\n\tcase \"DoubleStarResult\":\n\t\tif reqBody.HasParams() {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInvalidParams, \"That method of service can't has param\", nil)\n\t\t}\n\t\tvar args jModels.NilArgs\n\t\tvar res *Test2NilArgsResult\n\t\terr := s.DoubleStarResult(args, &res)\n\t\tif err != nil {\n\t\t\treturn nil, jModels.NewError(jModels.ErrorCodeInternalError, \"Internal error\", err.Error())\n\t\t}\n\t\treturn res, nil\n\tdefault:\n\t\treturn nil, jModels.NewError(jModels.ErrorCodeMethodNotFound, fmt.Sprintf(\"Unknown method '%s' for service '%s'\", reqBody.GetMethod(), \"Test2\"), nil)\n\t}\n}",
"func (communication *Wrapper) GetData(metaData common.MetaData, offset int64) common.SyncServiceError {\n\tcomm, err := communication.selectCommunicator(\"\", metaData.DestOrgID, metaData.OriginType, metaData.OriginID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn comm.GetData(metaData, offset)\n}",
"func (client DatasetClient) GetResponder(resp *http.Response) (result DatasetDetailInfo, err error) {\n err = autorest.Respond(\n resp,\n azure.WithErrorUnlessStatusCode(http.StatusOK),\n autorest.ByUnmarshallingJSON(&result),\n autorest.ByClosing())\n result.Response = autorest.Response{Response: resp}\n return\n }",
"func TestGetDataFromUrlBodyReadError(t *testing.T) {\n\tdefer gock.Off()\n\n\tapiUrl := \"http://example.com\"\n\tapiPath := \"status\"\n\n\tgock.New(apiUrl).\n\t\tGet(apiPath).\n\t\tReply(200).\n\t\tBodyString(\"\")\n\n\t_, err := getDataFromURL(apiUrl+\"/\"+apiPath, func(r io.Reader) ([]byte, error) {\n\t\treturn nil, errors.New(\"IO Reader error occurred\")\n\t})\n\n\tassert.Error(t, err)\n}",
"func GetAPI(w http.ResponseWriter, r *http.Request) {\n\tvar hour int\n\tvar query string\n\thour = checkhour()\n\n\tconDB()\n\tvar result CovidResponse\n\n\t//if last insert data more than two hour then insert new\n\tif hour > 2 {\n\t\tlog.Printf(\"1!\\n\")\n\t\tresponse, err := http.Get(\"https://services5.arcgis.com/VS6HdKS0VfIhv8Ct/arcgis/rest/services/COVID19_Indonesia_per_Provinsi/FeatureServer/0/query?where=1%3D1&outFields=*&outSR=4326&f=json\")\n\n\t\tif err != nil {\n\t\t\tfmt.Print(err.Error())\n\t\t\tos.Exit(1)\n\t\t}\n\n\t\tdata, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t\tvar responseObject ResponseData\n\t\tjson.Unmarshal(data, &responseObject)\n\n\t\ttrans, err := db.Begin()\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"Begin transaction error : \" + err.Error())\n\t\t}\n\n\t\tstmt, err := trans.Prepare(`INSERT INTO Covid (FID,KodeProvinsi,Provinsi,KasusPositif,KasusSembuh,KasusMeninggal,Latitude,Longitude,DateTimeCreated)\n\t\t\t\t\t\t\t\tVALUES($1,$2,$3,$4,$5,$6,$7,$8,$9);`)\n\t\tif err != nil {\n\t\t\ttrans.Rollback()\n\t\t\tlog.Fatal(\"Preparation transaction error : \" + err.Error())\n\t\t}\n\n\t\tvar dr DataResult\n\t\tfor i := 0; i < len(responseObject.Features); i++ {\n\t\t\tattr := responseObject.Features[i].Attributes\n\t\t\tgeo := responseObject.Features[i].Geometries\n\n\t\t\t_, err := stmt.Exec(attr.FID, attr.KodeProvinsi, attr.Provinsi, attr.KasusPositif, attr.KasusSembuh, attr.KasusMeninggal, geo.Latitude, geo.Longitude, time.Now())\n\t\t\tif err != nil {\n\t\t\t\ttrans.Rollback()\n\t\t\t\tlog.Fatal(\"Execute transaction error : \" + err.Error()) // return an error too, we may want to wrap them\n\t\t\t}\n\n\t\t\tdr.FID = attr.FID\n\t\t\tdr.KodeProvinsi = attr.KodeProvinsi\n\t\t\tdr.Provinsi = attr.Provinsi\n\t\t\tdr.KasusPositif = attr.KasusPositif\n\t\t\tdr.KasusSembuh = attr.KasusSembuh\n\t\t\tdr.KasusMeninggal = attr.KasusMeninggal\n\t\t\tdr.Latitude = geo.Latitude\n\t\t\tdr.Longitude = geo.Longitude\n\n\t\t\tresult.AddItem(dr)\n\t\t}\n\n\t\tdefer stmt.Close()\n\t\ttrans.Commit()\n\t\tdb.Close()\n\n\t\tjson.NewEncoder(w).Encode(result)\n\t\treturn\n\t}\n\n\tlog.Printf(\"2!\\n\")\n\tquery = `select FID,KodeProvinsi,Provinsi,KasusPositif,KasusSembuh,KasusMeninggal,Latitude,Longitude \n\t\t\t\tfrom covid where TO_CHAR(datetimecreated, 'YYYYMMDDHH24') = TO_CHAR((select MAX(datetimecreated) from covid),'YYYYMMDDHH24')`\n\n\tselectQuery, err := db.Query(query)\n\tfor selectQuery.Next() {\n\t\tvar r DataResult\n\t\terr = selectQuery.Scan(&r.FID, &r.KodeProvinsi, &r.Provinsi, &r.KasusPositif, &r.KasusSembuh, &r.KasusMeninggal, &r.Latitude, &r.Longitude)\n\t\tif err != nil {\n\t\t\tpanic(err.Error())\n\t\t}\n\n\t\tresult.AddItem(r)\n\t}\n\tdb.Close()\n\n\tjson.NewEncoder(w).Encode(result)\n}",
"func DoCall(url string) (SPres, error) {\n\tdata := SPres{}\n\trepo, err := sparql.NewRepo(\"http://rwgsparql:9999/blazegraph/namespace/ecrwg/sparql\")\n\tif err != nil {\n\t\tlog.Printf(\"query make repo: %v\\n\", err)\n\t\treturn data, err\n\t}\n\n\tf := bytes.NewBufferString(queries)\n\tbank := sparql.LoadBank(f)\n\n\tq, err := bank.Prepare(\"orgInfo\", struct{ URL string }{url})\n\tif err != nil {\n\t\tlog.Printf(\"query bank prepair: %v\\n\", err)\n\t\treturn data, err\n\t}\n\n\tres, err := repo.Query(q)\n\tif err != nil {\n\t\tlog.Printf(\"query call: %v\\n\", err)\n\t\treturn data, err\n\t}\n\n\tbindingsTest2 := res.Bindings() // map[string][]rdf.Term\n\n\t// This whole aspect seems verbose... there has to be a better Go way to do this check?\n\tdata.Description = \"No description provided by facility\"\n\tif len(bindingsTest2) > 0 {\n\t\tdata.Repository = bindingsTest2[\"repository\"][0].String()\n\t\tif len(bindingsTest2[\"description\"]) > 0 {\n\t\t\tdata.Description = bindingsTest2[\"description\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"name\"]) > 0 {\n\t\t\tdata.Name = bindingsTest2[\"name\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"url\"]) > 0 {\n\t\t\tdata.URL = bindingsTest2[\"url\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"logo\"]) > 0 {\n\t\t\tdata.Logo = bindingsTest2[\"logo\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_name\"]) > 0 {\n\t\t\tdata.ContactName = bindingsTest2[\"contact_name\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_email\"]) > 0 {\n\t\t\tdata.ContactEmail = bindingsTest2[\"contact_email\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_url\"]) > 0 {\n\t\t\tdata.ContactURL = bindingsTest2[\"contact_url\"][0].String()\n\t\t}\n\t\tif len(bindingsTest2[\"contact_role\"]) > 0 {\n\t\t\tdata.ContactRole = bindingsTest2[\"contact_role\"][0].String()\n\t\t}\n\t}\n\n\treturn data, err\n}",
"func (m *MockClient) Get(url string) (*http.Response, error) {\n\treturn GetFunc(url)\n}",
"func (h *Handler) get(c echo.Context) (e error) {\n\tctx := c.(*cuxs.Context)\n\t// get query string from request\n\trq := ctx.RequestQuery()\n\tvar data *[]model.SalesReturn\n\n\tvar tot int64\n\n\tif data, tot, e = GetSalesReturn(rq); e == nil {\n\t\tctx.Data(data, tot)\n\t}\n\n\treturn ctx.Serve(e)\n}",
"func (h *httpCloud) get(path string, resp interface{}) error {\n\trequestType := \"GET\"\n\tbody, err := h.sendHTTPRequest(requestType, path, nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"HTTP request to cloudprovider failed: %v\", err)\n\t}\n\tif body != nil {\n\t\tif err := json.Unmarshal(body, resp); err != nil {\n\t\t\treturn fmt.Errorf(\"GET response Unmarshal for %s failed with error: %v\\n\", path, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func (a *Client) Get(params *GetParams) (*GetOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewGetParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"get\",\n\t\tMethod: \"GET\",\n\t\tPathPattern: \"/\",\n\t\tProducesMediaTypes: []string{\"application/json; qs=0.5\", \"application/vnd.schemaregistry+json; qs=0.9\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\", \"application/octet-stream\", \"application/vnd.schemaregistry+json\", \"application/vnd.schemaregistry.v1+json\"},\n\t\tSchemes: []string{\"http\", \"https\"},\n\t\tParams: params,\n\t\tReader: &GetReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*GetOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for get: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func httpGetRespondsWith200(goTest *testing.T, output infratests.TerraformOutput) {\n\thostname := output[\"app_service_default_hostname\"].(string)\n\tmaxRetries := 20\n\ttimeBetweenRetries := 2 * time.Second\n\texpectedResponse := \"Hello App Service!\"\n\n\terr := httpClient.HttpGetWithRetryWithCustomValidationE(\n\t\tgoTest,\n\t\thostname,\n\t\tmaxRetries,\n\t\ttimeBetweenRetries,\n\t\tfunc(status int, content string) bool {\n\t\t\treturn status == 200 && strings.Contains(content, expectedResponse)\n\t\t},\n\t)\n\tif err != nil {\n\t\tgoTest.Fatal(err)\n\t}\n}",
"func httpGet(t *testing.T, url string) ([]byte, error) {\n\tclient := &http.Client{}\n\tresp, err := invokeWithRetry(\n\t\tfunc() (response *http.Response, e error) {\n\t\t\treturn client.Get(url)\n\t\t},\n\t)\n\trequire.NoError(t, err)\n\treturn handleHttpResp(t, resp)\n}",
"func GetData() []byte {\n\tvar request_string string\n\n\trequest_string = GeoLookupRequest(zip)\n\n\turl := os.Getenv(\"API_URL\") + os.Getenv(\"API_KEY\") + \"/\" + os.Getenv(\"CALL\") + request_string\n\tfmt.Println(url)\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbody, err := ioutil.ReadAll(resp.Body)\n\t//body = append(body[1:], body[:len(body)-1]...)\n\treturn body\n}",
"func TestGetUserService (t *testing.T){\n\tuser1, err := GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user1)\n\n\tuser2, err := GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user2)\n\n\tuser3, err := GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user3)\n\n\tuser4, err := GetUserService(user_01.SocialNumber)\n\tassert.Equal(t, 200, err.HTTPStatus)\n\tassert.Equal(t, user_01, user4)\n}",
"func (m *MockRepository) GetData(param taxiFare.Param) ([]taxiFare.ResponseRedis, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"GetData\", param)\n\tret0, _ := ret[0].([]taxiFare.ResponseRedis)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func GetDataUsed() DataUsed {\n\thash := getMD5Hash(DevID + \"getdataused\" + AuthKey + getTimestamp())\n\turl := \"http://api.smitegame.com/smiteapi.svc/getdatausedJson/\" + DevID + \"/\" + hash + \"/\" + SessionID + \"/\" + getTimestamp()\n\n\tresponse, err := http.Get(url)\n\tif err != nil {\n\t\tperror(err)\n\t} else {\n\t\tdefer response.Body.Close()\n\t\tcontents, err := ioutil.ReadAll(response.Body)\n\t\tif err != nil {\n\t\t\tperror(err)\n\t\t} else {\n\t\t\tvar dataUsed []DataUsed\n\t\t\tjson.Unmarshal(contents, &dataUsed)\n\t\t\treturn dataUsed[0]\n\t\t}\n\t}\n\tdataUsed := DataUsed{ReturnMessage: \"Not found\"}\n\treturn dataUsed\n}",
"func makeHttpCall() {\n\tresp, err := http.Get(\"https://httpbin.org/get\")\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Failed to send request message\")\n\t}\n\tdefer resp.Body.Close()\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatal().Err(err).Msg(\"Failed to read response body\")\n\t}\n\tlog.Info().Str(\"Get Response\", string(body)).Msg(\"Response\")\n}",
"func doDataRequest(token string) ([]byte, error) {\n\n\t// create JSON payload\n\tdata_payload := `{\n\t\t\"data\": {\n\t\t\t\t\"exportProducts\": {\n\t\t\t\t\t\"export_by_range\": {\n\t\t\t\t\t\t\"start\": 1\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t`\n\n\t// create client\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(API_METHOD, API_BASE_URL+\"/products\", strings.NewReader(data_payload))\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\t// set bearer authorization header with the OAuth token value\n\treq.Header.Add(\"Authorization\", \"Bearer \"+token)\n\n\t// send request\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\n\t// get response\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}",
"func (f5 *f5LTM) get(url string, result interface{}) error {\n\treturn f5.restRequest(\"GET\", url, nil, result)\n}",
"func (f *fetcher) Do(ctx context.Context) (*http.Response, []byte, error) {\n\n\treq, err := http.NewRequest(\"GET\", f.address.String(), nil)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif ctx != nil {\n\t\treq = req.WithContext(ctx)\n\t}\n\n\tresp, err := f.client.Do(req)\n\tdefer func() {\n\t\tif resp != nil {\n\t\t\tresp.Body.Close()\n\t\t}\n\t}()\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tvar body []byte\n\tdone := make(chan struct{})\n\t// a seperate go routine to read resp into body\n\tgo func() {\n\t\tbody, err = ioutil.ReadAll(resp.Body)\n\t\tclose(done)\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t<-done\n\t\tif err == nil {\n\t\t\terr = ctx.Err()\n\t\t}\n\tcase <-done:\n\t}\n\treturn resp, body, err\n}",
"func GetCovidData(endpoint string) (*http.Response, error) {\n\tlog.Println(\"GET\", endpoint)\n\n\trequest, _ := http.NewRequest(http.MethodGet, endpoint, nil)\n\n\turl, _ := url.Parse(endpoint)\n\tkey, _ := os.LookupEnv(\"COVID_SERVICE_API_KEY\")\n\n\trequest.Header.Add(\"x-rapidapi-key\", key)\n\trequest.Header.Add(\"x-rapidapi-host\", url.Host)\n\n\treturn Client.Do(request)\n}",
"func (c *HttpClientImplementation) DoRequest(req *http.Request, result interface{}) *Error {\n\tstart := time.Now()\n\tres, err := c.HttpClient.Do(req)\n\tif err != nil {\n\t\tc.Logger.Error(\"Cannot send request: %v\", err.Error())\n\t\tvar statusCode int\n\n\t\tif res != nil {\n\t\t\tstatusCode = res.StatusCode\n\t\t} else if strings.Contains(err.Error(), \"timeout\") {\n\t\t\tstatusCode = 408\n\t\t} else {\n\t\t\tstatusCode = 0\n\t\t}\n\n\t\treturn &Error{\n\t\t\tMessage: fmt.Sprintf(\"Error when request via HttpClient, Cannot send request with error: %s\", err.Error()),\n\t\t\tStatusCode: statusCode,\n\t\t\tRawError: err,\n\t\t}\n\t}\n\n\tif res != nil {\n\t\tdefer res.Body.Close()\n\n\t\tc.Logger.Info(\"================== END ==================\")\n\t\tc.Logger.Info(\"Request completed in %v \", time.Since(start))\n\n\t\tresBody, err := ioutil.ReadAll(res.Body)\n\t\tif err != nil {\n\t\t\tc.Logger.Error(\"Request failed: %v\", err)\n\t\t\treturn &Error{\n\t\t\t\tMessage: \"Cannot read response body: \" + err.Error(),\n\t\t\t\tStatusCode: res.StatusCode,\n\t\t\t\tRawError: err,\n\t\t\t}\n\t\t}\n\n\t\trawResponse := newHTTPResponse(res, resBody)\n\t\tc.Logger.Debug(\"=============== Response ===============\")\n\t\t// Loop through headers to perform log\n\t\tlogHttpHeaders(c.Logger, rawResponse.Header, false)\n\t\tc.Logger.Debug(\"Response Body: %v\", string(rawResponse.RawBody))\n\n\t\tif result != nil {\n\t\t\tif err = json.Unmarshal(resBody, &result); err != nil {\n\t\t\t\treturn &Error{\n\t\t\t\t\tMessage: fmt.Sprintf(\"Invalid body response, parse error during API request to Midtrans with message: %s\", err.Error()),\n\t\t\t\t\tStatusCode: res.StatusCode,\n\t\t\t\t\tRawError: err,\n\t\t\t\t\tRawApiResponse: rawResponse,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check status_code from Midtrans response body\n\t\tif found, data := HasOwnProperty(\"status_code\", resBody); found {\n\t\t\tstatusCode, _ := strconv.Atoi(data[\"status_code\"].(string))\n\t\t\tif statusCode >= 401 && statusCode != 407 {\n\t\t\t\treturn &Error{\n\t\t\t\t\tMessage: fmt.Sprintf(\"Midtrans API is returning API error. HTTP status code: %s API response: %s\", strconv.Itoa(statusCode), string(resBody)),\n\t\t\t\t\tStatusCode: statusCode,\n\t\t\t\t\tRawApiResponse: rawResponse,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// Check StatusCode from Midtrans HTTP response api StatusCode\n\t\tif res.StatusCode >= 400 {\n\t\t\treturn &Error{\n\t\t\t\tMessage: fmt.Sprintf(\"Midtrans API is returning API error. HTTP status code: %s API response: %s\", strconv.Itoa(res.StatusCode), string(resBody)),\n\t\t\t\tStatusCode: res.StatusCode,\n\t\t\t\tRawApiResponse: rawResponse,\n\t\t\t\tRawError: err,\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *DefaultClient) Get(endpoint string) ([]byte, *http.Response, error) {\n\treturn s.http(http.MethodGet, endpoint, nil)\n}",
"func RequestGet(v *View, personalSocketAddr string) ([]string, map[int]string) {\r\n\tvar (\r\n\t\tg getView\r\n\t)\r\n\tnoResponseIndices := make(map[int]string)\r\n\r\n\tMu.Mutex.Lock()\r\n\t// fmt.Println(\"Check v.PersonalView before for in RqstGet:\", v.PersonalView)\r\n\tfor index, addr := range v.PersonalView {\r\n\t\tif addr == personalSocketAddr { // skip over the personal replica since we don't send to ourselves\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tfmt.Println(\"allSocketAddrs[index], index:\", v.PersonalView[index], index)\r\n\t\trequest, err := http.NewRequest(\"GET\", \"http://\"+v.PersonalView[index]+\"/key-value-store-view\", nil)\r\n\t\tif err != nil {\r\n\t\t\tlog.Fatal(\"There was an error creating a GET request with the following error:\", err.Error())\r\n\t\t}\r\n\r\n\t\tMu.Mutex.Unlock()\r\n\t\t// try to send a GET request 5 times //\r\n\t\tvar response *http.Response\r\n\t\tfor i := 0; i < 5; i++ {\r\n\t\t\thttpForwarder := &http.Client{Timeout: 3 * time.Second} // alias for DefaultClient\r\n\t\t\tresponse, err = httpForwarder.Do(request)\r\n\t\t\tif err != nil {\r\n\t\t\t\ttime.Sleep(time.Second * 3)\r\n\t\t\t\tfmt.Printf(\"ATTEMPTING TO SEND %v MORE TIMES & check err: %v\", i, err.Error())\r\n\t\t\t} else {\r\n\t\t\t\tbreak\r\n\t\t\t}\r\n\t\t}\r\n\t\tMu.Mutex.Lock()\r\n\t\tfmt.Println(\"Check personalView length in viewGet.go: \", len(v.PersonalView))\r\n\t\tif err != nil { // if a response doesn't come back, then that replica might be down\r\n\t\t\tfmt.Println(\"There was an error sending a GET request to \" + v.PersonalView[index])\r\n\t\t\tnoResponseIndices[index] = v.PersonalView[index]\r\n\t\t\tcontinue\r\n\t\t}\r\n\t\tbody, _ := ioutil.ReadAll(response.Body)\r\n\t\tstrBody := string(body[:])\r\n\t\tjson.NewDecoder(strings.NewReader(strBody)).Decode(&g)\r\n\t\tresponse.Body.Close()\r\n\t}\r\n\tMu.Mutex.Unlock()\r\n\r\n\treturn g.View, noResponseIndices\r\n}",
"func (a *netAPI) doRequest(ctx context.Context, urlString string, resp proto.Message) error {\n\thttpReq, err := http.NewRequest(\"GET\", urlString, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\thttpReq.Header.Add(\"Content-Type\", \"application/json\")\n\thttpReq.Header.Add(\"User-Agent\", userAgentString)\n\thttpReq = httpReq.WithContext(ctx)\n\thttpResp, err := a.client.Do(httpReq)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer httpResp.Body.Close()\n\tif httpResp.StatusCode != 200 {\n\t\treturn fmt.Errorf(\"webrisk: unexpected server response code: %d\", httpResp.StatusCode)\n\t}\n\tbody, err := ioutil.ReadAll(httpResp.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn protojson.Unmarshal(body, resp)\n}",
"func Get(c *gophercloud.ServiceClient, idOrURL string) (r GetResult) {\n\tvar url string\n\tif strings.Contains(idOrURL, \"/\") {\n\t\turl = idOrURL\n\t} else {\n\t\turl = getURL(c, idOrURL)\n\t}\n\tresp, err := c.Get(url, &r.Body, nil)\n\t_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)\n\treturn\n}",
"func GetServices(nbmaster string, httpClient *http.Client, jwt string, host string, hostUuid string) {\r\n fmt.Printf(\"\\nGet NetBackup services available on %s...\\n\\n\", host)\r\n\r\n uri := \"https://\" + nbmaster + \":\" + port + \"/netbackup/admin/hosts/\" + hostUuid + \"/services\"\r\n\r\n request, _ := http.NewRequest(http.MethodGet, uri, nil)\r\n request.Header.Add(\"Authorization\", jwt);\r\n request.Header.Add(\"Content-Type\", contentTypeV3);\r\n\r\n response, err := httpClient.Do(request)\r\n\r\n if err != nil {\r\n fmt.Printf(\"The HTTP request failed with error: %s\\n\", err)\r\n panic(\"Unable to get services\")\r\n } else {\r\n if response.StatusCode == 200 {\r\n resp, _ := ioutil.ReadAll(response.Body)\r\n var obj interface{}\r\n json.Unmarshal(resp, &obj)\r\n data := obj.(map[string]interface{})\r\n var services []interface{} = data[\"data\"].([]interface{})\r\n\r\n fmt.Printf(\"id status\\n\");\r\n fmt.Printf(\"============.=========\\n\");\r\n for _, service := range services {\r\n id := (service.(map[string]interface{}))[\"id\"]\r\n status := (((service.(map[string]interface{}))[\"attributes\"]).(map[string]interface{}))[\"status\"]\r\n\r\n fmt.Printf(\"%-12s %s\\n\", id, status);\r\n }\r\n } else {\r\n printErrorResponse(response)\r\n }\r\n }\r\n}",
"func (o *ThingListResponse) GetDataOk() (*[]ThingResponse, bool) {\n\tif o == nil || o.Data == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Data, true\n}"
] | [
"0.7083786",
"0.6736604",
"0.66107166",
"0.65034205",
"0.6397895",
"0.6253341",
"0.61337847",
"0.61120474",
"0.6110037",
"0.6082655",
"0.60795283",
"0.59683114",
"0.5909923",
"0.59045714",
"0.58983636",
"0.5895347",
"0.588893",
"0.58722013",
"0.5820885",
"0.5819464",
"0.58122087",
"0.5807873",
"0.5803593",
"0.5741478",
"0.5730362",
"0.5723101",
"0.5719333",
"0.57126135",
"0.57032555",
"0.5701278",
"0.56860423",
"0.56841904",
"0.567747",
"0.5651845",
"0.56391025",
"0.56212425",
"0.56141853",
"0.5611622",
"0.5605793",
"0.5602524",
"0.55983883",
"0.5591337",
"0.55873555",
"0.5566339",
"0.55654246",
"0.5551461",
"0.5543974",
"0.55438375",
"0.5541376",
"0.5523351",
"0.55133224",
"0.5508032",
"0.5505163",
"0.549944",
"0.5484472",
"0.5480079",
"0.5465859",
"0.5464597",
"0.5437712",
"0.54369915",
"0.5426909",
"0.5421748",
"0.5415586",
"0.5412603",
"0.5411492",
"0.54070866",
"0.53972197",
"0.5394718",
"0.53902537",
"0.5389849",
"0.53872573",
"0.53806645",
"0.53742063",
"0.536565",
"0.5363195",
"0.5354944",
"0.5354479",
"0.5353677",
"0.53448653",
"0.53446835",
"0.5341862",
"0.53406334",
"0.5339048",
"0.5330018",
"0.53258866",
"0.5323025",
"0.5320716",
"0.5320055",
"0.53129",
"0.53123516",
"0.5312115",
"0.53113616",
"0.530854",
"0.53041667",
"0.52945185",
"0.52917594",
"0.5286604",
"0.5285309",
"0.52827793",
"0.52814347"
] | 0.64093554 | 4 |
Base58Encode encodes a slice of bytes into its base 58 representation | func Base58Encode(input []byte) []byte {
encode := base58.Encode(input)
return []byte(encode)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func encodeBase58(i int64, b []byte) {\n\tp := len(b) - 1\n\tfor i >= 58 {\n\t\tb[p] = encodeBase58Map[i%58]\n\t\tp--\n\t\ti /= 58\n\t}\n\tb[p] = encodeBase58Map[i]\n}",
"func Base58Encode(input []byte) []byte {\n\tvar result []byte\n\n\tx := big.NewInt(0).SetBytes(input)\n\n\tbase := big.NewInt(int64(len(b58Alphabet)))\n\tzero := big.NewInt(0)\n\tmod := &big.Int{}\n\n\tfor x.Cmp(zero) != 0 {\n\t\tx.DivMod(x, base, mod)\n\t\tresult = append(result, b58Alphabet[mod.Int64()])\n\t}\n\n\t// https://en.bitcoin.it/wiki/Base58Check_encoding#Version_bytes\n\tif input[0] == 0x00 {\n\t\tresult = append(result, b58Alphabet[0])\n\t}\n\n\tReverseBytes(result)\n\n\treturn result\n}",
"func Base58Encode(input []byte) []byte {\n\tvar result []byte\n\n\tx := big.NewInt(0).SetBytes(input)\n\n\tbase := big.NewInt(int64(len(b58Alphabet)))\n\tzero := big.NewInt(0)\n\tmod := &big.Int{}\n\n\tfor x.Cmp(zero) != 0 {\n\t\tx.DivMod(x, base, mod)\n\t\tresult = append(result, b58Alphabet[mod.Int64()])\n\t}\n\n\t// https://en.bitcoin.it/wiki/Base58Check_encoding#Version_bytes\n\tif input[0] == 0x00 {\n\t\tresult = append(result, b58Alphabet[0])\n\t}\n\n\tReverseBytes(result)\n\n\treturn result\n}",
"func b58encode(b []byte) (s string) {\n\t/* See https://en.bitcoin.it/wiki/Base58Check_encoding */\n\n\tconst BITCOIN_BASE58_TABLE = \"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz\"\n\n\t/* Convert big endian bytes to big int */\n\tx := new(big.Int).SetBytes(b)\n\n\t/* Initialize */\n\tr := new(big.Int)\n\tm := big.NewInt(58)\n\tzero := big.NewInt(0)\n\ts = \"\"\n\n\t/* Convert big int to string */\n\tfor x.Cmp(zero) > 0 {\n\t\t/* x, r = (x / 58, x % 58) */\n\t\tx.QuoRem(x, m, r)\n\t\t/* Prepend ASCII character */\n\t\ts = string(BITCOIN_BASE58_TABLE[r.Int64()]) + s\n\t}\n\n\treturn s\n}",
"func Base58Encode(buf []byte) string {\n\treturn base58.Encode(buf, base58.BitcoinAlphabet)\n}",
"func Base58Encode(b []byte, alphabet string) string {\n\tif b == nil || len(b) == 0 {\n\t\treturn \"\"\n\t}\n\tx := new(big.Int)\n\tx.SetBytes(b)\n\n\tanswer := make([]byte, 0)\n\tfor x.Cmp(bigZero) > 0 {\n\t\tmod := new(big.Int)\n\t\tx.DivMod(x, bigRadix, mod)\n\t\tanswer = append(answer, alphabet[mod.Int64()])\n\t}\n\n\t// leading zero bytes\n\tfor _, i := range b {\n\t\tif i != 0 {\n\t\t\tbreak\n\t\t}\n\t\tanswer = append(answer, alphabet[0])\n\t}\n\n\t// reverse\n\talen := len(answer)\n\tfor i := 0; i < alen/2; i++ {\n\t\tanswer[i], answer[alen-1-i] = answer[alen-1-i], answer[i]\n\t}\n\n\treturn string(answer)\n}",
"func (b58 Base58) Encode(b []byte) string {\n\tx := new(big.Int)\n\tx.SetBytes(b)\n\n\tanswer := make([]byte, 0, len(b)*136/100)\n\tfor x.Cmp(bigZero) > 0 {\n\t\tmod := new(big.Int)\n\t\tx.DivMod(x, bigRadix, mod)\n\t\tanswer = append(answer, b58.alphabet[mod.Int64()])\n\t}\n\n\t// leading zero bytes\n\tfor _, i := range b {\n\t\tif i != 0 {\n\t\t\tbreak\n\t\t}\n\t\tanswer = append(answer, b58.alphabet[0])\n\t}\n\n\t// reverse\n\talen := len(answer)\n\tfor i := 0; i < alen/2; i++ {\n\t\tanswer[i], answer[alen-1-i] = answer[alen-1-i], answer[i]\n\t}\n\n\treturn string(answer)\n}",
"func encodeBase58Len(i int64) int {\n\n\tvar l = 1\n\tfor i >= 58 {\n\t\tl++\n\t\ti /= 58\n\t}\n\treturn l\n}",
"func encode(src []byte) ([]byte, int) {\n\n\tvar dst []byte\n\tx := new(big.Int).SetBytes(src)\n\tr := new(big.Int)\n\tm := big.NewInt(58)\n\tzero := big.NewInt(0)\n\ts := \"\"\n\n\t/* While x > 0 */\n\tfor x.Cmp(zero) > 0 {\n\t\t/* x, r = (x / 58, x % 58) */\n\t\tx.QuoRem(x, m, r)\n\t\t/* Prepend ASCII character */\n\t\ts = string(base58table[r.Int64()]) + s\n\t\tdst = append(dst, base58table[r.Int64()])\n\t}\n\n\t/* For number of leading 0's in bytes, prepend 1 */\n\tfor _, v := range src {\n\t\tif v != 0 {\n\t\t\tbreak\n\t\t}\n\t\tdst = append(dst, base58table[0])\n\t}\n\n\tfor i := 0; i < len(dst)/2; i++ {\n\t\tdst[i], dst[len(dst)-1-i] =\n\t\t\tdst[len(dst)-1-i], dst[i]\n\t}\n\treturn dst, len(dst)\n}",
"func Base58Encode(pkh string) string {\n\tpkhBytes, _ := hex.DecodeString(pkh)\n\tb58 := base58.NewBitcoinBase58()\n\taddr, _ := b58.EncodeToString(pkhBytes)\n\treturn addr\n}",
"func b58cencode(payload []byte, prefix []byte) string {\n\tn := make([]byte, (len(prefix) + len(payload)))\n\tfor k := range prefix {\n\t\tn[k] = prefix[k]\n\t}\n\tfor l := range payload {\n\t\tn[l+len(prefix)] = payload[l]\n\t}\n\tb58c := base58check.Encode(n)\n\treturn b58c\n}",
"func (self Base58Check) Encode(input []byte, version byte) string {\n\t/*if len(input) == 0 {\n\t\treturn \"\"\n\t}*/\n\tb := make([]byte, 0, 1+len(input)+common.CheckSumLen)\n\tb = append(b, version)\n\tb = append(b, input[:]...)\n\tcksum := ChecksumFirst4Bytes(b)\n\tb = append(b, cksum[:]...)\n\treturn Base58{}.Encode(b)\n}",
"func (this *GoTezos) b58cencode(payload []byte, prefix []byte) string {\n\tn := make([]byte, (len(prefix) + len(payload)))\n\tfor k := range prefix {\n\t\tn[k] = prefix[k]\n\t}\n\tfor l := range payload {\n\t\tn[l+len(prefix)] = payload[l]\n\t}\n\tb58c := base58check.Encode(n)\n\treturn b58c\n}",
"func runtimeEncodeBase58(ic *interop.Context) error {\n\tsrc := ic.VM.Estack().Pop().Bytes()\n\tresult := base58.Encode(src)\n\tic.VM.Estack().PushVal([]byte(result))\n\treturn nil\n}",
"func B58cencode(payload []byte, prefix Prefix) string {\n\tn := make([]byte, (len(prefix) + len(payload)))\n\tfor k := range prefix {\n\t\tn[k] = prefix[k]\n\t}\n\tfor l := range payload {\n\t\tn[l+len(prefix)] = payload[l]\n\t}\n\tb58c := encode(n)\n\treturn b58c\n}",
"func EncodeFromBytes(input, prefix []byte) string {\n\tpayload := append(prefix[1:], input...)\n\treturn base58.CheckEncode(payload, prefix[0])\n}",
"func b58checkencode(ver uint8, b []byte) (s string) {\n\t/* Prepend version */\n\tbcpy := append([]byte{ver}, b...)\n\n\t/* Create a new SHA256 context */\n\tsha256_h := sha256.New()\n\n\t/* SHA256 Hash #1 */\n\tsha256_h.Reset()\n\tsha256_h.Write(bcpy)\n\thash1 := sha256_h.Sum(nil)\n\n\t/* SHA256 Hash #2 */\n\tsha256_h.Reset()\n\tsha256_h.Write(hash1)\n\thash2 := sha256_h.Sum(nil)\n\n\t/* Append first four bytes of hash */\n\tbcpy = append(bcpy, hash2[0:4]...)\n\n\t/* Encode base58 string */\n\ts = b58encode(bcpy)\n\n\t/* For number of leading 0's in bytes, prepend 1 */\n\tfor _, v := range bcpy {\n\t\tif v != 0 {\n\t\t\tbreak\n\t\t}\n\t\ts = \"1\" + s\n\t}\n\n\treturn s\n}",
"func b58checkencode(ver uint8, b []byte) (s string) {\n\t/* Prepend version */\n\tbcpy := append([]byte{ver}, b...)\n\n\t/* Create a new SHA256 context */\n\tsha256_h := sha256.New()\n\n\t/* SHA256 Hash #1 */\n\tsha256_h.Reset()\n\tsha256_h.Write(bcpy)\n\thash1 := sha256_h.Sum(nil)\n\n\t/* SHA256 Hash #2 */\n\tsha256_h.Reset()\n\tsha256_h.Write(hash1)\n\thash2 := sha256_h.Sum(nil)\n\n\t/* Append first four bytes of hash */\n\tbcpy = append(bcpy, hash2[0:4]...)\n\n\t/* Encode base58 string */\n\ts = b58encode(bcpy)\n\n\t/* For number of leading 0's in bytes, prepend 1 */\n\tfor _, v := range bcpy {\n\t\tif v != 0 {\n\t\t\tbreak\n\t\t}\n\t\ts = \"1\" + s\n\t}\n\n\treturn s\n}",
"func IDB58Encode(id ID) string {\n\treturn core.IDB58Encode(id)\n}",
"func IDB58Encode(id ID) string {\n\treturn b58.Encode([]byte(id))\n}",
"func (enc *Encoding) Encode(src []byte) ([]byte, error) {\n\tif len(src) == 0 {\n\t\treturn []byte{}, nil\n\t}\n\tn, ok := new(big.Int).SetString(string(src), 10)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"expecting a number but got %q\", src)\n\t}\n\tbytes := make([]byte, 0, len(src))\n\tfor _, c := range src {\n\t\tif c == '0' {\n\t\t\tbytes = append(bytes, enc.alphabet[0])\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tzerocnt := len(bytes)\n\tmod := new(big.Int)\n\tzero := big.NewInt(0)\n\tfor {\n\t\tswitch n.Cmp(zero) {\n\t\tcase 1:\n\t\t\tn.DivMod(n, radix, mod)\n\t\t\tbytes = append(bytes, enc.alphabet[mod.Int64()])\n\t\tcase 0:\n\t\t\treverse(bytes[zerocnt:])\n\t\t\treturn bytes, nil\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"expecting a non-negative number in base58 encoding but got %s\", n)\n\t\t}\n\t}\n}",
"func Encode(encoding Encoding, bytes []byte) (string, error) {\n\tswitch {\n\tcase !encoding.valid():\n\t\treturn \"\", errInvalidEncoding\n\tcase encoding == CB58 && len(bytes) > maxCB58EncodeSize:\n\t\treturn \"\", fmt.Errorf(\"byte slice length (%d) > maximum for cb58 (%d)\", len(bytes), maxCB58EncodeSize)\n\t}\n\n\tchecked := make([]byte, len(bytes)+checksumLen)\n\tcopy(checked, bytes)\n\tcopy(checked[len(bytes):], hashing.Checksum(bytes, checksumLen))\n\tswitch encoding {\n\tcase Hex:\n\t\treturn fmt.Sprintf(\"0x%x\", checked), nil\n\tcase CB58:\n\t\treturn base58.Encode(checked), nil\n\tdefault:\n\t\treturn \"\", errInvalidEncoding\n\t}\n}",
"func Encode(b []byte) string {\n\treturn EncodeAlphabet(b, BTCAlphabet)\n}",
"func (c Color) Base58() string {\n\treturn base58.Encode(c.Bytes())\n}",
"func Base58Decode(input []byte) []byte {\n\tresult := big.NewInt(0)\n\n\tfor _, b := range input {\n\t\tcharIndex := bytes.IndexByte(b58Alphabet, b)\n\t\tresult.Mul(result, big.NewInt(58))\n\t\tresult.Add(result, big.NewInt(int64(charIndex)))\n\t}\n\n\tdecoded := result.Bytes()\n\n\tif input[0] == b58Alphabet[0] {\n\t\tdecoded = append([]byte{0x00}, decoded...)\n\t}\n\n\treturn decoded\n}",
"func Base58Decode(input []byte) []byte {\n\tresult := big.NewInt(0)\n\n\tfor _, b := range input {\n\t\tcharIndex := bytes.IndexByte(b58Alphabet, b)\n\t\tresult.Mul(result, big.NewInt(58))\n\t\tresult.Add(result, big.NewInt(int64(charIndex)))\n\t}\n\n\tdecoded := result.Bytes()\n\n\tif input[0] == b58Alphabet[0] {\n\t\tdecoded = append([]byte{0x00}, decoded...)\n\t}\n\treturn decoded\n}",
"func B58CheckEncode(version int, data []byte) (string, []byte) {\n\n\tdata = append([]byte{byte(version)}, data...)\n\thash := sha256.Sum256(data)\n\thash = sha256.Sum256(hash[:])\n\n\treturn encode(append(data, hash[:4]...))\n}",
"func decodeBase58(b []byte) int64 {\n\tvar id int64\n\tfor p := range b {\n\t\tid = id*58 + int64(decodeBase58Map[b[p]])\n\t}\n\treturn id\n}",
"func (e *Encoding) Encode(value uint64) string {\n\tif value == 0 {\n\t\treturn string(e.encode[:1])\n\t}\n\tbin := make([]byte, 0, 8)\n\tfor value > 0 {\n\t\tbin = append(bin, e.encode[value%58])\n\t\tvalue /= 58\n\t}\n\n\tfor i, j := 0, len(bin)-1; i < j; i, j = i+1, j-1 {\n\t\tbin[i], bin[j] = bin[j], bin[i]\n\t}\n\treturn string(bin)\n}",
"func Encode(src *[10]byte) (dst [16]byte)",
"func Encode(value []byte) []byte {\n var length int = len(value)\n encoded := make([]byte, base64.URLEncoding.EncodedLen(length))\n base64.URLEncoding.Encode(encoded, value)\n return encoded\n}",
"func encodeAddress(hash160 []byte, netID [2]byte) string {\n\t// Format is 2 bytes for a network and address class (i.e. P2PKH vs\n\t// P2SH), 20 bytes for a RIPEMD160 hash, and 4 bytes of checksum.\n\treturn base58.CheckEncode(hash160[:ripemd160.Size], netID)\n}",
"func encodePubkey(pubkey *ecdsa.PublicKey) []byte {\n\treturn []byte(base64.RawURLEncoding.EncodeToString(ethcrypto.FromECDSAPub(pubkey)))\n}",
"func b58decode(s string) (b []byte, err error) {\n\t/* See https://en.bitcoin.it/wiki/Base58Check_encoding */\n\n\tconst BITCOIN_BASE58_TABLE = \"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz\"\n\n\t/* Initialize */\n\tx := big.NewInt(0)\n\tm := big.NewInt(58)\n\n\t/* Convert string to big int */\n\tfor i := 0; i < len(s); i++ {\n\t\tb58index := strings.IndexByte(BITCOIN_BASE58_TABLE, s[i])\n\t\tif b58index == -1 {\n\t\t\treturn nil, fmt.Errorf(\"Invalid base-58 character encountered: '%c', index %d.\", s[i], i)\n\t\t}\n\t\tb58value := big.NewInt(int64(b58index))\n\t\tx.Mul(x, m)\n\t\tx.Add(x, b58value)\n\t}\n\n\t/* Convert big int to big endian bytes */\n\tb = x.Bytes()\n\n\treturn b, nil\n}",
"func Base58Decode(b string, alphabet string) ([]byte, error) {\n\tif len(b) < 5 {\n\t\treturn nil, fmt.Errorf(\"Base58 string too short: %s\", b)\n\t}\n\tanswer := big.NewInt(0)\n\tj := big.NewInt(1)\n\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\ttmp := strings.IndexAny(alphabet, string(b[i]))\n\t\tif tmp == -1 {\n\t\t\treturn nil, fmt.Errorf(\"Bad Base58 string: %s\", b)\n\t\t}\n\t\tidx := big.NewInt(int64(tmp))\n\t\ttmp1 := big.NewInt(0)\n\t\ttmp1.Mul(j, idx)\n\n\t\tanswer.Add(answer, tmp1)\n\t\tj.Mul(j, bigRadix)\n\t}\n\n\ttmpval := answer.Bytes()\n\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != alphabet[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tflen := numZeros + len(tmpval)\n\tval := make([]byte, flen)\n\tcopy(val[numZeros:], tmpval)\n\n\treturn val, nil\n}",
"func Base58Decode(input []byte) []byte {\n\tdecode, err := base58.Decode(string(input[:]))\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn decode\n}",
"func Base58Decode(input []byte) []byte {\n\tdecode, err := base58.Decode(string(input[:]))\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\treturn decode\n}",
"func encode(size int, input []byte, position int, numberOfByte int, controlBitsQuantity int) []byte {\n\tencoded := make([]byte, int(size/8))\n\t//Data bits accommodate process\n\tfor i := controlBitsQuantity - 1; i > 0; i-- {\n\t\tsl := expInt(i) - 1\n\t\til := expInt(i-1) - 1\n\t\tfor j := sl - 1; j > il; j-- {\n\t\t\tdataBit := takeBit(input[numberOfByte], position, 7-int(j%8))\n\t\t\tx := int(j / 8)\n\t\t\tencoded[x] = encoded[x] | dataBit\n\t\t\tposition++\n\t\t\tif position > 7 {\n\t\t\t\tnumberOfByte--\n\t\t\t\tposition = 0\n\t\t\t}\n\t\t}\n\t}\n\t//Control bits calculus process\n\tfor i := 0; i < controlBitsQuantity-1; i++ {\n\t\tparity := byte(0)\n\t\tfor j := expInt(i) - 1; j < size; j += expInt(i + 1) {\n\t\t\tfor k := 0; k < expInt(i); k++ {\n\t\t\t\tparity ^= takeBit(encoded[int((j+k)/8)], 7-((j+k)%8), 0)\n\t\t\t}\n\t\t}\n\t\tx := int(int(expInt(i)-1) / 8)\n\t\tencoded[x] = encoded[x] | takeBit(parity, 0, 7-(expInt(i)-1)%8)\n\t}\n\treturn encoded\n}",
"func encodeKeyHash(verPublicKeyHash []byte, checkSum []byte) string {\n\n\ts := \"START encodeKeyHash() - Encodes verPublicKeyHash & checkSum\"\n\tlog.Debug(\"WALLET: GUTS \" + s)\n\n\t// 7 - CONCAT\n\taddressHex := append(verPublicKeyHash, checkSum...)\n\ts = \"7 - CONCAT \" + hex.EncodeToString(addressHex)\n\tlog.Info(\"WALLET: GUTS \" + s)\n\n\t// 8 - BASE58 ENCODING\n\tjeffCoinAddressHex := base58.Encode(addressHex)\n\ts = \"8 - BASE58 ENCODING \" + jeffCoinAddressHex\n\tlog.Info(\"WALLET: GUTS \" + s)\n\n\ts = \"END encodeKeyHash() - Encodes verPublicKeyHash & checkSum\"\n\tlog.Debug(\"WALLET: GUTS \" + s)\n\n\treturn jeffCoinAddressHex\n\n}",
"func Encode(b []byte) string {\n\tenc := make([]byte, len(b)*2+2)\n\tcopy(enc, \"0x\")\n\thex.Encode(enc[2:], b)\n\treturn string(enc)\n}",
"func Encode(b []byte) string {\n\tenc := make([]byte, len(b)*2+2)\n\tcopy(enc, \"0x\")\n\thex.Encode(enc[2:], b)\n\treturn string(enc)\n}",
"func encode(value []byte) []byte {\n\tencoded := make([]byte, base64.URLEncoding.EncodedLen(len(value)))\n\tbase64.URLEncoding.Encode(encoded, value)\n\treturn encoded\n}",
"func encodeByteSlice(w io.Writer, bz []byte) (err error) {\n\terr = encodeVarint(w, int64(len(bz)))\n\tif err != nil {\n\t\treturn\n\t}\n\t_, err = w.Write(bz)\n\treturn\n}",
"func Encode(src []byte) string {\n\treturn base64.RawURLEncoding.EncodeToString(src)\n}",
"func EncodeToBytes(i uint64) []byte {\n\tif i == 0 {\n\t\treturn []byte{0x0}\n\t}\n\n\tiBuf := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(iBuf, i)\n\n\t// bits needed to encode i\n\tl := bits.Len64(i)\n\n\t// bytes needed to encode i\n\tlb := l / 8\n\tif l%8 != 0 {\n\t\tlb += 1 // one more byte needed for the extra bits\n\t}\n\n\treturn iBuf[8-lb:]\n}",
"func encodeByteSequence(v [][]byte) []byte {\n\tvar hexstrings []string\n\tfor _, a := range v {\n\t\thexstrings = append(hexstrings, hexutil.Encode(a))\n\t}\n\treturn []byte(strings.Join(hexstrings, \",\"))\n}",
"func decode(src []byte) ([]byte, int, error) {\n\tb := string(src)\n\tanswer := big.NewInt(0)\n\tj := big.NewInt(1)\n\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\ttmp := strings.IndexAny(base58table, string(b[i]))\n\t\tif tmp == -1 {\n\t\t\tfmt.Println(b)\n\t\t\treturn []byte(\"\"), 0,\n\t\t\t\terrors.New(\"encoding/base58: invalid character found: ~\" +\n\t\t\t\t\tstring(b[i]) + \"~\")\n\t\t}\n\t\tidx := big.NewInt(int64(tmp))\n\t\ttmp1 := big.NewInt(0)\n\t\ttmp1.Mul(j, idx)\n\n\t\tanswer.Add(answer, tmp1)\n\t\tj.Mul(j, big.NewInt(radix))\n\t}\n\n\ttmpval := answer.Bytes()\n\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != base58table[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tflen := numZeros + len(tmpval)\n\tval := make([]byte, flen, flen)\n\tcopy(val[numZeros:], tmpval)\n\treturn val, len(val), nil\n}",
"func B64EncodeByteToStr(inputBytes []byte) string {\r\n\treturn base64.StdEncoding.EncodeToString(inputBytes)\r\n}",
"func encode(k Key) ([]byte, error) {\n\tver := k.version()\n\tpsize := ver.PayloadSize()\n\ttsize := 1 + psize + 4\n\traw := k.raw()\n\tif len(raw) > psize {\n\t\treturn nil, errors.New(\"tbd\")\n\t}\n\ttmp := make([]byte, tsize)\n\ttmp[0] = byte(ver)\n\tcopy(tmp[len(tmp)-4-len(raw):], raw)\n\tsum := doublehash.SumDoubleSha256(tmp[:1+psize])\n\tcopy(tmp[1+psize:], sum[:4])\n\treturn rippleEncoding.Encode(tmp)\n}",
"func (a *A25) A58() []byte {\n\tvar out [34]byte\n\tfor n := 33; n >= 0; n-- {\n\t\tc := 0\n\t\tfor i := 0; i < 25; i++ {\n\t\t\tc = c*256 + int(a[i])\n\t\t\ta[i] = byte(c / 58)\n\t\t\tc %= 58\n\t\t}\n\t\tout[n] = tmpl[c]\n\t}\n\ti := 1\n\tfor i < 34 && out[i] == '1' {\n\t\ti++\n\t}\n\treturn out[i-1:]\n}",
"func Encode(in string) string {\n\tbytes := []byte(in)\n\tpairs := encodePairs(bytes)\n\tvar builder strings.Builder\n\tfor i, pair := range pairs {\n\t\tres := encodeBase45(pair)\n\t\tif i + 1 == len(pairs) && res[2] == 0 {\n\t\t\tfor _, b := range res[:2] {\n\t\t\t\tif c, ok := encodingMap[b]; ok {\n\t\t\t\t\tbuilder.WriteRune(c)\n\t\t\t\t}\n\t\t\t}\n\t\t} else {\n\t\t\tfor _, b := range res {\n\t\t\t\tif c, ok := encodingMap[b]; ok {\n\t\t\t\t\tbuilder.WriteRune(c)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn builder.String()\n}",
"func Base32Encoder(asBytes Bytes16) string {\n\treturn base32.StdEncoding.EncodeToString(asBytes[:])\n}",
"func (b58 Base58) Decode(b string) []byte {\n\tanswer := big.NewInt(0)\n\tj := big.NewInt(1)\n\n\tscratch := new(big.Int)\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\ttmp := strings.IndexAny(b58.alphabet, string(b[i]))\n\t\tif tmp == -1 {\n\t\t\treturn []byte(\"\")\n\t\t}\n\t\tscratch.SetInt64(int64(tmp))\n\t\tscratch.Mul(j, scratch)\n\t\tanswer.Add(answer, scratch)\n\t\tj.Mul(j, bigRadix)\n\t}\n\n\ttmpval := answer.Bytes()\n\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != b58.alphabet[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tflen := numZeros + len(tmpval)\n\tval := make([]byte, flen)\n\tcopy(val[numZeros:], tmpval)\n\n\treturn val\n}",
"func (pk PublicKey) PublicKeyBase58() string {\n\treturn stringEntry(pk[PublicKeyBase58Property])\n}",
"func encodeBytes(data []byte) []byte {\n\t// Allocate more space to avoid unnecessary slice growing.\n\t// Assume that the byte slice size is about `(len(data) / encGroupSize + 1) * (encGroupSize + 1)` bytes,\n\t// that is `(len(data) / 8 + 1) * 9` in our implement.\n\tdLen := len(data)\n\tresult := make([]byte, 0, (dLen/encGroupSize+1)*(encGroupSize+1))\n\tfor idx := 0; idx <= dLen; idx += encGroupSize {\n\t\tremain := dLen - idx\n\t\tpadCount := 0\n\t\tif remain >= encGroupSize {\n\t\t\tresult = append(result, data[idx:idx+encGroupSize]...)\n\t\t} else {\n\t\t\tpadCount = encGroupSize - remain\n\t\t\tresult = append(result, data[idx:]...)\n\t\t\tresult = append(result, pads[:padCount]...)\n\t\t}\n\n\t\tmarker := encMarker - byte(padCount)\n\t\tresult = append(result, marker)\n\t}\n\treturn result\n}",
"func shaEncode(input string) string {\n\tsha := sha512.Sum512([]byte(input))\n\treturn hex.EncodeToString(sha[:])\n}",
"func sliceEncoder(e *encodeState, v reflect.Value) {\n\tif v.IsNil() {\n\t\te.WriteString(\"le\")\n\t\treturn\n\t}\n\tif v.Type().Elem().Kind() == reflect.Uint8 {\n\t\ts := v.Bytes()\n\t\tb := strconv.AppendInt(e.scratch[:0], int64(len(s)), 10)\n\t\te.Write(b)\n\t\te.WriteString(\":\")\n\t\te.Write(s)\n\t}\n}",
"func Encode(in []byte) (string, error) {\n\treturn b64.StdEncoding.EncodeToString(in), nil\n}",
"func (m Multihash) B58String() string {\n\treturn b58.Encode([]byte(m))\n}",
"func ikcp_encode8u(p []byte, c byte) []byte {\n\tp[0] = c\n\treturn p[1:]\n}",
"func Base64Encode(operand string) string { return base64.StdEncoding.EncodeToString([]byte(operand)) }",
"func encodeUtf8(codepoints []uint16) string {\n\tbytesRequired := 0\n\n\tfor _, cp := range codepoints {\n\t\tbytesRequired += utf8.RuneLen(int(cp))\n\t}\n\n\tbs := make([]byte, bytesRequired)\n\tcurByte := 0\n\tfor _, cp := range codepoints {\n\t\tcurByte += utf8.EncodeRune(bs[curByte:], int(cp))\n\t}\n\n\treturn string(bs)\n}",
"func Encode(s string) string {\n\tvar enc string\n\tfor _, c := range []byte(s) {\n\t\tif isEncodable(c) {\n\t\t\tenc += \"%\"\n\t\t\tenc += string(\"0123456789ABCDEF\"[c>>4])\n\t\t\tenc += string(\"0123456789ABCDEF\"[c&15])\n\t\t} else {\n\t\t\tenc += string(c)\n\t\t}\n\t}\n\treturn enc\n}",
"func Encode(number int) (string, error) {\n\tif number == 0 {\n\t\treturn \"0\", nil\n\t}\n\n\tstr := \"\"\n\tfor number > 0 {\n\t\tdigit := number % 62\n\t\tchr, err := dehydrate(digit)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tstr = chr + str\n\t\tnumber = int(number / 62)\n\t}\n\treturn str, nil\n}",
"func Base64Encode(raw []byte) string {\n\treturn base64.URLEncoding.EncodeToString(raw)\n}",
"func encodeBytes(r uint16, decoded []byte) (uint16, []byte) {\n\tvar c1, c2, cipher uint16\n\tc1 = 52845\n\tc2 = 22719\n\tencoded := make([]byte, len(decoded))\n\tfor i, plain := range decoded {\n\t\tcipher = uint16(plain) ^ (r >> 8)\n\t\tr = (uint16(cipher)+r)*c1 + c2\n\t\tencoded[i] = byte(cipher)\n\t}\n\treturn r, encoded\n}",
"func (b *blockEnc) encodeRaw(a []byte) {\n\tvar bh blockHeader\n\tbh.setLast(b.last)\n\tbh.setSize(uint32(len(a)))\n\tbh.setType(blockTypeRaw)\n\tb.output = bh.appendTo(b.output[:0])\n\tb.output = append(b.output, a...)\n\tif debugEncoder {\n\t\tprintln(\"Adding RAW block, length\", len(a), \"last:\", b.last)\n\t}\n}",
"func encodeUUID(src [16]byte) string {\n\treturn fmt.Sprintf(\"%x-%x-%x-%x-%x\", src[0:4], src[4:6], src[6:8], src[8:10], src[10:16])\n}",
"func Base64Transaction(tx []byte) (bs string) {\n jtx, _ := json.Marshal(tx)\n bs = base64.StdEncoding.EncodeToString(jtx)\n return bs\n}",
"func Encode(num int) []byte {\n\tstr := []byte{}\n\n\tfor num >= base {\n\t\tmod := num % base\n\t\tstr = prepend(str, Alphabet[mod])\n\t\tnum = (num - mod) / base\n\t}\n\n\treturn prepend(str, Alphabet[num])\n}",
"func encodeInt(b []byte, v int64) []byte {\n\tvar data [8]byte\n\tu := encodeIntToCmpUint(v)\n\tbinary.BigEndian.PutUint64(data[:], u)\n\treturn append(b, data[:]...)\n}",
"func EncodeOutPoint(op *corepb.OutPoint) string {\n\tbuf := make([]byte, len(op.Hash))\n\tcopy(buf, op.Hash[:])\n\t// reverse bytes\n\tfor i, j := 0, len(buf)-1; i < j; i, j = i+1, j-1 {\n\t\tbuf[i], buf[j] = buf[j], buf[i]\n\t}\n\t// append separator ':'\n\tbuf = append(buf, ':')\n\t// put index\n\tb := make([]byte, 4)\n\tbinary.LittleEndian.PutUint32(b, op.Index)\n\tbuf = append(buf, b...)\n\n\treturn base58.Encode(buf)\n}",
"func encodeNumber(n byte, lenBytes int) []byte {\n\tlenSlice := make([]byte, lenBytes)\n\tlenSlice[0] = n\n\treturn lenSlice\n}",
"func base62(b byte) byte {\n\tif b < 10 {\n\t\treturn byte('0' + b)\n\t} else if b < 36 {\n\t\treturn byte('a' - 10 + b)\n\t} else if b < 62 {\n\t\treturn byte('A' - 36 + b)\n\t}\n\tpanic(\"integer out of range for base 62 encode\")\n}",
"func base64Encode(src []byte) string {\n\tbuf := make([]byte, base64EncodedLen(len(src)))\n\n\t// mehhhhh actually base64 encoding is a bit more involved\n\t// and it feels like not a good use of time to implement it myself right now,\n\t// I should come back to it. Basically you take 3 bytes of input,\n\t// and then break it into 4 groups of 6 bits, and then encode that\n\t// to produce 4 bytes of base64\n\tbase64.StdEncoding.Encode(buf, src)\n\treturn string(buf)\n}",
"func EncodedInt(n int) []byte {\n\tvar b [5]byte\n\ti := 0\n\te := uint32(n)\n\tif e == 0 {\n\t\treturn []byte{0}\n\t}\n\n\tfor e != 0 {\n\t\tb[i] = byte(e)\n\t\te >>= 7\n\t\tif e != 0 {\n\t\t\tb[i] |= 0x80\n\t\t}\n\t\ti++\n\t}\n\n\treturn b[:i]\n}",
"func (cb *codedBuffer) encodeRawBytes(b []byte) error {\n\tcb.encodeVarint(uint64(len(b)))\n\tcb.buf = append(cb.buf, b...)\n\treturn nil\n}",
"func (ed EncodeDecoder) Encode(id int64) string {\n\tid += ed.offset\n\tif id < 58 {\n\t\treturn string(ed.alphabet[id])\n\t}\n\n\tb := make([]byte, 0, 11)\n\tfor id >= 58 {\n\t\tb = append(b, ed.alphabet[id%58])\n\t\tid /= 58\n\t}\n\tb = append(b, ed.alphabet[id])\n\n\tfor x, y := 0, len(b)-1; x < y; x, y = x+1, y-1 {\n\t\tb[x], b[y] = b[y], b[x]\n\t}\n\n\treturn string(b)\n}",
"func encodeRangeKey(keyType byte, ss ...[]byte) []byte {\n\toutput := buildRangeValue(2, ss...)\n\toutput[len(output)-2] = keyType\n\treturn output\n}",
"func NewEncoding(encoder string) (*Encoding, error) {\n\tif len(encoder) != 58 {\n\t\treturn nil, errors.New(\"base58: encoding alphabet is not 58-bytes\")\n\t}\n\tfor i := 0; i < len(encoder); i++ {\n\t\tif encoder[i] == '\\n' || encoder[i] == '\\r' {\n\t\t\treturn nil, errors.New(\"base58: encoding alphabet contains newline character\")\n\t\t}\n\t}\n\te := new(Encoding)\n\tfor i := range e.decodeMap {\n\t\te.decodeMap[i] = -1\n\t}\n\tfor i := range encoder {\n\t\te.encode[i] = byte(encoder[i])\n\t\te.decodeMap[e.encode[i]] = i\n\t}\n\treturn e, nil\n}",
"func (addr *Address) Encode() (string, error) {\n\tripe := addr.Ripe[:]\n\n\tswitch addr.Version {\n\tcase 2:\n\t\tfallthrough\n\tcase 3:\n\t\tif ripe[0] == 0x00 {\n\t\t\tripe = ripe[1:] // exclude first byte\n\t\t\tif ripe[0] == 0x00 {\n\t\t\t\tripe = ripe[1:] // exclude second byte as well\n\t\t\t}\n\t\t}\n\tcase 4:\n\t\tripe = bytes.TrimLeft(ripe, \"\\x00\")\n\tdefault:\n\t\treturn \"\", ErrUnknownAddressType\n\t}\n\n\tif len(ripe) > 19 {\n\t\treturn \"\", errors.New(\"improper ripe, doesn't have null bytes in front\")\n\t}\n\n\tvar binaryData bytes.Buffer\n\tWriteVarInt(&binaryData, addr.Version)\n\tWriteVarInt(&binaryData, addr.Stream)\n\tbinaryData.Write(ripe)\n\n\t// calc checksum from 2 rounds of SHA512\n\tchecksum := DoubleSha512(binaryData.Bytes())[:4]\n\n\ttotalBin := append(binaryData.Bytes(), checksum...)\n\n\treturn \"BM-\" + string(base58.Encode(totalBin)), nil // done\n}",
"func encode6Bits(src int) byte {\n\tdiff := (0x41)\n\tdiff += ((25 - src) >> 8) & 6\n\tdiff -= ((51 - src) >> 8) & 75\n\tdiff -= ((61 - src) >> 8) & 15\n\tdiff += ((62 - src) >> 8) & 3\n\tret := src + diff\n\treturn byte(ret)\n}",
"func encodeBlockNumber(number uint64) []byte {\n\tenc := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(enc, number)\n\treturn enc\n}",
"func encodeBlockNumber(number uint64) []byte {\n\tenc := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(enc, number)\n\treturn enc\n}",
"func EncodeToString(s []byte) string {\n\treturn hex.EncodeToString(Reverse(s))\n}",
"func encode(n uint64) []byte {\n\tbuf := make([]byte, 8)\n\tbinary.BigEndian.PutUint64(buf, n)\n\treturn buf\n}",
"func __b64encode(out *[]byte, src *[]byte, mode int)",
"func (b *blockEnc) encodeRawTo(dst, src []byte) []byte {\n\tvar bh blockHeader\n\tbh.setLast(b.last)\n\tbh.setSize(uint32(len(src)))\n\tbh.setType(blockTypeRaw)\n\tdst = bh.appendTo(dst)\n\tdst = append(dst, src...)\n\tif debugEncoder {\n\t\tprintln(\"Adding RAW block, length\", len(src), \"last:\", b.last)\n\t}\n\treturn dst\n}",
"func EncodeSeed(public PrefixByte, src []byte) ([]byte, error) {\n\n\tif len(src) != ed25519.SeedSize {\n\t\treturn nil, fmt.Errorf(\"invalid key length\")\n\t}\n\n\t// In order to make this human printable for both bytes, we need to do a little\n\t// bit manipulation to setup for base32 encoding which takes 5 bits at a time.\n\tb1 := byte(PrefixByteSeed) | (byte(public) >> 5)\n\tb2 := (byte(public) & 31) << 3 // 31 = 00011111\n\n\tvar raw bytes.Buffer\n\n\traw.WriteByte(b1)\n\traw.WriteByte(b2)\n\n\t// write payload\n\tif _, err := raw.Write(src); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Calculate and write crc16 checksum\n\terr := binary.Write(&raw, binary.LittleEndian, crc16(raw.Bytes()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdata := raw.Bytes()\n\tbuf := make([]byte, b32Enc.EncodedLen(len(data)))\n\tb32Enc.Encode(buf, data)\n\treturn buf, nil\n}",
"func binaryEncode(dst, src []byte) {\n\td := uint(0)\n\t_, _ = src[0], dst[7]\n\tfor i := 7; i >= 0; i-- {\n\t\tif src[0]&(1<<d) == 0 {\n\t\t\tdst[i] = '0'\n\t\t} else {\n\t\t\tdst[i] = '1'\n\t\t}\n\t\td++\n\t}\n}",
"func Encode(e uint) string {\n\tif e == 0 {\n\t\treturn string(EncodeMap[0])\n\t}\n\n\tresult := []byte{}\n\tfor e > 0 {\n\t\tremainder := e % MapLength\n\t\te = e / 62\n\t\tresult = append(result, EncodeMap[remainder])\n\t}\n\n\treturn string(result)\n}",
"func Base64Encode(b []byte) []byte {\r\n\tbuf := make([]byte, base64.RawURLEncoding.EncodedLen(len(b)))\r\n\tbase64.RawURLEncoding.Encode(buf, b)\r\n\treturn buf\r\n}",
"func (addr *Address) Encode() (string, error) {\n\tripe := addr.Ripe[:]\n\n\tswitch addr.Version {\n\tcase 2:\n\t\tfallthrough\n\tcase 3:\n\t\tif ripe[0] == 0x00 {\n\t\t\tripe = ripe[1:] // exclude first byte\n\t\t\tif ripe[0] == 0x00 {\n\t\t\t\tripe = ripe[1:] // exclude second byte as well\n\t\t\t}\n\t\t}\n\tcase 4:\n\t\tripe = bytes.TrimLeft(ripe, \"\\x00\")\n\tdefault:\n\t\treturn \"\", errors.New(\"unsupported address version\")\n\t}\n\n\tvar binaryData bytes.Buffer\n\tbinaryData.Write(addr.Version.Serialize())\n\tbinaryData.Write(addr.Stream.Serialize())\n\tbinaryData.Write(ripe)\n\n\tsha := sha512.New()\n\tsha.Write(binaryData.Bytes())\n\tcurrentHash := sha.Sum(nil) // calc hash\n\tsha.Reset() // reset hash\n\tsha.Write(currentHash)\n\tchecksum := sha.Sum(nil)[:4] // calc checksum from another round of SHA512\n\n\ttotalBin := append(binaryData.Bytes(), checksum...)\n\n\ti := new(big.Int).SetBytes(totalBin)\n\treturn \"BM-\" + string(base58.EncodeBig(nil, i)), nil // done\n}",
"func tob64(in []byte) string {\n\treturn base64.RawURLEncoding.EncodeToString(in)\n}",
"func (f genHelperEncoder) EncEncode(v interface{}) { f.e.encode(v) }",
"func (msg *MsgPing) BtcEncode(w io.Writer, pver uint32) error {\n\terr := writeElement(w, msg.Nonce)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Encode(in, out []byte) []byte {\n\tif len(in)%3 != 0 {\n\t\tpanic(\"len(in) must be a multiple of 3\")\n\t}\n\tfor len(in) >= 3 {\n\t\tout = encode24(in, out)\n\t\tin = in[3:]\n\t}\n\treturn out\n}",
"func (c *curve) encodePoint(x, y *mod.Int) []byte {\n\n\t// Encode the y-coordinate\n\tb, _ := y.MarshalBinary()\n\n\t// Encode the sign of the x-coordinate.\n\tif y.M.BitLen()&7 == 0 {\n\t\t// No unused bits at the top of y-coordinate encoding,\n\t\t// so we must prepend a whole byte.\n\t\tb = append(make([]byte, 1), b...)\n\t}\n\tif c.coordSign(x) != 0 {\n\t\tb[0] |= 0x80\n\t}\n\n\t// Convert to little-endian\n\treverse(b, b)\n\treturn b\n}",
"func HexEncode(bs []byte) string {\n\treturn fmt.Sprintf(\"0x%s\", hex.EncodeToString(bs))\n}",
"func StandardEncoder(asBytes Bytes16) string {\n\tbuf := make([]byte, 36)\n\n\twriteIndex := 0\n\n\tfor i, b := range asBytes {\n\n\t\tif (i == 4) || (i == 6) || (i == 8) || (i == 10) {\n\t\t\tbuf[writeIndex] = ('-')\n\t\t\twriteIndex++\n\t\t}\n\n\t\tbuf[writeIndex] = hextable[b>>4]\n\t\tbuf[writeIndex+1] = hextable[b&0x0f]\n\n\t\twriteIndex += 2\n\t}\n\n\treturn string(buf)\n}"
] | [
"0.81840426",
"0.7762757",
"0.7762757",
"0.76827353",
"0.76582295",
"0.7521171",
"0.72944915",
"0.7157638",
"0.7039257",
"0.69844925",
"0.69814324",
"0.6871738",
"0.68469775",
"0.6762033",
"0.6709189",
"0.66834635",
"0.66349524",
"0.66349524",
"0.6607509",
"0.6540181",
"0.6510735",
"0.64275277",
"0.62820566",
"0.6238424",
"0.6087282",
"0.6074139",
"0.60462356",
"0.60452837",
"0.59762895",
"0.57637495",
"0.568406",
"0.56829095",
"0.5682449",
"0.56670105",
"0.56643677",
"0.56571585",
"0.56571585",
"0.5640534",
"0.5625732",
"0.56224376",
"0.56224376",
"0.56046623",
"0.5571181",
"0.55251634",
"0.54531056",
"0.5401614",
"0.5392556",
"0.53713936",
"0.5366826",
"0.53620785",
"0.5357435",
"0.5350322",
"0.5346882",
"0.5345104",
"0.53331673",
"0.52867585",
"0.5286176",
"0.527757",
"0.52718633",
"0.52572757",
"0.525177",
"0.52365106",
"0.5221173",
"0.52161545",
"0.5195751",
"0.5190157",
"0.51810235",
"0.5176097",
"0.51742154",
"0.51664734",
"0.51648265",
"0.51441145",
"0.51435345",
"0.5140952",
"0.5134914",
"0.5123635",
"0.5100372",
"0.50896645",
"0.5074968",
"0.50654954",
"0.5064523",
"0.5062724",
"0.5062247",
"0.5062247",
"0.50598377",
"0.50558007",
"0.50532144",
"0.50530744",
"0.5048053",
"0.50363606",
"0.50351536",
"0.50347173",
"0.50328094",
"0.5026976",
"0.501819",
"0.5011829",
"0.49979392",
"0.49813366",
"0.49747518",
"0.4972258"
] | 0.80068636 | 1 |
Base58Decode decodes a base 58 representation into a byte slice | func Base58Decode(input []byte) []byte {
decode, err := base58.Decode(string(input[:]))
if err != nil {
log.Panic(err)
}
return decode
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Base58Decode(input []byte) []byte {\n\tresult := big.NewInt(0)\n\n\tfor _, b := range input {\n\t\tcharIndex := bytes.IndexByte(b58Alphabet, b)\n\t\tresult.Mul(result, big.NewInt(58))\n\t\tresult.Add(result, big.NewInt(int64(charIndex)))\n\t}\n\n\tdecoded := result.Bytes()\n\n\tif input[0] == b58Alphabet[0] {\n\t\tdecoded = append([]byte{0x00}, decoded...)\n\t}\n\treturn decoded\n}",
"func Base58Decode(input []byte) []byte {\n\tresult := big.NewInt(0)\n\n\tfor _, b := range input {\n\t\tcharIndex := bytes.IndexByte(b58Alphabet, b)\n\t\tresult.Mul(result, big.NewInt(58))\n\t\tresult.Add(result, big.NewInt(int64(charIndex)))\n\t}\n\n\tdecoded := result.Bytes()\n\n\tif input[0] == b58Alphabet[0] {\n\t\tdecoded = append([]byte{0x00}, decoded...)\n\t}\n\n\treturn decoded\n}",
"func Base58Decode(b string, alphabet string) ([]byte, error) {\n\tif len(b) < 5 {\n\t\treturn nil, fmt.Errorf(\"Base58 string too short: %s\", b)\n\t}\n\tanswer := big.NewInt(0)\n\tj := big.NewInt(1)\n\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\ttmp := strings.IndexAny(alphabet, string(b[i]))\n\t\tif tmp == -1 {\n\t\t\treturn nil, fmt.Errorf(\"Bad Base58 string: %s\", b)\n\t\t}\n\t\tidx := big.NewInt(int64(tmp))\n\t\ttmp1 := big.NewInt(0)\n\t\ttmp1.Mul(j, idx)\n\n\t\tanswer.Add(answer, tmp1)\n\t\tj.Mul(j, bigRadix)\n\t}\n\n\ttmpval := answer.Bytes()\n\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != alphabet[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tflen := numZeros + len(tmpval)\n\tval := make([]byte, flen)\n\tcopy(val[numZeros:], tmpval)\n\n\treturn val, nil\n}",
"func b58decode(s string) (b []byte, err error) {\n\t/* See https://en.bitcoin.it/wiki/Base58Check_encoding */\n\n\tconst BITCOIN_BASE58_TABLE = \"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz\"\n\n\t/* Initialize */\n\tx := big.NewInt(0)\n\tm := big.NewInt(58)\n\n\t/* Convert string to big int */\n\tfor i := 0; i < len(s); i++ {\n\t\tb58index := strings.IndexByte(BITCOIN_BASE58_TABLE, s[i])\n\t\tif b58index == -1 {\n\t\t\treturn nil, fmt.Errorf(\"Invalid base-58 character encountered: '%c', index %d.\", s[i], i)\n\t\t}\n\t\tb58value := big.NewInt(int64(b58index))\n\t\tx.Mul(x, m)\n\t\tx.Add(x, b58value)\n\t}\n\n\t/* Convert big int to big endian bytes */\n\tb = x.Bytes()\n\n\treturn b, nil\n}",
"func decodeBase58(b []byte) int64 {\n\tvar id int64\n\tfor p := range b {\n\t\tid = id*58 + int64(decodeBase58Map[b[p]])\n\t}\n\treturn id\n}",
"func decode(src []byte) ([]byte, int, error) {\n\tb := string(src)\n\tanswer := big.NewInt(0)\n\tj := big.NewInt(1)\n\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\ttmp := strings.IndexAny(base58table, string(b[i]))\n\t\tif tmp == -1 {\n\t\t\tfmt.Println(b)\n\t\t\treturn []byte(\"\"), 0,\n\t\t\t\terrors.New(\"encoding/base58: invalid character found: ~\" +\n\t\t\t\t\tstring(b[i]) + \"~\")\n\t\t}\n\t\tidx := big.NewInt(int64(tmp))\n\t\ttmp1 := big.NewInt(0)\n\t\ttmp1.Mul(j, idx)\n\n\t\tanswer.Add(answer, tmp1)\n\t\tj.Mul(j, big.NewInt(radix))\n\t}\n\n\ttmpval := answer.Bytes()\n\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != base58table[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tflen := numZeros + len(tmpval)\n\tval := make([]byte, flen, flen)\n\tcopy(val[numZeros:], tmpval)\n\treturn val, len(val), nil\n}",
"func (b58 Base58) Decode(b string) []byte {\n\tanswer := big.NewInt(0)\n\tj := big.NewInt(1)\n\n\tscratch := new(big.Int)\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\ttmp := strings.IndexAny(b58.alphabet, string(b[i]))\n\t\tif tmp == -1 {\n\t\t\treturn []byte(\"\")\n\t\t}\n\t\tscratch.SetInt64(int64(tmp))\n\t\tscratch.Mul(j, scratch)\n\t\tanswer.Add(answer, scratch)\n\t\tj.Mul(j, bigRadix)\n\t}\n\n\ttmpval := answer.Bytes()\n\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != b58.alphabet[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tflen := numZeros + len(tmpval)\n\tval := make([]byte, flen)\n\tcopy(val[numZeros:], tmpval)\n\n\treturn val\n}",
"func IDB58Decode(s string) (core.ID, error) {\n\treturn core.IDB58Decode(s)\n}",
"func (self Base58Check) Decode(input string) (result []byte, version byte, err error) {\n\t/*if len(input) == 0 {\n\t\treturn []byte{}, 0, errors.New(\"Input to decode is empty\")\n\t}*/\n\n\tdecoded := Base58{}.Decode(input)\n\tif len(decoded) < 5 {\n\t\treturn nil, 0, ErrInvalidFormat\n\t}\n\tversion = decoded[0]\n\t// var cksum []byte\n\tcksum := make([]byte, common.CheckSumLen)\n\tcopy(cksum[:], decoded[len(decoded)-common.CheckSumLen:])\n\tif bytes.Compare(ChecksumFirst4Bytes(decoded[:len(decoded)-common.CheckSumLen]), cksum) != 0 {\n\t\treturn nil, 0, ErrChecksum\n\t}\n\tpayload := decoded[1 : len(decoded)-common.CheckSumLen]\n\tresult = append(result, payload...)\n\treturn\n}",
"func runtimeDecodeBase58(ic *interop.Context) error {\n\tsrc := ic.VM.Estack().Pop().String()\n\tresult, err := base58.Decode(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tic.VM.Estack().PushVal(result)\n\treturn nil\n}",
"func (enc *Encoding) Decode(src []byte) ([]byte, error) {\n\tif len(src) == 0 {\n\t\treturn []byte{}, nil\n\t}\n\tvar zeros []byte\n\tfor i, c := range src {\n\t\tif c == enc.alphabet[0] && i < len(src)-1 {\n\t\t\tzeros = append(zeros, '0')\n\t\t} else {\n\t\t\tbreak\n\t\t}\n\t}\n\tn := new(big.Int)\n\tvar i int64\n\tfor _, c := range src {\n\t\tif i = enc.decodeMap[c]; i < 0 {\n\t\t\treturn nil, fmt.Errorf(\"invalid character '%c' in decoding a base58 string \\\"%s\\\"\", c, src)\n\t\t}\n\t\tn.Add(n.Mul(n, radix), big.NewInt(i))\n\t}\n\treturn n.Append(zeros, 10), nil\n}",
"func Decode(encoding Encoding, str string) ([]byte, error) {\n\tswitch {\n\tcase !encoding.valid():\n\t\treturn nil, errInvalidEncoding\n\tcase len(str) == 0:\n\t\treturn nil, nil\n\tcase encoding == CB58 && len(str) > maxCB58DecodeSize:\n\t\treturn nil, fmt.Errorf(\"string length (%d) > maximum for cb58 (%d)\", len(str), maxCB58DecodeSize)\n\t}\n\n\tvar (\n\t\tdecodedBytes []byte\n\t\terr error\n\t)\n\tswitch encoding {\n\tcase Hex:\n\t\tif !strings.HasPrefix(str, hexPrefix) {\n\t\t\treturn nil, errMissingHexPrefix\n\t\t}\n\t\tdecodedBytes, err = hex.DecodeString(str[2:])\n\tcase CB58:\n\t\tdecodedBytes, err = base58.Decode(str)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(decodedBytes) < checksumLen {\n\t\treturn nil, errMissingChecksum\n\t}\n\t// Verify the checksum\n\trawBytes := decodedBytes[:len(decodedBytes)-checksumLen]\n\tif len(rawBytes) > maxCB58EncodeSize {\n\t\treturn nil, fmt.Errorf(\"byte slice length (%d) > maximum for cb58 (%d)\", len(decodedBytes), maxCB58EncodeSize)\n\t}\n\n\tchecksum := decodedBytes[len(decodedBytes)-checksumLen:]\n\tif !bytes.Equal(checksum, hashing.Checksum(rawBytes, checksumLen)) {\n\t\treturn nil, errBadChecksum\n\t}\n\treturn rawBytes, nil\n}",
"func checkDecode(input string, curve CurveID) (result []byte, err error) {\n\tdecoded := base58.Decode(input)\n\tif len(decoded) < 5 {\n\t\treturn nil, fmt.Errorf(\"invalid format\")\n\t}\n\tvar cksum [4]byte\n\tcopy(cksum[:], decoded[len(decoded)-4:])\n\t///// WARN: ok the ripemd160checksum should include the prefix in CERTAIN situations,\n\t// like when we imported the PubKey without a prefix ?! tied to the string representation\n\t// or something ? weird.. checksum shouldn't change based on the string reprsentation.\n\tif bytes.Compare(ripemd160checksum(decoded[:len(decoded)-4], curve), cksum[:]) != 0 {\n\t\treturn nil, fmt.Errorf(\"invalid checksum\")\n\t}\n\t// perhaps bitcoin has a leading net ID / version, but EOS doesn't\n\tpayload := decoded[:len(decoded)-4]\n\tresult = append(result, payload...)\n\treturn\n}",
"func b58checkdecode(s string) (ver uint8, b []byte, err error) {\n\t/* Decode base58 string */\n\tb, err = b58decode(s)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\n\t/* Add leading zero bytes */\n\tfor i := 0; i < len(s); i++ {\n\t\tif s[i] != '1' {\n\t\t\tbreak\n\t\t}\n\t\tb = append([]byte{0x3f}, b...)\n\t}\n\n\t/* Verify checksum */\n\tif len(b) < 5 {\n\t\treturn 0, nil, fmt.Errorf(\"Invalid base-58 check string: missing checksum.\")\n\t}\n\n\t/* Create a new SHA256 context */\n\tsha256_h := sha256.New()\n\n\t/* SHA256 Hash #1 */\n\tsha256_h.Reset()\n\tsha256_h.Write(b[:len(b)-4])\n\thash1 := sha256_h.Sum(nil)\n\n\t/* SHA256 Hash #2 */\n\tsha256_h.Reset()\n\tsha256_h.Write(hash1)\n\thash2 := sha256_h.Sum(nil)\n\n\t/* Compare checksum */\n\tif bytes.Compare(hash2[0:4], b[len(b)-4:]) != 0 {\n\t\treturn 0, nil, fmt.Errorf(\"Invalid base-58 check string: invalid checksum.\")\n\t}\n\n\t/* Strip checksum bytes */\n\tb = b[:len(b)-4]\n\n\t/* Extract and strip version */\n\tver = b[0]\n\tb = b[1:]\n\n\treturn ver, b, nil\n}",
"func Decode(str []byte) (int, error) {\n\tnum := 0\n\tmulti := 1\n\n\tfor i := len(str); i > 0; i-- {\n\t\tchar := str[i-1]\n\t\tindex := bytes.IndexByte(Alphabet, char)\n\t\tif index == -1 {\n\t\t\treturn -1, errInvalidBase58\n\t\t}\n\t\tnum += multi * index\n\t\tmulti = multi * base\n\t}\n\n\treturn num, nil\n}",
"func B58CheckDecode(data string) (int, []byte, []byte) {\n\n\t_, encoded := decode(data)\n\n\treturn int(encoded[0]),\n\t\tencoded[1 : len(encoded)-4],\n\t\tencoded[len(encoded)-4:]\n}",
"func (e *Encoding) Decode(value string) (uint64, error) {\n\tif value == \"\" {\n\t\treturn 0, errors.New(\"base58: value should not be empty\")\n\t}\n\tvar n uint64\n\tfor i := range value {\n\t\tu := e.decodeMap[value[i]]\n\t\tif u < 0 {\n\t\t\treturn 0, fmt.Errorf(\"base58: invalid character - %d:%s\", i, string(value[i]))\n\t\t}\n\t\tn = n*58 + uint64(u)\n\t}\n\treturn n, nil\n}",
"func encodeBase58(i int64, b []byte) {\n\tp := len(b) - 1\n\tfor i >= 58 {\n\t\tb[p] = encodeBase58Map[i%58]\n\t\tp--\n\t\ti /= 58\n\t}\n\tb[p] = encodeBase58Map[i]\n}",
"func (ed EncodeDecoder) Decode(s string) int64 {\n\tb := []byte(s)\n\n\tvar id int64\n\n\tfor i := range b {\n\t\tid = id*58 + int64(ed.decodeBase58Map[b[i]])\n\t}\n\n\tid -= ed.offset\n\n\treturn id\n}",
"func SolanaDecodeToBytes(input string) ([]byte, error) {\n\tdecoded, err := Base58Decode(input, Base58DefaultAlphabet)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(decoded) != SolanaAddressLength {\n\t\treturn nil, errors.New(\"Invalid length\")\n\t}\n\treturn decoded, nil\n}",
"func Decode(b string) ([]byte, error) {\n\treturn DecodeAlphabet(b, BTCAlphabet)\n}",
"func DecodeBase58Address(addr string) (Address, error) {\n\tb, err := base58.Base582Hex(addr)\n\tif err != nil {\n\t\treturn Address{}, err\n\t}\n\treturn addressFromBytes(b)\n}",
"func (d *Decoder) Decode(input []byte) ([]byte, Encoding) {\n\tif len(input) == 0 {\n\t\treturn []byte{}, None\n\t}\n\n\tunmarshalled := &protodec.Empty{}\n\n\tif d.proto {\n\t\tif err := proto.Unmarshal(input, unmarshalled); err == nil {\n\t\t\t// TODO: remove control characters (unfortunately, they are all valid strings here)\n\t\t\treturn []byte(unmarshalled.String()), Proto\n\t\t}\n\t}\n\n\tif d.bitDec {\n\t\tbyteIn := strings.Trim(string(input), \"[]\") // [32 87 111 114 108 100] -> 32 87 111 114 108 100\n\n\t\tif b, err := Base2AsBytes(byteIn); err == nil {\n\t\t\treturn b, Bit\n\t\t}\n\t}\n\n\t// byte before hex, hex might contains letters, which are not valid in byte dec\n\tif d.byteDec {\n\t\tbyteIn := strings.Trim(string(input), \"[]\") // [32 87 111 114 108 100] -> 32 87 111 114 108 100\n\n\t\tif b, err := Base10AsBytes(byteIn); err == nil {\n\t\t\treturn b, Byte\n\t\t}\n\t}\n\n\t// hex after byte\n\tif d.hex {\n\t\thexIn := strings.TrimSpace(string(input)) // e.g. new line\n\t\thexIn = strings.TrimPrefix(hexIn, \"0x\") // hex prefix\n\t\thexIn = strings.Replace(hexIn, \" \", \"\", -1) // bd b2 3d bc 20 e2 8c 98 -> bdb23dbc20e28c98\n\n\t\tif b, err := hex.DecodeString(hexIn); err == nil {\n\t\t\treturn b, Hex\n\t\t}\n\t}\n\n\t// TODO: many false-positives. Decodes it when no base64 was given.\n\t// Keep it as one of the last decodings.\n\tif d.base64 {\n\t\tif b, err := base64.StdEncoding.DecodeString(strings.TrimSpace(string(input))); err == nil {\n\t\t\treturn b, Base64\n\t\t}\n\t}\n\n\treturn input, None\n}",
"func Base58Encode(input []byte) []byte {\n\tvar result []byte\n\n\tx := big.NewInt(0).SetBytes(input)\n\n\tbase := big.NewInt(int64(len(b58Alphabet)))\n\tzero := big.NewInt(0)\n\tmod := &big.Int{}\n\n\tfor x.Cmp(zero) != 0 {\n\t\tx.DivMod(x, base, mod)\n\t\tresult = append(result, b58Alphabet[mod.Int64()])\n\t}\n\n\t// https://en.bitcoin.it/wiki/Base58Check_encoding#Version_bytes\n\tif input[0] == 0x00 {\n\t\tresult = append(result, b58Alphabet[0])\n\t}\n\n\tReverseBytes(result)\n\n\treturn result\n}",
"func Base58Encode(input []byte) []byte {\n\tvar result []byte\n\n\tx := big.NewInt(0).SetBytes(input)\n\n\tbase := big.NewInt(int64(len(b58Alphabet)))\n\tzero := big.NewInt(0)\n\tmod := &big.Int{}\n\n\tfor x.Cmp(zero) != 0 {\n\t\tx.DivMod(x, base, mod)\n\t\tresult = append(result, b58Alphabet[mod.Int64()])\n\t}\n\n\t// https://en.bitcoin.it/wiki/Base58Check_encoding#Version_bytes\n\tif input[0] == 0x00 {\n\t\tresult = append(result, b58Alphabet[0])\n\t}\n\n\tReverseBytes(result)\n\n\treturn result\n}",
"func Decode(src []byte) (dst [10]byte)",
"func Decode(input string) ([]byte, error) {\n\tif len(input) == 0 {\n\t\treturn nil, ErrEmptyString\n\t}\n\tif !has0xPrefix(input) {\n\t\treturn nil, ErrMissingPrefix\n\t}\n\tb, err := hex.DecodeString(input[2:])\n\tif err != nil {\n\t\terr = mapError(err)\n\t}\n\treturn b, err\n}",
"func encodeBase58Len(i int64) int {\n\n\tvar l = 1\n\tfor i >= 58 {\n\t\tl++\n\t\ti /= 58\n\t}\n\treturn l\n}",
"func b58encode(b []byte) (s string) {\n\t/* See https://en.bitcoin.it/wiki/Base58Check_encoding */\n\n\tconst BITCOIN_BASE58_TABLE = \"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz\"\n\n\t/* Convert big endian bytes to big int */\n\tx := new(big.Int).SetBytes(b)\n\n\t/* Initialize */\n\tr := new(big.Int)\n\tm := big.NewInt(58)\n\tzero := big.NewInt(0)\n\ts = \"\"\n\n\t/* Convert big int to string */\n\tfor x.Cmp(zero) > 0 {\n\t\t/* x, r = (x / 58, x % 58) */\n\t\tx.QuoRem(x, m, r)\n\t\t/* Prepend ASCII character */\n\t\ts = string(BITCOIN_BASE58_TABLE[r.Int64()]) + s\n\t}\n\n\treturn s\n}",
"func DecodeAlphabet(b, alphabet string) ([]byte, error) {\n\tbigIntVal := big.NewInt(0)\n\tradix := big.NewInt(58)\n\n\tfor i := 0; i < len(b); i++ {\n\t\tidx := strings.IndexAny(alphabet, string(b[i]))\n\t\tif idx == -1 {\n\t\t\treturn nil, errors.New(\"illegal base58 data at input byte \" + strconv.FormatInt(int64(i), 10))\n\t\t}\n\t\tbigIntVal.Mul(bigIntVal, radix)\n\t\tbigIntVal.Add(bigIntVal, big.NewInt(int64(idx)))\n\t}\n\ttemp := bigIntVal.Bytes()\n\n\t//append prefix 0\n\tvar numZeros int\n\tfor numZeros = 0; numZeros < len(b); numZeros++ {\n\t\tif b[numZeros] != alphabet[0] {\n\t\t\tbreak\n\t\t}\n\t}\n\tanswerLen := numZeros + len(temp)\n\tanswer := make([]byte, answerLen, answerLen)\n\n\tcopy(answer[numZeros:], temp)\n\treturn answer, nil\n}",
"func MustDecodeBase58Address(addr string) Address {\n\ta, err := DecodeBase58Address(addr)\n\tif err != nil {\n\t\tlog.Panicf(\"Invalid address %s: %v\", addr, err)\n\t}\n\treturn a\n}",
"func decodeBase62(encStr string) string {\n\treturn string(noPad62Encoding.DecodeToBigInt(encStr).Bytes())\n}",
"func FromB58String(s string) (m Multihash, err error) {\n\tb, err := b58.Decode(s)\n\tif err != nil {\n\t\treturn Multihash{}, ErrInvalidMultihash\n\t}\n\n\treturn Cast(b)\n}",
"func Base58Encode(b []byte, alphabet string) string {\n\tif b == nil || len(b) == 0 {\n\t\treturn \"\"\n\t}\n\tx := new(big.Int)\n\tx.SetBytes(b)\n\n\tanswer := make([]byte, 0)\n\tfor x.Cmp(bigZero) > 0 {\n\t\tmod := new(big.Int)\n\t\tx.DivMod(x, bigRadix, mod)\n\t\tanswer = append(answer, alphabet[mod.Int64()])\n\t}\n\n\t// leading zero bytes\n\tfor _, i := range b {\n\t\tif i != 0 {\n\t\t\tbreak\n\t\t}\n\t\tanswer = append(answer, alphabet[0])\n\t}\n\n\t// reverse\n\talen := len(answer)\n\tfor i := 0; i < alen/2; i++ {\n\t\tanswer[i], answer[alen-1-i] = answer[alen-1-i], answer[i]\n\t}\n\n\treturn string(answer)\n}",
"func decodeCoinID(coinID dex.Bytes) (*chainhash.Hash, uint32, error) {\n\tif len(coinID) != 36 {\n\t\treturn nil, 0, fmt.Errorf(\"coin ID wrong length. expected 36, got %d\", len(coinID))\n\t}\n\tvar txHash chainhash.Hash\n\tcopy(txHash[:], coinID[:32])\n\treturn &txHash, binary.BigEndian.Uint32(coinID[32:]), nil\n}",
"func decodeCoinID(coinID dex.Bytes) (*chainhash.Hash, uint32, error) {\n\tif len(coinID) != 36 {\n\t\treturn nil, 0, fmt.Errorf(\"coin ID wrong length. expected 36, got %d\", len(coinID))\n\t}\n\tvar txHash chainhash.Hash\n\tcopy(txHash[:], coinID[:32])\n\treturn &txHash, binary.BigEndian.Uint32(coinID[32:]), nil\n}",
"func decodeCoinID(coinID dex.Bytes) (*chainhash.Hash, uint32, error) {\n\tif len(coinID) != 36 {\n\t\treturn nil, 0, fmt.Errorf(\"coin ID wrong length. expected 36, got %d\", len(coinID))\n\t}\n\tvar txHash chainhash.Hash\n\tcopy(txHash[:], coinID[:32])\n\treturn &txHash, binary.BigEndian.Uint32(coinID[32:]), nil\n}",
"func DecodeBase58BitcoinAddress(addr string) (BitcoinAddress, error) {\n\tb, err := base58.Decode(addr)\n\tif err != nil {\n\t\treturn BitcoinAddress{}, err\n\t}\n\treturn BitcoinAddressFromBytes(b)\n}",
"func decodeCoinID(coinID []byte) (*chainhash.Hash, uint32, error) {\n\tif len(coinID) != 36 {\n\t\treturn nil, 0, fmt.Errorf(\"coin ID wrong length. expected 36, got %d\", len(coinID))\n\t}\n\tvar txHash chainhash.Hash\n\tcopy(txHash[:], coinID[:32])\n\treturn &txHash, binary.BigEndian.Uint32(coinID[32:]), nil\n}",
"func MustDecodeBase58BitcoinAddress(addr string) BitcoinAddress {\n\ta, err := DecodeBase58BitcoinAddress(addr)\n\tif err != nil {\n\t\tlog.Panicf(\"Invalid bitcoin address %s: %v\", addr, err)\n\t}\n\treturn a\n}",
"func Decode(value []byte) ([]byte, error) {\n var length int = len(value)\n decoded := make([]byte, base64.URLEncoding.DecodedLen(length))\n\n n, err := base64.URLEncoding.Decode(decoded, value)\n if err != nil {\n return nil, err\n }\n return decoded[:n], nil\n}",
"func (c Color) Base58() string {\n\treturn base58.Encode(c.Bytes())\n}",
"func vaultDecode(data interface{}) ([]byte, error) {\n\tencoded, ok := data.(string)\n\tif !ok {\n\t\treturn nil, errors.New(\"Received non-string data\")\n\t}\n\n\treturn base64.StdEncoding.DecodeString(prefixRegex.ReplaceAllString(encoded, \"\"))\n}",
"func DecodeToHex(input string, prefix []byte) (string, error) {\n\tdecoded, version, err := base58.CheckDecode(input)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(prefix) > 0 {\n\t\tif version != prefix[0] {\n\t\t\treturn \"\", errors.Errorf(\"[DecodeToHex] Unknown version %v %v\", version, prefix[0])\n\t\t}\n\n\t\tfor i := range prefix[1:] {\n\t\t\tif decoded[i] != prefix[i+1] {\n\t\t\t\treturn \"\", errors.Errorf(\"[DecodeToHex] Unknown prefix %v %v\", decoded[:2], prefix)\n\t\t\t}\n\t\t}\n\t}\n\treturn hex.EncodeToString(decoded[len(prefix)-1:]), nil\n}",
"func (b58 Base58) Encode(b []byte) string {\n\tx := new(big.Int)\n\tx.SetBytes(b)\n\n\tanswer := make([]byte, 0, len(b)*136/100)\n\tfor x.Cmp(bigZero) > 0 {\n\t\tmod := new(big.Int)\n\t\tx.DivMod(x, bigRadix, mod)\n\t\tanswer = append(answer, b58.alphabet[mod.Int64()])\n\t}\n\n\t// leading zero bytes\n\tfor _, i := range b {\n\t\tif i != 0 {\n\t\t\tbreak\n\t\t}\n\t\tanswer = append(answer, b58.alphabet[0])\n\t}\n\n\t// reverse\n\talen := len(answer)\n\tfor i := 0; i < alen/2; i++ {\n\t\tanswer[i], answer[alen-1-i] = answer[alen-1-i], answer[i]\n\t}\n\n\treturn string(answer)\n}",
"func Base58Encode(input []byte) []byte {\n\tencode := base58.Encode(input)\n\n\treturn []byte(encode)\n}",
"func encode(src []byte) ([]byte, int) {\n\n\tvar dst []byte\n\tx := new(big.Int).SetBytes(src)\n\tr := new(big.Int)\n\tm := big.NewInt(58)\n\tzero := big.NewInt(0)\n\ts := \"\"\n\n\t/* While x > 0 */\n\tfor x.Cmp(zero) > 0 {\n\t\t/* x, r = (x / 58, x % 58) */\n\t\tx.QuoRem(x, m, r)\n\t\t/* Prepend ASCII character */\n\t\ts = string(base58table[r.Int64()]) + s\n\t\tdst = append(dst, base58table[r.Int64()])\n\t}\n\n\t/* For number of leading 0's in bytes, prepend 1 */\n\tfor _, v := range src {\n\t\tif v != 0 {\n\t\t\tbreak\n\t\t}\n\t\tdst = append(dst, base58table[0])\n\t}\n\n\tfor i := 0; i < len(dst)/2; i++ {\n\t\tdst[i], dst[len(dst)-1-i] =\n\t\t\tdst[len(dst)-1-i], dst[i]\n\t}\n\treturn dst, len(dst)\n}",
"func ColorFromBase58EncodedString(base58String string) (color Color, err error) {\n\tparsedBytes, err := base58.Decode(base58String)\n\tif err != nil {\n\t\terr = errors.Errorf(\"error while decoding base58 encoded Color (%v): %w\", err, cerrors.ErrBase58DecodeFailed)\n\t\treturn\n\t}\n\n\tif color, _, err = ColorFromBytes(parsedBytes); err != nil {\n\t\terr = errors.Errorf(\"failed to parse Color from bytes: %w\", err)\n\t\treturn\n\t}\n\n\treturn\n}",
"func B64DecodeStrToByte(inputString string) ([]byte, error) {\r\n\tdecoded, err := base64.StdEncoding.DecodeString(inputString)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn decoded, nil\r\n}",
"func vaultDecode(data interface{}) ([]byte, error) {\n\tencoded, ok := data.(string)\n\tif !ok {\n\t\treturn nil, errors.New(\"Received non-string data\")\n\t}\n\treturn base64.StdEncoding.DecodeString(strings.TrimPrefix(encoded, vaultV1DataPrefix))\n}",
"func b58cencode(payload []byte, prefix []byte) string {\n\tn := make([]byte, (len(prefix) + len(payload)))\n\tfor k := range prefix {\n\t\tn[k] = prefix[k]\n\t}\n\tfor l := range payload {\n\t\tn[l+len(prefix)] = payload[l]\n\t}\n\tb58c := base58check.Encode(n)\n\treturn b58c\n}",
"func runtimeEncodeBase58(ic *interop.Context) error {\n\tsrc := ic.VM.Estack().Pop().Bytes()\n\tresult := base58.Encode(src)\n\tic.VM.Estack().PushVal([]byte(result))\n\treturn nil\n}",
"func test() {\n\to := decode(\"101\")\n\tfmt.Println(o, \"== 1\")\n\to = decode(\"111\")\n\tfmt.Println(o, \"== 3\")\n\to = decode(\"12\")\n\tfmt.Println(o, \"== 2\")\n\to = decode(\"27\")\n\tfmt.Println(o, \"== 1\")\n}",
"func (this *GoTezos) b58cencode(payload []byte, prefix []byte) string {\n\tn := make([]byte, (len(prefix) + len(payload)))\n\tfor k := range prefix {\n\t\tn[k] = prefix[k]\n\t}\n\tfor l := range payload {\n\t\tn[l+len(prefix)] = payload[l]\n\t}\n\tb58c := base58check.Encode(n)\n\treturn b58c\n}",
"func DecodeAddress(address string) (*Address, error) {\n\tif address[:3] == \"BM-\" { // Clients should accept addresses without BM-\n\t\taddress = address[3:]\n\t}\n\n\tdata := base58.Decode(address)\n\tif len(data) <= 12 { // rough lower bound, also don't want it to be empty\n\t\treturn nil, ErrUnknownAddressType\n\t}\n\n\thashData := data[:len(data)-4]\n\tchecksum := data[len(data)-4:]\n\n\tif !bytes.Equal(checksum, DoubleSha512(hashData)[0:4]) {\n\t\treturn nil, ErrChecksumMismatch\n\t}\n\t// create the address\n\taddr := new(Address)\n\n\tbuf := bytes.NewReader(data)\n\tvar err error\n\n\taddr.Version, err = ReadVarInt(buf) // read version\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\taddr.Stream, err = ReadVarInt(buf) // read stream\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tripe := make([]byte, buf.Len()-4) // exclude bytes already read and checksum\n\tbuf.Read(ripe) // this can never cause an error\n\n\tswitch addr.Version {\n\tcase 2:\n\t\tfallthrough\n\tcase 3:\n\t\tif len(ripe) > 19 || len(ripe) < 18 { // improper size\n\t\t\treturn nil, errors.New(\"version 3, the ripe length is invalid\")\n\t\t}\n\tcase 4:\n\t\t// encoded ripe data MUST have null bytes removed from front\n\t\tif ripe[0] == 0x00 {\n\t\t\treturn nil, errors.New(\"version 4, ripe data has null bytes in\" +\n\t\t\t\t\" the beginning, not properly encoded\")\n\t\t}\n\t\tif len(ripe) > 19 || len(ripe) < 4 { // improper size\n\t\t\treturn nil, errors.New(\"version 4, the ripe length is invalid\")\n\t\t}\n\tdefault:\n\t\treturn nil, ErrUnknownAddressType\n\t}\n\n\t// prepend null bytes to make sure that the total ripe length is 20\n\tnumPadding := 20 - len(ripe)\n\tripe = append(make([]byte, numPadding), ripe...)\n\tcopy(addr.Ripe[:], ripe)\n\n\treturn addr, nil\n}",
"func base64Decode(s string) ([]byte, error) {\n\t// add back missing padding\n\tswitch len(s) % 4 {\n\tcase 2:\n\t\ts += \"==\"\n\tcase 3:\n\t\ts += \"=\"\n\t}\n\treturn base64.URLEncoding.DecodeString(s)\n}",
"func base64Decode(s string) ([]byte, error) {\n\t// add back missing padding\n\tswitch len(s) % 4 {\n\tcase 2:\n\t\ts += \"==\"\n\tcase 3:\n\t\ts += \"=\"\n\t}\n\treturn base64.URLEncoding.DecodeString(s)\n}",
"func b58checkencode(ver uint8, b []byte) (s string) {\n\t/* Prepend version */\n\tbcpy := append([]byte{ver}, b...)\n\n\t/* Create a new SHA256 context */\n\tsha256_h := sha256.New()\n\n\t/* SHA256 Hash #1 */\n\tsha256_h.Reset()\n\tsha256_h.Write(bcpy)\n\thash1 := sha256_h.Sum(nil)\n\n\t/* SHA256 Hash #2 */\n\tsha256_h.Reset()\n\tsha256_h.Write(hash1)\n\thash2 := sha256_h.Sum(nil)\n\n\t/* Append first four bytes of hash */\n\tbcpy = append(bcpy, hash2[0:4]...)\n\n\t/* Encode base58 string */\n\ts = b58encode(bcpy)\n\n\t/* For number of leading 0's in bytes, prepend 1 */\n\tfor _, v := range bcpy {\n\t\tif v != 0 {\n\t\t\tbreak\n\t\t}\n\t\ts = \"1\" + s\n\t}\n\n\treturn s\n}",
"func b58checkencode(ver uint8, b []byte) (s string) {\n\t/* Prepend version */\n\tbcpy := append([]byte{ver}, b...)\n\n\t/* Create a new SHA256 context */\n\tsha256_h := sha256.New()\n\n\t/* SHA256 Hash #1 */\n\tsha256_h.Reset()\n\tsha256_h.Write(bcpy)\n\thash1 := sha256_h.Sum(nil)\n\n\t/* SHA256 Hash #2 */\n\tsha256_h.Reset()\n\tsha256_h.Write(hash1)\n\thash2 := sha256_h.Sum(nil)\n\n\t/* Append first four bytes of hash */\n\tbcpy = append(bcpy, hash2[0:4]...)\n\n\t/* Encode base58 string */\n\ts = b58encode(bcpy)\n\n\t/* For number of leading 0's in bytes, prepend 1 */\n\tfor _, v := range bcpy {\n\t\tif v != 0 {\n\t\t\tbreak\n\t\t}\n\t\ts = \"1\" + s\n\t}\n\n\treturn s\n}",
"func (m Multihash) B58String() string {\n\treturn b58.Encode([]byte(m))\n}",
"func (c *Coinbase) Decode(r io.Reader) error {\n\n\tvar Type uint8\n\tif err := encoding.ReadUint8(r, &Type); err != nil {\n\t\treturn err\n\t}\n\tc.TxType = TxType(Type)\n\n\tif err := encoding.Read256(r, &c.R); err != nil {\n\t\treturn err\n\t}\n\n\tif err := encoding.Read256(r, &c.Score); err != nil {\n\t\treturn err\n\t}\n\n\tif err := encoding.ReadVarBytes(r, &c.Proof); err != nil {\n\t\treturn err\n\t}\n\n\tlRewards, err := encoding.ReadVarInt(r)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Rewards = make(Outputs, lRewards)\n\tfor i := uint64(0); i < lRewards; i++ {\n\t\tc.Rewards[i] = &Output{}\n\t\tif err := c.Rewards[i].Decode(r); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func DecodeUTF8CodepointsToRawBytes(utf8Str string) ([]byte, error) {\n\trunes := []rune(utf8Str)\n\trawBytes := make([]byte, len(runes))\n\tfor i, r := range runes {\n\t\tif (r & 0xFF) != r {\n\t\t\treturn nil, fmt.Errorf(\"character out of range: %d\", r)\n\t\t}\n\t\trawBytes[i] = byte(r)\n\t}\n\treturn rawBytes, nil\n}",
"func ikcp_decode8u(p []byte, c *byte) []byte {\n\t*c = p[0]\n\treturn p[1:]\n}",
"func decodeAesCbc(src []byte) ([]byte, error) {\n block, err := aes.NewCipher([]byte(REQ_KEY))\n if err != nil {\n logrus.Error(\"aes decode err : \", err)\n return nil, err\n }\n\n blockMode := cipher.NewCBCDecrypter(block, []byte(REQ_IV))\n dst := make([]byte, len(src))\n blockMode.CryptBlocks(dst, src)\n out := KCS5UnPadding(dst)\n\n if len(out) > CodeSaltLen {\n out = out[CodeSaltLen:]\n }\n\n return out, nil\n}",
"func Decode(in string) ([]byte, error) {\n\to, err := b64.StdEncoding.DecodeString(in)\n\tif err != nil {\n\t\t// maybe it's in the URL variant?\n\t\to, err = b64.URLEncoding.DecodeString(in)\n\t\tif err != nil {\n\t\t\t// ok, just give up...\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn o, nil\n}",
"func Decode(str string) string {\n\trs := []rune(str)\n\tvar b []uint8\n\tfor _, r := range rs {\n\t\tb1 := r & ((1 << 8) - 1)\n\t\tb = append(b, uint8(b1))\n\n\t\tb2val := r - b1\n\t\tif b2val != unknownByteCodePoint {\n\t\t\tb2 := decodeMap[b2val]\n\t\t\tb = append(b, b2)\n\t\t}\n\t}\n\ts := string(b)\n\treturn s\n}",
"func B64Decode(data string) []byte {\n\tdec, err := base64.StdEncoding.DecodeString(data)\n\tif err != nil {\n\t\treturn nil\n\t}\n\n\treturn dec\n}",
"func Decode(b []byte) (uint64, error) {\n\tvar (\n\t\tsum = uint64(0)\n\t\tactual = uint64(0)\n\t\tdouble = false\n\t)\n\n\tif len(b) < common.MinImeiLength {\n\t\t//code must be at least 15 bytes long.\n\t\tpanic(common.ErrInvalidImei)\n\t}\n\n\t//iterate over each digit in the byteslice\n\tfor i := 0; i < common.MinImeiLength; i++ {\n\t\tdigit := uint64(b[i] - common.ASCIIZero)\n\t\t//each digit should be between 0-9\n\t\tif digit > 9 {\n\t\t\tdigit = digit - 9\n\t\t\treturn 0, common.Wrap(common.ErrInvalidImei, fmt.Sprintf(\"invalid digit: %d\", digit))\n\t\t}\n\t\t//base10\n\t\tactual = (uint64(10) * actual) + digit\n\n\t\t//skip last digit when calculating sum for luhn validation ref: https://en.wikipedia.org/wiki/International_Mobile_Equipment_Identity.\n\t\tif i == 14 {\n\t\t\tcontinue\n\t\t}\n\t\tif double {\n\t\t\tdigit = digit * 2\n\t\t}\n\t\tif digit >= 10 {\n\t\t\tdigit = digit - 9\n\t\t}\n\t\tsum += digit\n\t\t//double every other digit\n\t\tdouble = !double\n\t}\n\t//validate using Luhn algorithm\n\tif ((10 - (sum % 10)) % 10) != uint64(b[14]-common.ASCIIZero) {\n\t\treturn 0, common.Wrap(common.ErrChecksum, fmt.Sprintf(\"payload = %s sum = %v\", string(b), sum))\n\t}\n\treturn actual, nil\n}",
"func (id *UUID) DecodeString(src []byte) {\n\tconst srcBase = 62\n\tconst dstBase = 0x100000000\n\n\tparts := [StringMaxLen]byte{}\n\n\tpartsIndex := 21\n\tfor i := len(src); i > 0; {\n\t\t// offsets into base62Characters\n\t\tconst offsetUppercase = 10\n\t\tconst offsetLowercase = 36\n\n\t\ti--\n\t\tb := src[i]\n\t\tswitch {\n\t\tcase b >= '0' && b <= '9':\n\t\t\tb -= '0'\n\t\tcase b >= 'A' && b <= 'Z':\n\t\t\tb = offsetUppercase + (b - 'A')\n\t\tdefault:\n\t\t\tb = offsetLowercase + (b - 'a')\n\t\t}\n\t\tparts[partsIndex] = b\n\t\tpartsIndex--\n\t}\n\n\tn := len(id)\n\tbp := parts[:]\n\tbq := make([]byte, 0, len(src))\n\n\tfor len(bp) > 0 {\n\t\tquotient := bq[:0]\n\t\tremainder := uint64(0)\n\n\t\tfor _, c := range bp {\n\t\t\tvalue := uint64(c) + uint64(remainder)*srcBase\n\t\t\tdigit := value / dstBase\n\t\t\tremainder = value % dstBase\n\n\t\t\tif len(quotient) != 0 || digit != 0 {\n\t\t\t\tquotient = append(quotient, byte(digit))\n\t\t\t}\n\t\t}\n\n\t\tid[n-4] = byte(remainder >> 24)\n\t\tid[n-3] = byte(remainder >> 16)\n\t\tid[n-2] = byte(remainder >> 8)\n\t\tid[n-1] = byte(remainder)\n\t\tn -= 4\n\t\tbp = quotient\n\t}\n\n\tvar zero [16]byte\n\tcopy(id[:n], zero[:])\n}",
"func bitcoinDecodeRawTransaction(tx []byte, reply *bitcoinTransaction) error {\n\tglobalBitcoinData.Lock()\n\tdefer globalBitcoinData.Unlock()\n\n\tif !globalBitcoinData.initialised {\n\t\treturn fault.ErrNotInitialised\n\t}\n\n\t// need to be in hex for bitcoind\n\targuments := []interface{}{\n\t\thex.EncodeToString(tx),\n\t}\n\treturn bitcoinCall(\"decoderawtransaction\", arguments, reply)\n}",
"func Decode(in string) (string, error) {\n\tsize := len(in)\n\tmod := size % 3\n\tif mod != 0 && mod != 2 {\n\t\treturn \"\", InvalidLengthError{\n\t\t\tlength: size,\n\t\t\tmod: mod,\n\t\t}\n\t}\n\tbytes := make([]byte, 0, size)\n\tfor pos, char := range in {\n\t\tv, ok := decodingMap[char]\n\t\tif !ok {\n\t\t\treturn \"\", InvalidCharacterError{\n\t\t\t\tchar: char,\n\t\t\t\tposition: pos,\n\t\t\t}\n\t\t}\n\t\tbytes = append(bytes, v)\n\t}\n\tchunks := decodeChunks(bytes)\n\ttriplets, err := decodeTriplets(chunks)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\ttripletsLength := len(triplets)\n\tdecoded := make([]byte, 0, tripletsLength * 2)\n\tfor i := 0; i < tripletsLength - 1; i++ {\n\t\tbytes := uint16ToBytes(triplets[i])\n\t\tdecoded = append(decoded, bytes[0])\n\t\tdecoded = append(decoded, bytes[1])\n\t}\n\tif mod == 2 {\n\t\tbytes := uint16ToBytes(triplets[tripletsLength - 1])\n\t\tdecoded = append(decoded, bytes[1])\n\t} else {\n\t\tbytes := uint16ToBytes(triplets[tripletsLength - 1])\n\t\tdecoded = append(decoded, bytes[0])\n\t\tdecoded = append(decoded, bytes[1])\n\t}\n\treturn string(decoded), nil\n}",
"func (a *A25) A58() []byte {\n\tvar out [34]byte\n\tfor n := 33; n >= 0; n-- {\n\t\tc := 0\n\t\tfor i := 0; i < 25; i++ {\n\t\t\tc = c*256 + int(a[i])\n\t\t\ta[i] = byte(c / 58)\n\t\t\tc %= 58\n\t\t}\n\t\tout[n] = tmpl[c]\n\t}\n\ti := 1\n\tfor i < 34 && out[i] == '1' {\n\t\ti++\n\t}\n\treturn out[i-1:]\n}",
"func DecodeKey(s string) ([]byte, error) {\n\tkey, err := hex.DecodeString(s)\n\tif err == nil && len(key) != KeyLen {\n\t\terr = fmt.Errorf(\"length is %d, expected %d\", len(key), KeyLen)\n\t}\n\treturn key, err\n}",
"func Base64Decode(b []byte) ([]byte, error) {\r\n\tbuf := make([]byte, base64.RawURLEncoding.DecodedLen(len(b)))\r\n\tn, err := base64.RawURLEncoding.Decode(buf, b)\r\n\treturn buf[:n], err\r\n}",
"func (p *Provider) Decode(in []byte) (err error) {\n\tvar block *pem.Block\n\tfor {\n\t\tblock, in = pem.Decode(in)\n\t\tif block == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tswitch block.Type {\n\t\tcase BlockCertificate:\n\t\t\tp.chain.Certificate = append(p.chain.Certificate, block.Bytes)\n\t\tcase BlockPrivateKey, BlockECPrivateKey, BlockRSAPrivateKey:\n\t\t\tif p.key, err = ParsePrivateKey(block); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unhandled block type %q\", block.Type)\n\t\t}\n\t}\n\treturn nil\n}",
"func B58cencode(payload []byte, prefix Prefix) string {\n\tn := make([]byte, (len(prefix) + len(payload)))\n\tfor k := range prefix {\n\t\tn[k] = prefix[k]\n\t}\n\tfor l := range payload {\n\t\tn[l+len(prefix)] = payload[l]\n\t}\n\tb58c := encode(n)\n\treturn b58c\n}",
"func (enc *Base64Encoding) Base64Decode(src []byte) ([]byte, error) {\n\tnumOfEquals := 4 - (len(src) % 4)\n\tfor i := 0; i < numOfEquals; i++ {\n\t\tsrc = append(src, '=')\n\t}\n\tdst := make([]byte, enc.Encoding.DecodedLen(len(src)))\n\tn, err := enc.Encoding.Decode(dst, src)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn dst[:n], nil\n}",
"func decodeCopy(in string) (string, error) {\n\tvar buf bytes.Buffer\n\tstart := 0\n\tfor i, n := 0, len(in); i < n; i++ {\n\t\tif in[i] != '\\\\' {\n\t\t\tcontinue\n\t\t}\n\t\tbuf.WriteString(in[start:i])\n\t\ti++\n\t\tif i >= n {\n\t\t\treturn \"\", fmt.Errorf(\"unknown escape sequence: %q\", in[i-1:])\n\t\t}\n\n\t\tch := in[i]\n\t\tif decodedChar := decodeMap[ch]; decodedChar != 0 {\n\t\t\tbuf.WriteByte(decodedChar)\n\t\t} else if ch == 'x' {\n\t\t\t// \\x can be followed by 1 or 2 hex digits.\n\t\t\ti++\n\t\t\tif i >= n {\n\t\t\t\treturn \"\", fmt.Errorf(\"unknown escape sequence: %q\", in[i-2:])\n\t\t\t}\n\t\t\tch = in[i]\n\t\t\tdigit, ok := decodeHexDigit(ch)\n\t\t\tif !ok {\n\t\t\t\treturn \"\", fmt.Errorf(\"unknown escape sequence: %q\", in[i-2:i])\n\t\t\t}\n\t\t\tif i+1 < n {\n\t\t\t\tif v, ok := decodeHexDigit(in[i+1]); ok {\n\t\t\t\t\ti++\n\t\t\t\t\tdigit <<= 4\n\t\t\t\t\tdigit += v\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.WriteByte(digit)\n\t\t} else if ch >= '0' && ch <= '7' {\n\t\t\tdigit, _ := decodeOctDigit(ch)\n\t\t\t// 1 to 2 more octal digits follow.\n\t\t\tif i+1 < n {\n\t\t\t\tif v, ok := decodeOctDigit(in[i+1]); ok {\n\t\t\t\t\ti++\n\t\t\t\t\tdigit <<= 3\n\t\t\t\t\tdigit += v\n\t\t\t\t}\n\t\t\t}\n\t\t\tif i+1 < n {\n\t\t\t\tif v, ok := decodeOctDigit(in[i+1]); ok {\n\t\t\t\t\ti++\n\t\t\t\t\tdigit <<= 3\n\t\t\t\t\tdigit += v\n\t\t\t\t}\n\t\t\t}\n\t\t\tbuf.WriteByte(digit)\n\t\t} else {\n\t\t\treturn \"\", fmt.Errorf(\"unknown escape sequence: %q\", in[i-1:i+1])\n\t\t}\n\t\tstart = i + 1\n\t}\n\tbuf.WriteString(in[start:])\n\treturn buf.String(), nil\n}",
"func bcd(data []byte) []byte {\n\tout := make([]byte, len(data)/2+1)\n\tn, err := hex.Decode(out, data)\n\tif err != nil {\n\t\tpanic(err.Error())\n\t}\n\treturn out[:n]\n}",
"func (cdc OctetsCodec) Decode(b []byte) (v interface{}, s string, err error) {\n\treturn b, string(b), nil\n}",
"func MustDecode(input string) []byte {\n\tdec, err := Decode(input)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn dec\n}",
"func NewEncoding(encoder string) (*Encoding, error) {\n\tif len(encoder) != 58 {\n\t\treturn nil, errors.New(\"base58: encoding alphabet is not 58-bytes\")\n\t}\n\tfor i := 0; i < len(encoder); i++ {\n\t\tif encoder[i] == '\\n' || encoder[i] == '\\r' {\n\t\t\treturn nil, errors.New(\"base58: encoding alphabet contains newline character\")\n\t\t}\n\t}\n\te := new(Encoding)\n\tfor i := range e.decodeMap {\n\t\te.decodeMap[i] = -1\n\t}\n\tfor i := range encoder {\n\t\te.encode[i] = byte(encoder[i])\n\t\te.decodeMap[e.encode[i]] = i\n\t}\n\treturn e, nil\n}",
"func MustHexDecode(s string) []byte {\n\tb, err := hex.DecodeString(s)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn b\n}",
"func (b Byte) Decode(r io.Reader) (interface{}, error) {\n\ti, err := util.ReadInt8(r)\n\treturn Byte(i), err\n}",
"func Base64Decode(input []byte) []byte {\n\tdec := base64.StdEncoding\n\tdecLength := dec.DecodedLen(len(input))\n\toutput := make([]byte, decLength)\n\tn, err := dec.Decode(output, input)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif n < decLength {\n\t\toutput = output[:n]\n\t}\n\treturn output\n}",
"func Decode8u(p []byte, c *byte) []byte {\n\t*c = p[0]\n\treturn p[1:]\n}",
"func decodeByteSequence(val []byte) ([][]byte, error) {\n\ts := string(val)\n\tvar res [][]byte\n\tif s == \"\" {\n\t\treturn res, nil\n\t}\n\tfor _, v := range strings.Split(s, \",\") {\n\t\tbs, err := hexutil.Decode(v)\n\t\tif err != nil {\n\t\t\treturn [][]byte{}, err\n\t\t}\n\t\tres = append(res, bs)\n\t}\n\treturn res, nil\n}",
"func Decode(part string) ([]byte, error) {\n\tif l := len(part) % 4; l > 0 {\n\t\tpart += strings.Repeat(\"=\", 4-l)\n\t}\n\n\treturn base64.URLEncoding.DecodeString(part)\n}",
"func (msg *MsgExtended) BtcDecode(r io.Reader, pver uint32) error {\n\tvar command [CommandSize]byte\n\tif _, err := io.ReadFull(r, command[:]); err != nil {\n\t\t// If read failed assume closed connection since net package doesn't give consistent errors.\n\t\treturn messageError(\"ReadMessage\", err.Error())\n\t}\n\tmsg.ExtCommand = string(bytes.TrimRight(command[:], string(rune(0))))\n\n\tif err := binary.Read(r, littleEndian, &msg.Length); err != nil {\n\t\t// If read failed assume closed connection since net package doesn't give consistent errors.\n\t\treturn messageError(\"ReadMessage\", err.Error())\n\t}\n\n\tmsg.Payload = make([]byte, msg.Length)\n\tif _, err := io.ReadFull(r, msg.Payload); err != nil {\n\t\t// If read failed assume closed connection since net package doesn't give consistent errors.\n\t\treturn messageError(\"ReadMessage\", err.Error())\n\t}\n\n\treturn nil\n}",
"func decodeBLS12381FieldElement(in []byte) ([]byte, error) {\n\tif len(in) != 64 {\n\t\treturn nil, errors.New(\"invalid field element length\")\n\t}\n\t// check top bytes\n\tfor i := 0; i < 16; i++ {\n\t\tif in[i] != byte(0x00) {\n\t\t\treturn nil, errBLS12381InvalidFieldElementTopBytes\n\t\t}\n\t}\n\tout := make([]byte, 48)\n\tcopy(out[:], in[16:])\n\treturn out, nil\n}",
"func DecodeAddress(address string) (*Address, error) {\n\t// if address[:3] == \"BM-\" { // Clients should accept addresses without BM-\n\t//\taddress = address[3:]\n\t// }\n\t//\n\t// decodeAddress says this but then UI checks for a missingbm status from\n\t// decodeAddress, which doesn't exist. So I choose NOT to accept addresses\n\t// without the initial \"BM-\"\n\n\ti, err := base58.DecodeToBig([]byte(address[3:]))\n\tif err != nil {\n\t\treturn nil, errors.New(\"input address not valid base58 string\")\n\t}\n\tdata := i.Bytes()\n\n\thashData := data[:len(data)-4]\n\tchecksum := data[len(data)-4:]\n\n\t// Take two rounds of SHA512 hashes\n\tsha := sha512.New()\n\tsha.Write(hashData)\n\tcurrentHash := sha.Sum(nil)\n\tsha.Reset()\n\tsha.Write(currentHash)\n\n\tif !bytes.Equal(checksum, sha.Sum(nil)[0:4]) {\n\t\treturn nil, errors.New(\"checksum failed\")\n\t}\n\t// create the address\n\taddr := new(Address)\n\n\tbuf := bytes.NewReader(data)\n\n\terr = addr.Version.DeserializeReader(buf) // get the version\n\tif err != nil {\n\t\treturn nil, types.DeserializeFailedError(\"version: \" + err.Error())\n\t}\n\n\terr = addr.Stream.DeserializeReader(buf)\n\tif err != nil {\n\t\treturn nil, types.DeserializeFailedError(\"stream: \" + err.Error())\n\t}\n\n\tripe := make([]byte, buf.Len()-4) // exclude bytes already read and checksum\n\tn, err := buf.Read(ripe)\n\tif n != len(ripe) || err != nil {\n\t\treturn nil, types.DeserializeFailedError(\"ripe: \" + err.Error())\n\t}\n\n\tswitch addr.Version {\n\tcase 2:\n\t\tfallthrough\n\tcase 3:\n\t\tif len(ripe) > 20 || len(ripe) < 18 { // improper size\n\t\t\treturn nil, errors.New(\"version 3, the ripe length is invalid\")\n\t\t}\n\tcase 4:\n\t\t// encoded ripe data MUST have null bytes removed from front\n\t\tif ripe[0] == 0x00 {\n\t\t\treturn nil, errors.New(\"version 4, ripe data has null bytes in\" +\n\t\t\t\t\" the beginning, not properly encoded\")\n\t\t}\n\t\tif len(ripe) > 20 || len(ripe) < 4 { // improper size\n\t\t\treturn nil, errors.New(\"version 4, the ripe length is invalid\")\n\t\t}\n\tdefault:\n\t\treturn nil, errors.New(\"unsupported address version\")\n\t}\n\n\t// prepend null bytes to make sure that the total ripe length is 20\n\tnumPadding := 20 - len(ripe)\n\tripe = append(make([]byte, numPadding), ripe...)\n\tcopy(addr.Ripe[:], ripe)\n\n\treturn addr, nil\n}",
"func decodePubkey(val []byte) (*ecdsa.PublicKey, error) {\n\tdata, err := base64.RawURLEncoding.DecodeString(string(val))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ethcrypto.UnmarshalPubkey(data)\n}",
"func bytesDecode(raw []byte) (*encryptedData, error) {\n\tmacLen := 32\n\tivLen := 16\n\tif len(raw) == 0 {\n\t\treturn nil, errors.Errorf(\"raw must not be empty\")\n\t}\n\n\tif len(raw) < macLen+ivLen+pubKeyBytesLenCompressed+1 {\n\t\treturn nil, errors.Errorf(\"raw data does not have enough bytes to be encoded\")\n\t}\n\n\tif raw[0] != cipher.AES256CBC {\n\t\treturn nil, errors.Errorf(\"invalid prefix\")\n\t}\n\traw = raw[1:]\n\tdecompressedKey := decompress(raw[ivLen : ivLen+pubKeyBytesLenCompressed])\n\t// iv and mac must be created this way to ensure the cap of the array is not different\n\tiv := make([]byte, ivLen)\n\tcopy(iv, raw[:ivLen])\n\tmac := make([]byte, macLen)\n\tcopy(mac, raw[ivLen+pubKeyBytesLenCompressed:ivLen+pubKeyBytesLenCompressed+macLen])\n\n\tret := &encryptedData{\n\t\tInitializationVector: iv,\n\t\tEphemeralPublicKey: decompressedKey,\n\t\tMessageAuthenticationCode: mac,\n\t\tCiphertext: raw[ivLen+pubKeyBytesLenCompressed+macLen:],\n\t}\n\tif err := ret.verify(); err != nil {\n\t\treturn nil, errors.WithMessage(err, \"encrypted data is invalid\")\n\t}\n\n\treturn ret, nil\n}",
"func Base64Decode(encoded string) ([]byte, error) {\n\treturn base64.URLEncoding.DecodeString(encoded)\n}",
"func SecKeyFromBitcoinWalletImportFormat(input string) (SecKey, error) {\n\tb, err := base58.Decode(input)\n\tif err != nil {\n\t\treturn SecKey{}, err\n\t}\n\n\t//1+32+1+4\n\tif len(b) != 38 {\n\t\treturn SecKey{}, ErrInvalidLength\n\t}\n\tif b[0] != 0x80 {\n\t\treturn SecKey{}, ErrBitcoinWIFInvalidFirstByte\n\t}\n\n\tif b[1+32] != 0x01 {\n\t\treturn SecKey{}, ErrBitcoinWIFInvalidSuffix\n\t}\n\n\tb2 := DoubleSHA256(b[0:34])\n\tchksum := b[34:38]\n\n\tif !bytes.Equal(chksum, b2[0:4]) {\n\t\treturn SecKey{}, ErrBitcoinWIFInvalidChecksum\n\t}\n\n\treturn NewSecKey(b[1:33])\n}",
"func __b64decode(out *[]byte, src unsafe.Pointer, len int, mode int) (ret int)",
"func decode(s string) string {\n\tr := s\n\tfor _, tr := range codecValues {\n\t\tif strings.Index(r, tr.encoded) >= 0 {\n\t\t\tr = strings.Replace(r, tr.encoded, tr.decoded, -1)\n\t\t}\n\t}\n\treturn r\n}",
"func (b *baseSemanticUTF8Base64) Decode() string {\n\treturn b.decoded\n}",
"func Decode(in, out []byte) (int, []byte) {\n\tif len(in)%6 != 0 {\n\t\tpanic(\"len(in) must be a multiple of 6\")\n\t}\n\tvar errcount, c int\n\tfor len(in) >= 6 {\n\t\tc, out = decode24(in, out)\n\t\terrcount += c\n\t\tin = in[6:]\n\t}\n\treturn errcount, out\n}"
] | [
"0.82640064",
"0.8263569",
"0.77547556",
"0.772591",
"0.7617213",
"0.74173695",
"0.7274444",
"0.70765555",
"0.70323175",
"0.6839322",
"0.6795667",
"0.6781376",
"0.673101",
"0.65043974",
"0.6490628",
"0.64824307",
"0.64178103",
"0.60556257",
"0.59470063",
"0.59457445",
"0.59107846",
"0.58712006",
"0.5858051",
"0.5842897",
"0.5842897",
"0.5830589",
"0.57595795",
"0.57322216",
"0.57168996",
"0.5634383",
"0.5553838",
"0.5482786",
"0.5475391",
"0.54749995",
"0.5469103",
"0.5469103",
"0.5469103",
"0.5444875",
"0.5380095",
"0.53312266",
"0.531484",
"0.5293972",
"0.52849394",
"0.5248347",
"0.52330947",
"0.52247864",
"0.5177097",
"0.51695836",
"0.51669186",
"0.5151822",
"0.5120514",
"0.51097715",
"0.5097112",
"0.50840414",
"0.50688773",
"0.5065173",
"0.5065173",
"0.50609505",
"0.50609505",
"0.50488365",
"0.503862",
"0.50300604",
"0.50288385",
"0.5019499",
"0.500946",
"0.5000918",
"0.49747482",
"0.49718803",
"0.49624658",
"0.49577254",
"0.49527767",
"0.49461272",
"0.4937673",
"0.49370447",
"0.49361202",
"0.49347615",
"0.49162441",
"0.49041072",
"0.4888178",
"0.48628286",
"0.48611516",
"0.48576576",
"0.48507577",
"0.48500484",
"0.48468208",
"0.48390183",
"0.48358548",
"0.4829582",
"0.48290086",
"0.48284924",
"0.48282748",
"0.48246223",
"0.48198444",
"0.4815056",
"0.48099038",
"0.48069876",
"0.48012626",
"0.4800074",
"0.47953153"
] | 0.8383557 | 1 |
AsInt converts an action to an integer to be used for indexing. This integer only uses 4 bits and is only valid for 2 players. TargetPlayer is instead encoded as a bool: targetSelf. That never conflicts with SelectedCard. | func (act Action) AsInt() int {
retVal := 0
if act.PlayRecent {
retVal = 1
}
if act.SelectedCard != None && act.SelectedCard != Guard {
retVal += 2 * (int(act.SelectedCard) - 1)
} else if act.TargetPlayerOffset > 0 {
retVal += 2 * act.TargetPlayerOffset
}
return retVal
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ActionFromInt(st int) Action {\n\tact := Action{}\n\tif st%2 == 1 {\n\t\tact.PlayRecent = true\n\t}\n\n\tst = (st & 0xF) >> 1\n\t// Now st is the TargetPlayerOffset or SelectedCard. We don't know which, but if *any* card was selected,\n\t// then the other player must be targeted (since the played card is a guard), so for 2 players the offset is 1.\n\tif st > 0 {\n\t\tact.TargetPlayerOffset = 1\n\t}\n\tact.SelectedCard = Card(st + 1)\n\treturn act\n}",
"func (r *ISAAC) Int() int {\n\tu := uint(r.Uint64())\n\treturn int(u << 1 >> 1) // clear sign bit if int == int32\n}",
"func (q *QLearning) GetAction(s State) Action {\n\taction := Action(0)\n\tmax := q.qt[s][0]\n\n\tfor i := 1; i < q.actns; i++ {\n\t\tif max < q.qt[s][Action(i)] {\n\t\t\tmax = q.qt[s][Action(i)]\n\t\t\taction = Action(i)\n\t\t}\n\t}\n\treturn action\n}",
"func (v Season_Ic_Ta) Int() int {\n\treturn int(v)\n}",
"func (v Season_Uc_Ta) Int() int {\n\treturn int(v)\n}",
"func ConvertToInt(target string, def int) int {\n\tfo, err := strconv.Atoi(target)\n\tif err != nil {\n\t\treturn def\n\t}\n\treturn fo\n}",
"func (input *PuzzleInput) ParseAsInt() int {\n\tnum, err := strconv.Atoi(string(input.Data))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn num\n}",
"func (a ASTNode) Int() int {\n\tif a.t != tval {\n\t\tpanic(ConfErr{a.pos, errors.New(\"Not a basic value\")})\n\t}\n\tv, err := strconv.Atoi(a.val.(string))\n\tif err != nil {\n\t\tpanic(ConfErr{a.pos, err})\n\t}\n\treturn v\n}",
"func (tv *TypedInt) Int() int {\n\tvar x big.Int\n\tx.SetBytes(tv.Bytes)\n\tif len(tv.TypeOpts) > 1 && tv.TypeOpts[1] == 1 {\n\t\tx.Neg(&x)\n\t}\n\treturn int(x.Int64())\n}",
"func (t Target) Int() *big.Int {\n\treturn new(big.Int).SetBytes(t[:])\n}",
"func stateToInt(s string) int {\n\tvar i int\n\tswitch {\n\tcase s == \"UNKNOWN\":\n\t\ti = uUNKNOWN\n\tcase s == \"INIT\":\n\t\ti = uINIT\n\tcase s == \"READY\":\n\t\ti = uREADY\n\tcase s == \"TEST\":\n\t\ti = uTEST\n\tcase s == \"DONE\":\n\t\ti = uDONE\n\tcase s == \"TERM\":\n\t\ti = uTERM\n\tdefault:\n\t\ti = -1\n\t}\n\treturn i\n}",
"func Int(ss []int, target int) int {\n\treturn IndexOf(len(ss), func(i int) bool { return ss[i] == target })\n}",
"func (o Offset) Int() int32 {\n\tl, _ := strconv.Atoi(string(o))\n\treturn int32(l)\n}",
"func (a *AST) Int() int {\n\tvar dst C.int\n\tC.Z3_get_numeral_int(a.rawCtx, a.rawAST, &dst)\n\treturn int(dst)\n}",
"func ToInt(value interface{}) (int, error) {\n\tvalue = indirect(value)\n\n\tvar s string\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase bool:\n\t\tif v {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase int:\n\t\treturn v, nil\n\tcase int8:\n\t\treturn int(v), nil\n\tcase int16:\n\t\treturn int(v), nil\n\tcase int32:\n\t\treturn int(v), nil\n\tcase int64:\n\t\treturn int(v), nil\n\tcase uint:\n\t\treturn int(v), nil\n\tcase uint8:\n\t\treturn int(v), nil\n\tcase uint16:\n\t\treturn int(v), nil\n\tcase uint32:\n\t\treturn int(v), nil\n\tcase uint64:\n\t\treturn int(v), nil\n\tcase float32:\n\t\treturn int(v), nil\n\tcase float64:\n\t\treturn int(v), nil\n\tcase complex64:\n\t\treturn int(real(v)), nil\n\tcase complex128:\n\t\treturn int(real(v)), nil\n\tcase []byte:\n\t\ts = string(v)\n\tcase string:\n\t\ts = v\n\tcase fmt.Stringer:\n\t\ts = v.String()\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unable to cast %#v of type %T to int\", v, v)\n\t}\n\n\tif i, err := strconv.ParseInt(s, 0, 64); err == nil {\n\t\treturn int(i), nil\n\t}\n\treturn 0, fmt.Errorf(\"unable to cast %#v of type %T to int\", value, value)\n}",
"func (v *Value) AsInt(dv int) int {\n\tif v.IsUndefined() {\n\t\treturn dv\n\t}\n\tswitch tv := v.raw.(type) {\n\tcase string:\n\t\ti, err := strconv.Atoi(tv)\n\t\tif err != nil {\n\t\t\treturn dv\n\t\t}\n\t\treturn i\n\tcase int:\n\t\treturn tv\n\tcase float64:\n\t\treturn int(tv)\n\tcase bool:\n\t\tif tv {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\tcase time.Time:\n\t\tns := tv.UnixNano()\n\t\tif ns > maxInt {\n\t\t\treturn dv\n\t\t}\n\t\treturn int(ns)\n\tcase time.Duration:\n\t\tns := tv.Nanoseconds()\n\t\tif ns > maxInt {\n\t\t\treturn dv\n\t\t}\n\t\treturn int(ns)\n\t}\n\treturn dv\n}",
"func (ref *UIElement) ValueAsInt32() (int32, error) {\n\treturn ref.Int32Attr(ValueAttribute)\n}",
"func (i SNSSubscribeAttribute) IntValue() int {\n\treturn int(i)\n}",
"func Action(v int) predicate.QueueItem {\n\treturn predicate.QueueItem(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldAction), v))\n\t})\n}",
"func (c *JSONElement) AsInt(def int) (value int) {\n\tvalue, err := c.Json.Int()\n\tif err != nil {\n\t\treturn def\n\t}\n\n\treturn value\n}",
"func (code Code) Int() int {\n\treturn int(code)\n}",
"func Int(v interface{}) *int {\n\tswitch v.(type) {\n\tcase string, int32, int16, int8, int64, uint32, uint16, uint8, uint64, float32, float64:\n\t\tval := fmt.Sprintf(\"%v\", v)\n\t\tres, err := strconv.Atoi(val)\n\t\tif err != nil {\n\t\t\texception.Err(err, 500).Ctx(M{\"v\": v}).Throw()\n\t\t}\n\t\treturn &res\n\tcase int, uint:\n\t\tres := v.(int)\n\t\treturn &res\n\tcase bool:\n\t\tval := v.(bool)\n\t\tvar res int = 0\n\t\tif val {\n\t\t\tres = 1\n\t\t}\n\t\treturn &res\n\t}\n\treturn nil\n}",
"func INT(i operand.Op) { ctx.INT(i) }",
"func (ws *WorkflowSelect) Int(ctx context.Context) (_ int, err error) {\n\tvar v []int\n\tif v, err = ws.Ints(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{workflow.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: WorkflowSelect.Ints returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (k Kerx4) ActionType() uint8 {\n\tconst ActionType = 0xC0000000 // A two-bit field containing the action type.\n\treturn uint8(k.flags & ActionType >> 30)\n}",
"func TestToInt(t *testing.T) {\n\t// conversion from false to 0\n\tresult := evaluator.ToInt(false)\n\tassert.Equal(t, 0, result)\n\n\t// conversion from true to 1\n\tresult = evaluator.ToInt(true)\n\tassert.Equal(t, 1, result)\n}",
"func (ec ErrCode) Int() int { return int(ec.code) }",
"func toInt(item Any) int {\n\tvar newItem int\n\n\tval, ok := item.(int)\n\tif ok { newItem = val }\n\n\treturn newItem\n}",
"func (v Value2) Int() int { return int(v) }",
"func (ec *executionContext) _ActionsAction_actionID(ctx context.Context, field graphql.CollectedField, obj *models.ActionsAction) (ret graphql.Marshaler) {\n\tctx = ec.Tracer.StartFieldExecution(ctx, field)\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tec.Error(ctx, ec.Recover(ctx, r))\n\t\t\tret = graphql.Null\n\t\t}\n\t\tec.Tracer.EndFieldExecution(ctx)\n\t}()\n\trctx := &graphql.ResolverContext{\n\t\tObject: \"ActionsAction\",\n\t\tField: field,\n\t\tArgs: nil,\n\t\tIsMethod: false,\n\t}\n\tctx = graphql.WithResolverContext(ctx, rctx)\n\tctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx)\n\tresTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {\n\t\tctx = rctx // use context from middleware stack in children\n\t\treturn obj.ActionID, nil\n\t})\n\tif err != nil {\n\t\tec.Error(ctx, err)\n\t\treturn graphql.Null\n\t}\n\tif resTmp == nil {\n\t\tif !ec.HasError(rctx) {\n\t\t\tec.Errorf(ctx, \"must not be null\")\n\t\t}\n\t\treturn graphql.Null\n\t}\n\tres := resTmp.(core.ActionID)\n\trctx.Result = res\n\tctx = ec.Tracer.StartFieldChildExecution(ctx)\n\treturn ec.marshalNActionID2githubᚗcomᚋfacebookincubatorᚋsymphonyᚋpkgᚋactionsᚋcoreᚐActionID(ctx, field.Selections, res)\n}",
"func ToInt(i interface{}) int {\n\treturn cast.ToInt(i)\n}",
"func toInt(card z.Card) int {\n\tresult := MapRankToInt[card.Rank]*10 + MapSuitToInt[card.Suit]\n\treturn result\n}",
"func toInt(card z.Card) int {\n\tresult := MapRankToInt[card.Rank]*10 + MapSuitToInt[card.Suit]\n\treturn result\n}",
"func (v Value) AsInt() int64 {\n\treturn v.iface.(int64)\n}",
"func (v *Value) Int() int {\n\tswitch {\n\tcase v.ivalOk:\n\tcase v.fvalOk:\n\t\tv.ival = int(v.fval)\n\t\tv.ivalOk = true\n\tcase v.svalOk:\n\t\t// Perform a best-effort conversion from string to int.\n\t\tstrs := matchInt.FindStringSubmatch(v.sval)\n\t\tvar i64 int64\n\t\tif len(strs) >= 2 {\n\t\t\ti64, _ = strconv.ParseInt(strs[1], 10, 0)\n\t\t}\n\t\tv.ival = int(i64)\n\t\tv.ivalOk = true\n\t}\n\treturn v.ival\n}",
"func (g *Game) PlayerAction(action string) {\n\tcurrPlace := g.CurrentLocation.GetHeroPlace(g.Hero)\n\n\tmaxStep := g.CurrentLocation.Size - 1\n\thero := currPlace.GetHero()\n\tmonster := currPlace.GetMonster()\n\n\tif isSkillAction(action) {\n\t\tif currPlace.IsOccupied() {\n\t\t\tres, err := fight(hero, monster, action)\n\t\t\tif err != nil {\n\t\t\t\t_ = fmt.Errorf(\"fight error: %v\", err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif !monster.IsAlive() {\n\t\t\t\tg.countKill(monster)\n\n\t\t\t\tcurrPlace.RemoveMonster()\n\t\t\t\tif hero.IsAlive() {\n\t\t\t\t\tres += hero.GainExperience(monster.GetExperienceValue())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tg.View.UpdateCombatLog(res)\n\t\t} else {\n\t\t\tif action == Heal {\n\t\t\t\tres := g.Hero.UseSkill(Heal, nil)\n\t\t\t\tg.View.UpdateCombatLog(res.Message)\n\t\t\t}\n\t\t}\n\n\t\tg.checkHeroStatus()\n\t\treturn\n\t}\n\n\tif isMovement(action) {\n\t\tswitch action {\n\t\tcase MoveUp:\n\t\t\tg.Hero.MoveUp()\n\t\tcase MoveDown:\n\t\t\tg.Hero.MoveDown(maxStep)\n\t\tcase MoveLeft:\n\t\t\tg.Hero.MoveLeft()\n\t\tcase MoveRight:\n\t\t\tg.Hero.MoveRight(maxStep)\n\t\t}\n\n\t\tif currPlace.IsOccupied() {\n\t\t\tres := fightBack(hero, monster)\n\t\t\tg.View.UpdateCombatLog(res)\n\t\t}\n\t}\n\n\tg.checkHeroStatus()\n\n\tcurrPlace.RemoveHero()\n\tg.CurrentLocation.PlaceHero(g.Hero)\n}",
"func (storage Redis) Set(action common.Action) (*int, error) {\n\t// @I Consider using hashmaps instead of json values\n\t// @I Investigate risk of an Action overriding another due to race conditions\n\t// when creating them\n\n\tif storage.client == nil {\n\t\treturn nil, fmt.Errorf(\"the Redis client has not been initialized yet\")\n\t}\n\n\t// We'll be storing an ActionWrapper which contains the Action type as well.\n\twrapper, err := wrapper.Wrapper(action)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tjsonAction, err := json.Marshal(wrapper)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Generate an ID, store the Action, and update the Actions index set.\n\tid := storage.generateID()\n\tkey := redisKey(id)\n\terr = storage.client.Cmd(\"SET\", key, jsonAction).Err\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = storage.client.Cmd(\"ZADD\", \"actions\", id, key).Err\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &id, nil\n}",
"func (action *Action) GetIndex() int64 {\n\treturn action.height*types.MaxTxsPerBlock + int64(action.index)\n}",
"func (c Int32EnumConvert) Convert(n int, item int32) (int32, error) { return c(n, item) }",
"func (id *RequestID) Int() (int, error) {\n\treturn id.intValue, id.intError\n}",
"func (c Cents) Int() int {\n\treturn int(c)\n}",
"func (v Value) Int(defaults ...int) int {\n\t// Return the default if the raw is undefined\n\tif v.raw == nil {\n\t\t// Make sure there's at least one thing in the list\n\t\tdefaults = append(defaults, 0)\n\t\treturn defaults[0]\n\t}\n\n\tswitch t := v.raw.(type) {\n\tcase int:\n\t\treturn t\n\tcase float64:\n\t\tif t != float64(int(t)) {\n\t\t\tslog.Panicf(\"%v cannot be represented as an int\", t)\n\t\t}\n\n\t\treturn int(t)\n\tcase string:\n\t\ti, err := strconv.Atoi(t)\n\t\tif err != nil {\n\t\t\tslog.Panicf(\"failed to convert string to int: %v\", err)\n\t\t}\n\t\treturn i\n\tdefault:\n\t\tslog.Panicf(\"%v is of unsupported type %v\", t, reflect.TypeOf(t).String())\n\t}\n\n\treturn 0 // Never hit\n}",
"func (s *Streamer) Int(v int) *Streamer {\n\treturn s.Int64(int64(v))\n}",
"func toAction(actionInput string) (GameAction, error) {\n\tnormalised := strings.ToUpper(strings.TrimSuffix(actionInput, \"\\n\"))\n\tif len(normalised) < 1 {\n\t\treturn -1, errors.New(\"No action specified\")\n\t}\n\n\tswitch normalised[0] {\n\tcase 'E':\n\t\treturn Explore, nil\n\n\tcase 'F':\n\t\treturn Flag, nil\n\n\tdefault:\n\t\treturn -1, errors.New(\"Invalid action\")\n\t}\n}",
"func (urs *UserRoleSelect) Int(ctx context.Context) (_ int, err error) {\n\tvar v []int\n\tif v, err = urs.Ints(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{userrole.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: UserRoleSelect.Ints returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (state *State) ApplyAction(action int) {\n\tC.StateApplyAction(state.state, C.int(action))\n}",
"func AsInt(v interface{}) int {\n\tif v == nil {\n\t\treturn 0\n\t}\n\tv = AsValueRef(reflect.ValueOf(v)).Interface()\n\tswitch v.(type) {\n\tcase int:\n\t\treturn v.(int)\n\tcase int8:\n\t\treturn int(v.(int8))\n\tcase int16:\n\t\treturn int(v.(int16))\n\tcase int32:\n\t\treturn int(v.(int32))\n\tcase int64:\n\t\treturn int(v.(int64))\n\tcase uint:\n\t\treturn int(v.(uint))\n\tcase uint8:\n\t\treturn int(v.(uint8))\n\tcase uint16:\n\t\treturn int(v.(uint16))\n\tcase uint32:\n\t\treturn int(v.(uint32))\n\tcase uint64:\n\t\treturn int(v.(uint64))\n\tcase float32:\n\t\treturn int(v.(float32))\n\tcase float64:\n\t\treturn int(v.(float64))\n\tcase []byte:\n\t\tf, e := strconv.ParseFloat(string(v.([]byte)), 64)\n\t\tif e == nil {\n\t\t\treturn int(f)\n\t\t} else {\n\t\t\treturn 0\n\t\t}\n\tcase string:\n\t\tf, e := strconv.ParseFloat(v.(string), 64)\n\t\tif e == nil {\n\t\t\treturn int(f)\n\t\t} else {\n\t\t\treturn 0\n\t\t}\n\tcase bool:\n\t\tif v.(bool) {\n\t\t\treturn 1\n\t\t} else {\n\t\t\treturn 0\n\t\t}\n\tdefault:\n\t\treturn 0\n\t}\n}",
"func (a *Arg) Int(def int) int {\n\tif a.value == \"\" {\n\t\treturn def\n\t}\n\ti, err := strconv.Atoi(a.value)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn i\n}",
"func (option ApplicationCommandInteractionDataOption) IntValue() (value int, ok bool) {\n\ttmpValue, ok := option.Value.(float64)\n\tif !ok {\n\t\treturn 0, ok\n\t}\n\tvalue = int(tmpValue)\n\treturn value, ok\n}",
"func (ai *actionItem) Action() llapi.HsmAction {\n\treturn ai.hai.Action\n}",
"func (r *Result) Int() int {\n\tif r.Error != nil {\n\t\treturn 0\n\t}\n\n\treturn convert.ToInt(r.Value)\n\n}",
"func IntToTarget(i *big.Int) (t Target) {\n\t// i may overflow the maximum target.\n\t// In the event of overflow, return the maximum.\n\tif i.BitLen() > 256 {\n\t\treturn RootDepth\n\t}\n\tb := i.Bytes()\n\t// need to preserve big-endianness\n\toffset := len(t[:]) - len(b)\n\tcopy(t[offset:], b)\n\treturn\n}",
"func Bool2Int(val bool) int {\n\tif val {\n\t\treturn 1\n\t} else {\n\t\treturn 0\n\t}\n}",
"func (p Parser) Int(ctx context.Context) (*int, error) {\n\tvalue, err := p.Source.String(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif value == nil {\n\t\treturn nil, nil\n\t}\n\tparsed, err := strconv.Atoi(*value)\n\tif err != nil {\n\t\treturn nil, ex.New(err)\n\t}\n\treturn &parsed, nil\n}",
"func (ws *WifiSelect) Int(ctx context.Context) (_ int, err error) {\n\tvar v []int\n\tif v, err = ws.Ints(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{wifi.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: WifiSelect.Ints returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func ToIntValue(val interface{}) (int, bool) {\n\tif IsValueNil(val) {\n\t\treturn 0, false\n\t}\n\tv := reflect.ValueOf(val)\n\tswitch {\n\tcase IsIntKind(v.Kind()):\n\t\treturn int(v.Int()), true\n\tcase IsUintKind(v.Kind()):\n\t\treturn int(v.Uint()), true\n\t}\n\treturn 0, false\n}",
"func (ExprValue) Int() int { return intResult }",
"func (bbo *TabularBBO) GetAction(s []float64, rng *mathlib.Random) int {\n\t// Convert the one-hot state into an integer from 0 - (numStates-1)\n\tstate := mathlib.FromOneHot(s)\n\t// Get the action probabilities from theta, using softmax action selection.\n\tactionProbabilities := bbo.newTheta[state]\n\tdenominator := 0.0\n\tfor a := 0; a < len(actionProbabilities); a++ {\n\t\tactionProbabilities[a] = math.Exp(actionProbabilities[a])\n\t\tdenominator += actionProbabilities[a]\n\t}\n\tfor a := 0; a < len(actionProbabilities); a++ {\n\t\tactionProbabilities[a] /= denominator\n\t}\n\t// Select random action from softmax\n\ttemp := rng.Float64()\n\tsum := 0.0\n\tfor a := 0; a < bbo.numActions; a++ {\n\t\tsum += actionProbabilities[a]\n\t\tif temp <= sum {\n\t\t\treturn a // The function will return 'a'. This stops the for loop and returns from the function.\n\t\t}\n\t}\n\treturn bbo.numActions - 1 // Rounding error\n}",
"func ToInt(v interface{}) (int, error) {\n\tswitch v.(type) {\n\tcase string:\n\t\treturn Str2Int(v.(string))\n\tcase int, int8, int16, int32, int64:\n\t\treturn int(reflect.ValueOf(v).Int()), nil\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\treturn int(reflect.ValueOf(v).Uint()), nil\n\tcase float32, float64:\n\t\treturn int(reflect.ValueOf(v).Float()), nil\n\t}\n\n\treturn -1, fmt.Errorf(\"cannot parse value: %v\", v)\n}",
"func toInt(i interface{}) int {\n\ti = indirect(i)\n\n\tswitch s := i.(type) {\n\tcase int:\n\t\treturn s\n\tcase int64:\n\t\treturn int(s)\n\tcase int32:\n\t\treturn int(s)\n\tcase int16:\n\t\treturn int(s)\n\tcase int8:\n\t\treturn int(s)\n\tcase uint:\n\t\treturn int(s)\n\tcase uint64:\n\t\treturn int(s)\n\tcase uint32:\n\t\treturn int(s)\n\tcase uint16:\n\t\treturn int(s)\n\tcase uint8:\n\t\treturn int(s)\n\tcase float64:\n\t\treturn int(s)\n\tcase float32:\n\t\treturn int(s)\n\tcase string:\n\t\tv, err := strconv.ParseInt(s, 0, 0)\n\t\tif err == nil {\n\t\t\treturn int(v)\n\t\t}\n\t\tpanic(\"unable to cast variable to int\")\n\tcase bool:\n\t\tif s {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\tcase nil:\n\t\treturn 0\n\tdefault:\n\t\tpanic(\"unable to cast variable to int\")\n\t}\n}",
"func (i SNSPlatformApplicationAttribute) IntValue() int {\n\treturn int(i)\n}",
"func (r *RedisSession) Int(reply interface{}) (int, error) {\n\treturn redis.Int(reply, nil)\n}",
"func (d UserData) ActionID() actions.ActionRef {\n\tval := d.ModelData.Get(models.NewFieldName(\"ActionID\", \"action_id\"))\n\tif !d.Has(models.NewFieldName(\"ActionID\", \"action_id\")) {\n\t\treturn *new(actions.ActionRef)\n\t}\n\treturn val.(actions.ActionRef)\n}",
"func (c *C) Int() Type {\n\tif c.e.conf.UseGoInt {\n\t\treturn c.e.Go().Int()\n\t}\n\treturn c.e.DefIntT()\n}",
"func IntToInt(int_ int) int {\n\treturn int_\n}",
"func (c Int32Convert) Convert(item int32) (int32, error) { return c(item) }",
"func (v Int) Value() interface{} {\n\tif !v.Valid() {\n\t\treturn nil\n\t}\n\treturn v.Int\n}",
"func (f Frame) OpponentAction() PlayerAction {\n\taction := PlayerAction(UNKNOWN_ANIMATION)\n\n\tit := NewOpponentIterator(f)\n\tfor it.Next() {\n\t\t_, player := it.Value()\n\t\taction, _ = player.GetPlayerAction()\n\t\treturn action\n\t}\n\n\treturn action\n}",
"func (state *State) ActionToString(player int, action int) string {\n\tcs := C.StateActionToString(state.state, C.int(player), C.int(action))\n\tstr := C.GoString(cs)\n\tC.free(unsafe.Pointer(cs))\n\treturn str\n}",
"func (level Level) Int() int {\n\treturn int(level)\n}",
"func (r *Decoder) Int() int { x := r.Int64(); v := int(x); assert(int64(v) == x); return v }",
"func (a *Ability) websocketListenerAction(c *astiws.Client, eventName string, payload json.RawMessage) error {\n\t// Ability is not activated\n\ta.m.Lock()\n\tactivated := a.activated\n\ta.m.Unlock()\n\tif !activated {\n\t\tastilog.Error(\"astimousing: ability is not activated\")\n\t\treturn nil\n\t}\n\n\t// Unmarshal payload\n\tvar p PayloadAction\n\tif err := json.Unmarshal(payload, &p); err != nil {\n\t\tastilog.Error(errors.Wrapf(err, \"astimousing: json unmarshaling %s into %#v failed\", payload, p))\n\t\treturn nil\n\t}\n\n\t// Switch on action\n\tswitch p.Action {\n\tcase actionClickLeft:\n\t\tastilog.Debugf(\"astimousing: clicking left mouse button with double %v\", p.Double)\n\t\ta.ms.ClickLeft(p.Double)\n\tcase actionClickMiddle:\n\t\tastilog.Debugf(\"astimousing: clicking middle mouse button with double %v\", p.Double)\n\t\ta.ms.ClickMiddle(p.Double)\n\tcase actionClickRight:\n\t\tastilog.Debugf(\"astimousing: clicking right mouse button with double %v\", p.Double)\n\t\ta.ms.ClickRight(p.Double)\n\tcase actionMove:\n\t\tastilog.Debugf(\"astimousing: moving mouse to %dx%d\", p.X, p.Y)\n\t\ta.ms.Move(p.X, p.Y)\n\tcase actionScrollDown:\n\t\tastilog.Debugf(\"astimousing: scrolling down with x %d\", p.X)\n\t\ta.ms.ScrollDown(p.X)\n\tcase actionScrollUp:\n\t\tastilog.Debugf(\"astimousing: scrolling up with x %d\", p.X)\n\t\ta.ms.ScrollUp(p.X)\n\tdefault:\n\t\tastilog.Errorf(\"astimousing: unknown action %s\", p.Action)\n\t}\n\treturn nil\n}",
"func (s Snowflake) Int() int64 {\n\ti, _ := strconv.ParseInt(string(s), 10, 0)\n\treturn i\n}",
"func ToInt32(i interface{}) int32 {\n\treturn cast.ToInt32(i)\n}",
"func (r *Redis) Int(reply interface{}, err error) (int, error) {\n\treturn redigo.Int(reply, err)\n}",
"func (client *Client) performInterfaceAction(sa *InterfaceAction) (changeID string, err error) {\n\tb, err := json.Marshal(sa)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn client.doAsync(\"POST\", \"/v2/interfaces\", nil, nil, bytes.NewReader(b))\n}",
"func (kss *KqiSourceSelect) Int(ctx context.Context) (_ int, err error) {\n\tvar v []int\n\tif v, err = kss.Ints(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{kqisource.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: KqiSourceSelect.Ints returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func Int(v *Value, def int) int {\n\ti, err := v.Int()\n\tif err != nil {\n\t\treturn def\n\t}\n\treturn i\n}",
"func (d *DDL) ActionInterface(action string) (*Action, error) {\n\tfor _, act := range d.Actions {\n\t\tif act.Name == action {\n\t\t\treturn act, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"could not found an action called %s#%s\", d.Metadata.Name, action)\n}",
"func Int(v *int) int {\n\tif v != nil {\n\t\treturn *v\n\t}\n\treturn 0\n}",
"func ToInt(value interface{}) interface{} {\n\tswitch value := value.(type) {\n\tcase bool:\n\t\tif value {\n\t\t\treturn 1\n\t\t}\n\t\treturn 0\n\tcase int:\n\t\tif value < int(math.MinInt32) || value > int(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn value\n\tcase *int:\n\t\treturn ToInt(*value)\n\tcase int8:\n\t\treturn int(value)\n\tcase *int8:\n\t\treturn int(*value)\n\tcase int16:\n\t\treturn int(value)\n\tcase *int16:\n\t\treturn int(*value)\n\tcase int32:\n\t\treturn int(value)\n\tcase *int32:\n\t\treturn int(*value)\n\tcase int64:\n\t\tif value < int64(math.MinInt32) || value > int64(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase *int64:\n\t\treturn ToInt(*value)\n\tcase uint:\n\t\tif value > math.MaxInt32 {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase *uint:\n\t\treturn ToInt(*value)\n\tcase uint8:\n\t\treturn int(value)\n\tcase *uint8:\n\t\treturn int(*value)\n\tcase uint16:\n\t\treturn int(value)\n\tcase *uint16:\n\t\treturn int(*value)\n\tcase uint32:\n\t\tif value > uint32(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase *uint32:\n\t\treturn ToInt(*value)\n\tcase uint64:\n\t\tif value > uint64(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase *uint64:\n\t\treturn ToInt(*value)\n\tcase float32:\n\t\tif value < float32(math.MinInt32) || value > float32(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase *float32:\n\t\treturn ToInt(*value)\n\tcase float64:\n\t\tif value < float64(math.MinInt32) || value > float64(math.MaxInt32) {\n\t\t\treturn nil\n\t\t}\n\t\treturn int(value)\n\tcase *float64:\n\t\treturn ToInt(*value)\n\tcase string:\n\t\tval, err := strconv.ParseFloat(value, 32)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn ToInt(val)\n\tcase *string:\n\t\treturn ToInt(*value)\n\t}\n\n\t// If the value cannot be transformed into an int, return nil instead of '0'\n\t// to denote 'no integer found'\n\treturn nil\n}",
"func (s *Str) Int() int {\n\tval, err := strconv.Atoi(s.val)\n\tif err != nil {\n\t\ts.err = err\n\t}\n\treturn val\n}",
"func (v Int) Int32() int32 {\n\treturn v.v\n}",
"func (qiuo *QueueItemUpdateOne) SetAction(i int) *QueueItemUpdateOne {\n\tqiuo.mutation.ResetAction()\n\tqiuo.mutation.SetAction(i)\n\treturn qiuo\n}",
"func ToInt(v interface{}) (result int) {\n\tswitch v := v.(type) {\n\tcase string:\n\t\tresult, _ = strconv.Atoi(v)\n\tcase int:\n\t\tresult = v\n\tcase int64:\n\t\tresult = int(v)\n\tcase float64:\n\t\tresult = int(v)\n\tcase uint8:\n\t\tresult, _ = strconv.Atoi(string(v))\n\tcase []uint8:\n\t\tresult, _ = strconv.Atoi(string(v))\n\tcase []interface{}:\n\t\tif len(v) == 1 {\n\t\t\tresult = ToInt(v[0])\n\t\t}\n\tdefault:\n\t}\n\n\treturn result\n}",
"func (res Response) AsInt32() (int32, error) {\n\treturn res.Bits.AsInt32(), res.Error\n}",
"func ToInt(i interface{}) (int, error) {\n\ti = indirect(i)\n\n\tswitch s := i.(type) {\n\tcase int:\n\t\treturn s, nil\n\tcase int64:\n\t\treturn int(s), nil\n\tcase int32:\n\t\treturn int(s), nil\n\tcase int16:\n\t\treturn int(s), nil\n\tcase int8:\n\t\treturn int(s), nil\n\tcase string:\n\t\tv, err := strconv.ParseInt(s, 0, 0)\n\t\tif err == nil {\n\t\t\treturn int(v), nil\n\t\t}\n\t\treturn 0, fmt.Errorf(\"unable to cast %#v to int\", i)\n\tcase float64:\n\t\treturn int(s), nil\n\tcase bool:\n\t\tif s {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase nil:\n\t\treturn 0, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unable to cast %#v to int\", i)\n\t}\n}",
"func Int() int {\n\treturn 111\n}",
"func (buff *Bytes) ToInt() int {\r\n\treturn (int)(binary.LittleEndian.Uint32(*buff))\r\n}",
"func (v *Value) Int() int {\n return Util.ToInt(v.data)\n}",
"func (wts *WorkerTypeSelect) Int(ctx context.Context) (_ int, err error) {\n\tvar v []int\n\tif v, err = wts.Ints(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{workertype.Label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: WorkerTypeSelect.Ints returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}",
"func (res Response) AsInt() (int, error) {\n\treturn res.Bits.AsInt(), res.Error\n}",
"func Int(input interface{}) (output int64, err error) {\n\n\tswitch castValue := input.(type) {\n\tcase Inter:\n\t\toutput = castValue.Int()\n\t\treturn\n\tcase string:\n\t\toutput, err = strconv.ParseInt(castValue, 10, 64)\n\t\treturn\n\tcase []byte:\n\t\toutput, err = strconv.ParseInt(string(castValue), 10, 64)\n\t\treturn\n\tcase int:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase int8:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase int16:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase int32:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase int64:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase uint:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase uint8:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase uint16:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase uint32:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase uint64:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase float32:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase float64:\n\t\toutput = int64(castValue)\n\t\treturn\n\tcase bool:\n\t\toutput = int64(0)\n\t\tif castValue {\n\t\t\toutput = int64(1)\n\t\t}\n\t\treturn\n\tcase nil:\n\t\toutput = int64(0)\n\t\treturn\n\tdefault:\n\t\terr = NewCastError(\"Could not convert to int\")\n\t}\n\treturn\n}",
"func (v Value10) Int() int { return int(v) }",
"func ToUnicodeInt(suit, rank int) (int, error) {\n\tbase := unicode_facedown_card\n\tif suit < 0 || suit > 3 {\n\t\treturn base, inputError(0, 3, suit)\n\t}\n\tif rank < 1 || rank > 13 {\n\t\treturn base, inputError(1, 13, rank)\n\t}\n\tval := base + (suit * 16) + rank\n\treturn val, nil\n}",
"func (state *State) OptInt(index int, optInt int64) int64 {\n\tif state.TypeAt(index) == NoneType {\n\t\treturn optInt\n\t}\n\treturn state.CheckInt(index)\n}",
"func (d Direction) Int() int {\n\treturn [...]int{0, 1, 2, 3}[d]\n}",
"func (k Key) Target() uint32 {\n\treturn uint32(k[16])<<24 | uint32(k[17])<<16 | uint32(k[18])<<8 | uint32(k[19])\n}",
"func (qiu *QueueItemUpdate) SetAction(i int) *QueueItemUpdate {\n\tqiu.mutation.ResetAction()\n\tqiu.mutation.SetAction(i)\n\treturn qiu\n}",
"func (s *selector) Int(ctx context.Context) (_ int, err error) {\n\tvar v []int\n\tif v, err = s.Ints(ctx); err != nil {\n\t\treturn\n\t}\n\tswitch len(v) {\n\tcase 1:\n\t\treturn v[0], nil\n\tcase 0:\n\t\terr = &NotFoundError{s.label}\n\tdefault:\n\t\terr = fmt.Errorf(\"ent: Ints returned %d results when one was expected\", len(v))\n\t}\n\treturn\n}"
] | [
"0.6858114",
"0.5630274",
"0.5474764",
"0.5404166",
"0.5386555",
"0.522365",
"0.51261777",
"0.50806195",
"0.50114006",
"0.49455163",
"0.49418768",
"0.49413294",
"0.4935143",
"0.49315566",
"0.48762146",
"0.4867004",
"0.48524567",
"0.4833814",
"0.48174962",
"0.48106116",
"0.480051",
"0.4792819",
"0.47520715",
"0.47507277",
"0.47463807",
"0.4723252",
"0.47218755",
"0.47182935",
"0.47071514",
"0.46971753",
"0.46927997",
"0.4688545",
"0.4688545",
"0.46877062",
"0.46853155",
"0.46791732",
"0.4674929",
"0.46688455",
"0.46678984",
"0.46517906",
"0.465089",
"0.46341294",
"0.46165654",
"0.46144113",
"0.46120688",
"0.46085355",
"0.4603411",
"0.4601444",
"0.45848027",
"0.458391",
"0.45819312",
"0.45745778",
"0.45707062",
"0.4560931",
"0.45542207",
"0.45518714",
"0.45509973",
"0.4550392",
"0.45497283",
"0.45494324",
"0.45466375",
"0.4543699",
"0.4536909",
"0.4530132",
"0.45264447",
"0.45246977",
"0.4523204",
"0.45199326",
"0.45189467",
"0.45073622",
"0.45068634",
"0.4501925",
"0.45002007",
"0.4499019",
"0.44976386",
"0.44965693",
"0.44922403",
"0.4488064",
"0.44860506",
"0.44833338",
"0.44832084",
"0.4477365",
"0.44725195",
"0.4471145",
"0.4469789",
"0.4469388",
"0.44686985",
"0.446214",
"0.44597548",
"0.4458993",
"0.44587886",
"0.44562137",
"0.44554958",
"0.44552967",
"0.44498056",
"0.4447589",
"0.44416988",
"0.44413847",
"0.44391438",
"0.44369018"
] | 0.7367914 | 0 |
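The AsInt record above and the ActionFromInt record that follows are the two halves of the same 4-bit, 2-player action encoding: bit 0 carries PlayRecent, and the upper bits carry either the selected (guessed) card or the target-player offset, which never occupy the encoding at the same time. Below is a minimal round-trip sketch of how the two functions compose. The function bodies are taken from the document fields of these records (comments trimmed); the Card and Action type definitions, including the card names and the None = 0 / Guard = 1 ordering, are assumptions inferred from the snippets rather than the original package.

```go
package main

import "fmt"

// Minimal stand-ins for the types the two snippets rely on. The card names
// and the None = 0 / Guard = 1 ordering are assumptions inferred from the
// snippets, not taken from the original package.
type Card int

const (
	None Card = iota // no card selected
	Guard
	Priest // name assumed; only its position after Guard matters here
)

type Action struct {
	PlayRecent         bool
	SelectedCard       Card
	TargetPlayerOffset int
}

// AsInt and ActionFromInt reproduce the document fields above (comments trimmed).
func (act Action) AsInt() int {
	retVal := 0
	if act.PlayRecent {
		retVal = 1
	}
	if act.SelectedCard != None && act.SelectedCard != Guard {
		retVal += 2 * (int(act.SelectedCard) - 1)
	} else if act.TargetPlayerOffset > 0 {
		retVal += 2 * act.TargetPlayerOffset
	}
	return retVal
}

func ActionFromInt(st int) Action {
	act := Action{}
	if st%2 == 1 {
		act.PlayRecent = true
	}
	st = (st & 0xF) >> 1
	if st > 0 {
		act.TargetPlayerOffset = 1
	}
	act.SelectedCard = Card(st + 1)
	return act
}

func main() {
	// Bit 0 encodes PlayRecent; the upper bits carry either the selected
	// (guessed) card or the target offset, which never conflict.
	a := Action{PlayRecent: true, SelectedCard: Priest, TargetPlayerOffset: 1}
	code := a.AsInt()           // 1 + 2*(int(Priest)-1) = 3
	back := ActionFromInt(code) // recovers the same action in a 2-player game
	fmt.Println(code, back == a)
	// Output: 3 true
}
```

The decode side infers a target offset of 1 whenever the upper bits are non-zero, which is the 2-player assumption called out in the comment inside ActionFromInt.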
ActionFromInt reverses action.AsInt, but only for the 4 bits that compose the action. Other bits are ignored. This only works for a 2-player game. | func ActionFromInt(st int) Action {
act := Action{}
if st%2 == 1 {
act.PlayRecent = true
}
st = (st & 0xF) >> 1
// Now st is the TargetPlayerOffset or SelectedCard. We don't know which, but if *any* card was selected,
// then the other player must be targeted (since the played card is a guard), so for 2 players the offset is 1.
if st > 0 {
act.TargetPlayerOffset = 1
}
act.SelectedCard = Card(st + 1)
return act
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (act Action) AsInt() int {\n\tretVal := 0\n\tif act.PlayRecent {\n\t\tretVal = 1\n\t}\n\tif act.SelectedCard != None && act.SelectedCard != Guard {\n\t\tretVal += 2 * (int(act.SelectedCard) - 1)\n\t} else if act.TargetPlayerOffset > 0 {\n\t\tretVal += 2 * act.TargetPlayerOffset\n\t}\n\treturn retVal\n}",
"func toAction(actionInput string) (GameAction, error) {\n\tnormalised := strings.ToUpper(strings.TrimSuffix(actionInput, \"\\n\"))\n\tif len(normalised) < 1 {\n\t\treturn -1, errors.New(\"No action specified\")\n\t}\n\n\tswitch normalised[0] {\n\tcase 'E':\n\t\treturn Explore, nil\n\n\tcase 'F':\n\t\treturn Flag, nil\n\n\tdefault:\n\t\treturn -1, errors.New(\"Invalid action\")\n\t}\n}",
"func (q *QLearning) GetAction(s State) Action {\n\taction := Action(0)\n\tmax := q.qt[s][0]\n\n\tfor i := 1; i < q.actns; i++ {\n\t\tif max < q.qt[s][Action(i)] {\n\t\t\tmax = q.qt[s][Action(i)]\n\t\t\taction = Action(i)\n\t\t}\n\t}\n\treturn action\n}",
"func (state *State) ActionToString(player int, action int) string {\n\tcs := C.StateActionToString(state.state, C.int(player), C.int(action))\n\tstr := C.GoString(cs)\n\tC.free(unsafe.Pointer(cs))\n\treturn str\n}",
"func (k Kerx4) ActionType() uint8 {\n\tconst ActionType = 0xC0000000 // A two-bit field containing the action type.\n\treturn uint8(k.flags & ActionType >> 30)\n}",
"func ActionFromReadWrite(r, w bool) Action {\n\tvar a Action\n\tif r {\n\t\ta |= R\n\t}\n\tif w {\n\t\ta |= W\n\t}\n\treturn a\n}",
"func (c Int32EnumConvert) Convert(n int, item int32) (int32, error) { return c(n, item) }",
"func NewAction(line string) (Action, error) {\n action := Action{}\n isValid := utf8.ValidString(line)\n if !isValid {\n return action, errors.New(\"Action string contains invalid encoded UTF-8\")\n }\n\n instructionRune, length := utf8.DecodeRuneInString(line)\n v, err := strconv.Atoi(line[length:])\n\n if err != nil {\n return action, err\n }\n\n action.Instruction = string(instructionRune)\n action.Value = v\n return action, nil\n}",
"func (m *Message) Action() (*Action, error) {\n\tif err := m.checkType(ActionName); err != nil {\n\t\treturn nil, err\n\t}\n\tif m.Raw == false {\n\t\treturn m.Payload.(*Action), nil\n\t}\n\tobj := new(Action)\n\treturn obj, m.unmarshalPayload(obj)\n}",
"func (ec *executionContext) _ActionsAction_actionID(ctx context.Context, field graphql.CollectedField, obj *models.ActionsAction) (ret graphql.Marshaler) {\n\tctx = ec.Tracer.StartFieldExecution(ctx, field)\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tec.Error(ctx, ec.Recover(ctx, r))\n\t\t\tret = graphql.Null\n\t\t}\n\t\tec.Tracer.EndFieldExecution(ctx)\n\t}()\n\trctx := &graphql.ResolverContext{\n\t\tObject: \"ActionsAction\",\n\t\tField: field,\n\t\tArgs: nil,\n\t\tIsMethod: false,\n\t}\n\tctx = graphql.WithResolverContext(ctx, rctx)\n\tctx = ec.Tracer.StartFieldResolverExecution(ctx, rctx)\n\tresTmp, err := ec.ResolverMiddleware(ctx, func(rctx context.Context) (interface{}, error) {\n\t\tctx = rctx // use context from middleware stack in children\n\t\treturn obj.ActionID, nil\n\t})\n\tif err != nil {\n\t\tec.Error(ctx, err)\n\t\treturn graphql.Null\n\t}\n\tif resTmp == nil {\n\t\tif !ec.HasError(rctx) {\n\t\t\tec.Errorf(ctx, \"must not be null\")\n\t\t}\n\t\treturn graphql.Null\n\t}\n\tres := resTmp.(core.ActionID)\n\trctx.Result = res\n\tctx = ec.Tracer.StartFieldChildExecution(ctx)\n\treturn ec.marshalNActionID2githubᚗcomᚋfacebookincubatorᚋsymphonyᚋpkgᚋactionsᚋcoreᚐActionID(ctx, field.Selections, res)\n}",
"func ConvertToInt(target string, def int) int {\n\tfo, err := strconv.Atoi(target)\n\tif err != nil {\n\t\treturn def\n\t}\n\treturn fo\n}",
"func (a *Action) Get(i int) string {\n if a==nil || i<0 || i>= len(a.actions) {\n return \"\"\n }\n return a.actions[i]\n}",
"func FactionFromString(c string) Faction {\n\tswitch c {\n\tcase \"mine\":\n\t\treturn Faction_mine\n\tcase \"opponent\":\n\t\treturn Faction_opponent\n\n\tdefault:\n\t\treturn 0\n\t}\n}",
"func NewRecvAction(args any) *Action {\n\treturn &Action{Args: args}\n}",
"func Action(v int) predicate.QueueItem {\n\treturn predicate.QueueItem(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldAction), v))\n\t})\n}",
"func (c UintEnumConvert) Convert(n int, item uint) (uint, error) { return c(n, item) }",
"func (qiu *QueueItemUpdate) SetAction(i int) *QueueItemUpdate {\n\tqiu.mutation.ResetAction()\n\tqiu.mutation.SetAction(i)\n\treturn qiu\n}",
"func UpdateActionAction(c *gin.Context) {\n\tresult := render.NewResult()\n\tdefer c.JSON(http.StatusOK, result)\n\n\tidArg := c.Param(\"id\")\n\tid, err := strconv.ParseUint(idArg, 10, 64)\n\tif nil != err {\n\t\tresult.Error(err)\n\n\t\treturn\n\t}\n\n\taction := &model.Action{Model: model.Model{ID: uint64(id)}}\n\tif err := c.BindJSON(action); nil != err {\n\t\tresult.Error(errors.New(\"parses update action request failed\"))\n\n\t\treturn\n\t}\n\n\tsrv := service.FromContext(c)\n\tif err := srv.Actions.Update(c, action); nil != err {\n\t\tresult.Error(err)\n\t}\n}",
"func GetActionAction(c *gin.Context) {\n\tresult := render.NewResult()\n\tdefer c.JSON(http.StatusOK, result)\n\n\tidArg := c.Param(\"id\")\n\tid, err := strconv.ParseUint(idArg, 10, 64)\n\tif nil != err {\n\t\tresult.Error(err)\n\n\t\treturn\n\t}\n\n\tsrv := service.FromContext(c)\n\tdata, err := srv.Actions.Find(c, id)\n\tif nil == data {\n\t\tresult.Error(err)\n\n\t\treturn\n\t}\n\n\tresult.Result = data\n}",
"func (t *RestURLDescriptor) ConvertToInt() *ggt.MethodDescriptor { return t.methodConvertToInt }",
"func (qiuo *QueueItemUpdateOne) SetAction(i int) *QueueItemUpdateOne {\n\tqiuo.mutation.ResetAction()\n\tqiuo.mutation.SetAction(i)\n\treturn qiuo\n}",
"func UnmarshalActionState(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ActionState)\n\terr = core.UnmarshalPrimitive(m, \"status_code\", &obj.StatusCode)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"status_job_id\", &obj.StatusJobID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"status_message\", &obj.StatusMessage)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func (a AnyMessage) Action() Action {\n\tif action, ok := a[KeyAction].(string); ok {\n\t\treturn Action(action)\n\t}\n\treturn ActionEmpty\n}",
"func Action(message string) string {\n\treturn Encode(ACTION, message)\n}",
"func ConvertStringToAction(in string) (configs.Action, error) {\n\tswitch in {\n\tcase \"SCMP_ACT_KILL\":\n\t\treturn configs.Kill, nil\n\tcase \"SCMP_ACT_ERRNO\":\n\t\treturn configs.Errno, nil\n\tcase \"SCMP_ACT_TRAP\":\n\t\treturn configs.Trap, nil\n\tcase \"SCMP_ACT_ALLOW\":\n\t\treturn configs.Allow, nil\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"string %s is not a valid action for seccomp\", in)\n\t}\n}",
"func UnmarshalAction(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(Action)\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"location\", &obj.Location)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"resource_group\", &obj.ResourceGroup)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"tags\", &obj.Tags)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"user_state\", &obj.UserState, UnmarshalUserState)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"source_readme_url\", &obj.SourceReadmeURL)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"source\", &obj.Source, UnmarshalExternalSource)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"source_type\", &obj.SourceType)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"command_parameter\", &obj.CommandParameter)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"bastion\", &obj.Bastion, UnmarshalTargetResourceset)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"targets_ini\", &obj.TargetsIni)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"credentials\", &obj.Credentials, UnmarshalVariableData)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"inputs\", &obj.Inputs, UnmarshalVariableData)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"outputs\", &obj.Outputs, UnmarshalVariableData)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"settings\", &obj.Settings, UnmarshalVariableData)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"trigger_record_id\", &obj.TriggerRecordID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crn\", &obj.Crn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"account\", &obj.Account)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"source_created_at\", &obj.SourceCreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"source_created_by\", &obj.SourceCreatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"source_updated_at\", &obj.SourceUpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"source_updated_by\", &obj.SourceUpdatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_at\", &obj.CreatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"created_by\", &obj.CreatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_at\", &obj.UpdatedAt)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"updated_by\", &obj.UpdatedBy)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"namespace\", &obj.Namespace)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"state\", &obj.State, UnmarshalActionState)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"playbook_names\", &obj.PlaybookNames)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, 
\"sys_lock\", &obj.SysLock, UnmarshalSystemLock)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func (r *ISAAC) Int() int {\n\tu := uint(r.Uint64())\n\treturn int(u << 1 >> 1) // clear sign bit if int == int32\n}",
"func (d Direction) Unturn(action Action) Direction {\n\tswitch action {\n\tcase ActionTurnLeft:\n\t\treturn (d + DirectionInvalid + 1) % DirectionInvalid\n\tcase ActionTurnRight:\n\t\treturn (d + DirectionInvalid - 1) % DirectionInvalid\n\tcase ActionStraight:\n\t\treturn d\n\tdefault:\n\t\tpanic(\"Invalid action provided\")\n\t}\n}",
"func (self *NXOutput) GetActionMessage() openflow13.Action {\n\tofsNbits := self.fieldRange.ToOfsBits()\n\ttargetField := self.field\n\t// Create NX output Register action\n\treturn openflow13.NewOutputFromField(targetField, ofsNbits)\n}",
"func (a *Ability) websocketListenerAction(c *astiws.Client, eventName string, payload json.RawMessage) error {\n\t// Ability is not activated\n\ta.m.Lock()\n\tactivated := a.activated\n\ta.m.Unlock()\n\tif !activated {\n\t\tastilog.Error(\"astimousing: ability is not activated\")\n\t\treturn nil\n\t}\n\n\t// Unmarshal payload\n\tvar p PayloadAction\n\tif err := json.Unmarshal(payload, &p); err != nil {\n\t\tastilog.Error(errors.Wrapf(err, \"astimousing: json unmarshaling %s into %#v failed\", payload, p))\n\t\treturn nil\n\t}\n\n\t// Switch on action\n\tswitch p.Action {\n\tcase actionClickLeft:\n\t\tastilog.Debugf(\"astimousing: clicking left mouse button with double %v\", p.Double)\n\t\ta.ms.ClickLeft(p.Double)\n\tcase actionClickMiddle:\n\t\tastilog.Debugf(\"astimousing: clicking middle mouse button with double %v\", p.Double)\n\t\ta.ms.ClickMiddle(p.Double)\n\tcase actionClickRight:\n\t\tastilog.Debugf(\"astimousing: clicking right mouse button with double %v\", p.Double)\n\t\ta.ms.ClickRight(p.Double)\n\tcase actionMove:\n\t\tastilog.Debugf(\"astimousing: moving mouse to %dx%d\", p.X, p.Y)\n\t\ta.ms.Move(p.X, p.Y)\n\tcase actionScrollDown:\n\t\tastilog.Debugf(\"astimousing: scrolling down with x %d\", p.X)\n\t\ta.ms.ScrollDown(p.X)\n\tcase actionScrollUp:\n\t\tastilog.Debugf(\"astimousing: scrolling up with x %d\", p.X)\n\t\ta.ms.ScrollUp(p.X)\n\tdefault:\n\t\tastilog.Errorf(\"astimousing: unknown action %s\", p.Action)\n\t}\n\treturn nil\n}",
"func (c WafAction) ToPtr() *WafAction {\n\treturn &c\n}",
"func EditAction(c *gin.Context) {\n\tvar action = models.Action{}.MapRequestToAction(c)\n\n\taction.ID = helpers.GrabIDParamAndConvertToUInt(c)\n\n\tresult := models.DB.Save(&action)\n\n\tif result.RowsAffected == 0 {\n\t\tc.JSON(http.StatusOK, helpers.NoResults())\n\t\treturn\n\t}\n\n\t// TODO: Is this neccessary?\n\tif result.Error != nil {\n\t\tc.JSON(http.StatusBadRequest, helpers.BadRequest())\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"data\": helpers.Results{\n\t\tCount: 1,\n\t\tResults: action,\n\t}})\n}",
"func (t *RestPostDescriptor) ConvertToInt() *ggt.MethodDescriptor { return t.methodConvertToInt }",
"func (t *RestPost) ConvertToInt(w http.ResponseWriter, r *http.Request) {\n\tt.Log.Handle(w, r, nil, \"begin\", \"RestPost\", \"ConvertToInt\")\n\n\t{\n\t\terr := r.ParseForm()\n\n\t\tif err != nil {\n\n\t\t\tt.Log.Handle(w, r, err, \"parseform\", \"error\", \"RestPost\", \"ConvertToInt\")\n\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\t\treturn\n\t\t}\n\n\t}\n\tvar postArg1 int\n\tif _, ok := r.Form[\"arg1\"]; ok {\n\t\txxTmppostArg1 := r.FormValue(\"arg1\")\n\t\tt.Log.Handle(w, r, nil, \"input\", \"form\", \"arg1\", xxTmppostArg1, \"RestPost\", \"ConvertToInt\")\n\t\t{\n\t\t\tvar err error\n\t\t\tpostArg1, err = strconv.Atoi(xxTmppostArg1)\n\n\t\t\tif err != nil {\n\n\t\t\t\tt.Log.Handle(w, r, err, \"post\", \"error\", \"RestPost\", \"ConvertToInt\")\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tt.embed.ConvertToInt(postArg1)\n\n\tw.WriteHeader(200)\n\n\tt.Log.Handle(w, r, nil, \"end\", \"RestPost\", \"ConvertToInt\")\n\n}",
"func (state *State) ApplyAction(action int) {\n\tC.StateApplyAction(state.state, C.int(action))\n}",
"func (me XsdGoPkgHasAttr_Action_TactionType_Other) ActionDefault() TactionType {\n\treturn TactionType(\"other\")\n}",
"func GetIntFromInt(i int) int {\n\treturn i\n}",
"func ActionFrom(clientConfig clientcmd.ClientConfig, flags *pflag.FlagSet, args []string) (action Action, err error) {\n\tif len(args) < 2 {\n\t\terr = errors.New(\"you must specify two or three arguments: verb, resource, and optional resourceName\")\n\t\treturn\n\t}\n\n\taction.Verb = args[0]\n\tif strings.HasPrefix(args[1], \"/\") {\n\t\taction.NonResourceURL = args[1]\n\t\tklog.V(3).Infof(\"Resolved nonResourceURL `%s`\", action.NonResourceURL)\n\t} else {\n\t\tresourceTokens := strings.SplitN(args[1], \"/\", 2)\n\t\taction.Resource = resourceTokens[0]\n\t\tif len(resourceTokens) > 1 {\n\t\t\taction.ResourceName = resourceTokens[1]\n\t\t\tklog.V(3).Infof(\"Resolved resourceName `%s`\", action.ResourceName)\n\t\t}\n\t}\n\n\taction.SubResource, err = flags.GetString(subResourceFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\n\taction.AllNamespaces, err = flags.GetBool(allNamespacesFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif action.AllNamespaces {\n\t\taction.Namespace = core.NamespaceAll\n\t\tklog.V(3).Infof(\"Resolved namespace `%s` from --all-namespaces flag\", action.Namespace)\n\t\treturn\n\t}\n\n\taction.Namespace, err = flags.GetString(namespaceFlag)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif action.Namespace != \"\" {\n\t\tklog.V(3).Infof(\"Resolved namespace `%s` from --namespace flag\", action.Namespace)\n\t\treturn\n\t}\n\n\t// Neither --all-namespaces nor --namespace flag was specified\n\taction.Namespace, _, err = clientConfig.Namespace()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"getting namespace from current context: %v\", err)\n\t}\n\tklog.V(3).Infof(\"Resolved namespace `%s` from current context\", action.Namespace)\n\treturn\n}",
"func (t *RestURL) ConvertToInt(w http.ResponseWriter, r *http.Request) {\n\tt.Log.Handle(w, r, nil, \"begin\", \"RestURL\", \"ConvertToInt\")\n\n\txxRouteVars := mux.Vars(r)\n\n\txxURLValues := r.URL.Query()\n\tvar urlArg1 int\n\tif false {\n\t} else if _, ok := xxRouteVars[\"arg1\"]; ok {\n\t\txxTmpurlArg1 := xxRouteVars[\"arg1\"]\n\t\t{\n\t\t\tvar err error\n\t\t\turlArg1, err = strconv.Atoi(xxTmpurlArg1)\n\n\t\t\tif err != nil {\n\n\t\t\t\tt.Log.Handle(w, r, err, \"url\", \"route\", \"error\", \"RestURL\", \"ConvertToInt\")\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\n\t} else if _, ok := xxURLValues[\"arg1\"]; ok {\n\t\txxTmpurlArg1 := xxURLValues.Get(\"arg1\")\n\t\t{\n\t\t\tvar err error\n\t\t\turlArg1, err = strconv.Atoi(xxTmpurlArg1)\n\n\t\t\tif err != nil {\n\n\t\t\t\tt.Log.Handle(w, r, err, \"url\", \"get\", \"error\", \"RestURL\", \"ConvertToInt\")\n\t\t\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tt.embed.ConvertToInt(urlArg1)\n\n\tw.WriteHeader(200)\n\n\tt.Log.Handle(w, r, nil, \"end\", \"RestURL\", \"ConvertToInt\")\n\n}",
"func ReadAction(in io.Reader) (*Action, error) {\n\ta := new(Action)\n\terr := yaml.NewDecoder(in).Decode(a)\n\treturn a, err\n}",
"func (s *Scanner) Action(state State, char byte) (a Action) {\n\n\tswitch state {\n\n\tcase StartState:\n\t\tswitch {\n\n\t\tcase s.isAlpha(char), s.isNumeric(char), s.isColon(char),\n\t\t\ts.isDash(char):\n\t\t\ta = MoveAppend\n\n\t\tcase s.isWhitespace(char):\n\t\t\ta = MoveNoAppend\n\n\t\tcase s.isPlus(char), s.isSemicolon(char), s.isLParen(char),\n\t\t\ts.isRParen(char), s.isComma(char), s.isEquals(char):\n\t\t\ta = HaltAppend\n\n\t\tcase s.isEof(char):\n\t\t\ta = HaltNoAppend\n\n\t\tdefault:\n\t\t\ta = ActionError\n\t\t}\n\n\tcase ScanAlpha:\n\t\tif s.isAlpha(char) || s.isNumeric(char) || s.isUnderscore(char) {\n\t\t\ta = MoveAppend\n\t\t} else {\n\t\t\ta = HaltReuse\n\t\t}\n\n\tcase ScanWhitespace:\n\t\tif s.isWhitespace(char) {\n\t\t\ta = MoveNoAppend\n\t\t} else {\n\t\t\ta = MoveAppend\n\t\t}\n\n\tcase ScanNumeric:\n\t\tif s.isNumeric(char) {\n\t\t\ta = MoveAppend\n\t\t} else {\n\t\t\ta = HaltReuse\n\t\t}\n\n\tcase ScanColon:\n\t\tif s.isEquals(char) {\n\t\t\ta = HaltAppend\n\t\t} else {\n\t\t\ta = ActionError\n\t\t}\n\n\tcase ScanDash:\n\t\tif s.isDash(char) {\n\t\t\ta = MoveAppend\n\t\t} else {\n\t\t\ta = HaltReuse\n\t\t}\n\n\tcase ProcessPlusOp, ProcessSemicolon, ProcessLParen, ProcessRParen,\n\t\tProcessComma, ProcessAssign, ProcessComment:\n\t\ta = HaltReuse\n\n\tcase ScanComment:\n\t\ta = MoveNoAppend\n\n\tdefault:\n\t\ta = ActionError\n\t}\n\n\treturn\n}",
"func NewAction(action EngineAction) *PacketUnstable {\n\tpkt := &PacketUnstable{}\n\tswitch action := action.(type) {\n\tcase ButtonPressedAction:\n\t\tpkt.Cmd = CmdButtonPressedAction\n\tcase ButtonReleasedAction:\n\t\tpkt.Cmd = CmdButtonReleasedAction\n\tdefault:\n\t\tpanic(fmt.Errorf(\"support for action %T not yet implemented\", action))\n\t}\n\tbuf := &bytes.Buffer{}\n\tif err := binary.Write(buf, binary.LittleEndian, action); err != nil {\n\t\tdie(err)\n\t}\n\tpkt.Data = buf.Bytes()\n\treturn pkt\n}",
"func (o ApiOutput) Action() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *Api) pulumi.StringPtrOutput { return v.Action }).(pulumi.StringPtrOutput)\n}",
"func ConvertAnyMessage(m AnyMessage) (ActionMessage, error) {\n\ta := m.Action()\n\tswitch a {\n\t// TODO support other Actions?\n\tcase ActionChatMessage:\n\t\treturn ParseChatMessage(m, a)\n\tcase ActionReadMessage:\n\t\treturn ParseReadMessage(m, a)\n\tcase ActionTypeStart:\n\t\treturn ParseTypeStart(m, a)\n\tcase ActionTypeEnd:\n\t\treturn ParseTypeEnd(m, a)\n\tcase ActionEmpty:\n\t\treturn m, errors.New(\"JSON object must have any action field\")\n\t}\n\treturn m, errors.New(\"unknown action: \" + string(a))\n}",
"func IntegerToASCII2(x int) string {\n\treturn strconv.Itoa(x)\n}",
"func stateToInt(s string) int {\n\tvar i int\n\tswitch {\n\tcase s == \"UNKNOWN\":\n\t\ti = uUNKNOWN\n\tcase s == \"INIT\":\n\t\ti = uINIT\n\tcase s == \"READY\":\n\t\ti = uREADY\n\tcase s == \"TEST\":\n\t\ti = uTEST\n\tcase s == \"DONE\":\n\t\ti = uDONE\n\tcase s == \"TERM\":\n\t\ti = uTERM\n\tdefault:\n\t\ti = -1\n\t}\n\treturn i\n}",
"func UnmarshalVersionAction(m map[string]json.RawMessage, result interface{}) (err error) {\n\t// Retrieve discriminator value to determine correct \"subclass\".\n\tvar discValue string\n\terr = core.UnmarshalPrimitive(m, \"action_type\", &discValue)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"error unmarshalling discriminator property 'action_type': %s\", err.Error())\n\t\treturn\n\t}\n\tif discValue == \"\" {\n\t\terr = fmt.Errorf(\"required discriminator property 'action_type' not found in JSON object\")\n\t\treturn\n\t}\n\tif discValue == \"private_cert_action_revoke_certificate\" {\n\t\terr = core.UnmarshalModel(m, \"\", result, UnmarshalPrivateCertificateVersionActionRevoke)\n\t} else {\n\t\terr = fmt.Errorf(\"unrecognized value for discriminator property 'action_type': %s\", discValue)\n\t}\n\treturn\n}",
"func ActionEQ(v int) predicate.QueueItem {\n\treturn predicate.QueueItem(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldAction), v))\n\t})\n}",
"func importActionHandler(w http.ResponseWriter, r *http.Request, db *gorp.DbMap, c *businesscontext.Ctx) error {\n\tvar a *sdk.Action\n\turl := r.Form.Get(\"url\")\n\t//Load action from url\n\tif url != \"\" {\n\t\tvar errnew error\n\t\ta, errnew = sdk.NewActionFromRemoteScript(url, nil)\n\t\tif errnew != nil {\n\t\t\treturn errnew\n\t\t}\n\t} else if r.Header.Get(\"content-type\") == \"multipart/form-data\" {\n\t\t//Try to load from the file\n\t\tr.ParseMultipartForm(64 << 20)\n\t\tfile, _, errUpload := r.FormFile(\"UploadFile\")\n\t\tif errUpload != nil {\n\t\t\treturn sdk.WrapError(sdk.ErrWrongRequest, \"importActionHandler> Cannot load file uploaded: %s\", errUpload)\n\t\t}\n\t\tbtes, errRead := ioutil.ReadAll(file)\n\t\tif errRead != nil {\n\t\t\treturn errRead\n\t\t}\n\n\t\tvar errnew error\n\t\ta, errnew = sdk.NewActionFromScript(btes)\n\t\tif errnew != nil {\n\t\t\treturn errnew\n\t\t}\n\t} else { // a jsonified action is posted in body\n\t\tif err := UnmarshalBody(r, &a); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif a == nil {\n\t\treturn sdk.ErrWrongRequest\n\t}\n\n\ttx, errbegin := db.Begin()\n\tif errbegin != nil {\n\t\treturn errbegin\n\t}\n\n\tdefer tx.Rollback()\n\n\t//Check if action exists\n\texist := false\n\texistingAction, errload := action.LoadPublicAction(tx, a.Name)\n\tif errload == nil {\n\t\texist = true\n\t\ta.ID = existingAction.ID\n\t}\n\n\t//http code status\n\tvar code int\n\n\t//Update or Insert the action\n\tif exist {\n\t\tif err := action.UpdateActionDB(tx, a, c.User.ID); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcode = 200\n\t} else {\n\t\ta.Enabled = true\n\t\ta.Type = sdk.DefaultAction\n\t\tif err := action.InsertAction(tx, a, true); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tcode = 201\n\t}\n\n\tif err := tx.Commit(); err != nil {\n\t\treturn err\n\t}\n\n\treturn WriteJSON(w, r, a, code)\n}",
"func get_action_order(action string, order interface{}) int {\n\t//fmt.Println(app_data[the_app][\"StartOrder\"])\n\tord, _ := order.(string)\n\tord1, _ := strconv.Atoi(ord)\n\trtn := ord1\n\treturn rtn\n}",
"func (c Int32Convert) Convert(item int32) (int32, error) { return c(item) }",
"func (s *SimpleAction) String() string {\n\treturn s.id\n}",
"func GetTFeedbackActionById(id int) (v *TFeedbackAction, err error) {\n\to := orm.NewOrm()\n\tv = &TFeedbackAction{Id: id}\n\tif err = o.Read(v); err == nil {\n\t\treturn v, nil\n\t}\n\treturn nil, err\n}",
"func ToInt(value interface{}) (int, error) {\n\tvalue = indirect(value)\n\n\tvar s string\n\tswitch v := value.(type) {\n\tcase nil:\n\t\treturn 0, nil\n\tcase bool:\n\t\tif v {\n\t\t\treturn 1, nil\n\t\t}\n\t\treturn 0, nil\n\tcase int:\n\t\treturn v, nil\n\tcase int8:\n\t\treturn int(v), nil\n\tcase int16:\n\t\treturn int(v), nil\n\tcase int32:\n\t\treturn int(v), nil\n\tcase int64:\n\t\treturn int(v), nil\n\tcase uint:\n\t\treturn int(v), nil\n\tcase uint8:\n\t\treturn int(v), nil\n\tcase uint16:\n\t\treturn int(v), nil\n\tcase uint32:\n\t\treturn int(v), nil\n\tcase uint64:\n\t\treturn int(v), nil\n\tcase float32:\n\t\treturn int(v), nil\n\tcase float64:\n\t\treturn int(v), nil\n\tcase complex64:\n\t\treturn int(real(v)), nil\n\tcase complex128:\n\t\treturn int(real(v)), nil\n\tcase []byte:\n\t\ts = string(v)\n\tcase string:\n\t\ts = v\n\tcase fmt.Stringer:\n\t\ts = v.String()\n\tdefault:\n\t\treturn 0, fmt.Errorf(\"unable to cast %#v of type %T to int\", v, v)\n\t}\n\n\tif i, err := strconv.ParseInt(s, 0, 64); err == nil {\n\t\treturn int(i), nil\n\t}\n\treturn 0, fmt.Errorf(\"unable to cast %#v of type %T to int\", value, value)\n}",
"func ConvertStringToAction(in string) (configs.Action, error) {\n\tif act, ok := actions[in]; ok {\n\t\treturn act, nil\n\t}\n\treturn 0, fmt.Errorf(\"string %s is not a valid action for seccomp\", in)\n}",
"func UnmarshalActionLiteState(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ActionLiteState)\n\terr = core.UnmarshalPrimitive(m, \"status_code\", &obj.StatusCode)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"status_message\", &obj.StatusMessage)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func (m *UnifiedRoleAssignmentScheduleRequest) GetAction()(*UnifiedRoleScheduleRequestActions) {\n return m.action\n}",
"func GetAction(name string) (Action, error) {\n\tvar a Action\n\n\tpath := fmt.Sprintf(\"/action/%s\", name)\n\tdata, _, err := Request(\"GET\", path, nil)\n\tif err != nil {\n\t\treturn a, err\n\t}\n\n\tif err := json.Unmarshal(data, &a); err != nil {\n\t\treturn a, err\n\t}\n\n\treturn a, nil\n}",
"func convertPolicyAction(in string) string {\n\tswitch in {\n\tcase security.SGRule_PERMIT.String():\n\t\treturn netproto.PolicyRule_PERMIT.String()\n\tcase security.SGRule_DENY.String():\n\t\treturn netproto.PolicyRule_DENY.String()\n\tcase security.SGRule_REJECT.String():\n\t\treturn netproto.PolicyRule_REJECT.String()\n\t}\n\treturn \"\"\n}",
"func (dm *DelveMode) RestAction() data.Action {\n\treturn func(e *data.Entity) {\n\t\te.NextTurn += 1\n\t}\n}",
"func (c *StringConverter) ToInt() (int, error) {\n\ti, err := c.toInt(0)\n\treturn int(i), err\n}",
"func (DefaultCounter) Actions(int, int, int) {\n}",
"func (bbo *TabularBBO) GetAction(s []float64, rng *mathlib.Random) int {\n\t// Convert the one-hot state into an integer from 0 - (numStates-1)\n\tstate := mathlib.FromOneHot(s)\n\t// Get the action probabilities from theta, using softmax action selection.\n\tactionProbabilities := bbo.newTheta[state]\n\tdenominator := 0.0\n\tfor a := 0; a < len(actionProbabilities); a++ {\n\t\tactionProbabilities[a] = math.Exp(actionProbabilities[a])\n\t\tdenominator += actionProbabilities[a]\n\t}\n\tfor a := 0; a < len(actionProbabilities); a++ {\n\t\tactionProbabilities[a] /= denominator\n\t}\n\t// Select random action from softmax\n\ttemp := rng.Float64()\n\tsum := 0.0\n\tfor a := 0; a < bbo.numActions; a++ {\n\t\tsum += actionProbabilities[a]\n\t\tif temp <= sum {\n\t\t\treturn a // The function will return 'a'. This stops the for loop and returns from the function.\n\t\t}\n\t}\n\treturn bbo.numActions - 1 // Rounding error\n}",
"func (self *Output) GetOutAction() openflow13.Action {\n\tswitch self.outputType {\n\tcase \"drop\":\n\t\treturn nil\n\tcase \"toController\":\n\t\toutputAct := openflow13.NewActionOutput(openflow13.P_CONTROLLER)\n\t\t// Dont buffer the packets being sent to controller\n\t\toutputAct.MaxLen = openflow13.OFPCML_NO_BUFFER\n\n\t\treturn outputAct\n\tcase \"normal\":\n\t\tfallthrough\n\tcase \"port\":\n\t\treturn openflow13.NewActionOutput(self.portNo)\n\t}\n\n\treturn nil\n}",
"func (core *coreService) Action(actionHash string, checkPending bool) (*iotexapi.ActionInfo, error) {\n\tif err := core.checkActionIndex(); err != nil {\n\t\treturn nil, err\n\t}\n\tactHash, err := hash.HexStringToHash256(actionHash)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.InvalidArgument, err.Error())\n\t}\n\tact, err := core.getAction(actHash, checkPending)\n\tif err != nil {\n\t\treturn nil, status.Error(codes.Unavailable, err.Error())\n\t}\n\treturn act, nil\n}",
"func (x *CMsgClientToGCManageFavorites_Action) UnmarshalJSON(b []byte) error {\n\tnum, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*x = CMsgClientToGCManageFavorites_Action(num)\n\treturn nil\n}",
"func GetChaincodeAction(caBytes []byte) (*peer.ChaincodeAction, error) {\n\tchaincodeAction := &peer.ChaincodeAction{}\n\terr := proto.Unmarshal(caBytes, chaincodeAction)\n\treturn chaincodeAction, err\n}",
"func flippingBits(n int64) int64 {\n\tn_binary := strconv.FormatInt(n, 2)\n\tn_binary_len := len(n_binary)\n\tif n_binary_len < 32 {\n\t\tfor i := 0; i < 32 - n_binary_len; i++ {\n\t\t\tn_binary = \"0\" + n_binary \n\t\t}\n\t}\n\t\n\tn_binary_slice := []byte(n_binary)\n\tfor i, v := range n_binary_slice {\n\t\tif v == '1' {\n\t\t\tn_binary_slice[i] = '0'\n\t\t} else {\n\t\t\tn_binary_slice[i] = '1'\n\t\t}\n\t}\n\t\n\tres_int, _ := strconv.ParseInt(string(n_binary_slice), 2, 64)\n\treturn res_int\n}",
"func (m AoS) ToInt(f func(s string) I) AoI {\n\tif m.IsErr() {\n\t\treturn ErrAoI(m.err)\n\t}\n\n\txss := make([]int, len(m.just))\n\tfor i, v := range m.just {\n\t\tnum, err := f(v).Unbox()\n\t\tif err != nil {\n\t\t\treturn ErrAoI(err)\n\t\t}\n\t\txss[i] = num\n\t}\n\n\treturn JustAoI(xss)\n}",
"func (c MethodsCollection) ActionUnarchive() pActionUnarchive {\n\treturn pActionUnarchive{\n\t\tMethod: c.MustGet(\"ActionUnarchive\"),\n\t}\n}",
"func (r *Resolver) Action() generated.ActionResolver { return &actionResolver{r} }",
"func (r RelayType) ActionName(tx *types.Transaction) string {\r\n\tvar relay RelayAction\r\n\terr := types.Decode(tx.Payload, &relay)\r\n\tif err != nil {\r\n\t\treturn \"unknown-relay-action-err\"\r\n\t}\r\n\tif relay.Ty == RelayActionCreate && relay.GetCreate() != nil {\r\n\t\treturn \"relayCreateTx\"\r\n\t}\r\n\tif relay.Ty == RelayActionRevoke && relay.GetRevoke() != nil {\r\n\t\treturn \"relayRevokeTx\"\r\n\t}\r\n\tif relay.Ty == RelayActionAccept && relay.GetAccept() != nil {\r\n\t\treturn \"relayAcceptTx\"\r\n\t}\r\n\tif relay.Ty == RelayActionConfirmTx && relay.GetConfirmTx() != nil {\r\n\t\treturn \"relayConfirmTx\"\r\n\t}\r\n\tif relay.Ty == RelayActionVerifyTx && relay.GetVerify() != nil {\r\n\t\treturn \"relayVerifyTx\"\r\n\t}\r\n\tif relay.Ty == RelayActionRcvBTCHeaders && relay.GetBtcHeaders() != nil {\r\n\t\treturn \"relay-receive-btc-heads\"\r\n\t}\r\n\treturn \"unknown\"\r\n}",
"func (s *ExperimentSpec) GetAction() ActionType {\n\tif s.ManualOverride != nil {\n\t\treturn s.ManualOverride.Action\n\t}\n\treturn ActionType(\"\")\n}",
"func (m *TamAction) UnmarshalJSON(raw []byte) error {\n\t// AO0\n\tvar dataAO0 struct {\n\t\tAffectedObjectType string `json:\"AffectedObjectType,omitempty\"`\n\n\t\tAlertType *string `json:\"AlertType,omitempty\"`\n\n\t\tIdentifiers []*TamIdentifiers `json:\"Identifiers\"`\n\n\t\tOperationType *string `json:\"OperationType,omitempty\"`\n\n\t\tQueries []*TamQueryEntry `json:\"Queries\"`\n\n\t\tType *string `json:\"Type,omitempty\"`\n\t}\n\tif err := swag.ReadJSON(raw, &dataAO0); err != nil {\n\t\treturn err\n\t}\n\n\tm.AffectedObjectType = dataAO0.AffectedObjectType\n\n\tm.AlertType = dataAO0.AlertType\n\n\tm.Identifiers = dataAO0.Identifiers\n\n\tm.OperationType = dataAO0.OperationType\n\n\tm.Queries = dataAO0.Queries\n\n\tm.Type = dataAO0.Type\n\n\treturn nil\n}",
"func Int2String(v int) string {\n\treturn strconv.Itoa(v)\n}",
"func (a *Action) BuildActionID() string { return actionID(a.buildID) }",
"func (_ EventFilterAliases) Action(p graphql.ResolveParams) (EventFilterAction, error) {\n\tval, err := graphql.DefaultResolver(p.Source, p.Info.FieldName)\n\tret, ok := EventFilterAction(val.(string)), true\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\tif !ok {\n\t\treturn ret, errors.New(\"unable to coerce value for field 'action'\")\n\t}\n\treturn ret, err\n}",
"func NewAction(payload interface{}) Action {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tfmt.Fprintf(os.Stderr, \"Error: %v\\n\", r)\n\t\t\tfmt.Fprintf(os.Stderr, \"Payload: %v\\n\", payload)\n\t\t}\n\t}()\n\n\tvar a Action\n\ta.payload = payload\n\ta.headers = make(map[string]string)\n\n\tfor k, v := range payload.(map[interface{}]interface{}) {\n\t\tswitch k {\n\t\tcase \"catch\":\n\t\t\ta.catch = v.(string)\n\t\tcase \"warnings\":\n\t\t\t// TODO\n\t\t\tcontinue\n\t\tcase \"allowed_warnings\":\n\t\t\t// TODO\n\t\t\tcontinue\n\t\tcase \"node_selector\":\n\t\t\tcontinue\n\t\tcase \"headers\":\n\t\t\tfor kk, vv := range v.(map[interface{}]interface{}) {\n\t\t\t\ta.headers[kk.(string)] = vv.(string)\n\t\t\t}\n\t\tdefault:\n\t\t\ta.method = k.(string)\n\t\t\ta.params = v.(map[interface{}]interface{})\n\t\t}\n\t}\n\n\treturn a\n}",
"func Get(actionType string) Action {\n\treturn actions[actionType]\n}",
"func (action *DelayAction) UndoAction() (err error) {\n\tlog.Info(\"EXECUTE UNDO RevertStepsAction\")\n\treturn nil\n}",
"func (o IPRuleOutput) Action() ActionPtrOutput {\n\treturn o.ApplyT(func(v IPRule) *Action { return v.Action }).(ActionPtrOutput)\n}",
"func (o IPRuleResponseOutput) Action() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v IPRuleResponse) *string { return v.Action }).(pulumi.StringPtrOutput)\n}",
"func GetAction(client *whisk.Client, actionName string) func() (*whisk.Action, error) {\n\treturn func() (*whisk.Action, error) {\n\t\taction, _, err := client.Actions.Get(actionName, true)\n\t\tif err == nil {\n\t\t\treturn action, nil\n\t\t}\n\t\treturn nil, err\n\t}\n}",
"func (fa *FieldAction) Unpack(v string) error {\n\tswitch strings.ToLower(v) {\n\tcase \"\", \"append\":\n\t\t*fa = ActionAppend\n\tcase \"replace\":\n\t\t*fa = ActionReplace\n\tdefault:\n\t\treturn errors.Errorf(\"invalid dns field action value '%v'\", v)\n\t}\n\treturn nil\n}",
"func (s DynamoDBv2Action) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func RestoreActionHandler(ctx domain.ExecutionContext, file string, restoreConfigFilesOpt *bool, restoreFilesOpt *bool, restoreDBOpt *bool, key *string) {\n\n\tisQuiet := !(restoreConfigFilesOpt == nil && restoreFilesOpt == nil && restoreDBOpt == nil)\n\n\tif ctx.IsProd() && !isQuiet {\n\t\tok := prompter.YN(\"You're in production. Are you sure you want to continue?\", false)\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t}\n\n\tif !isQuiet {\n\t\tfmt.Printf(\" %s Choose what you want to restore:\\n\", color.YellowString(\"▶\"))\n\t}\n\n\tconfigFilesRestoration := false\n\tif restoreConfigFilesOpt == nil && len(config.Get().ConfigFiles) > 0 {\n\t\tconfigFilesRestoration = prompter.YN(\" - configuration files\", false)\n\t} else if restoreConfigFilesOpt != nil {\n\t\tconfigFilesRestoration = *restoreConfigFilesOpt\n\t}\n\n\tfilesRestoration := false\n\tif restoreFilesOpt == nil && len(config.Get().BackupConfig.Files) > 0 {\n\t\tfilesRestoration = prompter.YN(\" - others files\", false)\n\t} else if restoreFilesOpt != nil {\n\t\tfilesRestoration = *restoreFilesOpt\n\t}\n\n\tdbRestoration := false\n\tif restoreDBOpt == nil && len(config.Get().BackupConfig.Databases) > 0 {\n\t\tdbRestoration = prompter.YN(\" - database dumps\", false)\n\t} else if restoreDBOpt != nil {\n\t\tdbRestoration = *restoreDBOpt\n\t}\n\n\tfmt.Printf(\"\\n\\n\")\n\n\tdpath, dfile := path.Split(file)\n\tisEncrypted := false\n\tencryptedFile := file\n\tdecryptedFile := \"\"\n\tif len(dfile) > 4 {\n\t\tencryptedExtension := dfile[len(dfile)-4:]\n\t\tif encryptedExtension != \".enc\" && key != nil && *key != \"\" {\n\t\t\tfmt.Printf(\" %s This is not a .enc file, skip deciphering\\n\", color.RedString(\"✗\"))\n\t\t}\n\t\tisEncrypted = encryptedExtension == \".enc\" && key != nil && *key != \"\"\n\t\tdecryptedFile = dpath + \".\" + dfile[:len(dfile)-4]\n\t}\n\n\t// decrypt in an hidden file\n\tif isEncrypted {\n\t\terr := decrypt(encryptedFile, decryptedFile, key)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tfile = decryptedFile\n\t}\n\n\terr := untar(ctx, file, configFilesRestoration, filesRestoration, dbRestoration)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\n\t// remove decrypted file\n\tif isEncrypted {\n\t\terr = os.Remove(decryptedFile)\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn\n\t\t}\n\t}\n\n\tfmt.Printf(\"\\n %s Done\\n\", color.GreenString(\"✓\"))\n}",
"func TestToInt(t *testing.T) {\n\t// conversion from false to 0\n\tresult := evaluator.ToInt(false)\n\tassert.Equal(t, 0, result)\n\n\t// conversion from true to 1\n\tresult = evaluator.ToInt(true)\n\tassert.Equal(t, 1, result)\n}",
"func (c UintConvert) Convert(item uint) (uint, error) { return c(item) }",
"func (o *WorkflowServiceItemActionInstance) GetAction() string {\n\tif o == nil || o.Action == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Action\n}",
"func UnmarshalActionList(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ActionList)\n\terr = core.UnmarshalPrimitive(m, \"total_count\", &obj.TotalCount)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"limit\", &obj.Limit)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"offset\", &obj.Offset)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"actions\", &obj.Actions, UnmarshalActionLite)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func (o FirewallPolicyRuleOutput) Action() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FirewallPolicyRule) *string { return v.Action }).(pulumi.StringPtrOutput)\n}",
"func (o IntentOutput) Action() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Intent) pulumi.StringOutput { return v.Action }).(pulumi.StringOutput)\n}",
"func (oe *OraErr) Action() string { return oe.action }",
"func (d UserData) ActionID() actions.ActionRef {\n\tval := d.ModelData.Get(models.NewFieldName(\"ActionID\", \"action_id\"))\n\tif !d.Has(models.NewFieldName(\"ActionID\", \"action_id\")) {\n\t\treturn *new(actions.ActionRef)\n\t}\n\treturn val.(actions.ActionRef)\n}",
"func NewHTTPAction(a map[interface{}]interface{}, dflt config.Default, playbook *config.TestDef) (HTTPAction, bool) {\n\tlog.Debugf(\"NewhttpAction=%v\", a)\n\tvalid := true\n\n\tif a[\"url\"] == \"\" || a[\"url\"] == nil {\n\t\tlog.Error(\"HttpAction must define a URL.\")\n\t\ta[\"url\"] = \"\"\n\t\tvalid = false\n\t} else {\n\t\t// Try to substitute already known variables: needed if variables are used\n\t\t// protocol://in the user:auth@server:port/ part of the URL\n\t\t// (cannot use SubstParams() here)\n\t\t// TODO: why here and not in DoHTTPRequest ? (same question for Mongo, SQL, etc...)\n\t\ttextData := a[\"url\"].(string)\n\t\tif strings.ContainsAny(textData, \"${\") {\n\t\t\tres := re.FindAllStringSubmatch(textData, -1)\n\t\t\tfor _, v := range res {\n\t\t\t\tlog.Debugf(\"playbook.Variables[%s]=%s\", v[1], playbook.Variables[v[1]])\n\t\t\t\tif _, err := playbook.Variables[v[1]]; !err {\n\t\t\t\t\tlog.Debugf(\"Variable ${%s} not set\", v[1])\n\t\t\t\t} else {\n\t\t\t\t\ttextData = strings.Replace(textData, \"${\"+v[1]+\"}\", url.QueryEscape(playbook.Variables[v[1]].Values[0]), 1) // TODO array\n\t\t\t\t}\n\t\t\t}\n\t\t\ta[\"url\"] = textData\n\t\t}\n\t\tvalid = setDefaultURL(a, dflt)\n\t\tlog.Debugf(\"setDefaultURL returned %v\", a)\n\t}\n\n\tif a[\"method\"] == nil || a[\"method\"] == \"\" {\n\t\tif dflt.Method == \"\" {\n\t\t\tlog.Error(\"Action has no Method and no default Method specified\")\n\t\t\ta[\"method\"] = \"\"\n\t\t\tvalid = false\n\t\t} else {\n\t\t\ta[\"method\"] = dflt.Method\n\t\t}\n\t} else if !config.IsValidHTTPMethod(a[\"method\"].(string)) {\n\t\tlog.Errorf(\"HttpAction must specify a valid HTTP method: GET, POST, PUT, HEAD or DELETE: %s\", a[\"method\"].(string))\n\t\tvalid = false\n\t}\n\tif a[\"title\"] == nil || a[\"title\"] == \"\" {\n\t\tlog.Error(\"HttpAction must define a title.\")\n\t\ta[\"title\"] = \"\"\n\t\tvalid = false\n\t}\n\tif a[\"use_http2\"] == nil {\n\t\ta[\"use_http2\"] = false\n\t} else {\n\t\tif _, ok := a[\"use_http2\"].(bool); !ok {\n\t\t\tlog.Error(\"use_http2 value must be a boolean (true or false)\")\n\t\t\ta[\"use_http2\"] = false\n\t\t\tvalid = false\n\t\t}\n\t}\n\n\t// Check formdatas\n\tnu := 0\n\tif a[\"body\"] != nil {\n\t\tnu++\n\t}\n\tif a[\"template\"] != nil {\n\t\taddEmbeddedFilename(a[\"template\"].(string))\n\t\tnu++\n\t}\n\tif a[\"upload_file\"] != nil {\n\t\taddEmbeddedFilename(a[\"upload_file\"].(string))\n\t\tnu++\n\t}\n\tif a[\"formdata\"] != nil {\n\t\tnu++\n\t}\n\tif nu > 1 {\n\t\tlog.Error(\"A HttpAction can contain a single 'body' or a 'template' or a 'formdata' or an 'upload_file'.\")\n\t\tvalid = false\n\t}\n\n\tvar storeCookie string\n\tif a[\"store_cookie\"] != nil && a[\"store_cookie\"].(string) != \"\" {\n\t\tstoreCookie = a[\"store_cookie\"].(string)\n\t}\n\n\theaders := make(map[string]string, 20)\n\tif a[\"headers\"] != nil {\n\t\t// Check the type : otherwise crashes if headers content is a list instead of a map...\n\t\tswitch v := a[\"headers\"].(type) {\n\t\tcase map[interface{}]interface{}:\n\t\t\t//for hdr, value := range a[\"headers\"].(map[interface{}]interface{}) {\n\t\t\tfor hdr, value := range v {\n\t\t\t\tlog.Debugf(\"Header Key=%s / Value=%s\", hdr.(string), value.(string))\n\t\t\t\theaders[strings.ToLower(hdr.(string))] = value.(string)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Fatalf(\"headers format is invalid: it should be a map (you probably set it as a list ?)\")\n\t\t}\n\t}\n\n\t// Set the Accept header if not set in Playbook\n\tif _, ok := headers[\"accept\"]; !ok 
{\n\t\theaders[\"accept\"] = \"text/html,application/json,application/xhtml+xml,application/xml,text/plain\"\n\t}\n\t// Set the User-Agent header if not set in Playbook\n\tif _, ok := headers[\"user-agent\"]; !ok {\n\t\tif is_daemon_mode {\n\t\t\theaders[\"user-agent\"] = \"chaingun-\" + injector_id\n\t\t} else {\n\t\t\theaders[\"user-agent\"] = \"chaingun\"\n\t\t}\n\t}\n\n\tformdatas, validData := NewFormDatas(a)\n\tresponseHandlers, validResp := NewResponseHandlers(a)\n\ttemplate, validTempl := getTemplate(a)\n\tbody, validBody := getBody(a)\n\tupload, validUpload := getFileToPUT(a)\n\n\tif !valid || !validResp || !validData || !validTempl || !validBody || !validUpload {\n\t\tlog.Errorf(\"Your YAML Playbook contains an invalid HTTPAction, see errors listed above.\")\n\t\tvalid = false\n\t}\n\n\thttpAction := HTTPAction{\n\t\tMethod: a[\"method\"].(string),\n\t\tUseHTTP2: a[\"use_http2\"].(bool),\n\t\tURL: a[\"url\"].(string),\n\t\tBody: body,\n\t\tTemplate: template,\n\t\tFormDatas: formdatas,\n\t\tHeaders: headers,\n\t\tTitle: a[\"title\"].(string),\n\t\tUploadFile: upload,\n\t\tStoreCookie: storeCookie,\n\t\tResponseHandlers: responseHandlers,\n\t}\n\n\tlog.Debugf(\"HTTPAction: %v\", httpAction)\n\n\treturn httpAction, valid\n}",
"func ActionStatus_Values() []string {\n\treturn []string{\n\t\tActionStatusUnknown,\n\t\tActionStatusInProgress,\n\t\tActionStatusCompleted,\n\t\tActionStatusFailed,\n\t\tActionStatusStopping,\n\t\tActionStatusStopped,\n\t}\n}",
"func NetworkservicesHttpRouteRulesActionToProto(o *networkservices.HttpRouteRulesAction) *networkservicespb.NetworkservicesHttpRouteRulesAction {\n\tif o == nil {\n\t\treturn nil\n\t}\n\tp := &networkservicespb.NetworkservicesHttpRouteRulesAction{}\n\tp.SetRedirect(NetworkservicesHttpRouteRulesActionRedirectToProto(o.Redirect))\n\tp.SetFaultInjectionPolicy(NetworkservicesHttpRouteRulesActionFaultInjectionPolicyToProto(o.FaultInjectionPolicy))\n\tp.SetRequestHeaderModifier(NetworkservicesHttpRouteRulesActionRequestHeaderModifierToProto(o.RequestHeaderModifier))\n\tp.SetResponseHeaderModifier(NetworkservicesHttpRouteRulesActionResponseHeaderModifierToProto(o.ResponseHeaderModifier))\n\tp.SetUrlRewrite(NetworkservicesHttpRouteRulesActionUrlRewriteToProto(o.UrlRewrite))\n\tp.SetTimeout(dcl.ValueOrEmptyString(o.Timeout))\n\tp.SetRetryPolicy(NetworkservicesHttpRouteRulesActionRetryPolicyToProto(o.RetryPolicy))\n\tp.SetRequestMirrorPolicy(NetworkservicesHttpRouteRulesActionRequestMirrorPolicyToProto(o.RequestMirrorPolicy))\n\tp.SetCorsPolicy(NetworkservicesHttpRouteRulesActionCorsPolicyToProto(o.CorsPolicy))\n\tsDestinations := make([]*networkservicespb.NetworkservicesHttpRouteRulesActionDestinations, len(o.Destinations))\n\tfor i, r := range o.Destinations {\n\t\tsDestinations[i] = NetworkservicesHttpRouteRulesActionDestinationsToProto(&r)\n\t}\n\tp.SetDestinations(sDestinations)\n\treturn p\n}",
"func (o FirewallPolicyRuleResponseOutput) Action() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FirewallPolicyRuleResponse) string { return v.Action }).(pulumi.StringOutput)\n}",
"func ToInt(i interface{}) int {\n\treturn cast.ToInt(i)\n}",
"func (ec *executionContext) marshalNAction2capactᚗioᚋcapactᚋpkgᚋengineᚋapiᚋgraphqlᚐAction(ctx context.Context, sel ast.SelectionSet, v Action) graphql.Marshaler {\n\treturn ec._Action(ctx, sel, &v)\n}"
] | [
"0.5708145",
"0.5499565",
"0.54050773",
"0.5264883",
"0.51558256",
"0.50154674",
"0.4927779",
"0.4892348",
"0.48327678",
"0.47832754",
"0.47781372",
"0.47771055",
"0.47565344",
"0.47364783",
"0.47332695",
"0.47315076",
"0.4730219",
"0.47273493",
"0.4720255",
"0.47181374",
"0.47104958",
"0.47005263",
"0.46756288",
"0.46648696",
"0.46515328",
"0.46313426",
"0.4607172",
"0.45930952",
"0.4589627",
"0.45755565",
"0.45736134",
"0.45593756",
"0.45567012",
"0.45488268",
"0.45411995",
"0.45376325",
"0.45275798",
"0.45125705",
"0.44773716",
"0.44380283",
"0.4430877",
"0.44192344",
"0.4405378",
"0.44050136",
"0.43983614",
"0.43943402",
"0.43905625",
"0.43902498",
"0.43894348",
"0.4387089",
"0.43754748",
"0.43752247",
"0.43732354",
"0.43708888",
"0.43684307",
"0.43678322",
"0.4362794",
"0.4362273",
"0.43588075",
"0.4354883",
"0.43499017",
"0.43420973",
"0.4341131",
"0.43353885",
"0.43181968",
"0.43160558",
"0.43095714",
"0.4303968",
"0.4301072",
"0.42861047",
"0.42797548",
"0.42766994",
"0.42731926",
"0.42728508",
"0.42694455",
"0.42671594",
"0.42658144",
"0.42657387",
"0.42650515",
"0.4262858",
"0.42627856",
"0.42570114",
"0.42432937",
"0.42417565",
"0.42400506",
"0.42376128",
"0.42356265",
"0.42330152",
"0.42326102",
"0.42321157",
"0.4227947",
"0.42277718",
"0.42214432",
"0.42206436",
"0.42202684",
"0.4216506",
"0.41996527",
"0.41977268",
"0.4196661",
"0.41792324"
] | 0.7415443 | 0 |
NewSyscallCounter creates a new SyscallCounter | func NewSyscallCounter() SyscallCounter {
return SyscallCounter(make(map[string]int))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewCounter() *StandardCounter {\n\treturn &StandardCounter{0}\n}",
"func NewCounter(options *CounterOptions) (c *Counter) {\n\tif options == nil {\n\t\toptions = new(CounterOptions)\n\t}\n\tif options.Subsystem == \"\" {\n\t\toptions.Subsystem = \"logging\"\n\t}\n\tif options.Name == \"\" {\n\t\toptions.Name = \"messages_total\"\n\t}\n\tif options.Help == \"\" {\n\t\toptions.Help = \"Number of log messages processed, partitioned by log level.\"\n\t}\n\tvector := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tNamespace: options.Namespace,\n\t\t\tSubsystem: options.Subsystem,\n\t\t\tName: options.Name,\n\t\t\tHelp: options.Help,\n\t\t},\n\t\t[]string{\"level\"},\n\t)\n\treturn &Counter{\n\t\tvector: vector,\n\t\temergencyCounter: vector.WithLabelValues(\"emergency\"),\n\t\talertCounter: vector.WithLabelValues(\"alert\"),\n\t\tcriticalCounter: vector.WithLabelValues(\"critical\"),\n\t\terrorCounter: vector.WithLabelValues(\"error\"),\n\t\twarningCounter: vector.WithLabelValues(\"warning\"),\n\t\tnoticeCounter: vector.WithLabelValues(\"notice\"),\n\t\tinfoCounter: vector.WithLabelValues(\"info\"),\n\t\tdebugCounter: vector.WithLabelValues(\"debug\"),\n\t}\n}",
"func NewCounter(namespace, subsystem, name, help string, labelMap map[string]string) prometheus.Counter {\n\treturn prometheus.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t\tHelp: help,\n\t\tConstLabels: labelMap,\n\t})\n}",
"func New() (*Syscalls, error) {\n\tsyscalls := &Syscalls{\n\t\tnr: make(map[int]*Syscall),\n\t\tname: make(map[string]*Syscall),\n\t}\n\n\tif err := syscalls.init(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn syscalls, nil\n}",
"func NewCounter() Counter {\n\tif !Enabled {\n\t\treturn NilCounter{}\n\t}\n\treturn &StandardCounter{0}\n}",
"func newHLLCounter() counter {\n\treturn hllpp.New()\n}",
"func New() *Counter {\n\treturn new(Counter)\n}",
"func NewCounter() *Counter {\n\treturn &Counter{\n\t\tval: 0,\n\t\tincreaseBy: 0,\n\t}\n}",
"func NewCounter() *Counter {\n\treturn &Counter{\n\t\tcount: map[string]int{},\n\t}\n}",
"func NewCounter() *Counter {\n\treturn &Counter{\n\t\tclient: redis2.RedisClient,\n\t}\n}",
"func NewSetCounter(value int) Counter {\n\treturn Counter{\n\t\tSemaphore: NewSemaphore(1),\n\t\tcounter: value,\n\t}\n}",
"func NewCounter(subsystem, name, help string, labels []string) *prometheus.CounterVec {\n\treturn promauto.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t},\n\t\tlabels,\n\t)\n}",
"func NewCounter() Counter {\n\treturn NewSetCounter(0)\n}",
"func New(value int) alertCounter {\n\treturn alertCounter(value)\n}",
"func New(value int) alertCounter {\n\treturn alertCounter(value)\n}",
"func NewCounter(subsystem, name string, tags []string, help string) Counter {\n\treturn NewCounterWithOpts(subsystem, name, tags, help, DefaultOptions)\n}",
"func NewCounter() *Counter {\n\treturn &Counter{}\n}",
"func NewCounter() *Counter {\n\treturn &Counter{}\n}",
"func NewCounter(countPerRound int32) (c *Counter) {\n\t// these will be populated when work arrives\n\tvar algos []int32\n\t// Start the counter at a random position\n\trand.Seed(time.Now().UnixNano())\n\tc = &Counter{}\n\tc.C.Store(int32(rand.Intn(int(countPerRound)+1) + 1))\n\tc.Algos.Store(algos)\n\tc.RoundsPerAlgo.Store(countPerRound)\n\tc.rpa = countPerRound\n\treturn\n}",
"func NewCounter(name string, desc string) *Counter {\n\treturn &Counter{name: name, desc: desc, value: 0}\n}",
"func NewCounter() *Counter {\n\tc := new(Counter)\n\tc.Reset()\n\treturn c\n}",
"func NewCounter(isNil bool) Counter {\n\tif isNil {\n\t\treturn NilCounter{}\n\t}\n\treturn &StandardCounter{0}\n}",
"func NewCounter(name string) *Counter {\n\treturn &Counter{name: name}\n}",
"func NewCounter(name string) metics.Counter {\n\tif !Enabled {\n\t\treturn new(metics.NilCounter)\n\t}\n\treturn metics.GetOrRegisterCounter(name, metics.DefaultRegistry)\n}",
"func NewCounter(w io.Writer, key string, interval time.Duration) metrics.Counter {\n\tc := make(chan string)\n\tgo fwd(w, key, interval, c)\n\treturn statsdCounter(c)\n}",
"func NewCounter(name string, options ...Option) Counter {\n\treturn newCounter(name, options...)\n}",
"func NewPCPCounter(val int64, name string, desc ...string) (*PCPCounter, error) {\n\td, err := newpcpMetricDesc(name, Int64Type, CounterSemantics, OneUnit, desc...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsm, err := newpcpSingletonMetric(val, d)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &PCPCounter{sm, sync.RWMutex{}}, nil\n}",
"func (p *influxProvider) NewCounter(name string) metrics.Counter {\n\treturn p.in.NewCounter(name)\n}",
"func New(p uint8) Counter {\n\tif p < 4 || p > 18 {\n\t\tpanic(\"hll: precision p must be in range [4,18]\")\n\t}\n\tm := int(1 << uint(p))\n\tc := Counter{\n\t\tp: p,\n\t\tbits: bitbucket.New(m, 6),\n\t}\n\tc.initParams()\n\n\treturn c\n}",
"func (r *Registry) NewCounter(name string) *Counter {\n\treturn r.NewCounterWithUnit(name, \"\")\n}",
"func NewCounter(httpClient *http.Client,\n\terrorMessage string,\n\tlogger *log.Logger,\n\toutputPhrase string) *Сounter {\n\treturn &Сounter{\n\t\thttpClient: httpClient,\n\t\terrorMessage: errorMessage,\n\t\tlogger: logger,\n\t\toutputPhrase: outputPhrase,\n\t}\n}",
"func (ms *MetricSet) NewCounter(name string) *CounterMetric {\n\tm := new(CounterMetric)\n\tm.name = name\n\tm.c = ms.metricChan\n\n\tif _, found := ms.states[name]; found {\n\t\tpanic(fmt.Sprintf(\"Metric '%s' already exists\"))\n\t}\n\tms.states[name] = &metricState{\n\t\tType: METRIC_COUNTER,\n\t\tValue: uint64(0),\n\t}\n\n\treturn m\n}",
"func NewWriteCounter(w io.Writer) (wc WriteCounter) {\n\n}",
"func (e *Exporter) NewCounter(name string, help string) *stats.Counter {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewCounter(name, help)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\tlvar := stats.NewCounter(\"\", help)\n\tif exists := e.createCountTracker(name, help, lvar, reuseOnDup, typeCounter); exists != nil {\n\t\treturn exists.(*stats.Counter)\n\t}\n\treturn lvar\n}",
"func NewCounter(opts Options) Counter {\n\treturn Counter{\n\t\tOptions: opts,\n\t\tmax: math.MinInt64,\n\t\tmin: math.MaxInt64,\n\t}\n}",
"func (fs *flowControlConnStats) newCounters(ffs *flowControlFlowStats) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tffs.toRelease = fs.bytesBufferedPerFlow\n}",
"func NewCountVec(namespace, subsystem, name string) *CountVec {\n\tcntVecCache.RLock()\n\tkey := namespace + subsystem + name + \"cnt\"\n\tif vec, ok := cntVecCache.m[key]; ok {\n\t\tcntVecCache.RUnlock()\n\t\treturn vec.(*CountVec)\n\t}\n\tcntVecCache.RUnlock()\n\tcntVecCache.Lock()\n\tif vec, ok := cntVecCache.m[key]; ok {\n\t\tcntVecCache.Unlock()\n\t\treturn vec.(*CountVec)\n\t}\n\tcntVecCache.m[key] = &CountVec{\n\t\tvec: NewCounterVec(namespace, subsystem, name, \"Database counter by handlers\", []string{\"status\", \"action\"}),\n\t}\n\tcntVecCache.Unlock()\n\treturn cntVecCache.m[key].(*CountVec)\n}",
"func NewCounter() *Counter {\n\treturn &Counter{\n\t\ttopicStats: make(map[string]*stats),\n\t}\n}",
"func NewCounter(prefix, name string, value int64, rate float32, tags map[string]string) *Counter {\n\treturn &Counter{\n\t\tPrefix: prefix,\n\t\tName: name,\n\t\tValue: value,\n\t\tRate: rate,\n\t\tTags: tags,\n\t}\n}",
"func NewSyscallService(ctxmgr *ContextManager) *SyscallService {\n\treturn &SyscallService{\n\t\tctxmgr: ctxmgr,\n\t}\n}",
"func NewCounters(name, help string, config interface{}, maxIdleSec int64) (*Counters, error) {\n\tcfg, err := parseCounterConfig(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\terr = validateCounterConfig(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Counters{\n\t\tmetricVec: newMetricVec(func(labels map[string]string) prometheus.Metric {\n\t\t\treturn &expiringCounter{prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\t\tHelp: help,\n\t\t\t\tName: name,\n\t\t\t\tConstLabels: labels,\n\t\t\t}),\n\t\t\t\t0,\n\t\t\t}\n\t\t}, maxIdleSec),\n\t\tCfg: cfg,\n\t}, nil\n}",
"func newSamplerBackendRateCounter() *samplerBackendRateCounter {\n\treturn &samplerBackendRateCounter{\n\t\tbackend: newMemoryBackend(),\n\t\texit: make(chan struct{}),\n\t\tstopped: make(chan struct{}),\n\t}\n}",
"func (s SyscallCounter) Add(name string, count int) {\n\ts[name] = count\n}",
"func CreateCounter(cxt context.Context) chan int {\n\tdestination := make(chan int)\n\n\tgo func() { //! go routine\n\t\tdefer close(destination)\n\t\tcounter := 1\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <- cxt.Done():\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tdestination <- counter\n\t\t\t\tcounter ++\n\t\t\t\ttime.Sleep(1 * time.Second) //! simulasi slow\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn destination\n}",
"func CounterNotifyEventNew(buf []byte) xgb.Event {\n\tv := CounterNotifyEvent{}\n\tb := 1 // don't read event number\n\n\tv.Kind = buf[b]\n\tb += 1\n\n\tv.Sequence = xgb.Get16(buf[b:])\n\tb += 2\n\n\tv.Counter = Counter(xgb.Get32(buf[b:]))\n\tb += 4\n\n\tv.WaitValue = Int64{}\n\tb += Int64Read(buf[b:], &v.WaitValue)\n\n\tv.CounterValue = Int64{}\n\tb += Int64Read(buf[b:], &v.CounterValue)\n\n\tv.Timestamp = xproto.Timestamp(xgb.Get32(buf[b:]))\n\tb += 4\n\n\tv.Count = xgb.Get16(buf[b:])\n\tb += 2\n\n\tif buf[b] == 1 {\n\t\tv.Destroyed = true\n\t} else {\n\t\tv.Destroyed = false\n\t}\n\tb += 1\n\n\tb += 1 // padding\n\n\treturn v\n}",
"func NewCounter(tokens []Token) Counter {\n\tcounter := Counter{}\n\tcounter.Data = make(map[string]int)\n\n\tcount := 0\n\tlast := \"\"\n\tfor _, token := range tokens {\n\t\tif token.Datum == last {\n\t\t\tcount++\n\t\t} else {\n\t\t\tcount = 1\n\t\t}\n\t\tlast = token.Datum\n\t\tcounter.Update(token)\n\t}\n\treturn counter\n}",
"func NewCounter(size int64) *Counter {\n\tr := &Counter{\n\t\tMaxSize: size,\n\t\tBuckets: make(map[int64]*bucket),\n\t\tMutex: &sync.RWMutex{},\n\t}\n\treturn r\n}",
"func NewReadCounter(w io.Reader) (rc ReadCounter) {\n\t//rc = w.\n}",
"func New() Interface {\n\treturn &procSysctl{}\n}",
"func New() Interface {\n\treturn &procSysctl{}\n}",
"func NewCounter(ttl time.Duration, callback func(float64)) (counter *Counter) {\n\tcounter = &Counter{\n\t\tentries: make(map[[sha256.Size]byte]time.Time),\n\t\tttl: ttl,\n\t\tcallback: callback,\n\t}\n\n\tgo func() {\n\t\tfor now := range time.Tick(time.Second) {\n\t\t\tcounter.mutex.Lock()\n\t\t\tfor k, v := range counter.entries {\n\t\t\t\tif delta := now.Sub(v); delta > counter.ttl {\n\t\t\t\t\tdelete(counter.entries, k)\n\t\t\t\t}\n\t\t\t}\n\t\t\tcounter.callback(float64(len(counter.entries)))\n\t\t\tcounter.mutex.Unlock()\n\t\t}\n\t}()\n\n\treturn\n}",
"func NewTPSCounter() *TPSCounter {\n\tbucketSize := 5\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := &TPSCounter{\n\t\tbucket: make([]uint64, bucketSize, bucketSize),\n\t\tbucketOut: make([]uint64, bucketSize, bucketSize),\n\t\tallCount: 0,\n\t\tbucketSize: bucketSize,\n\t\tcancel: cancel,\n\t}\n\ts.startT = time.Now()\n\tgo s.run(ctx)\n\treturn s\n}",
"func TestCounter(t *testing.T) {\n\tnodeName := \"192.168.1.11\"\n\tc := NewCounter()\n\tc.Plus(nodeName, \"undone\")\n\tc.Plus(nodeName, \"undone\")\n\tc.Plus(nodeName, \"websocket\")\n\tc.Minus(nodeName, \"undone\")\n\tc.Minus(nodeName, \"undone\")\n\tc.Minus(nodeName, \"undone\")\n\n\tfmt.Println(\"Counter.Plus++++++++\", c, Gcounter)\n\n}",
"func New() *SystemMetrics {\n\treturn &SystemMetrics{}\n}",
"func NewLabelCounter(subsystem, name, help string, labels ...string) *prometheus.CounterVec {\n\treturn prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: fmt.Sprintf(\"%s_%s\", name, total),\n\t\t\tHelp: help,\n\t\t\tConstLabels: nil,\n\t\t}, labels)\n}",
"func NewLabelCounter(subsystem, name, help string, labels ...string) *prometheus.CounterVec {\n\treturn prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tNamespace: namespace,\n\t\t\tSubsystem: subsystem,\n\t\t\tName: fmt.Sprintf(\"%s_%s\", name, total),\n\t\t\tHelp: help,\n\t\t\tConstLabels: nil,\n\t\t}, labels)\n}",
"func NewProcessMemoryCounter() *ProcessMemoryCounters {\n\t// it's really interesting, `unsafe.Sizeof` will report this as 8 bytes, but Windows doesn't like it if I don't\n\t// explicitly set at least 128. weird idiosyncrasy.\n\treturn &ProcessMemoryCounters{Cb: 128}\n}",
"func NewReadCounter(r io.Reader) ReadCounter {\n\treturn &readCounter{\n\t\treader: r,\n\t\tcounter: counter{ops: 0, bytes: 0},\n\t}\n}",
"func newOperationCountingStore() (*operationCountingStore) {\n\treturn &operationCountingStore{\n\t\tdelay: time.Duration(0),\n\t\tread: 0,\n\t\twrite: 0,\n\t}\n}",
"func (m *Metrics) NewCounter(name string, description string) prometheus.Counter {\n\n\tvar item prometheus.Counter\n\n\tmc := m.Get(name)\n\n\tif mc == nil {\n\n\t\toptions := prometheus.CounterOpts{\n\t\t\tName: name,\n\t\t\tHelp: description,\n\t\t}\n\n\t\titem = prometheus.NewCounter(options)\n\t\tprometheus.Register(item)\n\n\t\tmc = &metric{Name: name, Description: description, Type: Counter, Inner: item}\n\n\t\tm.Set(name, mc)\n\n\t} else {\n\t\titem = mc.Inner.(prometheus.Counter)\n\t}\n\n\treturn item\n}",
"func SystemcounterRead(buf []byte, v *Systemcounter) int {\n\tb := 0\n\n\tv.Counter = Counter(xgb.Get32(buf[b:]))\n\tb += 4\n\n\tv.Resolution = Int64{}\n\tb += Int64Read(buf[b:], &v.Resolution)\n\n\tv.NameLen = xgb.Get16(buf[b:])\n\tb += 2\n\n\t{\n\t\tbyteString := make([]byte, v.NameLen)\n\t\tcopy(byteString[:v.NameLen], buf[b:])\n\t\tv.Name = string(byteString)\n\t\tb += xgb.Pad(int(v.NameLen))\n\t}\n\n\treturn b\n}",
"func Constructor() HitCounter {\n\treturn HitCounter{&Node{0,0,nil,nil}}\n}",
"func NewPCPCounterVector(values map[string]int64, name string, desc ...string) (*PCPCounterVector, error) {\n\tvals := make(Instances)\n\tfor k, v := range values {\n\t\tvals[k] = v\n\t}\n\n\tim, err := generateInstanceMetric(vals, name, vals.Keys(), Int64Type, CounterSemantics, OneUnit, desc...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &PCPCounterVector{im, sync.RWMutex{}}, nil\n}",
"func NewCounterChannel() *CounterChannel {\n\tc := &CounterChannel{\n\t\treadCh: make(chan uint64),\n\t\twriteCh: make(chan int),\n\t}\n\n\t// The actual counter value lives inside this goroutine.\n\t// It can only be accessed for R/W via one of the channels.\n\tgo func() {\n\t\tvar count uint64 = 0\n\t\tfor {\n\t\t\tselect {\n\t\t\t// Reading from readCh is equivalent to reading count.\n\t\t\tcase c.readCh <- count:\n\t\t\t// Writing to the writeCh increments count.\n\t\t\tcase <-c.writeCh:\n\t\t\t\tcount++\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn c\n}",
"func NewCounterWithOpts(subsystem, name string, tags []string, help string, opts Options) Counter {\n\t// subsystem is optional\n\tif subsystem != \"\" && !opts.NoDoubleUnderscoreSep {\n\t\t// Prefix metrics with a _, prometheus will add a second _\n\t\t// It will create metrics with a custom separator and\n\t\t// will let us replace it to a dot later in the process.\n\t\tname = fmt.Sprintf(\"_%s\", name)\n\t}\n\n\tc := &promCounter{\n\t\tpc: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tSubsystem: subsystem,\n\t\t\t\tName: name,\n\t\t\t\tHelp: help,\n\t\t\t},\n\t\t\ttags,\n\t\t),\n\t}\n\ttelemetryRegistry.MustRegister(c.pc)\n\treturn c\n}",
"func newBaseCount() baseCount {\n\treturn baseCount{words: make(map[word]int)}\n}",
"func newFreqCount() freqCount {\n\treturn make(freqCount, 3000000)\n}",
"func New(conn redis.Conn, logger log.Logger) (s CountService) {\n\tvar svc CountService\n\t{\n\t\tsvc = &stubCountService{conn: conn, logger: logger}\n\t\tsvc = LoggingMiddleware(logger)(svc)\n\t}\n\treturn svc\n}",
"func NewStringCounter() StringCounter {\n\treturn StringCounter{\n\t\tcount: uint64(1),\n\t}\n}",
"func createCounterRequest(c *xgb.Conn, Id Counter, InitialValue Int64) []byte {\n\tsize := 16\n\tb := 0\n\tbuf := make([]byte, size)\n\n\tbuf[b] = c.Extensions[\"SYNC\"]\n\tb += 1\n\n\tbuf[b] = 2 // request opcode\n\tb += 1\n\n\txgb.Put16(buf[b:], uint16(size/4)) // write request size in 4-byte units\n\tb += 2\n\n\txgb.Put32(buf[b:], uint32(Id))\n\tb += 4\n\n\t{\n\t\tstructBytes := InitialValue.Bytes()\n\t\tcopy(buf[b:], structBytes)\n\t\tb += xgb.Pad(len(structBytes))\n\t}\n\n\treturn buf\n}",
"func NewCounterFromMap(m map[interface{}]int) *Counter {\n\tif m == nil {\n\t\tm = make(map[interface{}]int)\n\t}\n\treturn &Counter{\n\t\tcount: m,\n\t\tmut: new(sync.Mutex),\n\t}\n}",
"func (p *Provider) NewCounter(name string) metrics.Counter {\n\treturn p.newCounter(name)\n}",
"func newCounterEvent(e *events.Envelope) *Event {\n\tvar m = e.GetCounterEvent()\n\tvar r = LabelSet{\n\t\t\"cf_origin\": \"firehose\",\n\t\t\"deployment\": e.GetDeployment(),\n\t\t\"event_type\": e.GetEventType().String(),\n\t\t\"job\": e.GetJob(),\n\t\t\"job_index\": e.GetIndex(),\n\t\t\"origin\": e.GetOrigin(),\n\t}\n\tmsg := fmt.Sprintf(\"%s (delta=%d, total=%d)\", m.GetName(), m.GetDelta(), m.GetTotal())\n\treturn &Event{\n\t\tLabels: r,\n\t\tMsg: msg,\n\t}\n}",
"func newDestinationCounters(template string, handler string, adapter string) DestinationCounters {\n\tsuccessLabels := prometheus.Labels{\n\t\tmeshFunction: template,\n\t\thandlerName: handler,\n\t\tadapterName: adapter,\n\t\terrorStr: \"false\",\n\t}\n\n\tfailedLabels := prometheus.Labels{\n\t\tmeshFunction: template,\n\t\thandlerName: handler,\n\t\tadapterName: adapter,\n\t\terrorStr: \"true\",\n\t}\n\n\treturn DestinationCounters{\n\t\ttotalCount: dispatchCount.With(successLabels),\n\t\tduration: dispatchDuration.With(successLabels),\n\t\tfailedTotalCount: dispatchCount.With(failedLabels),\n\t\tfailedDuration: dispatchDuration.With(failedLabels),\n\t}\n}",
"func NewIntCounter(stream IntStream) *IntCounter {\n\treturn &IntCounter{\n\t\tstream: stream,\n\t\tlastTaken: math.MinInt64,\n\t\tlastCall: math.MinInt64,\n\t}\n}",
"func NewNum(numThreads int) *AIO {\n\ta := AIO{\n\t\trq: make(chan interface{}, ulimitNoFile()),\n\t}\n\n\tif numThreads < 1 {\n\t\tnumThreads = 1\n\t}\n\n\tfor i := 0; i < numThreads; i++ {\n\t\tt := newThread(a.rq)\n\t\tgo t.listen()\n\t}\n\n\treturn &a\n}",
"func newCounter(initialValue uint, endState []uint) *counter {\n\tc := counter{}\n\n\tc.initialValue = initialValue\n\tc.combinationUnsetIndex = int(initialValue) + 1\n\n\tc.state = make([]uint, 2)\n\tc.state[0] = initialValue\n\n\tif util.MaxUint(endState...) > initialValue {\n\t\tpanic(\"newCounter: endState cannot be reached with these parameters\")\n\t}\n\tif util.SumUint(endState...) != initialValue {\n\t\tpanic(\"newCounter: endState sum must be equal to initialValue\")\n\t}\n\tc.endState = endState\n\n\treturn &c\n}",
"func (store *Store) CreateCounter(name string) *Counter {\n\tcounter := newCounter()\n\tstore.Register(name, counter.ReadI, COUNTER)\n\treturn counter\n}",
"func NewWriteCounter(w io.Writer) WriteCounter {\n\treturn &writeCounter{\n\t\twriter: w,\n\t\tcounter: counter{ops: 0, bytes: 0},\n\t}\n}",
"func newTimer(namespace, subsystem, name string, labelNames []string, client *statsd.Statter, isPrometheusEnabled bool) *Timer {\n\topts := prometheus.HistogramOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t}\n\tvec := prometheus.NewHistogramVec(opts, labelNames)\n\tif isPrometheusEnabled {\n\t\tprometheus.MustRegister(vec)\n\t}\n\treturn &Timer{\n\t\twatcher: vec,\n\t\tlabels: labelNames,\n\t\tclient: client,\n\t\tprefix: strings.Join([]string{namespace, subsystem, name}, \".\"),\n\t}\n}",
"func spawn(ctx coretypes.Sandbox) (dict.Dict, error) {\n\tctx.Log().Debugf(\"inccounter.spawn\")\n\tstate := ctx.State()\n\n\tval, _, _ := codec.DecodeInt64(state.MustGet(VarCounter))\n\n\tname, ok, err := codec.DecodeString(ctx.Params().MustGet(VarName))\n\tif err != nil {\n\t\tctx.Log().Panicf(\"%v\", err)\n\t}\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"parameter 'name' wasnt found\")\n\t}\n\tdscr, ok, err := codec.DecodeString(ctx.Params().MustGet(VarDescription))\n\tif err != nil {\n\t\tctx.Log().Panicf(\"%v\", err)\n\t}\n\tif !ok {\n\t\tdscr = \"N/A\"\n\t}\n\tpar := dict.New()\n\tpar.Set(VarCounter, codec.EncodeInt64(val+1))\n\terr = ctx.DeployContract(Interface.ProgramHash, name, dscr, par)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// increase counter in newly spawned contract\n\thname := coretypes.Hn(name)\n\t_, err = ctx.Call(hname, coretypes.Hn(FuncIncCounter), nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx.Log().Debugf(\"inccounter.spawn: new contract name = %s hname = %s\", name, hname.String())\n\treturn nil, nil\n}",
"func (c IntCodec) New() unsafe.Pointer {\n\treturn unsafe.Pointer(new(int))\n}",
"func (e *Exporter) NewCounterFunc(name string, help string, f func() int64) *stats.CounterFunc {\n\tif e.name == \"\" || name == \"\" {\n\t\tv := stats.NewCounterFunc(name, help, f)\n\t\taddUnnamedExport(name, v)\n\t\treturn v\n\t}\n\tlvar := stats.NewCounterFunc(\"\", help, f)\n\t_ = e.createCountTracker(name, help, lvar, replaceOnDup, typeCounter)\n\treturn lvar\n}",
"func (ws *Watchers) newWatcher(sentinelCap int) int {\n\tws.mu.Lock()\n\tdefer ws.mu.Unlock()\n\tws.initRPCServer()\n\tif ws.lookup == nil {\n\t\tws.lookup = map[int]*watcher{}\n\t}\n\tw := &watcher{id: ws.nextID, sentinelCap: sentinelCap}\n\tws.nextID++\n\tws.lookup[w.id] = w\n\treturn w.id\n}",
"func New() *Stats {\n\tname, _ := os.Hostname()\n\n\tstats := &Stats{\n\t\tclosed: make(chan struct{}, 1),\n\t\tUptime: time.Now(),\n\t\tPid: os.Getpid(),\n\t\tResponseCounts: map[string]int{},\n\t\tTotalResponseCounts: map[string]int{},\n\t\tTotalResponseTime: time.Time{},\n\t\tHostname: name,\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-stats.closed:\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\tstats.ResetResponseCounts()\n\n\t\t\t\ttime.Sleep(time.Second * 1)\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn stats\n}",
"func newSystemService(rpc RpcClient) (*SystemService, error) {\r\n\tif rpc == nil {\r\n\t\treturn nil, errors.New(\"rpc client cannot be nil\")\r\n\t}\r\n\r\n\treturn &SystemService{\r\n\t\trpc: rpc,\r\n\t}, nil\r\n}",
"func newCountHashWriter(w io.Writer) *countHashWriter {\n\treturn &countHashWriter{w: w}\n}",
"func NewCount(name string, incValue, decValue int) (inc, dec *Count) {\n\tvalue := 0\n\tinc = &Count{}\n\tdec = &Count{}\n\tinc.name = name + \"_INC\"\n\tinc.value = &value\n\tinc.incDec = incValue\n\tdec.name = name + \"_DEC\"\n\tdec.value = inc.value\n\tdec.incDec = decValue\n\treturn inc, dec\n}",
"func newPerfProcessor(vcs vcsinfo.VCS, config *sharedconfig.IngesterConfig, client *http.Client) (ingestion.Processor, error) {\n\treturn &perfProcessor{\n\t\tstore: ptracestore.Default,\n\t\tvcs: vcs,\n\t}, nil\n}",
"func CreateCounter(c *xgb.Conn, Id Counter, InitialValue Int64) CreateCounterCookie {\n\tif _, ok := c.Extensions[\"SYNC\"]; !ok {\n\t\tpanic(\"Cannot issue request 'CreateCounter' using the uninitialized extension 'SYNC'. sync.Init(connObj) must be called first.\")\n\t}\n\tcookie := c.NewCookie(false, false)\n\tc.NewRequest(createCounterRequest(c, Id, InitialValue), cookie)\n\treturn CreateCounterCookie{cookie}\n}",
"func NewRegisteredCounter(name string, r Registry) Counter {\n\tc := NewCounter()\n\tif nil == r {\n\t\tr = DefaultRegistry\n\t}\n\tr.Register(name, c)\n\treturn c\n}",
"func(m *Monitor) RegisterCounter(nameSpace, subSystem, name string, labelNames []string) {\n\tif !m.readOnly {\n\t\tif m.instance != nameSpace {\n\t\t\tpanic(fmt.Sprintf(\"invaid name space %s\", nameSpace))\n\t\t}\n\t\tif len(labelNames) > 0 {\n\t\t\tcounter := prometheus.NewCounterVec(\n\t\t\t\tprometheus.CounterOpts{\n\t\t\t\t\tNamespace: nameSpace,\n\t\t\t\t\tSubsystem: subSystem,\n\t\t\t\t\tName: name,\n\t\t\t\t}, labelNames)\n\t\t\tif _, ok := m.counters.LoadOrStore(MKey{nameSpace, subSystem, name}, counter); ok {\n\t\t\t\tpanic(fmt.Sprintf(\"repeated registration counter for %s %s %s\", nameSpace, subSystem, name))\n\t\t\t} else {\n\t\t\t\tm.pusher.Collector(counter)\n\t\t\t}\n\t\t} else {\n\t\t\tcounter := prometheus.NewCounter(\n\t\t\t\tprometheus.CounterOpts{\n\t\t\t\t\tNamespace: nameSpace,\n\t\t\t\t\tSubsystem: subSystem,\n\t\t\t\t\tName: name,\n\t\t\t\t})\n\t\t\tif _, ok := m.counters.LoadOrStore(MKey{nameSpace, subSystem, name}, counter); ok {\n\t\t\t\tpanic(fmt.Sprintf(\"repeated registration counter for %s %s %s\", nameSpace, subSystem, name))\n\t\t\t} else {\n\t\t\t\tm.pusher.Collector(counter)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpanic(\"monitor already start\")\n\t}\n}",
"func (ds *DS) CreateCounter() error {\n\n\tc := ds.ctx\n\tcounter := &Counter{Rsvps: int64(0), Visitors: int64(0), Confirms: int64(0)}\n\tk := ds.datastoreKeyah(\"counter\", 1234567890)\n\t_, err := datastore.Put(c, k, counter)\n\tif err != nil {\n\t\tlog.Errorf(c, \"Couldn't create counter: %v\", err)\n\t\treturn err\n\t}\n\treturn nil\n\n}",
"func tcNew(n *ir.UnaryExpr) ir.Node {\n\tif n.X == nil {\n\t\t// Fatalf because the OCALL above checked for us,\n\t\t// so this must be an internally-generated mistake.\n\t\tbase.Fatalf(\"missing argument to new\")\n\t}\n\tl := n.X\n\tl = typecheck(l, ctxType)\n\tt := l.Type()\n\tif t == nil {\n\t\tn.SetType(nil)\n\t\treturn n\n\t}\n\tn.X = l\n\tn.SetType(types.NewPtr(t))\n\treturn n\n}",
"func NewCounts() *Counts {\n\treturn &Counts{\n\t\tCounts: map[string]int{},\n\t}\n}",
"func NewWriteCounter(total int) *WriteCounter {\r\n\tb := pb.New(total)\r\n\tb.SetRefreshRate(RefreshRate)\r\n\tb.ShowTimeLeft = true\r\n\tb.ShowSpeed = true\r\n\tb.SetUnits(pb.U_BYTES)\r\n\r\n\treturn &WriteCounter{\r\n\t\tbar: b,\r\n\t}\r\n}",
"func libc_getpid() int32",
"func New(qps int64) *limiter {\n\tif qps <= 0 {\n\t\treturn nil\n\t}\n\n\trl := &limiter{\n\t\tqps: qps,\n\t}\n\trl.current = make(map[string]int64, 0)\n\n\t// launch a goroutine to reset the counter every second\n\tgo rl.reset()\n\n\treturn rl\n}",
"func New(counter metrics.Counter, latency metrics.Histogram, logger log.Logger) Logger {\n\treturn Logger{\n\t\tcallUpdate: make(chan interface{}),\n\t\tcallError: make(chan error),\n\t\trequestCount: counter,\n\t\trequestLatency: latency,\n\t\tlogger: logger,\n\t}\n}",
"func NewPerCPU(m *metrics.MetricContext, name string) *PerCPU {\n\to := new(PerCPU)\n\n\t// initialize all metrics and register them\n\tmisc.InitializeMetrics(o, m, \"cpustat.\"+name, true)\n\treturn o\n}"
] | [
"0.65613407",
"0.62249815",
"0.622236",
"0.620934",
"0.6184915",
"0.61593485",
"0.61469537",
"0.6113768",
"0.6087508",
"0.6055666",
"0.60481656",
"0.6030751",
"0.60302186",
"0.6028467",
"0.6028467",
"0.5996901",
"0.5976532",
"0.5976532",
"0.5974575",
"0.59735125",
"0.5947771",
"0.594265",
"0.5821122",
"0.58014315",
"0.57707614",
"0.5723891",
"0.5689284",
"0.56629026",
"0.56507593",
"0.5599425",
"0.5597032",
"0.55855244",
"0.55685616",
"0.55559105",
"0.5550789",
"0.5545235",
"0.55349636",
"0.55326957",
"0.5526605",
"0.5521664",
"0.5504388",
"0.5410157",
"0.5404979",
"0.539504",
"0.53834087",
"0.5378202",
"0.5357798",
"0.5346037",
"0.5339828",
"0.5339828",
"0.5331559",
"0.53294283",
"0.5316334",
"0.5312377",
"0.5267079",
"0.5267079",
"0.52214295",
"0.5218866",
"0.52163255",
"0.5211668",
"0.51888347",
"0.5151688",
"0.5145262",
"0.5124786",
"0.5090101",
"0.5062347",
"0.5043105",
"0.5036109",
"0.5018863",
"0.50029856",
"0.49999696",
"0.49935958",
"0.49913606",
"0.49906048",
"0.49844754",
"0.49563408",
"0.49551836",
"0.49427354",
"0.49417162",
"0.49377313",
"0.49351928",
"0.49239036",
"0.49076077",
"0.49010786",
"0.4886921",
"0.4882119",
"0.48719758",
"0.4863735",
"0.48621127",
"0.48576316",
"0.48543608",
"0.4839534",
"0.48271778",
"0.4826082",
"0.47707105",
"0.4761676",
"0.47582126",
"0.47301447",
"0.47187552",
"0.47136304"
] | 0.8822192 | 0 |
Add adds single counter to SyscallCounter | func (s SyscallCounter) Add(name string, count int) {
s[name] = count
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Counter) Add(mp string) int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tc.count[mp]++\n\tlogrus.Debugf(\"Mount count increased to %d for %q\", c.count[mp], mp)\n\treturn c.count[mp]\n}",
"func (c *Counter) AddCount(mp string, n int) int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tc.count[mp] += n\n\tlogrus.Debugf(\"Mount count increased to %d for %q\", c.count[mp], mp)\n\treturn c.count[mp]\n}",
"func (this *List) Add(c Counter) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n\n _, ok := this.counters[c.Name()]\n if ok {\n return fmt.Errorf(\"Counter already exists\")\n }\n\n this.counters[c.Name()] = c\n\n return nil\n}",
"func (c *Counter) Add() {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tc.cnt++\n}",
"func (c *CollectingCounter) Add(delta float64) {\n\tc.CounterValue += delta\n}",
"func (r *WaitingCounter) Add(c int32) {\n\t(*atomic.Int32)(r).Add(c)\n}",
"func (counter *counter) addCount(n int64) {\n\tcounter.Lock()\n\tdefer counter.Unlock()\n\n\tif n <= 0 {\n\t\treturn\n\t}\n\n\t// Add the ops read and increase the count of operation.\n\t// bytes Number of ops.\n\tcounter.bytes += n\n\tcounter.ops++\n}",
"func (c *Counter) Add(delta uint64) (value uint64) {\n\treturn atomic.AddUint64(c.addr(), delta)\n}",
"func (h *Int64CounterHandle) Add(ctx context.Context, value int64) {\n\th.recordOne(ctx, core.NewInt64Number(value))\n}",
"func (c *Counter) Add(delta uint64) {\n\tatomic.AddUint64(&c.value, delta)\n}",
"func (ms *metricSender) AddToCounter(name string, delta uint64) error {\n\treturn ms.eventEmitter.Emit(&events.CounterEvent{Name: &name, Delta: &delta})\n}",
"func (c *Counter) Add(delta int64) (new int64) {\n\tc.window.Add(delta)\n\treturn c.global.Add(delta)\n}",
"func (r *refCounter) Add(item stackitem.Item) {\n\tif r == nil {\n\t\treturn\n\t}\n\t*r++\n\n\tirc, ok := item.(rcInc)\n\tif !ok || irc.IncRC() > 1 {\n\t\treturn\n\t}\n\tswitch t := item.(type) {\n\tcase *stackitem.Array, *stackitem.Struct:\n\t\tfor _, it := range item.Value().([]stackitem.Item) {\n\t\t\tr.Add(it)\n\t\t}\n\tcase *stackitem.Map:\n\t\tfor i := range t.Value().([]stackitem.MapElement) {\n\t\t\tr.Add(t.Value().([]stackitem.MapElement)[i].Value)\n\t\t}\n\t}\n}",
"func (c *Counter) Add(n int) {\n\tc.lock.Lock()\n\tc.num += int64(n)\n\tc.lock.Unlock()\n}",
"func (s *simplePromCounter) Add(val float64) {\n\ts.c.Add(val)\n}",
"func AddToCounter(name string, delta uint64) error {\n\treturn metricSender.AddToCounter(name, delta)\n}",
"func (s *Stats) Add() {\n\ts.mutex.Lock()\n\ts.Unknown += 1\n\ts.mutex.Unlock()\n}",
"func add(context *Context) {\n x := context.opcode & 0x0F00 >> 8\n y := context.opcode & 0x00F0 >> 4\n sum := uint16(context.cpu.v[x]) + uint16(context.cpu.v[y])\n if sum > 255 {\n context.cpu.v[0xF] = 1\n } else {\n context.cpu.v[0xF] = 0\n }\n context.cpu.v[x] = byte(sum & 0xFF)\n}",
"func (c *Context) Inc(ctr string) {\n\tvalue := c.counters[ctr].value\n\tc.counters[ctr] = change{\n\t\tvalue: value + 1,\n\t\tlast: time.Now(),\n\t}\n}",
"func (c *Int64Counter) Add(ctx context.Context, value int64, labels LabelSet) {\n\tc.recordOne(ctx, core.NewInt64Number(value), labels)\n}",
"func (l *Librato) AddCounter(c Counter) {\n\tselect {\n\tcase l.publisher.measures <- c:\n\tdefault:\n\t\tl.publisher.reportError(fmt.Errorf(\"counter could not be added to the metrics queue\"))\n\t}\n}",
"func (cs *UnsafeCounterIndex) Add(n int64, values ...string) int64 {\n\th := vhash(values)\n\tc := cs.findOrCreate(h, values)\n\tc.Count += n\n\treturn c.Count\n}",
"func (f *falconGauge) Add(value int64) {\n\tfalcon.SetCounterCount(f.name, value)\n}",
"func (c *Counter) Add(delta uint64) {\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.ticksCurrent = atomic.LoadInt64(&ticks)\n\tc.v += delta\n\n\t// initialize previous values to current if counter\n\t// overflows or if this is our first value\n\tif c.ticksPrevious == 0 || c.p > c.v {\n\t\tc.p = c.v\n\t\tc.ticksPrevious = c.ticksCurrent\n\t}\n}",
"func (errors *Errors) Add(u uint64) uint64 {\n\terrCount := errors.counter.Add(u)\n\terrors.checkMaxError(errCount)\n\treturn errCount\n}",
"func (c *counter) AddOne() int {\n\tc.count++\n\treturn c.count\n}",
"func (e *expiringCounter) Add(val float64) {\n\te.Counter.Add(val)\n\te.lastModSec = time.Now().Unix()\n}",
"func (c *Counter) Add(key string, value int64) int64 {\n\tcount, loaded := c.m.LoadOrStore(key, &value)\n\tif loaded {\n\t\treturn atomic.AddInt64(count.(*int64), value)\n\t}\n\treturn *count.(*int64)\n}",
"func addi(context *Context) {\n x := context.opcode & 0x0F00 >> 8\n context.cpu.i += uint16(context.cpu.v[x])\n}",
"func IncCount(payload []byte) ([]byte, error) {\n\tgo func() {\n\t\t// Update custom metric\n\t\t_, err := wapc.HostCall(\"tarmac\", \"metrics\", \"counter\", []byte(`{\"name\":\"kv_counter_inc_called\"}`))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t}()\n\n\ti := 0\n\n\t// Fetch current value from Database\n\tb, err := wapc.HostCall(\"tarmac\", \"kvstore\", \"get\", []byte(`{\"key\":\"kv_counter_example\"}`))\n\tif err == nil {\n\t\tj, err := fastjson.ParseBytes(b)\n\t\tif err != nil {\n\t\t\treturn []byte(fmt.Sprintf(`{\"status\":{\"code\":500,\"status\":\"Failed to call parse json - %s\"}}`, err)), nil\n\t\t}\n\n\t\t// Check if value is missing and return 0 if empty\n\t\tif j.GetInt(\"status\", \"code\") == 200 {\n\t\t\ts, err := base64.StdEncoding.DecodeString(string(j.GetStringBytes(\"data\")))\n\t\t\tif err == nil {\n\t\t\t\tn, err := strconv.Atoi(fmt.Sprintf(\"%s\", s))\n\t\t\t\tif err == nil {\n\t\t\t\t\ti = n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Increment Counter\n\ti += 1\n\ts := strconv.Itoa(i)\n\n\t// Store new Counter value\n\t_, err = wapc.HostCall(\"tarmac\", \"kvstore\", \"set\", []byte(fmt.Sprintf(`{\"key\":\"kv_counter_example\",\"data\":\"%s\"}`, base64.StdEncoding.EncodeToString([]byte(s)))))\n\tif err != nil {\n\t\treturn []byte(fmt.Sprintf(`{\"status\":{\"code\":500,\"status\":\"Failed to call host callback - %s\"}}`, err)), nil\n\t}\n\n\t// Return Counter value to user\n\treturn []byte(fmt.Sprintf(`{\"payload\":\"%s\",\"status\":{\"code\":200,\"status\":\"Success\"}}`, base64.StdEncoding.EncodeToString([]byte(s)))), nil\n}",
"func (c *Counter) Add(n uint64) uint64 {\n\treturn atomic.AddUint64(&c.value, n)\n}",
"func (c *StandardCounter) Inc(i int64) {\n\tatomic.AddInt64(&c.count, i)\n}",
"func (c *StandardCounter) Inc(i int64) {\n\tatomic.AddInt64(&c.count, i)\n}",
"func (c *StandardCounter) Inc(i int64) {\n\tatomic.AddInt64(&c.count, i)\n}",
"func (h *Float64CounterHandle) Add(ctx context.Context, value float64) {\n\th.recordOne(ctx, core.NewFloat64Number(value))\n}",
"func (c *Counter) Add(i float64) {\n\tif i == 0 {\n\t\treturn\n\t}\n\n\tc.Mutex.Lock()\n\tdefer c.Mutex.Unlock()\n\n\tb := c.getCurrentBucket()\n\tb.Value += i\n\tc.Sum +=i\n\tc.removeExpiredBuckets()\n}",
"func (c *Counter) Add(elem interface{}) {\n\tc.mut.Lock()\n\tdefer c.mut.Unlock()\n\tc.count[elem] += 1\n}",
"func (m *MetricsManager) AddCounter(name, help string, labelNames []string) error {\n\tvar allLabels sort.StringSlice\n\tfor k := range m.commonLabels {\n\t\tallLabels = append(allLabels, k)\n\t}\n\tallLabels = append(allLabels, labelNames...)\n\tallLabels.Sort()\n\n\tmetric := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t},\n\t\tallLabels,\n\t)\n\tif err := prometheus.Register(metric); err != nil {\n\t\treturn err\n\t}\n\n\tpartialMetric, err := metric.CurryWith(m.commonLabels)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.counters[name] = &Counter{\n\t\tcreationTime: time.Now(),\n\t\tmetric: partialMetric,\n\t}\n\treturn nil\n}",
"func (c *Counter) Add(labels map[string]string, value float64) error {\n\tm, err := c.metric.GetMetricWith(labels)\n\tif err != nil {\n\t\treturn err\n\t}\n\tm.Add(value)\n\treturn nil\n}",
"func AddCounterNode(w http.ResponseWriter, r *http.Request) {\n\t//params := mux.Vars(r)\n\tvar node CounterNode\n\t_ = json.NewDecoder(r.Body).Decode(&node)\n\tvar nodeExists = false\n\tfor _, value := range counternodes {\n\t\tif value == node.ID {\n\t\t\tnodeExists = true\n\t\t\tbreak\n\t\t}\n\t}\n\t//Dont add the node if its already exist\n\tif nodeExists == false {\n\t\tcounternodes[node.EndPoint] = node.ID\n\t\t\n\t\tfmt.Println(\"node added : \" + node.EndPoint + \" : \" + node.ID)\n\t\tw.WriteHeader(http.StatusOK)\n\t} else {\n\t\tfmt.Println(\"node already exists : \" + node.EndPoint + \" : \" + node.ID)\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n\n\t//json.NewEncoder(w).Encode(Message{ID : id, Count : read(id)})\n}",
"func (ac *Accumulator) AddCounter(measurement string, fields map[string]interface{},\n\ttags map[string]string, t ...time.Time) {\n\t// as of right now metric always returns a nil error\n\tm, _ := metric.New(measurement, tags, fields, getTime(t), telegraf.Counter)\n\tac.AddMetric(m)\n}",
"func (s *DevStat) CounterInc(id DevStatType, n int64) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\ts.Counters[id] = s.Counters[id].(int) + int(n)\n}",
"func (gtc *GroupTagCounter) AddCounter(c TagCounter) {\n\tgtc.tagcounters = append(gtc.tagcounters, c)\n}",
"func (gc *GroupCounter) Add(a float64) {\n\tfor _, c := range gc.counters {\n\t\tc.Add(a)\n\t}\n}",
"func (m *SafeMode) Add(tctx *tcontext.Context, n int32) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\treturn m.setCount(tctx, m.count+n)\n}",
"func TestCounter(t *testing.T) {\n\tnodeName := \"192.168.1.11\"\n\tc := NewCounter()\n\tc.Plus(nodeName, \"undone\")\n\tc.Plus(nodeName, \"undone\")\n\tc.Plus(nodeName, \"websocket\")\n\tc.Minus(nodeName, \"undone\")\n\tc.Minus(nodeName, \"undone\")\n\tc.Minus(nodeName, \"undone\")\n\n\tfmt.Println(\"Counter.Plus++++++++\", c, Gcounter)\n\n}",
"func (pm *Manager) Add(description string, cmd *exec.Cmd) int64 {\n\tpm.mutex.Lock()\n\tpid := pm.counter + 1\n\tpm.Processes[pid] = &Process{\n\t\tPID: pid,\n\t\tDescription: description,\n\t\tStart: time.Now(),\n\t\tCmd: cmd,\n\t}\n\tpm.counter = pid\n\tpm.mutex.Unlock()\n\n\treturn pid\n}",
"func (s *Stats) add(d *Stats) {\n\ts.ItemCount += d.ItemCount\n\ts.EffectiveSize += d.EffectiveSize\n\ts.UsedSize += d.UsedSize\n\ts.Readers += d.Readers\n\ts.MarkedDeleted += d.MarkedDeleted\n\ts.Writers += d.Writers\n\ts.WritersBlocked += d.WritersBlocked\n}",
"func Add(a, b int) int {\n\tklog.Infof(\"a:%d, b:%d\", a, b)\n\treturn a + b\n}",
"func (r *Registers) PushProgramCounter() {\n\tr.stack[r.sp] = r.pc\n\tr.sp++\n}",
"func (cm *customMetrics) AddCounter(namespace, subsystem, name, help, internalKey string) {\n\n\tcm.counters[internalKey] = promauto.NewCounter(prometheus.CounterOpts{\n\t\tNamespace: namespace,\n\t\tSubsystem: subsystem,\n\t\tName: name,\n\t\tHelp: help,\n\t})\n}",
"func (s SyscallCounter) AddRange(m map[string]int) {\n\tfor k, v := range m {\n\t\ts[k] = v\n\t}\n}",
"func (tc *AsynchronousTokenCounter) Add() error {\n\ttc.mutex.Lock()\n\tdefer tc.mutex.Unlock()\n\n\tif tc.finished {\n\t\treturn trace.Errorf(\"Count is already finished, cannot add more content\")\n\t}\n\ttc.count += 1\n\treturn nil\n}",
"func (muo *ModelUpdateOne) AddCounter(i int64) *ModelUpdateOne {\n\tmuo.mutation.AddCounter(i)\n\treturn muo\n}",
"func add_counter(id int) {\r\n\tdefer wg.Done()\r\n\t\r\n\tfor i:=0; i<2; i++ {\r\n\t\t//fmt.Printf(\"in 11 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t\tvalue := counter\r\n\t\t//grouting 退出,放回队列?\r\n\t\truntime.Gosched()\r\n\t\tvalue ++\r\n\t\tcounter = value\r\n\t\t\r\n\t\tfmt.Printf(\"in 22 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t}\r\n\t\r\n\t/*\r\n\tfor i:=0; i<2; i++ {\r\n\t\tfmt.Printf(\"in 11 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t\t//grouting 退出,放回队列?\r\n\t\truntime.Gosched()\r\n\t\tcounter++\r\n\t\t\r\n\t\tfmt.Printf(\"in 22 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t}\r\n\t*/\r\n}",
"func (c ShmCounter) Inc(i int64) {\n\tatomic.AddInt64((*int64)(unsafe.Pointer(c)), i)\n}",
"func (data *Data) AddCount(cnt float64) {\n\tdata.Lock()\n\tdefer data.Unlock()\n\n\tdata.count += cnt\n\tdata.totalCount += cnt\n}",
"func AddCounter(counterVec *prometheus.CounterVec, v float64) {\n\tif counterVec == nil {\n\t\treturn\n\t}\n\tcounterVec.With(nil).Add(v)\n}",
"func PdhAddCounter(queryHdl syscall.Handle, CounterPath string) (syscall.Handle, error) {\n\tvar counterHdl syscall.Handle\n\n\tr, _, err := procPdhAddCounter.Call(\n\t\t\t\t\tuintptr(queryHdl),\n\t\t\t\t\tuintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(CounterPath))),\n\t\t\t\t\t0,\n\t\t\t\t\tuintptr(unsafe.Pointer(&counterHdl)))\n\n\tif r != 0 {\n\t\treturn 0, err\n\t}\n\n\treturn counterHdl, nil\n}",
"func (NilCounter) Inc(i int64) {}",
"func (NilCounter) Inc(i int64) {}",
"func (lmt *RateLimiter) AddCall(id string) int {\n\tvar level int\n\tcount, err := lmt.store.Increment(id)\n\tif err != nil {\n\t\treturn -1\n\t}\n\n\tif count > lmt.threshold {\n\t\tlevel++\n\n\t\tif lmt.stack != nil {\n\t\t\tlevel += lmt.stack.AddCall(id)\n\t\t}\n\t}\n\n\treturn level\n}",
"func (sc *DurationCounter) Add(tv *TimedValue) {\n\tsc.MinuteCounter.Add(tv.t.Second(), tv.val)\n\tsc.HourCounter.Add(tv.t.Minute(), tv.val)\n\tsc.DayCounter.Add(tv.t.Hour(), tv.val)\n\tsc.WeekCounter.Add(int(tv.t.Weekday()), tv.val)\n}",
"func (c *CountResult) Add(k string, v int) {\n\tif c.m == nil {\n\t\tc.m = make(map[string]int)\n\t}\n\tc.m[k] = v\n}",
"func (vm *VM) opAdd(instr []uint16) int {\n\ta, b, c := vm.getAbc(instr)\n\n\tvm.registers[a] = (b + c) % 32768\n\treturn 4\n}",
"func (f *FFlags) AddRetryCount(procSign string) (int, error) {\n\tvar data = 1\n\tretryFile := f.getRetryFile(procSign)\n\tif util.FileExists(retryFile) {\n\t\t// read data first\n\t\tq, err := ioutil.ReadFile(retryFile)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tr, _ := strconv.Atoi(string(q))\n\t\tdata = r + 1\n\t}\n\n\tdataStr := strconv.Itoa(data)\n\tif err := util.WriteFileNS(retryFile, false, []byte(dataStr)); err != nil {\n\t\treturn 0, err\n\t}\n\treturn data, nil\n}",
"func NewSyscallCounter() SyscallCounter {\n\treturn SyscallCounter(make(map[string]int))\n}",
"func (running *LRunning) add(delta int32) int32 {\n\t// Calling AddInt32() function\n\t// with its parameter\n\treturn atomic.AddInt32((*int32)(running), delta)\n}",
"func (k Keeper) Add(ctx sdk.Context, address sdk.AccAddress, amount sdk.Int) (sdk.Int, error) {\n\tvalue, err := k.Get(ctx, address)\n\tif err != nil {\n\t\treturn sdk.Int{}, sdkerrors.Wrapf(sdkerrors.ErrInvalidRequest, err.Error())\n\t}\n\tres := value.Add(amount)\n\t// emit event\n\tctx.EventManager().EmitEvent(\n\t\tsdk.NewEvent(\n\t\t\ttypes.EventType,\n\t\t\tsdk.NewAttribute(sdk.AttributeKeyAction, types.AttributeActionAdded),\n\t\t\tsdk.NewAttribute(types.AttributeKeyAddress, address.String()),\n\t\t\tsdk.NewAttribute(types.AttributeKeyAmount, amount.String()),\n\t\t),\n\t)\n\treturn k.set(ctx, address, res)\n}",
"func (c *Float64Counter) Add(ctx context.Context, value float64, labels LabelSet) {\n\tc.recordOne(ctx, core.NewFloat64Number(value), labels)\n}",
"func (s *counts) Add(other counts) {\n\ts.chars += other.chars\n\ts.words += other.words\n\ts.lines += other.lines\n\ts.Added++\n}",
"func (c *Counter) Increment(addend int64) {\n\tatomic.AddInt64(&c.value, addend)\n}",
"func (c *PCPCounterVector) Inc(inc int64, instance string) error {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tif inc < 0 {\n\t\treturn errors.New(\"increment cannot be negative\")\n\t}\n\n\tif inc == 0 {\n\t\treturn nil\n\t}\n\n\tv, err := c.valInstance(instance)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn c.setInstance(v.(int64)+inc, instance)\n}",
"func (f *Filter) AddSyscall(nr interface{}, ns int) error {\n\trule, err := NewFilterRule(\n\t\tFilterRuleSetModeSyscall(),\n\t\tFilterRuleSetSyscall(nr),\n\t\tFilterRuleSetPidNamespace(ns),\n\t\tFilterRuleSetActionAllow())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn f.AddRule(rule)\n}",
"func (cc *charCounts) Add(c rune) {\n\tfor _, count := range cc.counts {\n\t\tif count.char == c {\n\t\t\tcount.count++\n\t\t\treturn\n\t\t}\n\t}\n\tnewItem := new(charCount)\n\tnewItem.char, newItem.count = c, 1\n\tcc.counts = append(cc.counts, newItem)\n}",
"func(m *Monitor) RegisterCounter(nameSpace, subSystem, name string, labelNames []string) {\n\tif !m.readOnly {\n\t\tif m.instance != nameSpace {\n\t\t\tpanic(fmt.Sprintf(\"invaid name space %s\", nameSpace))\n\t\t}\n\t\tif len(labelNames) > 0 {\n\t\t\tcounter := prometheus.NewCounterVec(\n\t\t\t\tprometheus.CounterOpts{\n\t\t\t\t\tNamespace: nameSpace,\n\t\t\t\t\tSubsystem: subSystem,\n\t\t\t\t\tName: name,\n\t\t\t\t}, labelNames)\n\t\t\tif _, ok := m.counters.LoadOrStore(MKey{nameSpace, subSystem, name}, counter); ok {\n\t\t\t\tpanic(fmt.Sprintf(\"repeated registration counter for %s %s %s\", nameSpace, subSystem, name))\n\t\t\t} else {\n\t\t\t\tm.pusher.Collector(counter)\n\t\t\t}\n\t\t} else {\n\t\t\tcounter := prometheus.NewCounter(\n\t\t\t\tprometheus.CounterOpts{\n\t\t\t\t\tNamespace: nameSpace,\n\t\t\t\t\tSubsystem: subSystem,\n\t\t\t\t\tName: name,\n\t\t\t\t})\n\t\t\tif _, ok := m.counters.LoadOrStore(MKey{nameSpace, subSystem, name}, counter); ok {\n\t\t\t\tpanic(fmt.Sprintf(\"repeated registration counter for %s %s %s\", nameSpace, subSystem, name))\n\t\t\t} else {\n\t\t\t\tm.pusher.Collector(counter)\n\t\t\t}\n\t\t}\n\t} else {\n\t\tpanic(\"monitor already start\")\n\t}\n}",
"func (DefaultCounter) SendTotal(int) {\n}",
"func (c *ElementCounter) Add(s string) {\n\tif c.m == nil {\n\t\tc.m = make(map[string]int)\n\t}\n\tif c.m[s] == 0 {\n\t\tc.m[s] = 1\n\t} else {\n\t\tc.m[s]++\n\t}\n}",
"func (d Data) Add(key uint32, value interface{}) {\n\td.mutex.Lock()\n\tdefer d.mutex.Unlock()\n\td.data[key] = value\n\td.counts[key] = d.counts[key] + 1\n}",
"func (metrics *Metrics) Inc(name string) {\n\t// Avoid crash\n\tif !metrics.Ready {\n\t\treturn\n\t}\n\tnow := time.Now().Unix()\n\t// Unlock mutex\n\tmetrics.mux.Lock()\n\tif c, ok := metrics.Counters[name]; ok {\n\t\tif (now - c.Interval) >= defaultInterval {\n\t\t\tc.Count = 0\n\t\t\tc.Interval = now\n\t\t} else {\n\t\t\tc.Count++\n\t\t}\n\t\tmetrics.Counters[name] = c\n\t} else {\n\t\tc := Counter{\n\t\t\tCount: 1,\n\t\t\tInterval: now,\n\t\t}\n\t\tmetrics.Counters[name] = c\n\t}\n\tvalue := metrics.Counters[name].Count\n\tmetrics.mux.Unlock()\n\n\t// Send value\n\tmetrics.ConnectAndSend(name, value)\n}",
"func (c *standardResettingCounter) Inc(i int64) {\n\tatomic.AddInt64(&c.count, i)\n}",
"func AddCounter(counterVec *prometheus.CounterVec, labels prometheus.Labels, v float64) {\n\tif counterVec == nil {\n\t\treturn\n\t}\n\tcounterVec.With(labels).Add(v)\n}",
"func _cgoexp_e93fccc2f088_add(a *struct {\n\t\tp0 _Ctype_int\n\t\tp1 _Ctype_int\n\t\tr0 _Ctype_int\n\t}) {\n\ta.r0 = add(a.p0, a.p1)\n}",
"func (mu *ModelUpdate) AddCounter(i int64) *ModelUpdate {\n\tmu.mutation.AddCounter(i)\n\treturn mu\n}",
"func (s *AddressStack) IncProgramCounter() {\n\ts.pc.Increment()\n}",
"func (cc *ExampleCC) addToInt(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) != 3 {\n\t\treturn shim.Error(\"Invalid args. Expecting collection, key, amountToAdd\")\n\t}\n\n\tcoll := args[0]\n\tkey := args[1]\n\tamountToAdd, err := strconv.Atoi(args[2])\n\tif err != nil {\n\t\treturn shim.Error(\"Invalid arg: amountToAdd is not an int\")\n\t}\n\n\toldValue, err := stub.GetPrivateData(coll, key)\n\tif err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Error getting private data for collection [%s] and key [%s]: %s\", coll, key, err))\n\t}\n\n\tvar oldValueInt int\n\tif oldValue != nil {\n\t\toldValueInt, err = strconv.Atoi(string(oldValue))\n\t\tif err != nil {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Error parsing existing amount [%s]: %s\", string(oldValue), err))\n\t\t}\n\t} else {\n\t\toldValueInt = 0\n\t}\n\n\tnewValueInt := oldValueInt + amountToAdd\n\tif err := stub.PutPrivateData(coll, key, []byte(strconv.Itoa(newValueInt))); err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Error storing new sum [%s] to key [%s] in private collection [%s]: %s\", newValueInt, key, coll, err))\n\t}\n\n\treturn shim.Success(nil)\n}",
"func (s *Stats) Inc(d *Data) {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.StatusCode[d.StatusCode]++\n\ts.Method[d.Method]++\n\ts.Path[d.Path]++\n\ts.InBytes += d.InBytes\n\ts.OutBytes += d.OutBytes\n}",
"func generateAddTwoToCounterScript(counterAddress string) string {\n\treturn fmt.Sprintf(\n\t\t`\n import 0x%s\n\n transaction {\n\n prepare(signer: AuthAccount) {\n if signer.borrow<&Counting.Counter>(from: /storage/counter) == nil {\n signer.save(<-Counting.createCounter(), to: /storage/counter)\n }\n\n signer.borrow<&Counting.Counter>(from: /storage/counter)!.add(2)\n }\n }\n `,\n\t\tcounterAddress,\n\t)\n}",
"func (proxy CalculatorProxy) Add(a int, b int) int {\n\trequestor := distribution.NewRequestor()\n\tinvocation := distribution.NewInvocation(\n\t\tproxy.client.ObjectID,\n\t\tproxy.client.Hostname,\n\t\tproxy.client.Port,\n\t\t\"add\",\n\t\t[]int{a, b},\n\t)\n\n\trequestor.Invoke(invocation)\n\n\treturn 2 + 2\n}",
"func (r *Registers) IncrementProgramCounter(increment uint16) {\n\tr.pc = r.pc + (2 * increment)\n}",
"func (m *Metrics) Add(stageInfo *stage.Info, count int) {\n\tswitch stageInfo.Action {\n\tcase \"query\":\n\t\tm.QueryJobs += count\n\tcase \"copy\":\n\t\tm.CopyJobs += count\n\tcase \"load\", \"reload\":\n\t\tm.LoadProcesss += count\n\tdefault:\n\t\tm.OtherJobs += count\n\t}\n}",
"func (t *testMetricsBackend) IncrementCounter(l metricsLabels) {\n\tt.Lock()\n\tt.counters[l]++\n\tt.Unlock()\n}",
"func (e *Event) AddInt(key string, value int) {\n\tvar ckey *C.char = C.CString(key)\n\tC.oboe_event_add_info_int64(&e.event, ckey, C.int64_t(value))\n\tC.free(unsafe.Pointer(ckey))\n}",
"func (c *safeCounter) Inc() {\n\tc.mux.Lock()\n\tc.cnt++\n\tc.mux.Unlock()\n}",
"func (m *Meter) Add(amount int64) {\n\tm.totalCount += amount\n}",
"func (v *IntValueMonitor) Add(val int64) {\n\tv.mtx.Lock()\n\tv.count += 1\n\tv.sum += val\n\tv.sum_squared += (val * val)\n\tv.recent = val\n\tif val > v.max {\n\t\tv.max = val\n\t}\n\tif val < v.min {\n\t\tv.min = val\n\t}\n\tv.mtx.Unlock()\n}",
"func (c *counter) Inc(n uint64) {\n\tatomic.AddUint64(&c.val, n)\n}",
"func addb(context *Context) {\n x := context.opcode & 0x0F00 >> 8\n b := byte(context.opcode & 0x00FF)\n context.cpu.v[x] += b\n context.cpu.pc += 2\n}",
"func (a v3ioAppender) Add(lset utils.Labels, t int64, v float64) (uint64, error) {\n\treturn a.metricsCache.Add(lset, t, v)\n}",
"func (s *SumMutex) AddToSum(value int) {\n\ts.mux.Lock()\n\ts.sum += value\n\ts.mux.Unlock()\n}"
] | [
"0.693444",
"0.6744105",
"0.67251784",
"0.6690354",
"0.65803283",
"0.6555088",
"0.6521243",
"0.6459427",
"0.64519507",
"0.6396349",
"0.6385772",
"0.63718647",
"0.6366864",
"0.6361009",
"0.62777066",
"0.6268622",
"0.61870646",
"0.61822426",
"0.6144839",
"0.61348534",
"0.61290646",
"0.60795844",
"0.60723734",
"0.60159886",
"0.60150427",
"0.5998381",
"0.59685975",
"0.5952655",
"0.5948167",
"0.5934066",
"0.5926421",
"0.5919343",
"0.5919343",
"0.5919343",
"0.58993775",
"0.587417",
"0.5845378",
"0.5839806",
"0.58349156",
"0.5821273",
"0.5810223",
"0.5802945",
"0.5795909",
"0.57878566",
"0.5776768",
"0.5747797",
"0.5745027",
"0.5734152",
"0.5733267",
"0.57131875",
"0.5709209",
"0.5708542",
"0.5707731",
"0.5705532",
"0.56973964",
"0.56794477",
"0.56433845",
"0.5641291",
"0.56383896",
"0.5636916",
"0.5636916",
"0.5632025",
"0.56320125",
"0.5610363",
"0.5600559",
"0.5599598",
"0.5591727",
"0.5590688",
"0.5586763",
"0.55866945",
"0.5584047",
"0.5564392",
"0.5554491",
"0.5550628",
"0.5542212",
"0.5536874",
"0.55353916",
"0.5522697",
"0.5512239",
"0.550488",
"0.55048513",
"0.5498628",
"0.5490696",
"0.54810077",
"0.5468215",
"0.54559857",
"0.5446926",
"0.54363847",
"0.5434658",
"0.54323626",
"0.5424954",
"0.54247475",
"0.5423738",
"0.5423674",
"0.5423591",
"0.5420098",
"0.5417963",
"0.5404503",
"0.5389025",
"0.5380083"
] | 0.78227204 | 0 |
AddRange add multiple counter to SyscallCounter | func (s SyscallCounter) AddRange(m map[string]int) {
for k, v := range m {
s[k] = v
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func SystemcounterReadList(buf []byte, dest []Systemcounter) int {\n\tb := 0\n\tfor i := 0; i < len(dest); i++ {\n\t\tdest[i] = Systemcounter{}\n\t\tb += SystemcounterRead(buf[b:], &dest[i])\n\t}\n\treturn xgb.Pad(b)\n}",
"func (s SyscallCounter) Add(name string, count int) {\n\ts[name] = count\n}",
"func (c *PCPCounterVector) IncAll(val int64) {\n\tfor ins := range c.indom.instances {\n\t\tc.MustInc(val, ins)\n\t}\n}",
"func addValuesToChannel(c chan<- int) {\n\tfor i := 0; i < 100; i++ {\n\t\tc <- i\n\t}\n\tclose(c)\n}",
"func (ss *SerSlave) Counters(cnt Counter) []uint64 {\n\treturn ss.cnt.GetAll()\n}",
"func (counter *counter) addCount(n int64) {\n\tcounter.Lock()\n\tdefer counter.Unlock()\n\n\tif n <= 0 {\n\t\treturn\n\t}\n\n\t// Add the ops read and increase the count of operation.\n\t// bytes Number of ops.\n\tcounter.bytes += n\n\tcounter.ops++\n}",
"func (c *Counter) AddCount(mp string, n int) int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tc.count[mp] += n\n\tlogrus.Debugf(\"Mount count increased to %d for %q\", c.count[mp], mp)\n\treturn c.count[mp]\n}",
"func (s *SliceOfInt32) Concat(items []int32) *SliceOfInt32 {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func (r *refCounter) Add(item stackitem.Item) {\n\tif r == nil {\n\t\treturn\n\t}\n\t*r++\n\n\tirc, ok := item.(rcInc)\n\tif !ok || irc.IncRC() > 1 {\n\t\treturn\n\t}\n\tswitch t := item.(type) {\n\tcase *stackitem.Array, *stackitem.Struct:\n\t\tfor _, it := range item.Value().([]stackitem.Item) {\n\t\t\tr.Add(it)\n\t\t}\n\tcase *stackitem.Map:\n\t\tfor i := range t.Value().([]stackitem.MapElement) {\n\t\t\tr.Add(t.Value().([]stackitem.MapElement)[i].Value)\n\t\t}\n\t}\n}",
"func (s *SliceOfUint32) Concat(items []uint32) *SliceOfUint32 {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func (s *Set) Add(items ...uint32) *Set {\n\tfor _, item := range items {\n\t\ts.items[item] = struct{}{}\n\t}\n\treturn s\n}",
"func (gc *GroupCounter) Add(a float64) {\n\tfor _, c := range gc.counters {\n\t\tc.Add(a)\n\t}\n}",
"func (ms *metricSender) AddToCounter(name string, delta uint64) error {\n\treturn ms.eventEmitter.Emit(&events.CounterEvent{Name: &name, Delta: &delta})\n}",
"func (m *Metrics) Add(trace []uint64, count ...int64) error {\n\tif len(count) > 4 {\n\t\treturn fmt.Errorf(\"too many counts (%d) to register in reporter\", len(count))\n\t}\n\t// Only the last point.\n\ts := m.getStats(trace[0])\n\tfor i, n := range count {\n\t\ts.Self[i] += n\n\t}\n\t// Record cumulated stats.\n\tseen := make(map[uint64]bool, len(trace))\n\tfor i, a := range trace {\n\t\ts := m.getStats(a)\n\t\tif !seen[a] {\n\t\t\tseen[a] = true\n\t\t\tfor j, n := range count {\n\t\t\t\ts.Cumul[j] += n\n\t\t\t}\n\t\t}\n\t\tif i > 0 {\n\t\t\tcallee := trace[i-1]\n\t\t\tif s.Callees == nil {\n\t\t\t\ts.Callees = make(map[uint64][4]int64)\n\t\t\t}\n\t\t\tedges := s.Callees[callee]\n\t\t\tfor j, n := range count {\n\t\t\t\tedges[j] += n\n\t\t\t}\n\t\t\ts.Callees[callee] = edges\n\t\t}\n\t}\n\treturn nil\n}",
"func (s *SliceOfUint) Concat(items []uint) *SliceOfUint {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func registerRequestsCounter(registerer prometheus.Registerer) *prometheus.CounterVec {\n\tcounter := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"oauth2_proxy_requests_total\",\n\t\t\tHelp: \"Total number of requests by HTTP status code.\",\n\t\t},\n\t\t[]string{\"code\"},\n\t)\n\n\tif err := registerer.Register(counter); err != nil {\n\t\tif are, ok := err.(prometheus.AlreadyRegisteredError); ok {\n\t\t\tcounter = are.ExistingCollector.(*prometheus.CounterVec)\n\t\t} else {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\treturn counter\n}",
"func addResourceList(list, new corev1.ResourceList) {\n\tfor name, quantity := range new {\n\t\tif value, ok := list[name]; !ok {\n\t\t\tlist[name] = quantity.DeepCopy()\n\t\t} else {\n\t\t\tvalue.Add(quantity)\n\t\t\tlist[name] = value\n\t\t}\n\t}\n}",
"func listSystemCountersRequest(c *xgb.Conn) []byte {\n\tsize := 4\n\tb := 0\n\tbuf := make([]byte, size)\n\n\tbuf[b] = c.Extensions[\"SYNC\"]\n\tb += 1\n\n\tbuf[b] = 1 // request opcode\n\tb += 1\n\n\txgb.Put16(buf[b:], uint16(size/4)) // write request size in 4-byte units\n\tb += 2\n\n\treturn buf\n}",
"func AddToCounter(name string, delta uint64) error {\n\treturn metricSender.AddToCounter(name, delta)\n}",
"func (s *SliceOfInt) Concat(items []int) *SliceOfInt {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func add_counter(id int) {\r\n\tdefer wg.Done()\r\n\t\r\n\tfor i:=0; i<2; i++ {\r\n\t\t//fmt.Printf(\"in 11 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t\tvalue := counter\r\n\t\t//grouting 退出,放回队列?\r\n\t\truntime.Gosched()\r\n\t\tvalue ++\r\n\t\tcounter = value\r\n\t\t\r\n\t\tfmt.Printf(\"in 22 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t}\r\n\t\r\n\t/*\r\n\tfor i:=0; i<2; i++ {\r\n\t\tfmt.Printf(\"in 11 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t\t//grouting 退出,放回队列?\r\n\t\truntime.Gosched()\r\n\t\tcounter++\r\n\t\t\r\n\t\tfmt.Printf(\"in 22 add_counter(%d) counter=%d \\n\", id, counter)\r\n\t}\r\n\t*/\r\n}",
"func (l *Int32) AddAll(values ...int32) {\n\tl.values = append(l.values, values...)\n}",
"func (x profIndex) addCountsAndClearFlags(data, tag int) profIndex {\n\treturn profIndex((uint64(x)>>34+uint64(uint32(tag)<<2>>2))<<34 | uint64(uint32(x)+uint32(data)))\n}",
"func addResourceList(list, new v1.ResourceList) {\n\tfor name, quantity := range new {\n\t\tif value, ok := list[name]; !ok {\n\t\t\tlist[name] = quantity.DeepCopy()\n\t\t} else {\n\t\t\tvalue.Add(quantity)\n\t\t\tlist[name] = value\n\t\t}\n\t}\n}",
"func (cc *charCounts) Add(c rune) {\n\tfor _, count := range cc.counts {\n\t\tif count.char == c {\n\t\t\tcount.count++\n\t\t\treturn\n\t\t}\n\t}\n\tnewItem := new(charCount)\n\tnewItem.char, newItem.count = c, 1\n\tcc.counts = append(cc.counts, newItem)\n}",
"func (r *WaitingCounter) Add(c int32) {\n\t(*atomic.Int32)(r).Add(c)\n}",
"func (s *SliceOfInt64) Concat(items []int64) *SliceOfInt64 {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func (this *List) Add(c Counter) error {\n this.lock.Lock()\n defer this.lock.Unlock()\n\n _, ok := this.counters[c.Name()]\n if ok {\n return fmt.Errorf(\"Counter already exists\")\n }\n\n this.counters[c.Name()] = c\n\n return nil\n}",
"func (r *ReconciliationLoop) addSlots(command *contrib.NodeCommand) (*contrib.NodeCommandResult, error) {\n\tc := *command\n\tvar slots []int\n\tfor _, s := range c.Arguments {\n\t\tslot, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\tlog.Warnf(\"Invalid slot value: %v\", s)\n\t\t}\n\t\tslots = append(slots, slot)\n\t}\n\n\tres, err := r.redis.AddSlots(slots)\n\tif err != nil {\n\t\tlog.Warnf(\"Redis error: %v\", err)\n\t}\n\n\treturn contrib.NewNodeCommandResult(c.ID, []string{res}, err == nil)\n}",
"func (s *SliceOfInt16) Concat(items []int16) *SliceOfInt16 {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func (set Int64Set) Add(i ...int64) Int64Set {\n\tfor _, v := range i {\n\t\tset.doAdd(v)\n\t}\n\treturn set\n}",
"func (s *counts) Add(other counts) {\n\ts.chars += other.chars\n\ts.words += other.words\n\ts.lines += other.lines\n\ts.Added++\n}",
"func (cs *UnsafeCounterIndex) Add(n int64, values ...string) int64 {\n\th := vhash(values)\n\tc := cs.findOrCreate(h, values)\n\tc.Count += n\n\treturn c.Count\n}",
"func (s *SliceOfUint64) Concat(items []uint64) *SliceOfUint64 {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func listSystemCountersReply(buf []byte) *ListSystemCountersReply {\n\tv := new(ListSystemCountersReply)\n\tb := 1 // skip reply determinant\n\n\tb += 1 // padding\n\n\tv.Sequence = xgb.Get16(buf[b:])\n\tb += 2\n\n\tv.Length = xgb.Get32(buf[b:]) // 4-byte units\n\tb += 4\n\n\tv.CountersLen = xgb.Get32(buf[b:])\n\tb += 4\n\n\tb += 20 // padding\n\n\tv.Counters = make([]Systemcounter, v.CountersLen)\n\tb += SystemcounterReadList(buf[b:], v.Counters)\n\n\treturn v\n}",
"func addInts(ints ...int) int {\n\ttotal := 0\n\tfor _, v := range ints {\n\t\ttotal += v\n\t}\n\treturn total\n}",
"func SystemcounterListBytes(buf []byte, list []Systemcounter) int {\n\tb := 0\n\tvar structBytes []byte\n\tfor _, item := range list {\n\t\tstructBytes = item.Bytes()\n\t\tcopy(buf[b:], structBytes)\n\t\tb += len(structBytes)\n\t}\n\treturn xgb.Pad(b)\n}",
"func (c *CollectingCounter) Add(delta float64) {\n\tc.CounterValue += delta\n}",
"func (list *APTAuditList) incrementCount() {\n\tlist.mutex.Lock()\n\tlist.count += 1\n\tlist.mutex.Unlock()\n}",
"func (fs *flowControlConnStats) newCounters(ffs *flowControlFlowStats) {\n\tfs.mu.Lock()\n\tdefer fs.mu.Unlock()\n\tffs.toRelease = fs.bytesBufferedPerFlow\n}",
"func processCombinedCounters(counters *interfaces.VnetInterfaceCombinedCounters) {\n\tfmt.Printf(\"%+v\\n\", counters)\n\n\tcounterNames := []string{\"Rx\", \"Tx\"}\n\n\tfor i := uint32(0); i < counters.Count; i++ {\n\t\tfmt.Printf(\"Interface '%d': %s packets = %d, %s bytes = %d\\n\",\n\t\t\tcounters.FirstSwIfIndex+i, counterNames[counters.VnetCounterType], counters.Data[i].Packets,\n\t\t\tcounterNames[counters.VnetCounterType], counters.Data[i].Bytes)\n\t}\n}",
"func (r *Resource) Add(rl v1.ResourceList) {\n\tif r == nil {\n\t\treturn\n\t}\n\n\tfor rName, rQuant := range rl {\n\t\tswitch rName {\n\t\tcase v1.ResourceCPU:\n\t\t\tr.MilliCPU += rQuant.MilliValue()\n\t\tcase v1.ResourceMemory:\n\t\t\tr.Memory += rQuant.Value()\n\t\t}\n\t}\n}",
"func (s *IntSet) AddAll(values ...int) {\n\tfor _, x := range values {\n\t\tword, bit := x/64, uint(x%64)\n\t\tfor word >= len(s.words) {\n\t\t\ts.words = append(s.words, 0)\n\t\t}\n\t\ts.words[word] |= 1 << bit\n\t}\n}",
"func (tc *TokenCount) AddPromptCounter(prompt TokenCounter) {\n\tif prompt != nil {\n\t\ttc.Prompt = append(tc.Prompt, prompt)\n\t}\n}",
"func (_m *Reporter) Count(name string, value int64, tags ...monitoring.Tag) {\n\t_va := make([]interface{}, len(tags))\n\tfor _i := range tags {\n\t\t_va[_i] = tags[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, name, value)\n\t_ca = append(_ca, _va...)\n\t_m.Called(_ca...)\n}",
"func (g *PCPGaugeVector) IncAll(val float64) {\n\tfor ins := range g.indom.instances {\n\t\tg.MustInc(val, ins)\n\t}\n}",
"func (s *SliceOfInt8) Concat(items []int8) *SliceOfInt8 {\n\ts.items = append(s.items, items...)\n\treturn s\n}",
"func IncNumIPSets() {\n\tnumIPSets.Inc()\n}",
"func Accumulate(s []*big.Int) (r *big.Int) {\n\tr = big.NewInt(0)\n\tfor _, e := range s {\n\t\tr.Add(r, e)\n\t}\n\treturn\n}",
"func (gtc *GroupTagCounter) AddCounter(c TagCounter) {\n\tgtc.tagcounters = append(gtc.tagcounters, c)\n}",
"func (m *MetricsManager) AddCounter(name, help string, labelNames []string) error {\n\tvar allLabels sort.StringSlice\n\tfor k := range m.commonLabels {\n\t\tallLabels = append(allLabels, k)\n\t}\n\tallLabels = append(allLabels, labelNames...)\n\tallLabels.Sort()\n\n\tmetric := prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: name,\n\t\t\tHelp: help,\n\t\t},\n\t\tallLabels,\n\t)\n\tif err := prometheus.Register(metric); err != nil {\n\t\treturn err\n\t}\n\n\tpartialMetric, err := metric.CurryWith(m.commonLabels)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.counters[name] = &Counter{\n\t\tcreationTime: time.Now(),\n\t\tmetric: partialMetric,\n\t}\n\treturn nil\n}",
"func (c *thresholdCollector) push(s []types.AccessReviewThreshold) ([]uint32, error) {\n\tif len(s) == 0 {\n\t\t// empty threshold sets are equivalent to the default threshold\n\t\ts = []types.AccessReviewThreshold{\n\t\t\t{\n\t\t\t\tName: \"default\",\n\t\t\t\tApprove: 1,\n\t\t\t\tDeny: 1,\n\t\t\t},\n\t\t}\n\t}\n\n\tvar indexes []uint32\n\n\tfor _, t := range s {\n\t\ttid, err := c.pushThreshold(t)\n\t\tif err != nil {\n\t\t\treturn nil, trace.Wrap(err)\n\t\t}\n\t\tindexes = append(indexes, tid)\n\t}\n\n\treturn indexes, nil\n}",
"func (a *Counter) IncEachTransaction() {\n\ta.mu.Lock()\n\tt := time.Now().Format(\"2006-01-02T15\")\n\tcnt := a.resendTransactionCount[t]\n\tcnt.TransactionsSend += 1\n\ta.resendTransactionCount[t] = cnt\n\ta.mu.Unlock()\n}",
"func (s *Stats) add(d *Stats) {\n\ts.ItemCount += d.ItemCount\n\ts.EffectiveSize += d.EffectiveSize\n\ts.UsedSize += d.UsedSize\n\ts.Readers += d.Readers\n\ts.MarkedDeleted += d.MarkedDeleted\n\ts.Writers += d.Writers\n\ts.WritersBlocked += d.WritersBlocked\n}",
"func (ac *Accumulator) AddMetrics(ms []telegraf.Metric) {\n\tfor _, m := range ms {\n\t\tac.AddMetric(m)\n\t}\n}",
"func readAddRequests(broadCastAddChannel chan *AddMessage) []AddMessage {\n\tvar addMessages []AddMessage\n\tfor i := 0; i < 10; i++ {\n\t\t// Arbitraily read up to ten add requests in a single frame\n\t\tselect {\n\t\tcase msg := <-broadCastAddChannel:\n\t\t\t// TODO: add some sort of types\n\t\t\tTrace.Printf(\"Adding with %+v\\n\", msg)\n\t\t\taddMessages = append(addMessages, *msg)\n\t\tdefault:\n\t\t\t// Move on to other things\n\t\t\tbreak\n\t\t}\n\t}\n\treturn addMessages\n}",
"func addStrings(s string, count int) {\n\twg.Add(count) // we will create \"count\" new goroutines\n\tfor n := 0; n < count; n++ {\n\t\tgo addString(s)\n\t}\n}",
"func add(context *Context) {\n x := context.opcode & 0x0F00 >> 8\n y := context.opcode & 0x00F0 >> 4\n sum := uint16(context.cpu.v[x]) + uint16(context.cpu.v[y])\n if sum > 255 {\n context.cpu.v[0xF] = 1\n } else {\n context.cpu.v[0xF] = 0\n }\n context.cpu.v[x] = byte(sum & 0xFF)\n}",
"func (tc *TokenCount) AddCompletionCounter(completion TokenCounter) {\n\tif completion != nil {\n\t\ttc.Completion = append(tc.Completion, completion)\n\t}\n}",
"func (s *IPSet) AddRange(r IPRange) {\n\tif !r.Valid() {\n\t\treturn\n\t}\n\t// If there are any removals (s.out), then we need to compact the set\n\t// first to get the order right.\n\ts.toInOnly()\n\ts.in = append(s.in, r)\n}",
"func updateCounters(t *db.TestDetails, counters map[string]map[string]pair, isBlocked bool) {\n\tvar category string\n\tvar typ string\n\n\tif isApiTest(t.TestSet) {\n\t\tcategory = \"api\"\n\t} else {\n\t\tcategory = \"app\"\n\t}\n\n\tif t.Type == \"\" {\n\t\ttyp = \"unknown\"\n\t} else {\n\t\ttyp = strings.ToLower(t.Type)\n\t}\n\n\tif _, ok := counters[category]; !ok {\n\t\tcounters[category] = make(map[string]pair)\n\t}\n\n\tval := counters[category][typ]\n\tif isBlocked {\n\t\tval.blocked++\n\t} else {\n\t\tval.bypassed++\n\t}\n\tcounters[category][typ] = val\n}",
"func (mc *MetricCollection) AddFromRaw(rawMetrics ...*RawMetric) {\n\tfor _, raw := range rawMetrics {\n\t\tmc.Add(raw.ConvertToMetric())\n\t}\n}",
"func sendCounters(\n\tscope tally.Scope,\n\ttable, operation string,\n\terr error,\n) {\n\terrMsg := \"none\"\n\tif err != nil {\n\t\terrMsg = getGocqlErrorTag(err)\n\t}\n\ts := scope.Tagged(map[string]string{\n\t\t\"table\": table,\n\t\t\"operation\": operation,\n\t\t\"error\": errMsg,\n\t})\n\ts.Counter(\"execute\").Inc(1)\n}",
"func (c *PCPCounterVector) UpAll() { c.IncAll(1) }",
"func ListSystemCounters(c *xgb.Conn) ListSystemCountersCookie {\n\tif _, ok := c.Extensions[\"SYNC\"]; !ok {\n\t\tpanic(\"Cannot issue request 'ListSystemCounters' using the uninitialized extension 'SYNC'. sync.Init(connObj) must be called first.\")\n\t}\n\tcookie := c.NewCookie(true, true)\n\tc.NewRequest(listSystemCountersRequest(c), cookie)\n\treturn ListSystemCountersCookie{cookie}\n}",
"func (ps *PeerSet) decrIPRangeCounts(address string) {\n\taddrParts := strings.Split(address, \".\")\n\n\tc := ps.connectedIPs\n\tdecrNestedCounters(c, addrParts, 0)\n}",
"func (ps *linuxHarvester) populateIOCounters(sample, lastSample *types.ProcessSample, source Snapshot, elapsedSeconds float64) error {\n\tioCounters, err := source.IOCounters()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif ioCounters != nil {\n\t\t// Delta\n\t\tif lastSample != nil && lastSample.LastIOCounters != nil {\n\t\t\tlastCounters := lastSample.LastIOCounters\n\n\t\t\ttrace.Proc(\"ReadCount: %d, WriteCount: %d, ReadBytes: %d, WriteBytes: %d\", ioCounters.ReadCount, ioCounters.WriteCount, ioCounters.ReadBytes, ioCounters.WriteBytes)\n\t\t\tioReadCountPerSecond := acquire.CalculateSafeDelta(ioCounters.ReadCount, lastCounters.ReadCount, elapsedSeconds)\n\t\t\tioWriteCountPerSecond := acquire.CalculateSafeDelta(ioCounters.WriteCount, lastCounters.WriteCount, elapsedSeconds)\n\t\t\tioReadBytesPerSecond := acquire.CalculateSafeDelta(ioCounters.ReadBytes, lastCounters.ReadBytes, elapsedSeconds)\n\t\t\tioWriteBytesPerSecond := acquire.CalculateSafeDelta(ioCounters.WriteBytes, lastCounters.WriteBytes, elapsedSeconds)\n\n\t\t\tsample.IOReadCountPerSecond = &ioReadCountPerSecond\n\t\t\tsample.IOWriteCountPerSecond = &ioWriteCountPerSecond\n\t\t\tsample.IOReadBytesPerSecond = &ioReadBytesPerSecond\n\t\t\tsample.IOWriteBytesPerSecond = &ioWriteBytesPerSecond\n\t\t}\n\n\t\t// Cumulative\n\t\tsample.IOTotalReadCount = &ioCounters.ReadCount\n\t\tsample.IOTotalWriteCount = &ioCounters.WriteCount\n\t\tsample.IOTotalReadBytes = &ioCounters.ReadBytes\n\t\tsample.IOTotalWriteBytes = &ioCounters.WriteBytes\n\n\t\tsample.LastIOCounters = ioCounters\n\t}\n\treturn nil\n}",
"func (s *SeriesIDSet) AddMany(ids ...uint64) {\n\tif len(ids) == 0 {\n\t\treturn\n\t}\n\n\ta32 := make([]uint32, len(ids))\n\tfor i := range ids {\n\t\ta32[i] = uint32(ids[i])\n\t}\n\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.bitmap.AddMany(a32)\n}",
"func (list_obj *DualSortedSeqnoListWithLock) appendSeqnos(seqno_1 uint64, seqno_2 uint64, logger *log.CommonLogger) {\n\tlist_obj.lock.Lock()\n\tdefer list_obj.lock.Unlock()\n\tlist_obj.seqno_list_1 = append(list_obj.seqno_list_1, seqno_1)\n\tlist_obj.seqno_list_2 = append(list_obj.seqno_list_2, seqno_2)\n\tlogger.Tracef(\"after adding seqno_1 %v, seqno_2 %v, seqno_list_1 is %v, seqno_list_2 is %v\\n\", seqno_1, seqno_2, list_obj.seqno_list_1, list_obj.seqno_list_2)\n}",
"func AddInts(value1, value2 int) int {\n\treturn 0\n}",
"func (p thinPoly) Inc(x []int32) thinPoly {\n\tfor i := range x {\n\t\tp[i] += x[i]\n\t}\n\treturn p\n}",
"func AddToUint8Slice(nums []uint8, toAdd uint8) {\n\tfor i := range nums {\n\t\tnums[i] += toAdd\n\t}\n}",
"func (cc *ExampleCC) addToInt(stub shim.ChaincodeStubInterface, args []string) pb.Response {\n\tif len(args) != 3 {\n\t\treturn shim.Error(\"Invalid args. Expecting collection, key, amountToAdd\")\n\t}\n\n\tcoll := args[0]\n\tkey := args[1]\n\tamountToAdd, err := strconv.Atoi(args[2])\n\tif err != nil {\n\t\treturn shim.Error(\"Invalid arg: amountToAdd is not an int\")\n\t}\n\n\toldValue, err := stub.GetPrivateData(coll, key)\n\tif err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Error getting private data for collection [%s] and key [%s]: %s\", coll, key, err))\n\t}\n\n\tvar oldValueInt int\n\tif oldValue != nil {\n\t\toldValueInt, err = strconv.Atoi(string(oldValue))\n\t\tif err != nil {\n\t\t\treturn shim.Error(fmt.Sprintf(\"Error parsing existing amount [%s]: %s\", string(oldValue), err))\n\t\t}\n\t} else {\n\t\toldValueInt = 0\n\t}\n\n\tnewValueInt := oldValueInt + amountToAdd\n\tif err := stub.PutPrivateData(coll, key, []byte(strconv.Itoa(newValueInt))); err != nil {\n\t\treturn shim.Error(fmt.Sprintf(\"Error storing new sum [%s] to key [%s] in private collection [%s]: %s\", newValueInt, key, coll, err))\n\t}\n\n\treturn shim.Success(nil)\n}",
"func addi(context *Context) {\n x := context.opcode & 0x0F00 >> 8\n context.cpu.i += uint16(context.cpu.v[x])\n}",
"func (r *RunningStats) AddAll(data []float64) {\n\tfor _, x := range data {\n\t\tr.Add(x)\n\t}\n}",
"func (l IntList) Concat(a []IntList) IntList {\n\tfor _, v := range a {\n\t\tl = l.Append(v)\n\t}\n\treturn l\n}",
"func (gc *GroupCounter) Inc() {\n\tfor _, c := range gc.counters {\n\t\tc.Inc()\n\t}\n}",
"func AddAll(nums... int) int {\n\tsum:=0\n\tfor _,num:=range nums{\n\t\tsum+=num\n\t}\n\treturn sum\n}",
"func (data *Data) AddCount(cnt float64) {\n\tdata.Lock()\n\tdefer data.Unlock()\n\n\tdata.count += cnt\n\tdata.totalCount += cnt\n}",
"func (c *Tool) Add(strs ...string) *Tool {\n\tfor _, s := range strs {\n\t\tc.Args = append(c.Args, s)\n\t}\n\treturn c\n}",
"func (set *AppleSet) Add(more ...Apple) {\n\tset.s.Lock()\n\tdefer set.s.Unlock()\n\n\tfor _, v := range more {\n\t\tset.doAdd(v)\n\t}\n}",
"func (el *eventList) increase() {\n\tel.size >>= 1\n\tel.events = make([]unix.Kevent_t, el.size)\n}",
"func (c *StandardCounter) Inc(i int64) {\n\tatomic.AddInt64(&c.count, i)\n}",
"func (c *StandardCounter) Inc(i int64) {\n\tatomic.AddInt64(&c.count, i)\n}",
"func (c *StandardCounter) Inc(i int64) {\n\tatomic.AddInt64(&c.count, i)\n}",
"func (pit *Pit) Counters() (cnt Counters) {\n\tcnt.NEntries = uint64(pit.nEntries)\n\tcnt.NInsert = uint64(pit.nInsert)\n\tcnt.NFound = uint64(pit.nFound)\n\tcnt.NCsMatch = uint64(pit.nCsMatch)\n\tcnt.NAllocErr = uint64(pit.nAllocErr)\n\tcnt.NDataHit = uint64(pit.nDataHit)\n\tcnt.NDataMiss = uint64(pit.nDataMiss)\n\tcnt.NNackHit = uint64(pit.nNackHit)\n\tcnt.NNackMiss = uint64(pit.nNackMiss)\n\tcnt.NExpired = uint64(pit.timeoutSched.nTriggered)\n\treturn cnt\n}",
"func (p CountProfile) Add(s DNA) {\n\tif len(s) > len(p) {\n\t\ts = s[:len(p)]\n\t}\n\tfor x, b := range s {\n\t\tswitch lb := b | LCBit; lb {\n\t\tcase 'a', 'c', 't', 'g':\n\t\t\tp[x][lb>>1&3]++\n\t\t}\n\t}\n}",
"func (pb *primitiveBuilder) addPullouts(pullouts []*pulloutSubquery) {\n\tfor _, pullout := range pullouts {\n\t\tpullout.setUnderlying(pb.plan)\n\t\tpb.plan = pullout\n\t\tpb.plan.Reorder(0)\n\t}\n}",
"func incFn(args []reflect.Value) []reflect.Value {\n\treturn []reflect.Value{reflect.ValueOf(args[0].Interface().(int) + 1)}\n}",
"func (l licenseApplication) counter(applications map[string][]*models.UserApplication) (sum chan int) {\n\tsum = make(chan int)\n\tgo func(responseChn chan int) {\n\t\tfor _, appItems := range applications {\n\n\t\t\tdesktopMap := make(map[string]bool)\n\t\t\tlaptopMap := make(map[string]bool)\n\n\t\t\tfor _, item := range appItems {\n\t\t\t\tmachineType := strings.ToUpper(item.ComputerType)\n\t\t\t\tkey := fmt.Sprintf(\"%s-%s\", item.UserID, item.ComputerID)\n\t\t\t\tswitch machineType {\n\t\t\t\tcase Desktop:\n\t\t\t\t\tdesktopMap[key] = true\n\t\t\t\tdefault:\n\t\t\t\t\tlaptopMap[key] = true\n\t\t\t\t}\n\t\t\t}\n\t\t\tif len(desktopMap) > len(laptopMap) {\n\t\t\t\tresponseChn <- len(desktopMap)\n\t\t\t}\n\t\t\tresponseChn <- len(laptopMap)\n\t\t}\n\t\tdefer close(responseChn)\n\t}(sum)\n\treturn sum\n}",
"func TestCounter(t *testing.T) {\n\tnodeName := \"192.168.1.11\"\n\tc := NewCounter()\n\tc.Plus(nodeName, \"undone\")\n\tc.Plus(nodeName, \"undone\")\n\tc.Plus(nodeName, \"websocket\")\n\tc.Minus(nodeName, \"undone\")\n\tc.Minus(nodeName, \"undone\")\n\tc.Minus(nodeName, \"undone\")\n\n\tfmt.Println(\"Counter.Plus++++++++\", c, Gcounter)\n\n}",
"func (NilCounter) Inc(i int64) {}",
"func (NilCounter) Inc(i int64) {}",
"func (c *C) Add(n int) {\n\tc.wg.Add(n)\n\tfor n > 0 {\n\t\tn--\n\t\tc.ch <- struct{}{}\n\t}\n}",
"func add(a, b []int) []int {\n\tres := make([]int, len(a))\n\tfor i := range a {\n\t\tres[i] = a[i] + b[i]\n\t}\n\treturn res\n}",
"func main() {\n\treqs := make(chan Request)\n\tdefer close(reqs)\n\tfor i := 0; i < 3; i++ {\n\t\tgo PlusOneService(reqs, i)\n\t}\n\tvar wg sync.WaitGroup\n\tfor i := 3; i < 53; i += 10 {\n\t\twg.Add(1)\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\t\t\tresps := make(chan Response)\n\t\t\treqs <- Request{i, resps}\n\t\t\t// multi responses\n\t\t\tfor resp := range resps {\n\t\t\t\tfmt.Println(i, \"=>\", resp)\n\t\t\t}\n\t\t\t// or only single one\n\t\t\t//fmt.Println(i, \"=>\", <-resp)\n\t\t}(i)\n\t}\n\twg.Wait()\n}",
"func (m pComputeAccessesCount) Extend(fnct func(m.UserSet) m.UserData) pComputeAccessesCount {\n\treturn pComputeAccessesCount{\n\t\tMethod: m.Method.Extend(fnct),\n\t}\n}",
"func (gp *GoPool) Add(n int) {\n\tif n < 0 {\n\t\tpanic(\"n cannot be < 0\")\n\t}\n\tfor i := 0; i < n; i++ {\n\t\tgp.buffer <- true // take a slot in buffer channel\n\t}\n}",
"func (c *Counter) Add(mp string) int {\n\tc.mutex.Lock()\n\tdefer c.mutex.Unlock()\n\n\tc.count[mp]++\n\tlogrus.Debugf(\"Mount count increased to %d for %q\", c.count[mp], mp)\n\treturn c.count[mp]\n}",
"func (self *Weights) addMultiple(w weight,multiples int) {\n\tfor x:=multiples; x > 0; x-- {\n\t\tself.add(w)\n\t}\n}"
] | [
"0.58888173",
"0.57848877",
"0.56218725",
"0.53386444",
"0.5303115",
"0.52801883",
"0.5248637",
"0.5165493",
"0.5153725",
"0.5127275",
"0.51228803",
"0.50775003",
"0.5071776",
"0.5071021",
"0.5059025",
"0.50525403",
"0.50412554",
"0.5031862",
"0.4982327",
"0.4956368",
"0.49411914",
"0.49290663",
"0.49208662",
"0.49150327",
"0.4914811",
"0.49070355",
"0.49025345",
"0.48750284",
"0.48566154",
"0.48512468",
"0.48511064",
"0.48422265",
"0.4841588",
"0.48255822",
"0.4812403",
"0.48081407",
"0.48015943",
"0.4800506",
"0.47995093",
"0.47810948",
"0.47681618",
"0.47668472",
"0.47660998",
"0.47429088",
"0.47357655",
"0.47291318",
"0.47244975",
"0.47141507",
"0.4704669",
"0.46975482",
"0.46969667",
"0.46910977",
"0.4690628",
"0.46884277",
"0.4683439",
"0.4679642",
"0.46781138",
"0.46765664",
"0.46713874",
"0.46687764",
"0.46679026",
"0.46606606",
"0.4652545",
"0.46496403",
"0.46494326",
"0.46460664",
"0.4644201",
"0.46418193",
"0.46348235",
"0.46271452",
"0.46262416",
"0.46248835",
"0.46236715",
"0.46220258",
"0.4611049",
"0.46015617",
"0.46014386",
"0.45974538",
"0.45934463",
"0.45901874",
"0.4588551",
"0.4587643",
"0.4587614",
"0.4587614",
"0.4587614",
"0.45836762",
"0.45801166",
"0.4573318",
"0.45716372",
"0.45700246",
"0.45697743",
"0.4568433",
"0.4568433",
"0.45637",
"0.4562103",
"0.45618412",
"0.4556251",
"0.45525756",
"0.45508692",
"0.45467612"
] | 0.6513837 | 0 |
Check return inside, allow | func (s SyscallCounter) Check(name string) (bool, bool) {
n, o := s[name]
if o {
s[name] = n - 1
if n <= 1 {
return true, false
}
return true, true
}
return false, true
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func FuncChangeRet() bool { return false }",
"func checkUsernameToReturn(c *gin.Context, username string) bool {\n\tif !isValidUsername(username) {\n\t\tc.Status(http.StatusBadRequest)\n\t\treturn true\n\t}\n\treturn false\n}",
"func hasReturnValue(fnValue reflect.Value) bool {\n\treturn fnValue.Type().NumOut() > 0 && fnValue.Type().Out(0) != errorType\n}",
"func Noreturn(p *obj.Prog) bool",
"func (cb callBacker) checkCall(fn_name string) (bool, error) {\n\tif cb.Scripter.HasEraValue(fn_name) {\n\t\terr := cb.Scripter.EraCall(fn_name)\n\t\treturn true, err\n\t}\n\treturn false, nil\n}",
"func execValid(_ int, p *gop.Context) {\n\targs := p.GetArgs(1)\n\tret := json.Valid(args[0].([]byte))\n\tp.Ret(1, ret)\n}",
"func (m Matcher) Deadcode() bool { return boolResult }",
"func (*BasicBlock) isValue() {}",
"func checkReturnCode(resp http.Response) (err error) {\n\terr = nil\n\tif resp.StatusCode >= 300 {\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusNotFound:\n\t\t\terr = httpErrorMesg(resp, \"Check for valid argument (user, group etc).\")\n\t\tcase http.StatusUnauthorized:\n\t\t\terr = httpErrorMesg(resp, \"Check for valid token.\")\n\t\tcase http.StatusForbidden:\n\t\t\terr = httpErrorMesg(resp, \"Check for valid token and token user must be an admin\")\n\t\tdefault:\n\t\t\terr = httpError(resp)\n\t\t}\n\t}\n\treturn err\n}",
"func checkReturnCode(resp http.Response) (err error) {\n\terr = nil\n\tif resp.StatusCode >= 300 {\n\t\tswitch resp.StatusCode {\n\t\tcase http.StatusNotFound:\n\t\t\terr = httpErrorMesg(resp, \"Check for valid argument (user, group etc).\")\n\t\tcase http.StatusUnauthorized:\n\t\t\terr = httpErrorMesg(resp, \"Check for valid token.\")\n\t\tcase http.StatusForbidden:\n\t\t\terr = httpErrorMesg(resp, \"Check for valid token and token user must be an admin\")\n\t\tdefault:\n\t\t\terr = httpError(resp)\n\t\t}\n\t}\n\treturn err\n}",
"func _onlyCurator(){\n\tif state.ReadUint32(append(CURATOR_KEY, address.GetSignerAddress()...)) != 0xffffffff {\n\t\tpanic(\"this function is restricted!\")\n\t}\n}",
"func (s *BaseConcertoListener) EnterReturnExpr(ctx *ReturnExprContext) {}",
"func almostOkayFunction() {\n\treturn nil\n}",
"func (ma *FakeActor) HasReturnValue(ctx exec.VMContext) (address.Address, uint8, error) {\n\tif err := ctx.Charge(100); err != nil {\n\t\treturn address.Undef, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, \"Insufficient gas\")\n\t}\n\n\treturn address.Undef, 0, nil\n}",
"func check(e error){\n\tif e!=nil{\n\t\tpanic(e)\n\t}\n}",
"func check(e interface{}) {\n\tif e != nil {\n\t\tpanic(fmt.Sprintf(\"check fail: %v\", e))\n\t}\n}",
"func (checker *CheckerType) NeedsExpectedValue() bool {\n return true\n}",
"func testHelperSuccess(c *gocheck.C, name string,\n expectedResult interface{},\n closure func() interface{}) {\n var result interface{}\n defer (func() {\n if err := recover(); err != nil {\n panic(err)\n }\n checkState(c, result,\n &expectedState{\n name: name,\n result: expectedResult,\n failed: false,\n log: \"\",\n })\n })()\n result = closure()\n}",
"func (v *visitor) checkRange(node *ast.RangeStmt) bool {\n\tvar keyObject *ast.Object\n\tvar valueObject *ast.Object\n\tif key, ok := node.Key.(*ast.Ident); ok {\n\t\tkeyObject = key.Obj\n\t} else if node.Key != nil {\n\t\tpanic(\"wtf\")\n\t}\n\tif value, ok := node.Value.(*ast.Ident); ok {\n\t\tvalueObject = value.Obj\n\t} else if node.Value != nil {\n\t\tpanic(\"wtf\")\n\t}\n\t// no variables: no possible errors\n\tif keyObject == nil && valueObject == nil {\n\t\treturn true\n\t}\n\n\tast.Inspect(node.Body, func(n ast.Node) bool {\n\t\tif unary, ok := n.(*ast.UnaryExpr); ok {\n\t\t\tif unary.Op == token.AND {\n\t\t\t\t// recurse: could be address of a compound expression, e.g. &(rangeVar.y)\n\t\t\t\t// fmt.Printf(\"!! WTF %s %s\\n\", reflect.ValueOf(unary.X), v.str(unary.X))\n\n\t\t\t\tast.Inspect(unary.X, func(n2 ast.Node) bool {\n\t\t\t\t\t// fmt.Printf(\"!!!! WTF %s %s\\n\", reflect.ValueOf(n2), v.str(n2))\n\t\t\t\t\tif ident, ok := n2.(*ast.Ident); ok {\n\t\t\t\t\t\tif ident.Obj == keyObject || ident.Obj == valueObject {\n\n\t\t\t\t\t\t\tv.warnings = append(v.warnings,\n\t\t\t\t\t\t\t\twarning{v.str(unary), unary.Pos(), v.rangestr(node), node.Pos()})\n\t\t\t\t\t\t\t// break early: no need to check more things\n\t\t\t\t\t\t\treturn false\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn true\n\t\t\t\t})\n\t\t\t\t// we already recursively inspected the children\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\treturn true\n\t})\n\n\treturn true\n}",
"func (c gosundheitCheck) Execute() (interface{}, error) { return c.checkFn() }",
"func (*CapturedStacktrace) Truth() starlark.Bool { return starlark.True }",
"func (cb callBacker) checkCallBoolArgInt(fn_name string, arg int64) (struct{ Called, Return bool }, error) {\n\tif cb.Scripter.HasEraValue(fn_name) {\n\t\tret, err := cb.Scripter.EraCallBoolArgInt(fn_name, arg)\n\t\treturn struct{ Called, Return bool }{true, ret}, err\n\t}\n\treturn struct{ Called, Return bool }{}, nil\n}",
"func (ReturnInst) isTerm() {}",
"func ExampleMustAbsorbTrytes() {}",
"func checkResponse(message string, b *Board, conn net.Conn) (r bool) {\n data := strings.Split(strings.TrimSuffix(message, \"\\r\\n\"), \" \")\n if len(data) == 3 {\n x, err1 := strconv.Atoi(data[1])\n y, err2 := strconv.Atoi(data[2])\n if (data[0] == \"SHOOT\" && err1 == nil && err2 == nil) {\n // if x == b.zombie.x && y == b.zombie.y {\n if x == 0 && y == 0 {\n b.won = true\n }\n return true\n }\n }\n conn.Write([]byte(\"INVALID INPUT\\n\"))\n return false\n}",
"func Return() Rule { return ReturnRule }",
"func checkReturnDepositCoin(L *lua.LState, idx int) *payload.ReturnDepositCoin {\n\tud := L.CheckUserData(idx)\n\tif v, ok := ud.Value.(*payload.ReturnDepositCoin); ok {\n\t\treturn v\n\t}\n\tL.ArgError(1, \"ReturnDepositCoin expected\")\n\treturn nil\n}",
"func (f *SubRepoPermissionCheckerEnabledFunc) PushReturn(r0 bool) {\n\tf.PushHook(func() bool {\n\t\treturn r0\n\t})\n}",
"func (m *mOutboundMockCanAccept) Return(r bool) *OutboundMock {\n\tm.mock.CanAcceptFunc = nil\n\tm.expectationSeries = nil\n\n\tif m.mainExpectation == nil {\n\t\tm.mainExpectation = &OutboundMockCanAcceptExpectation{}\n\t}\n\tm.mainExpectation.result = &OutboundMockCanAcceptResult{r}\n\treturn m.mock\n}",
"func (*InstZExt) isValue() {}",
"func checkresult(action string, photolng float64, citylng float64) bool{\n\treturn (photolng <= citylng && action == \"West\") || (photolng >= citylng && action == \"East\")\n}",
"func FuncAddRetMore() (error, bool) { return nil, false }",
"func (f *Machine) isLegal(a uint8, b uint8) bool {\n\treturn f.transitions.Search(serialize(a, b)) != nil\n}",
"func (c *T) Failed() bool",
"func (epc *EntryPointCreate) check() error {\n\tif _, ok := epc.mutation.CreateTime(); !ok {\n\t\treturn &ValidationError{Name: \"create_time\", err: errors.New(\"ent: missing required field \\\"create_time\\\"\")}\n\t}\n\tif _, ok := epc.mutation.UpdateTime(); !ok {\n\t\treturn &ValidationError{Name: \"update_time\", err: errors.New(\"ent: missing required field \\\"update_time\\\"\")}\n\t}\n\tif _, ok := epc.mutation.Role(); !ok {\n\t\treturn &ValidationError{Name: \"role\", err: errors.New(\"ent: missing required field \\\"role\\\"\")}\n\t}\n\tif v, ok := epc.mutation.Role(); ok {\n\t\tif err := entrypoint.RoleValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"role\", err: fmt.Errorf(\"ent: validator failed for field \\\"role\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := epc.mutation.ParentBlockID(); !ok {\n\t\treturn &ValidationError{Name: \"parent_block\", err: errors.New(\"ent: missing required edge \\\"parent_block\\\"\")}\n\t}\n\treturn nil\n}",
"func OK() ValidationError {\n\treturn ValidationError{}\n}",
"func myCondition(mycheck bool){\nif mycheck{\nfmt.Println(\"this ran\")\n}\nfmt.Println(\"wassup\")\n}",
"func (s *BaselimboListener) EnterFunction_arg_ret(ctx *Function_arg_retContext) {}",
"func (p DiceParser) check(t TokenType) bool {\n\treturn p.peek().Type == t\n}",
"func accept(err error) bool {\n\treturn err == nil || err == redis.Nil\n}",
"func _onlyOwner(){\n\tif !bytes.Equal(address.GetSignerAddress(), getOwner()) {\n\t\tpanic(\"this function is restricted!\")\n\t}\n}",
"func (resp *Response) OK() bool {\n\treturn resp.StatusCode < 400\n}",
"func (w *response) bodyAllowed() bool {\n\tif !w.wroteHeader {\n\t\tpanic(\"\")\n\t}\n\treturn bodyAllowedForStatus(w.status)\n}",
"func (m *publicAuth) ValidResAccess(pathArr []string, c *gin.Context) bool {\n\tvar userName string\n\tsession := sessions.Default(c)\n\trole := session.Get(\"role\")\n\tpathStr := c.Request.URL.Path\n\tmethod := c.Request.Method\n\n\t//admin have full privilege\n\tif nil != role {\n\t\tirole := role.(string)\n\t\tif \"1\" == irole {\n\t\t\treturn true\n\t\t}\n\t}\n\tiuserName := session.Get(common.WEBSessionUinKey)\n\tif nil == iuserName {\n\t\tblog.Error(\"user name error\")\n\t\treturn false\n\t}\n\n\tuserName = iuserName.(string)\n\n\t//index page or static page\n\tif 0 == len(pathArr) || \"\" == pathArr[1] || \"static\" == pathArr[1] {\n\t\treturn true\n\t}\n\n\t//valid privilege url must match session\n\tif strings.Contains(pathStr, types.BK_CC_PRIVI_PATTERN) {\n\t\tif pathArr[len(pathArr)-1] == userName {\n\t\t\treturn true\n\t\t}\n\t\tblog.Error(\"privilege user name error\")\n\t\treturn false\n\t}\n\t//search classfication return true\n\tif strings.Contains(pathStr, types.BK_CC_CLASSIFIC) && method == common.HTTPSelectPost {\n\t\treturn true\n\t}\n\n\t//search object attr return true\n\tif strings.Contains(pathStr, types.BK_CC_OBJECT_ATTR) && method == common.HTTPSelectPost {\n\t\treturn true\n\t}\n\n\t//usercustom return true\n\tif strings.Contains(pathStr, types.BK_CC_USER_CUSTOM) {\n\t\treturn true\n\t}\n\n\t//objectatt group return true\n\tif strings.Contains(pathStr, types.BK_OBJECT_ATT_GROUP) {\n\t\treturn true\n\t}\n\n\t//favorites return true\n\tif strings.Contains(pathStr, types.BK_CC_HOST_FAVORITES) {\n\t\treturn true\n\t}\n\n\t//search object return true\n\tif types.ObjectPatternRegexp.MatchString(pathStr) {\n\t\treturn true\n\t}\n\n\t//biz search privilege, return true\n\tif strings.Contains(pathStr, types.BK_APP_SEARCH) || strings.Contains(pathStr, types.BK_SET_SEARCH) || strings.Contains(pathStr, types.BK_MODULE_SEARCH) || strings.Contains(pathStr, types.BK_INST_SEARCH) || strings.Contains(pathStr, types.BK_HOSTS_SEARCH) {\n\t\treturn true\n\t}\n\tif strings.Contains(pathStr, types.BK_HOSTS_SNAP) || strings.Contains(pathStr, types.BK_HOSTS_HIS) {\n\t\treturn true\n\t}\n\tif strings.Contains(pathStr, types.BK_TOPO_MODEL) {\n\t\treturn true\n\t}\n\tif strings.Contains(pathStr, types.BK_INST_SEARCH_OWNER) && strings.Contains(pathStr, types.BK_OBJECT_PLAT) {\n\t\treturn true\n\t}\n\tif types.SearchPatternRegexp.MatchString(pathStr) {\n\t\treturn true\n\t}\n\tif strings.Contains(pathStr, types.BK_INST_ASSOCIATION_TOPO_SEARCH) {\n\t\treturn true\n\t}\n\tif strings.Contains(pathStr, types.BK_INST_ASSOCIATION_OWNER_SEARCH) {\n\t\treturn true\n\t}\n\t//valid resource config\n\tif types.ResPatternRegexp.MatchString(pathStr) {\n\t\tblog.Debug(\"valid resource config: %v\", pathStr)\n\t\tsysPrivi := session.Get(\"sysPrivi\")\n\t\treturn validSysConfigPrivi(sysPrivi, types.BK_CC_RESOURCE)\n\n\t}\n\n\t//valid inst privilege op\n\tif strings.Contains(pathStr, types.BK_INSTS) && !strings.Contains(pathStr, types.BK_TOPO) {\n\t\test := c.GetHeader(common.BKAppIDField)\n\t\tif \"\" == est {\n\t\t\t//common inst op valid\n\t\t\tmodelPrivi := session.Get(\"modelPrivi\").(string)\n\t\t\tif 0 == len(modelPrivi) {\n\t\t\t\tblog.Error(\"get model privilege json error\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn validModelConfigPrivi(modelPrivi, method, pathArr)\n\t\t} else {\n\t\t\t//mainline inst op valid\n\t\t\tvar objName string\n\t\t\tvar mainLineObjIDArr []string\n\t\t\tif method == common.HTTPCreate {\n\t\t\t\tobjName = pathArr[len(pathArr)-1]\n\t\t\t} else {\n\t\t\t\tobjName = 
pathArr[len(pathArr)-2]\n\t\t\t}\n\t\t\tmainLineObjIDStr := session.Get(\"mainLineObjID\").(string)\n\t\t\terr := json.Unmarshal([]byte(mainLineObjIDStr), &mainLineObjIDArr)\n\t\t\tif nil != err {\n\t\t\t\tblog.Error(\"get main line object id array false\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tif util.InStrArr(mainLineObjIDArr, objName) {\n\t\t\t\t//goo main line common object valid\n\t\t\t\tgoto appvalid\n\t\t\t}\n\n\t\t}\n\n\t\tblog.Error(\"valid inst error\")\n\t\treturn false\n\n\t}\n\n\t//valid inst import privilege\n\tif strings.Contains(pathStr, types.BK_INSTSI) && !strings.Contains(pathStr, types.BK_IMPORT) {\n\t\test := c.GetHeader(common.BKAppIDField)\n\t\tif \"\" == est {\n\t\t\tmodelPrivi := session.Get(\"modelPrivi\").(string)\n\t\t\tif 0 == len(modelPrivi) {\n\t\t\t\tblog.Error(\"get model privilege json error\")\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn validInstsOpPrivi(modelPrivi, method, pathArr)\n\t\t}\n\t\tblog.Error(\"valid inst error\")\n\t\treturn false\n\n\t}\n\n\tif len(pathArr) > 3 {\n\t\t//valid system config exclude resource\n\t\tpath3 := pathArr[3]\n\t\tif util.InArray(path3, types.BK_CC_MODEL_PRE) {\n\t\t\t//only admin config model privilege\n\t\t\tif \"1\" == role {\n\t\t\t\treturn true\n\t\t\t} else {\n\t\t\t\treturn false\n\t\t\t}\n\t\t}\n\t\tif util.InArray(path3, types.BK_CC_EVENT_PRE) {\n\t\t\t//valid event config privilege\n\t\t\tsysPrivi := session.Get(\"sysPrivi\")\n\t\t\treturn validSysConfigPrivi(sysPrivi, types.BK_CC_EVENT)\n\t\t}\n\t\tif util.InArray(path3, types.BK_CC_AUDIT_PRE) {\n\t\t\t//valid event config privilege\n\t\t\treturn true\n\t\t\t//\t\t\tsysPrivi := session.Get(\"sysPrivi\")\n\t\t\t//\t\t\treturn validSysConfigPrivi(sysPrivi, BK_CC_AUDIT)\n\t\t}\n\n\t}\n\n\t//valid biz operation privilege\nappvalid:\n\treturn validAppConfigPrivi(c, method, pathStr)\n\n}",
"func check(e error) {\r\n if e != nil {\r\n panic(e)\r\n }\r\n}",
"func (p *TestPager) Check() (bool, error) {\n return false, nil\n}",
"func goodFunction() {\n\treturn nil\n}",
"func justChecking()string {\n fmt.Println(\"YO .. man !.. I am A userdefined function ,, ready to GO\")\n return \"The function is working\"\n}",
"func check(t *testing.T, ck *Clerk, start int,end int, value string) {\n v := ck.Get(start,end)\n \n if v != value {\n t.Fatalf(\"Get(%v) -> %v, expected %v\", start,end, v, value)\n //fmt.Printf(\"Get(%v) -> %v, expected %v\", start,end, v, value)\n }\n}",
"func (a *attack) validate(res interface{}) bool {\n\tswitch res.(type) {\n\tcase *discoV1resp:\n\t\tr := res.(*discoV1resp)\n\t\tif r.EnrollURL != \"\" {\n\t\t\tendp, _ := URL.Parse(r.EnrollURL)\n\t\t\ta.log.Successf([]interface{}{endp.Hostname()}, \"Endpoint Discovery\")\n\t\t} else if r.GreenboxURL != \"\" {\n\t\t\tendp, _ := URL.Parse(r.GreenboxURL)\n\t\t\ta.samlURL = endp.Hostname()\n\t\t\ta.log.Successf([]interface{}{endp.Hostname()}, \"SAML Endpoint Discovery\")\n\t\t\treturn true\n\t\t} else if r.MDM.ServiceURL != \"\" {\n\t\t\tendp, _ := URL.Parse(r.MDM.ServiceURL)\n\t\t\ta.log.Successf([]interface{}{endp.Hostname()}, \"Endpoint Discovery\")\n\t\t\treturn true\n\t\t}\n\t\tif r.GroupID != \"\" {\n\t\t\ta.log.Successf([]interface{}{r.GroupID}, \"GroupID Discovery\")\n\t\t\treturn true\n\t\t} else if r.TenantGroup != \"\" {\n\t\t\ta.log.Successf([]interface{}{r.TenantGroup}, \"Tenant Discovery\")\n\t\t\treturn true\n\t\t} else if r.MDM.GroupID != \"\" {\n\t\t\ta.log.Successf([]interface{}{r.MDM.GroupID}, \"GroupID Discovery\")\n\t\t\treturn true\n\t\t}\n\n\tcase int:\n\t\tswitch res.(int) {\n\t\tcase 1:\n\t\t\ta.log.Failf([]interface{}{a.user, a.pass, res.(int)}, \"Registration Disabled\")\n\t\tcase 2:\n\t\t\ta.log.Successf([]interface{}{a.user, a.pass, res.(int)}, \"AirWatch Single-Factor Registration\")\n\t\t\treturn true\n\t\tcase 4:\n\t\t\ta.log.Successf([]interface{}{a.user, a.pass, res.(int)}, \"Single-Factor Registration\")\n\t\t\treturn true\n\t\tcase 8:\n\t\t\ta.log.Successf([]interface{}{a.user, a.pass, res.(int)}, \"Token Registration\")\n\t\t\treturn true\n\t\tcase 18:\n\t\t\ta.log.Successf([]interface{}{a.user, a.pass, res.(int)}, \"SAML Registration\")\n\t\t\treturn true\n\t\tdefault:\n\t\t\ta.log.Errorf([]interface{}{a.user, a.pass, res.(int)}, \"Unknown Registration\")\n\t\t}\n\n\tcase status:\n\t\tswitch res.(status).Code {\n\t\tcase 1:\n\t\t\ta.log.Successf([]interface{}{a.user, a.pass}, \"Authentication Successful: %s\", res.(status).Notification)\n\t\t\treturn true\n\t\tcase 2:\n\t\t\ta.log.Failf([]interface{}{a.user, a.pass}, \"Authentication Failure: %s\", res.(status).Notification)\n\t\tdefault:\n\t\t\ta.log.Errorf([]interface{}{a.user, a.pass}, \"Unknown Response: %s\", res.(status).Notification)\n\t\t}\n\n\tcase string:\n\t\tswitch res.(string) {\n\t\tcase \"AUTH--1\":\n\t\t\ta.log.Failf([]interface{}{a.user, a.pass, res.(string)}, \"Invalid GroupID\")\n\t\tcase \"AUTH-1001\":\n\t\t\ta.log.Failf([]interface{}{a.user, a.pass, res.(string)}, \"Authentication Failure\")\n\t\tcase \"AUTH-1002\":\n\t\t\ta.log.Failf([]interface{}{a.user, a.pass, res.(string)}, \"Account Lockout\")\n\t\tcase \"AUTH-1003\":\n\t\t\ta.log.Failf([]interface{}{a.user, a.pass, res.(string)}, \"Account Disabled\")\n\t\tcase \"AUTH-1006\":\n\t\t\ta.log.Successf([]interface{}{a.user, a.pass, res.(string)}, \"Authentication Successful\")\n\t\t\treturn true\n\t\tdefault:\n\t\t\ta.log.Errorf([]interface{}{a.user, a.pass, res.(string)}, \"Unknown Response\")\n\t\t}\n\t}\n\treturn false\n}",
"func (a API) SubmitBlockChk() (isNew bool) {\n\tselect {\n\tcase o := <-a.Ch.(chan SubmitBlockRes):\n\t\tif o.Err != nil {\n\t\t\ta.Result = o.Err\n\t\t} else {\n\t\t\ta.Result = o.Res\n\t\t}\n\t\tisNew = true\n\tdefault:\n\t}\n\treturn\n}",
"func checkInvoke_forError(t *testing.T, stub *CouchDBMockStub, function string, args []byte) pb.Response {\n\tmockInvokeArgs := [][]byte{[]byte(function), args}\n\ttxId := generateTransactionId()\n\tres := stub.MockInvoke(txId, mockInvokeArgs)\n\tif res.Status != shim.OK {\n\t}\n\treturn res\n}",
"func TestReturnIf(t *testing.T) {\n\tt.Logf(\"%s\", utl.ReturnIf(5 > 4, \"It's true\", \"It's false\"))\n}",
"func (me TEventType) IsAssignmentReturned() bool { return me.String() == \"AssignmentReturned\" }",
"func defaultGuard(interface{}, Event) bool { return true }",
"func (*InstSIToFP) isValue() {}",
"func (pr *prepareResult) check(qd *queryDescr) error {\n\tcall := qd.kind == qkCall\n\tif call != pr.fc.IsProcedureCall() {\n\t\treturn fmt.Errorf(\"function code mismatch: query descriptor %s - function code %s\", qd.kind, pr.fc)\n\t}\n\n\tif !call {\n\t\t// only input parameters allowed\n\t\tfor _, f := range pr.parameterFields {\n\t\t\tif f.Out() {\n\t\t\t\treturn fmt.Errorf(\"invalid parameter %s\", f)\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func checkStatus(expected, result int, final bool) bool {\n if expected == result {\n if final {\n fmt.Println(\"PASS\")\n passCount++\n }\n return true\n }\n fmt.Println(\"FAIL\")\n failCount++\n return false\n}",
"func (r *GetReservationRS) Ok() bool {\n\treturn len(r.Errors.Error) <= 0\n}",
"func (e BadRequest) IsBadRequest() {}",
"func (*text) isOutput() {\n}",
"func is_accepted(w http.ResponseWriter, r *http.Request) {\r\n\tfmt.Println(\"\\n Api Hit====>isAccepted\")\r\n\tvar vars = mux.Vars(r)\r\n\tvar id = vars[\"id\"]\r\n\tproc := cache[id]\r\n\tflag := isAccepted(proc)\r\n\tif flag {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens successfully Accepted\")\r\n\t} else {\r\n\t\tjson.NewEncoder(w).Encode(\"Input tokens Rejected by the PDA\")\r\n\t}\r\n}",
"func check(e error) {\n\t// fmt.Println(\"check\")\n\tif e != nil {\n\t\tfmt.Println(\"err != nil\")\n\t\tlog.Fatal(e)\n\t}\n}",
"func (s *BasePlSqlParserListener) EnterReturn_statement(ctx *Return_statementContext) {}",
"func main(){\n\n\tfmt.Println(checkInclusion(\"ab\",\"eidbaooo\"))\n\n}",
"func (o *GetExecutionByIndexAndPipelineIDUsingGETUnauthorized) IsSuccess() bool {\n\treturn false\n}",
"func (r *singleRule) allowVisit(key interface{}) bool {\n\treturn r.add(key) == nil\n}",
"func (p *Parser) check(tokenType TokenType) bool {\n\treturn p.current.Type == tokenType\n}",
"func (*InstSExt) isValue() {}",
"func (e InvalidCredentials) IsInvalidCredentials() {}",
"func okHealthCheck(proxy *Proxy) error {\n\treturn nil\n}",
"func (self *TileSprite) OutOfBoundsKill() bool{\n return self.Object.Get(\"outOfBoundsKill\").Bool()\n}",
"func (m Match) Ok() bool { return len(m.Errors) == 0 }",
"func doubleReturn(fn, ln string) (string, bool) {\n\tformalName := fmt.Sprint(fn, \" \", ln)\n\tvar isAdmin bool\n\tif strings.Contains(strings.ToLower(fn), \"hanming\") {\n\t\tisAdmin = true\n\t}\n\treturn formalName, isAdmin\n}",
"func (ac *AreahistoryCreate) check() error {\n\tif _, ok := ac.mutation.WalletID(); !ok {\n\t\treturn &ValidationError{Name: \"WalletID\", err: errors.New(\"ent: missing required field \\\"WalletID\\\"\")}\n\t}\n\tif v, ok := ac.mutation.WalletID(); ok {\n\t\tif err := areahistory.WalletIDValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"WalletID\", err: fmt.Errorf(\"ent: validator failed for field \\\"WalletID\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := ac.mutation.ProvinceNameTH(); ok {\n\t\tif err := areahistory.ProvinceNameTHValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"ProvinceNameTH\", err: fmt.Errorf(\"ent: validator failed for field \\\"ProvinceNameTH\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := ac.mutation.DistrictNameTH(); ok {\n\t\tif err := areahistory.DistrictNameTHValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"DistrictNameTH\", err: fmt.Errorf(\"ent: validator failed for field \\\"DistrictNameTH\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := ac.mutation.SubDistrict(); ok {\n\t\tif err := areahistory.SubDistrictValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"SubDistrict\", err: fmt.Errorf(\"ent: validator failed for field \\\"SubDistrict\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}",
"func (self *TileSprite) OutOfCameraBoundsKill() bool{\n return self.Object.Get(\"outOfCameraBoundsKill\").Bool()\n}",
"func TestTryReturn(t *testing.T) {\n\tv, err := Try(func(throw Thrower) int {\n\t\treturn 1\n\t})\n\tassert.Nil(t, err)\n\tassert.Equal(t, 1, v, \"Result should be what was returned\")\n}",
"func _return(i Instruction, ls *LuaState) {\n\ta, b, _ := i.ABC()\n\ta += 1\n\n\tif b == 1 {\n\t\t// no return values\n\t} else if b > 1 {\n\t\t// b-1 return values\n\t\tluaCheckStack(ls, b-1)\n\t\tfor i := a; i <= a+b-2; i++ {\n\t\t\tluaPushValue(ls, i)\n\t\t}\n\t} else {\n\t\t_fixStack(a, ls)\n\t}\n}",
"func (*InstUIToFP) isValue() {}",
"func main() {\n\tfmt.Printf(\"valid for part 1: %v\\n\", part1())\n\tfmt.Printf(\"valid for part 2: %v\\n\", part2())\n}",
"func (lp *loop) HasReturned() bool {\n\treturn len(lp.returnCh) == 1\n}",
"func check(e error, m string) {\n\tif e != nil {\n\t\tprintln(m)\n\t\tos.Exit(1)\n\t}\n}",
"func (t *SBI) isCaller(stub shim.ChaincodeStubInterface, certificate []byte) (bool, error) {\n\tmyLogger.Debugf(\"Check caller...\")\n\tfmt.Printf(\"PDD-DBG: Check caller...\")\n\n\tsigma, err := stub.GetCallerMetadata()\n\tif err != nil {\n\t\treturn false, errors.New(\"Failed getting metadata\")\n\t}\n\tpayload, err := stub.GetPayload()\n\tif err != nil {\n\t\treturn false, errors.New(\"Failed getting payload\")\n\t}\n\tbinding, err := stub.GetBinding()\n\tif err != nil {\n\t\treturn false, errors.New(\"Failed getting binding\")\n\t}\n\n\tmyLogger.Debugf(\"passed certificate [% x]\", certificate)\n\tmyLogger.Debugf(\"passed sigma [% x]\", sigma)\n\tmyLogger.Debugf(\"passed payload [% x]\", payload)\n\tmyLogger.Debugf(\"passed binding [% x]\", binding)\n\n\tfmt.Printf(\"PDD-DBG: passed certificate [% x]\", certificate)\n\tfmt.Printf(\"PDD-DBG: passed sigma [% x]\", sigma)\n\tfmt.Printf(\"PDD-DBG: passed payload [% x]\", payload)\n\tfmt.Printf(\"PDD-DBG: passed binding [% x]\", binding)\n\n\tok, err := stub.VerifySignature(\n\t\tcertificate,\n\t\tsigma,\n\t\tappend(payload, binding...),\n\t)\n\tif err != nil {\n\t\tmyLogger.Error(\"Failed checking signature \", err.Error())\n\t\tfmt.Printf(\"PDD-DBG: Failed checking signature %s\", err.Error())\n\t\treturn ok, err\n\t}\n\tif !ok {\n\t\tmyLogger.Error(\"Invalid signature\")\n\t\tfmt.Printf(\"PDD-DBG: Invalid signature\")\n\t}\n\n\t//myLogger.Debug(\"Check caller...Verified!\")\n\t//fmt.Printf(\"PDD-DBG: Check caller...Verified!\")\n\n\treturn ok, err\n}",
"func (acc *AccessControlCreate) check() error {\n\tif _, ok := acc.mutation.ServiceID(); !ok {\n\t\treturn &ValidationError{Name: \"service_id\", err: errors.New(\"ent: missing required field \\\"service_id\\\"\")}\n\t}\n\tif _, ok := acc.mutation.OpenAuth(); !ok {\n\t\treturn &ValidationError{Name: \"open_auth\", err: errors.New(\"ent: missing required field \\\"open_auth\\\"\")}\n\t}\n\tif _, ok := acc.mutation.BlackList(); !ok {\n\t\treturn &ValidationError{Name: \"black_list\", err: errors.New(\"ent: missing required field \\\"black_list\\\"\")}\n\t}\n\tif _, ok := acc.mutation.WhiteList(); !ok {\n\t\treturn &ValidationError{Name: \"white_list\", err: errors.New(\"ent: missing required field \\\"white_list\\\"\")}\n\t}\n\tif _, ok := acc.mutation.WhiteHostName(); !ok {\n\t\treturn &ValidationError{Name: \"white_host_name\", err: errors.New(\"ent: missing required field \\\"white_host_name\\\"\")}\n\t}\n\tif _, ok := acc.mutation.ClientipFlowLimit(); !ok {\n\t\treturn &ValidationError{Name: \"clientip_flow_limit\", err: errors.New(\"ent: missing required field \\\"clientip_flow_limit\\\"\")}\n\t}\n\tif _, ok := acc.mutation.ServiceFlowLimit(); !ok {\n\t\treturn &ValidationError{Name: \"service_flow_limit\", err: errors.New(\"ent: missing required field \\\"service_flow_limit\\\"\")}\n\t}\n\treturn nil\n}",
"func main() {\n\t// fmt.Println(isValidRow([]byte{8, 3, 3, '.', 7, '.', '.', '.', '.'}))\n}",
"func (s *State) acceptable(addr string, point string) bool {\n\tif s.optionalValidator == nil {\n\t\tif addr == point {\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\n\ts.vo.Lock()\n\tstate := s.optionalValidator(addr, point)\n\ts.vo.Unlock()\n\treturn state\n}",
"func check(e error) {\n\tif e != nil {\n\t\tpanic(e)\n\n\t}\n}",
"func (s *BasevhdlListener) EnterReturn_statement(ctx *Return_statementContext) {}",
"func requestOk(w http.ResponseWriter, r *http.Request) bool {\n\tif r.Body == nil {\n\t\thttp.Error(w, \"Please send a request body\", 400)\n\t\treturn false\n\t}\n\treturn true\n}",
"func (p policyGetCheck) check(contextMap map[string]string) (ok bool, err error) {\n\tok = true\n\tvar c = client.NewPolicyValidatorAPI(p.cluster.VanClient)\n\n\t// allowIncoming\n\tif p.allowIncoming != nil {\n\n\t\tvar res *client.PolicyAPIResult\n\n\t\tres, err = c.IncomingLink()\n\n\t\tif err != nil {\n\t\t\tok = false\n\t\t\tlog.Printf(\"IncomingLink check failed with error %v\", err)\n\t\t\treturn\n\t\t}\n\n\t\tif res.Allowed != *p.allowIncoming {\n\t\t\tlog.Printf(\"Unexpected IncomingLink result (%v)\", res.Allowed)\n\t\t\tok = false\n\t\t}\n\t}\n\n\t// allowedHosts and allowedServices\n\t// All tests are very similar, so we run them with a table\n\tlists := []struct {\n\t\tname string\n\t\tlist []string\n\t\tfunction checkString\n\t\texpect bool\n\t}{\n\t\t{\n\t\t\tname: \"allowedHosts\",\n\t\t\tlist: p.allowedHosts,\n\t\t\tfunction: c.OutgoingLink,\n\t\t\texpect: true,\n\t\t}, {\n\t\t\tname: \"disallowedHosts\",\n\t\t\tlist: p.disallowedHosts,\n\t\t\tfunction: c.OutgoingLink,\n\t\t\texpect: false,\n\t\t}, {\n\t\t\tname: \"allowedServices\",\n\t\t\tlist: p.allowedServices,\n\t\t\tfunction: c.Service,\n\t\t\texpect: true,\n\t\t}, {\n\t\t\tname: \"disallowedServices\",\n\t\t\tlist: p.disallowedServices,\n\t\t\tfunction: c.Service,\n\t\t\texpect: false,\n\t\t},\n\t}\n\n\tfor _, list := range lists {\n\t\t// If not configured, just move on\n\t\tif len(list.list) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\t// Template the list, in case we want to get something from the context\n\t\ttemplatedList, err := templateStringList(contextMap, list.list...)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed templating %v: %v\", list.name, err)\n\t\t\treturn false, err\n\t\t}\n\t\t// Run the configured function for each element on the list\n\t\tfor _, element := range templatedList {\n\t\t\tvar res *client.PolicyAPIResult\n\t\t\tres, err = list.function(element)\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"%v check failed with error %v\", list.name, err)\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif res.Allowed != list.expect {\n\t\t\t\tlog.Printf(\"Unexpected %v result for %v (%v)\", list.name, element, res.Allowed)\n\t\t\t\tok = false\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// allowedResources is different from the others, as its function takes two\n\t// arguments. Still, allowed and disallowed follow similar paths, so we\n\t// use a table\n\tresourceItems := []struct {\n\t\tallow bool\n\t\tlist []string\n\t}{\n\t\t{\n\t\t\tallow: true,\n\t\t\tlist: p.allowedResources,\n\t\t}, {\n\t\t\tallow: false,\n\t\t\tlist: p.disallowedResources,\n\t\t},\n\t}\n\tfor _, resourceItem := range resourceItems {\n\t\t// Template the list, in case we want to get something from the context\n\t\ttemplatedList, err := templateStringList(contextMap, resourceItem.list...)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed templating exposed resources (%v): %v\", resourceItem.allow, err)\n\t\t\treturn false, err\n\t\t}\n\t\tfor _, element := range templatedList {\n\t\t\tvar res *client.PolicyAPIResult\n\t\t\tsplitted := strings.SplitN(element, \"/\", 2)\n\t\t\tif len(splitted) != 2 {\n\t\t\t\t// TODO: should we try to do something else, instead? 
Fail, perhaps?\n\t\t\t\tlog.Printf(\"Ignoring GET check for resource without '/': %v\", element)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tres, err = c.Expose(splitted[0], splitted[1])\n\t\t\tif err != nil {\n\t\t\t\tlog.Printf(\"Resource check failed with error %v\", err)\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t\tif res.Allowed != resourceItem.allow {\n\t\t\t\tlog.Printf(\"Unexpected resource result: %v(%v)\", element, res.Allowed)\n\t\t\t\tok = false\n\t\t\t}\n\t\t}\n\t}\n\n\treturn\n}",
"func (lc *LoanbindingCreate) check() error {\n\tif v, ok := lc.mutation.Status(); ok {\n\t\tif err := loanbinding.StatusValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"Status\", err: fmt.Errorf(\"ent: validator failed for field \\\"Status\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := lc.mutation.WalletId(); ok {\n\t\tif err := loanbinding.WalletIdValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"WalletId\", err: fmt.Errorf(\"ent: validator failed for field \\\"WalletId\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := lc.mutation.AccountReference(); ok {\n\t\tif err := loanbinding.AccountReferenceValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"AccountReference\", err: fmt.Errorf(\"ent: validator failed for field \\\"AccountReference\\\": %w\", err)}\n\t\t}\n\t}\n\tif v, ok := lc.mutation.LoanAccountNo(); ok {\n\t\tif err := loanbinding.LoanAccountNoValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"LoanAccountNo\", err: fmt.Errorf(\"ent: validator failed for field \\\"LoanAccountNo\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}",
"func (arc *AppointmentResultsCreate) check() error {\n\tif _, ok := arc.mutation.CauseAppoint(); !ok {\n\t\treturn &ValidationError{Name: \"causeAppoint\", err: errors.New(\"ent: missing required field \\\"causeAppoint\\\"\")}\n\t}\n\tif v, ok := arc.mutation.CauseAppoint(); ok {\n\t\tif err := appointmentresults.CauseAppointValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"causeAppoint\", err: fmt.Errorf(\"ent: validator failed for field \\\"causeAppoint\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := arc.mutation.Advice(); !ok {\n\t\treturn &ValidationError{Name: \"advice\", err: errors.New(\"ent: missing required field \\\"advice\\\"\")}\n\t}\n\tif v, ok := arc.mutation.Advice(); ok {\n\t\tif err := appointmentresults.AdviceValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"advice\", err: fmt.Errorf(\"ent: validator failed for field \\\"advice\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := arc.mutation.DateAppoint(); !ok {\n\t\treturn &ValidationError{Name: \"dateAppoint\", err: errors.New(\"ent: missing required field \\\"dateAppoint\\\"\")}\n\t}\n\tif _, ok := arc.mutation.TimeAppoint(); !ok {\n\t\treturn &ValidationError{Name: \"timeAppoint\", err: errors.New(\"ent: missing required field \\\"timeAppoint\\\"\")}\n\t}\n\tif _, ok := arc.mutation.AddtimeSave(); !ok {\n\t\treturn &ValidationError{Name: \"addtimeSave\", err: errors.New(\"ent: missing required field \\\"addtimeSave\\\"\")}\n\t}\n\tif _, ok := arc.mutation.HourBeforeAppoint(); !ok {\n\t\treturn &ValidationError{Name: \"hourBeforeAppoint\", err: errors.New(\"ent: missing required field \\\"hourBeforeAppoint\\\"\")}\n\t}\n\tif v, ok := arc.mutation.HourBeforeAppoint(); ok {\n\t\tif err := appointmentresults.HourBeforeAppointValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"hourBeforeAppoint\", err: fmt.Errorf(\"ent: validator failed for field \\\"hourBeforeAppoint\\\": %w\", err)}\n\t\t}\n\t}\n\tif _, ok := arc.mutation.MinuteBeforeAppoint(); !ok {\n\t\treturn &ValidationError{Name: \"minuteBeforeAppoint\", err: errors.New(\"ent: missing required field \\\"minuteBeforeAppoint\\\"\")}\n\t}\n\tif v, ok := arc.mutation.MinuteBeforeAppoint(); ok {\n\t\tif err := appointmentresults.MinuteBeforeAppointValidator(v); err != nil {\n\t\t\treturn &ValidationError{Name: \"minuteBeforeAppoint\", err: fmt.Errorf(\"ent: validator failed for field \\\"minuteBeforeAppoint\\\": %w\", err)}\n\t\t}\n\t}\n\treturn nil\n}",
"func (o *GetMachineSnapshotsForbidden) IsSuccess() bool {\n\treturn false\n}",
"func In(err error) bool {\n\tfor err != nil {\n\t\tif _, ok := err.(*usererr); ok {\n\t\t\treturn true\n\t\t}\n\t\terr = unwrap(err)\n\t}\n\treturn false\n}",
"func check(e error) {\n if e != nil {\n panic(e)\n }\n}",
"func check(e error) {\n if e != nil {\n panic(e)\n }\n}",
"func check(e error) {\n if e != nil {\n panic(e)\n }\n}",
"func check(e error) {\n if e != nil {\n panic(e)\n }\n}",
"func validateProxyVerbRequest(client *http.Client, urlString string, httpVerb string, msg string) func() (bool, error) {\n\treturn func() (bool, error) {\n\t\tvar err error\n\n\t\trequest, err := http.NewRequest(httpVerb, urlString, nil)\n\t\tif err != nil {\n\t\t\tframework.Logf(\"Failed to get a new request. %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\n\t\tresp, err := client.Do(request)\n\t\tif err != nil {\n\t\t\tframework.Logf(\"Failed to get a response. %v\", err)\n\t\t\treturn false, nil\n\t\t}\n\t\tdefer resp.Body.Close()\n\n\t\tbuf := new(bytes.Buffer)\n\t\tbuf.ReadFrom(resp.Body)\n\t\tresponse := buf.String()\n\n\t\tswitch httpVerb {\n\t\tcase \"HEAD\":\n\t\t\tframework.Logf(\"http.Client request:%s | StatusCode:%d\", httpVerb, resp.StatusCode)\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\tdefault:\n\t\t\tvar jr *jsonResponse\n\t\t\terr = json.Unmarshal([]byte(response), &jr)\n\t\t\tif err != nil {\n\t\t\t\tframework.Logf(\"Failed to process jsonResponse. %v\", err)\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tframework.Logf(\"http.Client request:%s | StatusCode:%d | Response:%s | Method:%s\", httpVerb, resp.StatusCode, jr.Body, jr.Method)\n\t\t\tif resp.StatusCode != 200 {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif msg != jr.Body {\n\t\t\t\treturn false, nil\n\t\t\t}\n\n\t\t\tif httpVerb != jr.Method {\n\t\t\t\treturn false, nil\n\t\t\t}\n\t\t\treturn true, nil\n\t\t}\n\t}\n}",
"func (g gate) tryEnter() bool {\n\tselect {\n\tcase g <- struct{}{}:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (checker *CheckerType) Check(obtained, expected interface{}) (\n result bool, error string) {\n return false, \"\"\n}"
] | [
"0.5657477",
"0.55718315",
"0.5475712",
"0.545561",
"0.53494847",
"0.5347773",
"0.52995455",
"0.5298312",
"0.52896863",
"0.52896863",
"0.5246763",
"0.52449787",
"0.5231858",
"0.52129644",
"0.51947147",
"0.51889956",
"0.51766586",
"0.5143849",
"0.51412374",
"0.5135862",
"0.51291287",
"0.5117141",
"0.51033247",
"0.50846964",
"0.5078592",
"0.5064721",
"0.5061912",
"0.5051978",
"0.50510836",
"0.50265735",
"0.5024443",
"0.50184935",
"0.49952734",
"0.4977106",
"0.49627587",
"0.49504116",
"0.4949768",
"0.49480656",
"0.49362725",
"0.49353442",
"0.49294415",
"0.4927413",
"0.4922854",
"0.49224785",
"0.4918272",
"0.49167448",
"0.49132508",
"0.4912469",
"0.49076357",
"0.49067906",
"0.49045604",
"0.4898774",
"0.48980188",
"0.48979267",
"0.48938525",
"0.48880458",
"0.48809966",
"0.48779395",
"0.48715305",
"0.48677462",
"0.48649445",
"0.48592317",
"0.4848589",
"0.48468718",
"0.48458362",
"0.4839551",
"0.48378605",
"0.48325366",
"0.4830894",
"0.48253444",
"0.48229694",
"0.4822429",
"0.4819283",
"0.4815994",
"0.48101276",
"0.48099574",
"0.48035085",
"0.47997645",
"0.47950864",
"0.47912005",
"0.47893637",
"0.47840407",
"0.47835645",
"0.4782619",
"0.47823343",
"0.47820613",
"0.47814643",
"0.47814488",
"0.47732297",
"0.47689474",
"0.4768695",
"0.47659656",
"0.47587833",
"0.475867",
"0.47561803",
"0.47561803",
"0.47561803",
"0.47561803",
"0.47482616",
"0.4744067",
"0.474302"
] | 0.0 | -1 |
MagSq returns the squared magnitude of the Vec2. Useful if you only want to compare two of them but don't care about their actual lengths. | func (v Vec2) MagSq() float64 {
return v.X*v.X + v.Y*v.Y
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v Vec2) Mag() float64 {\n\treturn math.Sqrt(v.MagSq())\n}",
"func (a Vec2) LengthSq() float64 {\n\treturn a.X*a.X + a.Y*a.Y\n}",
"func (v Vector2) LengthSq() (m float32) {\n\treturn v.Dot(v)\n}",
"func _v2dMag(v *Vec2d) float64 { return math.Sqrt(v.x*v.x + v.y*v.y) }",
"func (v0 Vector2) DistanceSq(v1 Vector2) float32 {\n\treturn v0.Sub(v1).LengthSq()\n}",
"func (myVec Vec2f) Magnitude() float {\n\treturn math.Sqrt(myVec.x * myVec.x + myVec.y * myVec.y)\n}",
"func DistSq(v1, v2 Vect) f.Float { return LengthSq(Sub(v1, v2)) }",
"func (v Vec3) SqrMagnitude() float64 {\n\treturn v[0]*v[0] + v[1]*v[1] + v[2]*v[2]\n}",
"func (a Vector3) SqrMag() float32 {\n\treturn float32(a.X*a.X + a.Y*a.Y + a.Z*a.Z)\n}",
"func (v Vec) Mag() float64 {\n\treturn math.Sqrt(v.Dot(v))\n}",
"func LengthSq(v Vect) f.Float { return Dot(v, v) }",
"func Vector2LenSqr(vector Vector2) float32 {\n\treturn vector.X*vector.X + vector.Y*vector.Y\n}",
"func (v Vector) Mag() float64 {\n\treturn math.Sqrt(v.Dot(v))\n}",
"func (v *Vec4) LengthSq() float32 {\n\treturn Fsqr32(v.X) + Fsqr32(v.Y) + Fsqr32(v.Z) + Fsqr32(v.W)\n}",
"func (p Vect) LengthSq() f.Float { return Dot(p, p) }",
"func (v *Vector) DistanceSq(u *Vector) float64 {\n\tdx := (v.X - u.X)\n\tdy := (v.Y - u.Y)\n\n\tdist := ((dx * dx) + (dy * dy))\n\treturn dist\n}",
"func (v *Vector) LengthSquared() float64 {\n\treturn v.Dot(v)\n}",
"func (v *Vector3) SquaredLength() float64 {\n\treturn v.X*v.X + v.Y*v.Y + v.Z*v.Z\n}",
"func (v *Vector) LengthSquared() float64 {\n\treturn (v.X * v.X) + (v.Y * v.Y)\n\n}",
"func (v Vector) Magn() float64 {\n\treturn math.Sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])\n}",
"func (v Vector) Magnitude() float64 {\n\tret := float64(0)\n\tfor _, val := range v.data {\n\t\tret += math.Pow(val, 2)\n\t}\n\n\treturn math.Sqrt(ret)\n}",
"func (v Vec3) Magnitude() float64 {\n\treturn math.Sqrt(v.SqrMagnitude())\n}",
"func (l *Line) SquaredLength() float64 {\n\treturn l.q.Subtract(l.p).SquaredMagnitude()\n}",
"func (gdt *Vector3) LengthSquared() Real {\n\targ0 := gdt.getBase()\n\n\tret := C.go_godot_vector3_length_squared(GDNative.api, arg0)\n\n\treturn Real(ret)\n}",
"func (v Vector3D) DistanceSquared(o Vector3D) int {\n\treturn v.Sub(o).LengthSquared()\n}",
"func sqrt2(x float64) string {\n\tif x < 0 {\n\t\treturn sqrt(-x) + \"i\"\n\t}\n\treturn fmt.Sprint(math.Sqrt(x))\n}",
"func (c *Clac) Mag() error {\n\treturn c.applyFloat(variadic, func(vals []value.Value) (value.Value, error) {\n\t\te := &eval{}\n\t\tmagSq, _ := reduceFloat(zero, vals, func(a, b value.Value) (value.Value, error) {\n\t\t\tmag := e.binary(a, \"+\", e.binary(b, \"*\", b))\n\t\t\treturn mag, e.err\n\t\t})\n\t\treturn unary(\"sqrt\", magSq)\n\t})\n}",
"func (v Vector2) Length() float64 {\r\n\tsumOfSquares := v.x*v.x + v.y*v.y\r\n\treturn math.Sqrt(sumOfSquares)\r\n}",
"func (vec Vector2) Len2() float32 {\n\treturn vec.X*vec.X + vec.Y*vec.Y\n}",
"func (a Vec2) Length() float64 {\n\treturn math.Sqrt(a.X*a.X + a.Y*a.Y)\n}",
"func (vec Vector2) Distance(vec2 Vector2) float32 {\n\txd := vec2.X - vec.X\n\tyd := vec2.Y - vec.Y\n\treturn Sqrt(xd*xd + yd*yd)\n}",
"func (v Vector2) Length() float32 {\n\treturn Sqrtf(v.LengthSq())\n}",
"func (v Vector3D) LengthSquared() int {\n\treturn v.x*v.x + v.y*v.y + v.z*v.z\n}",
"func Vlengthsq(v Vect) float64 {\n\treturn float64(C.cpvlengthsq(v.c()))\n}",
"func (vec Vector2) Distance2(vec2 Vector2) float32 {\n\txd := vec2.X - vec.X\n\tyd := vec2.Y - vec.Y\n\treturn xd*xd + yd*yd\n}",
"func (p Point) Distance2(p2 Point) float32 {\n\treturn p.VecTo(p2).Len2()\n}",
"func (gdt *Vector3) DistanceSquaredTo(b Vector3) Real {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_distance_squared_to(GDNative.api, arg0, arg1)\n\n\treturn Real(ret)\n}",
"func (v1 Vec3) SqrLength() float32 {\n\treturn float32(math.Pow(float64(v1.X()), 2) + math.Pow(float64(v1.Y()), 2) + math.Pow(float64(v1.Z()), 2))\n}",
"func Vector2Length(v Vector2) float32 {\n\treturn float32(math.Sqrt(float64((v.X * v.X) + (v.Y * v.Y))))\n}",
"func Magnitude(v Vector) float64 {\n\treturn math.Sqrt(math.Pow(v.I, 2) + math.Pow(v.J, 2) + math.Pow(v.K, 2))\n}",
"func (v Vector2) Length() float64 {\r\n\treturn v.TransposedMul(v)\r\n}",
"func (t *Tuple) Magnitude() float64 {\n\treturn math.Sqrt(square(t.x) +\n\t\tsquare(t.y) +\n\t\tsquare(t.z) +\n\t\tsquare(t.w))\n}",
"func (p Point2D) Distance2(p2 Point2D) float32 {\n\treturn p.VecTo(p2).Len2()\n}",
"func (p PointI) Distance2(p2 PointI) int32 {\n\treturn p.VecTo(p2).Len2()\n}",
"func (v0 Vector2) Distance(v1 Vector2) float32 {\n\treturn Sqrtf(v0.DistanceSq(v1))\n}",
"func (r *sparseRow) L2Norm() float64 {\n\treturn math.Sqrt(r.Square())\n}",
"func (v Vec3) SquareNorm() float32 {\n\treturn v[0]*v[0] + v[1]*v[1] + v[2]*v[2]\n}",
"func Vec3Length2(a Vec3) float32 {\n\treturn a[0]*a[0] + a[1]*a[1] + a[2]*a[2]\n}",
"func SqrDist(a, b Vector3) float32 {\n\treturn (Difference(a, b).SqrMag())\n}",
"func (q Quat) SquareLength() float32 {\n\treturn q.W*q.W + q.X*q.X + q.Y*q.Y + q.Z*q.Z\n}",
"func Vector2Scale(v Vector2, scale float32) Vector2 {\n\treturn NewVector2(v.X*scale, v.Y*scale)\n}",
"func L2norm64(x Vector64) float64 {\n\tif len(x) == 0 {\n\t\treturn math.NaN()\n\t}\n\treturn math.Pow(Dot64(x, x), 0.5)\n}",
"func (self *T) LengthSqr() float64 {\n\treturn self[0]*self[0] + self[1]*self[1]\n}",
"func (v Vector2) Normalize() Vector2 {\n\treturn v.ScalarMultiply(1.0 / v.Length())\n}",
"func Distance2(x1, y1, x2, y2 float64) float64 {\n\treturn math.Sqrt(\n\t\tmath.Pow(x1-x2, 2) +\n\t\t\tmath.Pow(y1-y2, 2))\n}",
"func (v Vector2) Normalize() Vector {\r\n\tif v.Length() == 0 {\r\n\t\treturn Vector2d(rand.Float64(), rand.Float64())\r\n\t}\r\n\r\n\treturn v.MulScalar(1 / v.Length())\r\n}",
"func Magnitude(r [3]float64) float64 {\n return math.Abs(math.Sqrt(r[1]*r[1] + r[2]*r[2] + r[0]*r[0]))\n}",
"func (a Vec2) Normalized() (v Vec2, ok bool) {\n\tlength := math.Sqrt(a.X*a.X + a.Y*a.Y)\n\tif Equal(length, 0) {\n\t\treturn Vec2Zero, false\n\t}\n\treturn Vec2{\n\t\ta.X / length,\n\t\ta.Y / length,\n\t}, true\n}",
"func GetMagnitude(p *Point) float64 {\n\treturn math.Sqrt(GetMagnitudeSquared(p))\n}",
"func (vn *VecN) Vec2() Vec2 {\n\traw := vn.Raw()\n\treturn Vec2{raw[0], raw[1]}\n}",
"func Vector2Normalize(v Vector2) Vector2 {\n\treturn Vector2Scale(v, 1/Vector2Length(v))\n}",
"func Vector2Distance(v1, v2 Vector2) float32 {\n\treturn float32(math.Sqrt(float64((v1.X-v2.X)*(v1.X-v2.X) + (v1.Y-v2.Y)*(v1.Y-v2.Y))))\n}",
"func (vn *VecN) LenSqr() float64 {\n\tif vn == nil {\n\t\treturn float64(math.NaN())\n\t}\n\tif len(vn.vec) == 0 {\n\t\treturn 0\n\t}\n\n\treturn vn.Dot(vn)\n}",
"func (t *Mat2) MulVec2(v *vector2.Vector) vector2.Vector {\n\treturn vector2.Vector{\n\t\tt[0][0]*v[0] + t[1][0]*v[1],\n\t\tt[0][1]*v[0] + t[1][1]*v[1],\n\t}\n}",
"func (c ChiSquared) StdDev() float64 {\n\treturn math.Sqrt(c.Variance())\n}",
"func Quat2Normalize(out, a []float64) []float64 {\n\tmagnitude := Quat2SquaredLength(a)\n\tif magnitude > 0 {\n\t\tmagnitude = math.Sqrt(magnitude)\n\n\t\ta0 := a[0] / magnitude\n\t\ta1 := a[1] / magnitude\n\t\ta2 := a[2] / magnitude\n\t\ta3 := a[3] / magnitude\n\n\t\tb0 := a[4]\n\t\tb1 := a[5]\n\t\tb2 := a[6]\n\t\tb3 := a[7]\n\n\t\tdotAB := a0*b0 + a1*b1 + a2*b2 + a3*b3\n\n\t\tout[0] = a0\n\t\tout[1] = a1\n\t\tout[2] = a2\n\t\tout[3] = a3\n\n\t\tout[4] = (b0 - a0*dotAB) / magnitude\n\t\tout[5] = (b1 - a1*dotAB) / magnitude\n\t\tout[6] = (b2 - a2*dotAB) / magnitude\n\t\tout[7] = (b3 - a3*dotAB) / magnitude\n\t}\n\treturn out\n}",
"func (p Point2D) Distance(p2 Point2D) float32 {\n\treturn p.VecTo(p2).Len()\n}",
"func (v Vector2D) Length() float64 {\n\treturn Distance(v, ZeroVector(2))\n}",
"func SquareBigRatComplex(vReal, vImag *big.Rat) (*big.Rat, *big.Rat) {\n\treturn MultiplyBigRatComplex(vReal, vImag, vReal, vImag)\n}",
"func (c *Context) MOVDQ2Q(imrx, mrx operand.Op) {\n\tc.addinstruction(x86.MOVDQ2Q(imrx, mrx))\n}",
"func (i I) Distance(i2 I) float64 {\n\treturn i.Subtract(i2).Mag()\n}",
"func ScalarProduct2(a, b Vector2) float64 {\r\n\treturn a.x*b.x + a.y*b.y\r\n}",
"func (a Vec2) Max(b Vec2) Vec2 {\n\tvar r Vec2\n\tif a.X > b.X {\n\t\tr.X = a.X\n\t} else {\n\t\tr.X = b.X\n\t}\n\tif a.Y > b.Y {\n\t\tr.Y = a.Y\n\t} else {\n\t\tr.Y = b.Y\n\t}\n\treturn r\n}",
"func vLenSqr(v []float32) float32 {\n\treturn v[0]*v[0] + v[1]*v[1] + v[2]*v[2]\n}",
"func (c *Clac) Pow2() error {\n\treturn c.applyFloat(1, func(vals []value.Value) (value.Value, error) {\n\t\treturn binary(value.Int(2), \"**\", vals[0])\n\t})\n}",
"func Sqrt(arg float64) float64 {\n\treturn math.Sqrt(arg)\n}",
"func (m Message) GetOrderQty2(f *field.OrderQty2Field) quickfix.MessageRejectError {\n\treturn m.Body.Get(f)\n}",
"func (v Vector) Norm() float64 {\n\treturn math.Sqrt(v.Dot(v))\n}",
"func SizeQcow2(file string) (uint64, error) {\n\tvar size uint64\n\n\tcmd := exec.Command(\"qemu-img\", \"info\", file)\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"%s\", utils.OneLine(out))\n\t}\n\n\tfor _, l := range strings.Split(string(out), \"\\n\") {\n\t\tif !strings.Contains(l, \"virtual size\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tr := regexp.MustCompile(\"\\\\([0-9]+\")\n\t\tif d := r.Find([]byte(l)); d != nil {\n\t\t\ts, err := strconv.ParseInt(string(d[1:]), 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn 0, fmt.Errorf(\"failed to parse qcow2 size from qemu-img: %s\", err)\n\t\t\t}\n\n\t\t\tsize = uint64(s)\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn size, nil\n}",
"func Sqrt(value gcv.Value) gcv.Value {\n\tif value.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(cmplx.Sqrt(value.Complex()))\n\t}\n\treturn gcv.MakeValue(math.Sqrt(value.Real()))\n}",
"func (a Vec2) TransformVec2(b Mat3) Vec2 {\n\treturn Vec2{\n\t\ta.X*b[0][0] + a.Y*b[1][0],\n\t\ta.X*b[0][1] + a.Y*b[1][1],\n\t}\n}",
"func (a Vec2) Mul(b Vec2) Vec2 {\n\treturn Vec2{a.X * b.X, a.Y * b.Y}\n}",
"func op_f64_sqrt(expr *CXExpression, fp int) {\n\tinp1, out1 := expr.Inputs[0], expr.Outputs[0]\n\toutB1 := FromF64(math.Sqrt(ReadF64(fp, inp1)))\n\tWriteMemory(GetFinalOffset(fp, out1), outB1)\n}",
"func (p Point) Distance(p2 Point) float32 {\n\treturn p.VecTo(p2).Len()\n}",
"func GetVec2d(M *la.Matrix, col int, normalize bool) la.Vector {\n\tx := M.Get(0, col)\n\ty := M.Get(1, col)\n\tif normalize {\n\t\ts := math.Sqrt(x*x + y*y)\n\t\tif s > 0 {\n\t\t\tx /= s\n\t\t\ty /= s\n\t\t}\n\t}\n\treturn []float64{x, y}\n}",
"func Nrm2(x Vector) float64 {\n\tif x.Inc < 0 {\n\t\tpanic(negInc)\n\t}\n\treturn cblas128.Dznrm2(x.N, x.Data, x.Inc)\n}",
"func (p Point2) Distance(p2 Point2) float64 {\n\treturn Distance2(p.X(), p.Y(), p2.X(), p2.Y())\n}",
"func (v Vector2D) Normalize() Vector {\n\tlength := v.Length()\n\tv.X /= length\n\tv.Y /= length\n\treturn v\n}",
"func (i I) Mag() float64 {\n\treturn i.F().Mag()\n}",
"func (p Point) Distance(p2 Point) float64 {\n\treturn math.Sqrt(math.Pow(p2.x-p.x, 2) + math.Pow(p2.y-p.y, 2))\n}",
"func Magnitude(t Tuplelike) float64 {\n\tsum := 0.0\n\n\tfor _, value := range t.Values() {\n\t\tsum += value * value\n\t}\n\n\treturn math.Sqrt(sum)\n}",
"func (p1 XY) Distance(p2 XY) float64 {\n\tdx, dy := p2.X-p1.X, p2.Y-p1.Y\n\treturn math.Sqrt(float64(dx*dx) + float64(dy*dy))\n}",
"func Vec2(src dprec.Vec2) sprec.Vec2 {\n\treturn sprec.Vec2{\n\t\tX: float32(src.X),\n\t\tY: float32(src.Y),\n\t}\n}",
"func SquareBigFloatComplex(vReal, vImag *big.Float) (*big.Float, *big.Float) {\n\treturn MultiplyBigFloatComplex(vReal, vImag, vReal, vImag)\n}",
"func (v *Vector3) Sqrt() {\n\tv.X = math.Sqrt(v.X)\n\tv.Y = math.Sqrt(v.Y)\n\tv.Z = math.Sqrt(v.Z)\n}",
"func (v Vector2) Y() float64 {\r\n\treturn v.y\r\n}",
"func AudioSpectrogramMagnitudeSquared(value bool) AudioSpectrogramAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"magnitude_squared\"] = value\n\t}\n}",
"func (q1 Quat) Norm() float32 {\n\treturn q1.Len()\n}",
"func (v *Vector) Length() float64 {\n\tsquared := (v.X * v.X) + (v.Y * v.Y)\n\troot := math.Sqrt(squared)\n\treturn root\n\n}",
"func Length(v Vect) f.Float { return f.Sqrt(Dot(v, v)) }"
] | [
"0.76467",
"0.7355751",
"0.735416",
"0.72109294",
"0.6946739",
"0.6719755",
"0.66131836",
"0.64010394",
"0.62709486",
"0.6249757",
"0.62447196",
"0.616803",
"0.6158362",
"0.614744",
"0.59848154",
"0.59739727",
"0.59730613",
"0.59290344",
"0.58734214",
"0.58546335",
"0.5729729",
"0.57157713",
"0.56964225",
"0.5516383",
"0.55068076",
"0.5459328",
"0.5382928",
"0.5377095",
"0.5350077",
"0.53262943",
"0.5304666",
"0.5302367",
"0.5294141",
"0.5276815",
"0.5260093",
"0.5248138",
"0.52349955",
"0.5219273",
"0.51957095",
"0.51738983",
"0.51116383",
"0.5108359",
"0.50683075",
"0.50612533",
"0.5052154",
"0.49948502",
"0.49865586",
"0.4960128",
"0.4932456",
"0.4930566",
"0.491025",
"0.48815012",
"0.4856517",
"0.48243824",
"0.4805711",
"0.47990423",
"0.47481802",
"0.47432157",
"0.4739592",
"0.47358075",
"0.4734741",
"0.47202134",
"0.4669218",
"0.46640697",
"0.4644515",
"0.4553617",
"0.4548959",
"0.45449567",
"0.4528408",
"0.44883057",
"0.44692925",
"0.4455396",
"0.44424105",
"0.44161648",
"0.4401596",
"0.4382825",
"0.43808612",
"0.43701068",
"0.4362956",
"0.43582216",
"0.4349384",
"0.4347437",
"0.43332317",
"0.43303692",
"0.4314361",
"0.43096668",
"0.43034998",
"0.4303268",
"0.42999166",
"0.42975983",
"0.42910638",
"0.42904416",
"0.42863375",
"0.42844135",
"0.42813092",
"0.42744747",
"0.42556655",
"0.42524886",
"0.42516997",
"0.42367527"
] | 0.8301837 | 0 |
Mag returns the magnitude (length) of the Vec2 | func (v Vec2) Mag() float64 {
return math.Sqrt(v.MagSq())
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (myVec Vec2f) Magnitude() float {\n\treturn math.Sqrt(myVec.x * myVec.x + myVec.y * myVec.y)\n}",
"func _v2dMag(v *Vec2d) float64 { return math.Sqrt(v.x*v.x + v.y*v.y) }",
"func (v Vector) Mag() float64 {\n\treturn math.Sqrt(v.Dot(v))\n}",
"func (v Vec) Mag() float64 {\n\treturn math.Sqrt(v.Dot(v))\n}",
"func (v Vector) Magn() float64 {\n\treturn math.Sqrt(v[0]*v[0] + v[1]*v[1] + v[2]*v[2])\n}",
"func (a Vec2) Length() float64 {\n\treturn math.Sqrt(a.X*a.X + a.Y*a.Y)\n}",
"func (v Vector2) Length() float64 {\r\n\treturn v.TransposedMul(v)\r\n}",
"func (v Vector) Magnitude() float64 {\n\tret := float64(0)\n\tfor _, val := range v.data {\n\t\tret += math.Pow(val, 2)\n\t}\n\n\treturn math.Sqrt(ret)\n}",
"func (v Vec2) MagSq() float64 {\n\treturn v.X*v.X + v.Y*v.Y\n}",
"func (vec Vector2) Len2() float32 {\n\treturn vec.X*vec.X + vec.Y*vec.Y\n}",
"func (v Vec3) Magnitude() float64 {\n\treturn math.Sqrt(v.SqrMagnitude())\n}",
"func (v Vector2) Length() float64 {\r\n\tsumOfSquares := v.x*v.x + v.y*v.y\r\n\treturn math.Sqrt(sumOfSquares)\r\n}",
"func (v Vector2) Length() float32 {\n\treturn Sqrtf(v.LengthSq())\n}",
"func (i I) Mag() float64 {\n\treturn i.F().Mag()\n}",
"func (v Vector2D) Length() float64 {\n\treturn Distance(v, ZeroVector(2))\n}",
"func Magnitude(v Vector) float64 {\n\treturn math.Sqrt(math.Pow(v.I, 2) + math.Pow(v.J, 2) + math.Pow(v.K, 2))\n}",
"func Vector2Length(v Vector2) float32 {\n\treturn float32(math.Sqrt(float64((v.X * v.X) + (v.Y * v.Y))))\n}",
"func (license *License) magnitude() float64 {\n\tsum := 0\n\tfor _, value := range license.Frequencies {\n\t\tsum += value * value\n\t}\n\treturn math.Sqrt(float64(sum))\n}",
"func (c *Clac) Mag() error {\n\treturn c.applyFloat(variadic, func(vals []value.Value) (value.Value, error) {\n\t\te := &eval{}\n\t\tmagSq, _ := reduceFloat(zero, vals, func(a, b value.Value) (value.Value, error) {\n\t\t\tmag := e.binary(a, \"+\", e.binary(b, \"*\", b))\n\t\t\treturn mag, e.err\n\t\t})\n\t\treturn unary(\"sqrt\", magSq)\n\t})\n}",
"func (t *Tuple) Magnitude() float64 {\n\treturn math.Sqrt(square(t.x) +\n\t\tsquare(t.y) +\n\t\tsquare(t.z) +\n\t\tsquare(t.w))\n}",
"func GetMagnitude(p *Point) float64 {\n\treturn math.Sqrt(GetMagnitudeSquared(p))\n}",
"func (v Vector2) LengthSq() (m float32) {\n\treturn v.Dot(v)\n}",
"func (d Delta) mag() float64 {\n\tswitch {\n\tcase d.Before != 0 && d.After != 0 && d.Before >= d.After:\n\t\treturn d.After / d.Before\n\tcase d.Before != 0 && d.After != 0 && d.Before < d.After:\n\t\treturn d.Before / d.After\n\tcase d.Before == 0 && d.After == 0:\n\t\treturn 1\n\tdefault:\n\t\t// 0 -> 1 or 1 -> 0\n\t\t// These are significant changes and worth surfacing.\n\t\treturn math.Inf(1)\n\t}\n}",
"func (a Vec2) LengthSq() float64 {\n\treturn a.X*a.X + a.Y*a.Y\n}",
"func Vec3Length2(a Vec3) float32 {\n\treturn a[0]*a[0] + a[1]*a[1] + a[2]*a[2]\n}",
"func Magnitude(r [3]float64) float64 {\n return math.Abs(math.Sqrt(r[1]*r[1] + r[2]*r[2] + r[0]*r[0]))\n}",
"func (v *Vector) Length() float64 {\n\treturn math.Sqrt(v.Dot(v))\n}",
"func (mov *Moves) Mag(move ecs.Entity) int {\n\tif move.Type().HasAll(movMag) {\n\t\treturn mov.mag[move.ID()]\n\t}\n\treturn 0\n}",
"func (v *Vector) Length() float64 {\n\tsquared := (v.X * v.X) + (v.Y * v.Y)\n\troot := math.Sqrt(squared)\n\treturn root\n\n}",
"func Length(v Vect) f.Float { return f.Sqrt(Dot(v, v)) }",
"func (v *Vector) LengthSquared() float64 {\n\treturn v.Dot(v)\n}",
"func (p Vect) Length() f.Float { return f.Sqrt(Dot(p, p)) }",
"func (v *Coordinate) Length() float64 {\n\treturn math.Sqrt(v.X*v.X + v.Y*v.Y)\n}",
"func (a Vector3) SqrMag() float32 {\n\treturn float32(a.X*a.X + a.Y*a.Y + a.Z*a.Z)\n}",
"func (v Vec3) Length() float64 {\n\treturn math.Sqrt(v.X*v.X + v.Y*v.Y + v.Z*v.Z)\n}",
"func Vector2LenSqr(vector Vector2) float32 {\n\treturn vector.X*vector.X + vector.Y*vector.Y\n}",
"func (a Vec4) Length() float32 {\n\treturn math.Sqrt(a.X*a.X + a.Y*a.Y + a.Z*a.Z + a.W*a.W)\n}",
"func (v *Vector) LengthSquared() float64 {\n\treturn (v.X * v.X) + (v.Y * v.Y)\n\n}",
"func (v Vector2) Normalize() Vector2 {\n\treturn v.ScalarMultiply(1.0 / v.Length())\n}",
"func (v *Vector3) Length() float64 {\n\treturn math.Sqrt(v.X*v.X + v.Y*v.Y + v.Z*v.Z)\n}",
"func (geom Geometry) Length() float64 {\n\tlength := C.OGR_G_Length(geom.cval)\n\treturn float64(length)\n}",
"func (self *T) Length() float64 {\n\treturn float64(math.Sqrt(self.LengthSqr()))\n}",
"func (v Vector2) Normalize() Vector {\r\n\tif v.Length() == 0 {\r\n\t\treturn Vector2d(rand.Float64(), rand.Float64())\r\n\t}\r\n\r\n\treturn v.MulScalar(1 / v.Length())\r\n}",
"func (v Vec3) SqrMagnitude() float64 {\n\treturn v[0]*v[0] + v[1]*v[1] + v[2]*v[2]\n}",
"func (v Vector3) Length() float64 {\n\treturn math.Sqrt(math.Pow(float64(v.X), 2) + math.Pow(float64(v.Y), 2) + math.Pow(float64(v.Z), 2))\n}",
"func (p Vect) LengthSq() f.Float { return Dot(p, p) }",
"func Vec3Length(a Vec3) float32",
"func (vn *VecN) Vec2() Vec2 {\n\traw := vn.Raw()\n\treturn Vec2{raw[0], raw[1]}\n}",
"func (a Vector3) Length() float32 {\n\treturn float32(math.Sqrt(float64(a.X*a.X + a.Y*a.Y + a.Z*a.Z)))\n}",
"func (p Point) Distance2(p2 Point) float32 {\n\treturn p.VecTo(p2).Len2()\n}",
"func (u Vec) Len() float64 {\n\treturn math.Hypot(u.X, u.Y)\n}",
"func (v Vector2) Y() float64 {\r\n\treturn v.y\r\n}",
"func Magnitude(t Tuplelike) float64 {\n\tsum := 0.0\n\n\tfor _, value := range t.Values() {\n\t\tsum += value * value\n\t}\n\n\treturn math.Sqrt(sum)\n}",
"func (l *Line) GetLength() float64 {\n\treturn Distance(l.X, l.Y, l.X2, l.Y2)\n}",
"func Dist(v1, v2 Vect) f.Float { return Length(Sub(v1, v2)) }",
"func LengthSq(v Vect) f.Float { return Dot(v, v) }",
"func (vec Vector2) Distance(vec2 Vector2) float32 {\n\txd := vec2.X - vec.X\n\tyd := vec2.Y - vec.Y\n\treturn Sqrt(xd*xd + yd*yd)\n}",
"func (v Quat) Length() float64 {\n\tl := float64(math.Sqrt(float64((v.W * v.W) + (v.X * v.X) + (v.Y * v.Y) + (v.Z * v.Z))))\n\treturn l\n}",
"func Size(value Vec2) *SimpleElement { return newSEVec2(\"size\", value) }",
"func (e *Edge) Length() float64 {\n\tp0 := e.t.points[e.p0]\n\tp1 := e.t.points[e.p1]\n\tdist := p0.squaredDistance(p1)\n\treturn math.Sqrt(dist)\n}",
"func (l Line) Length() float64 {\n\tdx, dy := l[0].X-l[1].X, l[0].Y-l[1].Y\n\treturn math.Sqrt(dx*dx + dy*dy)\n}",
"func (p Point) Length() float64 {\n\treturn math.Sqrt(p.X*p.X + p.Y*p.Y)\n}",
"func (v Vector3D) LengthSquared() int {\n\treturn v.x*v.x + v.y*v.y + v.z*v.z\n}",
"func (v Vector2D) Normalize() Vector {\n\tlength := v.Length()\n\tv.X /= length\n\tv.Y /= length\n\treturn v\n}",
"func (p Point2D) Distance2(p2 Point2D) float32 {\n\treturn p.VecTo(p2).Len2()\n}",
"func (l *Line) Length() float64 {\n\treturn l.q.Subtract(l.p).Magnitude()\n}",
"func (vec Vector2) Distance2(vec2 Vector2) float32 {\n\txd := vec2.X - vec.X\n\tyd := vec2.Y - vec.Y\n\treturn xd*xd + yd*yd\n}",
"func (gdt *Vector3) LengthSquared() Real {\n\targ0 := gdt.getBase()\n\n\tret := C.go_godot_vector3_length_squared(GDNative.api, arg0)\n\n\treturn Real(ret)\n}",
"func (p PointI) Distance2(p2 PointI) int32 {\n\treturn p.VecTo(p2).Len2()\n}",
"func (gdt *Vector3) Length() Real {\n\targ0 := gdt.getBase()\n\n\tret := C.go_godot_vector3_length(GDNative.api, arg0)\n\n\treturn Real(ret)\n}",
"func (v *Vec4) Length() float32 {\n\treturn Fsqrt32(Fsqr32(v.X) + Fsqr32(v.Y) + Fsqr32(v.Z) + Fsqr32(v.W))\n}",
"func (i I) Distance(i2 I) float64 {\n\treturn i.Subtract(i2).Mag()\n}",
"func Vector2Normalize(v Vector2) Vector2 {\n\treturn Vector2Scale(v, 1/Vector2Length(v))\n}",
"func (r *sparseRow) L2Norm() float64 {\n\treturn math.Sqrt(r.Square())\n}",
"func DistSq(v1, v2 Vect) f.Float { return LengthSq(Sub(v1, v2)) }",
"func (a Vector3) Len() float64 {\n\treturn math.Sqrt(a.X*a.X + a.Y*a.Y + a.Z*a.Z)\n}",
"func (self *T) LengthSqr() float64 {\n\treturn self[0]*self[0] + self[1]*self[1]\n}",
"func L2norm64(x Vector64) float64 {\n\tif len(x) == 0 {\n\t\treturn math.NaN()\n\t}\n\treturn math.Pow(Dot64(x, x), 0.5)\n}",
"func (this *BoundingBox) AxisLength(i int) float64 {\n\tif i < 0 || i > len(this.Min)-1 {\n\t\treturn 0\n\t}\n\treturn math.Abs(this.Min[i] - this.Max[i])\n}",
"func (p Point2D) Distance(p2 Point2D) float32 {\n\treturn p.VecTo(p2).Len()\n}",
"func (v Vector2D) GetDimension() int {\n\treturn 2\n}",
"func (a Vec2) Max(b Vec2) Vec2 {\n\tvar r Vec2\n\tif a.X > b.X {\n\t\tr.X = a.X\n\t} else {\n\t\tr.X = b.X\n\t}\n\tif a.Y > b.Y {\n\t\tr.Y = a.Y\n\t} else {\n\t\tr.Y = b.Y\n\t}\n\treturn r\n}",
"func (this *RectangleShape) GetSize() (size Vector2f) {\n\tsize.fromC(C.sfRectangleShape_getSize(this.cptr))\n\treturn\n}",
"func Nrm2(x Vector) float64 {\n\tif x.Inc < 0 {\n\t\tpanic(negInc)\n\t}\n\treturn cblas128.Dznrm2(x.N, x.Data, x.Inc)\n}",
"func (vn *VecN) Len() float64 {\n\tif vn == nil {\n\t\treturn float64(math.NaN())\n\t}\n\tif len(vn.vec) == 0 {\n\t\treturn 0\n\t}\n\n\treturn float64(math.Sqrt(float64(vn.Dot(vn))))\n}",
"func Vector2Scale(v Vector2, scale float32) Vector2 {\n\treturn NewVector2(v.X*scale, v.Y*scale)\n}",
"func (o VolumeV2Output) Size() pulumi.IntOutput {\n\treturn o.ApplyT(func(v *VolumeV2) pulumi.IntOutput { return v.Size }).(pulumi.IntOutput)\n}",
"func (vp *baseVectorParty) GetLength() int {\n\treturn vp.length\n}",
"func (v Vector2) X() float64 {\r\n\treturn v.x\r\n}",
"func (m Vec2Modulus) Dist(v1, v2 mgl.Vec2) mgl.Vec2 {\n\treturn mgl.Vec2{\n\t\tm.x.Dist(v1[0], v2[0]),\n\t\tm.y.Dist(v1[1], v2[1]),\n\t}\n}",
"func (v Vector) Z() float64 {\n\treturn v[2]\n}",
"func (v Vector3D) Length() int {\n\treturn v.x + v.y + v.z\n}",
"func (a Vec2) Normalized() (v Vec2, ok bool) {\n\tlength := math.Sqrt(a.X*a.X + a.Y*a.Y)\n\tif Equal(length, 0) {\n\t\treturn Vec2Zero, false\n\t}\n\treturn Vec2{\n\t\ta.X / length,\n\t\ta.Y / length,\n\t}, true\n}",
"func (p Point) Distance(p2 Point) float32 {\n\treturn p.VecTo(p2).Len()\n}",
"func (vd VectorDistancer) Len() int { return len(vd) }",
"func Vec2(src dprec.Vec2) sprec.Vec2 {\n\treturn sprec.Vec2{\n\t\tX: float32(src.X),\n\t\tY: float32(src.Y),\n\t}\n}",
"func Max(arg, arg2 float64) float64 {\n\treturn math.Max(arg, arg2)\n}",
"func (q Quat) Length() float32 {\n\treturn math32.Sqrt(q.W*q.W + q.X*q.X + q.Y*q.Y + q.Z*q.Z)\n}",
"func (vec Vector) Len() Number {\n\treturn Number(len(vec))\n}",
"func (v0 Vector2) Distance(v1 Vector2) float32 {\n\treturn Sqrtf(v0.DistanceSq(v1))\n}"
] | [
"0.76289505",
"0.76026416",
"0.746187",
"0.7426065",
"0.6838833",
"0.68316275",
"0.6733582",
"0.6617069",
"0.65825546",
"0.64890444",
"0.642629",
"0.6374563",
"0.63549167",
"0.61985433",
"0.61635196",
"0.6017369",
"0.60036904",
"0.5997282",
"0.5973113",
"0.5966783",
"0.5868814",
"0.5830737",
"0.5826937",
"0.57899225",
"0.5727297",
"0.568234",
"0.5670212",
"0.55393046",
"0.55129206",
"0.55101234",
"0.5482594",
"0.54550683",
"0.5429373",
"0.5382978",
"0.53575075",
"0.533609",
"0.5305513",
"0.5304816",
"0.5301036",
"0.5299489",
"0.5237387",
"0.5225351",
"0.5149481",
"0.51405495",
"0.51157194",
"0.5071584",
"0.50630116",
"0.5057439",
"0.5031063",
"0.5023118",
"0.5016677",
"0.501064",
"0.50002676",
"0.4980492",
"0.49672818",
"0.49327308",
"0.49275663",
"0.49159026",
"0.4908274",
"0.49066293",
"0.48870426",
"0.48851117",
"0.4862612",
"0.48624903",
"0.48612",
"0.48379955",
"0.4825994",
"0.48113048",
"0.48033714",
"0.47947618",
"0.4792363",
"0.47852978",
"0.47803766",
"0.47503015",
"0.47446883",
"0.47390866",
"0.473359",
"0.47293708",
"0.47223297",
"0.46977532",
"0.46916115",
"0.46908596",
"0.4662473",
"0.46346864",
"0.4623823",
"0.46189484",
"0.4616348",
"0.46124268",
"0.46040136",
"0.45990643",
"0.45846426",
"0.45767015",
"0.45728728",
"0.45472303",
"0.4546607",
"0.45124578",
"0.45014954",
"0.44873983",
"0.44637698",
"0.44468424"
] | 0.85427374 | 0 |
Add one Vec2 to this one, returning the resulting Vec2 | func (v Vec2) Add(other Vec2) Vec2 {
return Vec2{v.X + other.X, v.Y + other.Y}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v Vec2) Add(x Vec2) Vec2 {\n\treturn Vec2{v[0] + x[0], v[1] + x[1]}\n}",
"func (v Vector2D) Add(v1 Vector) Vector {\n\tv2 := v1.(Vector2D)\n\treturn Vector2D{v.X + v2.X, v.Y + v2.Y}\n}",
"func (a Vec2) Add(b Vec2) Vec2 {\n\treturn Vec2{a.X + b.X, a.Y + b.Y}\n}",
"func (v Vector2) Add(other Vector) Vector {\r\n\totherv := checkVector2(other)\r\n\treturn Vector2{\r\n\t\tv[0] + otherv[0],\r\n\t\tv[1] + otherv[1],\r\n\t}\r\n}",
"func (v1 Vector2) Add(v2 Vector2) Vector2 {\n\treturn Vector2{v1.X + v2.X, v1.Y + v2.Y}\n}",
"func (v *Vector2) Add(b Vector2) {\r\n\tv.x += b.x\r\n\tv.y += b.y\r\n}",
"func Vector2Add(v1, v2 Vector2) Vector2 {\n\treturn NewVector2(v1.X+v2.X, v1.Y+v2.Y)\n}",
"func (v Vec3) Add(v2 Vec3) Vec3 {\n\treturn Vec3{X: v.X + v2.X, Y: v.Y + v2.Y, Z: v.Z + v2.Z}\n}",
"func (p Point2D) Add(v Vec2D) Point2D {\n\treturn Point2D(Vec2D(p).Add(v))\n}",
"func Add(v1, v2 Vector) Vector {\n\treturn Vector{\n\t\tX: v1.X + v2.X,\n\t\tY: v1.Y + v2.Y,\n\t\tZ: v1.Z + v2.Z,\n\t}\n}",
"func Add(v, u *Vec) *Vec {\n\treturn &Vec{\n\t\tv.X + u.X,\n\t\tv.Y + u.Y,\n\t}\n}",
"func (v *Vector3D) Add(v2 Vector3D) Vector3D {\n\treturn Vector3D{v.E1 + v2.E1, v.E2 + v2.E2, v.E3 + v2.E3}\n}",
"func AddV2(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"AddV2\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func (v *Vector) Add(rhs *Vector) *Vector {\n\tif v == nil {\n\t\treturn rhs\n\t}\n\tif rhs == nil {\n\t\treturn v\n\t}\n\tl := min(len(v.vec), len(rhs.vec))\n\tvec := make([]float64, l)\n\tcopy(vec, v.vec)\n\tsaxpy(l, 1, rhs.vec, 1, vec, 1)\n\telems := make([]string, len(v.elems)+len(rhs.elems))\n\telems = append(elems, rhs.elems...)\n\telems = append(elems, v.elems...)\n\treturn &Vector{\n\t\tword: v.word + \" + \" + rhs.word,\n\t\tvec: vec,\n\t\telems: elems,\n\t}\n}",
"func (p *Polygon) AddV2(x v2.Vec) *PolygonVertex {\n\tv := PolygonVertex{}\n\tv.vertex = x\n\tv.vtype = pvNormal\n\tp.vlist = append(p.vlist, v)\n\treturn &p.vlist[len(p.vlist)-1]\n}",
"func (u Vec) Add(v Vec) Vec {\n\treturn Vec{\n\t\tu.X + v.X,\n\t\tu.Y + v.Y,\n\t}\n}",
"func (v1 *Vec3f) Add(v2 Vec3f) Vec3f {\n\tX := v1.X + v2.X\n\tY := v1.Y + v2.Y\n\tZ := v1.Z + v2.Z\n\n\tv := Vec3f{X, Y, Z}\n\n\treturn v\n}",
"func (v Vector) Add(other Vector) Vector {\n\treturn Vector{\n\t\tX: v.X + other.X,\n\t\tY: v.Y + other.Y,\n\t\tZ: v.Z + other.Z,\n\t}\n}",
"func (v Vector) Add(o Vector) *Vector {\n\treturn &Vector{v[0] + o[0], v[1] + o[1], v[2] + o[2]}\n}",
"func (self *Vector) Add(other *Vector) {\n\tself.X = self.X + other.X\n\tself.Y = self.Y + other.Y\n}",
"func (v Vec) Add(other Vec) Vec {\n\treturn v.Copy().AddBy(other)\n}",
"func (v Vector) Add(v1 Vector) Vector {\n\tfor i, x := range v1 {\n\t\tv[i] += x\n\t}\n\treturn v\n}",
"func (v1 Vector3) AddV(v2 Vector3) Vector3 {\n\treturn Vector3{v1.X + v2.X, v1.Y + v2.Y, v1.Z + v2.Z}\n}",
"func (v1 *Vector) Add(v2 Vector) (Vector, error){\n\terr := v1.CompareDimensions(v2)\n\tif err != nil {\n\t\treturn Vector{}, err\n\t}\n\n\tvar sum []float64\n\n\tfor i, val1 := range v1.Values {\n\t\tval2 := v2.Values[i]\n\t\tsum = append(sum, val1 + val2)\n\t}\n\n\treturn New(sum), nil\n}",
"func (v *Vector) AddTo(e *Vector) {\n\tv.X += e.X\n\tv.Y += e.Y\n\tv.Z += e.Z\n}",
"func (v *Vector) Add(u *Vector) *Vector {\n\n\tx := (v.X + u.X)\n\ty := (v.Y + u.Y)\n\treturn &Vector{\n\t\tX: (x),\n\t\tY: (y),\n\t}\n}",
"func (v Posit16x2) Add(x Posit16x2) Posit16x2 {\n\tout := Posit16x2{impl: make([]Posit16, 2)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Add(x.impl[i])\n\t}\n\treturn out\n}",
"func Append(v1 Vector, v2 Vector) Vector {\n\tbaseDim := v1.Size()\n\tv1.dim = baseDim + v2.Size()\n\tfor n, d := range v2.data {\n\t\tv1.Set(baseDim+n, d)\n\t}\n\n\treturn v1\n}",
"func (p Point) Add(v Vec) Point {\n\treturn Point(Vec(p).Add(v))\n}",
"func (v1 *Vector3) Add(v2 *Vector3) Vector3 {\n\treturn Vector3{v1.X + v2.X, v1.Y + v2.Y, v1.Z + v2.Z}\n}",
"func (a Vec2) AddScalar(b float64) Vec2 {\n\treturn Vec2{a.X + b, a.Y + b}\n}",
"func (v Vector3) Add(other Vector3) Vector3 {\n\treturn Vector3{X: v.X + other.X, Y: v.Y + other.Y, Z: v.Z + other.Z}\n}",
"func (v Vec3i) Add(other Vec3i) Vec3i {\n\treturn Vec3i{v.X + other.X, v.Y + other.Y, v.Z + other.Z}\n}",
"func (v Vec3) Add(w Vec3) Vec3 {\n\treturn Vec3{v[0] + w[0], v[1] + w[1], v[2] + w[2]}\n}",
"func (vn *VecN) Vec2() Vec2 {\n\traw := vn.Raw()\n\treturn Vec2{raw[0], raw[1]}\n}",
"func (v Vector3D) Add(other Vector3D) Vector3D {\n\treturn Vector3D{\n\t\tx: v.x + other.x,\n\t\ty: v.y + other.y,\n\t\tz: v.z + other.z,\n\t}\n}",
"func (u *Vec3) Add(v *Vec3) *Vec3 {\n\ts := Vec3{\n\t\tu.X + v.X,\n\t\tu.Y + v.Y,\n\t\tu.Z + v.Z,\n\t}\n\treturn &s\n}",
"func (n *Uint256) Add2(n1, n2 *Uint256) *Uint256 {\n\tvar c uint64\n\tn.n[0], c = bits.Add64(n1.n[0], n2.n[0], c)\n\tn.n[1], c = bits.Add64(n1.n[1], n2.n[1], c)\n\tn.n[2], c = bits.Add64(n1.n[2], n2.n[2], c)\n\tn.n[3], _ = bits.Add64(n1.n[3], n2.n[3], c)\n\treturn n\n}",
"func (p PointI) Add(v VecI) PointI {\n\treturn PointI(VecI(p).Add(v))\n}",
"func (p *Point) Add(p2 Point) {\n\tp.X += p2.X\n\tp.Y += p2.Y\n\tp.Z += p2.Z\n}",
"func (v *V) Add(x *V) *V {\n\tif !IsVSameShape(x, v) {\n\t\tpanic(ErrShape)\n\t}\n\tfor i, e := range x.Data {\n\t\tv.Data[i] += e\n\t}\n\treturn v\n}",
"func (p Point2) Add(ps ...Point2) Point2 {\n\tfor _, p2 := range ps {\n\t\tp[0] += p2[0]\n\t\tp[1] += p2[1]\n\t}\n\treturn p\n}",
"func (v *Vector) Plus(a *Vector) *Vector {\n\treturn &Vector{X: v.X + a.X, Y: v.Y + a.Y, Z: v.Z + a.Z}\n}",
"func (p *Polygon) AddV2Set(x []v2.Vec) {\n\tfor _, v := range x {\n\t\tp.AddV2(v)\n\t}\n}",
"func (n *Uint256) Add(n2 *Uint256) *Uint256 {\n\treturn n.Add2(n, n2)\n}",
"func Vec2(src dprec.Vec2) sprec.Vec2 {\n\treturn sprec.Vec2{\n\t\tX: float32(src.X),\n\t\tY: float32(src.Y),\n\t}\n}",
"func (i I) Add(i2 I) I {\n\treturn I{i.X + i2.X, i.Y + i2.Y}\n}",
"func (idx *Index) Add(idx2 *Index) error {\n\tif idx == nil {\n\t\treturn fmt.Errorf(\"can't use Index.Add with nil receiver Index\")\n\t}\n\tif idx2 == nil || len(idx2.Blocks) == 0 {\n\t\treturn nil\n\t}\n\tif idx.Blocks == nil {\n\t\tidx.Blocks = idx2.Blocks\n\t\treturn nil\n\t}\n\tfor zyx, svc2 := range idx2.Blocks {\n\t\tsvc, found := idx.Blocks[zyx]\n\t\tif !found || svc == nil || svc.Counts == nil {\n\t\t\tidx.Blocks[zyx] = svc2\n\t\t} else {\n\t\t\t// supervoxels cannot be in more than one set index, so if it's in idx2,\n\t\t\t// that supervoxel can't be in idx.\n\t\t\tfor sv2, c2 := range svc2.Counts {\n\t\t\t\tsvc.Counts[sv2] = c2\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (a *Vector3) Plus(b Vector3) {\n\t*a = Vector3{a.X + b.X, a.Y + b.Y, a.Z + b.Z}\n}",
"func (vec Vector2) Len2() float32 {\n\treturn vec.X*vec.X + vec.Y*vec.Y\n}",
"func (vn *VecN) Add(dst *VecN, subtrahend *VecN) *VecN {\n\tif vn == nil || subtrahend == nil {\n\t\treturn nil\n\t}\n\tsize := intMin(len(vn.vec), len(subtrahend.vec))\n\tdst = dst.Resize(size)\n\n\tfor i := 0; i < size; i++ {\n\t\tdst.vec[i] = vn.vec[i] + subtrahend.vec[i]\n\t}\n\n\treturn dst\n}",
"func Add(V, W Vector) Vector {\n\tLengthCheck(V, W)\n\tX := make(Vector, len(V), len(V))\n\tfor i := range X {\n\t\tX[i] = V[i] + W[i]\n\t}\n\treturn X\n}",
"func (p Point) add(p1 Point) Point {\n\tp.x = p.x + p1.x\n\tp.y = p.y + p1.y\n\treturn p\n}",
"func (m Mat2f) Add(other Mat2f) Mat2f {\n\treturn Mat2f{\n\t\tm[0] + other[0], m[1] + other[1],\n\t\tm[2] + other[2], m[3] + other[3]}\n}",
"func (v Vec) AddBy(other Vec) Vec {\n\tassertSameLen(v, other)\n\tfor i, val := range other {\n\t\tv[i] += val\n\t}\n\treturn v\n}",
"func (this *BoundingBox) Add(point *vec3.T) *BoundingBox {\n\tif !this.initialized {\n\t\tcopy(this.Min[:], point[:])\n\t\tcopy(this.Max[:], point[:])\n\t\tthis.initialized = true\n\n\t\treturn this\n\t}\n\n\tfor i, val := range point[:] {\n\t\tif val > this.Max[i] {\n\t\t\tthis.Max[i] = val\n\t\t}\n\t\tif val < this.Min[i] {\n\t\t\tthis.Min[i] = val\n\t\t}\n\t}\n\n\treturn this\n\n}",
"func (a *EncryptedVec) Add(b *EncryptedVec) (*EncryptedVec, error) {\n\n\tif len(a.Coords) != len(b.Coords) {\n\t\treturn nil, errors.New(\"cannot add vectors of different length\")\n\t}\n\n\tpk := a.Pk\n\tres := make([]*paillier.Ciphertext, len(a.Coords))\n\n\tfor i := range a.Coords {\n\t\tres[i] = pk.Add(a.Coords[i], b.Coords[i])\n\t}\n\n\treturn &EncryptedVec{\n\t\tPk: pk,\n\t\tCoords: res,\n\t}, nil\n}",
"func (p *Polygon) Add(x, y float64) *PolygonVertex {\n\treturn p.AddV2(v2.Vec{x, y})\n}",
"func (m *Matrix) Add(x, y int, v int64) {\n\tov, _ := m.Get(x, y)\n\tm.Set(x, y, int64(ov)+v)\n}",
"func Vector2One() Vector2 {\n\treturn NewVector2(1.0, 1.0)\n}",
"func (v *Vector) AddSet(o Vector) *Vector {\n\tv[0] += o[0]\n\tv[1] += o[1]\n\tv[2] += o[2]\n\treturn v\n}",
"func (a Vector3) Plus(b Vector3) Vector3 {\n\treturn Vector3{a.X + b.X, a.Y + b.Y, a.Z + b.Z}\n}",
"func (n *Uint256) AddUint64(n2 uint64) *Uint256 {\n\tvar c uint64\n\tn.n[0], c = bits.Add64(n.n[0], n2, c)\n\tn.n[1], c = bits.Add64(n.n[1], 0, c)\n\tn.n[2], c = bits.Add64(n.n[2], 0, c)\n\tn.n[3], _ = bits.Add64(n.n[3], 0, c)\n\treturn n\n}",
"func (v *Vec4) Add(x *Vec4) {\n\tv.X += x.X\n\tv.Y += x.Y\n\tv.Z += x.Z\n\tv.W += x.W\n}",
"func (s *Scalar) Add(x, y *Scalar) *Scalar {\n\ts.s.Add(&x.s, &y.s)\n\treturn s\n}",
"func (p Point) Add(v Point) Point {\n\treturn NewPoint(p.X+v.X, p.Y+v.Y)\n}",
"func Add(out1 *LooseFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := (arg1[0] + arg2[0])\n\tx2 := (arg1[1] + arg2[1])\n\tx3 := (arg1[2] + arg2[2])\n\tout1[0] = x1\n\tout1[1] = x2\n\tout1[2] = x3\n}",
"func (a Vec4) Plus(b Vec4) Vec4 {\n\treturn Vec4{a.X + b.X, a.Y + b.Y, a.Z + b.Z, a.W + b.W}\n}",
"func (gdt *Vector3) OperatorAdd(b Vector3) Vector3 {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_operator_add(GDNative.api, arg0, arg1)\n\n\treturn Vector3{base: &ret}\n\n}",
"func (v *Vec3i) SetAdd(other Vec3i) {\n\tv.X += other.X\n\tv.Y += other.Y\n\tv.Z += other.Z\n}",
"func (a *Vec4) Add(b Vec4) {\n\ta.X += b.X\n\ta.Y += b.Y\n\ta.Z += b.Z\n\ta.W += b.W\n}",
"func (v Posit8x4) Add(x Posit8x4) Posit8x4 {\n\tout := Posit8x4{impl: make([]Posit8, 4)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Add(x.impl[i])\n\t}\n\treturn out\n}",
"func (v Vec) SAdd(val float64) Vec {\n\treturn v.Copy().SAddBy(val)\n}",
"func Add(z, x, y *Elt)",
"func (s VectOp) Plusv(v []float64) VectOp {\n\treturn fs.Plusv(s, v)\n}",
"func (wv *Spectrum) Add(other Spectrum) {\n\t// if wv.Lambda != other.Lambda IS ERROR\n\twv.C[0] += other.C[0]\n\twv.C[1] += other.C[1]\n\twv.C[2] += other.C[2]\n\twv.C[3] += other.C[3]\n}",
"func (z *polyGF2) Add(a, b *polyGF2) *polyGF2 {\n\tz.coeff.Xor(&a.coeff, &b.coeff)\n\treturn z\n}",
"func (z fermat) Add(x, y fermat) fermat {\n\tif len(z) != len(x) {\n\t\tpanic(\"Add: len(z) != len(x)\")\n\t}\n\taddVV(z, x, y) // there cannot be a carry here.\n\tz.norm()\n\treturn z\n}",
"func (c Coordinate) Add(other Coordinate) Coordinate {\n\tresult := Coordinate{\n\t\tX: c.X + other.X,\n\t\tY: c.Y + other.Y,\n\t}\n\treturn result\n}",
"func (a Vector) Add(b []float64) {\n\tla, lb := len(a), len(b)\n\tif la != lb {\n\t\tpanic(fmt.Sprintf(\"can not calculate a.Add(b); len(a)==%d, len(b)==%d\", la, lb))\n\t}\n\tfor k := 0; k < la; k++ {\n\t\ta[k] += b[k]\n\t}\n}",
"func (t Tuple) Add(o Tuple) Tuple {\n\tif t.IsPoint() && o.IsPoint() {\n\t\tpanic(\"cannot add 2 point tuples\")\n\t}\n\treturn Tuple{t.X + o.X, t.Y + o.Y, t.Z + o.Z, t.W + o.W}\n}",
"func Vec3Add(a, b Vec3) (v Vec3) {\n\tv[0] = a[0] + b[0]\n\tv[1] = a[1] + b[1]\n\tv[2] = a[2] + b[2]\n\n\treturn\n}",
"func (v1 Vector2) Mul(v2 Vector2) Vector2 {\n\tv1.MulThis(v2)\n\treturn v1\n}",
"func (geom Geometry) AddPoint2D(x, y float64) {\n\tC.OGR_G_AddPoint_2D(geom.cval, C.double(x), C.double(y))\n}",
"func (t *Tuple) Add(o *Tuple) *Tuple {\n\treturn &Tuple{\n\t\tt.x + o.x,\n\t\tt.y + o.y,\n\t\tt.z + o.z,\n\t\tt.w + o.w,\n\t}\n}",
"func Add(out1 *LooseFieldElement, arg1 *TightFieldElement, arg2 *TightFieldElement) {\n\tx1 := (arg1[0] + arg2[0])\n\tx2 := (arg1[1] + arg2[1])\n\tx3 := (arg1[2] + arg2[2])\n\tx4 := (arg1[3] + arg2[3])\n\tx5 := (arg1[4] + arg2[4])\n\tout1[0] = x1\n\tout1[1] = x2\n\tout1[2] = x3\n\tout1[3] = x4\n\tout1[4] = x5\n}",
"func Vector3Add(v1, v2 Vector3) Vector3 {\n\treturn NewVector3(v1.X+v2.X, v1.Y+v2.Y, v1.Z+v2.Z)\n}",
"func (v Vec2) Sub(other Vec2) Vec2 {\n\treturn Vec2{v.X - other.X, v.Y - other.Y}\n}",
"func (vec Vector) Append(obj LangType) Sequence {\n\treturn append(vec, obj)\n}",
"func (u UDim) Add(v UDim) UDim {\n\treturn UDim{\n\t\tScale: u.Scale + v.Scale,\n\t\tOffset: u.Offset + v.Offset,\n\t}\n}",
"func (ts Uint64Uint64Tuples) Append2(k1 uint64, v1 uint64, k2 uint64, v2 uint64) Uint64Uint64Tuples {\n\treturn append(ts, Uint64Uint64Tuple{k1, v1}, Uint64Uint64Tuple{k2, v2})\n}",
"func Add(t, other Tuplelike) Tuplelike {\n\tresult := []float64{}\n\n\tfor idx, value := range t.Values() {\n\t\tresult = append(result, value+other.At(idx))\n\t}\n\n\treturn Tuple(result)\n}",
"func Add(v1, v2 sqltypes.Value) (sqltypes.Value, error) {\n\tif v1.IsNull() || v2.IsNull() {\n\t\treturn sqltypes.NULL, nil\n\t}\n\te1, err := valueToEval(v1, collationNumeric)\n\tif err != nil {\n\t\treturn sqltypes.NULL, err\n\t}\n\te2, err := valueToEval(v2, collationNumeric)\n\tif err != nil {\n\t\treturn sqltypes.NULL, err\n\t}\n\tr, err := addNumericWithError(e1, e2)\n\tif err != nil {\n\t\treturn sqltypes.NULL, err\n\t}\n\treturn evalToSQLValue(r), nil\n}",
"func (vector *Vector) AppendVector(vs ...interface{}) {\n\t*vector = append(*vector, vs...)\n\t// Note that this method is not checking for \"uniqueness\" of the element being added.\n}",
"func (q1 Quat) Add(q2 Quat) Quat {\n\treturn Quat{q1.W + q2.W, q1.V.Add(q2.V)}\n}",
"func (z *BiComplex) Add(x, y *BiComplex) *BiComplex {\n\tz.l.Add(&x.l, &y.l)\n\tz.r.Add(&x.r, &y.r)\n\treturn z\n}",
"func (b *ValueArrayBuilder) Add(value Value) *ValueArrayBuilder {\n\tif b == nil {\n\t\treturn b\n\t}\n\tif b.copyOnWrite {\n\t\tn := len(b.output)\n\t\tnewSlice := make([]Value, n, n+1)\n\t\tcopy(newSlice[0:n], b.output)\n\t\tb.output = newSlice\n\t\tb.copyOnWrite = false\n\t}\n\tif b.output == nil {\n\t\tb.output = make([]Value, 0, 1)\n\t}\n\tb.output = append(b.output, value)\n\treturn b\n}",
"func ConcatV2(scope *Scope, values []tf.Output, axis tf.Output) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"ConcatV2\",\n\t\tInput: []tf.Input{\n\t\t\ttf.OutputList(values), axis,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func (z *Float64) Add(x, y *Float64) *Float64 {\n\tz.l = x.l + y.l\n\tz.r = x.r + y.r\n\treturn z\n}",
"func (v Vector2) Y() float64 {\r\n\treturn v.y\r\n}"
] | [
"0.74521434",
"0.73208445",
"0.72367424",
"0.71565145",
"0.7120489",
"0.71018904",
"0.7047271",
"0.6895478",
"0.664067",
"0.6560502",
"0.65546304",
"0.6542794",
"0.644412",
"0.6421642",
"0.64206374",
"0.64024866",
"0.63917994",
"0.6380805",
"0.6356837",
"0.63473177",
"0.62898964",
"0.6174734",
"0.61648846",
"0.6159404",
"0.6147063",
"0.60840535",
"0.6080755",
"0.60641026",
"0.6040373",
"0.60069",
"0.600561",
"0.5932036",
"0.5924943",
"0.59052277",
"0.59034",
"0.5891072",
"0.5869365",
"0.58597076",
"0.58587706",
"0.58255476",
"0.58080065",
"0.57810944",
"0.5739158",
"0.5623118",
"0.56158686",
"0.55535674",
"0.5532543",
"0.5522508",
"0.5517963",
"0.54937655",
"0.5489993",
"0.54762065",
"0.5468455",
"0.54598767",
"0.5429759",
"0.54220456",
"0.54127157",
"0.540314",
"0.5394311",
"0.5379271",
"0.5312706",
"0.5271848",
"0.5260058",
"0.5259548",
"0.5247738",
"0.5245141",
"0.523869",
"0.5232077",
"0.5221921",
"0.52196383",
"0.52081174",
"0.5201613",
"0.5199735",
"0.51960516",
"0.5194131",
"0.5190206",
"0.518646",
"0.51837665",
"0.5177098",
"0.51701224",
"0.51315486",
"0.512221",
"0.51080894",
"0.510294",
"0.50973",
"0.50800186",
"0.50799775",
"0.5066883",
"0.50636345",
"0.50380325",
"0.5036502",
"0.5025632",
"0.50168097",
"0.50106835",
"0.500825",
"0.4981242",
"0.49598816",
"0.4957838",
"0.49518675",
"0.492946"
] | 0.76290447 | 0 |
Subtract one Vec2 from this one, returning the resulting Vec2 | func (v Vec2) Sub(other Vec2) Vec2 {
return Vec2{v.X - other.X, v.Y - other.Y}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (v Vector2D) Subtract(v1 Vector) Vector {\n\tv2 := v1.(Vector2D)\n\treturn Vector2D{v.X - v2.X, v.Y - v2.Y}\n}",
"func (v Vector2) Subtract(other Vector) Vector {\r\n\totherv := checkVector2(other)\r\n\treturn Vector2{\r\n\t\tv[0] - otherv[0],\r\n\t\tv[1] - otherv[1],\r\n\t}\r\n}",
"func (v *Vector2) Subtract(b Vector2) {\r\n\tv.x -= b.x\r\n\tv.y -= b.y\r\n}",
"func (v Vector) Subtract(other Vector) Vector {\n\treturn Vector{\n\t\tX: v.X - other.X,\n\t\tY: v.Y - other.Y,\n\t\tZ: v.Z - other.Z,\n\t}\n}",
"func Subtract(v1, minusV2 Vector) Vector {\n\treturn Vector{\n\t\tX: v1.X - minusV2.X,\n\t\tY: v1.Y - minusV2.Y,\n\t\tZ: v1.Z - minusV2.Z,\n\t}\n}",
"func (a Vec2) Sub(b Vec2) Vec2 {\n\treturn Vec2{a.X - b.X, a.Y - b.Y}\n}",
"func (v1 Vector2) Sub(v2 Vector2) Vector2 {\n\treturn Vector2{v1.X - v2.X, v1.Y - v2.Y}\n}",
"func Subtract(v, u *Vec) *Vec {\n\treturn &Vec{\n\t\tv.X - u.X,\n\t\tv.Y - u.Y,\n\t}\n}",
"func (v Vec2) Sub(x Vec2) Vec2 {\n\treturn Vec2{v[0] - x[0], v[1] - x[1]}\n}",
"func (v1 *Vector) Subtract(v2 Vector) (Vector, error){\n\terr := v1.CompareDimensions(v2)\n\tif err != nil {\n\t\treturn Vector{}, err\n\t}\n\n\tvar diff []float64\n\n\tfor i, val1 := range v1.Values {\n\t\tval2 := v2.Values[i]\n\t\tdiff = append(diff, val1 - val2)\n\t}\n\n\treturn New(diff), nil\n}",
"func (v1 *Vec3f) Subtract(v2 Vec3f) Vec3f {\n\tX := v1.X - v2.X\n\tY := v1.Y - v2.Y\n\tZ := v1.Z - v2.Z\n\n\tv := Vec3f{X, Y, Z}\n\n\treturn v\n}",
"func (v Vec3i) Sub(other Vec3i) Vec3i {\n\treturn Vec3i{v.X - other.X, v.Y - other.Y, v.Z - other.Z}\n}",
"func (v Vec3) Sub(v2 Vec3) Vec3 {\n\treturn Vec3{X: v.X - v2.X, Y: v.Y - v2.Y, Z: v.Z - v2.Z}\n}",
"func (v Vector3D) Sub(other Vector3D) Vector3D {\n\treturn Vector3D{\n\t\tx: v.x - other.x,\n\t\ty: v.y - other.y,\n\t\tz: v.z - other.z,\n\t}\n}",
"func (v *VersionVector) Subtract(other *VersionVector) *VersionVector {\n\tdominatingDots := make(Dots)\n\n\tv.l.RLock()\n\tfor actor, time := range v.dots {\n\t\totherTime, exists := other.Get(actor)\n\t\tif !exists || time > otherTime {\n\t\t\tdominatingDots[actor] = time\n\t\t}\n\t}\n\tv.l.RUnlock()\n\n\treturn &VersionVector{\n\t\tdots: dominatingDots,\n\t}\n}",
"func (v1 Vec3) Sub(v2 Vec3) *Vec3 {\n\treturn &Vec3{e: [3]float32{v1.X() - v2.X(), v1.Y() - v2.Y(), v1.Z() - v2.Z()}}\n}",
"func (u Vec) Sub(v Vec) Vec {\n\treturn Vec{\n\t\tu.X - v.X,\n\t\tu.Y - v.Y,\n\t}\n}",
"func (t Tuple) Sub(o Tuple) Tuple {\n\tif t.IsVector() && o.IsPoint() {\n\t\tpanic(\"cannot subtract point from vector\")\n\t}\n\treturn Tuple{t.X - o.X, t.Y - o.Y, t.Z - o.Z, t.W - o.W}\n}",
"func (v Vec) Sub(other Vec) Vec {\n\treturn v.Copy().SubBy(other)\n}",
"func (p Point2) Sub(ps ...Point2) Point2 {\n\tfor _, p2 := range ps {\n\t\tp[0] -= p2[0]\n\t\tp[1] -= p2[1]\n\t}\n\treturn p\n}",
"func (v *V) Sub(x *V) *V {\n\tif !IsVSameShape(x, v) {\n\t\tpanic(ErrShape)\n\t}\n\tfor i, e := range x.Data {\n\t\tv.Data[i] -= e\n\t}\n\treturn v\n}",
"func (v Vector) Sub(o Vector) *Vector {\n\treturn &Vector{v[0] - o[0], v[1] - o[1], v[2] - o[2]}\n}",
"func Vector2Subtract(v1, v2 Vector2) Vector2 {\n\treturn NewVector2(v1.X-v2.X, v1.Y-v2.Y)\n}",
"func (p Vector3) Sub(o Vector3) Vector3 {\n\treturn Vector3{p.X - o.X, p.Y - o.Y, p.Z - o.Z}\n}",
"func Sub(v1, v2 *Vec) *Vec {\n\tnegV2 := Negate(v2)\n\treturn Add(v1, negV2)\n}",
"func (v Vec3) Sub(w Vec3) Vec3 {\n\treturn Vec3{v[0] - w[0], v[1] - w[1], v[2] - w[2]}\n}",
"func (u *Vec3) Sub(v *Vec3) *Vec3 {\n\ts := Vec3{\n\t\tu.X - v.X,\n\t\tu.Y - v.Y,\n\t\tu.Z - v.Z,\n\t}\n\treturn &s\n}",
"func (p *Point) Sub(p2 Point) {\n\tp.X -= p2.X\n\tp.Y -= p2.Y\n\tp.Z -= p2.Z\n}",
"func (a *Vec4) Subtract(b Vec4) {\n\ta.X -= b.X\n\ta.Y -= b.Y\n\ta.Z -= b.Z\n\ta.W -= b.W\n}",
"func (v *Vec4) Subtract(x *Vec4) {\n\tv.X -= x.X\n\tv.Y -= x.Y\n\tv.Z -= x.Z\n\tv.W -= v.W\n}",
"func (s *Scalar) Subtract(x, y *Scalar) *Scalar {\n\ts.s.Sub(&x.s, &y.s)\n\treturn s\n}",
"func (v Vec) AbsSub(other Vec) Vec {\n\treturn v.Copy().AbsSubBy(other)\n}",
"func (i I) Subtract(i2 I) I {\n\treturn I{i.X - i2.X, i.Y - i2.Y}\n}",
"func (v Vector) Negative() Vector {\n\treturn Vector{\n\t\tX: -v.X,\n\t\tY: -v.Y,\n\t\tZ: -v.Z,\n\t}\n}",
"func (vn *VecN) Sub(dst *VecN, addend *VecN) *VecN {\n\tif vn == nil || addend == nil {\n\t\treturn nil\n\t}\n\tsize := intMin(len(vn.vec), len(addend.vec))\n\tdst = dst.Resize(size)\n\n\tfor i := 0; i < size; i++ {\n\t\tdst.vec[i] = vn.vec[i] - addend.vec[i]\n\t}\n\n\treturn dst\n}",
"func (gdt *Vector3) OperatorSubtract(b Vector3) Vector3 {\n\targ0 := gdt.getBase()\n\targ1 := b.getBase()\n\n\tret := C.go_godot_vector3_operator_subtract(GDNative.api, arg0, arg1)\n\n\treturn Vector3{base: &ret}\n\n}",
"func (v *Vector) Sub(rhs *Vector) *Vector {\n\tif rhs == nil {\n\t\treturn v\n\t}\n\tif v == nil {\n\t\tv = &Vector{\n\t\t\tword: \"\",\n\t\t\tvec: make([]float64, len(rhs.vec)),\n\t\t\telems: nil,\n\t\t}\n\t}\n\n\tl := min(len(v.vec), len(rhs.vec))\n\tvec := make([]float64, l)\n\tcopy(vec, v.vec)\n\tsaxpy(l, -1, rhs.vec, 1, vec, 1)\n\telems := make([]string, len(v.elems)+len(rhs.elems))\n\telems = append(elems, rhs.elems...)\n\telems = append(elems, v.elems...)\n\treturn &Vector{\n\t\tword: v.word + \" - \" + rhs.word,\n\t\tvec: vec,\n\t\telems: elems,\n\t}\n}",
"func (v Vec) SubBy(other Vec) Vec {\n\tassertSameLen(v, other)\n\tfor i, val := range other {\n\t\tv[i] -= val\n\t}\n\treturn v\n}",
"func (m Mat2f) Sub(other Mat2f) Mat2f {\n\treturn Mat2f{\n\t\tm[0] - other[0], m[1] - other[1],\n\t\tm[2] - other[2], m[3] - other[3]}\n}",
"func (p Point3) Sub(ps ...Point3) Point3 {\n\tfor _, p2 := range ps {\n\t\tp[0] -= p2[0]\n\t\tp[1] -= p2[1]\n\t\tp[2] -= p2[2]\n\t}\n\treturn p\n}",
"func (v1 *Vec) Sub(v2 *Vec) *Vec {\n\treturn Sub(v1, v2)\n}",
"func (s *server) Subtract(ctx context.Context, in *pb.SubtractRequest) (*pb.SubtractReply, error) {\n\treturn &pb.SubtractReply{N1: in.N1 - in.N2}, nil\n}",
"func (a Vec2) Inverse() Vec2 {\n\treturn Vec2{-a.X, -a.Y}\n}",
"func (u Vec) To(v Vec) Vec {\n\treturn Vec{\n\t\tv.X - u.X,\n\t\tv.Y - u.Y,\n\t}\n}",
"func Subtract(t, other Tuplelike) Tuplelike {\n\tresult := []float64{}\n\n\tfor idx, value := range t.Values() {\n\t\tresult = append(result, value-other.At(idx))\n\t}\n\n\treturn Tuple(result)\n}",
"func (t Torus) Sub(a, b Point) Point {\n\ta, b = t.normPair(a, b)\n\treturn a.Sub(b)\n}",
"func Subtract(v1, v2 sqltypes.Value) (sqltypes.Value, error) {\n\tif v1.IsNull() || v2.IsNull() {\n\t\treturn sqltypes.NULL, nil\n\t}\n\te1, err := valueToEval(v1, collationNumeric)\n\tif err != nil {\n\t\treturn sqltypes.NULL, err\n\t}\n\te2, err := valueToEval(v2, collationNumeric)\n\tif err != nil {\n\t\treturn sqltypes.NULL, err\n\t}\n\tr, err := subtractNumericWithError(e1, e2)\n\tif err != nil {\n\t\treturn sqltypes.NULL, err\n\t}\n\treturn evalToSQLValue(r), nil\n}",
"func subtract(x, y float64) float64 {\n\treturn x - y\n}",
"func Sub(V, W Vector) Vector {\n\tLengthCheck(V, W)\n\tX := make(Vector, len(V), len(V))\n\tfor i := range X {\n\t\tX[i] = V[i] - W[i]\n\t}\n\treturn X\n}",
"func (a Vec2) SubScalar(b float64) Vec2 {\n\treturn Vec2{a.X - b, a.Y - b}\n}",
"func (s UserSet) Subtract(other m.UserSet) m.UserSet {\n\tres := s.Collection().Call(\"Subtract\", other)\n\tresTyped := res.(models.RecordSet).Collection().Wrap(\"User\").(m.UserSet)\n\treturn resTyped\n}",
"func (z fermat) Sub(x, y fermat) fermat {\n\tif len(z) != len(x) {\n\t\tpanic(\"Add: len(z) != len(x)\")\n\t}\n\tn := len(y) - 1\n\tb := subVV(z[:n], x[:n], y[:n])\n\tb += y[n]\n\t// If b > 0, we need to subtract b<<n, which is the same as adding b.\n\tz[n] = x[n]\n\tif z[0] <= ^big.Word(0)-b {\n\t\tz[0] += b\n\t} else {\n\t\taddVW(z, z, b)\n\t}\n\tz.norm()\n\treturn z\n}",
"func (x *BigUInt) Subtract(y *BigUInt) (*BigUInt, error) {\n\tif len(y.data) > len(x.data) {\n\t\treturn nil, ErrUnderflow\n\t}\n\toriginalX := make([]byte, len(x.data), cap(x.data))\n\tcopy(originalX, x.data)\n\txSize, ySize := len(x.data), len(y.data)\n\tborrow, diff := uint16(0), uint16(0)\n\tfor i := 0; i < xSize || i < ySize; i++ {\n\t\txData, yData := uint16(x.data[i]), uint16(0)\n\t\tif i < ySize {\n\t\t\tyData = uint16(y.data[i])\n\t\t}\n\n\t\tif xData < (yData + borrow) {\n\t\t\tdiff = (xData + 0x100) - yData - borrow\n\t\t\tborrow = 0x01\n\t\t} else {\n\t\t\tdiff = (xData + 0x100) - yData - borrow\n\t\t\tborrow = 0x00\n\t\t}\n\t\tx.data[i] = uint8(diff & 0xff)\n\t}\n\tif borrow > 0 {\n\t\tcopy(x.data, originalX)\n\t\treturn nil, ErrUnderflow\n\t}\n\n\tcnt := len(x.data) - 1\n\tfor cnt >= 0 && x.data[cnt] == 0 {\n\t\tcnt--\n\t}\n\n\tx.data = x.data[:cnt+1]\n\treturn x, nil\n}",
"func (v *Vec3i) SetSub(other Vec3i) {\n\tv.X -= other.X\n\tv.Y -= other.Y\n\tv.Z -= other.Z\n}",
"func (p Point2D) VecTo(p2 Point2D) Vec2D {\n\treturn Vec2D(p2).Sub(Vec2D(p))\n}",
"func (t *Tuple) Sub(o *Tuple) *Tuple {\n\treturn &Tuple{\n\t\tt.x - o.x,\n\t\tt.y - o.y,\n\t\tt.z - o.z,\n\t\tt.w - o.w,\n\t}\n\n}",
"func Subtract(a cty.Value, b cty.Value) (cty.Value, error) {\n\treturn SubtractFunc.Call([]cty.Value{a, b})\n}",
"func (n *Uint256) Sub(n2 *Uint256) *Uint256 {\n\tvar borrow uint64\n\tn.n[0], borrow = bits.Sub64(n.n[0], n2.n[0], borrow)\n\tn.n[1], borrow = bits.Sub64(n.n[1], n2.n[1], borrow)\n\tn.n[2], borrow = bits.Sub64(n.n[2], n2.n[2], borrow)\n\tn.n[3], _ = bits.Sub64(n.n[3], n2.n[3], borrow)\n\treturn n\n}",
"func (p Point) Sub(other Point) Point {\n\treturn Pt(p.X-other.X, p.Y-other.Y)\n}",
"func (ibf *InvertibleBloomFilter) Subtract(ibf2 *InvertibleBloomFilter) *InvertibleBloomFilter {\n\tdifference := NewIBF(ibf.Size)\n\tcopy(difference.Cells, ibf.Cells)\n\n\tfor j := 0; j < ibf.Size; j++ {\n\t\tdifference.Cells[j].Subtract(&ibf2.Cells[j])\n\t}\n\n\treturn difference\n}",
"func (p Point) VecTo(p2 Point) Vec {\n\treturn Vec(p2).Sub(Vec(p))\n}",
"func (v Posit16x2) Sub(x Posit16x2) Posit16x2 {\n\tout := Posit16x2{impl: make([]Posit16, 2)}\n\tfor i, posit := range v.impl {\n\t\tout.impl[i] = posit.Sub(x.impl[i])\n\t}\n\treturn out\n}",
"func (z *Float64) Sub(x, y *Float64) *Float64 {\n\tz.l = x.l - y.l\n\tz.r = x.r - y.r\n\treturn z\n}",
"func (a *Allocation) Subtract(other *Allocation) *Allocation {\n\tresult := initializeZeroAlloc()\n\tfor t, v := range a.Value {\n\t\tresult.Value[t] = v.Subtract(other.Value[t])\n\t}\n\treturn result\n}",
"func (b *IBFCell) Subtract(b2 *IBFCell) {\n\tb.IDSum.XOR(&b2.IDSum)\n\tb.HashSum.XOR(&b2.HashSum)\n\tb.Count -= b2.Count\n}",
"func Subtract(x, y *Matrix) (*Matrix, error) {\n\tminusY, _ := Scale(-1, y)\n\tresult, err := Add(x, minusY)\n\treturn result, err\n}",
"func Vector3Subtract(v1, v2 Vector3) Vector3 {\n\treturn NewVector3(v1.X-v2.X, v1.Y-v2.Y, v1.Z-v2.Z)\n}",
"func (c MethodsCollection) Subtract() pSubtract {\n\treturn pSubtract{\n\t\tMethod: c.MustGet(\"Subtract\"),\n\t}\n}",
"func (a *EncryptedVec) Sub(b *EncryptedVec) (*EncryptedVec, error) {\n\n\tif len(a.Coords) != len(b.Coords) {\n\t\treturn nil, errors.New(\"cannot add vectors of different length\")\n\t}\n\n\tpk := a.Pk\n\tres := make([]*paillier.Ciphertext, len(a.Coords))\n\n\tfor i := range a.Coords {\n\t\tres[i] = pk.Sub(a.Coords[i], b.Coords[i])\n\t}\n\n\treturn &EncryptedVec{\n\t\tPk: pk,\n\t\tCoords: res,\n\t}, nil\n}",
"func (c *collection) subtract(other *collection) *collection {\n\tret := newCollection(c.defaultNs, c.meta)\n\tfor k, v := range c.objects {\n\t\tif _, ok := other.objects[k]; !ok {\n\t\t\tret.objects[k] = v\n\t\t}\n\t}\n\treturn ret\n}",
"func (p PointI) VecTo(p2 PointI) VecI {\n\treturn VecI(p2).Sub(VecI(p))\n}",
"func (a *Int64s) Subtract(b *Int64s) *Int64s {\n\tresult := a.Clone()\n\tfor x, _ := range b.backing {\n\t\tresult.Remove(x)\n\t}\n\treturn result\n}",
"func (ec *ECPoint) Sub(first, second *ECPoint) *ECPoint {\n\tec.checkNil()\n\tif first.Equal(second) {\n\t\tec.X = big.NewInt(0)\n\t\tec.Y = big.NewInt(0)\n\t\tec.Curve = first.Curve\n\t\treturn ec\n\t}\n\tnegation := new(ECPoint).Negation(second)\n\tec.X, ec.Y = first.Curve.Add(negation.X, negation.Y, first.X, first.Y)\n\tec.Curve = first.Curve\n\n\treturn ec\n}",
"func (z *polyGF2) Sub(a, b *polyGF2) *polyGF2 {\n\treturn z.Add(a, b)\n}",
"func (q1 Quat) Sub(q2 Quat) Quat {\n\treturn Quat{q1.W - q2.W, q1.V.Sub(q2.V)}\n}",
"func (p *Point) Sub(to *Point) *Point {\n\treturn &Point{p.X - to.X, p.Y - to.Y}\n}",
"func (v Vec3) DropZ() Vec2 {\n\treturn Vec2{v[0], v[1]}\n}",
"func (a Balance) Sub(b *Balance) Balance {\n\tfor i, v := range b {\n\t\ta[i] -= v\n\t}\n\treturn a\n}",
"func (v Vec) AbsSubBy(other Vec) Vec {\n\tassertSameLen(v, other)\n\tfor i, val := range other {\n\t\tv[i] = math.Abs(v[i] - val)\n\t}\n\treturn v\n}",
"func subtract(this js.Value, i []js.Value) interface{} {\n\tint1, int2 := getInputValues(i)\n\tsetValueById(i[2].String(), int1-int2)\n\treturn nil\n}",
"func (q Quat) Sub(other Quat) Quat {\n\treturn Quat{q.W - other.W, q.X - other.X, q.Y - other.Y, q.Z - other.Z}\n}",
"func subtract(x, y int) (res int) {\n\tres = x - y\n\treturn\n}",
"func Vec3Sub(a, b Vec3) (v Vec3) {\n\tv[0] = a[0] - b[0]\n\tv[1] = a[1] - b[1]\n\tv[2] = a[2] - b[2]\n\n\treturn\n}",
"func (n *Uint256) Sub2(n1, n2 *Uint256) *Uint256 {\n\tvar borrow uint64\n\tn.n[0], borrow = bits.Sub64(n1.n[0], n2.n[0], borrow)\n\tn.n[1], borrow = bits.Sub64(n1.n[1], n2.n[1], borrow)\n\tn.n[2], borrow = bits.Sub64(n1.n[2], n2.n[2], borrow)\n\tn.n[3], _ = bits.Sub64(n1.n[3], n2.n[3], borrow)\n\treturn n\n}",
"func Subtract(a float64, b float64) float64 {\n\treturn a - b\n}",
"func (ms *MVCCStats) Subtract(oms MVCCStats) {\n\t// Enforce the max LastUpdateNanos for both ages based on their\n\t// pre-subtraction state.\n\tms.Forward(oms.LastUpdateNanos)\n\toms.Forward(ms.LastUpdateNanos)\n\t// If either stats object contains estimates, their difference does too.\n\tms.ContainsEstimates = ms.ContainsEstimates || oms.ContainsEstimates\n\t// Now that we've done that, we may subtract.\n\tms.IntentAge -= oms.IntentAge\n\tms.GCBytesAge -= oms.GCBytesAge\n\tms.LiveBytes -= oms.LiveBytes\n\tms.KeyBytes -= oms.KeyBytes\n\tms.ValBytes -= oms.ValBytes\n\tms.IntentBytes -= oms.IntentBytes\n\tms.LiveCount -= oms.LiveCount\n\tms.KeyCount -= oms.KeyCount\n\tms.ValCount -= oms.ValCount\n\tms.IntentCount -= oms.IntentCount\n\tms.SysBytes -= oms.SysBytes\n\tms.SysCount -= oms.SysCount\n}",
"func (z *Big) Sub(x, y *Big) *Big { return z.Context.Sub(z, x, y) }",
"func Subtract(a, b float64) float64 {\n\treturn a - b\n}",
"func Subtract(a, b float64) float64 {\n\treturn a - b\n}",
"func (z *BiComplex) Sub(x, y *BiComplex) *BiComplex {\n\tz.l.Sub(&x.l, &y.l)\n\tz.r.Sub(&x.r, &y.r)\n\treturn z\n}",
"func VPHSUBSW(mxy, xy, xy1 operand.Op) { ctx.VPHSUBSW(mxy, xy, xy1) }",
"func (v *Vector) Minus(a *Vector) *Vector {\n\treturn &Vector{X: v.X - a.X, Y: v.Y - a.Y, Z: v.Z - a.Z}\n}",
"func Sub(valueA gcv.Value, valueB gcv.Value) gcv.Value {\n\tif valueA.Type() == gcv.Complex || valueB.Type() == gcv.Complex {\n\t\treturn gcv.MakeValue(valueA.Complex() - valueB.Complex())\n\t}\n\treturn gcv.MakeValue(valueA.Real() - valueB.Real())\n}",
"func (z *Rat) Sub(x, y *Rat) *Rat {}",
"func (a Vector3) Minus(b Vector3) Vector3 {\n\treturn Vector3{a.X - b.X, a.Y - b.Y, a.Z - b.Z}\n}",
"func (l *Limits) Sub(l1 Limits) *Limits {\n\tl.Cpu -= l1.Cpu\n\tl.Memory -= l1.Memory\n\tl.Disk -= l1.Disk\n\tl.Fee -= l1.Fee\n\treturn l\n}",
"func (v Vec2) Add(other Vec2) Vec2 {\n\treturn Vec2{v.X + other.X, v.Y + other.Y}\n}",
"func (o *WlRegion) Subtract(x wire.Int, y wire.Int, width wire.Int, height wire.Int) error {\n\tmsg, err := wire.NewMessage(o.ID(), 2)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err = msg.Write(x); err != nil {\n\t\treturn err\n\t}\n\n\tif err = msg.Write(y); err != nil {\n\t\treturn err\n\t}\n\n\tif err = msg.Write(width); err != nil {\n\t\treturn err\n\t}\n\n\tif err = msg.Write(height); err != nil {\n\t\treturn err\n\t}\n\n\tif err = o.Base.Conn.Write(msg); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Subtract(x, y int) (res int) {\n\treturn x - y\n}",
"func (p Point) Sub(q Point) Point { return Point{p.X - q.X, p.Y - q.Y} }"
] | [
"0.7689187",
"0.7526785",
"0.7491658",
"0.7456818",
"0.7352217",
"0.71778595",
"0.71758455",
"0.71502984",
"0.6977851",
"0.6902914",
"0.68851614",
"0.68810874",
"0.68723845",
"0.6834537",
"0.68303734",
"0.67983705",
"0.6745432",
"0.6611283",
"0.6539055",
"0.6434441",
"0.64340395",
"0.6410761",
"0.64047337",
"0.63849705",
"0.63831574",
"0.6362434",
"0.63251644",
"0.6279907",
"0.6229362",
"0.62012184",
"0.61781025",
"0.61742586",
"0.6110016",
"0.61064404",
"0.60885733",
"0.6038365",
"0.6011278",
"0.59592134",
"0.59445345",
"0.5937855",
"0.5928604",
"0.5891106",
"0.5889465",
"0.58759546",
"0.5872419",
"0.58633846",
"0.58496153",
"0.58445495",
"0.57982564",
"0.5792243",
"0.5782025",
"0.5766038",
"0.5764552",
"0.5734389",
"0.5725899",
"0.56968164",
"0.56886196",
"0.5687889",
"0.56862116",
"0.5681037",
"0.5674909",
"0.56742615",
"0.5635152",
"0.5626802",
"0.56219965",
"0.5610534",
"0.56099737",
"0.55862635",
"0.55645406",
"0.556212",
"0.55594575",
"0.5548739",
"0.55172503",
"0.55115986",
"0.54987",
"0.5497636",
"0.54963595",
"0.54896283",
"0.5485922",
"0.5485054",
"0.5466248",
"0.54645395",
"0.54603803",
"0.54584897",
"0.5449273",
"0.5444155",
"0.5439375",
"0.5430959",
"0.5430959",
"0.538912",
"0.5374211",
"0.53719556",
"0.5370506",
"0.536867",
"0.534694",
"0.5340588",
"0.5337847",
"0.5334069",
"0.5333074",
"0.52934116"
] | 0.75407815 | 1 |
Fetch fetches XML data | func (request *preparedRequest) Bytes() ([]byte, error) {
resp, err := request.Execute()
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
msg := fmt.Sprintf("Status error: %d", resp.StatusCode)
return nil, fmt.Errorf(msg)
}
defer resp.Body.Close()
data, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("read body: %v", err)
}
return data, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func fetchXML() Fetcher {\n\treturn func(r Resource, hr *HttpRequest) ([]byte, error) {\n\t\terr := r.verifyParams(hr.params)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfullPath, err := hr.url(r)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treadCh := make(chan cacheResults)\n\t\tcaResults := cacheResults{key: fullPath}\n\t\tgo hr.checkCache(caResults, readCh)\n\t\tread := <-readCh\n\t\tif read.err != nil {\n\t\t\treturn nil, read.err\n\t\t}\n\t\tif read.results != nil {\n\t\t\treturn read.results, nil\n\t\t}\n\n\t\tresp, err := hr.makeRequest(fullPath, r.method)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdefer resp.Body.Close()\n\t\tresults, err := ioutil.ReadAll(resp.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, parse := range hr.parsers {\n\t\t\tresults, err = parse(results, r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\twriteCh := make(chan cacheResults)\n\t\tca := cacheResults{key: fullPath, results: results, duration: r.cacheDuration}\n\t\tgo hr.cacheResults(ca, writeCh)\n\t\tcached := <-writeCh\n\t\tif cached.err != nil {\n\t\t\treturn nil, cached.err\n\t\t}\n\t\treturn results, nil\n\t}\n}",
"func (s *server) Fetch(ctx context.Context, in *pb.FetchRequest) (*pb.FetchResponse, error) {\n\tbuf, err := fetchClient.DownloadFile(in.GetUrl())\n\tif err != nil {\n\t\treturn &pb.FetchResponse{StatusCode: 400, Message: err.Error()}, nil\n\t}\n\tproducts, err := misc.ReadCSV(buf)\n\tif err != nil {\n\t\treturn &pb.FetchResponse{StatusCode: 400, Message: err.Error()}, nil\n\t}\n\tfmt.Printf(\"products: %v\\n\", products)\n\tif err := s.InsertProducts(products); err != nil {\n\t\treturn &pb.FetchResponse{StatusCode: 500, Message: err.Error()}, nil\n\t}\n\treturn &pb.FetchResponse{StatusCode: 200, Message: \"success\"}, nil\n}",
"func Fetch(url string, seen Seen) (*Feed, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tout, err := Parse(body, seen)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif out.Link == \"\" {\n\t\tout.Link = url\n\t}\n\n\tout.UpdateURL = url\n\n\treturn out, nil\n}",
"func (d *CatalogNodesQuery) Fetch(clients *ClientSet, opts *QueryOptions) (interface{}, *ResponseMetadata, error) {\n\tselect {\n\tcase <-d.stopCh:\n\t\treturn nil, nil, ErrStopped\n\tdefault:\n\t}\n\n\topts = opts.Merge(&QueryOptions{\n\t\tDatacenter: d.dc,\n\t\tNear: d.near,\n\t})\n\n\tlog.Printf(\"[TRACE] %s: GET %s\", d, &url.URL{\n\t\tPath: \"/v1/catalog/nodes\",\n\t\tRawQuery: opts.String(),\n\t})\n\tn, qm, err := clients.Consul().Catalog().Nodes(opts.ToConsulOpts())\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, d.String())\n\t}\n\n\tlog.Printf(\"[TRACE] %s: returned %d results\", d, len(n))\n\n\tnodes := make([]*Node, 0, len(n))\n\tfor _, node := range n {\n\t\tnodes = append(nodes, &Node{\n\t\t\tID: node.ID,\n\t\t\tNode: node.Node,\n\t\t\tAddress: node.Address,\n\t\t\tDatacenter: node.Datacenter,\n\t\t\tTaggedAddresses: node.TaggedAddresses,\n\t\t\tMeta: node.Meta,\n\t\t})\n\t}\n\n\t// Sort unless the user explicitly asked for nearness\n\tif d.near == \"\" {\n\t\tsort.Stable(ByNode(nodes))\n\t}\n\n\trm := &ResponseMetadata{\n\t\tLastIndex: qm.LastIndex,\n\t\tLastContact: qm.LastContact,\n\t}\n\n\treturn nodes, rm, nil\n}",
"func (p *BeeswaxServiceClient) Fetch(query_id *QueryHandle, start_over bool, fetch_size int32) (r *Results, err error) {\n\tif err = p.sendFetch(query_id, start_over, fetch_size); err != nil {\n\t\treturn\n\t}\n\treturn p.recvFetch()\n}",
"func fetchData(client *http.Client, url, format string) ([]byte, error) {\n\tif format == \"json\" {\n\t\tformat = \"application/json\"\n\t} else if format == \"xml\" {\n\t\tformat = \"application/xml\"\n\t}\n\n\t// search in-memory store\n\tif CacheOn {\n\t\tvalue, found := Memory.Get(url)\n\t\tif found {\n\t\t\treturn value.([]byte), nil\n\t\t}\n\t}\n\n\treq, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Set(\"Accept\", format)\n\treq.Header.Set(\"User-Agent\", \"\")\n\n\tr, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer r.Body.Close()\n\n\tif r.StatusCode >= 400 {\n\t\tbody, err := ioutil.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn nil, errors.New(string(body))\n\t}\n\n\tdata, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// save data to in-memory store\n\tif CacheOn {\n\t\tMemory.Set(url, data, cache.DefaultExpiration)\n\t}\n\n\treturn data, nil\n}",
"func (f *Feed) Fetch(uri string, client *http.Client, charset func(charset string, input io.Reader) (io.Reader, error)) (status int, err error) {\n\tif !f.CanUpdate() {\n\t\treturn -1, nil\n\t}\n\n\tf.uri, _ = url.Parse(uri)\n\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\treq.Header.Set(\"User-Agent\", userAgent)\n\treq.Header.Set(\"If-Modified-Since\", f.lastupdate.Format(time.RFC1123))\n\tif f.eTag != \"\" {\n\t\treq.Header.Set(\"If-None-Match\", f.eTag)\n\t}\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn resp.StatusCode, nil\n\t}\n\n\tf.eTag = resp.Header.Get(\"ETag\")\n\n\treturn resp.StatusCode, f.load(resp.Body, charset)\n}",
"func (rc *Client) Fetch(url string) ([]*gofeed.Item, error) {\n\tfeed, err := rc.Parser.ParseURL(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn feed.Items, nil\n}",
"func (s *server) Fetch(ctx context.Context, req *discovery.DiscoveryRequest) (*discovery.DiscoveryResponse, error) {\n\treturn s.rest.Fetch(ctx, req)\n}",
"func (f *localFetcher) Fetch() (nodes []cluster.Node, err error) {\n\tvar data []byte\n\tif data, err = f.fetchData(); err != nil {\n\t\treturn nil, err\n\t} else if nodes, err = parseData(data, f.procName, f.addrFlag, f.re); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\treturn nodes, nil\n\t}\n}",
"func Fetch(transport interface {\n\tGet(addr string) (tame.Document, error)\n}, url string) (*Rates, error) {\n\tr := Rates{}\n\tif transport == nil {\n\t\ttransport = client.New()\n\t}\n\tif len(url) == 0 {\n\t\turl = \"https://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml\"\n\t}\n\n\tdoc, err := transport.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif err := r.Unmarshal(doc.GetBody()); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &r, nil\n}",
"func (s *server) Fetch(ctx context.Context, req *xdspb.DiscoveryRequest) (*xdspb.DiscoveryResponse, error) {\n\tresp, err := s.cache.Fetch(req)\n\treturn resp, err\n}",
"func Fetch(c Client, st Start, offset uint64, su *Subset, filters ...*Filter) (*Response, error) {\n\treq := &Request{Start: st, Subset: su, Filters: filters}\n\tif st == StartOffset {\n\t\treq.Offset = &offset\n\t}\n\tif err := req.Validate(); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Override Accept header with ndjson type\n\textra := http.Header{\"Accept\": []string{\"application/vnd.urbanairship+x-ndjson;version=3;\"}}\n\n\t// Valid request, post to API\n\tresp, err := c.Post(evurl, req, extra)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Valid response, return events iterator\n\treturn NewResponse(resp)\n}",
"func Fetch(addr string) (res *http.Response, err error) {\n\thc := newDefaultClient()\n\treq, err := newGetRequest(addr, map[string]string{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn hc.Do(req)\n}",
"func (f *RSSFeed) Fetch() ([]Item, error) {\n\tlast := f.storage.GetLastUpdate(f.url)\n\tif last.IsZero() {\n\t\t// First access\n\t\tif err := f.storage.SaveLastUpdate(f.url, time.Now()); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"save last update time: %w\", err)\n\t\t}\n\t\treturn nil, nil\n\t}\n\n\tfeed, err := f.parser.ParseURL(f.url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parse url: %w\", err)\n\t}\n\tif len(feed.Items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tvar items []Item //nolint: prealloc\n\tfor _, fitem := range feed.Items {\n\t\titem := parse(fitem)\n\t\tif !item.Published.After(last) {\n\t\t\tbreak\n\t\t}\n\t\titems = append(items, item)\n\t}\n\tif len(items) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tif err := f.storage.SaveLastUpdate(f.url, items[0].Published); err != nil {\n\t\treturn nil, fmt.Errorf(\"save last update time: %w\", err)\n\t}\n\treturn items, nil\n}",
"func Fetch(url string) ([]byte, error) {\n\tlog.Info(\"Fetching list from %s\", log.Bold(url))\n\n\tif *Opt.FetchInsecure == true {\n\t\t//this prevents error because of insecure certificate\n\t\thttp.DefaultTransport.(*http.Transport).TLSClientConfig = &tls.Config{InsecureSkipVerify: true}\n\t}\n\n\t//set a global timeout for GET calls\n\thttp.DefaultClient.Timeout = time.Duration(3 * time.Second)\n\n\t//Query given URL\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Close body after call ends\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != 200 {\n\t\treturn nil, err\n\t}\n\n\t//parse body to bytes\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//return call body\n\treturn body, nil\n}",
"func Fetch(baseURL string, resourceType string, id string) (*jsh.Document, *http.Response, error) {\n\trequest, err := FetchRequest(baseURL, resourceType, id)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treturn Do(request, jsh.ObjectMode)\n}",
"func Fetch(printConfig bool, ignoreSettingsAtPrint ...[]string) error {\n\terr := parameter.LoadConfigFile(NodeConfig, *configDirPath, *configName, true, true)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif printConfig {\n\t\tparameter.PrintConfig(NodeConfig, ignoreSettingsAtPrint...)\n\t}\n\treturn nil\n}",
"func Fetch(req Request) (io.ReadCloser, error) {\n\t//logger.Println(splashURL)\n\tresponse, err := GetResponse(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tlogger.Println(err)\n\tcontent, err := response.GetContent()\n\tif err == nil {\n\t\treturn content, nil\n\t}\n\treturn nil, err\n}",
"func (w *WorldBank) Fetch(country string, from, to time.Time) (*Response, error) {\n\tu, err := w.buildURL(country, from, to)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := w.HTTPClient.Get(u.String())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tbdy, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twr := &econ.WorldBankResponse{}\n\n\tif err = xml.Unmarshal(bdy, &wr); err != nil {\n\t\treturn nil, err\n\t}\n\n\tr := &Response{\n\t\tProvider: econ.TheWorldBankProvider,\n\t}\n\n\tfor _, pp := range wr.Data {\n\t\ti := Instant{\n\t\t\tDate: time.Date(pp.Date.Date, 12, 31, 0, 0, 0, 0, time.UTC),\n\t\t\tValue: pp.Value.Value,\n\t\t}\n\t\tr.History = append(r.History, i)\n\t}\n\n\treturn r, err\n}",
"func fetch(ctx context.Context, params newsclient.Params) (*news.Response, error) {\n\tauthKey, err := auth.LookupAPIAuthKey()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn client.Get(ctx, authKey, params)\n}",
"func Fetch(vars []Var) {\n\tFetchWithConfig(vars, DefaultConfig())\n}",
"func (p *para) fetch(url string) (*http.Response, error) {\n\treq, err := http.NewRequest(\"get\", url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tres, err := p.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn res, nil\n}",
"func getXml(url string) (html string) {\n\tres, err := http.Get(url)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbuf := bytes.NewBuffer(body)\n\n\thtml = buf.String()\n\treturn\n}",
"func (c HTTPClient) Fetch(ids []string) ([]byte, error) {\n\tres := []byte(`response for Fetch reminder`)\n\treturn res, nil\n}",
"func (fs FetchService) Fetch(req interface{}) (interface{}, error) {\n\t res, err := fs.Response(req)\n\t if err != nil {\n\t\t \treturn nil, err\n\t\t }\n\t return res, nil\n\n}",
"func Fetch(url string) (*Response, error) {\n\treturn DefaultClient.Fetch(url)\n}",
"func (i *Novel) Fetch(ctx context.Context) (err error) {\n\tif i.ID == \"\" {\n\t\treturn errors.New(\"pixiv: novel: no id specified\")\n\t}\n\tvar c = client.For(ctx)\n\tresp, err := c.GetWithContext(ctx, c.EndpointURL(\"/ajax/novel/\"+i.ID, nil).String())\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer resp.Body.Close()\n\tdata, err := client.ParseAPIResult(resp.Body)\n\tif err != nil {\n\t\treturn\n\t}\n\ti.Title = data.Get(\"title\").String()\n\ti.Description = data.Get(\"description\").String()\n\ti.CoverURL = data.Get(\"coverUrl\").String()\n\ti.Created = data.Get(\"createDate\").Time()\n\ti.Uploaded = data.Get(\"uploadDate\").Time()\n\ti.Author.ID = data.Get(\"userId\").String()\n\ti.Author.Name = data.Get(\"userName\").String()\n\ti.PageCount = data.Get(\"pageCount\").Int()\n\ti.CommentCount = data.Get(\"commentCount\").Int()\n\ti.LikeCount = data.Get(\"likeCount\").Int()\n\ti.ViewCount = data.Get(\"viewCount\").Int()\n\ti.BookmarkCount = data.Get(\"bookmarkCount\").Int()\n\ttags := []string{}\n\tfor _, i := range data.Get(\"tags.tags.#.tag\").Array() {\n\t\ttags = append(tags, i.String())\n\t}\n\ti.Tags = tags\n\tdata.Get(\"textEmbeddedImages\").ForEach(func(key, value gjson.Result) bool {\n\t\tif i.EmbeddedImages == nil {\n\t\t\ti.EmbeddedImages = make(map[string]image.URLs)\n\t\t}\n\t\ti.EmbeddedImages[key.String()] = image.URLs{\n\t\t\tThumb: value.Get(\"urls.128x128\").String(),\n\t\t\tSmall: value.Get(\"urls.480mw\").String(),\n\t\t\tRegular: value.Get(\"urls.1200x1200\").String(),\n\t\t\tOriginal: value.Get(\"urls.original\").String(),\n\t\t}\n\t\treturn true\n\t})\n\ti.Content = data.Get(\"content\").String()\n\treturn\n}",
"func (u *USPS) Fetch(trackingNumber string) (Response, error) {\n\tr := USPSResponse{}\n\n\tuu, err := url.Parse(\"http://production.shippingapis.com/ShippingAPI.dll\")\n\tif err != nil {\n\t\treturn r.Response, err\n\t}\n\n\t// Adding <Revision>1</Revision> will give a bit more detail but nothing useful\n\tx := fmt.Sprintf(`<TrackFieldRequest USERID=\"%v\"><TrackID ID=\"%v\"></TrackID></TrackFieldRequest>`,\n\t\tu.User, trackingNumber,\n\t)\n\n\tq := uu.Query()\n\tq.Set(\"API\", \"TrackV2\")\n\tq.Set(\"XML\", x)\n\tuu.RawQuery = q.Encode()\n\n\tresp, err := http.Get(uu.String())\n\tif err != nil {\n\t\treturn r.Response, err\n\t}\n\tdefer resp.Body.Close()\n\n\terr = xml.NewDecoder(resp.Body).Decode(&r)\n\treturn r.Response, err\n}",
"func Fetch(c Context, route string) ([]byte, error) {\n\tstart := time.Now()\n\n\treq, err := http.NewRequest(\"GET\", c.URI+route, nil)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn []byte{}, err\n\t}\n\n\treq.SetBasicAuth(c.Username, c.Password)\n\tclient := http.Client{Timeout: c.Timeout}\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn []byte{}, err\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != 200 {\n\t\tlog.Error(req.Method + \" \" + req.URL.Path + \": \" + res.Status)\n\t\treturn []byte{}, err\n\t}\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\tlog.Error(err.Error())\n\t\treturn []byte{}, err\n\t}\n\n\tlog.Debug(\"Get \" + c.URI + route + \" (\" + time.Since(start).String() + \")\")\n\n\treturn body, nil\n}",
"func (m *Medium) Fetch() (*Response, error) {\r\n\tvar response Response\r\n\t// check whether response is already stored in cache. If so, return from cache\r\n\tcachedResponse, found := m.Cache.Get(config.CacheKey)\r\n\tresponse, ok := cachedResponse.(Response)\r\n\tif found && ok {\r\n\t\treturn &response, nil\r\n\t}\r\n\r\n\tfp := gofeed.NewParser()\r\n\tsource := fmt.Sprintf(\"%s/%s\", m.EnvVar.MediumRSSFeedURL, m.EnvVar.MediumProfile)\r\n\tfeed, err := fp.ParseURL(source)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\t// replace ?source=rss----c4c00d9be425---4 with empty string\r\n\tsourceRSSRegexPattern := regexp.MustCompile(\"\\\\?source=rss.*\")\r\n\r\n\tposts := make([]Post, 0)\r\n\tfor _, item := range feed.Items {\r\n\t\tguid := strings.Split(item.GUID, \"/\")\r\n\t\tcontent, err := TokenizeHTML(item.Content)\r\n\t\tif err != nil {\r\n\t\t\treturn nil, err\r\n\t\t}\r\n\t\tposts = append(posts, Post{\r\n\t\t\tID: guid[len(guid)-1:][0],\r\n\t\t\tTitle: item.Title,\r\n\t\t\tLink: sourceRSSRegexPattern.ReplaceAllString(item.Link, \"\"),\r\n\t\t\tPublished: item.PublishedParsed.String(),\r\n\t\t\tCategories: item.Categories,\r\n\t\t\tAuthor: item.Author.Name,\r\n\t\t\tContent: content,\r\n\t\t})\r\n\t}\r\n\r\n\tresponse = Response{\r\n\t\tTitle: feed.Title,\r\n\t\tDescription: feed.Description,\r\n\t\tLink: sourceRSSRegexPattern.ReplaceAllString(feed.Link, \"\"),\r\n\t\tPosts: posts,\r\n\t}\r\n\r\n\tm.Cache.Set(config.CacheKey, response, config.CacheDefaultExpiration)\r\n\r\n\treturn &response, nil\r\n}",
"func (c *Client) FetchData(ctx context.Context, url string) ([]byte, error) {\n\n\t// Implement semaphores to ensure maximum concurrency threshold.\n\tc.semaphore <- struct{}{}\n\tdefer func() { <-c.semaphore }()\n\n\t// If there is an in-flight request for a unique URL, send response\n\t// from the in-flight request. Else, create the in-flight request.\n\tresponseRaw, err, shared := c.RequestGroup.Do(url, func() (interface{}, error) {\n\t\treturn c.fetchResponse(ctx)\n\t})\n\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tlog.Infof(\"in-flight status : %t\", shared)\n\n\t//time.Sleep(time.Second * 4)\n\n\tresponse := responseRaw.([]byte)\n\n\treturn response, err\n}",
"func (local *Node) Fetch(key string) (isRoot bool, replicas []RemoteNode) {\n\t// TODO: students should implement this\n\treturn\n}",
"func getRSS(url string, xmlhttp *ole.IDispatch, MinimalTest bool) (int, error) {\n\n\t// call using url,nil to see memory leak\n\tif xmlhttp == nil {\n\t\t//Initialize inside loop if not passed in from outer section\n\t\tcomshim.Add(1)\n\t\tdefer comshim.Done()\n\n\t\t//fmt.Println(\"CreateObject Microsoft.XMLHTTP\")\n\t\tunknown, err := oleutil.CreateObject(\"Microsoft.XMLHTTP\")\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tdefer func() { refcount1 += xmlhttp.Release() }()\n\n\t\t//Memory leak occurs here\n\t\txmlhttp, err = unknown.QueryInterface(ole.IID_IDispatch)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tdefer func() { refcount2 += xmlhttp.Release() }()\n\t\t//Nothing below this really matters. Can be removed if you want a tighter loop\n\t}\n\n\t//fmt.Printf(\"Download %s\\n\", url)\n\topenRaw, err := oleutil.CallMethod(xmlhttp, \"open\", \"GET\", url, false)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer openRaw.Clear()\n\n\tif MinimalTest {\n\t\treturn 1, nil\n\t}\n\n\t//Initiate http request\n\tsendRaw, err := oleutil.CallMethod(xmlhttp, \"send\", nil)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer sendRaw.Clear()\n\tstate := -1 // https://developer.mozilla.org/en-US/docs/Web/API/XMLHttpRequest/readyState\n\tfor state != 4 {\n\t\ttime.Sleep(5 * time.Millisecond)\n\t\tstateRaw := oleutil.MustGetProperty(xmlhttp, \"readyState\")\n\t\tstate = int(stateRaw.Val)\n\t\tstateRaw.Clear()\n\t}\n\n\tresponseXMLRaw := oleutil.MustGetProperty(xmlhttp, \"responseXml\")\n\tresponseXML := responseXMLRaw.ToIDispatch()\n\tdefer responseXMLRaw.Clear()\n\titemsRaw := oleutil.MustCallMethod(responseXML, \"selectNodes\", \"/rdf:RDF/item\")\n\titems := itemsRaw.ToIDispatch()\n\tdefer itemsRaw.Clear()\n\tlengthRaw := oleutil.MustGetProperty(items, \"length\")\n\tdefer lengthRaw.Clear()\n\tlength := int(lengthRaw.Val)\n\n\treturn length, nil\n}",
"func (u *FetchPositionsUseCase) Fetch() error {\n\n\tpositions, err := u._metrobusService.FetchPositions(u._pageSize)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, position := range positions {\n\n\t\texist, err := u._metrobusRepository.ExistPosition(position)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tcontinue\n\t\t}\n\n\t\tif exist {\n\t\t\tcontinue\n\t\t}\n\n\t\talcaldia, err := u._metrobusRepository.GetAlcaldiaByPosition(position.PositionAttributes.Latitude, position.PositionAttributes.Longitude)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\n\t\tposition.Alcaldia = \"\"\n\t\tif alcaldia != nil {\n\t\t\tposition.Alcaldia = alcaldia.Attributes.Name\n\t\t}\n\n\t\terr = u._metrobusRepository.SavePosition(position)\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (ac *ArticleController) Fetch(w http.ResponseWriter, r *http.Request) {\n\tp := httptreemux.ContextParams(r.Context())\n\tid, _ := strconv.Atoi(p[\"id\"])\n\n\tif id <= 0 {\n\t\tsendJSON(ErrIDNotValid, http.StatusNotFound, w)\n\t\treturn\n\t}\n\n\tarticle, err := models.ArticleGet(uint(id))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tsendJSON(\"Article not found\", http.StatusNotFound, w)\n\t\treturn\n\t}\n\n\tsendJSON(article, http.StatusOK, w)\n}",
"func fetch(url string) ([]byte, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"Failed to read from URL\")\n\t}\n\n\treturn ioutil.ReadAll(resp.Body)\n}",
"func (f *SourceFetcher) Fetch(url string, namespace string) (*info.ImageProperties, bool, error) {\n\tc := make(chan FetchResult)\n\tgo f.uniqueFetchSource(c, url, namespace)\n\tr := <-c\n\treturn r.ImageDetails, r.Downloaded, r.Error\n}",
"func (n *ns1) fetch() error {\n\tn.log.Debug(\"Performing fetch from NS1\", \"zone\", n.serviceZone.name)\n\tzone, err := n.fetchZone(n.serviceZone.name)\n\tif err != nil {\n\t\treturn err\n\t}\n\tservices := n.transformZoneRecords(zone)\n\tn.setServices(services)\n\treturn nil\n}",
"func (v *viewHelloSystem) Fetch(e ecs.Entity) (data VIHelloSystem, ok bool) {\n \n i := v.indexof(e)\n if i == -1 {\n return VIHelloSystem{}, false\n }\n return v.entities[i], true\n}",
"func (r *SpyStore) Fetch(ctx context.Context) (string, error) {\n\tdata := make(chan string, 1)\n\n\tgo func() {\n\t\tvar result string\n\t\tfor _, c := range r.response {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tr.t.Log(\"spy store got cancelled\")\n\t\t\t\treturn\n\t\t\tdefault:\n\t\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\t\tresult += string(c)\n\t\t\t}\n\t\t}\n\t\tdata <- result\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn \"\", ctx.Err()\n\tcase res := <-data:\n\t\treturn res, nil\n\t}\n}",
"func (n *News) fetch() {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\n\tif !storiesCached {\n\t\tstoriesCached = true\n\t\tlog.Println(\"loading stories...\")\n\t\tn.loadTopStoryIDs()\n\t\tn.loadStories()\n\n\t\ttime.AfterFunc(refreshTimer, func() {\n\t\t\tmutex.Lock()\n\t\t\tstoriesCached = false\n\t\t\tmutex.Unlock()\n\t\t\tn.fetch()\n\t\t})\n\t}\n}",
"func Fetch(url string) ([]byte, error) {\n\trequest, err := http.NewRequest(http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\trequest.Header.Add(\"User-Agent\", \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36 LBBROWSER\")\n\tresp, err := http.DefaultClient.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"wrong status code %d\\n\", resp.StatusCode)\n\t}\n\n\t//change io.Reader to bufio.Reader\n\tr := bufio.NewReader(resp.Body)\n\te := determinEncoding(r)\n\tuf8Reader := transform.NewReader(r, e.NewDecoder())\n\treturn ioutil.ReadAll(uf8Reader)\n}",
"func (b *Binary) Fetch(override bool) error {\n\treturn b.file.Fetch(override)\n}",
"func (c *ConfigManager) Fetch(n string) {\n\tvar con Config\n\n\tc.gm.Log.Infof(\"Loading configuration file: %s\", n)\n\n\tdata, err := ioutil.ReadFile(n)\n\n\tif err != nil {\n\t\tc.gm.Log.Error(err)\n\t\treturn\n\t}\n\n\tb := path.Base(n)\n\tname := strings.TrimSuffix(b, filepath.Ext(b))\n\n\tcon = Config{Raw: data, Name: name}\n\tc.config.Store(con.Name, con)\n}",
"func (tapestry *Tapestry) fetch(remote Node, key string) (bool, []Node, error) {\n\tvar rsp FetchResponse\n\terr := makeRemoteNodeCall(remote, \"Fetch\", FetchRequest{remote, key}, &rsp)\n\treturn rsp.IsRoot, rsp.Values, err\n}",
"func (s *LocalSnapStore) Fetch(snap brtypes.Snapshot) (io.ReadCloser, error) {\n\treturn os.Open(path.Join(snap.Prefix, snap.SnapDir, snap.SnapName))\n}",
"func (d *Dao) XMLCache(c context.Context, oid int64) (data []byte, err error) {\n\tkey := keyXML(oid)\n\tconn := d.dmMC.Get(c)\n\tdefer conn.Close()\n\titem, err := conn.Get(key)\n\tif err != nil {\n\t\tif err == memcache.ErrNotFound {\n\t\t\terr = nil\n\t\t\tPromCacheMiss(\"dm_xml\", 1)\n\t\t} else {\n\t\t\tlog.Error(\"mc.Get(%s) error(%v)\", key, err)\n\t\t}\n\t\treturn\n\t}\n\tPromCacheHit(\"dm_xml\", 1)\n\tdata = item.Value\n\treturn\n}",
"func (m *MSSQLTx) Fetch(ctx context.Context, query string, container interface{}, args ...interface{}) error {\n\treturn m.FetchWithMetrics(ctx, &metrics.NoOp{}, query, container, args...)\n}",
"func Fetch(url string, params map[string]string, headers map[string]string) (*http.Response, error) {\n\n\tclient := &http.Client{}\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\n\t// TODO: Fill in query strings\n\t//for k, v := range params {\n\t//}\n\n\tfor k, v := range headers {\n\t\treq.Header.Set(k, v)\n\t}\n\n\tres, err := client.Do(req)\n\treturn res, err\n}",
"func (c *Client) Fetch(filename, cf string, options ...interface{}) (*Fetch, error) {\n\tr := &Fetch{}\n\tlines, err := c.fetch(\"fetch\", filename, cf, r, options...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfor _, l := range lines {\n\t\tparts := strings.SplitN(l, \":\", 2)\n\t\tif len(parts) != 2 {\n\t\t\treturn nil, NewInvalidResponseError(\"fetch: unsupported value\", l)\n\t\t}\n\n\t\ti, err := strconv.ParseInt(parts[0], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, NewInvalidResponseError(\"fetch: invalid ds\", l)\n\t\t}\n\n\t\tfr := FetchRow{\n\t\t\tTime: time.Unix(i, 0),\n\t\t\tData: make([]*float64, len(r.Names)),\n\t\t}\n\t\tfor i, val := range strings.Split(strings.TrimSpace(parts[1]), \" \") {\n\t\t\tif val == \"nan\" || val == \"-nan\" {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tv, err := strconv.ParseFloat(val, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, NewInvalidResponseError(\"fetch: invalid ds val\", l)\n\t\t\t}\n\t\t\tfr.Data[i] = &v\n\t\t}\n\t\tr.Rows = append(r.Rows, fr)\n\t}\n\n\treturn r, nil\n}",
"func (cg *CellGroup) Fetch() error {\n\tp := params{}\n\tfor _, c := range cg.cells {\n\t\tp.Path(\"paths\", []string{c.Path.String()})\n\t}\n\trows, err := cg.cube.doRequest(\"/cell/values\", p)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"cells: %s\", err)\n\t}\n\tfor i := range cg.cells {\n\t\tif err := rows[i].Unmarshal(&cg.cells[i]); err != nil {\n\t\t\treturn fmt.Errorf(\"cell: bad row %d (%s)\", i, err)\n\t\t}\n\t}\n\treturn nil\n}",
"func fetch(page string) (string, error) {\n\tresp, err := http.Get(page)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn string(body), nil\n}",
"func (c Client) Fetch() (*FetchRevisionResponse, error) {\n\treturn c.FetchWithContext(context.Background())\n}",
"func (s *GCSSnapStore) Fetch(snap Snapshot) (io.ReadCloser, error) {\n\tobjectName := path.Join(s.prefix, snap.SnapDir, snap.SnapName)\n\treturn s.client.Bucket(s.bucket).Object(objectName).NewReader(s.ctx)\n}",
"func (c *Collector) Fetch(address, username, password string) error {\n\treqJobs, err := http.NewRequest(\n\t\t\"GET\",\n\t\tfmt.Sprintf(\"%s/api/json\", address),\n\t\tnil,\n\t)\n\n\tif username != \"\" && password != \"\" {\n\t\treqJobs.SetBasicAuth(username, password)\n\t}\n\n\tjobs, err := simpleClient().Do(reqJobs)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to request jobs api. %s\", err)\n\t}\n\tdefer jobs.Body.Close()\n\n\tif err := json.NewDecoder(jobs.Body).Decode(c); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse jobs api. %s\", err)\n\t}\n\n\treqQueue, err := http.NewRequest(\n\t\t\"GET\",\n\t\tfmt.Sprintf(\"%s/queue/api/json\", address),\n\t\tnil,\n\t)\n\n\tif username != \"\" && password != \"\" {\n\t\treqQueue.SetBasicAuth(username, password)\n\t}\n\n\tqueue, err := simpleClient().Do(reqQueue)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to request queue api. %s\", err)\n\t}\n\tdefer queue.Body.Close()\n\n\tif err := json.NewDecoder(queue.Body).Decode(c); err != nil {\n\t\treturn fmt.Errorf(\"failed to parse queue api. %s\", err)\n\t}\n\n\treturn nil\n}",
"func fetchFromServer(server string) []Element {\n\telements := make([]Element, 0)\n\tresp, err := http.Get(getServerPath(server, \"/fetch\"))\n\tif err != nil {\n\t\tlog.Fatal(\"Error\", err)\n\t}\n\tjson.NewDecoder(resp.Body).Decode(&elements)\n\treturn elements\n}",
"func fetchData(url string) (*info, error) {\r\n\r\n\t// Throttle the data request rate\r\n\ttime.Sleep(100 * time.Millisecond)\r\n\r\n\tresp, _ := http.Get(source + url)\r\n\tdefer resp.Body.Close()\r\n\r\n\tresult, err := ioutil.ReadAll(resp.Body)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\tvar i info\r\n\terr = json.Unmarshal(result, &i)\r\n\tif err != nil {\r\n\t\t//log.Println(err, err.Error())\r\n\t\treturn nil, err\r\n\t}\r\n\r\n\ttotalRequest++\r\n\t//fmt.Printf(\"%v\\n\\n\", i)\r\n\treturn &i, nil\r\n}",
"func (soca *SongCategory) Fetch() error {\n\t_soca, err := GetSongCategory(soca.Genre, soca.Page)\n\t*soca = *_soca\n\t// not complete\n\treturn err\n}",
"func (client *HTTPClient) GetAsXML(url string, dest interface{}, opts *RequestOptions) error {\n\tresp, err := client.Get(url, opts)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\treturn xml.NewDecoder(resp.Body).Decode(dest)\n}",
"func getXMLInfo(id string) ([]byte, error) {\n\tua := &http.Client{Timeout: 10 * time.Second}\n\tstationURL := fmt.Sprintf(baseURL+\"%s\", id)\n\n\tr, err := ua.Get(stationURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Could not get station info: %q\", err)\n\t}\n\tdefer r.Body.Close()\n\n\tresBody, err := ioutil.ReadAll(r.Body)\n\treturn resBody, nil\n}",
"func (b *Bill) FetchBillData() (string, string, []LegistarAction, error) {\n\tvar actions []LegistarAction\n\n\tresponse, err := http.Get(b.URL)\n\tif err != nil {\n\t\treturn \"\", \"\", actions, err\n\t}\n\tdefer response.Body.Close()\n\n\tdocument, err := goquery.NewDocumentFromReader(response.Body)\n\tif err != nil {\n\t\treturn \"\", \"\", actions, err\n\t}\n\n\ttitle := strings.TrimSpace(document.Find(\"#ctl00_ContentPlaceHolder1_lblTitle2\").First().Text())\n\tclassification := strings.TrimSpace(document.Find(\"#ctl00_ContentPlaceHolder1_lblType2\").First().Text())\n\tstatus := strings.TrimSpace(document.Find(\"#ctl00_ContentPlaceHolder1_lblStatus2\").First().Text())\n\tcommittee := strings.TrimSpace(document.Find(\"#ctl00_ContentPlaceHolder1_hypInControlOf2\").First().Text())\n\n\tdocument.Find(\".rgMasterTable tbody tr\").Each(func(index int, element *goquery.Selection) {\n\t\taction := LegistarAction{}\n\t\telement.Find(\"td\").Each(func(tdIdx int, tdEl *goquery.Selection) {\n\t\t\tif tdIdx == 0 {\n\t\t\t\taction.Date, _ = time.Parse(\"1/2/2006\", strings.TrimSpace(tdEl.Text()))\n\t\t\t} else if tdIdx == 2 {\n\t\t\t\taction.Actor = strings.TrimSpace(tdEl.Text())\n\t\t\t} else if tdIdx == 3 {\n\t\t\t\taction.Action = strings.TrimSpace(tdEl.Text())\n\t\t\t\tif action.Action == \"\" {\n\t\t\t\t\taction.Action = status\n\t\t\t\t}\n\t\t\t}\n\t\t\tif strings.Contains(action.Action, \"Referred\") {\n\t\t\t\taction.Committee = committee\n\t\t\t}\n\t\t})\n\t\tactions = append(actions, action)\n\t})\n\treturn title, classification, actions, nil\n}",
"func FetchContent(path string) (interface{}, error) {\n\treturn util.LoadFile(path)\n}",
"func (f *IpfsFetcher) Fetch(ctx context.Context, filePath string) ([]byte, error) {\n\tsh, _, err := ApiShell(\"\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := sh.Request(\"cat\", path.Join(f.distPath, filePath)).Send(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.Error != nil {\n\t\treturn nil, resp.Error\n\t}\n\n\tvar rc io.ReadCloser\n\tif f.limit != 0 {\n\t\trc = migrations.NewLimitReadCloser(resp.Output, f.limit)\n\t} else {\n\t\trc = resp.Output\n\t}\n\tdefer rc.Close()\n\n\treturn io.ReadAll(rc)\n}",
"func Fetch(settings *Settings, taskSettings *TaskSettings) {\n\ttoken := fetchToken(settings, taskSettings)\n\tprintToken(token, taskSettings.Format, settings)\n}",
"func FetchFeedFile(urlstr string, gzip bool) ([]byte, error) {\n\tlog.Infof(\"Fetching... %s\", urlstr)\n\tbody, err := fetchFile(urlstr, gzip, 5)\n\tif err != nil {\n\t\treturn nil, xerrors.Errorf(\"Failed to fetch file. err: %w\", err)\n\t}\n\treturn body, nil\n}",
"func (s *Server) Fetch(ctx context.Context, job *pb.FetchRequest) (*pb.FetchResponse, error) {\n\t// FIXME(tony): to make function fetch easily to mock, we should decouple server package\n\t// with argo package by introducing s.fetch\n\t_, wf, e := workflow.New(getWorkflowBackend())\n\tif e != nil {\n\t\treturn nil, e\n\t}\n\treturn wf.Fetch(job)\n}",
"func (h *StackOverflow) Fetch(queries []string, result *[]string) {\n\n\t*result = append(*result, \"https://stackoverflow.com/search?q=\"+h.formatQuery(queries))\n\n}",
"func (fs *Memory) Fetch(key string) (io.ReadCloser, error) {\n\tsourcePath := filepath.Join(fs.root, key)\n\tf, err := fs.fs.Open(sourcePath)\n\treturn f, errors.Wrap(err, \"could not open file\")\n}",
"func (h *Handler) Fetch(c echo.Context) error {\n\tidP, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\treturn c.JSON(http.StatusNotFound, err)\n\t}\n\n\tid := uint(idP)\n\n\trec, err := h.Store.Fetch(id)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, err)\n\t}\n\n\treturn c.JSON(http.StatusOK, rec)\n}",
"func (wf WebFetcher) Fetch(url string) (body string, urls []string, err error) {\n\trawURL, err := urlUtil.Parse(url)\n\tif err != nil {\n\t\treturn\n\t}\n\thost := rawURL.Host\n\tscheme := rawURL.Scheme\n\trawURL.Fragment = \"\" // remove #..\n\turl = rawURL.String()\n\tfr := fetchReq{url: url, ch: make(chan []byte), t: network}\n\tfetchCh <- fr\n\tdigits, ok := <-fr.ch\n\tif !ok {\n\t\terr = errors.New(\"CHAN_FAILED\")\n\t}\n\tbody = string(digits)\n\turls = GetLinks(body, host, scheme)\n\treturn\n}",
"func (m *Model) Fetch(storeLDAP storage.LDAP) error {\n\tif m.Base == \"\" {\n\t\treturn errors.New(\"model requires base to be set\")\n\t}\n\n\tif m.Username == \"\" {\n\t\treturn errors.New(\"model requires username to be set\")\n\t}\n\n\tsr := ldap.NewSimpleSearchRequest(m.Base, ldap.ScopeWholeSubtree, fmt.Sprintf(\"(uid=%s)\", m.Username), []string{\"displayName\", \"mail\", \"mobile\", \"postalAddress\"})\n\tresult, err := storeLDAP.Search(sr)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(result.Entries) != 1 {\n\t\treturn errors.New(\"user lookup failed\")\n\t}\n\n\tm.DN = result.Entries[0].DN\n\tm.Name = result.Entries[0].GetAttributeValue(\"displayName\")\n\tm.Email = result.Entries[0].GetAttributeValue(\"mail\")\n\tm.Mobile = result.Entries[0].GetAttributeValue(\"mobile\")\n\tm.Address = result.Entries[0].GetAttributeValue(\"postalAddress\")\n\n\treturn nil\n}",
"func fetchAllArticleList() {\n\t// loop over the articleList jumping 16 items\n\tfor i := 0; i < articleL; i++ {\n\t\t// go routine to fetch data\n\t\tgo func(url string, index int) {\n\t\t\tfetchDoc(url, func(doc *goquery.Document) {\n\t\t\t\t// check blank URLs\n\t\t\t\tif url == \"\" {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tvar text string\n\t\t\t\tat := new(artText)\n\n\t\t\t\t// set the index of artText for proper synchronization of the data\n\t\t\t\tat.I = index\n\n\t\t\t\t// query the article element\n\t\t\t\tq := doc.Find(paraSelector)\n\n\t\t\t\t// check the no. of elements\n\t\t\t\tif q.Length() <= 0 {\n\t\t\t\t\t// create an error msg\n\t\t\t\t\tat.Text = fmt.Sprintf(\n\t\t\t\t\t\t\"error: %v: 0 result from \\\"%v \\\"(selector) \",\n\t\t\t\t\t\turl,\n\t\t\t\t\t\tparaSelector)\n\t\t\t\t\t// send the error msg\n\t\t\t\t\tch <- *at\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// scrap the data\n\t\t\t\tq.Each(func(j int, t *goquery.Selection) {\n\t\t\t\t\ttext = strings.TrimSpace(t.Text())\n\t\t\t\t\t// add the text to the final text\n\t\t\t\t\t// if the text is not blank\n\t\t\t\t\tif text != \"\" {\n\t\t\t\t\t\t// add extra space if the no the 1st element\n\t\t\t\t\t\tif j != 0 {\n\t\t\t\t\t\t\tat.Text += (\"\\n\\n\" + text)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tat.Text += text\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t})\n\n\t\t\t\t// send data back to the channel\n\t\t\t\tch <- *at\n\t\t\t})\n\t\t}(articleList[i].URL, i)\n\n\t\t// sleep for 2 seconds to avoid rejection from the website\n\t\tif i%16 == 0 {\n\t\t\ttime.Sleep(2 * time.Second)\n\t\t}\n\t}\n}",
"func FetchArticle(ctx context.Context, url string) (*model.Article, error) {\n\t// Validate URL\n\t_, err := nurl.ParseRequestURI(url)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid URL: %v\", err)\n\t}\n\n\t// Get URL content type\n\tcontentType, err := getContentType(ctx, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif !strings.HasPrefix(contentType, \"text/html\") {\n\t\treturn nil, fmt.Errorf(\"invalid content-type: %s\", contentType)\n\t}\n\n\t// Get URL content\n\tres, err := get(ctx, url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer res.Body.Close()\n\tbody, err := charset.NewReader(res.Body, contentType)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Extract metas\n\tmetas, err := ExtractMetas(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Create article with Open Graph atributes\n\tresult := &model.Article{\n\t\tText: metas.GetContent(\"og:description\", \"twitter:description\", \"description\"),\n\t\tImage: metas.GetContent(\"og:image\", \"twitter:image\"),\n\t}\n\ttitle := metas.GetContent(\"og:title\")\n\tif title != nil {\n\t\tresult.Title = *title\n\t}\n\n\tvar buffer bytes.Buffer\n\ttee := io.TeeReader(body, &buffer)\n\n\t// Test if the HTML page is readable by Shiori readability\n\tif !read.IsReadable(tee) {\n\t\treturn result, fmt.Errorf(\"unable to extract content from HTML page\")\n\t}\n\n\t// Extract content from the HTML page\n\tarticle, err := read.FromReader(&buffer, url)\n\tif err != nil {\n\t\treturn result, err\n\t}\n\n\t// Complete result with extracted properties\n\tresult.HTML = &article.Content\n\tif result.Title == \"\" {\n\t\tresult.Title = article.Title\n\t}\n\tif result.Text == nil {\n\t\t// FIXME: readability excerpt don't well support UTF8\n\t\ttext := tooling.ToUTF8(article.Excerpt)\n\t\tresult.Text = &text\n\t}\n\tif result.Image == nil {\n\t\tresult.Image = &article.Image\n\t}\n\n\t// TODO: add other properties to the result\n\t// article.Favicon\n\t// article.Length\n\t// article.SiteName\n\n\treturn result, nil\n}",
"func (c *carService) FetchData() {\n\tclient := http.Client{}\n\n\tfmt.Printf(\"Fetching the url %s\", carServiceURL)\n\n\t// Call the external API\n\tresp, _ := client.Get(carServiceURL)\n\tfmt.Println(\"Response\", resp)\n\n\t// Write response to the channel\n\tcarDataChannel <- resp\n\n}",
"func (f *RealFetcher) Fetch(url string) (body string, urls []string, err error) {\n\treq, _ := http.NewRequest(\"POST\", url, nil)\n\turls = make([]string, 0)\n\treq.Header.Add(\"cache-control\", \"no-cache\")\n\tres, erro := http.DefaultClient.Do(req)\n\tif erro != nil {\n\t\treturn \"\", []string{}, erro\n\t}\n\tdefer res.Body.Close()\n\tb, _ := ioutil.ReadAll(res.Body)\n\tre := regexp.MustCompile(`href *= *\"http[^-\"]+`)\n\th := re.FindAll(b, -1)\n\tfor _, i := range h {\n\t\tre = regexp.MustCompile(`http[^\"]+`)\n\t\takt := re.Find(i)\n\t\turls = append(urls, string(akt))\n\t}\n\treturn string(b), urls, nil\n}",
"func (f realFetcher) Fetch(urlToFetch string) (string, []string, error) {\n\tf.guard <- struct{}{}\n\tdefer func() {\n\t\t<-f.guard\n\t}()\n\n\tdomain, _ := getDomainFromURL(urlToFetch)\n\tresults := make([]string, 0, maxLinksScraped)\n\tlinksScraped := 0\n\tresp, err := f.client.Get(urlToFetch)\n\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn \"\", nil, err\n\t}\n\n\tdefer func() {\n\t\tresp.Body.Close()\n\t}()\n\n\tz := html.NewTokenizer(resp.Body)\n\n\tfor {\n\t\ttt := z.Next()\n\n\t\tswitch tt {\n\t\tcase html.ErrorToken:\n\t\t\treturn \"\", results, nil\n\t\tcase html.StartTagToken:\n\n\t\t\ttn, _ := z.TagName()\n\t\t\tif len(tn) == 1 && tn[0] == 'a' {\n\t\t\t\t// Scan anchor tag for href attribute\n\t\t\t\tkey, val, moreAttrs := z.TagAttr()\n\t\t\t\tfor {\n\n\t\t\t\t\tif string(key) == \"href\" {\n\t\t\t\t\t\tif isHTTP, _ := regexp.Match(`https?://.*`, val); isHTTP {\n\t\t\t\t\t\t\tchildDomain, err := getDomainFromURL(string(val))\n\n\t\t\t\t\t\t\t// Check if the url was valid (html document could always be bad)\n\t\t\t\t\t\t\t// Then check that the domain is different from our parent\n\t\t\t\t\t\t\tif err == nil && domain != childDomain {\n\t\t\t\t\t\t\t\tresults = append(results, string(val))\n\t\t\t\t\t\t\t\tlinksScraped++\n\t\t\t\t\t\t\t\tif linksScraped >= maxLinksScraped {\n\t\t\t\t\t\t\t\t\treturn \"\", results, nil\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t}\n\t\t\t\t\t\t// Stop reading attributes once we get to the href attribute\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tif !moreAttrs {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\tkey, val, moreAttrs = z.TagAttr()\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n}",
"func (c *Client) Fetch(rawURL string) (*Response, error) {\n\tparsedURL, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse URL: %v\", err)\n\t}\n\thost := parsedURL.Host\n\tif parsedURL.Port() == \"\" {\n\t\thost = net.JoinHostPort(parsedURL.Hostname(), \"1965\")\n\t}\n\treturn c.FetchWithHost(host, rawURL)\n}",
"func Fetch(ctx *cli.Context) {\n\tc, err := config.Read(ctx.GlobalString(\"config\"))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\targs := ctx.Args()\n\tvar url string\n\tswitch len(args) {\n\tcase 0:\n\t\turl = fmt.Sprintf(\"%s/%s?key=%s\", c.XAPI, \"v2/exercises\", c.APIKey)\n\tcase 1:\n\t\turl = fmt.Sprintf(\"%s/%s/%s?key=%s\", c.XAPI, \"v2/exercises\", args[0], c.APIKey)\n\tcase 2:\n\t\turl = fmt.Sprintf(\"%s/%s/%s/%s\", c.XAPI, \"v2/exercises\", args[0], args[1])\n\tdefault:\n\t\tmsg := \"Usage: exercism fetch\\n or: exercism fetch LANGUAGE\\n or: exercism fetch LANGUAGE PROBLEM\"\n\t\tlog.Fatal(msg)\n\t}\n\n\tproblems, err := api.Fetch(url)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thw := NewHomework(problems, c)\n\terr = hw.Save()\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\thw.Summarize()\n}",
"func Fetch(id string) (Data, error) {\n\tresponseStatus, responsePayload, err := doRequest(&request{\n\t\tmethod: \"GET\",\n\t\tresource: \"v1/organisation/accounts/\" + id,\n\t})\n\n\tif err != nil {\n\t\treturn Data{}, err\n\t}\n\n\treturn handleResponseData(responsePayload, http.StatusOK, responseStatus)\n}",
"func (f *Fetch) fetch(url string, readBody bool) ([]byte, error) {\n\tf.mutex.Lock()\n\tcontents, present := f.cache[url]\n\tf.mutex.Unlock()\n\tif present {\n\t\tlog.Debug(\"Retrieved %s from cache\", url)\n\t\treturn contents, nil\n\t}\n\tvar err error\n\tfor _, repo := range f.repos {\n\t\tif contents, err = f.fetchURL(repo+url, readBody); err == nil {\n\t\t\tf.mutex.Lock()\n\t\t\tdefer f.mutex.Unlock()\n\t\t\tf.cache[url] = contents\n\t\t\treturn contents, nil\n\t\t}\n\t}\n\treturn nil, err\n}",
"func (c *Client) Fetch(target interface{}) error {\n\tlog.Debug(\"Fetching data from \", c.URL())\n\tresp, err := http.Get(c.URL())\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\te := fmt.Errorf(\"fetch error: %s\", resp.Status)\n\t\tlog.Error(e)\n\t\treturn e\n\t}\n\n\tb, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\te := fmt.Errorf(\"read error: %s: %v\\n\", c.URL(), err)\n\t\tlog.Error(e)\n\t\treturn e\n\t}\n\n\tif err := json.Unmarshal(b, &target); err != nil {\n\t\te := fmt.Errorf(\"unmarshal error: %s: %v\\n\", b, err)\n\t\tlog.Error(e)\n\t\treturn e\n\t}\n\n\treturn nil\n}",
"func (p *psqlExchangeRepository) Fetch(page int, size int) ([]*models.Exchange, error) {\n\toffset := page * size\n\tquery := \"SELECT id, from_cur, to_cur FROM exchange_rate LIMIT $1 OFFSET $2;\"\n\treturn p.fetch(query, size, offset)\n}",
"func (tbl AssociationTable) Fetch(req require.Requirement, query string, args ...interface{}) ([]*Association, error) {\n\treturn doAssociationTableQueryAndScan(tbl, req, false, query, args...)\n}",
"func (page *Page) Fetch() (body io.ReadCloser, e error) {\n\tif resp, err := http.Get(page.URL.String()); err != nil {\n\t\te = err\n\t} else {\n\t\tif resp.StatusCode == 200 {\n\t\t\tbody = resp.Body\n\t\t}\n\t}\n\treturn\n}",
"func FetchKML(day, month, year string, config *tomlConfig) ([]byte, error) {\n\tfmt.Println(\"Start fetching timeline\")\n\tfmt.Println(\"Month:\" + month + \"-- Day:\" + day)\n\tfmt.Println(\"Reading Cookie info:\")\n\tfmt.Printf(\"SID: %s\\nHSID: %s\\nSSID: %s\\nAPISID: %s\\nSAPISID: %s\\nNID: %s\\nJAR: %s\\n\",\n\t\tconfig.Cookie.SID, config.Cookie.HSID, config.Cookie.SSID, config.Cookie.APISID, config.Cookie.SAPISID,\n\t\tconfig.Cookie.NID, config.Cookie.JAR)\n\tfmt.Println(\"Setup curl like fetch:\")\n\n\treq, err := http.NewRequest(\"GET\", \"https://www.google.be/maps/timeline/kml?authuser=0&pb=!1m8!1m3!1i\"+year+\"!2i\"+month+\"!3i\"+day+\"!2m3!1i2018!2i3!3i21\", nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Authority\", \"www.google.be\")\n\treq.Header.Set(\"Cache-Control\", \"max-age=0\")\n\treq.Header.Set(\"Upgrade-Insecure-Requests\", \"1\")\n\treq.Header.Set(\"User-Agent\", \"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.139 Mobile Safari/537.36\")\n\treq.Header.Set(\"Accept\", \"text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8\")\n\treq.Header.Set(\"X-Client-Data\", \"CJe2yQEIprbJAQjEtskBCKmdygEIqKPKAQ==\")\n\treq.Header.Set(\"Referer\", \"https://www.google.be/\")\n\treq.Header.Set(\"Accept-Language\", \"en-US,en;q=0.9,nl;q=0.8\")\n\treq.Header.Set(\"Cookie\", \"SID=\"+config.Cookie.SID+\"; HSID=\"+config.Cookie.HSID+\"; SSID=\"+config.Cookie.SSID+\"; APISID=\"+config.Cookie.APISID+\"; SAPISID=\"+config.Cookie.SAPISID+\"; CONSENT=\"+config.Cookie.CONSENT+\"; NID=\"+config.Cookie.NID+\"; 1P_JAR=\"+config.Cookie.JAR)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfmt.Println(resp)\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn body, nil\n}",
"func main() {\n\tvar data Students\n\terr := xml.Unmarshal([]byte(XML_Data), &data)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\treturn\n\t}\n\tfor _, s := range data.Student {\n\t\tfmt.Println(s.Name)\n\t}\n}",
"func (i *interactor) Fetch(arg ...string) error {\n\tremote, err := i.remote()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not resolve remote for fetching: %w\", err)\n\t}\n\targ = append([]string{\"fetch\", remote}, arg...)\n\ti.logger.Infof(\"Fetching from %s\", remote)\n\tif out, err := i.executor.Run(arg...); err != nil {\n\t\treturn fmt.Errorf(\"error fetching: %w %v\", err, string(out))\n\t}\n\treturn nil\n}",
"func (s *Server) Fetch(ctx context.Context, req *pb.FetchRequest) (*pb.FetchResponse, error) {\n\tp, ok := peer.FromContext(ctx)\n\tif !ok {\n\t\treturn nil, errors.New(\"failed to extract peer details from context\")\n\t}\n\tlogger := s.logger.With(zap.Stringer(\"addr\", p.Addr), zap.String(\"key\", req.Key))\n\tlogger.Info(\"fetch request received\")\n\n\t// pull record from store\n\tval, ts, err := s.store.Get(req.GetKey())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to process get: %w\", err)\n\t}\n\n\treturn &pb.FetchResponse{\n\t\tValue: val,\n\t\tTimestamp: ts,\n\t}, nil\n}",
"func Fetch(webURL string) (*http.Response, error) {\n\tclient := &http.Client{\n\t\tTimeout: time.Second * 10,\n\t}\n\n\t// fetch by proxy\n\tif viper.GetBool(\"fetch.proxy\") == true && store.CountProxy() > 0 {\n\t\tproxy := util.CombURL(store.RandomOne())\n\t\turlproxy, err := new(url.URL).Parse(proxy)\n\t\tif err != nil {\n\t\t\tlogrus.WithField(\"proxy\", proxy).Error(err)\n\t\t} else {\n\t\t\tlogrus.WithField(\"proxy\", proxy).Info(\"Fetch by proxy\")\n\t\t}\n\t\tclient.Transport = &http.Transport{\n\t\t\tProxy: http.ProxyURL(urlproxy),\n\t\t}\n\t}\n\n\trequest, err := http.NewRequest(\"GET\", webURL, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trequest.Header.Set(\"User-Agent\", browser.Computer())\n\n\tresp, err := client.Do(request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp, nil\n}",
"func ParseXMLUrl(s NewsFeedStruct, u string) chan []byte {\n\tresp := GetHTTPResponse(u)\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tCheck(err)\n\tc := string(body)\n\n\tfp := gofeed.NewParser()\n\tfeed, _ := fp.ParseString(c)\n\tfeed1 := feed.Items\n\tj := make(chan []byte)\n\tgo func() {\n\t\tfor i := 0; i < len(feed1); i++ {\n\t\t\ts.Title = feed1[i].Title\n\t\t\ts.Link = feed1[i].Link\n\t\t\ta, err := json.MarshalIndent(s, \"\", \" \")\n\t\t\tCheck(err)\n\t\t\tj <- a\n\t\t}\n\t\tclose(j)\n\t}()\n\treturn j\n}",
"func (s *StatFS) Fetch() error {\n\treturn syscall.Statfs(s.path, &s.stat)\n}",
"func FetchSemester(s string) Semester {\n\tbody, err := util.ByteRequest(fmt.Sprintf(\"https://web.stevens.edu/scheduler/core/core.php?cmd=getxml&term=%s\", s))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar sm Semester\n\terr = xml.Unmarshal(body, &sm)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn sm\n}",
"func (s Site) Fetch(url string) (body string, r ResponseInfo, urls []string) {\n\t// see if the passed url is outside of the baseURL\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn \"\", r, nil\n\t}\n\tdefer resp.Body.Close()\n\tr.Status = resp.Status\n\tr.StatusCode = resp.StatusCode\n\tbuff := &bytes.Buffer{}\n\ttee := io.TeeReader(resp.Body, buff)\n\ttokens := getTokens(tee)\n\tif len(tokens) == 0 {\n\t\tr.Err = fmt.Errorf(\"%s: nothing in body\", url)\n\t\treturn \"\", r, nil\n\t}\n\turls, err = s.linksFromTokens(tokens)\n\tif err != nil {\n\t\tr.Err = err\n\t\treturn \"\", r, nil\n\t}\n\treturn buff.String(), r, urls\n}",
"func Fetch(c *gin.Context) {\n\tvar recipe model.Recipe\n\tid, err := strconv.ParseUint(c.Param(\"id\"), 10, 64)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"message\": \"invalid id-format: \" + c.Param(\"id\")})\n\t\treturn\n\t}\n\trecipe.ID = uint(id)\n\terr = recipe.Read()\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"message\": err.Error()})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"found\", \"data\": recipe.Description()})\n}",
"func (s *SignatureRequestAPI) Fetch(ctx context.Context, p SignatureRequestListParam) (SignatureRequestList, error) {\n\tpath := s.client.BaseURL + subURLSignatureRequest\n\treq, err := s.client.prepareRequest(\n\t\tctx,\n\t\trequestParam{\n\t\t\tpath: path,\n\t\t\tmethod: http.MethodGet,\n\t\t})\n\tif err != nil {\n\t\treturn SignatureRequestList{}, err\n\t}\n\n\tq := req.URL.Query()\n\tq.Add(\"account_id\", p.AccountID)\n\tq.Add(\"page\", strconv.Itoa(p.Page))\n\tq.Add(\"page_size\", strconv.Itoa(p.PageSize))\n\tq.Add(\"query\", p.Query)\n\n\treq.URL.RawQuery = q.Encode()\n\n\tresp, err := s.client.executeRequest(req)\n\tif err != nil {\n\t\treturn SignatureRequestList{}, err\n\t}\n\n\tsignatureRequestList := SignatureRequestList{}\n\terr = json.NewDecoder(resp.Body).Decode(&signatureRequestList)\n\tif err != nil {\n\t\treturn SignatureRequestList{}, err\n\t}\n\n\treturn signatureRequestList, nil\n}",
"func (tbl DbCompoundTable) Fetch(req require.Requirement, query string, args ...interface{}) ([]*Compound, error) {\n\treturn tbl.doQuery(req, false, query, args...)\n}",
"func (i *Inbox[M]) Fetch(offset int) (Render, error) {\n\tID := &i.InboxItems[offset].ID\n\tdoc, err := i.client.GetMailPage(i.Name, *ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm, err := mail.Parse(doc)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm.SetID(*ID)\n\treturn m, nil\n}",
"func (d *AttachmentData) Fetch() ([]byte, error) {\n\tif d.JSON != nil {\n\t\tbits, err := json.Marshal(d.JSON)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to marshal json contents : %w\", err)\n\t\t}\n\n\t\treturn bits, nil\n\t}\n\n\tif d.Base64 != \"\" {\n\t\tbits, err := base64.StdEncoding.DecodeString(d.Base64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to base64 decode attachment contents : %w\", err)\n\t\t}\n\n\t\treturn bits, nil\n\t}\n\n\t// TODO add support for checksum verification\n\n\t// TODO add support to fetch links\n\n\t// TODO add support for jws signatures\n\n\treturn nil, errors.New(\"no contents in this attachment\")\n}",
"func LoadURL(url string) (*Node, error) {\n\tresp, err := http.Get(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\t// Make sure the Content-Type has a valid XML MIME type\n\tif xmlMIMERegex.MatchString(resp.Header.Get(\"Content-Type\")) {\n\t\treturn Parse(resp.Body)\n\t}\n\treturn nil, fmt.Errorf(\"invalid XML document(%s)\", resp.Header.Get(\"Content-Type\"))\n}",
"func (t *Target) fetch(namespace string, name string) (*unstructured.Unstructured, error) {\n\tisvc := &unstructured.Unstructured{}\n\tisvc.SetGroupVersionKind(schema.GroupVersionKind{\n\t\tGroup: \"serving.kubeflow.org\",\n\t\tKind: \"InferenceService\",\n\t\tVersion: \"v1beta1\",\n\t})\n\terr := t.k8sclient.Get(context.Background(), client.ObjectKey{\n\t\tNamespace: namespace,\n\t\tName: name,\n\t}, isvc)\n\treturn isvc, err\n}"
] | [
"0.71618414",
"0.58755547",
"0.57526934",
"0.5746789",
"0.5668734",
"0.56234515",
"0.5609424",
"0.5606312",
"0.5600024",
"0.5576584",
"0.5536955",
"0.552927",
"0.55244035",
"0.55164766",
"0.55108875",
"0.5485493",
"0.5482583",
"0.5451337",
"0.5439825",
"0.5424137",
"0.53986615",
"0.5386703",
"0.5375423",
"0.533813",
"0.533109",
"0.5321227",
"0.5302541",
"0.53024197",
"0.5299178",
"0.52811307",
"0.52806485",
"0.5272952",
"0.52527654",
"0.52455354",
"0.51993936",
"0.51842463",
"0.5183264",
"0.5179796",
"0.51696235",
"0.5169268",
"0.51441675",
"0.51431644",
"0.5141938",
"0.51365733",
"0.5134015",
"0.51251346",
"0.5118128",
"0.51012677",
"0.5088663",
"0.50874585",
"0.5084678",
"0.5079166",
"0.50748545",
"0.50691986",
"0.5061176",
"0.5052741",
"0.504897",
"0.50376064",
"0.5031626",
"0.50298494",
"0.5022592",
"0.5021057",
"0.50204444",
"0.50088346",
"0.50064224",
"0.5003613",
"0.50028443",
"0.5001285",
"0.5000227",
"0.49958843",
"0.49958152",
"0.49905196",
"0.49776554",
"0.49758086",
"0.4972991",
"0.49696454",
"0.49658135",
"0.496506",
"0.49624634",
"0.4959289",
"0.49395835",
"0.4934341",
"0.49318892",
"0.49317786",
"0.49292383",
"0.49292144",
"0.49275804",
"0.49269107",
"0.49243617",
"0.49183926",
"0.49180424",
"0.4913289",
"0.49092343",
"0.4896102",
"0.48855",
"0.4884236",
"0.48822892",
"0.48789287",
"0.48777896",
"0.48633665",
"0.48532945"
] | 0.0 | -1 |
Build arrow chunk based on RowSet of base64 | func buildFirstArrowChunk(rowsetBase64 string, loc *time.Location, alloc memory.Allocator) arrowResultChunk {
rowSetBytes, err := base64.StdEncoding.DecodeString(rowsetBase64)
if err != nil {
return arrowResultChunk{}
}
rr, err := ipc.NewReader(bytes.NewReader(rowSetBytes), ipc.WithAllocator(alloc))
if err != nil {
return arrowResultChunk{}
}
return arrowResultChunk{rr, 0, loc, alloc}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (r *ViewResult) NextBytes() []byte {\n\n\tif len(r.Errors) > 0 || r.iterErr != nil {\n\t\treturn nil\n\t}\n\n\tif r.iterIndex >= len(r.Rows) {\n\t\treturn nil\n\t}\n\tr.iterIndex++\n\n\tvar rowBytes []byte\n\trowBytes, r.iterErr = json.Marshal(r.Rows[r.iterIndex-1])\n\tif r.iterErr != nil {\n\t\treturn nil\n\t}\n\n\treturn rowBytes\n\n}",
"func ExamplePdfMaroto_Base64Image() {\n\tm := pdf.NewMaroto(consts.Portrait, consts.A4)\n\trowHeight := 5.0\n\tbase64String := \"y7seWGHE923Sdgs...\"\n\n\tm.Row(rowHeight, func() {\n\t\tm.Col(func() {\n\t\t\tm.Base64Image(base64String, consts.Png, props.Rect{\n\t\t\t\tLeft: 5,\n\t\t\t\tTop: 5,\n\t\t\t\tCenter: true,\n\t\t\t\tPercent: 85,\n\t\t\t})\n\t\t})\n\t})\n\n\t// Do more things and save...\n}",
"func (r *analyticsDeferredResultHandle) NextBytes() []byte {\n\tif r.err != nil {\n\t\treturn nil\n\t}\n\n\tif r.status == \"success\" && !r.hasResult {\n\t\treq := &gocbcore.HttpRequest{\n\t\t\tService: gocbcore.CbasService,\n\t\t\tPath: r.handleUri,\n\t\t\tMethod: \"GET\",\n\t\t}\n\n\t\terr := r.executeHandle(req, &r.rows.rows)\n\t\tif err != nil {\n\t\t\tr.err = err\n\t\t\treturn nil\n\t\t}\n\t\tr.hasResult = true\n\t} else if r.status != \"success\" {\n\t\treturn nil\n\t}\n\n\treturn r.rows.NextBytes()\n}",
"func Arrow(a, b, c int) End {\n\treturn EndArrow | End(a<<5|b<<14|c<<23)\n}",
"func Base64Handler(pattern *regexp.Regexp, out io.Writer, in []byte) {\n\tseek := 0 // init cursor\n\tfor _, s := range pattern.FindAllIndex(in, -1) { // foreach matching\n\t\tout.Write(in[seek:s[0]]) // copy string between two matching\n\n\t\t// append hex string if convert successful or append orig in failed\n\t\tif str, err := Base64ToHex(in[s[0]:s[1]]); err == nil {\n\t\t\tout.Write(str)\n\t\t} else {\n\t\t\tfmt.Fprintf(os.Stderr, \"%v\\n\", err)\n\t\t\tout.Write(in[s[0]:s[1]])\n\t\t}\n\t\tseek = s[1] // update cursor\n\t}\n\n\tout.Write(in[seek:]) // copy remaining string\n}",
"func (s Sigstore) blobChunk() ([]byte, error) {\n\treturn json.Marshal(sigstoreJSONRepresentation{\n\t\tUntrustedMIMEType: s.UntrustedMIMEType(),\n\t\tUntrustedPayload: s.UntrustedPayload(),\n\t\tUntrustedAnnotations: s.UntrustedAnnotations(),\n\t})\n}",
"func writeAttachedData(data []byte, partWriter io.Writer) {\n\t// buffer line with trailing CRLF\n\tbuffer := make([]byte, maxLineLength+len(\"\\r\\n\"))\n\tcopy(buffer[maxLineLength:], \"\\r\\n\")\n\n\t// for loop raw chunks until bytes shorter than a line\n\tfor len(data) >= maxRaw {\n\t\tbase64.StdEncoding.Encode(buffer, data[:maxRaw])\n\t\tpartWriter.Write(buffer)\n\t\tdata = data[maxRaw:]\n\t}\n\t// write the last chunk of data\n\tif len(data) > 0 {\n\t\tsiz := base64.StdEncoding.EncodedLen(len(data))\n\t\tout := buffer[:siz]\n\t\tbase64.StdEncoding.Encode(out, data)\n\t\tout = append(out, \"\\r\\n\"...)\n\t\tpartWriter.Write(out)\n\t}\n}",
"func Base64Encode(src []byte) []byte {\n\tif len(src) == 0 {\n\t\treturn []byte{}\n\t}\n\tdstLen := ((len(src) + 2) / 3 * 4) // base64 encoded length\n\tdstLen += (dstLen - 1) / 76 * 2 // add 2 bytes for each full 76-char line\n\tdst := make([]byte, dstLen)\n\t// fmt.Println(len(src), dstLen)\n\n\tvar (\n\t\tp [4]int\n\t)\n\n\tfor pos, lpos := 0, 0; len(src) > 0; {\n\t\t// fmt.Println(\"step\", pos, len(src), len(dst))\n\t\tswitch 76 - lpos {\n\t\tcase 0:\n\t\t\tdst[pos], dst[pos+1] = '\\r', '\\n'\n\t\t\tp[0], p[1], p[2], p[3] = pos+2, pos+3, pos+4, pos+5\n\t\t\tpos += 6\n\t\t\tlpos = 4\n\t\tcase 1:\n\t\t\tdst[pos+1], dst[pos+2] = '\\r', '\\n'\n\t\t\tp[0], p[1], p[2], p[3] = pos, pos+3, pos+4, pos+5\n\t\t\tpos += 6\n\t\t\tlpos = 3\n\t\tcase 2:\n\t\t\tdst[pos+2], dst[pos+3] = '\\r', '\\n'\n\t\t\tp[0], p[1], p[2], p[3] = pos, pos+1, pos+4, pos+5\n\t\t\tpos += 6\n\t\t\tlpos = 2\n\t\tcase 3:\n\t\t\tdst[pos+3], dst[pos+4] = '\\r', '\\n'\n\t\t\tp[0], p[1], p[2], p[3] = pos, pos+1, pos+2, pos+5\n\t\t\tpos += 6\n\t\t\tlpos = 1\n\t\tdefault:\n\t\t\tp[0], p[1], p[2], p[3] = pos, pos+1, pos+2, pos+3\n\t\t\tpos += 4\n\t\t\tlpos += 4\n\t\t}\n\n\t\tswitch len(src) {\n\t\tcase 1:\n\t\t\tdst[p[3]], dst[p[2]] = '=', '='\n\t\t\tdst[p[1]] = base64table[(src[0]<<4)&0x3F]\n\t\t\tdst[p[0]] = base64table[src[0]>>2]\n\t\t\treturn dst\n\t\tcase 2:\n\t\t\tdst[p[3]] = '='\n\t\t\tdst[p[2]] = base64table[(src[1]<<2)&0x3F]\n\t\t\tdst[p[1]] = base64table[(src[1]>>4)|(src[0]<<4)&0x3F]\n\t\t\tdst[p[0]] = base64table[src[0]>>2]\n\t\t\treturn dst\n\t\tdefault:\n\t\t\tdst[p[3]] = base64table[src[2]&0x3F]\n\t\t\tdst[p[2]] = base64table[(src[2]>>6)|(src[1]<<2)&0x3F]\n\t\t\tdst[p[1]] = base64table[(src[1]>>4)|(src[0]<<4)&0x3F]\n\t\t\tdst[p[0]] = base64table[src[0]>>2]\n\t\t\tsrc = src[3:]\n\t\t}\n\t}\n\n\treturn dst\n}",
"func Base64MimeEncoder(b string) (m string) {\n\n\tm = base64.StdEncoding.EncodeToString([]byte(b))\n\tthe_len := len(m)\n\n\tif (the_len <= maxLen) {\n\t\treturn m\n\t}\n\n\tnew_m := []byte(m)\n\n\t// set the slice capacity to the slice len + each newline delimiters\n\tm1 := make([]byte, 0, the_len+(len(delimiter)*int(the_len/maxLen)))\n\tii := 0\n\tfor i := 0; i < int(the_len/maxLen); i++ {\n\t\tm1 = append(m1, new_m[i*maxLen:(i+1)*maxLen]...)\n\t\tm1 = append(m1, delimiter...)\n\t\tii++\n\t}\n\tm1 = append(m1, new_m[ii*maxLen:the_len]...)\n\tm = string(m1)\n\treturn m\n}",
"func ExamplePdfMaroto_Base64Image() {\n\t// When props.Rect is nil, method make Image fulfill the context cell.\n\t// When center is true, left and top has no effect.\n\t// Percent represents the width/height of the Image inside the cell,\n\t// Ex: 85, means that Image will have width of 85% of column width.\n\t// When center is false, is possible to manually positioning the Image.\n\n\tm := pdf.NewMaroto(consts.Portrait, consts.A4)\n\trowHeight := 5.0\n\n\t// Bytes of the image loaded\n\tbytes := []byte{1, 2, 3}\n\tbase64String := base64.StdEncoding.EncodeToString(bytes)\n\n\tm.Row(rowHeight, func() {\n\t\tm.Col(12, func() {\n\t\t\t_ = m.Base64Image(base64String, consts.Png, props.Rect{\n\t\t\t\tLeft: 5,\n\t\t\t\tTop: 5,\n\t\t\t\tCenter: true,\n\t\t\t\tPercent: 85,\n\t\t\t})\n\t\t})\n\t})\n\n\t// Do more things and save...\n}",
"func (*ArrowRecordBatch) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_bigquery_storage_v1_arrow_proto_rawDescGZIP(), []int{1}\n}",
"func gopherPNG() io.Reader { return base64.NewDecoder(base64.StdEncoding, strings.NewReader(gopher)) }",
"func Base64Transaction(tx []byte) (bs string) {\n jtx, _ := json.Marshal(tx)\n bs = base64.StdEncoding.EncodeToString(jtx)\n return bs\n}",
"func Split(data []byte, encode bool, maxSize int, iterFunc func([]byte)) {\n\tif encode {\n\t\tencoded := make([]byte, base64.StdEncoding.EncodedLen(len(data)))\n\t\tbase64.StdEncoding.Encode(encoded, data)\n\t\tdata = encoded\n\t}\n\tbuff := bytes.NewBuffer(data)\n\tfor {\n\t\tchunk := buff.Next(maxSize)\n\t\tif len(chunk) == 0 {\n\t\t\treturn\n\t\t}\n\t\titerFunc(chunk)\n\t}\n}",
"func NewByteSliceFromBase64(b []byte) (ByteSlice, error) {\n\tif b == nil {\n\t\treturn NullByteSlice(), nil\n\t}\n\n\ttmp := make([]byte, base64.StdEncoding.DecodedLen(len(b)))\n\tn, err := base64.StdEncoding.Decode(tmp, b)\n\tif err != nil {\n\t\treturn ByteSlice{}, err\n\t}\n\treturn ByteSlice{\n\t\tByteSlice: tmp[:n],\n\t\tValid: true,\n\t}, nil\n\n}",
"func (dt *ColumnDiffTable) newCommitHistoryRowItrFromCommits(ctx *sql.Context, commits []*doltdb.Commit) (*doltColDiffCommitHistoryRowItr, error) {\n\tdchItr := &doltColDiffCommitHistoryRowItr{\n\t\tctx: ctx,\n\t\tddb: dt.ddb,\n\t\ttableChangesIdx: -1,\n\t\tcommits: commits,\n\t}\n\treturn dchItr, nil\n}",
"func ChunkFromReader(cr flux.ColReader) Chunk {\n\tbuf := arrow.TableBuffer{\n\t\tGroupKey: cr.Key(),\n\t\tColumns: cr.Cols(),\n\t\tValues: make([]array.Array, len(cr.Cols())),\n\t}\n\tfor j := range buf.Values {\n\t\tbuf.Values[j] = Values(cr, j)\n\t}\n\treturn ChunkFromBuffer(buf)\n}",
"func AddRelatedPropertyGeneratorsForManagementPolicyBaseBlob(gens map[string]gopter.Gen) {\n\tgens[\"Delete\"] = gen.PtrOf(DateAfterModificationGenerator())\n\tgens[\"TierToArchive\"] = gen.PtrOf(DateAfterModificationGenerator())\n\tgens[\"TierToCool\"] = gen.PtrOf(DateAfterModificationGenerator())\n}",
"func BuildBlob(b *Blob) []byte {\n\tbu := flatbuffers.NewBuilder(128)\n\n\tputTid := func(tid *core.TractID) flatbuffers.UOffsetT {\n\t\tif tid == nil {\n\t\t\treturn 0 // default value, will make flatbuffers not add field\n\t\t}\n\t\treturn PutTractID(bu, *tid)\n\t}\n\n\tputTract := func(t *Tract) flatbuffers.UOffsetT {\n\t\thosts012, hosts3p := TractFSetupHosts(bu, t.Hosts)\n\t\tTractFStart(bu)\n\t\tTractFAddHosts012(bu, hosts012)\n\t\tTractFAddHosts3p(bu, hosts3p)\n\t\tTractFAddVersion(bu, uint32(t.Version))\n\t\tTractFAddRs63Chunk(bu, putTid(t.Rs63Chunk))\n\t\tTractFAddRs83Chunk(bu, putTid(t.Rs83Chunk))\n\t\tTractFAddRs103Chunk(bu, putTid(t.Rs103Chunk))\n\t\tTractFAddRs125Chunk(bu, putTid(t.Rs125Chunk))\n\t\treturn TractFEnd(bu)\n\t}\n\n\tputTracts := func(tracts []*Tract) flatbuffers.UOffsetT {\n\t\ttLen := len(tracts)\n\t\tif tLen == 0 {\n\t\t\treturn 0\n\t\t}\n\n\t\ttOffs := make([]flatbuffers.UOffsetT, tLen)\n\t\tfor i := tLen - 1; i >= 0; i-- {\n\t\t\ttOffs[tLen-i-1] = putTract(tracts[i])\n\t\t}\n\n\t\tBlobFStartTractsVector(bu, tLen)\n\t\tfor _, off := range tOffs {\n\t\t\tbu.PrependUOffsetT(off)\n\t\t}\n\t\treturn bu.EndVector(tLen)\n\t}\n\n\ttVec := putTracts(b.Tracts)\n\n\tBlobFStart(bu)\n\tBlobFAddPackedMeta(bu, PackMeta(b.Storage, b.Hint, int(b.Repl)))\n\tBlobFAddTracts(bu, tVec)\n\tBlobFAddDeleted(bu, b.Deleted)\n\tBlobFAddMtime(bu, b.Mtime)\n\tBlobFAddAtime(bu, b.Atime)\n\tBlobFAddExpires(bu, b.Expires)\n\tbu.Finish(BlobFEnd(bu))\n\treturn bu.FinishedBytes()\n}",
"func BlobGTE(v []byte) predicate.User {\n\treturn predicate.User(sql.FieldGTE(FieldBlob, v))\n}",
"func Encode(base64Data string) ([][]byte, uint, error) {\n\t// convert base64 data to byte array\n\tbytes, err := base64.StdEncoding.DecodeString(base64Data)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t// create reed solomon encoder\n\tenc, _ := reedsolomon.New(dataShards, parityShards)\n\n\t// split the file data\n\tsplitData, err := enc.Split(bytes)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\t// encode the split file using reed solomon algorithm\n\terr = enc.Encode(splitData)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\treturn splitData, uint(len(bytes)), nil\n}",
"func BuildPartition(p *Partition) []byte {\n\tbu := flatbuffers.NewBuilder(64)\n\tPartitionFStart(bu)\n\tPartitionFAddNextBlobKey(bu, uint32(p.NextBlobKey))\n\tPartitionFAddNextRsChunkKey(bu, p.NextRsChunkKey)\n\tbu.Finish(PartitionFEnd(bu))\n\treturn bu.FinishedBytes()\n}",
"func addIconsStep(line string) string {\n\telms := strings.Split(line, delimiter)\n\tfor _, index := range iconIndices {\n\t\tif !(0 <= index && index < len(elms)) {\n\t\t\treturn line\n\t\t}\n\t\telms[index] = addIcon(elms[index])\n\t}\n\treturn fmt.Sprintf(\n\t\t\"%v\",\n\t\tstrings.Join(elms, delimiter),\n\t)\n}",
"func (t *Thread) GetFileDataBase64(path string, block *repo.Block) (string, error) {\n\tfile, err := t.GetFileData(path, block)\n\tif err != nil {\n\t\treturn \"error\", err\n\t}\n\treturn base64.StdEncoding.EncodeToString(file), nil\n}",
"func buildJSON(line []string) []byte {\n\tvar dataArray []Data\n\tfor _, imgurl := range line {\n\t\tvar urlMap = ImageURL{}\n\t\turlMap.Url = imgurl\n\t\tvar imageMap = Image{}\n\t\timageMap.Image = urlMap\n\t\tvar dataMap = Data{}\n\t\tdataMap.Data = imageMap\n\t\tdataArray = append(dataArray, dataMap)\n\t}\n\tpMap = make(map[string][]Data)\n\tpMap[\"inputs\"] = dataArray\n\tpagesJson, err := json.Marshal(pMap)\n\tcheck(err)\n\treturn pagesJson\n}",
"func (f *EvaluationReportFormFiller) drawArrow(width float64) {\n\tarrowWidth := pxToMM(13.33)\n\tcenterX := f.pdf.GetX() + (width-arrowWidth)/2.0\n\tf.pdf.Image(arrowImageName, centerX, f.pdf.GetY(), arrowWidth, 0.0, flow, arrowImageFormat, imageLink, imageLinkURL)\n}",
"func (p *Pxl) setEncodedPayload(c <-chan splittedResult) {\n\tsorted := make(map[int]splittedResult)\n\n\tfor sr := range c {\n\t\tsorted[sr.Index] = sr\n\t}\n\n\tvar data []color.NRGBA\n\tfor i := 0; i < len(sorted); i++ {\n\t\tdata = append(data, sorted[i].Payload...)\n\t}\n\n\tdimensions := int(math.Sqrt(float64(len(data)))) + 1\n\timg := image.NewNRGBA((image.Rect(0, 0, dimensions, dimensions)))\n\n\tx := 0\n\ty := 0\n\tfor _, rgba := range data {\n\t\timg.Set(x, y, rgba)\n\t\tx++\n\t\tif x >= dimensions {\n\t\t\ty++\n\t\t\tx = 0\n\t\t}\n\t}\n\tfor posY := y; posY < dimensions; posY++ {\n\t\tfor posX := x; posX < dimensions; posX++ {\n\t\t\timg.Set(posX, posY, color.NRGBA{0, 0, 0, 255})\n\t\t}\n\t}\n\tp.encodedPayload = img\n}",
"func CreateMigrationBlob(rw io.ReadWriter, srkAuth Digest, migrationAuth Digest, keyBlob []byte, migrationKeyBlob []byte) ([]byte, error) {\n\t// Run OSAP for the SRK, reading a random OddOSAP for our initial\n\t// command and getting back a secret and a handle.\n\tsharedSecret, osapr, err := newOSAPSession(rw, etSRK, khSRK, srkAuth[:])\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer osapr.Close(rw)\n\tdefer zeroBytes(sharedSecret[:])\n\n\t// The createMigrationBlob command needs an OIAP session in addition to the\n\t// OSAP session.\n\toiapr, err := oiap(rw)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer oiapr.Close(rw)\n\n\tencData := tpmutil.U32Bytes(keyBlob)\n\n\t// The digest for auth1 and auth2 for the createMigrationBlob command is\n\t// SHA1(ordCreateMigrationBlob || migrationScheme || migrationKeyBlob || encData)\n\tauthIn := []interface{}{ordCreateMigrationBlob, msRewrap, migrationKeyBlob, encData}\n\n\t// The first commandAuth uses the shared secret as an HMAC key.\n\tca1, err := newCommandAuth(osapr.AuthHandle, osapr.NonceEven, nil, sharedSecret[:], authIn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The second commandAuth is based on OIAP instead of OSAP and uses the\n\t// migration auth as the HMAC key.\n\tca2, err := newCommandAuth(oiapr.AuthHandle, oiapr.NonceEven, nil, migrationAuth[:], authIn)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t_, outData, _, _, _, err := createMigrationBlob(rw, khSRK, msRewrap, migrationKeyBlob, encData, ca1, ca2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// For now, ignore the response authenticatino.\n\treturn outData, nil\n}",
"func (dt *ColumnDiffTable) newCommitHistoryRowItrFromItr(ctx *sql.Context, iter doltdb.CommitItr) (*doltColDiffCommitHistoryRowItr, error) {\n\tdchItr := &doltColDiffCommitHistoryRowItr{\n\t\tctx: ctx,\n\t\tddb: dt.ddb,\n\t\ttableChangesIdx: -1,\n\t\tchild: iter,\n\t}\n\treturn dchItr, nil\n}",
"func bytesFromResultsIterator(stub shim.ChaincodeStubInterface, resultsIterator shim.StateQueryIteratorInterface, extract func([]byte) ([]byte, error)) ([]byte, error) {\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"[\")\n\n\tbArrayMemberAlreadyWritten := false\n\tfor resultsIterator.HasNext() {\n\t\tif bArrayMemberAlreadyWritten == true {\n\t\t\tbuffer.WriteString(\",\")\n\t\t}\n\n\t\tqueryResponse, err := resultsIterator.Next()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tresultBytes, err := extract(queryResponse.Value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tbuffer.Write(resultBytes)\n\n\t\tbArrayMemberAlreadyWritten = true\n\t}\n\n\tbuffer.WriteString(\"]\")\n\treturn buffer.Bytes(), nil\n}",
"func (c *Cursor) FromBase64(b64 string) (err error) {\n\tcursor, err := base64.URLEncoding.DecodeString(b64)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn json.Unmarshal(cursor, c)\n}",
"func (rows *snowflakeRows) GetArrowBatches() ([]*ArrowBatch, error) {\n\t// Wait for all arrow batches before fetching.\n\t// Otherwise, a panic error \"invalid memory address or nil pointer dereference\" will be thrown.\n\tif err := rows.waitForAsyncQueryStatus(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rows.ChunkDownloader.getArrowBatches(), nil\n}",
"func BuildBytes(bytes []byte) string {\n\tvar builder strings.Builder\n\tbuilder.WriteString(\"[\\n\")\n\tfor i, b := range bytes {\n\t\tbuilder.WriteString(fmt.Sprintf(\"0x%02x,\", b))\n\t\tif i%8 == 7 {\n\t\t\tbuilder.WriteString(\"\\n\")\n\t\t}\n\t}\n\tbuilder.WriteString(\"]\")\n\treturn builder.String()\n}",
"func writeBase64(data []byte, partWriter io.Writer) error {\n\tbufsiz := base64.StdEncoding.EncodedLen(len(data))\n\tbuffer := make([]byte, bufsiz)\n\tbase64.StdEncoding.Encode(buffer, data)\n\t_, err := partWriter.Write(buffer)\n\n\treturn err\n}",
"func DecodeTxnRowNext(s string) (round uint64, intra uint32, err error) {\n\tvar b []byte\n\tb, err = base64.URLEncoding.DecodeString(s)\n\tif err != nil {\n\t\treturn\n\t}\n\tround = binary.LittleEndian.Uint64(b[:8])\n\tintra = binary.LittleEndian.Uint32(b[8:])\n\treturn\n}",
"func (b *BarChart) Thumbnail(c *draw.Canvas) {\n\n// fmt.Println (\"Thumbnail start \" )\n\n\tpts := []vg.Point{\n\t\t{c.Min.X, c.Min.Y},\n\t\t{c.Min.X, c.Max.Y},\n\t\t{c.Max.X, c.Max.Y},\n\t\t{c.Max.X, c.Min.Y},\n\t}\n\tpoly := c.ClipPolygonY(pts)\n\tc.FillPolygon(b.Color, poly)\n\n\tpts = append(pts, vg.Point{X: c.Min.X, Y: c.Min.Y})\n\toutline := c.ClipLinesY(pts)\n\tc.StrokeLines(b.LineStyle, outline...)\n\n//\tfmt.Println (\"Thumbnail end \" )\n}",
"func arrow(x float64, y float64, w float64, h float64, ah float64, color string, canvas *gensvg.SVG) {\n\tend := x + w\n\tbot := y + h\n\tap := end - ah\n\tvar xp = []float64{x, ap, end, ap, x}\n\tvar yp = []float64{y, y, y + (h / 2), bot, bot}\n\tcanvas.Polyline(xp, yp, color)\n}",
"func (tr TxnRow) Next() string {\n\tvar b [12]byte\n\tbinary.LittleEndian.PutUint64(b[:8], tr.Round)\n\tbinary.LittleEndian.PutUint32(b[8:], uint32(tr.Intra))\n\treturn base64.URLEncoding.EncodeToString(b[:])\n}",
"func testRecord(mem memory.Allocator) arrow.Record {\n\tcol1 := func() arrow.Array {\n\t\tib := array.NewInt8Builder(mem)\n\t\tdefer ib.Release()\n\n\t\tib.AppendValues([]int8{-1, -2, -3, -4, -5, -6, -7, -8, -9, -10}, nil)\n\t\treturn ib.NewInt8Array()\n\t}()\n\tdefer col1.Release()\n\tcol2 := func() arrow.Array {\n\t\tib := array.NewInt16Builder(mem)\n\t\tdefer ib.Release()\n\n\t\tib.AppendValues([]int16{-11, -12, -13, -14, -15, -16, -17, -18, -19,\n\t\t\t-20}, nil)\n\t\treturn ib.NewInt16Array()\n\t}()\n\tdefer col2.Release()\n\tcol3 := func() arrow.Array {\n\t\tib := array.NewInt32Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]int32{-21, -22, -23, -24, -25, -26, -27, -28, -29,\n\t\t\t-30}, nil)\n\t\treturn ib.NewInt32Array()\n\t}()\n\tdefer col3.Release()\n\tcol4 := func() arrow.Array {\n\t\tib := array.NewInt64Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]int64{-31, -32, -33, -34, -35, -36, -37, -38, -39,\n\t\t\t-40}, nil)\n\t\treturn ib.NewInt64Array()\n\t}()\n\tdefer col4.Release()\n\tcol5 := func() arrow.Array {\n\t\tib := array.NewUint8Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]uint8{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, nil)\n\t\treturn ib.NewUint8Array()\n\t}()\n\tdefer col5.Release()\n\tcol6 := func() arrow.Array {\n\t\tib := array.NewUint16Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]uint16{11, 12, 13, 14, 15, 16, 17, 18, 19,\n\t\t\t20}, nil)\n\t\treturn ib.NewUint16Array()\n\t}()\n\tdefer col6.Release()\n\tcol7 := func() arrow.Array {\n\t\tib := array.NewUint32Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]uint32{21, 22, 23, 24, 25, 26, 27, 28, 29,\n\t\t\t30}, nil)\n\t\treturn ib.NewUint32Array()\n\t}()\n\tdefer col7.Release()\n\tcol8 := func() arrow.Array {\n\t\tib := array.NewUint64Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]uint64{31, 32, 33, 34, 35, 36, 37, 38, 39,\n\t\t\t40}, nil)\n\t\treturn ib.NewUint64Array()\n\t}()\n\tdefer col8.Release()\n\tcol9 := func() arrow.Array {\n\t\tib := array.NewFloat32Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]float32{1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7,\n\t\t\t8.8, 9.9, 10.10}, nil)\n\t\treturn ib.NewFloat32Array()\n\t}()\n\tdefer col9.Release()\n\tcol10 := func() arrow.Array {\n\t\tib := array.NewFloat64Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]float64{10.1, 12.2, 13.3, 14.4, 15.5, 16.6,\n\t\t\t17.7, 18.8, 19.9, 20.10}, nil)\n\t\treturn ib.NewFloat64Array()\n\t}()\n\tdefer col10.Release()\n\tcol11 := func() arrow.Array {\n\t\tib := array.NewDate32Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]arrow.Date32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},\n\t\t\tnil)\n\t\treturn ib.NewDate32Array()\n\t}()\n\tdefer col11.Release()\n\tcol12 := func() arrow.Array {\n\t\tib := array.NewDate64Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]arrow.Date64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},\n\t\t\tnil)\n\t\treturn ib.NewDate64Array()\n\t}()\n\tdefer col12.Release()\n\tcol13 := func() arrow.Array {\n\t\tib := array.NewBinaryBuilder(mem, arrow.BinaryTypes.Binary)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([][]byte{[]byte(\"A\"), []byte(\"B\"), []byte(\"C\"),\n\t\t\t[]byte(\"D\"), []byte(\"E\"), []byte(\"F\"), []byte(\"G\"),\n\t\t\t[]byte(\"H\"), []byte(\"I\"), []byte(\"J\")}, nil)\n\t\treturn ib.NewBinaryArray()\n\t}()\n\tdefer col13.Release()\n\tcol14 := func() arrow.Array {\n\t\tib := array.NewStringBuilder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]string{\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\",\n\t\t\t\"h\", 
\"i\", \"j\"}, nil)\n\t\treturn ib.NewStringArray()\n\t}()\n\tdefer col14.Release()\n\tcol15 := func() arrow.Array {\n\t\tib := array.NewBooleanBuilder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]bool{true, false, true, false, true,\n\t\t\tfalse, true, false, true, false}, nil)\n\t\treturn ib.NewBooleanArray()\n\t}()\n\tdefer col15.Release()\n\tcol16 := func() arrow.Array {\n\t\tib := array.NewDate32Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]arrow.Date32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},\n\t\t\tnil)\n\t\treturn ib.NewDate32Array()\n\t}()\n\tdefer col16.Release()\n\tcol17 := func() arrow.Array {\n\t\tib := array.NewDate64Builder(mem)\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]arrow.Date64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},\n\t\t\tnil)\n\t\treturn ib.NewDate64Array()\n\t}()\n\tdefer col17.Release()\n\tcol18 := func() arrow.Array {\n\t\tdtype := arrow.FixedWidthTypes.Time32ms\n\t\tib := array.NewTime32Builder(mem, dtype.(*arrow.Time32Type))\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]arrow.Time32{arrow.Time32(1), arrow.Time32(2),\n\t\t\tarrow.Time32(3), arrow.Time32(4), arrow.Time32(5),\n\t\t\tarrow.Time32(6), arrow.Time32(7), arrow.Time32(8),\n\t\t\tarrow.Time32(9), arrow.Time32(10)}, nil)\n\t\treturn ib.NewTime32Array()\n\t}()\n\tdefer col18.Release()\n\tcol19 := func() arrow.Array {\n\t\tdtype := arrow.FixedWidthTypes.Timestamp_ms\n\t\tib := array.NewTimestampBuilder(mem, dtype.(*arrow.TimestampType))\n\t\tdefer ib.Release()\n\t\tib.AppendValues([]arrow.Timestamp{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},\n\t\t\tnil)\n\t\treturn ib.NewTimestampArray()\n\t}()\n\tdefer col19.Release()\n\tcols := []arrow.Array{col1, col2, col3, col4, col5, col6, col7,\n\t\tcol8, col9, col10, col11, col12, col13, col14, col15, col16, col17,\n\t\tcol18, col19}\n\treturn array.NewRecord(testSchema, cols, -1)\n}",
"func _encode_map(bitmap []byte) string {\n\treturn base64.StdEncoding.EncodeToString(bitmap)\n}",
"func decodeFromBase64(encodedXdr string) *b.TransactionEnvelopeBuilder {\n // Unmarshall from base64 encoded XDR format\n var decoded xdr.TransactionEnvelope\n e := xdr.SafeUnmarshalBase64(encodedXdr, &decoded)\n if e != nil {\n log.Fatal(e)\n }\n\n // convert to TransactionEnvelopeBuilder\n txEnvelopeBuilder := b.TransactionEnvelopeBuilder{E: &decoded}\n txEnvelopeBuilder.Init()\n\n return &txEnvelopeBuilder\n}",
"func decodeFromBase64(encodedXdr string) *b.TransactionEnvelopeBuilder {\n // Unmarshall from base64 encoded XDR format\n var decoded xdr.TransactionEnvelope\n e := xdr.SafeUnmarshalBase64(encodedXdr, &decoded)\n if e != nil {\n log.Fatal(e)\n }\n\n // convert to TransactionEnvelopeBuilder\n txEnvelopeBuilder := b.TransactionEnvelopeBuilder{E: &decoded}\n txEnvelopeBuilder.Init()\n\n return &txEnvelopeBuilder\n}",
"func TestNew_paddedBase64(t *testing.T) {\n\n\ttestPartRaw := \"Content-Type: text/plain; name=\\\"test.txt\\\"\\r\\n\" +\n\t\t\"Content-Transfer-Encoding: base64\\r\\n\" +\n\t\t\"Content-ID: <[email protected]>\\r\\n\" +\n\t\t\"Content-Disposition: attachment; filename=\\\"text.txt\\\"\\r\\n\" +\n\t\t\"\\r\\n\" +\n\t\t\"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNlY3RldHVyIGFkaXBpc2NpbmcgZWxpdCwgc2VkIGRvIGVpdXNtb2QgdGVtc\\r\\n\" +\n\t\t\" G9yIGluY2lkaWR1bnQgdXQgbGFib3JlIGV0IGRvbG9yZSBtYWduYSBhbGlxdWEuIFV0IGVuaW0gYWQgbWluaW0gdmVuaWFtLCBxd\\r\\n\" +\n\t\t\" WlzIG5vc3RydWQgZXhlcmNpdGF0aW9uIHVsbGFtY28gbGFib3JpcyBuaXNpIHV0IGFsaXF1aXAgZXggZWEgY29tbW9kbyBjb25zZ\\r\\n\" +\n\t\t\" XF1YXQuIER1aXMgYXV0ZSBpcnVyZSBkb2xvciBpbiByZXByZWhlbmRlcml0IGluIHZvbHVwdGF0ZSB2ZWxpdCBlc3NlIGNpbGx1b\\r\\n\" +\n\t\t\" SBkb2xvcmUgZXUgZnVnaWF0IG51bGxhIHBhcmlhdHVyLiBFeGNlcHRldXIgc2ludCBvY2NhZWNhdCBjdXBpZGF0YXQgbm9uIHByb\\r\\n\" +\n\t\t\" 2lkZW50LCBzdW50IGluIGN1bHBhIHF1aSBvZmZpY2lhIGRlc2VydW50IG1vbGxpdCBhbmltIGlkIGVzdCBsYWJvcnVtLg==\"\n\n\texpected := \"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed\" +\n\t\t\" do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut e\" +\n\t\t\"nim ad minim veniam, quis nostrud exercitation ullamco laboris nisi \" +\n\t\t\"ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehe\" +\n\t\t\"nderit in voluptate velit esse cillum dolore eu fugiat nulla pariatu\" +\n\t\t\"r. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui\" +\n\t\t\" officia deserunt mollit anim id est laborum.\"\n\n\te, err := Read(strings.NewReader(testPartRaw))\n\tif err != nil {\n\t\tt.Fatal(\"New(padded Base64): expected no error, got\", err)\n\t}\n\n\tif b, err := ioutil.ReadAll(e.Body); err != nil {\n\t\tt.Error(\"Expected no error while reading entity body, got\", err)\n\t} else if s := string(b); s != expected {\n\t\tt.Errorf(\"Expected %q as entity body but got %q\", expected, s)\n\t}\n\n}",
"func copyOuterRows(innerColOffset, outerColOffset int, src *Chunk, numRows int, dst *Chunk) {\n\ttrace_util_0.Count(_chunk_util_00000, 17)\n\tif numRows <= 0 {\n\t\ttrace_util_0.Count(_chunk_util_00000, 20)\n\t\treturn\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 18)\n\trow := src.GetRow(0)\n\tvar srcCols []*column\n\tif innerColOffset == 0 {\n\t\ttrace_util_0.Count(_chunk_util_00000, 21)\n\t\tsrcCols = src.columns[outerColOffset:]\n\t} else {\n\t\ttrace_util_0.Count(_chunk_util_00000, 22)\n\t\t{\n\t\t\tsrcCols = src.columns[:innerColOffset]\n\t\t}\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 19)\n\tfor i, srcCol := range srcCols {\n\t\ttrace_util_0.Count(_chunk_util_00000, 23)\n\t\tdstCol := dst.columns[outerColOffset+i]\n\t\tdstCol.appendMultiSameNullBitmap(!srcCol.isNull(row.idx), numRows)\n\t\tdstCol.length += numRows\n\t\tif srcCol.isFixed() {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 24)\n\t\t\telemLen := len(srcCol.elemBuf)\n\t\t\tstart := row.idx * elemLen\n\t\t\tend := start + numRows*elemLen\n\t\t\tdstCol.data = append(dstCol.data, srcCol.data[start:end]...)\n\t\t} else {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 25)\n\t\t\t{\n\t\t\t\tstart, end := srcCol.offsets[row.idx], srcCol.offsets[row.idx+numRows]\n\t\t\t\tdstCol.data = append(dstCol.data, srcCol.data[start:end]...)\n\t\t\t\toffsets := dstCol.offsets\n\t\t\t\telemLen := srcCol.offsets[row.idx+1] - srcCol.offsets[row.idx]\n\t\t\t\tfor j := 0; j < numRows; j++ {\n\t\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 27)\n\t\t\t\t\toffsets = append(offsets, int64(offsets[len(offsets)-1]+elemLen))\n\t\t\t\t}\n\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 26)\n\t\t\t\tdstCol.offsets = offsets\n\t\t\t}\n\t\t}\n\t}\n}",
"func (lrc *largeRowsetChaincode) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\t// Create a new table\n\tif err := stub.CreateTable(\"LargeTable\", []*shim.ColumnDefinition{\n\t\t{Name: \"Key\", Type: shim.ColumnDefinition_STRING, Key: true},\n\t\t{\"Name\", shim.ColumnDefinition_STRING, false},\n\t\t{\"Value\", shim.ColumnDefinition_STRING, false},\n\t}); err != nil {\n\t\t//just assume the table exists and was populated\n\t\treturn nil, nil\n\t}\n\n\tfor i := 0; i < totalRows; i++ {\n\t\tcol1 := fmt.Sprintf(\"Key_%d\", i)\n\t\tcol2 := fmt.Sprintf(\"Name_%d\", i)\n\t\tcol3 := fmt.Sprintf(\"Value_%d\", i)\n\t\tif _, err := lrc.retInAdd(stub.InsertRow(\"LargeTable\", shim.Row{Columns: []*shim.Column{\n\t\t\t&shim.Column{Value: &shim.Column_String_{String_: col1}},\n\t\t\t&shim.Column{Value: &shim.Column_String_{String_: col2}},\n\t\t\t&shim.Column{Value: &shim.Column_String_{String_: col3}},\n\t\t}})); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn nil, nil\n}",
"func base64EncodedLen(n int) int {\n\treturn ((n + 2) / 3) * 4 // integer division\n}",
"func (o EnvironmentCertificateOutput) CertificateBlobBase64() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *EnvironmentCertificate) pulumi.StringOutput { return v.CertificateBlobBase64 }).(pulumi.StringOutput)\n}",
"func (f *fragment) rowFromStorage(rowID uint64) *Row {\n\t// Only use a subset of the containers.\n\t// NOTE: The start & end ranges must be divisible by container width.\n\t//\n\t// Note that OffsetRange now returns a new bitmap which uses frozen\n\t// containers which will use copy-on-write semantics. The actual bitmap\n\t// and Containers object are new and not shared, but the containers are\n\t// shared.\n\tdata := f.storage.OffsetRange(f.shard*ShardWidth, rowID*ShardWidth, (rowID+1)*ShardWidth)\n\n\trow := &Row{\n\t\tsegments: []rowSegment{{\n\t\t\tdata: data,\n\t\t\tshard: f.shard,\n\t\t\twritable: true,\n\t\t}},\n\t}\n\trow.invalidateCount()\n\n\treturn row\n}",
"func (p *Packed2DGenericTypeBuilder) FinishRow() {\n\tp.Rows = append(p.Rows, p.buf[p.head:p.tail])\n\tp.head = p.tail\n}",
"func marshalBinary(b *strings.Builder, bs []byte) error {\n\tif err := b.WriteByte(':'); err != nil {\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, base64.StdEncoding.EncodedLen(len(bs)))\n\tbase64.StdEncoding.Encode(buf, bs)\n\n\tif _, err := b.Write(buf); err != nil {\n\t\treturn err\n\t}\n\n\treturn b.WriteByte(':')\n}",
"func (f *SAMDGClientForwarder) Base64() string {\n\treturn f.SamKeys.Addr().Base64()\n}",
"func (r renderer) TableRow(out *bytes.Buffer, text []byte) {}",
"func MarshalDBtoByte(batch *DataBatch) []byte {\n\n\tdata := make([]byte, 0)\n\n\t//intSize := unsafe.Sizeof(batch.CubeId)\n\t//uintSize := unsafe.Sizeof(batch.Capacity) // capacity(uint) and Dims(utin[])\n\t//float64Size := unsafe.Sizeof(float64(0))\n\tdimLength := len(batch.Dims)\n\tminsLength := len(batch.Mins)\n\tmaxsLength := len(batch.Maxs)\n\tdPointLength := len(batch.DPoints)\n\t// first intSize byte for cubeid\n\tbyteData, _ := json.Marshal(batch.CubeId)\n\tdata = append(data, byteData...)\n\t// next uintSize byte for capacity\n\tbyteData, _ = json.Marshal(batch.Capacity)\n\tdata = append(data, byteData...)\n\t// next intSize byte for lengof of Dims\n\tbyteData, _ = json.Marshal(dimLength)\n\tdata = append(data, byteData...)\n\t// next intSize byte for length of minslength\n\tbyteData, _ = json.Marshal(minsLength)\n\tdata = append(data, byteData...)\n\t// next intSize byte for length of maxslength\n\tbyteData, _ = json.Marshal(maxsLength)\n\tdata = append(data, byteData...)\n\t// next intSize byte for length of dPointLength\n\tbyteData, _ = json.Marshal(dPointLength)\n\tdata = append(data, byteData...)\n\n\t// trans dims into byte\n\tbyteData = marshalUintArray(batch.Dims)\n\tdata = append(data, byteData...)\n\n\t// trans mins into byte\n\tbyteData = marshalFloat64Array(batch.Mins)\n\tdata = append(data, byteData...)\n\n\t// trans maxs into byte\n\tbyteData = marshalFloat64Array(batch.Maxs)\n\tdata = append(data, byteData...)\n\n\t// trans dPoints into byte\n\tfor _, dp := range batch.DPoints {\n\t\theader, body := convertDPoint(dp)\n\t\tdata = append(data, header...)\n\t\tdata = append(data, body...)\n\t}\n\treturn data\n}",
"func copySelectedInnerRows(innerColOffset, outerColOffset int, src *Chunk, selected []bool, dst *Chunk) int {\n\ttrace_util_0.Count(_chunk_util_00000, 3)\n\toldLen := dst.columns[innerColOffset].length\n\tvar srcCols []*column\n\tif innerColOffset == 0 {\n\t\ttrace_util_0.Count(_chunk_util_00000, 6)\n\t\tsrcCols = src.columns[:outerColOffset]\n\t} else {\n\t\ttrace_util_0.Count(_chunk_util_00000, 7)\n\t\t{\n\t\t\tsrcCols = src.columns[innerColOffset:]\n\t\t}\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 4)\n\tfor j, srcCol := range srcCols {\n\t\ttrace_util_0.Count(_chunk_util_00000, 8)\n\t\tdstCol := dst.columns[innerColOffset+j]\n\t\tif srcCol.isFixed() {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 9)\n\t\t\tfor i := 0; i < len(selected); i++ {\n\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 10)\n\t\t\t\tif !selected[i] {\n\t\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 12)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 11)\n\t\t\t\tdstCol.appendNullBitmap(!srcCol.isNull(i))\n\t\t\t\tdstCol.length++\n\n\t\t\t\telemLen := len(srcCol.elemBuf)\n\t\t\t\toffset := i * elemLen\n\t\t\t\tdstCol.data = append(dstCol.data, srcCol.data[offset:offset+elemLen]...)\n\t\t\t}\n\t\t} else {\n\t\t\ttrace_util_0.Count(_chunk_util_00000, 13)\n\t\t\t{\n\t\t\t\tfor i := 0; i < len(selected); i++ {\n\t\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 14)\n\t\t\t\t\tif !selected[i] {\n\t\t\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 16)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\ttrace_util_0.Count(_chunk_util_00000, 15)\n\t\t\t\t\tdstCol.appendNullBitmap(!srcCol.isNull(i))\n\t\t\t\t\tdstCol.length++\n\n\t\t\t\t\tstart, end := srcCol.offsets[i], srcCol.offsets[i+1]\n\t\t\t\t\tdstCol.data = append(dstCol.data, srcCol.data[start:end]...)\n\t\t\t\t\tdstCol.offsets = append(dstCol.offsets, int64(len(dstCol.data)))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\ttrace_util_0.Count(_chunk_util_00000, 5)\n\treturn dst.columns[innerColOffset].length - oldLen\n}",
"func (o ReplicaExternalKeyOutput) KeyMaterialBase64() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ReplicaExternalKey) pulumi.StringPtrOutput { return v.KeyMaterialBase64 }).(pulumi.StringPtrOutput)\n}",
"func BlobGTE(v []byte) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldBlob), v))\n\t})\n}",
"func byteEncoder(e *encodeState, v reflect.Value) error {\n\tval := v.Uint()\n\tsz := e.size\n\th := sz / 8.0\n\tfactor := .2\n\tblkH := h * (1 - factor)\n\tmargin := h * (factor / 2)\n\n\tfor y := sz - h; y > 0; y -= h {\n\t\tbit := val % 2\n\t\tif bit == 1 {\n\t\t\tr := e.Rect(margin, y-margin, sz-margin, blkH)\n\t\t\tr.Style.Set(\"stroke-width\", \"0\")\n\t\t\tr.Style.Set(\"fill\", \"black\")\n\t\t}\n\t\tval = val >> 1\n\t\tif val == 0 {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (b *Bar) Bytes() []byte {\n\tcompletedWidth := int(float64(b.Width) * (b.CompletedPercent() / 100.00))\n\n\t// add fill and empty bits\n\tvar buf bytes.Buffer\n\tfor i := 0; i < completedWidth; i++ {\n\t\tbuf.WriteByte(b.Fill)\n\t}\n\tfor i := 0; i < b.Width-completedWidth; i++ {\n\t\tbuf.WriteByte(b.Empty)\n\t}\n\n\t// set head bit\n\tpb := buf.Bytes()\n\tif completedWidth > 0 && completedWidth < b.Width {\n\t\tpb[completedWidth-1] = b.Head\n\t}\n\n\t// set left and right ends bits\n\tpb[0], pb[len(pb)-1] = b.LeftEnd, b.RightEnd\n\n\t// render append functions to the right of the bar\n\tfor _, f := range b.appendFuncs {\n\t\tpb = append(pb, ' ')\n\t\tpb = append(pb, []byte(f(b))...)\n\t}\n\n\t// render prepend functions to the left of the bar\n\tfor _, f := range b.prependFuncs {\n\t\targs := []byte(f(b))\n\t\targs = append(args, ' ')\n\t\tpb = append(args, pb...)\n\t}\n\treturn pb\n}",
"func (r *AnalyticsResult) NextBytes() []byte {\n\tif r.streamResult.Closed() {\n\t\treturn nil\n\t}\n\n\traw, err := r.streamResult.NextBytes()\n\tif err != nil {\n\t\tr.err = err\n\t\treturn nil\n\t}\n\n\treturn raw\n}",
"func __b64encode(out *[]byte, src *[]byte, mode int)",
"func Decode_Line(geom []uint32) [][][]int {\n\tpos := 0\n\tcurrentpt := []int{0, 0}\n\tnewline := [][]int{}\n\tlines := [][][]int{}\n\tfor pos < len(geom) {\n\t\tgeomval := geom[pos]\n\n\t\tcmd, length := Get_Command_Length(geomval)\n\n\t\t// conde for a move to cmd\n\t\tif cmd == 1 {\n\t\t\txdelta := DecodeDelta(geom[pos+1])\n\t\t\tydelta := DecodeDelta(geom[pos+2])\n\t\t\tcurrentpt = []int{currentpt[0] + xdelta, currentpt[1] + ydelta}\n\t\t\tpos += 2\n\n\t\t\tif pos == len(geom)-1 {\n\t\t\t\tlines = append(lines, [][]int{currentpt})\n\t\t\t}\n\t\t} else if cmd == 2 {\n\t\t\tnewline = [][]int{currentpt}\n\t\t\tcurrentpos := pos + 1\n\t\t\tendpos := currentpos + int(length*2)\n\t\t\tfor currentpos < endpos {\n\t\t\t\txdelta := DecodeDelta(geom[currentpos])\n\t\t\t\tydelta := DecodeDelta(geom[currentpos+1])\n\t\t\t\tcurrentpt = []int{currentpt[0] + xdelta, currentpt[1] + ydelta}\n\t\t\t\tnewline = append(newline, currentpt)\n\t\t\t\tcurrentpos += 2\n\t\t\t}\n\n\t\t\tpos = currentpos - 1\n\t\t\tlines = append(lines, newline)\n\n\t\t}\n\t\tnewline = [][]int{}\n\t\t//fmt.Println(cmd,length)\n\t\tpos += 1\n\t}\n\treturn lines\n}",
"func GetEncodedChunk(chunk []interface{}, delim []byte) ([]byte, error) {\n\tvar enc []byte\n\n\tfor _, v := range chunk {\n\t\tj, err := json.Marshal(v)\n\t\tif err != nil {\n\t\t\tlog.Error(\"failed to encode: %v\", v)\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(enc) > 0 {\n\t\t\tenc = append(enc, delim...)\n\t\t}\n\t\tenc = append(enc, j...)\n\t}\n\n\treturn enc, nil\n}",
"func Base64(bin []byte) string {\n\treturn base64.StdEncoding.EncodeToString(bin)\n}",
"func (c *Cursor) ToBase64() (string, error) {\n\tb, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn base64.URLEncoding.EncodeToString(b), nil\n}",
"func (h *Handler) PrestoGetRows(ctx context.Context, splitID *PrestoThriftId, columns []string, maxBytes int64, nextToken *PrestoThriftNullableToken) (r *PrestoThriftPageResult_, err error) {\n\n\tr = &PrestoThriftPageResult_{}\n\n\t// TODO: connector should honor maxBytes, nextToken, columns correctly...\n\t// next token unused...\n\t// maxBytes unused ...\n\t// columns not used at all\n\n\tlog.Println(\"Got\", columns, maxBytes, nextToken)\n\n\tif columns == nil {\n\t\treturn r, errNotImplemented\n\t}\n\n\tif len(columns) == 0 {\n\t\treturn r, errNotImplemented\n\t}\n\n\tvar bbs BlobStoreSplit\n\terr = json.Unmarshal(splitID.ID, &bbs)\n\tif err != nil {\n\t\treturn r, err\n\t}\n\n\t// or to keep it idempotent do encode everything int he split id (it is a byte slice after all so we use that to avoid caching)\n\t/*\n\t\thmd5 := md5.Sum(splitID.ID)\n\t\tce, ok := h.SplitCache[hmd5]\n\t\tif !ok {\n\t\t\treturn r, errNotImplemented\n\t\t}\n\t*/\n\n\teh, err := getExcelFile(h.BaseDir, bbs.Schema, bbs.Table, bbs.File)\n\tif err != nil {\n\t\tlog.Println(\"Unable to read excel file\", err)\n\t\treturn r, err\n\t}\n\n\tif len(eh.Sheets) == 0 {\n\t\tlog.Println(\"Unable to read excel file with no sheets\")\n\t\treturn r, err\n\t}\n\n\tsh, ok := eh.Sheet[bbs.Sheet]\n\tif !ok {\n\t\tlog.Println(\"sheetname not found\", bbs.Sheet)\n\t\treturn r, err\n\t}\n\n\tv := newvisit()\n\tv.MaxRow = sh.MaxRow\n\t// we do our own housekeeping of extracted rows and cols anyway\n\terr = sh.ForEachRow(v.rowVisitorExtractor, xlsx.SkipEmptyRows)\n\tif err != nil {\n\t\tlog.Println(\"Error examining file structure\", err)\n\t\treturn r, err\n\t}\n\n\textractedRows := int32(v.Row) - 1\n\n\t// lets not forget to set the bytebuffers to the Varchar columns\n\t// an reset the array to less size\n\tfor idx, ctyp := range v.ColTyps {\n\t\tswitch ctyp {\n\t\tcase ColTypVarchar:\n\t\t\tv.Records[idx].VarcharData.Bytes = v.Buffers[idx].Bytes()\n\t\t\tv.Records[idx].VarcharData.Sizes = v.Records[idx].VarcharData.Sizes[0:extractedRows]\n\t\t\tv.Records[idx].VarcharData.Nulls = v.Records[idx].VarcharData.Nulls[0:extractedRows]\n\t\tcase ColTypBoolean:\n\t\t\tv.Records[idx].BooleanData.Booleans = v.Records[idx].BooleanData.Booleans[0:extractedRows]\n\t\t\tv.Records[idx].BooleanData.Nulls = v.Records[idx].BooleanData.Nulls[0:extractedRows]\n\t\tcase ColTypDouble:\n\t\t\tv.Records[idx].DoubleData.Doubles = v.Records[idx].DoubleData.Doubles[0:extractedRows]\n\t\t\tv.Records[idx].DoubleData.Nulls = v.Records[idx].DoubleData.Nulls[0:extractedRows]\n\t\tcase ColTypTimestamp:\n\t\t\tv.Records[idx].TimestampData.Timestamps = v.Records[idx].TimestampData.Timestamps[0:extractedRows]\n\t\t\tv.Records[idx].TimestampData.Nulls = v.Records[idx].TimestampData.Nulls[0:extractedRows]\n\t\t}\n\t}\n\n\tr = &PrestoThriftPageResult_{}\n\tr.ColumnBlocks = v.Records\n\tr.RowCount = extractedRows\n\n\tlog.Println(v.ColNames, r.RowCount)\n\n\treturn r, nil\n}",
"func (f *EvaluationReportFormFiller) loadArrowImage() error {\n\t// load image from assets\n\tarrow, err := assets.Asset(arrowImagePath)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"could not load image asset\")\n\t}\n\tarrowImage := bytes.NewReader(arrow)\n\n\topt := gofpdf.ImageOptions{\n\t\tImageType: arrowImageFormat,\n\t\tReadDpi: true,\n\t}\n\n\t// After the image is registered, we can use its name to draw it\n\tf.pdf.RegisterImageOptionsReader(arrowImageName, opt, arrowImage)\n\treturn f.pdf.Error()\n}",
"func parseBytes(data []byte, rslt *result, src *source, opts *Options, f *File) ([]element, error) {\n\tvar elements []element\n\n\tlines := strings.Split(formatLF(string(data)), lf)\n\n\ti := 0\n\tl := len(lines)\n\n\t// Ignore the last empty line.\n\tif l > 0 && lines[l-1] == \"\" {\n\t\tl--\n\t}\n\n\tfor i < l {\n\t\t// Fetch a line.\n\t\tln := newLine(i+1, lines[i], opts, f)\n\t\ti++\n\n\t\t// Ignore the empty line.\n\t\tif ln.isEmpty() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif ln.isTopIndent() {\n\t\t\te, err := newElement(ln, rslt, src, nil, opts)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Append child elements to the element.\n\t\t\tif err := appendChildren(e, rslt, lines, &i, l, src, opts, f); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\telements = append(elements, e)\n\t\t}\n\t}\n\n\treturn elements, nil\n}",
"func (qr *queryResult) Next(dest []driver.Value) error {\n\tif qr.pos >= qr.numRow() {\n\t\tif qr.attributes.LastPacket() {\n\t\t\treturn io.EOF\n\t\t}\n\t\tif err := qr.conn._fetchNext(context.Background(), qr); err != nil {\n\t\t\tqr.lastErr = err //fieldValues and attrs are nil\n\t\t\treturn err\n\t\t}\n\t\tif qr.numRow() == 0 {\n\t\t\treturn io.EOF\n\t\t}\n\t\tqr.pos = 0\n\t}\n\n\tqr.copyRow(qr.pos, dest)\n\terr := qr.decodeErrors.RowError(qr.pos)\n\tqr.pos++\n\n\tfor _, v := range dest {\n\t\tif v, ok := v.(p.LobDecoderSetter); ok {\n\t\t\tv.SetDecoder(qr.conn.decodeLob)\n\t\t}\n\t}\n\treturn err\n}",
"func (c *Reader) GetChunk(sha256sum []byte) {\n}",
"func getSequentialFileContent() (string, map[int]compareFunc) {\n\tsb := strings.Builder{}\n\texpectedLines := make(map[int]compareFunc)\n\n\tfor i := 0; i < 1000000; i++ {\n\t\tline := fmt.Sprintf(`{ \"line\": \"%d\", \"id\": \"i%d\", data: \"some event %d\" }`, i, i, i)\n\t\tsb.WriteString(line)\n\t\tsb.WriteString(\"\\n\")\n\n\t\texpectedLines[i] = equals(line)\n\t}\n\n\treturn sb.String(), expectedLines\n}",
"func Base64(data []byte) string {\n\treturn base64.StdEncoding.EncodeToString(data)\n}",
"func displayAsBlob(node *Node, level int) string {\n\tnc := len(node.children)\n\tret := fmt.Sprintf(\"%vBlob ID %v (%v reuses):\\n\", strings.Repeat(\" \", level), node.fullPath, nc)\n\n\t// Iterate over the childen in a sorted order.\n\tkeys := make([]string, 0, len(node.children))\n\tfor k := range node.children {\n\t\tkeys = append(keys, k)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, k := range keys {\n\t\tn := node.children[k]\n\t\tret += n.storageBreakdown(level + 1)\n\t}\n\treturn ret\n}",
"func (qr *queryResult) Next(dest []driver.Value) error {\n\tif qr.pos >= qr.numRow() {\n\t\tif qr.attributes.LastPacket() {\n\t\t\treturn io.EOF\n\t\t}\n\t\tif err := qr.conn._fetchNext(qr); err != nil {\n\t\t\tqr.lastErr = err //fieldValues and attrs are nil\n\t\t\treturn err\n\t\t}\n\t\tif qr.numRow() == 0 {\n\t\t\treturn io.EOF\n\t\t}\n\t\tqr.pos = 0\n\t}\n\n\tqr.copyRow(qr.pos, dest)\n\terr := qr.decodeErrors.RowError(qr.pos)\n\tqr.pos++\n\n\tfor _, v := range dest {\n\t\tif v, ok := v.(p.LobDecoderSetter); ok {\n\t\t\tv.SetDecoder(qr.conn.decodeLob)\n\t\t}\n\t}\n\treturn err\n}",
"func (j *baseJoiner) makeShallowJoinRow(isRightJoin bool, inner, outer chunk.Row) {\n\tif !isRightJoin {\n\t\tinner, outer = outer, inner\n\t}\n\tj.shallowRow.ShallowCopyPartialRow(0, inner)\n\tj.shallowRow.ShallowCopyPartialRow(inner.Len(), outer)\n}",
"func (*ArrowSchema) Descriptor() ([]byte, []int) {\n\treturn file_google_cloud_bigquery_storage_v1_arrow_proto_rawDescGZIP(), []int{0}\n}",
"func TestE2ESequentialValid(t *testing.T) {\n\tvar err error\n\tts := testSchema\n\n\tbuf := new(bytes.Buffer)\n\tfw := writerfile.NewWriterFile(buf)\n\tassert.Nil(t, err)\n\n\tw, err := NewArrowWriter(ts, fw, 1)\n\tassert.Nil(t, err)\n\n\tmem := memory.NewCheckedAllocator(memory.NewGoAllocator())\n\trec := testRecord(mem)\n\tdefer rec.Release()\n\terr = w.WriteArrow(rec)\n\tassert.Nil(t, err)\n\n\terr = w.WriteStop()\n\tassert.Nil(t, err)\n\n\tparquetFile, err := buffer.NewBufferFile(buf.Bytes())\n\tassert.Nil(t, err)\n\n\tpr, err := reader.NewParquetReader(parquetFile, nil, 1)\n\tassert.Nil(t, err)\n\n\tnum := int(pr.GetNumRows())\n\tres, err := pr.ReadByNumber(num)\n\tassert.Nil(t, err)\n\n\tactualTable := \"\"\n\tfor _, row := range res {\n\t\tactualTable = actualTable + fmt.Sprintf(\"%v\", row)\n\t}\n\texpectedTable := \"\" +\n\t\t\"{-1 -11 -21 -31 1 11 21 31 1.1 10.1 1 1 A a true 1 1 1 1}\" +\n\t\t\"{-2 -12 -22 -32 2 12 22 32 2.2 12.2 2 2 B b false 2 2 2 2}\" +\n\t\t\"{-3 -13 -23 -33 3 13 23 33 3.3 13.3 3 3 C c true 3 3 3 3}\" +\n\t\t\"{-4 -14 -24 -34 4 14 24 34 4.4 14.4 4 4 D d false 4 4 4 4}\" +\n\t\t\"{-5 -15 -25 -35 5 15 25 35 5.5 15.5 5 5 E e true 5 5 5 5}\" +\n\t\t\"{-6 -16 -26 -36 6 16 26 36 6.6 16.6 6 6 F f false 6 6 6 6}\" +\n\t\t\"{-7 -17 -27 -37 7 17 27 37 7.7 17.7 7 7 G g true 7 7 7 7}\" +\n\t\t\"{-8 -18 -28 -38 8 18 28 38 8.8 18.8 8 8 H h false 8 8 8 8}\" +\n\t\t\"{-9 -19 -29 -39 9 19 29 39 9.9 19.9 9 9 I i true 9 9 9 9}\" +\n\t\t\"{-10 -20 -30 -40 10 20 30 40 10.1 20.1 10 10 J j false 10 10 10 10}\"\n\tassert.Equal(t, expectedTable, actualTable)\n\n\terr = fw.Close()\n\tassert.Nil(t, err)\n\tpr.ReadStop()\n\terr = parquetFile.Close()\n\tassert.Nil(t, err)\n}",
"func (pr *newPartialResult) Next() (data []types.Datum, err error) {\n\tchunk := pr.getChunk()\n\tif chunk == nil {\n\t\treturn nil, nil\n\t}\n\tdata = make([]types.Datum, pr.rowLen)\n\tfor i := 0; i < pr.rowLen; i++ {\n\t\tvar l []byte\n\t\tl, chunk.RowsData, err = codec.CutOne(chunk.RowsData)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Trace(err)\n\t\t}\n\t\tdata[i].SetRaw(l)\n\t}\n\treturn\n}",
"func expandImageRawDiskSlice(c *Client, f []ImageRawDisk) ([]map[string]interface{}, error) {\n\tif f == nil {\n\t\treturn nil, nil\n\t}\n\n\titems := []map[string]interface{}{}\n\tfor _, item := range f {\n\t\ti, err := expandImageRawDisk(c, &item)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\titems = append(items, i)\n\t}\n\n\treturn items, nil\n}",
"func FlattenBytes64(xs [][]byte) ([]byte, error) {\n\treturn FlattenInternal(xs, 8)\n}",
"func ConcatenateBytes(data ...[]byte) []byte {\n\tfinalLength := 0\n\tfor _, slice := range data {\n\t\tfinalLength += len(slice)\n\t}\n\tresult := make([]byte, finalLength)\n\tlast := 0\n\tfor _, slice := range data {\n\t\tfor i := range slice {\n\t\t\tresult[i+last] = slice[i]\n\t\t}\n\t\tlast += len(slice)\n\t}\n\treturn result\n}",
"func (a *Account) exportBase64() (map[string]string, error) {\n\tbuf := &bytes.Buffer{}\n\tm := make(map[string]string)\n\n\t_, err := a.Wallet.WriteTo(buf)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tm[\"wallet\"] = base64.StdEncoding.EncodeToString(buf.Bytes())\n\tbuf.Reset()\n\n\tif _, err = a.TxStore.WriteTo(buf); err != nil {\n\t\treturn nil, err\n\t}\n\tm[\"tx\"] = base64.StdEncoding.EncodeToString(buf.Bytes())\n\tbuf.Reset()\n\n\treturn m, nil\n}",
"func (re renderEdge) render(row string, vertIdx, vertDist int) string {\n\tsetEdgeChar := func(s string, i int, r rune) string {\n\t\tif s[i] == byte(r) {\n\t\t\treturn s\n\t\t} else if s[i] == ' ' {\n\t\t\treturn s[:i] + string(r) + s[i+1:]\n\t\t}\n\t\treturn s[:i] + \"+\" + s[i+1:] // set the coordinate to \"+\" if there's an edge crossing\n\t}\n\tif re.src == re.dest {\n\t\treturn setEdgeChar(row, re.src, '|')\n\t}\n\tconst srcEdgeCenterOffset = 1 // start drawing a diagonal edge one space away from the center of a node\n\tadjustedXDist := re.distance() - srcEdgeCenterOffset // number of horizontal spaces we must fill with edges\n\t// vertical line\n\tif vertDist > adjustedXDist && vertIdx >= adjustedXDist/2 && vertIdx < vertDist-adjustedXDist/2 {\n\t\treturn setEdgeChar(row, (re.src+re.dest)/2, '|')\n\t}\n\t// horizontal line\n\tif vertDist < adjustedXDist && vertIdx == vertDist/2 {\n\t\tstep := 1\n\t\tif re.src > re.dest {\n\t\t\tstep = -1\n\t\t}\n\t\tdiagCoverage := (vertDist / 2) * step\n\t\ttmp := re.src + diagCoverage\n\t\tfor tmp != re.dest-diagCoverage-(step*vertDist%2) {\n\t\t\ttmp += step\n\t\t\trow = setEdgeChar(row, tmp, '-')\n\t\t}\n\t\treturn row\n\t}\n\t// diagonal line\n\toffset := vertIdx + srcEdgeCenterOffset\n\t// calculate offset based on distance from the end in case we're on the bottom half of the edge\n\tif vertIdx > vertDist/2 {\n\t\toffset = adjustedXDist - (vertDist - offset)\n\t}\n\tif re.src > re.dest {\n\t\treturn setEdgeChar(row, re.src-offset, '/')\n\t} else {\n\t\treturn setEdgeChar(row, re.src+offset, '\\\\')\n\t}\n}",
"func (i *gocbRawIterator) NextBytes() []byte {\n\treturn i.rawResult.NextBytes()\n}",
"func decodeFromBase64(encodedXdr string) *b.TransactionEnvelopeBuilder {\n\t// Unmarshall from base64 encoded XDR format\n\tvar decoded xdr.TransactionEnvelope\n\te := xdr.SafeUnmarshalBase64(encodedXdr, &decoded)\n\tif e != nil {\n\t\tlog.Fatal(e)\n\t}\n\n\t// convert to TransactionEnvelopeBuilder\n\ttxEnvelopeBuilder := b.TransactionEnvelopeBuilder{E: &decoded}\n\ttxEnvelopeBuilder.Init()\n\n\treturn &txEnvelopeBuilder\n}",
"func bitsToRoaringData(ps pairSet) ([]byte, error) {\n\tbmp := roaring.NewBitmap()\n\tfor j := 0; j < len(ps.columnIDs); j++ {\n\t\tbmp.DirectAdd(ps.rowIDs[j]*ShardWidth + (ps.columnIDs[j] % ShardWidth))\n\t}\n\n\tvar buf bytes.Buffer\n\t_, err := bmp.WriteTo(&buf)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"writing to buffer\")\n\t}\n\n\treturn buf.Bytes(), nil\n}",
"func Base64(n int) string { return String(n, Base64Chars) }",
"func reassembleChunks(chunks *[][]byte) *[]byte {\n\tr := make([]byte, 0)\n\tfor i := 0; i < len(*chunks); i++ {\n\t\tr = append(r, (*chunks)[i]...)\n\t}\n\n\treturn &r\n}",
"func convert(db storage.Storage, tailHeight uint64) string {\n\tvar data string\n\tinsert_info := \"INSERT INTO `t_block` (`id`, `index`, `hash`, `pre_hash`, \" +\n\t\t\t\t \"`nonce`, `timestamp`, `transactions`, `miner`, `size`, `crt_time`) VALUES\\n\"\n\tfor i := uint64(1); i <= tailHeight; i++ {\n\t\thash, err := db.Get(util.UintToHex(i))\n\t\tif err != nil {\n\t\t\tfmt.Println(err)\n\t\t\treturn \"Block at height \" + strconv.FormatUint(i, 10) + \" does not exist\"\n\t\t}\n\t\trawBytes, err := db.Get(hash)\n\t\tblock := block.Deserialize(rawBytes)\n\t\tordered_data := orderedData(block) + \"\\n\"\n\t\tif i % 164 == 1 {\n\t\t\tdata += insert_info + ordered_data\n\t\t} else {\n\t\t\tdata += ordered_data\n\t\t}\n\t}\n\treturn data\n}",
"func EnqueueBlobTuples(reqs *list.List) error {\n\tgqi.enqueueBlobTuples(reqs)\n\treturn nil\n}",
"func (client StringClient) GetBase64EncodedResponder(resp *http.Response) (result Base64URL, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result.Value),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func ExportAsBase64String(insta *goinsta.Instagram) (string, error) {\n\tbytes, err := ExportAsBytes(insta)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tsEnc := base64.StdEncoding.EncodeToString(bytes)\n\treturn sEnc, nil\n}",
"func (b *Buffer) ToStringBase64() string {\n\treturn base64.StdEncoding.EncodeToString(b.b)\n}",
"func init() {\n\tblob.Add(\"/details.tmpl\", []byte{60, 33, 68, 79, 67, 84, 89, 80, 69, 32, 104, 116, 109, 108, 62, 10, 60, 72, 84, 77, 76, 62, 10, 10, 60, 72, 69, 65, 68, 62, 10, 32, 32, 32, 32, 60, 109, 101, 116, 97, 32, 104, 116, 116, 112, 45, 101, 113, 117, 105, 118, 61, 34, 67, 111, 110, 116, 101, 110, 116, 45, 84, 121, 112, 101, 34, 32, 99, 111, 110, 116, 101, 110, 116, 61, 34, 116, 101, 120, 116, 47, 104, 116, 109, 108, 59, 32, 99, 104, 97, 114, 115, 101, 116, 61, 117, 116, 102, 45, 56, 34, 62, 10, 32, 32, 32, 32, 60, 109, 101, 116, 97, 32, 99, 104, 97, 114, 115, 101, 116, 61, 34, 117, 116, 102, 45, 56, 34, 62, 10, 32, 32, 32, 32, 60, 115, 99, 114, 105, 112, 116, 32, 116, 121, 112, 101, 61, 34, 116, 101, 120, 116, 47, 106, 97, 118, 97, 115, 99, 114, 105, 112, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 100, 111, 99, 117, 109, 101, 110, 116, 46, 97, 100, 100, 69, 118, 101, 110, 116, 76, 105, 115, 116, 101, 110, 101, 114, 40, 39, 99, 108, 105, 99, 107, 39, 44, 32, 102, 117, 110, 99, 116, 105, 111, 110, 32, 40, 101, 118, 101, 110, 116, 41, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 105, 102, 32, 40, 33, 101, 118, 101, 110, 116, 46, 116, 97, 114, 103, 101, 116, 46, 109, 97, 116, 99, 104, 101, 115, 40, 39, 46, 116, 111, 103, 103, 108, 101, 39, 41, 41, 32, 114, 101, 116, 117, 114, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 101, 118, 101, 110, 116, 46, 112, 114, 101, 118, 101, 110, 116, 68, 101, 102, 97, 117, 108, 116, 40, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 101, 118, 101, 110, 116, 46, 116, 97, 114, 103, 101, 116, 46, 99, 108, 97, 115, 115, 76, 105, 115, 116, 46, 116, 111, 103, 103, 108, 101, 40, 34, 99, 111, 108, 108, 97, 112, 115, 101, 34, 41, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 101, 116, 32, 101, 108, 101, 109, 32, 61, 32, 40, 101, 118, 101, 110, 116, 46, 116, 97, 114, 103, 101, 116, 46, 110, 101, 120, 116, 69, 108, 101, 109, 101, 110, 116, 83, 105, 98, 108, 105, 110, 103, 41, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 101, 108, 101, 109, 46, 99, 108, 97, 115, 115, 76, 105, 115, 116, 46, 116, 111, 103, 103, 108, 101, 40, 34, 104, 105, 100, 101, 34, 41, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 44, 32, 102, 97, 108, 115, 101, 41, 59, 10, 32, 32, 32, 32, 60, 47, 115, 99, 114, 105, 112, 116, 62, 10, 32, 32, 32, 32, 60, 84, 73, 84, 76, 69, 62, 87, 65, 70, 32, 84, 101, 115, 116, 105, 110, 103, 32, 114, 101, 112, 111, 114, 116, 60, 47, 84, 73, 84, 76, 69, 62, 10, 32, 32, 32, 32, 60, 115, 116, 121, 108, 101, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 100, 121, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 105, 110, 45, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 111, 110, 116, 97, 105, 110, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 102, 97, 109, 105, 108, 121, 58, 32, 45, 97, 112, 112, 108, 101, 45, 115, 121, 115, 116, 101, 109, 44, 32, 66, 108, 105, 110, 107, 77, 97, 99, 83, 121, 115, 116, 101, 109, 70, 111, 110, 116, 44, 32, 34, 83, 101, 103, 111, 101, 32, 85, 73, 34, 44, 32, 82, 111, 98, 111, 116, 111, 44, 32, 79, 120, 121, 103, 101, 110, 44, 32, 85, 98, 117, 110, 116, 117, 44, 32, 67, 97, 110, 116, 97, 114, 101, 108, 108, 44, 32, 34, 70, 105, 114, 97, 32, 83, 97, 110, 115, 34, 44, 32, 34, 68, 114, 111, 105, 100, 32, 83, 97, 110, 115, 34, 44, 32, 34, 72, 101, 108, 118, 101, 116, 105, 99, 97, 32, 78, 101, 117, 
101, 34, 44, 32, 115, 97, 110, 115, 45, 115, 101, 114, 105, 102, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 52, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 111, 110, 102, 105, 103, 115, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 54, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 111, 110, 102, 105, 103, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 111, 110, 102, 105, 103, 45, 98, 111, 100, 121, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 114, 97, 100, 105, 117, 115, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 49, 48, 112, 120, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 104, 105, 100, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 110, 111, 110, 101, 32, 33, 105, 109, 112, 111, 114, 116, 97, 110, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 111, 103, 103, 108, 101, 58, 97, 102, 116, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 110, 116, 101, 110, 116, 58, 32, 34, 45, 34, 32, 33, 105, 109, 112, 111, 114, 116, 97, 110, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 97, 108, 105, 103, 110, 45, 115, 101, 108, 102, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 50, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 108, 101, 102, 116, 58, 32, 49, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 111, 103, 103, 108, 101, 46, 99, 111, 108, 108, 97, 112, 115, 101, 58, 97, 102, 116, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 110, 116, 101, 110, 116, 58, 32, 39, 43, 39, 32, 33, 105, 109, 112, 111, 114, 116, 97, 110, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 97, 108, 105, 103, 110, 45, 
115, 101, 108, 102, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 50, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 108, 101, 102, 116, 58, 32, 49, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 112, 111, 114, 116, 45, 104, 101, 97, 100, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 106, 117, 115, 116, 105, 102, 121, 45, 99, 111, 110, 116, 101, 110, 116, 58, 32, 115, 112, 97, 99, 101, 45, 98, 101, 116, 119, 101, 101, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 112, 111, 114, 116, 45, 104, 101, 97, 100, 101, 114, 32, 104, 49, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 51, 54, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 115, 99, 97, 110, 45, 116, 105, 109, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 114, 105, 103, 104, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 52, 55, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 53, 112, 120, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 45, 109, 97, 116, 114, 105, 120, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 109, 97, 116, 114, 105, 120, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 54, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 109, 97, 116, 114, 105, 120, 45, 116, 105, 116, 108, 101, 62, 104, 52, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 97, 108, 105, 103, 110, 45, 115, 101, 108, 102, 58, 32, 102, 108, 101, 120, 45, 101, 110, 100, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 
108, 101, 102, 116, 58, 32, 50, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 109, 97, 116, 114, 105, 120, 45, 98, 111, 100, 121, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 120, 45, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 105, 110, 45, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 104, 101, 97, 100, 101, 114, 45, 114, 111, 119, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 112, 114, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 116, 97, 98, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 114, 97, 100, 105, 117, 115, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 49, 48, 112, 120, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 102, 97, 105, 108, 101, 100, 45, 108, 111, 99, 97, 116, 105, 111, 110, 115, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 108, 101, 102, 116, 58, 32, 53, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 108, 97, 98, 101, 108, 114, 111, 119, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 97, 108, 105, 103, 110, 45, 99, 111, 110, 116, 101, 110, 116, 58, 32, 102, 108, 101, 120, 45, 115, 116, 97, 114, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 116, 111, 112, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 119, 101, 105, 103, 104, 116, 58, 32, 98, 111, 108, 100, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 108, 97, 98, 101, 108, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 53, 48, 
37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 119, 104, 111, 108, 101, 114, 111, 119, 108, 97, 98, 101, 108, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 114, 111, 119, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 106, 117, 115, 116, 105, 102, 121, 45, 99, 111, 110, 116, 101, 110, 116, 58, 32, 115, 112, 97, 99, 101, 45, 98, 101, 116, 119, 101, 101, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 111, 118, 101, 114, 102, 108, 111, 119, 45, 119, 114, 97, 112, 58, 32, 97, 110, 121, 119, 104, 101, 114, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 113, 117, 101, 115, 116, 108, 97, 98, 101, 108, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 53, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 112, 111, 110, 115, 101, 108, 97, 98, 101, 108, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 53, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 119, 104, 111, 108, 101, 114, 111, 119, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 113, 117, 101, 115, 116, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 53, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 111, 118, 101, 114, 102, 108, 111, 119, 58, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 49, 48, 112, 120, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 112, 111, 110, 115, 101, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 53, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 32, 32, 111, 118, 101, 114, 102, 108, 111, 119, 58, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 49, 48, 112, 120, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 104, 101, 97, 100, 101, 114, 45, 114, 111, 119, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 101, 102, 101, 102, 101, 102, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 116, 111, 112, 45, 108, 101, 102, 116, 45, 114, 97, 100, 105, 117, 115, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 116, 111, 112, 45, 114, 105, 103, 104, 116, 45, 114, 97, 100, 105, 117, 115, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 101, 115, 116, 45, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 114, 97, 100, 105, 117, 115, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 32, 32, 32, 32, 60, 47, 115, 116, 121, 108, 101, 62, 10, 60, 47, 72, 69, 65, 68, 62, 10, 10, 60, 66, 79, 68, 89, 62, 10, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 111, 110, 116, 97, 105, 110, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 112, 111, 114, 116, 45, 104, 101, 97, 100, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 112, 111, 114, 116, 45, 116, 105, 116, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 49, 62, 87, 65, 70, 32, 84, 101, 115, 116, 105, 110, 103, 32, 82, 101, 112, 111, 114, 116, 32, 45, 32, 68, 101, 116, 97, 105, 108, 115, 60, 47, 104, 49, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 115, 99, 97, 110, 45, 116, 105, 109, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 52, 62, 84, 101, 115, 116, 32, 83, 116, 97, 114, 116, 58, 32, 123, 123, 46, 82, 101, 112, 111, 114, 116, 46, 82, 101, 115, 117, 108, 116, 115, 46, 83, 116, 97, 114, 116, 84, 105, 109, 101, 125, 125, 60, 47, 104, 52, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 52, 62, 84, 101, 115, 116, 32, 69, 110, 100, 58, 32, 123, 123, 46, 82, 101, 112, 111, 114, 116, 46, 82, 101, 115, 117, 108, 116, 115, 46, 69, 110, 100, 84, 105, 109, 101, 125, 125, 60, 47, 104, 52, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 
100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 45, 109, 97, 116, 114, 105, 120, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 111, 110, 102, 105, 103, 115, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 111, 110, 102, 105, 103, 45, 116, 105, 116, 108, 101, 32, 116, 111, 103, 103, 108, 101, 32, 99, 111, 108, 108, 97, 112, 115, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 50, 62, 84, 101, 115, 116, 32, 67, 111, 110, 102, 105, 103, 117, 114, 97, 116, 105, 111, 110, 115, 60, 47, 104, 50, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 111, 110, 102, 105, 103, 45, 98, 111, 100, 121, 32, 104, 105, 100, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 114, 101, 62, 123, 123, 46, 82, 101, 112, 111, 114, 116, 46, 67, 111, 110, 102, 105, 103, 125, 125, 60, 47, 112, 114, 101, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 109, 97, 116, 114, 105, 120, 45, 116, 105, 116, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 50, 62, 82, 101, 115, 117, 108, 116, 115, 32, 68, 101, 116, 97, 105, 108, 115, 60, 47, 104, 50, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 52, 62, 60, 97, 32, 104, 114, 101, 102, 61, 34, 46, 47, 115, 117, 109, 109, 97, 114, 121, 46, 104, 116, 109, 108, 34, 62, 71, 111, 32, 84, 111, 32, 83, 117, 109, 109, 97, 114, 121, 60, 47, 97, 62, 60, 47, 104, 52, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 109, 97, 116, 114, 105, 120, 45, 98, 111, 100, 121, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 102, 105, 108, 101, 78, 97, 109, 101, 44, 32, 36, 102, 105, 108, 101, 32, 58, 61, 32, 36, 46, 82, 101, 112, 111, 114, 116, 46, 82, 101, 115, 117, 108, 116, 115, 46, 70, 105, 108, 101, 82, 101, 115, 117, 108, 116, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 108, 105, 110, 101, 44, 32, 36, 114, 101, 115, 117, 108, 116, 115, 32, 58, 61, 32, 36, 102, 105, 108, 101, 46, 80, 97, 121, 108, 111, 97, 100, 82, 101, 115, 117, 108, 116, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 116, 97, 98, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 104, 101, 97, 100, 101, 114, 45, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 97, 98, 101, 108, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 70, 105, 108, 101, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 76, 105, 110, 101, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 102, 105, 108, 101, 78, 97, 109, 101, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 114, 101, 115, 117, 108, 116, 115, 46, 76, 105, 110, 101, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 97, 98, 101, 108, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 119, 104, 111, 108, 101, 114, 111, 119, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 80, 97, 121, 108, 111, 97, 100, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 119, 104, 111, 108, 101, 114, 111, 119, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 114, 101, 115, 117, 108, 116, 115, 46, 80, 97, 121, 108, 111, 97, 100, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 115, 101, 116, 78, 97, 109, 101, 44, 32, 36, 108, 111, 99, 97, 116, 105, 111, 110, 115, 32, 58, 61, 32, 36, 114, 101, 115, 117, 108, 116, 115, 46, 83, 101, 116, 82, 101, 115, 117, 108, 116, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 102, 97, 105, 108, 101, 100, 45, 108, 111, 99, 97, 116, 105, 111, 110, 115, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 97, 98, 101, 108, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 119, 104, 111, 108, 101, 114, 111, 119, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 87, 65, 70, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 119, 104, 111, 108, 101, 114, 111, 119, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 115, 101, 116, 78, 97, 109, 101, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 
32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 102, 97, 105, 108, 101, 100, 45, 108, 111, 99, 97, 116, 105, 111, 110, 115, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 116, 101, 115, 116, 82, 101, 115, 117, 108, 116, 32, 58, 61, 32, 36, 108, 111, 99, 97, 116, 105, 111, 110, 115, 46, 76, 111, 99, 97, 116, 105, 111, 110, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 116, 101, 115, 116, 45, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 97, 98, 101, 108, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 108, 97, 98, 101, 108, 32, 114, 101, 113, 117, 101, 115, 116, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 76, 111, 99, 97, 116, 105, 111, 110, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 108, 97, 98, 101, 108, 32, 114, 101, 115, 112, 111, 110, 115, 101, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 79, 117, 116, 99, 111, 109, 101, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 116, 101, 115, 116, 82, 101, 115, 117, 108, 116, 46, 76, 111, 99, 97, 116, 105, 111, 110, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 116, 101, 115, 116, 82, 101, 115, 117, 108, 116, 46, 79, 117, 116, 99, 111, 109, 101, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 97, 98, 101, 108, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 108, 97, 98, 101, 108, 32, 114, 101, 113, 117, 101, 115, 116, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 82, 101, 113, 117, 101, 115, 116, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 108, 97, 98, 101, 108, 32, 114, 101, 115, 112, 111, 110, 115, 101, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 105, 102, 32, 101, 113, 32, 36, 116, 101, 115, 116, 82, 101, 115, 117, 108, 116, 46, 79, 117, 116, 99, 111, 109, 101, 32, 34, 105, 110, 118, 97, 108, 105, 100, 34, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 73, 110, 118, 97, 108, 105, 100, 32, 82, 101, 97, 115, 111, 110, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 108, 115, 101, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 82, 101, 115, 112, 111, 110, 115, 101, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 113, 117, 101, 115, 116, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 114, 101, 62, 123, 123, 36, 116, 101, 115, 116, 82, 101, 115, 117, 108, 116, 46, 82, 101, 113, 117, 101, 115, 116, 32, 124, 32, 104, 116, 109, 108, 125, 125, 60, 47, 112, 114, 101, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 112, 111, 110, 115, 101, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 114, 101, 62, 123, 123, 36, 116, 101, 115, 116, 82, 101, 115, 117, 108, 116, 46, 82, 101, 115, 112, 111, 110, 115, 101, 32, 124, 32, 104, 116, 109, 108, 125, 125, 60, 47, 112, 114, 101, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 60, 47, 66, 79, 68, 89, 62, 10, 10, 60, 47, 72, 84, 77, 76, 62})\n\tblob.Add(\"/summary.tmpl\", []byte{60, 33, 68, 79, 67, 84, 89, 80, 69, 32, 104, 116, 109, 108, 62, 10, 60, 72, 84, 77, 76, 62, 10, 10, 60, 72, 69, 65, 68, 62, 10, 32, 32, 32, 32, 60, 109, 101, 116, 97, 32, 104, 116, 116, 112, 45, 101, 113, 117, 105, 118, 61, 34, 67, 111, 110, 116, 101, 110, 116, 45, 84, 121, 112, 101, 34, 32, 99, 111, 110, 116, 
101, 110, 116, 61, 34, 116, 101, 120, 116, 47, 104, 116, 109, 108, 59, 32, 99, 104, 97, 114, 115, 101, 116, 61, 117, 116, 102, 45, 56, 34, 62, 10, 32, 32, 32, 32, 60, 109, 101, 116, 97, 32, 99, 104, 97, 114, 115, 101, 116, 61, 34, 117, 116, 102, 45, 56, 34, 62, 10, 32, 32, 32, 32, 60, 84, 73, 84, 76, 69, 62, 87, 65, 70, 32, 84, 101, 115, 116, 105, 110, 103, 32, 114, 101, 112, 111, 114, 116, 60, 47, 84, 73, 84, 76, 69, 62, 10, 32, 32, 32, 32, 60, 115, 116, 121, 108, 101, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 100, 121, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 105, 110, 45, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 111, 110, 116, 97, 105, 110, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 102, 97, 109, 105, 108, 121, 58, 32, 45, 97, 112, 112, 108, 101, 45, 115, 121, 115, 116, 101, 109, 44, 32, 66, 108, 105, 110, 107, 77, 97, 99, 83, 121, 115, 116, 101, 109, 70, 111, 110, 116, 44, 32, 34, 83, 101, 103, 111, 101, 32, 85, 73, 34, 44, 32, 82, 111, 98, 111, 116, 111, 44, 32, 79, 120, 121, 103, 101, 110, 44, 32, 85, 98, 117, 110, 116, 117, 44, 32, 67, 97, 110, 116, 97, 114, 101, 108, 108, 44, 32, 34, 70, 105, 114, 97, 32, 83, 97, 110, 115, 34, 44, 32, 34, 68, 114, 111, 105, 100, 32, 83, 97, 110, 115, 34, 44, 32, 34, 72, 101, 108, 118, 101, 116, 105, 99, 97, 32, 78, 101, 117, 101, 34, 44, 32, 115, 97, 110, 115, 45, 115, 101, 114, 105, 102, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 52, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 112, 111, 114, 116, 45, 104, 101, 97, 100, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 106, 117, 115, 116, 105, 102, 121, 45, 99, 111, 110, 116, 101, 110, 116, 58, 32, 115, 112, 97, 99, 101, 45, 98, 101, 116, 119, 101, 101, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 112, 111, 114, 116, 45, 104, 101, 97, 100, 101, 114, 32, 104, 49, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 51, 54, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 115, 99, 97, 110, 45, 116, 105, 109, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 114, 105, 103, 104, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 115, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 106, 117, 115, 116, 105, 102, 121, 45, 99, 111, 110, 116, 101, 110, 116, 58, 32, 115, 112, 97, 99, 101, 45, 101, 118, 101, 110, 108, 121, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 119, 114, 97, 112, 58, 32, 119, 114, 97, 112, 59, 10, 32, 32, 32, 32, 
32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 52, 55, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 114, 97, 100, 105, 117, 115, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 53, 112, 120, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 49, 48, 112, 120, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 45, 98, 111, 116, 116, 111, 109, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 50, 52, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 119, 101, 105, 103, 104, 116, 58, 32, 98, 111, 108, 100, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 45, 115, 117, 109, 109, 97, 114, 121, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 56, 53, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 104, 101, 105, 103, 104, 116, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 50, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 114, 105, 103, 104, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 103, 114, 97, 112, 104, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 111, 115, 105, 116, 105, 111, 110, 58, 32, 114, 101, 108, 97, 116, 105, 118, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 
115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 50, 48, 48, 44, 32, 50, 48, 48, 44, 32, 50, 48, 48, 44, 32, 48, 46, 56, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 115, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 104, 101, 105, 103, 104, 116, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 122, 45, 105, 110, 100, 101, 120, 58, 32, 45, 49, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 111, 115, 105, 116, 105, 111, 110, 58, 32, 97, 98, 115, 111, 108, 117, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 104, 101, 105, 103, 104, 116, 58, 32, 105, 110, 104, 101, 114, 105, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 111, 115, 105, 116, 105, 111, 110, 58, 32, 97, 98, 115, 111, 108, 117, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 58, 32, 114, 103, 98, 97, 40, 50, 48, 48, 44, 32, 50, 48, 48, 44, 32, 50, 48, 48, 44, 32, 48, 46, 56, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 46, 108, 45, 48, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 101, 102, 116, 58, 32, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 46, 108, 45, 50, 48, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 101, 102, 116, 58, 32, 50, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 46, 108, 45, 52, 48, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 101, 102, 116, 58, 32, 52, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 46, 108, 45, 54, 48, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 101, 102, 116, 58, 32, 54, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 46, 108, 45, 56, 48, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 101, 102, 116, 58, 32, 56, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 46, 108, 45, 49, 48, 48, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 101, 102, 116, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 45, 108, 97, 98, 101, 108, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 
32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 98, 108, 111, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 111, 115, 105, 116, 105, 111, 110, 58, 32, 97, 98, 115, 111, 108, 117, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 116, 116, 111, 109, 58, 32, 45, 50, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 114, 105, 103, 104, 116, 58, 32, 45, 53, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 108, 97, 98, 101, 108, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 48, 48, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 106, 117, 115, 116, 105, 102, 121, 45, 99, 111, 110, 116, 101, 110, 116, 58, 32, 115, 112, 97, 99, 101, 45, 98, 101, 116, 119, 101, 101, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 98, 97, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 104, 101, 105, 103, 104, 116, 58, 32, 51, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 58, 32, 108, 105, 110, 101, 97, 114, 45, 103, 114, 97, 100, 105, 101, 110, 116, 40, 116, 111, 32, 108, 101, 102, 116, 44, 32, 35, 51, 100, 51, 100, 51, 100, 44, 32, 35, 52, 100, 52, 100, 52, 100, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 116, 111, 112, 45, 114, 105, 103, 104, 116, 45, 114, 97, 100, 105, 117, 115, 58, 32, 52, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 45, 114, 105, 103, 104, 116, 45, 114, 97, 100, 105, 117, 115, 58, 32, 52, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 108, 101, 102, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 50, 112, 120, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 109, 101, 116, 114, 105, 99, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 114, 105, 103, 104, 116, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 50, 112, 120, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 116, 101, 120, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 116, 111, 112, 58, 32, 51, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 
32, 32, 32, 32, 32, 32, 46, 99, 104, 97, 114, 116, 45, 116, 101, 120, 116, 32, 112, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 55, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 50, 112, 120, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 114, 103, 98, 97, 40, 49, 48, 44, 32, 49, 48, 44, 32, 49, 48, 44, 32, 48, 46, 55, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 103, 114, 97, 100, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 53, 37, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 103, 114, 97, 100, 101, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 103, 114, 97, 100, 101, 45, 108, 101, 116, 116, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 114, 97, 100, 105, 117, 115, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 49, 53, 112, 120, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 104, 101, 105, 103, 104, 116, 58, 32, 56, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 56, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 118, 101, 114, 116, 105, 99, 97, 108, 45, 97, 108, 105, 103, 110, 58, 32, 109, 105, 100, 100, 108, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 70, 98, 103, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 100, 51, 51, 99, 52, 51, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 68, 98, 103, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 99, 52, 54, 56, 52, 99, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 67, 98, 103, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 99, 57, 98, 98, 48, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 66, 98, 103, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 52, 99, 57, 52, 99, 52, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 65, 98, 103, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 50, 53, 57, 101, 53, 98, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 
32, 32, 32, 32, 32, 32, 32, 46, 70, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 119, 101, 98, 107, 105, 116, 45, 116, 101, 120, 116, 45, 115, 116, 114, 111, 107, 101, 58, 32, 48, 46, 50, 112, 120, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 35, 100, 51, 51, 99, 52, 51, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 68, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 119, 101, 98, 107, 105, 116, 45, 116, 101, 120, 116, 45, 115, 116, 114, 111, 107, 101, 58, 32, 48, 46, 50, 112, 120, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 35, 99, 52, 54, 56, 52, 99, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 67, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 119, 101, 98, 107, 105, 116, 45, 116, 101, 120, 116, 45, 115, 116, 114, 111, 107, 101, 58, 32, 48, 46, 50, 112, 120, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 35, 99, 57, 98, 98, 48, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 66, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 119, 101, 98, 107, 105, 116, 45, 116, 101, 120, 116, 45, 115, 116, 114, 111, 107, 101, 58, 32, 48, 46, 50, 112, 120, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 35, 52, 99, 57, 52, 99, 52, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 65, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 119, 101, 98, 107, 105, 116, 45, 116, 101, 120, 116, 45, 115, 116, 114, 111, 107, 101, 58, 32, 48, 46, 50, 112, 120, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 35, 50, 53, 57, 101, 53, 98, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 103, 114, 97, 100, 101, 45, 108, 101, 116, 116, 101, 114, 32, 104, 53, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 53, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 105, 110, 101, 45, 104, 101, 105, 103, 104, 116, 58, 32, 55, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 119, 104, 105, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 103, 114, 97, 100, 101, 45, 108, 101, 116, 116, 101, 114, 32, 112, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 45, 49, 48, 112, 120, 32, 48, 32, 48, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 119, 104, 105, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 103, 114, 97, 100, 101, 45, 115, 99, 97, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 49, 53, 112, 120, 32, 97, 117, 116, 111, 32, 48, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 54, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 
32, 32, 32, 32, 32, 32, 32, 46, 103, 114, 97, 100, 101, 45, 115, 99, 97, 108, 101, 32, 112, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 52, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 114, 103, 98, 97, 40, 49, 48, 44, 32, 49, 48, 44, 32, 49, 48, 44, 32, 48, 46, 55, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 45, 109, 97, 116, 114, 105, 120, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 109, 97, 116, 114, 105, 120, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 54, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 109, 97, 116, 114, 105, 120, 45, 116, 105, 116, 108, 101, 62, 104, 52, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 97, 108, 105, 103, 110, 45, 115, 101, 108, 102, 58, 32, 102, 108, 101, 120, 45, 101, 110, 100, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 108, 101, 102, 116, 58, 32, 50, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 105, 116, 108, 101, 45, 107, 101, 121, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 97, 108, 105, 103, 110, 45, 115, 101, 108, 102, 58, 32, 102, 108, 101, 120, 45, 101, 110, 100, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 108, 101, 102, 116, 58, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 105, 116, 108, 101, 45, 107, 101, 121, 62, 112, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 114, 105, 103, 104, 116, 58, 32, 50, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 105, 116, 108, 101, 45, 107, 101, 121, 32, 46, 108, 111, 99, 97, 116, 105, 111, 110, 45, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 97, 108, 105, 103, 110, 45, 115, 101, 108, 102, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 109, 97, 116, 114, 105, 120, 45, 104, 101, 97, 100, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 111, 
115, 105, 116, 105, 111, 110, 58, 32, 115, 116, 105, 99, 107, 121, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 111, 112, 58, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 119, 104, 105, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 109, 97, 116, 114, 105, 120, 45, 98, 111, 100, 121, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 100, 105, 114, 101, 99, 116, 105, 111, 110, 58, 32, 99, 111, 108, 117, 109, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 97, 117, 116, 111, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 101, 115, 116, 45, 115, 101, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 116, 101, 115, 116, 45, 115, 101, 116, 45, 116, 105, 116, 108, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 45, 98, 111, 116, 116, 111, 109, 58, 32, 50, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 104, 101, 97, 100, 101, 114, 45, 114, 111, 119, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 108, 105, 110, 101, 45, 112, 97, 121, 108, 111, 97, 100, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 103, 114, 111, 119, 58, 32, 51, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 120, 45, 119, 105, 100, 116, 104, 58, 32, 54, 48, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 105, 110, 45, 119, 105, 100, 116, 104, 58, 32, 54, 48, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 114, 105, 103, 104, 116, 58, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 108, 105, 110, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 103, 114, 111, 119, 58, 32, 49, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 120, 45, 119, 105, 100, 116, 104, 58, 32, 53, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 105, 110, 45, 119, 105, 100, 116, 104, 58, 32, 53, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 105, 110, 101, 45, 104, 101, 105, 103, 104, 116, 58, 32, 51, 54, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 112, 97, 121, 108, 111, 97, 100, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 
32, 32, 32, 102, 108, 101, 120, 45, 103, 114, 111, 119, 58, 32, 50, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 111, 118, 101, 114, 102, 108, 111, 119, 58, 32, 104, 105, 100, 100, 101, 110, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 120, 45, 119, 105, 100, 116, 104, 58, 32, 53, 53, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 108, 105, 110, 101, 45, 104, 101, 105, 103, 104, 116, 58, 32, 51, 54, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 102, 105, 108, 101, 45, 114, 111, 119, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 49, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 45, 98, 111, 116, 116, 111, 109, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 119, 101, 105, 103, 104, 116, 58, 32, 98, 111, 108, 100, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 108, 111, 99, 97, 116, 105, 111, 110, 45, 119, 114, 97, 112, 112, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 120, 45, 119, 105, 100, 116, 104, 58, 32, 50, 48, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 105, 110, 45, 119, 105, 100, 116, 104, 58, 32, 50, 48, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 49, 48, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 108, 101, 120, 45, 103, 114, 111, 119, 58, 32, 50, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 115, 101, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 115, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 101, 115, 117, 108, 116, 45, 114, 111, 119, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 100, 105, 115, 112, 108, 97, 121, 58, 32, 102, 108, 101, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 98, 111, 116, 116, 111, 109, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 114, 103, 98, 97, 40, 48, 44, 32, 48, 44, 32, 48, 44, 32, 46, 51, 50, 53, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 108, 111, 99, 97, 116, 105, 111, 110, 45, 104, 101, 97, 100, 101, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 114, 111, 116, 97, 116, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 119, 101, 98, 107, 105, 116, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 58, 32, 116, 114, 97, 110, 115, 108, 97, 116, 101, 40, 48, 112, 120, 44, 32, 53, 112, 120, 41, 32, 114, 111, 116, 97, 116, 101, 40, 51, 49, 53, 
100, 101, 103, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 109, 111, 122, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 58, 32, 116, 114, 97, 110, 115, 108, 97, 116, 101, 40, 48, 112, 120, 44, 32, 53, 112, 120, 41, 32, 114, 111, 116, 97, 116, 101, 40, 51, 49, 53, 100, 101, 103, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 109, 115, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 58, 32, 116, 114, 97, 110, 115, 108, 97, 116, 101, 40, 48, 112, 120, 44, 32, 53, 112, 120, 41, 32, 114, 111, 116, 97, 116, 101, 40, 51, 49, 53, 100, 101, 103, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 111, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 58, 32, 116, 114, 97, 110, 115, 108, 97, 116, 101, 40, 48, 112, 120, 44, 32, 53, 112, 120, 41, 32, 114, 111, 116, 97, 116, 101, 40, 51, 49, 53, 100, 101, 103, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 114, 97, 110, 115, 102, 111, 114, 109, 58, 32, 116, 114, 97, 110, 115, 108, 97, 116, 101, 40, 48, 112, 120, 44, 32, 53, 112, 120, 41, 32, 114, 111, 116, 97, 116, 101, 40, 51, 49, 53, 100, 101, 103, 41, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 119, 101, 98, 107, 105, 116, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 45, 111, 114, 105, 103, 105, 110, 58, 32, 48, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 109, 111, 122, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 45, 111, 114, 105, 103, 105, 110, 58, 32, 48, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 109, 115, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 45, 111, 114, 105, 103, 105, 110, 58, 32, 48, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 45, 111, 45, 116, 114, 97, 110, 115, 102, 111, 114, 109, 45, 111, 114, 105, 103, 105, 110, 58, 32, 48, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 114, 97, 110, 115, 102, 111, 114, 109, 45, 111, 114, 105, 103, 105, 110, 58, 32, 48, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 108, 111, 99, 97, 116, 105, 111, 110, 45, 114, 101, 115, 117, 108, 116, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 112, 97, 100, 100, 105, 110, 103, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 58, 32, 49, 112, 120, 32, 115, 111, 108, 105, 100, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 111, 114, 100, 101, 114, 45, 114, 97, 100, 105, 117, 115, 58, 32, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 119, 105, 100, 116, 104, 58, 32, 49, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 104, 101, 105, 103, 104, 116, 58, 32, 49, 53, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 102, 111, 110, 116, 45, 115, 105, 122, 101, 58, 32, 49, 50, 112, 120, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 116, 101, 120, 116, 45, 97, 108, 105, 103, 110, 58, 32, 99, 101, 110, 116, 101, 114, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 112, 97, 115, 115, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 50, 53, 57, 101, 53, 98, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 112, 97, 115, 115, 58, 
58, 98, 101, 102, 111, 114, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 110, 116, 101, 110, 116, 58, 32, 39, 92, 50, 55, 49, 51, 39, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 119, 104, 105, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 102, 97, 105, 108, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 100, 51, 51, 99, 52, 51, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 102, 97, 105, 108, 58, 58, 98, 101, 102, 111, 114, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 110, 116, 101, 110, 116, 58, 32, 39, 92, 50, 55, 49, 53, 39, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 119, 104, 105, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 105, 110, 118, 97, 108, 105, 100, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 55, 101, 55, 101, 55, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 105, 110, 118, 97, 108, 105, 100, 58, 58, 98, 101, 102, 111, 114, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 110, 116, 101, 110, 116, 58, 32, 39, 92, 50, 48, 49, 51, 39, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 119, 104, 105, 116, 101, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 101, 114, 114, 111, 114, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 98, 97, 99, 107, 103, 114, 111, 117, 110, 100, 45, 99, 111, 108, 111, 114, 58, 32, 35, 100, 98, 98, 54, 48, 102, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 46, 101, 114, 114, 111, 114, 58, 58, 98, 101, 102, 111, 114, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 110, 116, 101, 110, 116, 58, 32, 39, 92, 48, 48, 50, 49, 39, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 111, 108, 111, 114, 58, 32, 98, 108, 97, 99, 107, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 10, 32, 32, 32, 32, 32, 32, 32, 32, 112, 114, 101, 32, 123, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 109, 97, 114, 103, 105, 110, 58, 32, 48, 59, 10, 32, 32, 32, 32, 32, 32, 32, 32, 125, 10, 32, 32, 32, 32, 60, 47, 115, 116, 121, 108, 101, 62, 10, 60, 47, 72, 69, 65, 68, 62, 10, 10, 60, 66, 79, 68, 89, 62, 10, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 111, 110, 116, 97, 105, 110, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 112, 111, 114, 116, 45, 104, 101, 97, 100, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 112, 111, 114, 116, 45, 116, 105, 116, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 49, 62, 87, 65, 70, 32, 84, 101, 115, 116, 105, 110, 103, 32, 82, 101, 112, 111, 114, 116, 32, 45, 32, 83, 117, 109, 109, 97, 114, 121, 60, 47, 104, 49, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 
97, 115, 115, 61, 34, 115, 99, 97, 110, 45, 116, 105, 109, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 52, 62, 84, 101, 115, 116, 32, 83, 116, 97, 114, 116, 58, 32, 123, 123, 46, 82, 101, 112, 111, 114, 116, 46, 82, 101, 115, 117, 108, 116, 115, 46, 83, 116, 97, 114, 116, 84, 105, 109, 101, 125, 125, 60, 47, 104, 52, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 52, 62, 84, 101, 115, 116, 32, 69, 110, 100, 58, 32, 123, 123, 46, 82, 101, 112, 111, 114, 116, 46, 82, 101, 115, 117, 108, 116, 115, 46, 69, 110, 100, 84, 105, 109, 101, 125, 125, 60, 47, 104, 52, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 115, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 115, 101, 116, 78, 97, 109, 101, 44, 32, 36, 99, 111, 117, 110, 116, 115, 32, 32, 58, 61, 32, 46, 82, 101, 112, 111, 114, 116, 46, 82, 101, 115, 117, 108, 116, 115, 46, 83, 101, 116, 67, 111, 117, 110, 116, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 45, 116, 105, 116, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 115, 101, 116, 78, 97, 109, 101, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 45, 115, 117, 109, 109, 97, 114, 121, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 103, 114, 97, 100, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 103, 114, 97, 100, 101, 45, 116, 105, 116, 108, 101, 34, 62, 79, 118, 101, 114, 97, 108, 108, 32, 82, 97, 116, 105, 110, 103, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 103, 114, 97, 100, 101, 45, 108, 101, 116, 116, 101, 114, 32, 123, 123, 103, 114, 97, 100, 101, 32, 36, 99, 111, 117, 110, 116, 115, 46, 70, 97, 105, 108, 80, 101, 114, 99, 101, 110, 116, 125, 125, 98, 103, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 53, 62, 123, 123, 103, 114, 97, 100, 101, 32, 36, 99, 111, 117, 110, 116, 115, 46, 70, 97, 105, 108, 80, 101, 114, 99, 101, 110, 116, 125, 125, 60, 47, 104, 53, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 70, 97, 105, 108, 80, 101, 114, 99, 101, 110, 116, 125, 125, 37, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 103, 114, 97, 100, 101, 45, 115, 99, 97, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 65, 58, 32, 48, 45, 49, 37, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 66, 58, 32, 49, 45, 50, 37, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 67, 58, 32, 50, 45, 51, 37, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 68, 58, 32, 51, 45, 52, 37, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 70, 58, 32, 52, 37, 43, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 116, 105, 116, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 84, 111, 116, 97, 108, 32, 69, 114, 114, 111, 114, 115, 58, 32, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 69, 114, 114, 67, 111, 117, 110, 116, 125, 125, 32, 124, 32, 84, 111, 116, 97, 108, 32, 73, 110, 118, 97, 108, 105, 100, 32, 84, 101, 115, 116, 115, 58, 32, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 73, 110, 118, 67, 111, 117, 110, 116, 125, 125, 32, 124, 32, 84, 111, 116, 97, 108, 32, 86, 97, 108, 105, 100, 32, 84, 101, 115, 116, 115, 58, 32, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 84, 111, 116, 97, 108, 67, 111, 117, 110, 116, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 103, 114, 97, 112, 104, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 115, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 32, 108, 45, 48, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 45, 108, 97, 98, 101, 108, 34, 62, 48, 37, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 32, 108, 45, 50, 48, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 45, 108, 97, 98, 101, 108, 34, 62, 49, 37, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 32, 108, 45, 52, 48, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 45, 108, 97, 98, 101, 108, 34, 62, 50, 37, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 32, 108, 45, 54, 48, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 45, 108, 97, 98, 101, 108, 34, 62, 51, 37, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 32, 108, 45, 56, 48, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 45, 108, 97, 98, 101, 108, 34, 62, 52, 37, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 32, 108, 45, 49, 48, 48, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 
32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 108, 105, 110, 101, 45, 108, 97, 98, 101, 108, 34, 62, 53, 37, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 116, 105, 116, 108, 101, 34, 62, 70, 97, 108, 115, 101, 32, 80, 111, 115, 105, 116, 105, 118, 101, 115, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 109, 101, 116, 114, 105, 99, 34, 62, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 70, 112, 67, 111, 117, 110, 116, 125, 125, 47, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 84, 111, 116, 97, 108, 70, 80, 84, 101, 115, 116, 67, 111, 117, 110, 116, 125, 125, 32, 40, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 70, 112, 80, 101, 114, 99, 101, 110, 116, 125, 125, 37, 41, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 34, 32, 115, 116, 121, 108, 101, 61, 34, 119, 105, 100, 116, 104, 58, 32, 123, 123, 109, 117, 108, 32, 36, 99, 111, 117, 110, 116, 115, 46, 70, 112, 80, 101, 114, 99, 101, 110, 116, 125, 125, 37, 59, 34, 62, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 108, 97, 98, 101, 108, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 116, 105, 116, 108, 101, 34, 62, 70, 97, 108, 115, 101, 32, 78, 101, 103, 97, 116, 105, 118, 101, 115, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 45, 109, 101, 116, 114, 105, 99, 34, 62, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 70, 110, 67, 111, 117, 110, 116, 125, 125, 47, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 84, 111, 116, 97, 108, 70, 78, 84, 101, 115, 116, 67, 111, 117, 110, 116, 125, 125, 32, 40, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 70, 110, 80, 101, 114, 99, 101, 110, 116, 125, 125, 37, 41, 60, 47, 115, 112, 97, 110, 62, 10, 32, 32, 
32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 98, 97, 114, 34, 32, 115, 116, 121, 108, 101, 61, 34, 119, 105, 100, 116, 104, 58, 32, 123, 123, 109, 117, 108, 32, 36, 99, 111, 117, 110, 116, 115, 46, 70, 110, 80, 101, 114, 99, 101, 110, 116, 125, 125, 37, 59, 34, 62, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 99, 104, 97, 114, 116, 45, 116, 101, 120, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 87, 65, 70, 32, 70, 97, 108, 115, 101, 32, 80, 111, 115, 105, 116, 105, 118, 101, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, 116, 108, 121, 32, 98, 108, 111, 99, 107, 101, 100, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 123, 123, 103, 114, 97, 100, 101, 32, 36, 99, 111, 117, 110, 116, 115, 46, 70, 97, 105, 108, 80, 101, 114, 99, 101, 110, 116, 125, 125, 34, 32, 115, 116, 121, 108, 101, 61, 34, 102, 111, 110, 116, 45, 119, 101, 105, 103, 104, 116, 58, 32, 98, 111, 108, 100, 101, 114, 59, 34, 62, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 70, 112, 80, 101, 114, 99, 101, 110, 116, 125, 125, 37, 60, 47, 115, 112, 97, 110, 62, 32, 111, 102, 32, 116, 101, 115, 116, 32, 112, 97, 121, 108, 111, 97, 100, 115, 46, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 87, 65, 70, 32, 70, 97, 108, 115, 101, 32, 78, 101, 103, 97, 116, 105, 118, 101, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, 116, 108, 121, 32, 97, 108, 108, 111, 119, 101, 100, 32, 60, 115, 112, 97, 110, 32, 99, 108, 97, 115, 115, 61, 34, 123, 123, 103, 114, 97, 100, 101, 32, 36, 99, 111, 117, 110, 116, 115, 46, 70, 97, 105, 108, 80, 101, 114, 99, 101, 110, 116, 125, 125, 34, 32, 115, 116, 121, 108, 101, 61, 34, 102, 111, 110, 116, 45, 119, 101, 105, 103, 104, 116, 58, 32, 98, 111, 108, 100, 101, 114, 59, 34, 62, 123, 123, 36, 99, 111, 117, 110, 116, 115, 46, 70, 110, 80, 101, 114, 99, 101, 110, 116, 125, 125, 37, 60, 47, 115, 112, 97, 110, 62, 32, 111, 102, 32, 116, 101, 115, 116, 32, 112, 97, 121, 108, 111, 97, 100, 115, 46, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 45, 109, 97, 116, 114, 105, 120, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 109, 97, 116, 114, 105, 120, 
45, 116, 105, 116, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 50, 62, 70, 97, 105, 108, 101, 100, 32, 82, 101, 115, 117, 108, 116, 115, 32, 66, 114, 101, 97, 107, 100, 111, 119, 110, 60, 47, 104, 50, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 52, 62, 60, 97, 32, 104, 114, 101, 102, 61, 34, 46, 47, 100, 101, 116, 97, 105, 108, 115, 46, 104, 116, 109, 108, 34, 62, 71, 111, 32, 84, 111, 32, 68, 101, 116, 97, 105, 108, 115, 60, 47, 97, 62, 60, 47, 104, 52, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 116, 105, 116, 108, 101, 45, 107, 101, 121, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 84, 101, 115, 116, 32, 82, 101, 115, 117, 108, 116, 32, 75, 101, 121, 58, 32, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 114, 101, 115, 117, 108, 116, 32, 112, 97, 115, 115, 34, 62, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 45, 32, 80, 97, 115, 115, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 114, 101, 115, 117, 108, 116, 32, 102, 97, 105, 108, 34, 62, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 45, 32, 70, 97, 105, 108, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 114, 101, 115, 117, 108, 116, 32, 105, 110, 118, 97, 108, 105, 100, 34, 62, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 45, 32, 73, 110, 118, 97, 108, 105, 100, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 114, 101, 115, 117, 108, 116, 32, 101, 114, 114, 111, 114, 34, 62, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 45, 32, 69, 114, 114, 111, 114, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 109, 97, 116, 114, 105, 120, 45, 98, 111, 100, 121, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 109, 97, 116, 114, 105, 120, 45, 104, 101, 97, 100, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 116, 101, 115, 116, 45, 115, 101, 116, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 105, 110, 101, 45, 112, 97, 121, 108, 111, 97, 100, 34, 62, 60, 47, 100, 105, 118, 62, 
10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 115, 101, 116, 78, 97, 109, 101, 32, 58, 61, 32, 46, 82, 101, 112, 111, 114, 116, 46, 84, 101, 115, 116, 83, 101, 116, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 119, 114, 97, 112, 112, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 116, 101, 115, 116, 45, 115, 101, 116, 45, 116, 105, 116, 108, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 104, 51, 62, 123, 123, 36, 115, 101, 116, 78, 97, 109, 101, 125, 125, 60, 47, 104, 51, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 104, 101, 97, 100, 101, 114, 45, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 105, 110, 101, 45, 112, 97, 121, 108, 111, 97, 100, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 105, 110, 101, 34, 62, 76, 105, 110, 101, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 112, 97, 121, 108, 111, 97, 100, 34, 62, 80, 97, 121, 108, 111, 97, 100, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 118, 97, 108, 32, 58, 61, 32, 36, 46, 82, 101, 112, 111, 114, 116, 46, 84, 101, 115, 116, 83, 101, 116, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 119, 114, 97, 112, 112, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 115, 101, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 115, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 108, 111, 99, 97, 116, 105, 111, 110, 72, 101, 97, 100, 101, 
114, 32, 58, 61, 32, 36, 46, 82, 101, 112, 111, 114, 116, 46, 76, 111, 99, 97, 116, 105, 111, 110, 115, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 104, 101, 97, 100, 101, 114, 32, 114, 111, 116, 97, 116, 101, 34, 62, 123, 123, 36, 108, 111, 99, 97, 116, 105, 111, 110, 72, 101, 97, 100, 101, 114, 125, 125, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 102, 105, 108, 101, 32, 58, 61, 32, 36, 46, 82, 101, 112, 111, 114, 116, 46, 77, 97, 116, 114, 105, 120, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 45, 114, 111, 119, 32, 102, 105, 108, 101, 45, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 70, 105, 108, 101, 58, 32, 123, 123, 36, 102, 105, 108, 101, 46, 70, 105, 108, 101, 78, 97, 109, 101, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 105, 102, 32, 101, 113, 32, 40, 108, 101, 110, 32, 36, 102, 105, 108, 101, 46, 82, 111, 119, 82, 101, 112, 111, 114, 116, 41, 32, 48, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 62, 78, 111, 32, 102, 97, 105, 108, 101, 100, 32, 116, 101, 115, 116, 115, 60, 47, 112, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 114, 111, 119, 32, 58, 61, 32, 36, 102, 105, 108, 101, 46, 82, 111, 119, 82, 101, 112, 111, 114, 116, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 114, 101, 115, 117, 108, 116, 45, 114, 111, 119, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 105, 110, 101, 45, 112, 97, 121, 108, 111, 97, 100, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 105, 110, 101, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 36, 114, 111, 119, 46, 76, 105, 110, 101, 125, 
125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 112, 97, 121, 108, 111, 97, 100, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 112, 114, 101, 62, 123, 123, 36, 114, 111, 119, 46, 80, 97, 121, 108, 111, 97, 100, 125, 125, 60, 47, 112, 114, 101, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 115, 101, 116, 78, 97, 109, 101, 44, 32, 36, 115, 101, 116, 82, 101, 112, 111, 114, 116, 32, 58, 61, 32, 36, 114, 111, 119, 46, 83, 101, 116, 82, 101, 112, 111, 114, 116, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 119, 114, 97, 112, 112, 101, 114, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 32, 99, 108, 97, 115, 115, 61, 34, 115, 101, 116, 45, 108, 111, 99, 97, 116, 105, 111, 110, 115, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 114, 97, 110, 103, 101, 32, 36, 114, 101, 115, 117, 108, 116, 32, 58, 61, 32, 36, 115, 101, 116, 82, 101, 112, 111, 114, 116, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 100, 105, 118, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 99, 108, 97, 115, 115, 61, 34, 108, 111, 99, 97, 116, 105, 111, 110, 45, 114, 101, 115, 117, 108, 116, 32, 123, 123, 105, 102, 32, 101, 113, 32, 36, 114, 101, 115, 117, 108, 116, 32, 49, 125, 125, 102, 97, 105, 108, 123, 123, 101, 108, 115, 101, 32, 105, 102, 32, 101, 113, 32, 36, 114, 101, 115, 117, 108, 116, 32, 50, 125, 125, 105, 110, 118, 97, 108, 105, 100, 123, 123, 101, 108, 115, 101, 32, 105, 102, 32, 101, 113, 32, 36, 114, 101, 115, 117, 108, 116, 32, 51, 125, 125, 101, 114, 114, 111, 114, 123, 123, 101, 108, 115, 101, 125, 125, 112, 97, 115, 115, 123, 123, 101, 110, 100, 125, 125, 34, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 
101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 123, 123, 101, 110, 100, 32, 45, 125, 125, 10, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 32, 32, 32, 32, 60, 47, 100, 105, 118, 62, 10, 60, 47, 66, 79, 68, 89, 62, 10, 10, 60, 47, 72, 84, 77, 76, 62})\n}",
"func (o FluxConfigurationBlobStorageServicePrincipalOutput) ClientCertificateBase64() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FluxConfigurationBlobStorageServicePrincipal) *string { return v.ClientCertificateBase64 }).(pulumi.StringPtrOutput)\n}",
"func (col *Column) readBin(table *Table, icol int, irow int64, ptr interface{}) error {\n\tvar err error\n\n\trv := reflect.Indirect(reflect.ValueOf(ptr))\n\trt := reflect.TypeOf(rv.Interface())\n\n\tswitch rt.Kind() {\n\tcase reflect.Slice:\n\n\t\tbeg := table.rowsz*int(irow) + col.offset\n\t\tend := beg + col.dtype.dsize\n\t\trow := table.data[beg:end]\n\t\tbuf := bytes.NewBuffer(row)\n\t\tbdec := binary.NewDecoder(buf)\n\t\tbdec.Order = binary.BigEndian\n\n\t\tslice := reflect.ValueOf(ptr).Elem()\n\t\tnmax := 0\n\n\t\tswitch col.dtype.dsize {\n\t\tcase 8:\n\t\t\tvar n int32\n\t\t\tvar offset int32\n\t\t\terr = bdec.Decode(&n)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"fitsio: problem decoding slice 32b-length: %v\\n\", err)\n\t\t\t}\n\t\t\terr = bdec.Decode(&offset)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"fitsio: problem decoding slice 32b-offset: %v\\n\", err)\n\t\t\t}\n\t\t\tbeg = int(offset)\n\t\t\tend = beg + int(n)*int(col.dtype.gotype.Elem().Size())\n\t\t\tnmax = int(n)\n\n\t\tcase 16:\n\t\t\tvar n int64\n\t\t\tvar offset int64\n\t\t\terr = bdec.Decode(&n)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"fitsio: problem decoding slice 64b-length: %v\\n\", err)\n\t\t\t}\n\t\t\terr = bdec.Decode(&offset)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"fitsio: problem decoding slice 64b-offset: %v\\n\", err)\n\t\t\t}\n\t\t\tbeg = int(offset)\n\t\t\tend = beg + int(n)*int(col.dtype.gotype.Elem().Size())\n\t\t\tnmax = int(n)\n\t\t}\n\n\t\tbuf = bytes.NewBuffer(table.heap[beg:end])\n\t\tbdec = binary.NewDecoder(buf)\n\t\tbdec.Order = binary.BigEndian\n\n\t\tslice.SetLen(0)\n\t\tfor i := 0; i < nmax; i++ {\n\t\t\tvv := reflect.New(rt.Elem())\n\t\t\terr = bdec.Decode(vv.Interface())\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"fitsio: problem encoding: %v\", err)\n\t\t\t}\n\t\t\tslice = reflect.Append(slice, vv.Elem())\n\t\t}\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"fitsio: %v\\n\", err)\n\t\t}\n\t\trv := reflect.ValueOf(ptr)\n\t\trv.Elem().Set(slice)\n\n\tcase reflect.Array:\n\n\t\tbeg := table.rowsz*int(irow) + col.offset\n\t\tend := beg + (col.dtype.dsize * col.dtype.len)\n\t\trow := table.data[beg:end]\n\t\tbuf := bytes.NewBuffer(row)\n\t\tbdec := binary.NewDecoder(buf)\n\t\tbdec.Order = binary.BigEndian\n\n\t\terr = bdec.Decode(ptr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"fitsio: %v\\n\", err)\n\t\t}\n\n\tcase reflect.Bool,\n\t\treflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,\n\t\treflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,\n\t\treflect.Float32, reflect.Float64,\n\t\treflect.Complex64, reflect.Complex128:\n\n\t\t//scalar := true\n\t\t//err = col.decode(table, n, rt, rv, scalar)\n\n\t\tbeg := table.rowsz*int(irow) + col.offset\n\t\tend := beg + col.dtype.dsize\n\t\trow := table.data[beg:end]\n\t\tbuf := bytes.NewBuffer(row)\n\t\tbdec := binary.NewDecoder(buf)\n\t\tbdec.Order = binary.BigEndian\n\t\terr = bdec.Decode(ptr)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"fitsio: %v\\n\", err)\n\t\t}\n\n\tcase reflect.String:\n\n\t\t//scalar := true\n\t\t//err = col.decode(table, n, rt, rv, scalar)\n\n\t\tbeg := table.rowsz*int(irow) + col.offset\n\t\tend := beg + col.dtype.dsize\n\t\trow := table.data[beg:end]\n\t\tstr := \"\"\n\t\tif row[0] == '\\x00' {\n\t\t\tstr = string(row[1:])\n\t\t\tstr = strings.TrimRight(str, string([]byte(\"\\x00\")))\n\t\t} else {\n\t\t\tstr = string(row)\n\t\t}\n\n\t\trv.SetString(str)\n\n\tdefault:\n\t\treturn fmt.Errorf(\"fitsio: binary-table can not 
read/write %v\", rt.Kind())\n\t}\n\treturn err\n}",
"func ProcessRotateLeft(data []byte, amount int) []byte {\n\tout := make([]byte, len(data))\n\tfor i := range data {\n\t\tout[i] = bits.RotateLeft8(data[i], amount)\n\t}\n\treturn out\n}",
"func (s *TransactionRows) NextRaw() ([]byte, bool) {\n\tsnap, err := s.iter.Next()\n\tif err != nil {\n\t\ts.lastError = err\n\t\treturn nil, false\n\t}\n\tdata := snap.Data()\n\tb, err := json.Marshal(data)\n\tif err == nil {\n\t\treturn b, true\n\t}\n\treturn nil, false\n}",
"func encodeByteSequence(v [][]byte) []byte {\n\tvar hexstrings []string\n\tfor _, a := range v {\n\t\thexstrings = append(hexstrings, hexutil.Encode(a))\n\t}\n\treturn []byte(strings.Join(hexstrings, \",\"))\n}",
"func newRowIterator(tbl *DoltTable, ctx *sql.Context, partition *doltTablePartition) (*doltTableRowIter, error) {\n\trowData, err := tbl.table.GetRowData(ctx)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar mapIter types.MapIterator\n\tvar end types.LesserValuable\n\tif partition == nil {\n\t\tmapIter, err = rowData.BufferedIterator(ctx)\n\t} else {\n\t\tendIter, err := rowData.IteratorAt(ctx, partition.end)\n\n\t\tif err != nil && err != io.EOF {\n\t\t\treturn nil, err\n\t\t} else if err == nil {\n\t\t\tkeyTpl, _, err := endIter.Next(ctx)\n\n\t\t\tif err != nil && err != io.EOF {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tif keyTpl != nil {\n\t\t\t\tend, err = keyTpl.(types.Tuple).AsSlice()\n\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn nil, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tmapIter, err = rowData.BufferedIteratorAt(ctx, partition.start)\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &doltTableRowIter{table: tbl, rowData: rowData, ctx: ctx, nomsIter: mapIter, end: end, nbf: rowData.Format()}, nil\n}",
"func AddFakeAsciidoctorBinForDiagramsToPath(baseURL string) (fakeBinaryPath string, err error) {\n\n\tif runtime.GOOS == \"windows\" {\n\t\tlog.Warn(\"Can't apply asciidoctor diagram workaround on Windows\")\n\t\treturn \"\", nil\n\t}\n\n\turl, err := url.Parse(baseURL)\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, fmt.Sprintf(\"Error parsing base url %s\", baseURL))\n\t}\n\tpath := url.Path\n\n\t// Single slashes will add up to \"//\" which some webservers don't support\n\tif path == \"/\" {\n\t\tpath = \"\"\n\t}\n\tescapedPath := strings.ReplaceAll(path, \"/\", \"\\\\/\")\n\tescapedPath = strings.ReplaceAll(escapedPath, \"\\\"\", \"\\\\\\\"\")\n\n\t// Asciidoctor attributes: https://asciidoctor.org/docs/user-manual/#builtin-attributes\n\n\tshellscript := fmt.Sprintf(`#!/bin/bash\n\t# inspired by: https://zipproth.de/cheat-sheets/hugo-asciidoctor/#_how_to_make_hugo_use_asciidoctor_with_extensions\n\tset -e\n\n\t# Use first non fake-binary in path as asciidoctorbin\n\tad=$(which -a asciidoctor | grep -v monako_asciidoctor_fake_binary | head -n 1)\n\n\t# Use empty css to trick asciidoctor into using none without error\n\techo \"\" > empty.css\n\n\t# This trick only works with the relative dir workarounds\n\t$ad -B . \\\n\t\t-r asciidoctor-diagram \\\n\t\t-a nofooter \\\n\t\t-a stylesheet=empty.css \\\n\t\t--safe \\\n\t\t--trace \\\n\t\t- | sed -E -e \"s/img src=\\\"([^/]+)\\\"/img src=\\\"%s\\/diagram\\/\\1\\\"/\"\n\n\t# For some reason static is not parsed with integrated Hugo\n\tmkdir -p compose/public/diagram\n\t\n\t# Hopefully this will also be fixed by https://github.com/gohugoio/hugo/pull/6561\n\tif ls *.svg >/dev/null 2>&1; then\n\t mv -f *.svg compose/public/diagram\n\tfi\n\t\n\tif ls *.png >/dev/null 2>&1; then\n\t mv -f *.png compose/public/diagram\n\tfi\n\t`, escapedPath)\n\n\ttempDir := filepath.Join(os.TempDir(), \"monako_asciidoctor_fake_binary\")\n\terr = os.Mkdir(tempDir, os.FileMode(0700))\n\tif err != nil && !os.IsExist(err) {\n\t\treturn \"\", errors.Wrap(err, fmt.Sprintf(\"Error creating asciidoctor fake dir : %s\", tempDir))\n\t}\n\tfakeBinaryPath = filepath.Join(tempDir, \"asciidoctor\")\n\n\terr = ioutil.WriteFile(fakeBinaryPath, []byte(shellscript), os.FileMode(0700))\n\tif err != nil {\n\t\treturn \"\", errors.Wrap(err, fmt.Sprintf(\"Error creating asciidoctor fake binary: %s\", fakeBinaryPath))\n\t}\n\n\tos.Setenv(\"PATH\", tempDir+\":\"+os.Getenv(\"PATH\"))\n\n\tlog.Debugf(\"Added temporary binary %s to PATH %s\", fakeBinaryPath, os.Getenv(\"PATH\"))\n\n\treturn fakeBinaryPath, nil\n\n}"
] | [
"0.49601516",
"0.48495087",
"0.47244224",
"0.46970236",
"0.4649988",
"0.44896185",
"0.44832572",
"0.44635195",
"0.44426832",
"0.44039324",
"0.4394501",
"0.43936595",
"0.43771163",
"0.43657848",
"0.4308649",
"0.42926666",
"0.42898908",
"0.42886",
"0.42765093",
"0.42641222",
"0.42441922",
"0.42408934",
"0.4230654",
"0.42244905",
"0.421626",
"0.42119175",
"0.42026415",
"0.41976318",
"0.41942522",
"0.41925648",
"0.41880882",
"0.41819844",
"0.41803458",
"0.41531357",
"0.4150035",
"0.41459298",
"0.41458356",
"0.412588",
"0.4105201",
"0.40999097",
"0.40827426",
"0.40827426",
"0.40789106",
"0.4077847",
"0.40722048",
"0.4069407",
"0.40656424",
"0.4060291",
"0.4054851",
"0.40536466",
"0.4051984",
"0.40515068",
"0.4046526",
"0.40424585",
"0.40399164",
"0.40392599",
"0.40308788",
"0.4030689",
"0.40298763",
"0.4024757",
"0.40229532",
"0.40147564",
"0.40138492",
"0.40104845",
"0.40050426",
"0.40022603",
"0.39982763",
"0.39930215",
"0.39918247",
"0.3991699",
"0.39895973",
"0.39856493",
"0.39836267",
"0.39809257",
"0.39789146",
"0.39722395",
"0.39708927",
"0.39629665",
"0.39578816",
"0.39462975",
"0.39337346",
"0.3931511",
"0.3925027",
"0.39159",
"0.39142066",
"0.39070475",
"0.3906769",
"0.39040074",
"0.39024967",
"0.3899283",
"0.38972235",
"0.38933715",
"0.38884354",
"0.38868707",
"0.3875007",
"0.38738963",
"0.38715068",
"0.38681826",
"0.38648757",
"0.38636974"
] | 0.7066708 | 0 |
Ortho generates an Ortho Matrix. | func Ortho(left, right, bottom, top, near, far float64) Mat4 {
rml, tmb, fmn := (right - left), (top - bottom), (far - near)
return Mat4{float64(2. / rml), 0, 0, 0, 0, float64(2. / tmb), 0, 0, 0, 0, float64(-2. / fmn), 0, float64(-(right + left) / rml), float64(-(top + bottom) / tmb), float64(-(far + near) / fmn), 1}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func MatrixOrtho(left, right, bottom, top, near, far float32) Matrix {\n\tvar result Matrix\n\n\trl := right - left\n\ttb := top - bottom\n\tfn := far - near\n\n\tresult.M0 = 2.0 / rl\n\tresult.M1 = 0.0\n\tresult.M2 = 0.0\n\tresult.M3 = 0.0\n\tresult.M4 = 0.0\n\tresult.M5 = 2.0 / tb\n\tresult.M6 = 0.0\n\tresult.M7 = 0.0\n\tresult.M8 = 0.0\n\tresult.M9 = 0.0\n\tresult.M10 = -2.0 / fn\n\tresult.M11 = 0.0\n\tresult.M12 = -(left + right) / rl\n\tresult.M13 = -(top + bottom) / tb\n\tresult.M14 = -(far + near) / fn\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func NewMatrixOrtho(left, right, bottom, top, near, far float64) Matrix {\n\trl := (right - left)\n\ttb := (top - bottom)\n\tfn := (far - near)\n\treturn Matrix{\n\t\tM0: float32(2 / rl),\n\t\tM1: 0,\n\t\tM2: 0,\n\t\tM3: 0,\n\t\tM4: 0,\n\t\tM5: float32(2 / tb),\n\t\tM6: 0,\n\t\tM7: 0,\n\t\tM8: 0,\n\t\tM9: 0,\n\t\tM10: float32(-2 / fn),\n\t\tM11: 0,\n\t\tM12: float32(-(left + right) / rl),\n\t\tM13: float32(-(top + bottom) / tb),\n\t\tM14: float32(-(far + near) / fn),\n\t\tM15: 1,\n\t}\n}",
"func MakeOrtho(width, height int) [16]gl.Float {\n\treturn [16]gl.Float{\n\t\t2.0 / gl.Float(width), 0.0, 0.0, 0.0,\n\t\t0.0, 2.0 / gl.Float(height), 0.0, 0.0,\n\t\t0.0, 0.0, -1.0, 0.0,\n\t\t-1.0, -1.0, 0.0, 1.0}\n}",
"func (m *Mat4) Ortho(left, right, bottom, top, nearVal, farVal float64) *Mat4{\n\t*m = *Ortho4(left, right, bottom, top, nearVal, farVal)\n\n\treturn m\n}",
"func Ortho(left, right, bottom, top, nearVal, farVal gl.Float) *Mat4 {\n\tm := IdentMat4()\n\tm[0].X = 2.0 / (right - left)\n\tm[1].Y = 2.0 / (top - bottom)\n\tm[2].Z = -2.0 / (farVal - nearVal)\n\tm[3].X = -(right + left) / (right - left)\n\tm[3].Y = -(top + bottom) / (top - bottom)\n\tm[3].Z = -(farVal + nearVal) / (farVal - nearVal)\n\treturn m\n}",
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tC.glowOrtho(gpOrtho, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tsyscall.Syscall6(gpOrtho, 6, uintptr(math.Float64bits(left)), uintptr(math.Float64bits(right)), uintptr(math.Float64bits(bottom)), uintptr(math.Float64bits(top)), uintptr(math.Float64bits(zNear)), uintptr(math.Float64bits(zFar)))\n}",
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n C.glowOrtho(gpOrtho, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func (c *Camera) SetOrtho(left, right, bottom, top, near, far float32) {\n\tglm.OrthoIn(left, right, bottom, top, near, far, &c.Projection)\n}",
"func (c *Camera) SetOrtho(view image.Rectangle, near, far float64) {\n\tw := float64(view.Dx())\n\tw = float64(int((w / 2.0)) * 2)\n\th := float64(view.Dy())\n\th = float64(int((h / 2.0)) * 2)\n\tm := lmath.Mat4Ortho(0, w, 0, h, near, far)\n\tc.Projection = ConvertMat4(m)\n}",
"func MatrixOrthoSubProjection(projection Matrix4f, orthoScale Vector2f, orthoDistance, eyeViewAdjustX float32) Matrix4f {\n\treturn matrix4f(C.ovrMatrix4f_OrthoSubProjection(c_matrix4f(projection), c_vector2f(orthoScale), C.float(orthoDistance), C.float(eyeViewAdjustX)))\n}",
"func Ortho2D(left, right, bottom, top float32) Mat4 {\n\treturn Ortho(left, right, bottom, top, -1, 1)\n}",
"func Ortho2D(left, right, bottom, top float64) Mat4 {\n\treturn Ortho(left, right, bottom, top, -1, 1)\n}",
"func NewMatrix4Orthographic(left, right, top, bottom, near, far float32) *Matrix4 {\n\tma := NewDefaultMatrix4()\n\tma.MakeOrthographic(left, right, top, bottom, near, far)\n\treturn ma\n}",
"func Ortho4(left, right, bottom, top, nearVal, farVal float64) (m *Mat4) {\n\ttx := -(right + left) / (right - left)\n\tty := -(top + bottom) / (top - bottom)\n\ttz := -(farVal + nearVal) / (farVal - nearVal)\n\n\tm = (*Mat4)(&[16]float64{2/(right-left), 0, 0, 0,\n 0, 2/(top-bottom), 0, 0,\n 0, 0, -2/(farVal-nearVal), 0,\n\t tx, ty, tz, 1})\n\n\treturn m;\n}",
"func (v Vector) IsOrth(w Vector) bool {\n\treturn v.Dot(w) == 0\n}",
"func (c *Context) SetOrtho2DProjection(windowWidth int, windowHeight int, screenScale float32, centered bool) {\n\tvar left, right, top, bottom float32\n\tif centered {\n\t\t// 0,0 is placed at the center of the window\n\t\thalfWidth := float32(windowWidth) / 2 / screenScale\n\t\thalfHeight := float32(windowHeight) / 2 / screenScale\n\t\tleft = -halfWidth\n\t\tright = halfWidth\n\t\ttop = halfHeight\n\t\tbottom = -halfHeight\n\t} else {\n\t\tleft = 0\n\t\tright = float32(windowWidth)\n\t\ttop = float32(windowHeight)\n\t\tbottom = 0\n\t}\n\tc.projectionMatrix = mgl32.Ortho(left, right, top, bottom, 1, -1)\n}",
"func (t *Dense) oshape() Shape {\n\tif t.old != nil {\n\t\treturn t.old.Shape()\n\t}\n\treturn t.Shape()\n}",
"func MatrixProjection(fov FovPort, znear, zfar float32, rightHanded bool) Matrix4f {\n\tif rightHanded {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 1))\n\t} else {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 0))\n\t}\n}",
"func orIdentity(a *bmatrix) {\n\tfor i := 0; i < n; i++ {\n\t\ta[i][i] = true\n\t}\n}",
"func GenerateFull(n, m int) Matrix {\n\tout := GenerateEmpty(n, m)\n\n\tfor i, _ := range out {\n\t\tfor j, _ := range out[i] {\n\t\t\tout[i][j].SetOne()\n\t\t}\n\t}\n\n\treturn out\n}",
"func ExampleHorizJoinDense() {\n\n\tfilename := \"ex1data2.txt\"\n\torig := mat.NewDense(CsvToArray(path.Join(\"testdata\", filename)))\n\tor, oc := orig.Dims()\n\t// assign Y\n\tvar Y mat.VecDense\n\tY.CloneVec(orig.ColView(oc - 1))\n\t// assign Y\n\tones := mat.NewVecDense(or, Ones(or))\n\tX := HorizJoinDense(ones, orig.Slice(0, or, 0, oc-1)) //X shape is: 'or by (oc)'\n\n\tfx := mat.Formatted(X, mat.Prefix(\"\"), mat.Squeeze())\n\tfyt := mat.Formatted(Y.T(), mat.Prefix(\"\"), mat.Squeeze())\n\tfmt.Println(fx)\n\tfmt.Println(fyt)\n\t//output:\n\t// ⎡1 2104 3⎤\n\t// ⎢1 1600 3⎥\n\t// ⎢1 2400 3⎥\n\t// ⎢1 1416 2⎥\n\t// ⎢1 3000 4⎥\n\t// ⎢1 1985 4⎥\n\t// ⎢1 1534 3⎥\n\t// ⎢1 1427 3⎥\n\t// ⎢1 1380 3⎥\n\t// ⎢1 1494 3⎥\n\t// ⎢1 1940 4⎥\n\t// ⎢1 2000 3⎥\n\t// ⎢1 1890 3⎥\n\t// ⎢1 4478 5⎥\n\t// ⎢1 1268 3⎥\n\t// ⎢1 2300 4⎥\n\t// ⎢1 1320 2⎥\n\t// ⎢1 1236 3⎥\n\t// ⎢1 2609 4⎥\n\t// ⎢1 3031 4⎥\n\t// ⎢1 1767 3⎥\n\t// ⎢1 1888 2⎥\n\t// ⎢1 1604 3⎥\n\t// ⎢1 1962 4⎥\n\t// ⎢1 3890 3⎥\n\t// ⎢1 1100 3⎥\n\t// ⎢1 1458 3⎥\n\t// ⎢1 2526 3⎥\n\t// ⎢1 2200 3⎥\n\t// ⎢1 2637 3⎥\n\t// ⎢1 1839 2⎥\n\t// ⎢1 1000 1⎥\n\t// ⎢1 2040 4⎥\n\t// ⎢1 3137 3⎥\n\t// ⎢1 1811 4⎥\n\t// ⎢1 1437 3⎥\n\t// ⎢1 1239 3⎥\n\t// ⎢1 2132 4⎥\n\t// ⎢1 4215 4⎥\n\t// ⎢1 2162 4⎥\n\t// ⎢1 1664 2⎥\n\t// ⎢1 2238 3⎥\n\t// ⎢1 2567 4⎥\n\t// ⎢1 1200 3⎥\n\t// ⎢1 852 2⎥\n\t// ⎢1 1852 4⎥\n\t// ⎣1 1203 3⎦\n\t//[399900 329900 369000 232000 539900 299900 314900 198999 212000 242500 239999 347000 329999 699900 259900 449900 299900 199900 499998 599000 252900 255000 242900 259900 573900 249900 464500 469000 475000 299900 349900 169900 314900 579900 285900 249900 229900 345000 549000 287000 368500 329900 314000 299000 179900 299900 239500]\n}",
"func GetOrmer() orm.Ormer {\n\tonce.Do(func() {\n\t\t// override the default value(1000) to return all records when setting no limit\n\t\torm.DefaultRowsLimit = -1\n\t\tglobalOrm = orm.NewOrm()\n\t})\n\treturn globalOrm\n}",
"func Or(ee ...TemporalExpression) OrExpression {\n\treturn OrExpression{ee}\n}",
"func Orient(pts ...[2]float64) int8 {\n\tif len(pts) < 3 {\n\t\treturn 0\n\t}\n\tvar (\n\t\tsum = 0.0\n\t\tdop = 0.0\n\t\tli = len(pts) - 1\n\t)\n\n\tif debug {\n\t\tlog.Printf(\"pts: %v\", pts)\n\t}\n\tfor i := range pts {\n\t\tdop = (pts[li][0] * pts[i][1]) - (pts[i][0] * pts[li][1])\n\t\tsum += dop\n\t\tif debug {\n\t\t\tlog.Printf(\"sum(%v,%v): %g -- %g\", li, i, sum, dop)\n\t\t}\n\t\tli = i\n\t}\n\tswitch {\n\tcase sum == 0:\n\t\treturn 0\n\tcase sum < 0:\n\t\treturn -1\n\tdefault:\n\t\treturn 1\n\t}\n}",
"func (c *Context) ORW(imr, amr operand.Op) {\n\tc.addinstruction(x86.ORW(imr, amr))\n}",
"func TSWorldMatrix(index int) TRANSFORMSTATETYPE {\n\treturn TRANSFORMSTATETYPE(index + 256)\n}",
"func (session *Session) Or(query interface{}, args ...interface{}) *Session {\n\tsession.Statement.Or(query, args...)\n\treturn session\n}",
"func (dwr *DifferentialWheeledRobot) OdometryPosition(pulsesLeft, pulsesRight int, prev Position) Position {\n\tdistancePerPulse := 2 * dwr.WheelRadius * math.Pi / float64(dwr.OdometryPPR)\n\treturn dwr.RollPosition(distancePerPulse*float64(pulsesLeft), distancePerPulse*float64(pulsesRight), prev)\n}",
"func NewOrbitFromOE(a, e, i, Ω, ω, ν float64, c CelestialObject) *Orbit {\n\t// Convert angles to radians\n\ti = i * deg2rad\n\tΩ = Ω * deg2rad\n\tω = ω * deg2rad\n\tν = ν * deg2rad\n\n\t// Algorithm from Vallado, 4th edition, page 118 (COE2RV).\n\tif e < eccentricityε {\n\t\t// Circular...\n\t\tif i < angleε {\n\t\t\t// ... equatorial\n\t\t\tΩ = 0\n\t\t\tω = 0\n\t\t\tν = math.Mod(ω+Ω+ν, 2*math.Pi)\n\t\t} else {\n\t\t\t// ... inclined\n\t\t\tω = 0\n\t\t\tν = math.Mod(ν+ω, 2*math.Pi)\n\t\t}\n\t} else if i < angleε && !(c.Equals(Sun) && config.meeus) {\n\t\t// Meeus breaks if doing this correction by Vallado\n\t\t// Elliptical equatorial\n\t\tΩ = 0\n\t\tω = math.Mod(ω+Ω, 2*math.Pi)\n\t}\n\tp := a * (1 - e*e)\n\tif floats.EqualWithinAbs(e, 1, eccentricityε) || e > 1 {\n\t\tpanic(\"[ERROR] should initialize parabolic or hyperbolic orbits with R, V\")\n\t}\n\tμOp := math.Sqrt(c.μ / p)\n\tsinν, cosν := math.Sincos(ν)\n\trPQW := []float64{p * cosν / (1 + e*cosν), p * sinν / (1 + e*cosν), 0}\n\tvPQW := []float64{-μOp * sinν, μOp * (e + cosν), 0}\n\trIJK := Rot313Vec(-ω, -i, -Ω, rPQW)\n\tvIJK := Rot313Vec(-ω, -i, -Ω, vPQW)\n\torbit := Orbit{rIJK, vIJK, c, a, e, i, Ω, ω, ν, 0, 0, 0, 0.0}\n\torbit.Elements()\n\treturn &orbit\n}",
"func Ones(m, n int) *Matrix {\n\tA := Zeros(m, n)\n\tfor i := range(A.data) {\n\t\tA.data[i] = 1.0\n\t}\n\treturn A\n}",
"func (session *Session) Or(query interface{}, args ...interface{}) *Session {\n\tsession.Session = session.Session.Or(query, args...)\n\treturn session\n}",
"func Perspective(fovy, aspect, zNear, zFar gl.Float) *Mat4 {\n\tf := 1 / (TanGL(fovy / 2.0))\n\tm := IdentMat4()\n\tm[0].X = f / aspect\n\tm[1].Y = f\n\tm[2].Z = (zFar + zNear) / (zNear - zFar)\n\tm[3].W = 0\n\tm[2].W = -1\n\tm[3].Z = (2 * zFar * zNear) / (zNear - zFar)\n\treturn m\n}",
"func ORW(imr, amr operand.Op) { ctx.ORW(imr, amr) }",
"func ORL(imr, emr operand.Op) { ctx.ORL(imr, emr) }",
"func newOrExpr(lhs, rhs Expr) Expr {\n\t// Compute constant if both sides are constant.\n\tif lhs, ok := lhs.(*ConstantExpr); ok {\n\t\tif rhs, ok := rhs.(*ConstantExpr); ok {\n\t\t\treturn lhs.Or(rhs)\n\t\t}\n\t}\n\n\t// If constant is on left side, swap to right side.\n\tif IsConstantExpr(lhs) && !IsConstantExpr(rhs) {\n\t\tlhs, rhs = rhs, lhs\n\t}\n\n\t// Optimize for if constant is all ones or zeros.\n\tif rhs, ok := rhs.(*ConstantExpr); ok {\n\t\tif rhs.IsAllOnes() {\n\t\t\treturn rhs\n\t\t} else if rhs.Value == 0 {\n\t\t\treturn lhs\n\t\t}\n\t}\n\treturn &BinaryExpr{Op: OR, LHS: lhs, RHS: rhs}\n}",
"func MatrixMode(mode uint32) {\n C.glowMatrixMode(gpMatrixMode, (C.GLenum)(mode))\n}",
"func Or(predicates ...predicate.OnlineSession) predicate.OnlineSession {\n\treturn predicate.OnlineSession(func(s *sql.Selector) {\n\t\ts1 := s.Clone().SetP(nil)\n\t\tfor i, p := range predicates {\n\t\t\tif i > 0 {\n\t\t\t\ts1.Or()\n\t\t\t}\n\t\t\tp(s1)\n\t\t}\n\t\ts.Where(s1.P())\n\t})\n}",
"func (m *Message) ORO() (*ORO, error) {\n\tps, err := m.Parse(\"ORO\")\n\tpst, ok := ps.(*ORO)\n\tif ok {\n\t\treturn pst, err\n\t}\n\treturn nil, err\n}",
"func (q *Query) Or(n int) *Query {\n\tq.headers = append(q.headers, fmt.Sprintf(\"Or: %d\", n))\n\treturn q\n}",
"func Identity() Matrix {\n\treturn Matrix{1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1}\n}",
"func (c *Context) ORL(imr, emr operand.Op) {\n\tc.addinstruction(x86.ORL(imr, emr))\n}",
"func NewGameWorld(gameWorldTemplate [][]string) *GameWorld {\n\tgw := GameWorld{}\n\n\t//fill gameArea with defautlvalue\n\tcount := 0\n\tgw.gameArea = make([][]*GameTile, len(gameWorldTemplate))\n\tfor i := 0; i < len(gw.gameArea); i++ {\n\t\tgw.gameArea[i] = make([]*GameTile, len(gameWorldTemplate[i]))\n\n\t\tfor j := 0; j < len(gw.gameArea[i]); j++ {\n\t\t\t//create tile\n\t\t\tgw.gameArea[i][j] = NewDefaultTile(count, gameWorldTemplate[i][j])\n\t\t\tcount++\n\t\t}\n\t}\n\n\t//set connections\n\tfor i := 0; i < len(gw.gameArea); i++ {\n\t\tfor j := 0; j < len(gw.gameArea[i]); j++ {\n\t\t\t//set connections\n\t\t\tcurrentTile := gw.gameArea[i][j]\n\n\t\t\t//set connection up\n\t\t\tif (i - 1) > 0 {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i-1][j], Up)\n\t\t\t\tgw.GameArea()[i-1][j].SetConnetionTile(currentTile, Down)\n\t\t\t}\n\n\t\t\t//set connection right\n\t\t\tif (j + 1) < len(gw.gameArea[i]) {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i][j+1], Right)\n\t\t\t\tgw.gameArea[i][j+1].SetConnetionTile(currentTile, Left)\n\t\t\t}\n\n\t\t\t//set connection down\n\t\t\tif (i + 1) < len(gw.gameArea) {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i+1][j], Down)\n\t\t\t\tgw.gameArea[i+1][j].SetConnetionTile(currentTile, Up)\n\t\t\t}\n\n\t\t\t//set connection left\n\t\t\tif (j - 1) > 0 {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i][j-1], Left)\n\t\t\t\tgw.gameArea[i][j-1].SetConnetionTile(currentTile, Right)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &gw\n}",
"func (objx *CoderSession) Or(query interface{}, args ...interface{}) *CoderSession {\n\treturn (*CoderSession)(((*xorm.Session)(objx)).Or(query, args...))\n}",
"func ORPS(mx, x operand.Op) { ctx.ORPS(mx, x) }",
"func NewOmise() *Omise {\n\treturn &Omise{}\n}",
"func Or(disjuncts []TermT) TermT {\n\tcount := C.uint32_t(len(disjuncts))\n\t//iam: FIXME need to unify the yices errors and the go errors...\n\tif count == 0 {\n\t\treturn TermT(C.yices_false())\n\t}\n\treturn TermT(C.yices_or(count, (*C.term_t)(&disjuncts[0])))\n}",
"func (p Perspective) Opposite() Perspective {\n\treturn 3 - p\n}",
"func Or(predicates ...predicate.OfflineSession) predicate.OfflineSession {\n\treturn predicate.OfflineSession(func(s *sql.Selector) {\n\t\ts1 := s.Clone().SetP(nil)\n\t\tfor i, p := range predicates {\n\t\t\tif i > 0 {\n\t\t\t\ts1.Or()\n\t\t\t}\n\t\t\tp(s1)\n\t\t}\n\t\ts.Where(s1.P())\n\t})\n}",
"func Or(operands ...Operand) OrOperator {\n\treturn OrOperator{\n\t\toperands,\n\t}\n}",
"func (g *Grammar) Or(terms ...Term) (out Term) {\n\tname := fmt.Sprintf(\"OR_%v\", terms)\n\trule := g.Lookup(name)\n\tfor _, t := range terms {\n\t\trule.Productions = append(rule.Productions, Production{t})\n\t}\n\tg.rules[name] = rule\n\treturn rule\n}",
"func NewHodor(router Router) *Hodor {\n\th := &Hodor{\n\t\trouter: router,\n\t\tfilter: emptyFilter,\n\t}\n\treturn h\n}",
"func (s *Server) ListOrgs() gin.HandlerFunc {\n\treturn handler(func(c *gin.Context) error {\n\t\trooms, err := getRooms(c)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\torgs, err := database.ListOrgs(s.db, rooms...)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tlist := make([]*orgLevel, len(orgs))\n\t\tfor i := range orgs {\n\t\t\tlist[i], err = getOrgLevel(c, orgs[i])\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tc.JSON(http.StatusOK, list)\n\t\treturn nil\n\t})\n}",
"func (grid *SquareGrid) ToWorld(c, r float64) (float64, float64) {\n\tworld := grid.toWorldMat.Mul2x1(mgl64.Vec2{c, r})\n\treturn world.X(), world.Y()\n}",
"func (self *TStatement) Or(query string, args ...interface{}) *TStatement {\r\n\tself.Op(domain.OR_OPERATOR, query, args...)\r\n\treturn self\r\n}",
"func Xor(xorjuncts []TermT) TermT {\n\tcount := C.uint32_t(len(xorjuncts))\n\t//iam: FIXME need to unify the yices errors and the go errors...\n\tif count == 0 {\n\t\t//FIXME what is xor of an empty array\n\t\tvar dummy = C.yices_true()\n\t\treturn TermT(C.yices_xor(count, &dummy))\n\t}\n\treturn TermT(C.yices_xor(count, (*C.term_t)(&xorjuncts[0])))\n}",
"func (oe *OrExpression) Or(e TemporalExpression) {\n\toe.ee = append(oe.ee, e)\n}",
"func (leg *Leg) Matrix() math3d.Matrix44 {\n\treturn *math3d.MakeMatrix44(*leg.Origin, *math3d.MakeSingularEulerAngle(math3d.RotationHeading, leg.Angle))\n}",
"func (pc *perspectiveCameraImp) MatrixWorldInverse() *threejs.Matrix4 {\n\treturn &threejs.Matrix4{Value: pc.JSValue().Get(\"matrixWorldInverse\")}\n}",
"func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"LogicalOr\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func (o Orbit) HNorm() float64 {\n\treturn o.RNorm() * o.VNorm() * o.CosΦfpa()\n}",
"func GenerateIdentity(n int) Matrix {\n\treturn GeneratePartialIdentity(n, IgnoreNoRows)\n}",
"func (d *DB) OrderMatrix(orderID string) OrderMatrix {\n\to, _ := d.OrderByID(orderID)\n\treturn loadOrderMatrix(*d, o)\n}",
"func MatrixLogarithm(scope *Scope, input tf.Output) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"MatrixLogarithm\",\n\t\tInput: []tf.Input{\n\t\t\tinput,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func Or(lhs, rhs Expression, ops ...Expression) Expression {\n\tfolded := foldLeft(or, append([]Expression{lhs, rhs}, ops...)...)\n\tif folded != nil {\n\t\treturn folded\n\t}\n\treturn NewLiteral(false)\n}",
"func Or(a, b Dense) Dense {\n\tshort, long := a, b\n\tif b.len < a.len {\n\t\tshort, long = b, a\n\t}\n\trLen := long.len\n\tif short.negated {\n\t\trLen = short.len\n\t}\n\tr := Dense{\n\t\tbits: make([]byte, 0, BytesFor(rLen)),\n\t\tlen: rLen,\n\t\tnegated: a.negated || b.negated,\n\t}\n\tfor i := range short.bits {\n\t\tr.bits = append(r.bits, a.bits[i]|b.bits[i])\n\t}\n\tif !short.negated {\n\t\tfor i := len(short.bits); i < len(long.bits); i++ {\n\t\t\tr.bits = append(r.bits, long.bits[i])\n\t\t}\n\t}\n\treturn r\n}",
"func Or(patts ...Pattern) Pattern {\n\tif len(patts) <= 0 {\n\t\treturn &EmptyNode{}\n\t}\n\n\t// optimization: make or right associative\n\tacc := patts[len(patts)-1]\n\tfor i := len(patts) - 2; i >= 0; i-- {\n\t\tacc = &AltNode{\n\t\t\tLeft: patts[i],\n\t\t\tRight: acc,\n\t\t}\n\t}\n\n\treturn acc\n}",
"func (m *Message) AllORO() ([]*ORO, error) {\n\tpss, err := m.ParseAll(\"ORO\")\n\treturn pss.([]*ORO), err\n}",
"func Or(subs ...Formula) Formula {\n\treturn or(subs)\n}",
"func (q *Query) Or() *Query {\n\tq.logicalOr = true\n\treturn q\n}",
"func GenerateEmpty(n, m int) Matrix {\n\tout := make([]Row, n)\n\n\tfor i := 0; i < n; i++ {\n\t\tout[i] = NewRow(m)\n\t}\n\n\treturn Matrix(out)\n}",
"func (p Point) OrthogonalAdjacent() Area {\n\treturn []Point{\n\t\tPoint{p.X + 1, p.Y}, // east\n\t\tPoint{p.X - 1, p.Y}, // west\n\t\tPoint{p.X, p.Y + 1}, // north\n\t\tPoint{p.X, p.Y - 1}, // south\n\t}\n}",
"func NewLinbo3Photoelastic() Tensor4 {\n\treturn NewTrigonalPhotoelasticTensor(-0.021, 0.06, 0.172, 0.141, 0.118, -0.052, 0.109, 0.121)\n}",
"func NewOr(x, y Constant) *ExprOr {\n\treturn &ExprOr{\n\t\tX: x,\n\t\tY: y,\n\t}\n}",
"func Or(predicates ...predicate.RoomStatus) predicate.RoomStatus {\n\treturn predicate.RoomStatus(func(s *sql.Selector) {\n\t\ts1 := s.Clone().SetP(nil)\n\t\tfor i, p := range predicates {\n\t\t\tif i > 0 {\n\t\t\t\ts1.Or()\n\t\t\t}\n\t\t\tp(s1)\n\t\t}\n\t\ts.Where(s1.P())\n\t})\n}",
"func (o WebAclRuleStatementAndStatementStatementOrStatementStatementOutput) OrStatement() WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementPtrOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementAndStatementStatementOrStatementStatement) *WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatement {\n\t\treturn v.OrStatement\n\t}).(WebAclRuleStatementAndStatementStatementOrStatementStatementOrStatementPtrOutput)\n}",
"func (o AuthenticationRuleOutput) Oauth() OAuthRequirementsPtrOutput {\n\treturn o.ApplyT(func(v AuthenticationRule) *OAuthRequirements { return v.Oauth }).(OAuthRequirementsPtrOutput)\n}",
"func (m Matrix) Transpose() Matrix {\n\tfor i := 0; i < 3; i++ {\n\t\tfor j := i + 1; j < 4; j++ {\n\t\t\tm[i][j], m[j][i] = m[j][i], m[i][j]\n\t\t}\n\t}\n\treturn m\n}",
"func Identity() *Mtx {\n\treturn NewMat(\n\t\t1, 0, 0, 0,\n\t\t0, 1, 0, 0,\n\t\t0, 0, 1, 0,\n\t\t0, 0, 0, 1,\n\t)\n}",
"func (t *TriDense) T() Matrix {\n\treturn Transpose{t}\n}",
"func (matrix Matrix4) Transposed() Matrix4 {\n\n\tnew := NewMatrix4()\n\n\tfor i := 0; i < 4; i++ {\n\t\tfor j := 0; j < 4; j++ {\n\t\t\tnew[i][j] = matrix[j][i]\n\t\t}\n\t}\n\n\treturn new\n\n}",
"func (db *DB) Or(query interface{}, args ...interface{}) (tx *DB) {\n\ttx = db.getInstance()\n\tif conds := tx.Statement.BuildCondition(query, args...); len(conds) > 0 {\n\t\ttx.Statement.AddClause(clause.Where{Exprs: []clause.Expression{clause.Or(clause.And(conds...))}})\n\t}\n\treturn\n}",
"func (o WebAclRuleStatementOrStatementStatementOutput) OrStatement() WebAclRuleStatementOrStatementStatementOrStatementPtrOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatement) *WebAclRuleStatementOrStatementStatementOrStatement {\n\t\treturn v.OrStatement\n\t}).(WebAclRuleStatementOrStatementStatementOrStatementPtrOutput)\n}",
"func or(e ...semantic.Expression) semantic.Expression {\n\tvar out semantic.Expression\n\tfor _, e := range e {\n\t\tswitch {\n\t\tcase e == nil:\n\t\t\t// skip\n\t\tcase out == nil:\n\t\t\tout = e\n\t\tdefault:\n\t\t\tout = &semantic.BinaryOp{Type: semantic.BoolType, LHS: out, Operator: ast.OpOr, RHS: e}\n\t\t}\n\t}\n\treturn out\n}",
"func Or(predicates ...predicate.Menugroup) predicate.Menugroup {\n\treturn predicate.Menugroup(func(s *sql.Selector) {\n\t\ts1 := s.Clone().SetP(nil)\n\t\tfor i, p := range predicates {\n\t\t\tif i > 0 {\n\t\t\t\ts1.Or()\n\t\t\t}\n\t\t\tp(s1)\n\t\t}\n\t\ts.Where(s1.P())\n\t})\n}",
"func buildMatrix(dataShards, totalShards int) matrix {\n\t// Start with a Vandermonde matrix. This matrix would work, in theory, but\n\t// doesn't have the property that the data shards are unchanged after\n\t// encoding.\n\tvm := vandermonde(totalShards, dataShards)\n\n\t// Multiply by the inverse of the top square of the matrix. This will make\n\t// the top square be the identity matrix, but preserve the property that any\n\t// square subset of rows is invertible.\n\ttop := vm.SubMatrix(0, 0, dataShards, dataShards)\n\ttopInv, _ := top.Invert()\n\treturn vm.Multiply(topInv)\n}",
"func (c *CSC) ToCOO() *COO {\n\trows := make([]int, c.NNZ())\n\tcols := make([]int, c.NNZ())\n\tdata := make([]float64, c.NNZ())\n\n\tfor i := 0; i < len(c.matrix.Indptr)-1; i++ {\n\t\tfor j := c.matrix.Indptr[i]; j < c.matrix.Indptr[i+1]; j++ {\n\t\t\tcols[j] = i\n\t\t}\n\t}\n\tcopy(rows, c.matrix.Ind)\n\tcopy(data, c.matrix.Data)\n\n\tcoo := NewCOO(c.matrix.J, c.matrix.I, rows, cols, data)\n\treturn coo\n}",
"func (o WebAclRuleStatementOrStatementStatementAndStatementStatementOutput) OrStatement() WebAclRuleStatementOrStatementStatementAndStatementStatementOrStatementPtrOutput {\n\treturn o.ApplyT(func(v WebAclRuleStatementOrStatementStatementAndStatementStatement) *WebAclRuleStatementOrStatementStatementAndStatementStatementOrStatement {\n\t\treturn v.OrStatement\n\t}).(WebAclRuleStatementOrStatementStatementAndStatementStatementOrStatementPtrOutput)\n}",
"func (e *ConstantExpr) Or(other *ConstantExpr) *ConstantExpr {\n\tassert(e.Width == other.Width, \"or: width mismatch: %d != %d\", e.Width, other.Width)\n\treturn NewConstantExpr(e.Value|other.Value, e.Width)\n}",
"func ORB(imr, amr operand.Op) { ctx.ORB(imr, amr) }",
"func initOpenGL() uint32 {\n\tif err := gl.Init(); err != nil {\n\t\tpanic(err)\n\t}\n\tversion := gl.GoStr(gl.GetString(gl.VERSION))\n\tlog.Println(\"OpenGL version\", version)\n\n\tvar vertexShaderSource string\n\tvar fragmentShaderSource string\n\n\tvertexShaderSource = `\n\t#version 410\n\tlayout (location=0) in vec3 position;\n\tlayout (location=1) in vec2 texcoord;\n\tout vec2 tCoord;\n\tuniform mat4 projection;\n\tuniform mat4 world;\n\tuniform mat4 view;\n\tuniform vec2 texScale;\n\tuniform vec2 texOffset;\n\tvoid main() {\n\t\tgl_Position = projection * world * vec4(position, 1.0);\n\t\ttCoord = (texcoord+texOffset) * texScale;\n\t}\n\t` + \"\\x00\"\n\t//gl_Position = vec4(position, 10.0, 1.0) * camera * projection;\n\n\tfragmentShaderSource = `\n\t#version 410\n\tin vec2 tCoord;\n\tout vec4 frag_colour;\n\tuniform sampler2D ourTexture;\n\tuniform vec4 color;\n\tvoid main() {\n\t\t\tfrag_colour = texture(ourTexture, tCoord) * color;\n\t}\n\t` + \"\\x00\"\n\n\tprog := CreateProgram(vertexShaderSource, fragmentShaderSource)\n\n\tgl.UseProgram(prog)\n\tgl.Uniform2f(\n\t\tgl.GetUniformLocation(prog, gl.Str(\"texScale\\x00\")),\n\t\t1.0, 1.0,\n\t)\n\tgl.Uniform4f(\n\t\tgl.GetUniformLocation(prog, gl.Str(\"color\\x00\")),\n\t\t1, 1, 1, 1,\n\t)\n\n\t// line opengl program\n\tvertexShaderSource = `\n\t#version 330 core\n\tlayout (location = 0) in vec3 aPos;\n\tuniform mat4 uProjection;\n\tuniform mat4 uWorld;\n\n\tvoid main()\n\t{\n\t gl_Position = uProjection * vec4(aPos, 1.0);\n\t}` + \"\\x00\"\n\n\tfragmentShaderSource = `\n\t#version 330 core\n\tout vec4 FragColor;\n\tuniform vec3 uColor;\n\n\tvoid main()\n\t{\n\t FragColor = vec4(uColor, 1.0f);\n\t}` + \"\\x00\"\n\n\tlineProgram = CreateProgram(vertexShaderSource, fragmentShaderSource)\n\n\treturn prog\n}",
"func Perspective(fovy, aspect, near, far float64) Mat4 {\n\t// fovy = (fovy * math.Pi) / 180.0 // convert from degrees to radians\n\tnmf, f := near-far, float64(1./math.Tan(float64(fovy)/2.0))\n\n\treturn Mat4{float64(f / aspect), 0, 0, 0, 0, float64(f), 0, 0, 0, 0, float64((near + far) / nmf), -1, 0, 0, float64((2. * far * near) / nmf), 0}\n}",
"func (t1 *Tensor) Transpose() *Tensor {\n\tret := NewTensor(t1.Size.Y, t1.Size.X, t1.Size.Z)\n\tfor z := 0; z < t1.Size.Z; z++ {\n\t\tfor y := 0; y < t1.Size.Y; y++ {\n\t\t\tfor x := 0; x < t1.Size.X; x++ {\n\t\t\t\tret.Set(y, x, z, t1.Get(x, y, z))\n\t\t\t}\n\t\t}\n\t}\n\treturn ret\n}",
"func main() {\n\tvar (\n\t\tdbc *obinary.DBClient\n\t\terr error\n\t)\n\n\tgo func() {\n\t\tlog.Println(http.ListenAndServe(\"localhost:6060\", nil))\n\t}()\n\n\t/* ---[ set ogl log level ]--- */\n\togl.SetLevel(ogl.NORMAL)\n\n\ttestType := \"dataOnly\"\n\n\tif len(os.Args) > 1 {\n\t\tif os.Args[1] == \"full\" || os.Args[1] == \"create\" {\n\t\t\ttestType = os.Args[1]\n\t\t}\n\t}\n\n\tdbc, err = obinary.NewDBClient(obinary.ClientOptions{})\n\tOk(err)\n\tdefer dbc.Close()\n\n\t/* ---[ run clean up in case of panics ]--- */\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\togl.Warn(\">> >> >> >> PANIC CAUGHT ----> cleanup called\") // DEBUG\n\t\t\tcleanUp(dbc, testType == \"full\")\n\t\t\tos.Exit(1)\n\t\t}\n\t}()\n\n\t/* ---[ Use \"native\" API ]--- */\n\tcreateOgonoriTestDB(dbc, adminUser, adminPassw, testType != \"dataOnly\")\n\tdefer cleanUp(dbc, testType == \"full\")\n\n\t// document database tests\n\togl.SetLevel(ogl.NORMAL)\n\tdbCommandsNativeAPI(dbc, testType != \"dataOnly\")\n\tif testType == \"full\" {\n\t\togl.SetLevel(ogl.WARN)\n\t\tdbClusterCommandsNativeAPI(dbc)\n\t}\n\n\t/* ---[ Use Go database/sql API on Document DB ]--- */\n\togl.SetLevel(ogl.WARN)\n\tconxStr := \"admin@admin:localhost/ogonoriTest\"\n\tdatabaseSqlAPI(conxStr)\n\tdatabaseSqlPreparedStmtAPI(conxStr)\n\n\t/* ---[ Graph DB ]--- */\n\t// graph database tests\n\togl.SetLevel(ogl.NORMAL)\n\tgraphCommandsNativeAPI(dbc, testType != \"dataOnly\")\n\n\t//\n\t// experimenting with JSON functionality\n\t//\n\t// ogl.Println(\"-------- JSON ---------\")\n\t// fld := oschema.OField{int32(44), \"foo\", oschema.LONG, int64(33341234)}\n\t// bsjson, err := fld.ToJSON()\n\t// Ok(err)\n\t// ogl.Printf(\"%v\\n\", string(bsjson))\n\n\t// doc := oschema.NewDocument(\"Coolio\")\n\t// doc.AddField(\"foo\", &fld)\n\t// bsjson, err = doc.ToJSON()\n\t// Ok(err)\n\t// ogl.Printf(\"%v\\n\", string(bsjson))\n\n\togl.Println(\"DONE\")\n}",
"func (self *Viewport) Reshape(width int, height int) {\n\tself.selectionDirty = false\n\tself.screenWidth = width\n\tself.screenHeight = height\n\n\tgl.Viewport(0, 0, width, height)\n\n\tviewWidth := float64(self.screenWidth) / float64(SCREEN_SCALE)\n\tviewHeight := float64(self.screenHeight) / float64(SCREEN_SCALE)\n\n\tself.lplane = -viewWidth / 2\n\tself.rplane = viewWidth / 2\n\tself.bplane = -viewHeight / 4\n\tself.tplane = 3 * viewHeight / 4\n\n\tgl.MatrixMode(gl.PROJECTION)\n\tgl.LoadIdentity()\n\tgl.Ortho(self.lplane, self.rplane, self.bplane, self.tplane, -60, 60)\n\n\t// self.Perspective(90, 1, 0.01,1000);\n\n\tgl.MatrixMode(gl.MODELVIEW)\n\tgl.LoadIdentity()\n\tpicker.x = float32(viewport.rplane) - picker.radius + BLOCK_SCALE*0.5\n\tpicker.y = float32(viewport.bplane) + picker.radius - BLOCK_SCALE*0.5\n\n}",
"func RORW(ci, mr operand.Op) { ctx.RORW(ci, mr) }",
"func Or(predicates ...predicate.Project) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts1 := s.Clone().SetP(nil)\n\t\tfor i, p := range predicates {\n\t\t\tif i > 0 {\n\t\t\t\ts1.Or()\n\t\t\t}\n\t\t\tp(s1)\n\t\t}\n\t\ts.Where(s1.P())\n\t})\n}",
"func Or(predicates ...predicate.Project) predicate.Project {\n\treturn predicate.Project(func(s *sql.Selector) {\n\t\ts1 := s.Clone().SetP(nil)\n\t\tfor i, p := range predicates {\n\t\t\tif i > 0 {\n\t\t\t\ts1.Or()\n\t\t\t}\n\t\t\tp(s1)\n\t\t}\n\t\ts.Where(s1.P())\n\t})\n}",
"func MakeProductOperator(op *Operator, operands map[string]string) {\n\tinvertRight := operands[\"invertright\"] == \"yes\"\n\tparent1 := op.Parents[0]\n\tparent2 := op.Parents[1]\n\tvar matrix1 map[[2]int]*MatrixData\n\tvar matrix2 map[[2]int]*MatrixData\n\n\top.InitFunc = func(frame *Frame) {\n\t\tdriver.DeleteMatrixAfter(op.Name, frame.Time)\n\t\tmatrix1 = driver.LoadMatrixBefore(parent1.Name, frame.Time)\n\t\tmatrix2 = driver.LoadMatrixBefore(parent2.Name, frame.Time)\n\t\top.updateChildRerunTime(frame.Time)\n\t}\n\n\top.Func = func(frame *Frame, pd ParentData) {\n\t\tnewCells := make(map[[2]int]bool)\n\t\tfor _, md := range pd.MatrixData[0] {\n\t\t\tcell := [2]int{md.I, md.J}\n\t\t\tmatrix1[cell] = md\n\t\t\tnewCells[cell] = true\n\t\t}\n\t\tfor _, md := range pd.MatrixData[1] {\n\t\t\tcell := [2]int{md.I, md.J}\n\t\t\tmatrix2[cell] = md\n\t\t\tnewCells[cell] = true\n\t\t}\n\t\tfor cell := range newCells {\n\t\t\tif matrix1[cell] == nil || matrix2[cell] == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tleft := matrix1[cell].Val\n\t\t\tright := matrix2[cell].Val\n\t\t\tif invertRight {\n\t\t\t\tright = 1 - right\n\t\t\t}\n\t\t\tAddMatrixData(op.Name, cell[0], cell[1], left * right, \"\", frame.Time)\n\t\t}\n\t}\n\n\top.Loader = op.MatrixLoader\n}",
"func NewOneDimMatrix(initialValue float64, n int, topBoundary, bottomBoundary, leftBoundary, rightBoundary float64) OneDimMatrix {\n\tmat := OneDimMatrix{\n\t\tmatrix: make([]float64, n*n),\n\t\tnDim: n,\n\t}\n\n\t// Init inner cells value\n\tfor i := 0; i < n; i++ {\n\t\tfor j := 0; j < n; j++ {\n\t\t\tmat.SetCell(i, j, initialValue)\n\t\t}\n\t}\n\n\t// Init top, right and left boundaries\n\tfor i := 0; i < n; i++ {\n\t\tmat.SetCell(0, i, topBoundary)\n\t\tmat.SetCell(i, 0, leftBoundary)\n\t\tmat.SetCell(i, n-1, rightBoundary)\n\t}\n\n\t// Init bottom boundary\n\tfor j := 0; j < n; j++ {\n\t\tmat.SetCell(n-1, j, bottomBoundary)\n\t}\n\n\treturn mat\n}"
] | [
"0.80101526",
"0.786557",
"0.7378602",
"0.72870845",
"0.6966766",
"0.69345576",
"0.6813901",
"0.67008436",
"0.6640046",
"0.65874267",
"0.6338183",
"0.628314",
"0.62605625",
"0.6048192",
"0.5790114",
"0.5180142",
"0.49350113",
"0.4705382",
"0.44425777",
"0.4424666",
"0.43745247",
"0.4325965",
"0.42444125",
"0.42287168",
"0.42050564",
"0.41521335",
"0.412431",
"0.41237238",
"0.41111767",
"0.40973735",
"0.4094534",
"0.40797696",
"0.40644544",
"0.40431538",
"0.40324292",
"0.40318635",
"0.40275022",
"0.40195233",
"0.4018627",
"0.40141037",
"0.40085572",
"0.40034345",
"0.39938927",
"0.39904934",
"0.39784572",
"0.39776272",
"0.39670175",
"0.39585367",
"0.39559934",
"0.39555496",
"0.39544392",
"0.39538047",
"0.39512232",
"0.39480716",
"0.39413384",
"0.39217946",
"0.39106208",
"0.3900158",
"0.3898273",
"0.38819435",
"0.38792357",
"0.38732114",
"0.3865985",
"0.3860754",
"0.3853572",
"0.38508028",
"0.38437152",
"0.38397703",
"0.38384923",
"0.38291824",
"0.38268027",
"0.38265705",
"0.38185537",
"0.38122404",
"0.38049424",
"0.3796334",
"0.37935326",
"0.37915123",
"0.37830243",
"0.37776875",
"0.37701783",
"0.3769899",
"0.37672153",
"0.37643957",
"0.3761247",
"0.3759773",
"0.37590757",
"0.37556013",
"0.3748597",
"0.3743419",
"0.37423044",
"0.37414092",
"0.37392345",
"0.37374157",
"0.3736617",
"0.37318793",
"0.37313968",
"0.37313968",
"0.37306574",
"0.3729707"
] | 0.6993952 | 4 |
Ortho2D is equivalent to Ortho with the near and far planes being -1 and 1, respectively. | func Ortho2D(left, right, bottom, top float64) Mat4 {
return Ortho(left, right, bottom, top, -1, 1)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tsyscall.Syscall6(gpOrtho, 6, uintptr(math.Float64bits(left)), uintptr(math.Float64bits(right)), uintptr(math.Float64bits(bottom)), uintptr(math.Float64bits(top)), uintptr(math.Float64bits(zNear)), uintptr(math.Float64bits(zFar)))\n}",
"func Ortho2D(left, right, bottom, top float32) Mat4 {\n\treturn Ortho(left, right, bottom, top, -1, 1)\n}",
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n C.glowOrtho(gpOrtho, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tC.glowOrtho(gpOrtho, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func (m *Mat4) Ortho(left, right, bottom, top, nearVal, farVal float64) *Mat4{\n\t*m = *Ortho4(left, right, bottom, top, nearVal, farVal)\n\n\treturn m\n}",
"func Ortho(left, right, bottom, top, near, far float64) Mat4 {\n\trml, tmb, fmn := (right - left), (top - bottom), (far - near)\n\n\treturn Mat4{float64(2. / rml), 0, 0, 0, 0, float64(2. / tmb), 0, 0, 0, 0, float64(-2. / fmn), 0, float64(-(right + left) / rml), float64(-(top + bottom) / tmb), float64(-(far + near) / fmn), 1}\n}",
"func MakeOrtho(width, height int) [16]gl.Float {\n\treturn [16]gl.Float{\n\t\t2.0 / gl.Float(width), 0.0, 0.0, 0.0,\n\t\t0.0, 2.0 / gl.Float(height), 0.0, 0.0,\n\t\t0.0, 0.0, -1.0, 0.0,\n\t\t-1.0, -1.0, 0.0, 1.0}\n}",
"func Ortho(left, right, bottom, top, nearVal, farVal gl.Float) *Mat4 {\n\tm := IdentMat4()\n\tm[0].X = 2.0 / (right - left)\n\tm[1].Y = 2.0 / (top - bottom)\n\tm[2].Z = -2.0 / (farVal - nearVal)\n\tm[3].X = -(right + left) / (right - left)\n\tm[3].Y = -(top + bottom) / (top - bottom)\n\tm[3].Z = -(farVal + nearVal) / (farVal - nearVal)\n\treturn m\n}",
"func (c *Camera) SetOrtho(left, right, bottom, top, near, far float32) {\n\tglm.OrthoIn(left, right, bottom, top, near, far, &c.Projection)\n}",
"func (c *Camera) SetOrtho(view image.Rectangle, near, far float64) {\n\tw := float64(view.Dx())\n\tw = float64(int((w / 2.0)) * 2)\n\th := float64(view.Dy())\n\th = float64(int((h / 2.0)) * 2)\n\tm := lmath.Mat4Ortho(0, w, 0, h, near, far)\n\tc.Projection = ConvertMat4(m)\n}",
"func MatrixOrtho(left, right, bottom, top, near, far float32) Matrix {\n\tvar result Matrix\n\n\trl := right - left\n\ttb := top - bottom\n\tfn := far - near\n\n\tresult.M0 = 2.0 / rl\n\tresult.M1 = 0.0\n\tresult.M2 = 0.0\n\tresult.M3 = 0.0\n\tresult.M4 = 0.0\n\tresult.M5 = 2.0 / tb\n\tresult.M6 = 0.0\n\tresult.M7 = 0.0\n\tresult.M8 = 0.0\n\tresult.M9 = 0.0\n\tresult.M10 = -2.0 / fn\n\tresult.M11 = 0.0\n\tresult.M12 = -(left + right) / rl\n\tresult.M13 = -(top + bottom) / tb\n\tresult.M14 = -(far + near) / fn\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func NewMatrixOrtho(left, right, bottom, top, near, far float64) Matrix {\n\trl := (right - left)\n\ttb := (top - bottom)\n\tfn := (far - near)\n\treturn Matrix{\n\t\tM0: float32(2 / rl),\n\t\tM1: 0,\n\t\tM2: 0,\n\t\tM3: 0,\n\t\tM4: 0,\n\t\tM5: float32(2 / tb),\n\t\tM6: 0,\n\t\tM7: 0,\n\t\tM8: 0,\n\t\tM9: 0,\n\t\tM10: float32(-2 / fn),\n\t\tM11: 0,\n\t\tM12: float32(-(left + right) / rl),\n\t\tM13: float32(-(top + bottom) / tb),\n\t\tM14: float32(-(far + near) / fn),\n\t\tM15: 1,\n\t}\n}",
"func MatrixOrthoSubProjection(projection Matrix4f, orthoScale Vector2f, orthoDistance, eyeViewAdjustX float32) Matrix4f {\n\treturn matrix4f(C.ovrMatrix4f_OrthoSubProjection(c_matrix4f(projection), c_vector2f(orthoScale), C.float(orthoDistance), C.float(eyeViewAdjustX)))\n}",
"func (c *Context) SetOrtho2DProjection(windowWidth int, windowHeight int, screenScale float32, centered bool) {\n\tvar left, right, top, bottom float32\n\tif centered {\n\t\t// 0,0 is placed at the center of the window\n\t\thalfWidth := float32(windowWidth) / 2 / screenScale\n\t\thalfHeight := float32(windowHeight) / 2 / screenScale\n\t\tleft = -halfWidth\n\t\tright = halfWidth\n\t\ttop = halfHeight\n\t\tbottom = -halfHeight\n\t} else {\n\t\tleft = 0\n\t\tright = float32(windowWidth)\n\t\ttop = float32(windowHeight)\n\t\tbottom = 0\n\t}\n\tc.projectionMatrix = mgl32.Ortho(left, right, top, bottom, 1, -1)\n}",
"func Ortho4(left, right, bottom, top, nearVal, farVal float64) (m *Mat4) {\n\ttx := -(right + left) / (right - left)\n\tty := -(top + bottom) / (top - bottom)\n\ttz := -(farVal + nearVal) / (farVal - nearVal)\n\n\tm = (*Mat4)(&[16]float64{2/(right-left), 0, 0, 0,\n 0, 2/(top-bottom), 0, 0,\n 0, 0, -2/(farVal-nearVal), 0,\n\t tx, ty, tz, 1})\n\n\treturn m;\n}",
"func Or2(arg1 TermT, arg2 TermT) TermT {\n\treturn TermT(C.yices_or2(C.term_t(arg1), C.term_t(arg2)))\n}",
"func NewMatrix4Orthographic(left, right, top, bottom, near, far float32) *Matrix4 {\n\tma := NewDefaultMatrix4()\n\tma.MakeOrthographic(left, right, top, bottom, near, far)\n\treturn ma\n}",
"func (c *Camera) debugUpdate() {\n\tc.State = gfx.NewState()\n\tc.Shader = shader\n\tc.State.FaceCulling = gfx.BackFaceCulling\n\n\tm := gfx.NewMesh()\n\tm.Primitive = gfx.Lines\n\n\tm.Vertices = []gfx.Vec3{}\n\tm.Colors = []gfx.Color{}\n\n\tnear := float32(c.Near)\n\tfar := float32(c.Far)\n\n\tif c.Ortho {\n\t\twidth := float32(c.View.Dx())\n\t\theight := float32(c.View.Dy())\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{width / 2, 0, height / 2},\n\n\t\t\t// Near\n\t\t\t{0, near, 0},\n\t\t\t{width, near, 0},\n\t\t\t{width, near, height},\n\t\t\t{0, near, height},\n\n\t\t\t// Far\n\t\t\t{0, far, 0},\n\t\t\t{width, far, 0},\n\t\t\t{width, far, height},\n\t\t\t{0, far, height},\n\n\t\t\t{width / 2, far, height / 2},\n\n\t\t\t// Up\n\t\t\t{0, near, height},\n\t\t\t{0, near, height},\n\t\t\t{width, near, height},\n\t\t}\n\t} else {\n\t\tratio := float32(c.View.Dx()) / float32(c.View.Dy())\n\t\tfovRad := c.FOV / 180 * math.Pi\n\n\t\thNear := float32(2 * math.Tan(fovRad/2) * c.Near)\n\t\twNear := hNear * ratio\n\n\t\thFar := float32(2 * math.Tan(fovRad/2) * c.Far)\n\t\twFar := hFar * ratio\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{0, 0, 0},\n\n\t\t\t// Near\n\t\t\t{-wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, hNear / 2},\n\t\t\t{-wNear / 2, near, hNear / 2},\n\n\t\t\t// Far\n\t\t\t{-wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, hFar / 2},\n\t\t\t{-wFar / 2, far, hFar / 2},\n\n\t\t\t{0, far, 0},\n\n\t\t\t// Up\n\t\t\t{0, near, hNear},\n\t\t\t{-wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t\t{wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t}\n\t}\n\n\tm.Colors = []gfx.Color{\n\t\t{1, 1, 1, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 1, 1, 1},\n\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t}\n\n\tm.Indices = []uint32{\n\t\t// From 0 to near plane\n\t\t0, 1,\n\t\t0, 2,\n\t\t0, 3,\n\t\t0, 4,\n\n\t\t// Near plane\n\t\t1, 2,\n\t\t2, 3,\n\t\t3, 4,\n\t\t4, 1,\n\n\t\t// Far plane\n\t\t5, 6,\n\t\t6, 7,\n\t\t7, 8,\n\t\t8, 5,\n\n\t\t// Lines from near to far plane\n\t\t1, 5,\n\t\t2, 6,\n\t\t3, 7,\n\t\t4, 8,\n\n\t\t0, 9,\n\n\t\t// Up\n\t\t10, 11,\n\t\t11, 12,\n\t\t12, 10,\n\t}\n\n\tc.Meshes = []*gfx.Mesh{m}\n}",
"func (v Vector) IsOrth(w Vector) bool {\n\treturn v.Dot(w) == 0\n}",
"func VPOR(mxy, xy, xy1 operand.Op) { ctx.VPOR(mxy, xy, xy1) }",
"func orient2d(pa, pb, pc *Point) int {\n\tdetleft := (pa.X - pc.X) * (pb.Y - pc.Y)\n\tdetright := (pa.Y - pc.Y) * (pb.X - pc.X)\n\tval := detleft - detright\n\tif val > -Epsilon && val < Epsilon {\n\t\treturn COLLINEAR\n\t} else if val > 0 {\n\t\treturn CCW\n\t}\n\treturn CW\n}",
"func (self *GameObjectCreator) Tilemap2O(key string, tileWidth int) {\n self.Object.Call(\"tilemap\", key, tileWidth)\n}",
"func Perspective(fovy, aspect, near, far float64) Mat4 {\n\t// fovy = (fovy * math.Pi) / 180.0 // convert from degrees to radians\n\tnmf, f := near-far, float64(1./math.Tan(float64(fovy)/2.0))\n\n\treturn Mat4{float64(f / aspect), 0, 0, 0, 0, float64(f), 0, 0, 0, 0, float64((near + far) / nmf), -1, 0, 0, float64((2. * far * near) / nmf), 0}\n}",
"func (p *Projection) project(wx float64, wy float64) (float64, float64) {\n return ((wx / p.worldWidth) * p.canvasWidth) + (p.canvasWidth * 0.5),\n ((wy / p.worldHeight) * -p.canvasHeight) + (p.canvasHeight * 0.5)\n}",
"func (this *DtNavMeshQuery) MoveAlongSurface(startRef DtPolyRef, startPos, endPos []float32,\n\tfilter *DtQueryFilter,\n\tresultPos []float32, visited []DtPolyRef, visitedCount *int, maxVisitedSize int,\n\tbHit *bool) DtStatus {\n\tDtAssert(this.m_nav != nil)\n\tDtAssert(this.m_tinyNodePool != nil)\n\n\t*visitedCount = 0\n\n\t// Validate input\n\tif startRef == 0 {\n\t\treturn DT_FAILURE | DT_INVALID_PARAM\n\t}\n\tif !this.m_nav.IsValidPolyRef(startRef) {\n\t\treturn DT_FAILURE | DT_INVALID_PARAM\n\t}\n\tstatus := DT_SUCCESS\n\n\tconst MAX_STACK int = 48\n\tvar stack [MAX_STACK]*DtNode\n\tvar nstack int\n\n\tthis.m_tinyNodePool.Clear()\n\n\tstartNode := this.m_tinyNodePool.GetNode(startRef, 0)\n\tstartNode.Pidx = 0\n\tstartNode.Cost = 0\n\tstartNode.Total = 0\n\tstartNode.Id = startRef\n\tstartNode.Flags = DT_NODE_CLOSED\n\tstack[nstack] = startNode\n\tnstack++\n\n\tvar bestPos [3]float32\n\tbestDist := float32(math.MaxFloat32)\n\tvar bestNode *DtNode\n\tDtVcopy(bestPos[:], startPos)\n\n\t// Search constraints\n\tvar searchPos [3]float32\n\tvar searchRadSqr float32\n\tDtVlerp(searchPos[:], startPos, endPos, 0.5)\n\tsearchRadSqr = DtSqrFloat32(DtVdist(startPos, endPos)/2.0 + 0.001)\n\n\tvar verts [DT_VERTS_PER_POLYGON * 3]float32\n\n\tvar wallNode *DtNode\n\tfor nstack != 0 {\n\t\t// Pop front.\n\t\tcurNode := stack[0]\n\t\tfor i := 0; i < nstack-1; i++ {\n\t\t\tstack[i] = stack[i+1]\n\t\t}\n\t\tnstack--\n\n\t\t// Get poly and tile.\n\t\t// The API input has been cheked already, skip checking internal data.\n\t\tcurRef := curNode.Id\n\t\tvar curTile *DtMeshTile\n\t\tvar curPoly *DtPoly\n\t\tthis.m_nav.GetTileAndPolyByRefUnsafe(curRef, &curTile, &curPoly)\n\n\t\t// Collect vertices.\n\t\tnverts := int(curPoly.VertCount)\n\t\tfor i := 0; i < nverts; i++ {\n\t\t\tDtVcopy(verts[i*3:], curTile.Verts[curPoly.Verts[i]*3:])\n\t\t}\n\t\t// If target is inside the poly, stop search.\n\t\tif DtPointInPolygon(endPos, verts[:], nverts) {\n\t\t\tbestNode = curNode\n\t\t\tDtVcopy(bestPos[:], endPos)\n\t\t\tbreak\n\t\t}\n\n\t\t// Find wall edges and find nearest point inside the walls.\n\t\tfor i, j := 0, (int)(curPoly.VertCount-1); i < (int)(curPoly.VertCount); j, i = i, i+1 {\n\t\t\t// Find links to neighbours.\n\t\t\tconst MAX_NEIS int = 8\n\t\t\tnneis := 0\n\t\t\tvar neis [MAX_NEIS]DtPolyRef\n\n\t\t\tif (curPoly.Neis[j] & DT_EXT_LINK) != 0 {\n\t\t\t\t// Tile border.\n\t\t\t\tfor k := curPoly.FirstLink; k != DT_NULL_LINK; k = curTile.Links[k].Next {\n\t\t\t\t\tlink := &curTile.Links[k]\n\t\t\t\t\tif link.Edge == uint8(j) {\n\t\t\t\t\t\tif link.Ref != 0 {\n\t\t\t\t\t\t\tvar neiTile *DtMeshTile\n\t\t\t\t\t\t\tvar neiPoly *DtPoly\n\t\t\t\t\t\t\tthis.m_nav.GetTileAndPolyByRefUnsafe(link.Ref, &neiTile, &neiPoly)\n\t\t\t\t\t\t\tif filter.PassFilter(link.Ref, neiTile, neiPoly) {\n\t\t\t\t\t\t\t\tif nneis < MAX_NEIS {\n\t\t\t\t\t\t\t\t\tneis[nneis] = link.Ref\n\t\t\t\t\t\t\t\t\tnneis++\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t} else if curPoly.Neis[j] != 0 {\n\t\t\t\tidx := (uint32)(curPoly.Neis[j] - 1)\n\t\t\t\tref := this.m_nav.GetPolyRefBase(curTile) | DtPolyRef(idx)\n\t\t\t\tif filter.PassFilter(ref, curTile, &curTile.Polys[idx]) {\n\t\t\t\t\t// Internal edge, encode id.\n\t\t\t\t\tneis[nneis] = ref\n\t\t\t\t\tnneis++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif nneis == 0 {\n\t\t\t\t// Wall edge, calc distance.\n\t\t\t\tvj := verts[j*3:]\n\t\t\t\tvi := verts[i*3:]\n\t\t\t\tvar tseg float32\n\t\t\t\tdistSqr := DtDistancePtSegSqr2D(endPos, vj, vi, &tseg)\n\t\t\t\tif distSqr < 
bestDist {\n\t\t\t\t\t// Update nearest distance.\n\t\t\t\t\tDtVlerp(bestPos[:], vj, vi, tseg)\n\t\t\t\t\tbestDist = distSqr\n\t\t\t\t\tbestNode = curNode\n\t\t\t\t\twallNode = curNode\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tfor k := 0; k < nneis; k++ {\n\t\t\t\t\t// Skip if no node can be allocated.\n\t\t\t\t\tneighbourNode := this.m_tinyNodePool.GetNode(neis[k], 0)\n\t\t\t\t\tif neighbourNode == nil {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Skip if already visited.\n\t\t\t\t\tif (neighbourNode.Flags & DT_NODE_CLOSED) != 0 {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Skip the link if it is too far from search constraint.\n\t\t\t\t\t// TODO: Maybe should use getPortalPoints(), but this one is way faster.\n\t\t\t\t\tvj := verts[j*3:]\n\t\t\t\t\tvi := verts[i*3:]\n\t\t\t\t\tvar tseg float32\n\t\t\t\t\tdistSqr := DtDistancePtSegSqr2D(searchPos[:], vj, vi, &tseg)\n\t\t\t\t\tif distSqr > searchRadSqr {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\t// Mark as the node as visited and push to queue.\n\t\t\t\t\tif nstack < MAX_STACK {\n\t\t\t\t\t\tneighbourNode.Pidx = this.m_tinyNodePool.GetNodeIdx(curNode)\n\t\t\t\t\t\tneighbourNode.Flags |= DT_NODE_CLOSED\n\t\t\t\t\t\tstack[nstack] = neighbourNode\n\t\t\t\t\t\tnstack++\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tvar n int\n\tif bestNode != nil {\n\t\t// Reverse the path.\n\t\tvar prev *DtNode\n\t\tnode := bestNode\n\t\tfor node != nil {\n\t\t\tnext := this.m_tinyNodePool.GetNodeAtIdx(node.Pidx)\n\t\t\tnode.Pidx = this.m_tinyNodePool.GetNodeIdx(prev)\n\t\t\tprev = node\n\t\t\tnode = next\n\t\t}\n\n\t\t// Store result\n\t\tnode = prev\n\t\tfor node != nil {\n\t\t\tvisited[n] = node.Id\n\t\t\tn++\n\t\t\tif n >= maxVisitedSize {\n\t\t\t\tstatus |= DT_BUFFER_TOO_SMALL\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tnode = this.m_tinyNodePool.GetNodeAtIdx(node.Pidx)\n\t\t}\n\t}\n\n\t*bHit = (wallNode != nil && wallNode == bestNode)\n\n\tDtVcopy(resultPos, bestPos[:])\n\n\t*visitedCount = n\n\n\treturn status\n}",
"func Or(a, b Dense) Dense {\n\tshort, long := a, b\n\tif b.len < a.len {\n\t\tshort, long = b, a\n\t}\n\trLen := long.len\n\tif short.negated {\n\t\trLen = short.len\n\t}\n\tr := Dense{\n\t\tbits: make([]byte, 0, BytesFor(rLen)),\n\t\tlen: rLen,\n\t\tnegated: a.negated || b.negated,\n\t}\n\tfor i := range short.bits {\n\t\tr.bits = append(r.bits, a.bits[i]|b.bits[i])\n\t}\n\tif !short.negated {\n\t\tfor i := len(short.bits); i < len(long.bits); i++ {\n\t\t\tr.bits = append(r.bits, long.bits[i])\n\t\t}\n\t}\n\treturn r\n}",
"func (this *DtNavMeshQuery) QueryPolygons2(center, halfExtents []float32,\n\tfilter *DtQueryFilter, query DtPolyQuery) DtStatus {\n\tDtAssert(this.m_nav != nil)\n\n\tif center == nil || halfExtents == nil || filter == nil || query == nil {\n\t\treturn DT_FAILURE | DT_INVALID_PARAM\n\t}\n\tvar bmin, bmax [3]float32\n\tDtVsub(bmin[:], center, halfExtents)\n\tDtVadd(bmax[:], center, halfExtents)\n\n\t// Find tiles the query touches.\n\tvar minx, miny, maxx, maxy int32\n\tthis.m_nav.CalcTileLoc(bmin[:], &minx, &miny)\n\tthis.m_nav.CalcTileLoc(bmax[:], &maxx, &maxy)\n\n\tconst MAX_NEIS int = 32\n\tvar neis [MAX_NEIS]*DtMeshTile\n\n\tfor y := miny; y <= maxy; y++ {\n\t\tfor x := minx; x <= maxx; x++ {\n\t\t\tnneis := this.m_nav.GetTilesAt(x, y, neis[:], MAX_NEIS)\n\t\t\tfor j := 0; j < nneis; j++ {\n\t\t\t\tthis.queryPolygonsInTile(neis[j], bmin[:], bmax[:], filter, query)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn DT_SUCCESS\n}",
"func (*Camera2D) New() akara.Component {\n\treturn &Camera2D{\n\t\tCamera2D: rl.NewCamera2D(rl.Vector2{}, rl.Vector2{}, 0, 1),\n\t\tBackground: color.Transparent,\n\t}\n}",
"func Perspective(fovy, aspect, zNear, zFar gl.Float) *Mat4 {\n\tf := 1 / (TanGL(fovy / 2.0))\n\tm := IdentMat4()\n\tm[0].X = f / aspect\n\tm[1].Y = f\n\tm[2].Z = (zFar + zNear) / (zNear - zFar)\n\tm[3].W = 0\n\tm[2].W = -1\n\tm[3].Z = (2 * zFar * zNear) / (zNear - zFar)\n\treturn m\n}",
"func ORW(imr, amr operand.Op) { ctx.ORW(imr, amr) }",
"func T2(obj SDF2, x, y float64) SDF2 {\n\tm := Translate2d(V2{X: x, Y: y})\n\treturn Transform2D(obj, m)\n}",
"func TestTwoDimensionalPlaneShouldPass2(t *testing.T) {\n\tvar err error\n\n\tmodel := NewLogistic(base.StochasticGD, .0001, 0, 3000, twoDX, twoDY)\n\terr = model.Learn()\n\tassert.Nil(t, err, \"Learning error should be nil\")\n\n\tvar guess []float64\n\n\tfor i := -40; i < 20; i++ {\n\t\tguess, err = model.Predict([]float64{float64(i)})\n\n\t\tif i/2+10 > 0 {\n\t\t\tassert.True(t, guess[0] > 0.5, \"Guess should be more likely to be 1 when i=%v\", i)\n\t\t\tassert.True(t, guess[0] < 1.001, \"Guess should not exceed 1 ever when\")\n\t\t} else {\n\t\t\tassert.True(t, guess[0] < 0.5, \"Guess should be more likely to be 0 when i=%v\", i)\n\t\t\tassert.True(t, guess[0] > 0.0, \"Guess should not be below 0 even\")\n\t\t}\n\n\t\tassert.Len(t, guess, 1, \"Length of a Logistic model output from the hypothesis should always be a 1 dimensional vector. Never multidimensional.\")\n\t\tassert.Nil(t, err, \"Prediction error should be nil\")\n\t}\n}",
"func Xor2(arg1 TermT, arg2 TermT) TermT {\n\treturn TermT(C.yices_xor2(C.term_t(arg1), C.term_t(arg2)))\n}",
"func NewTeo2Photoelastic() Tensor4 {\n\treturn NewTetragonalPhotoelasticTensor(0.0074, 0.187, 0.34, 0.0905, 0.24, -0.17, -0.463)\n}",
"func Or(ee ...TemporalExpression) OrExpression {\n\treturn OrExpression{ee}\n}",
"func Orient(pts ...[2]float64) int8 {\n\tif len(pts) < 3 {\n\t\treturn 0\n\t}\n\tvar (\n\t\tsum = 0.0\n\t\tdop = 0.0\n\t\tli = len(pts) - 1\n\t)\n\n\tif debug {\n\t\tlog.Printf(\"pts: %v\", pts)\n\t}\n\tfor i := range pts {\n\t\tdop = (pts[li][0] * pts[i][1]) - (pts[i][0] * pts[li][1])\n\t\tsum += dop\n\t\tif debug {\n\t\t\tlog.Printf(\"sum(%v,%v): %g -- %g\", li, i, sum, dop)\n\t\t}\n\t\tli = i\n\t}\n\tswitch {\n\tcase sum == 0:\n\t\treturn 0\n\tcase sum < 0:\n\t\treturn -1\n\tdefault:\n\t\treturn 1\n\t}\n}",
"func Render(r *sdl.Renderer, w *World) {\n\twidth := float64(r.GetViewport().W)\n\theight := float64(r.GetViewport().H)\n\n\trenderedVertices := [][2]int{}\n\n\tfor _, obj := range w.Objects {\n\t\tfor _, vertex := range obj.Geometry.Vertices {\n\t\t\trenderCoordinates := AsRelativeToSystem(w.ActiveCamera.ObjSys, ToSystem(obj.ObjSys, vertex.Pos))\n\n\t\t\tZ := Clamp(renderCoordinates.Z, 0.0001, math.NaN())\n\t\t\tratio := w.ActiveCamera.FocalLength / math.Abs(Z)\n\t\t\trenderX := ratio * renderCoordinates.X\n\t\t\trenderY := ratio * renderCoordinates.Y\n\n\t\t\tnormX, normY := NormalizeScreen(\n\t\t\t\twidth,\n\t\t\t\theight,\n\t\t\t\trenderX,\n\t\t\t\trenderY,\n\t\t\t)\n\n\t\t\trasterX := int(math.Floor(normX*width) + width/2)\n\t\t\trasterY := int(math.Floor(normY*height) + height/2)\n\n\t\t\trenderedVertices = append(renderedVertices, [2]int{rasterX, rasterY})\n\n\t\t\tDrawCircle(r, rasterX, rasterY, 5)\n\t\t}\n\t\tfor _, edge := range obj.Geometry.Edges {\n\t\t\tif len(renderedVertices) > edge.From && len(renderedVertices) > edge.To {\n\t\t\t\tr.DrawLine(\n\t\t\t\t\tint32(renderedVertices[edge.From][0]),\n\t\t\t\t\tint32(renderedVertices[edge.From][1]),\n\t\t\t\t\tint32(renderedVertices[edge.To][0]),\n\t\t\t\t\tint32(renderedVertices[edge.To][1]),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\tr.Present()\n}",
"func (self *PhysicsP2) Enable2O(object interface{}, debug bool, children bool) {\n self.Object.Call(\"enable\", object, debug, children)\n}",
"func Frustum(left, right, bottom, top, near, far float64) Mat4 {\n\trml, tmb, fmn := (right - left), (top - bottom), (far - near)\n\tA, B, C, D := (right+left)/rml, (top+bottom)/tmb, -(far+near)/fmn, -(2*far*near)/fmn\n\n\treturn Mat4{float64((2. * near) / rml), 0, 0, 0, 0, float64((2. * near) / tmb), 0, 0, float64(A), float64(B), float64(C), -1, 0, 0, float64(D), 0}\n}",
"func main() {\n\t// Initialization\n\t//--------------------------------------------------------------------------------------\n\tconst (\n\t\tscreenWidth = 800\n\t\tscreenHeight = 450\n\t)\n\n\trl.InitWindow(screenWidth, screenHeight, \"raylib [core] example - 2d camera mouse zoom\")\n\n\tvar camera rl.Camera2D\n\tcamera.Zoom = 1.0\n\n\trl.SetTargetFPS(60) // Set our game to run at 60 frames-per-second\n\t//--------------------------------------------------------------------------------------\n\n\t// Main game loop\n\tfor !rl.WindowShouldClose() { // Detect window close button or ESC key\n\t\t// Update\n\t\t//----------------------------------------------------------------------------------\n\t\t// Translate based on mouse right click\n\t\tif rl.IsMouseButtonDown(rl.MouseRightButton) {\n\t\t\tdelta := rl.GetMouseDelta()\n\t\t\tdelta = rl.Vector2Scale(delta, -1.0/camera.Zoom)\n\n\t\t\tcamera.Target = rl.Vector2Add(camera.Target, delta)\n\t\t}\n\n\t\t// Zoom based on mouse wheel\n\t\twheel := rl.GetMouseWheelMove()\n\t\tif wheel != 0 {\n\t\t\t// Get the world point that is under the mouse\n\t\t\tmouseWorldPos := rl.GetScreenToWorld2D(rl.GetMousePosition(), camera)\n\n\t\t\t// Set the offset to where the mouse is\n\t\t\tcamera.Offset = rl.GetMousePosition()\n\n\t\t\t// Set the target to match, so that the camera maps the world space point\n\t\t\t// under the cursor to the screen space point under the cursor at any zoom\n\t\t\tcamera.Target = mouseWorldPos\n\n\t\t\t// Zoom increment\n\t\t\tconst zoomIncrement float32 = 0.125\n\n\t\t\tcamera.Zoom += (wheel * zoomIncrement)\n\t\t\tif camera.Zoom < zoomIncrement {\n\t\t\t\tcamera.Zoom = zoomIncrement\n\t\t\t}\n\t\t}\n\n\t\t//----------------------------------------------------------------------------------\n\n\t\t// Draw\n\t\t//----------------------------------------------------------------------------------\n\t\trl.BeginDrawing()\n\t\trl.ClearBackground(rl.Black)\n\n\t\trl.BeginMode2D(camera)\n\n\t\t// Draw the 3d grid, rotated 90 degrees and centered around 0,0\n\t\t// just so we have something in the XY plane\n\t\trl.PushMatrix()\n\t\trl.Translatef(0, 25*50, 0)\n\t\trl.Rotatef(90, 1, 0, 0)\n\t\trl.DrawGrid(100, 50)\n\t\trl.PopMatrix()\n\n\t\t// Draw a reference circle\n\t\trl.DrawCircle(100, 100, 50, rl.Yellow)\n\n\t\trl.EndMode2D()\n\n\t\trl.DrawText(\"Mouse right button drag to move, mouse wheel to zoom\", 10, 10, 20, rl.White)\n\n\t\trl.EndDrawing()\n\t\t//----------------------------------------------------------------------------------\n\t}\n\n\t// De-Initialization\n\t//--------------------------------------------------------------------------------------\n\trl.CloseWindow() // Close window and OpenGL context\n\t//--------------------------------------------------------------------------------------\n}",
"func (p *Projection) grid(xInterval float64, yInterval float64) {\n l := draw2d.NewGraphicContext(p.img)\n l.SetStrokeColor(color.RGBA{0xEE, 0xEE, 0xEE, 0xFF})\n l.SetLineWidth(0.5)\n\n xCount := p.worldWidth / xInterval\n yCount := p.worldHeight / yInterval\n\n // horizontal lines\n for x := 1.0; x < xCount; x += 1 {\n xx, _ := p.project((x - (xCount / 2)) * xInterval, 0)\n l.MoveTo(xx, 0)\n l.LineTo(xx, p.canvasHeight)\n l.Stroke()\n }\n\n // vertical lines\n for y := 1.0; y < yCount; y += 1 {\n _, yy := p.project(0, (y - (yCount / 2)) * yInterval)\n l.MoveTo(0, yy)\n l.LineTo(p.canvasWidth, yy)\n l.Stroke()\n }\n\n l.SetStrokeColor(color.RGBA{0xAA, 0xAA, 0xAA, 0xFF})\n\n // horiz axis\n l.MoveTo(p.canvasWidth/2, 0)\n l.LineTo(p.canvasWidth/2, p.canvasHeight)\n l.Stroke()\n\n // vert axis\n l.MoveTo(0, p.canvasHeight/2)\n l.LineTo(p.canvasWidth, p.canvasHeight/2)\n l.Stroke()\n}",
"func KORW(k, k1, k2 operand.Op) { ctx.KORW(k, k1, k2) }",
"func ORB(imr, amr operand.Op) { ctx.ORB(imr, amr) }",
"func ReverseV2(scope *Scope, tensor tf.Output, axis tf.Output) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"ReverseV2\",\n\t\tInput: []tf.Input{\n\t\t\ttensor, axis,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func VPXOR(mxy, xy, xy1 operand.Op) { ctx.VPXOR(mxy, xy, xy1) }",
"func newOrExpr(lhs, rhs Expr) Expr {\n\t// Compute constant if both sides are constant.\n\tif lhs, ok := lhs.(*ConstantExpr); ok {\n\t\tif rhs, ok := rhs.(*ConstantExpr); ok {\n\t\t\treturn lhs.Or(rhs)\n\t\t}\n\t}\n\n\t// If constant is on left side, swap to right side.\n\tif IsConstantExpr(lhs) && !IsConstantExpr(rhs) {\n\t\tlhs, rhs = rhs, lhs\n\t}\n\n\t// Optimize for if constant is all ones or zeros.\n\tif rhs, ok := rhs.(*ConstantExpr); ok {\n\t\tif rhs.IsAllOnes() {\n\t\t\treturn rhs\n\t\t} else if rhs.Value == 0 {\n\t\t\treturn lhs\n\t\t}\n\t}\n\treturn &BinaryExpr{Op: OR, LHS: lhs, RHS: rhs}\n}",
"func NewProjectionPerspective(fovy, near, far, viewWidth, viewHeight float64) Matrix4 {\n\n\taspect := viewWidth / viewHeight\n\n\tt := math.Tan(fovy * math.Pi / 360)\n\tb := -t\n\tr := t * aspect\n\tl := -r\n\n\t// l := -viewWidth / 2\n\t// r := viewWidth / 2\n\t// t := -viewHeight / 2\n\t// b := viewHeight / 2\n\n\treturn Matrix4{\n\t\t{(2 * near) / (r - l), 0, (r + l) / (r - l), 0},\n\t\t{0, (2 * near) / (t - b), (t + b) / (t - b), 0},\n\t\t{0, 0, -((far + near) / (far - near)), -((2 * far * near) / (far - near))},\n\t\t{0, 0, -1, 0},\n\t}\n\n}",
"func (self *GameObjectCreator) RenderTexture2O(width int, height int) *RenderTexture{\n return &RenderTexture{self.Object.Call(\"renderTexture\", width, height)}\n}",
"func (n *Uint256) Or(n2 *Uint256) *Uint256 {\n\tn.n[0] |= n2.n[0]\n\tn.n[1] |= n2.n[1]\n\tn.n[2] |= n2.n[2]\n\tn.n[3] |= n2.n[3]\n\treturn n\n}",
"func BitwiseOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"BitwiseOr\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func (session *Session) Or(query interface{}, args ...interface{}) *Session {\n\tsession.Statement.Or(query, args...)\n\treturn session\n}",
"func (o *Grid2d) Init(xmin, xmax, ymin, ymax float64, nx, ny int) {\n\n\t// input\n\to.Xmin = xmin\n\to.Xmax = xmax\n\to.Ymin = ymin\n\to.Ymax = ymax\n\to.Nx = nx\n\to.Ny = ny\n\n\t// derived\n\to.N = nx * ny\n\to.Lx = xmax - xmin\n\to.Ly = ymax - ymin\n\to.Dx = o.Lx / float64(nx-1)\n\to.Dy = o.Ly / float64(ny-1)\n\to.Dxx = o.Dx * o.Dx\n\to.Dyy = o.Dy * o.Dy\n\n\t// derived: boundaries\n\to.L = utl.IntRange3(0, o.N, o.Nx)\n\to.R = utl.IntAddScalar(o.L, o.Nx-1)\n\to.B = utl.IntRange(o.Nx)\n\to.T = utl.IntAddScalar(o.B, (o.Ny-1)*o.Nx)\n}",
"func (z *Int) Or(x, y *Int) *Int {\n\tz[0] = x[0] | y[0]\n\tz[1] = x[1] | y[1]\n\tz[2] = x[2] | y[2]\n\tz[3] = x[3] | y[3]\n\treturn z\n}",
"func NewOp2(argLen int64, argModes []int64) *Op2 {\n\treturn &Op2{\n\t\tArgLen: argLen,\n\t\tArgModes: padMissingArgModes(argLen-1, argModes),\n\t}\n}",
"func (e *ConstantExpr) Or(other *ConstantExpr) *ConstantExpr {\n\tassert(e.Width == other.Width, \"or: width mismatch: %d != %d\", e.Width, other.Width)\n\treturn NewConstantExpr(e.Value|other.Value, e.Width)\n}",
"func LogicalOr(scope *Scope, x tf.Output, y tf.Output) (z tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"LogicalOr\",\n\t\tInput: []tf.Input{\n\t\t\tx, y,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func ORPS(mx, x operand.Op) { ctx.ORPS(mx, x) }",
"func (q *Query) Or(n int) *Query {\n\tq.headers = append(q.headers, fmt.Sprintf(\"Or: %d\", n))\n\treturn q\n}",
"func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n C.glowFrustum(gpFrustum, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func Or(x, y meta.ConstValue) meta.ConstValue {\n\tv1, ok1 := x.ToBool()\n\tv2, ok2 := y.ToBool()\n\tif ok1 && ok2 {\n\t\treturn meta.NewBoolConst(v1 || v2)\n\t}\n\treturn meta.UnknownValue\n}",
"func (self *Viewport) Reshape(width int, height int) {\n\tself.selectionDirty = false\n\tself.screenWidth = width\n\tself.screenHeight = height\n\n\tgl.Viewport(0, 0, width, height)\n\n\tviewWidth := float64(self.screenWidth) / float64(SCREEN_SCALE)\n\tviewHeight := float64(self.screenHeight) / float64(SCREEN_SCALE)\n\n\tself.lplane = -viewWidth / 2\n\tself.rplane = viewWidth / 2\n\tself.bplane = -viewHeight / 4\n\tself.tplane = 3 * viewHeight / 4\n\n\tgl.MatrixMode(gl.PROJECTION)\n\tgl.LoadIdentity()\n\tgl.Ortho(self.lplane, self.rplane, self.bplane, self.tplane, -60, 60)\n\n\t// self.Perspective(90, 1, 0.01,1000);\n\n\tgl.MatrixMode(gl.MODELVIEW)\n\tgl.LoadIdentity()\n\tpicker.x = float32(viewport.rplane) - picker.radius + BLOCK_SCALE*0.5\n\tpicker.y = float32(viewport.bplane) + picker.radius - BLOCK_SCALE*0.5\n\n}",
"func OrWhere(col, op string, expr Expr) Option {\n\treturn func(q Query) Query {\n\t\treturn realWhere(\"OR\", Ident(col), op, expr)(q)\n\t}\n}",
"func (self *PhysicsP2) SetBounds2O(x int, y int, width int, height int, left bool, right bool) {\n self.Object.Call(\"setBounds\", x, y, width, height, left, right)\n}",
"func Or(m gomock.Matcher, ms ...gomock.Matcher) gomock.Matcher {\n\tmcs := append([]gomock.Matcher{m}, ms...)\n\treturn orMatcher{mcs}\n}",
"func solve2(input string) string {\n\tpg := intcode.New(input)\n\t// set the starting panel white\n\trobot := &Robot{pg: pg, hull: map[[2]int]int64{\n\t\t{0, 0}: 1,\n\t}}\n\trobot.Run()\n\tfmt.Println()\n\trobot.DrawHull(os.Stdout)\n\tfmt.Println()\n\t// answer got by looking at the picture...\n\treturn \"PCKRLPUK\"\n\n}",
"func TexImage2D(target Enum, level Int, internalformat Int, width Sizei, height Sizei, border Int, format Enum, kind Enum, pixels unsafe.Pointer) {\n\tctarget, _ := (C.GLenum)(target), cgoAllocsUnknown\n\tclevel, _ := (C.GLint)(level), cgoAllocsUnknown\n\tcinternalformat, _ := (C.GLint)(internalformat), cgoAllocsUnknown\n\tcwidth, _ := (C.GLsizei)(width), cgoAllocsUnknown\n\tcheight, _ := (C.GLsizei)(height), cgoAllocsUnknown\n\tcborder, _ := (C.GLint)(border), cgoAllocsUnknown\n\tcformat, _ := (C.GLenum)(format), cgoAllocsUnknown\n\tckind, _ := (C.GLenum)(kind), cgoAllocsUnknown\n\tcpixels, _ := (unsafe.Pointer)(unsafe.Pointer(pixels)), cgoAllocsUnknown\n\tC.glTexImage2D(ctarget, clevel, cinternalformat, cwidth, cheight, cborder, cformat, ckind, cpixels)\n}",
"func ORPD(mx, x operand.Op) { ctx.ORPD(mx, x) }",
"func ORL(imr, emr operand.Op) { ctx.ORL(imr, emr) }",
"func NewGameWorld(gameWorldTemplate [][]string) *GameWorld {\n\tgw := GameWorld{}\n\n\t//fill gameArea with defautlvalue\n\tcount := 0\n\tgw.gameArea = make([][]*GameTile, len(gameWorldTemplate))\n\tfor i := 0; i < len(gw.gameArea); i++ {\n\t\tgw.gameArea[i] = make([]*GameTile, len(gameWorldTemplate[i]))\n\n\t\tfor j := 0; j < len(gw.gameArea[i]); j++ {\n\t\t\t//create tile\n\t\t\tgw.gameArea[i][j] = NewDefaultTile(count, gameWorldTemplate[i][j])\n\t\t\tcount++\n\t\t}\n\t}\n\n\t//set connections\n\tfor i := 0; i < len(gw.gameArea); i++ {\n\t\tfor j := 0; j < len(gw.gameArea[i]); j++ {\n\t\t\t//set connections\n\t\t\tcurrentTile := gw.gameArea[i][j]\n\n\t\t\t//set connection up\n\t\t\tif (i - 1) > 0 {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i-1][j], Up)\n\t\t\t\tgw.GameArea()[i-1][j].SetConnetionTile(currentTile, Down)\n\t\t\t}\n\n\t\t\t//set connection right\n\t\t\tif (j + 1) < len(gw.gameArea[i]) {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i][j+1], Right)\n\t\t\t\tgw.gameArea[i][j+1].SetConnetionTile(currentTile, Left)\n\t\t\t}\n\n\t\t\t//set connection down\n\t\t\tif (i + 1) < len(gw.gameArea) {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i+1][j], Down)\n\t\t\t\tgw.gameArea[i+1][j].SetConnetionTile(currentTile, Up)\n\t\t\t}\n\n\t\t\t//set connection left\n\t\t\tif (j - 1) > 0 {\n\t\t\t\tcurrentTile.SetConnetionTile(gw.gameArea[i][j-1], Left)\n\t\t\t\tgw.gameArea[i][j-1].SetConnetionTile(currentTile, Right)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn &gw\n}",
"func (p Perspective) Opposite() Perspective {\n\treturn 3 - p\n}",
"func (o *Grid) Meshgrid2d() (X, Y [][]float64) {\n\tX = utl.Alloc(o.npts[1], o.npts[0])\n\tY = utl.Alloc(o.npts[1], o.npts[0])\n\tp := 0\n\tfor n := 0; n < o.npts[1]; n++ {\n\t\tfor m := 0; m < o.npts[0]; m++ {\n\t\t\tX[n][m] = o.mtr[p][n][m].X[0]\n\t\t\tY[n][m] = o.mtr[p][n][m].X[1]\n\t\t}\n\t}\n\treturn\n}",
"func (session *Session) Or(query interface{}, args ...interface{}) *Session {\n\tsession.Session = session.Session.Or(query, args...)\n\treturn session\n}",
"func (w *Wrapper) OrWhere(args ...interface{}) *Wrapper {\n\tw.saveCondition(\"WHERE\", \"OR\", args...)\n\treturn w\n}",
"func (self *TStatement) Or(query string, args ...interface{}) *TStatement {\r\n\tself.Op(domain.OR_OPERATOR, query, args...)\r\n\treturn self\r\n}",
"func (c *Context) ORW(imr, amr operand.Op) {\n\tc.addinstruction(x86.ORW(imr, amr))\n}",
"func (self *PhysicsP2) ConvertTilemap2O(map_ *Tilemap, layer interface{}, addToWorld bool) []interface{}{\n\tarray00 := self.Object.Call(\"convertTilemap\", map_, layer, addToWorld)\n\tlength00 := array00.Length()\n\tout00 := make([]interface{}, length00, length00)\n\tfor i00 := 0; i00 < length00; i00++ {\n\t\tout00[i00] = array00.Index(i00)\n\t}\n\treturn out00\n}",
"func Or(lhs, rhs Expression, ops ...Expression) Expression {\n\tfolded := foldLeft(or, append([]Expression{lhs, rhs}, ops...)...)\n\tif folded != nil {\n\t\treturn folded\n\t}\n\treturn NewLiteral(false)\n}",
"func NewOr(x, y Constant) *ExprOr {\n\treturn &ExprOr{\n\t\tX: x,\n\t\tY: y,\n\t}\n}",
"func OR(elems ...Scope) Scope {\n\treturn &orScope{elems: elems}\n}",
"func (b1 *BitSet) Or(b2 *BitSet) *BitSet {\n\tb3 := new(BitSet)\n\tb3.Bits.Or(&b1.Bits, &b2.Bits)\n\treturn b3\n}",
"func NewGraphics2O(game *Game, x int, y int) *Graphics {\n return &Graphics{js.Global.Get(\"Phaser\").Get(\"Graphics\").New(game, x, y)}\n}",
"func (mp MultiPolygon) XOr(p2 Polygonal) Polygon {\n\treturn mp.op(p2, polyclip.XOR)\n}",
"func (u *Universe) Draw(w *glfw.Window, time float64) {\n\tplayer := u.Player\n\tloc := player.Location()\n\tplanetRen := u.PlanetMap[player.Planet.Spec.Id]\n\tplanetRotation := time / planetRen.Planet.Spec.RotationSeconds\n\tplanetRotation *= 2 * math.Pi\n\torbitPosition := time / planetRen.Planet.Spec.OrbitSeconds\n\torbitPosition *= 2 * math.Pi\n\tplanetLoc := planetRen.location(time, u.PlanetMap)\n\tplanetRotateNeg := mgl32.Rotate3DZ(-float32(planetRotation))\n\n\trotated := planetRotateNeg.Mul3x1(loc)\n\tsunDir := planetLoc.Add(rotated).Normalize()\n\n\tvpnDotSun := float64(rotated.Normalize().Dot(sunDir))\n\tlight1Color := mgl32.Vec3{0.5, 0.7, 1.0}\n\tlight1 := math.Max(math.Sqrt(vpnDotSun), 0)\n\tif math.IsNaN(light1) {\n\t\tlight1 = 0\n\t}\n\tlight2Color := mgl32.Vec3{0, 0, 0}\n\tlight2 := math.Max(math.Sqrt(1-vpnDotSun), 0)\n\tif math.IsNaN(light2) {\n\t\tlight2 = 0\n\t}\n\tlight3Color := mgl32.Vec3{0.7, 0.5, 0.4}\n\tlight3 := math.Max(0.6-math.Sqrt(math.Abs(vpnDotSun)), 0)\n\tif math.IsNaN(light3) {\n\t\tlight3 = 0\n\t}\n\tlight := light1Color.Mul(float32(light1)).Add(light2Color.Mul(float32(light2))).Add(light3Color.Mul(float32(light3)))\n\n\tgl.ClearColor(light.X(), light.Y(), light.Z(), 1)\n\tgl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT)\n\n\tfor _, planetRen := range u.PlanetMap {\n\t\tplanetRen.Draw(u.Player, u.PlanetMap, w, time)\n\t}\n}",
"func Or(a, b gbt.AlgoHandler) gbt.AlgoHandler {\n\treturn &orAlgo{\n\t\ta: a,\n\t\tb: b,\n\t}\n}",
"func (z *Int) Or(x, y *Int) *Int {}",
"func (grid *SquareGrid) ToWorld(c, r float64) (float64, float64) {\n\tworld := grid.toWorldMat.Mul2x1(mgl64.Vec2{c, r})\n\treturn world.X(), world.Y()\n}",
"func (pos *PosOpenStatement) Many2One() *Many2One {\n\treturn NewMany2One(pos.Id.Get(), \"\")\n}",
"func world2screen(wx, wy float32) (sx, sy int) {\n\tsx = int((wx * pixel_per_meter) + (float32(WINDOW_X) / 2.0))\n\tsy = int((-wy * pixel_per_meter) + (float32(WINDOW_Y) / 2.0))\n\treturn\n}",
"func (objx *CoderSession) Or(query interface{}, args ...interface{}) *CoderSession {\n\treturn (*CoderSession)(((*xorm.Session)(objx)).Or(query, args...))\n}",
"func (q *Query) Or() *Query {\n\tq.logicalOr = true\n\treturn q\n}",
"func Or(predicates ...predicate.Province) predicate.Province {\n\treturn predicate.Province(func(s *sql.Selector) {\n\t\ts1 := s.Clone().SetP(nil)\n\t\tfor i, p := range predicates {\n\t\t\tif i > 0 {\n\t\t\t\ts1.Or()\n\t\t\t}\n\t\t\tp(s1)\n\t\t}\n\t\ts.Where(s1.P())\n\t})\n}",
"func (me *Frustum) UpdatePlanesGH(mat *unum.Mat4, normalize bool) {\n\t// Left clipping plane\n\tme.Planes[0].X = mat[12] + mat[0]\n\tme.Planes[0].Y = mat[13] + mat[1]\n\tme.Planes[0].Z = mat[14] + mat[2]\n\tme.Planes[0].W = mat[15] + mat[3]\n\t// Right clipping plane\n\tme.Planes[1].X = mat[12] - mat[0]\n\tme.Planes[1].Y = mat[13] - mat[1]\n\tme.Planes[1].Z = mat[14] - mat[2]\n\tme.Planes[1].W = mat[15] - mat[3]\n\t// Bottom clipping plane\n\tme.Planes[2].X = mat[12] + mat[4]\n\tme.Planes[2].Y = mat[13] + mat[5]\n\tme.Planes[2].Z = mat[14] + mat[6]\n\tme.Planes[2].W = mat[15] + mat[7]\n\t// Top clipping plane\n\tme.Planes[3].X = mat[12] - mat[4]\n\tme.Planes[3].Y = mat[13] - mat[5]\n\tme.Planes[3].Z = mat[14] - mat[6]\n\tme.Planes[3].W = mat[15] - mat[7]\n\t// Near clipping plane\n\tme.Planes[4].X = mat[12] + mat[8]\n\tme.Planes[4].Y = mat[13] + mat[9]\n\tme.Planes[4].Z = mat[14] + mat[10]\n\tme.Planes[4].W = mat[15] + mat[11]\n\t// Far clipping plane\n\tme.Planes[5].X = mat[12] - mat[8]\n\tme.Planes[5].Y = mat[13] - mat[9]\n\tme.Planes[5].Z = mat[14] - mat[10]\n\tme.Planes[5].W = mat[15] - mat[11]\n\tif normalize {\n\t\tfor i := 0; i < len(me.Planes); i++ {\n\t\t\tme.Planes[i].Normalize()\n\t\t}\n\t}\n}",
"func (g *Grammar) Or(terms ...Term) (out Term) {\n\tname := fmt.Sprintf(\"OR_%v\", terms)\n\trule := g.Lookup(name)\n\tfor _, t := range terms {\n\t\trule.Productions = append(rule.Productions, Production{t})\n\t}\n\tg.rules[name] = rule\n\treturn rule\n}",
"func (self *Rectangle) GetPoint2O(position int, out *Point) *Point{\n return &Point{self.Object.Call(\"getPoint\", position, out)}\n}",
"func (o AuthenticationRuleOutput) Oauth() OAuthRequirementsPtrOutput {\n\treturn o.ApplyT(func(v AuthenticationRule) *OAuthRequirements { return v.Oauth }).(OAuthRequirementsPtrOutput)\n}",
"func Or(operands ...Operand) OrOperator {\n\treturn OrOperator{\n\t\toperands,\n\t}\n}",
"func VPMOVM2D(k, xyz operand.Op) { ctx.VPMOVM2D(k, xyz) }",
"func (plan Handle) ExecZ2D(idata, odata cu.DevicePtr) {\n\terr := Result(C.cufftExecZ2D(\n\t\tC.cufftHandle(plan),\n\t\t(*C.cufftDoubleComplex)(unsafe.Pointer(uintptr(idata))),\n\t\t(*C.cufftDoubleReal)(unsafe.Pointer(uintptr(odata)))))\n\tif err != SUCCESS {\n\t\tpanic(err)\n\t}\n}",
"func (e *Ellipsoid) Direct(\n\tlat1, lon1, azi1, s12 float64,\n\tlat2, lon2, azi2 *float64,\n) {\n\tC.geod_direct(&e.g,\n\t\tC.double(lat1), C.double(lon1), C.double(azi1), C.double(s12),\n\t\t(*C.double)(lat2), (*C.double)(lon2), (*C.double)(azi2))\n}",
"func (z *Bits) Or(x, y Bits) {\n\tif x.Num != y.Num {\n\t\tpanic(\"arguments have different number of bits\")\n\t}\n\tif z.Num != x.Num {\n\t\t*z = New(x.Num)\n\t}\n\tfor i, w := range y.Bits {\n\t\tz.Bits[i] = x.Bits[i] | w\n\t}\n}"
] | [
"0.7265957",
"0.72310865",
"0.7083383",
"0.6947122",
"0.680068",
"0.6585098",
"0.64460534",
"0.6387449",
"0.63567126",
"0.6282929",
"0.6277347",
"0.6056117",
"0.5784974",
"0.5626712",
"0.5495447",
"0.5270123",
"0.50828767",
"0.49142155",
"0.49066404",
"0.45804802",
"0.4481543",
"0.44299152",
"0.43773556",
"0.43758005",
"0.43287137",
"0.43077925",
"0.42849883",
"0.42592886",
"0.42266458",
"0.4226142",
"0.4219015",
"0.41805944",
"0.4178197",
"0.41768548",
"0.4156062",
"0.41382322",
"0.41120324",
"0.40994543",
"0.409663",
"0.40955582",
"0.40881556",
"0.4076296",
"0.40753672",
"0.40666324",
"0.40658793",
"0.40643948",
"0.40622076",
"0.4057739",
"0.4057373",
"0.4046855",
"0.404428",
"0.40432703",
"0.4027313",
"0.40202817",
"0.40169024",
"0.40156952",
"0.4015516",
"0.40140086",
"0.4011191",
"0.40094122",
"0.40069646",
"0.39920363",
"0.3992019",
"0.39871874",
"0.39819223",
"0.39723733",
"0.39701352",
"0.39691806",
"0.39580566",
"0.39577615",
"0.39556766",
"0.39442474",
"0.39428473",
"0.39313525",
"0.3924715",
"0.39219633",
"0.39207315",
"0.3916742",
"0.39137888",
"0.39137185",
"0.3912444",
"0.39092758",
"0.38978148",
"0.3893348",
"0.38894597",
"0.38885036",
"0.388307",
"0.38805166",
"0.38633737",
"0.38610378",
"0.38570923",
"0.3852214",
"0.38485733",
"0.3847967",
"0.38469955",
"0.38453168",
"0.38437903",
"0.384176",
"0.38361934",
"0.38342717"
] | 0.72120196 | 2 |
Perspective generates a Perspective Matrix. | func Perspective(fovy, aspect, near, far float64) Mat4 {
// fovy = (fovy * math.Pi) / 180.0 // convert from degrees to radians
nmf, f := near-far, float64(1./math.Tan(float64(fovy)/2.0))
return Mat4{float64(f / aspect), 0, 0, 0, 0, float64(f), 0, 0, 0, 0, float64((near + far) / nmf), -1, 0, 0, float64((2. * far * near) / nmf), 0}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Perspective(fovy, aspect, zNear, zFar gl.Float) *Mat4 {\n\tf := 1 / (TanGL(fovy / 2.0))\n\tm := IdentMat4()\n\tm[0].X = f / aspect\n\tm[1].Y = f\n\tm[2].Z = (zFar + zNear) / (zNear - zFar)\n\tm[3].W = 0\n\tm[2].W = -1\n\tm[3].Z = (2 * zFar * zNear) / (zNear - zFar)\n\treturn m\n}",
"func MatrixPerspective(fovy, aspect, near, far float32) Matrix {\n\ttop := near * float32(math.Tan(float64(fovy*Pi)/360.0))\n\tright := top * aspect\n\n\treturn MatrixFrustum(-right, right, -top, top, near, far)\n}",
"func NewMatrixPerspective(fovy, aspect, near, far float64) Matrix {\n\ttop := near * math.Tan(fovy*0.5)\n\tright := top * aspect\n\treturn NewMatrixFrustum(-right, right, -top, top, near, far)\n}",
"func NewProjectionPerspective(fovy, near, far, viewWidth, viewHeight float64) Matrix4 {\n\n\taspect := viewWidth / viewHeight\n\n\tt := math.Tan(fovy * math.Pi / 360)\n\tb := -t\n\tr := t * aspect\n\tl := -r\n\n\t// l := -viewWidth / 2\n\t// r := viewWidth / 2\n\t// t := -viewHeight / 2\n\t// b := viewHeight / 2\n\n\treturn Matrix4{\n\t\t{(2 * near) / (r - l), 0, (r + l) / (r - l), 0},\n\t\t{0, (2 * near) / (t - b), (t + b) / (t - b), 0},\n\t\t{0, 0, -((far + near) / (far - near)), -((2 * far * near) / (far - near))},\n\t\t{0, 0, -1, 0},\n\t}\n\n}",
"func NewMatrix4Perspective(left, right, top, bottom, near, far float32) *Matrix4 {\n\tx := 2 * near / (right - left)\n\ty := 2 * near / (top - bottom)\n\n\ta := (right + left) / (right - left)\n\tb := (top + bottom) / (top - bottom)\n\tc := -(far + near) / (far - near)\n\td := -2 * far * near / (far - near)\n\n\treturn &Matrix4{\n\t\telements: [16]float32{\n\t\t\tx, 0, 0, 0,\n\t\t\t0, y, 0, 0,\n\t\t\ta, b, c, -1,\n\t\t\t0, 0, d, 0,\n\t\t},\n\t}\n}",
"func Perspective() *PerspectiveApplyConfiguration {\n\treturn &PerspectiveApplyConfiguration{}\n}",
"func (c *Camera) SetPerspective(angle, ratio, zNear, zFar float32) {\n\tglm.PerspectiveIn(angle, ratio, zNear, zFar, &c.Projection)\n}",
"func NewPerspectiveCamera(fov float64, aspect float64, near float64, far float64) PerspectiveCamera {\n\treturn &perspectiveCameraImp{threejs.NewObject3DFromJSValue(threejs.Threejs(\"PerspectiveCamera\").New(fov, aspect, near, far))}\n}",
"func perspProj(a *vec3.T, cam *Camera) *vec2.T {\n\tm := mkExtrinsicCameraMtx(cam)\n\tprintM4(m)\n\tsp := vec3.From(a)\n\tm.TransformVec3(&sp)\n\treturn &vec2.T{sp[0], sp[1]}\n}",
"func MatrixFrustum(left, right, bottom, top, near, far float32) Matrix {\n\tvar result Matrix\n\n\trl := right - left\n\ttb := top - bottom\n\tfn := far - near\n\n\tresult.M0 = (near * 2.0) / rl\n\tresult.M1 = 0.0\n\tresult.M2 = 0.0\n\tresult.M3 = 0.0\n\n\tresult.M4 = 0.0\n\tresult.M5 = (near * 2.0) / tb\n\tresult.M6 = 0.0\n\tresult.M7 = 0.0\n\n\tresult.M8 = right + left/rl\n\tresult.M9 = top + bottom/tb\n\tresult.M10 = -(far + near) / fn\n\tresult.M11 = -1.0\n\n\tresult.M12 = 0.0\n\tresult.M13 = 0.0\n\tresult.M14 = -(far * near * 2.0) / fn\n\tresult.M15 = 0.0\n\n\treturn result\n}",
"func NewV1Perspective(appID string) SnapshotPerspective {\n\treturn NewPerspective(appID, \"v1\")\n}",
"func MatrixProjection(fov FovPort, znear, zfar float32, rightHanded bool) Matrix4f {\n\tif rightHanded {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 1))\n\t} else {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 0))\n\t}\n}",
"func (a *CloudCostPerspectivesApiService) GetPerspective(ctx context.Context, accountIdentifier string, perspectiveId string) (ResponseDtoceView, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ResponseDtoceView\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/ccm/api/perspective\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\tlocalVarQueryParams.Add(\"accountIdentifier\", parameterToString(accountIdentifier, \"\"))\n\tlocalVarQueryParams.Add(\"perspectiveId\", parameterToString(perspectiveId, \"\"))\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v Failure\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, 
newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ResponseDtoceView\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func (a *CloudCostPerspectivesApiService) CreatePerspective(ctx context.Context, body CeView, accountIdentifier string, clone bool) (ResponseDtoceView, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Post\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ResponseDtoceView\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/ccm/api/perspective\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\tlocalVarQueryParams.Add(\"accountIdentifier\", parameterToString(accountIdentifier, \"\"))\n\tlocalVarQueryParams.Add(\"clone\", parameterToString(clone, \"\"))\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v Failure\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn 
localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ResponseDtoceView\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func NewMatrixFrustum(left, right, bottom, top, near, far float64) Matrix {\n\trl := (right - left)\n\ttb := (top - bottom)\n\tfn := (far - near)\n\n\treturn Matrix{\n\t\tM0: float32((near * 2) / rl),\n\t\tM1: 0,\n\t\tM2: 0,\n\t\tM3: 0,\n\t\tM4: 0,\n\t\tM5: float32((near * 2) / tb),\n\t\tM6: 0,\n\t\tM7: 0,\n\t\tM8: float32((right + left) / rl),\n\t\tM9: float32((top + bottom) / tb),\n\t\tM10: float32(-(far + near) / fn),\n\t\tM11: -1,\n\t\tM12: 0,\n\t\tM13: 0,\n\t\tM14: float32(-(far * near * 2) / fn),\n\t\tM15: 0,\n\t}\n}",
"func (a *CloudCostPerspectivesApiService) DeletePerspective(ctx context.Context, accountIdentifier string, perspectiveId string) (ResponseDtoString, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Delete\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ResponseDtoString\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/ccm/api/perspective\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\tlocalVarQueryParams.Add(\"accountIdentifier\", parameterToString(accountIdentifier, \"\"))\n\tlocalVarQueryParams.Add(\"perspectiveId\", parameterToString(perspectiveId, \"\"))\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v Failure\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, 
newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ResponseDtoString\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func FourPointTransform(img gocv.Mat, pts []image.Point, dst *gocv.Mat) {\n\trect := OrderPoints(pts)\n\ttl := rect[0]\n\ttr := rect[1]\n\tbr := rect[2]\n\tbl := rect[3]\n\n\twidthA := math.Sqrt(math.Pow(float64(br.X)-float64(bl.X), 2) +\n\t\tmath.Pow(float64(br.Y)-float64(bl.Y), 2))\n\twidthB := math.Sqrt(math.Pow(float64(tr.X)-float64(tl.X), 2) +\n\t\tmath.Pow(float64(tr.Y)-float64(tl.Y), 2))\n\tmaxWidth := math.Max(widthA, widthB)\n\n\theightA := math.Sqrt(math.Pow(float64(tr.X)-float64(br.X), 2) +\n\t\tmath.Pow(float64(tr.Y)-float64(br.Y), 2))\n\theightB := math.Sqrt(math.Pow(float64(tl.X)-float64(bl.X), 2) +\n\t\tmath.Pow(float64(tl.Y)-float64(bl.Y), 2))\n\tmaxHeight := math.Max(heightA, heightB)\n\n\tdt := []image.Point{\n\t\timage.Pt(0, 0),\n\t\timage.Pt(int(maxWidth)-1, 0),\n\t\timage.Pt(int(maxWidth)-1, int(maxHeight)-1),\n\t\timage.Pt(0, int(maxHeight)-1)}\n\n\tm := gocv.GetPerspectiveTransform(rect, dt)\n\tgocv.WarpPerspective(img, dst, m, image.Pt(int(maxWidth), int(maxHeight)))\n\n}",
"func (a *CloudCostPerspectivesApiService) UpdatePerspective(ctx context.Context, body CeView, accountIdentifier string) (ResponseDtoceView, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Put\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ResponseDtoceView\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/ccm/api/perspective\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\tlocalVarQueryParams.Add(\"accountIdentifier\", parameterToString(accountIdentifier, \"\"))\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{\"application/json\"}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\t// body params\n\tlocalVarPostBody = &body\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v Failure\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = 
v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ResponseDtoceView\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func buildMatrix(dataShards, totalShards int) matrix {\n\t// Start with a Vandermonde matrix. This matrix would work, in theory, but\n\t// doesn't have the property that the data shards are unchanged after\n\t// encoding.\n\tvm := vandermonde(totalShards, dataShards)\n\n\t// Multiply by the inverse of the top square of the matrix. This will make\n\t// the top square be the identity matrix, but preserve the property that any\n\t// square subset of rows is invertible.\n\ttop := vm.SubMatrix(0, 0, dataShards, dataShards)\n\ttopInv, _ := top.Invert()\n\treturn vm.Multiply(topInv)\n}",
"func NewProjectionPlane(args ...interface{}) (*ProjectionPlane, error) {\n\twidth, height, pixelSize, distance, err := projectionPlaneParams(args)\n\n\tprojectionPlane := &ProjectionPlane{\n\t\tWidth: width,\n\t\tHeight: height,\n\t\tPixelSize: pixelSize,\n\t\tDistance: distance,\n\t\tGamma: 1.0,\n\t\tClampOutOfGamut: false,\n\t\tClampColor: *color.NewColor(0., 0., 0.),\n\t\tImage: *image.NewRGBA(image.Rect(0, 0, width, height)),\n\t}\n\n\treturn projectionPlane, err\n}",
"func New1x1Camera() *PerspectiveCamera {\n\torigin := geo.Vec3{\n\t\t0,\n\t\t-1.31-1,\n\t\t0,\n\t}\n\treturn NewPerspectiveCamera(origin, geo.Vec3{0,1,0}, 0.27*2, 1)\n}",
"func Frustum(left, right, bottom, top, near, far float64) Mat4 {\n\trml, tmb, fmn := (right - left), (top - bottom), (far - near)\n\tA, B, C, D := (right+left)/rml, (top+bottom)/tmb, -(far+near)/fmn, -(2*far*near)/fmn\n\n\treturn Mat4{float64((2. * near) / rml), 0, 0, 0, 0, float64((2. * near) / tmb), 0, 0, float64(A), float64(B), float64(C), -1, 0, 0, float64(D), 0}\n}",
"func make_plane(tWidth, tHeight uint32, vertices []float32, indices []uint32) {\n\t// width and height are the number of triangles across and down\n\t// plus one for the vertices to define them\n\ttWidth++\n\ttHeight++\n\n\tvar makeIsland = true\n\tvar heightMap = make([]float32, tWidth*tHeight)\n\tmake_height_map(tWidth, tHeight, heightMap, makeIsland)\n\tvar x, y uint32\n\tvar scale float32\n\tscale = 2.0 / float32(plane_rows)\n\thScale := scale * 2\n\t//var fbTexScale = float32(cols / width)\n\tvar fbTexScale = float32(1.0)\n\t// Set up vertices\n\tfor y = 0; y < tHeight; y++ {\n\t\tbase := y * tWidth\n\t\tfor x = 0; x < tWidth; x++ {\n\t\t\tindex := base + x\n\t\t\t// Position\n\t\t\tvertices[(8 * index)] = float32(x)*scale - 1.0\n\t\t\tvertices[(8*index)+1] = float32(y)*scale - 1.0\n\t\t\tvertices[(8*index)+2] = heightMap[index] * hScale\n\t\t\t// Colours\n\t\t\tvertices[(8*index)+3] = float32(1.0)\n\t\t\tvertices[(8*index)+4] = float32(1.0)\n\t\t\tvertices[(8*index)+5] = float32(1.0)\n\t\t\t// Texture\n\t\t\tvertices[(8*index)+6] = fbTexScale * float32(x) / float32(tWidth-1)\n\t\t\tvertices[(8*index)+7] = fbTexScale * float32(y) / float32(tHeight-1)\n\t\t\t/*fmt.Printf(\"%d: Ver ( %.2f, %.2f, %.2f ) / Col ( %.2f %.2f %.2f ) / Text ( %.2f, %.2f )\\n\",\n\t\t\tindex, vertices[(8*index)+0], vertices[(8*index)+1], vertices[(8*index)+2],\n\t\t\tvertices[(8*index)+3], vertices[(8*index)+4], vertices[(8*index)+5],\n\t\t\tvertices[(8*index)+6], vertices[(8*index)+7])*/\n\t\t}\n\t}\n\n\t// Set up indices\n\ti := 0\n\ttHeight--\n\tfor y = 0; y < tHeight; y++ {\n\t\tbase := y * tWidth\n\n\t\t//indices[i++] = (uint16)base;\n\t\tfor x = 0; x < tWidth; x++ {\n\t\t\tindices[i] = (uint32)(base + x)\n\t\t\ti += 1\n\t\t\tindices[i] = (uint32)(base + tWidth + x)\n\t\t\ti += 1\n\t\t}\n\t\t// add a degenerate triangle (except in a last row)\n\t\tif y < tHeight-1 {\n\t\t\tindices[i] = (uint32)((y+1)*tWidth + (tWidth - 1))\n\t\t\ti += 1\n\t\t\tindices[i] = (uint32)((y + 1) * tWidth)\n\t\t\ti += 1\n\t\t}\n\t}\n\n\t/*var ind int\n\tfor ind = 0; ind < i; ind++ {\n\t\tfmt.Printf(\"%d \", indices[ind])\n\t}\n\tfmt.Printf(\"\\nIn total %d indices\\n\", ind)*/\n}",
"func (s *Permutation) View() reform.View {\n\treturn PermutationTable\n}",
"func (gm *GraphicsManager) RenderAllFromPerspective(id component.GOiD, sm component.SceneManager) (*common.Vector, *common.Vector) {\n\terrs := common.MakeVector()\n\tcompsToSend := common.MakeVector()\n\tcomps := gm.compList.Array()\n\n\tperspLoc, err := sm.GetObjectLocation(id)\n\tif err != nil {\n\t\terrs.Insert(fmt.Errorf(\"requesting location from scene manager failed in perspective render, error %s\", err.Error()))\n\t\treturn nil, errs\n\t}\n\tcompsNearPerspective := sm.GetObjectsInLocationRadius(perspLoc, 5.0).Array()\n\n\tfor i := range comps {\n\t\tif comps[i] == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif comps[i].(component.GOiD) == id || comps[i].(component.GOiD) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor j := range compsNearPerspective {\n\t\t\tif comps[i].(component.GOiD) == compsNearPerspective[j].(component.GOiD) {\n\t\t\t\tcompsToSend.Insert(comps[i].(component.GOiD))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn compsToSend, errs\n}",
"func Project(obj Vec3, modelview, projection Mat4, initialX, initialY, width, height int) (win Vec3) {\n\tobj4 := obj.Vec4(1)\n\n\tvpp := projection.Mul4(modelview).Mul4x1(obj4)\n\tvpp = vpp.Mul(1 / vpp.W())\n\twin[0] = float64(initialX) + (float64(width)*(vpp[0]+1))/2\n\twin[1] = float64(initialY) + (float64(height)*(vpp[1]+1))/2\n\twin[2] = (vpp[2] + 1) / 2\n\n\treturn win\n}",
"func calcMVP(widthPx, heightPx int, tlx, tly, trx, try, blx, bly float64) f64.Aff3 {\n\t// Convert from pixel coords to vertex shader coords.\n\tinvHalfWidth := +2 / float64(widthPx)\n\tinvHalfHeight := -2 / float64(heightPx)\n\ttlx = tlx*invHalfWidth - 1\n\ttly = tly*invHalfHeight + 1\n\ttrx = trx*invHalfWidth - 1\n\ttry = try*invHalfHeight + 1\n\tblx = blx*invHalfWidth - 1\n\tbly = bly*invHalfHeight + 1\n\n\t// The resultant affine matrix:\n\t//\t- maps (0, 0) to (tlx, tly).\n\t//\t- maps (1, 0) to (trx, try).\n\t//\t- maps (0, 1) to (blx, bly).\n\treturn f64.Aff3{\n\t\ttrx - tlx, blx - tlx, tlx,\n\t\ttry - tly, bly - tly, tly,\n\t}\n}",
"func diag(m *Matrix) (*Matrix, string) {\n P, err := eig(m);\n if (err != \"\") {\n return nil, err;\n }\n Pinv, _ := inv(P);\n return mulMatrices(mulMatrices(Pinv, m), P), \"\";\n}",
"func (c *Camera) Matrix() mgl32.Mat4 {\n\treturn mgl32.LookAtV(c.pos, c.pos.Add(c.front), c.up)\n}",
"func (c *Camera) SetPersp(view image.Rectangle, fov, near, far float64) {\n\taspectRatio := float64(view.Dx()) / float64(view.Dy())\n\tm := lmath.Mat4Perspective(fov, aspectRatio, near, far)\n\tc.Projection = ConvertMat4(m)\n}",
"func (pc *perspectiveCameraImp) ProjectionMatrixInverse() *threejs.Matrix4 {\n\treturn &threejs.Matrix4{Value: pc.JSValue().Get(\"projectionMatrixInverse\")}\n}",
"func (matrix *Matrix) printMatrix() {\n\n\tfor i := 0; i < len(matrix.Indexes); i++ {\n\t\tfmt.Print(matrix.Indexes[i].Index, \"[ \")\n\t\tfor j := 0; j < len(matrix.Indexes[i].Departments); j++ {\n\t\t\tfmt.Print(\"[ \")\n\t\t\tfor k := 0; k < len(matrix.Indexes[i].Departments[j].Ratings); k++ {\n\t\t\t\ttext, _ := matrix.Indexes[i].Departments[j].Ratings[k].Lista.ToString()\n\t\t\t\tfmt.Print(\"[ \", text, \" ]\")\n\n\t\t\t}\n\t\t\tfmt.Print(\"] \")\n\t\t}\n\t\tfmt.Println(\"]\")\n\t}\n}",
"func NewMatrixOrtho(left, right, bottom, top, near, far float64) Matrix {\n\trl := (right - left)\n\ttb := (top - bottom)\n\tfn := (far - near)\n\treturn Matrix{\n\t\tM0: float32(2 / rl),\n\t\tM1: 0,\n\t\tM2: 0,\n\t\tM3: 0,\n\t\tM4: 0,\n\t\tM5: float32(2 / tb),\n\t\tM6: 0,\n\t\tM7: 0,\n\t\tM8: 0,\n\t\tM9: 0,\n\t\tM10: float32(-2 / fn),\n\t\tM11: 0,\n\t\tM12: float32(-(left + right) / rl),\n\t\tM13: float32(-(top + bottom) / tb),\n\t\tM14: float32(-(far + near) / fn),\n\t\tM15: 1,\n\t}\n}",
"func (p *PointAffine) FromProj(p1 *PointProj) *PointAffine {\n\tp.X.Div(&p1.X, &p1.Z)\n\tp.Y.Div(&p1.Y, &p1.Z)\n\treturn p\n}",
"func (a *CloudCostPerspectivesApiService) GetAllPerspectives(ctx context.Context, accountIdentifier string) (ResponseDtoListPerspective, *http.Response, error) {\n\tvar (\n\t\tlocalVarHttpMethod = strings.ToUpper(\"Get\")\n\t\tlocalVarPostBody interface{}\n\t\tlocalVarFileName string\n\t\tlocalVarFileBytes []byte\n\t\tlocalVarReturnValue ResponseDtoListPerspective\n\t)\n\n\t// create path and map variables\n\tlocalVarPath := a.client.cfg.BasePath + \"/ccm/api/perspective/getAllPerspectives\"\n\n\tlocalVarHeaderParams := make(map[string]string)\n\tlocalVarQueryParams := url.Values{}\n\tlocalVarFormParams := url.Values{}\n\n\tlocalVarQueryParams.Add(\"accountIdentifier\", parameterToString(accountIdentifier, \"\"))\n\t// to determine the Content-Type header\n\tlocalVarHttpContentTypes := []string{}\n\n\t// set Content-Type header\n\tlocalVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)\n\tif localVarHttpContentType != \"\" {\n\t\tlocalVarHeaderParams[\"Content-Type\"] = localVarHttpContentType\n\t}\n\n\t// to determine the Accept header\n\tlocalVarHttpHeaderAccepts := []string{\"application/json\"}\n\n\t// set Accept header\n\tlocalVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)\n\tif localVarHttpHeaderAccept != \"\" {\n\t\tlocalVarHeaderParams[\"Accept\"] = localVarHttpHeaderAccept\n\t}\n\tif ctx != nil {\n\t\t// API Key Authentication\n\t\tif auth, ok := ctx.Value(ContextAPIKey).(APIKey); ok {\n\t\t\tvar key string\n\t\t\tif auth.Prefix != \"\" {\n\t\t\t\tkey = auth.Prefix + \" \" + auth.Key\n\t\t\t} else {\n\t\t\t\tkey = auth.Key\n\t\t\t}\n\t\t\tlocalVarHeaderParams[\"x-api-key\"] = key\n\n\t\t}\n\t}\n\tr, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)\n\tif err != nil {\n\t\treturn localVarReturnValue, nil, err\n\t}\n\n\tlocalVarHttpResponse, err := a.client.callAPI(r)\n\tif err != nil || localVarHttpResponse == nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tlocalVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)\n\tlocalVarHttpResponse.Body.Close()\n\tif err != nil {\n\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t}\n\n\tif localVarHttpResponse.StatusCode < 300 {\n\t\t// If we succeed, return the data, otherwise pass on to decode error.\n\t\terr = a.client.decode(&localVarReturnValue, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\tif err == nil {\n\t\t\treturn localVarReturnValue, localVarHttpResponse, err\n\t\t}\n\t}\n\n\tif localVarHttpResponse.StatusCode >= 300 {\n\t\tnewErr := GenericSwaggerError{\n\t\t\tbody: localVarBody,\n\t\t\terror: localVarHttpResponse.Status,\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 400 {\n\t\t\tvar v Failure\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 500 {\n\t\t\tvar v ModelError\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, 
localVarHttpResponse, newErr\n\t\t}\n\t\tif localVarHttpResponse.StatusCode == 0 {\n\t\t\tvar v ResponseDtoListPerspective\n\t\t\terr = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get(\"Content-Type\"))\n\t\t\tif err != nil {\n\t\t\t\tnewErr.error = err.Error()\n\t\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t\t}\n\t\t\tnewErr.model = v\n\t\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t\t}\n\t\treturn localVarReturnValue, localVarHttpResponse, newErr\n\t}\n\n\treturn localVarReturnValue, localVarHttpResponse, nil\n}",
"func MatrixOrtho(left, right, bottom, top, near, far float32) Matrix {\n\tvar result Matrix\n\n\trl := right - left\n\ttb := top - bottom\n\tfn := far - near\n\n\tresult.M0 = 2.0 / rl\n\tresult.M1 = 0.0\n\tresult.M2 = 0.0\n\tresult.M3 = 0.0\n\tresult.M4 = 0.0\n\tresult.M5 = 2.0 / tb\n\tresult.M6 = 0.0\n\tresult.M7 = 0.0\n\tresult.M8 = 0.0\n\tresult.M9 = 0.0\n\tresult.M10 = -2.0 / fn\n\tresult.M11 = 0.0\n\tresult.M12 = -(left + right) / rl\n\tresult.M13 = -(top + bottom) / tb\n\tresult.M14 = -(far + near) / fn\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func (cam *Camera) SetupViewProjection() {\n\tx_ratio := cam.Width / cam.Height\n\tcam.View = PerspectiveFrustum(cam.YFov, x_ratio, cam.Near, cam.Far)\n\tcam.Projection = cam.View.M44()\n}",
"func (p *Proc) Matrix(a, b, c, d, e, f float64) {\n\tp.stk.matrix(f32.NewAffine2D(\n\t\tfloat32(a), float32(b), float32(c),\n\t\tfloat32(d), float32(e), float32(f),\n\t))\n}",
"func MatrixLookAt(eye, target, up Vector3) Matrix {\n\tvar result Matrix\n\n\tz := Vector3Subtract(eye, target)\n\tz = Vector3Normalize(z)\n\tx := Vector3CrossProduct(up, z)\n\tx = Vector3Normalize(x)\n\ty := Vector3CrossProduct(z, x)\n\ty = Vector3Normalize(y)\n\n\tresult.M0 = x.X\n\tresult.M1 = x.Y\n\tresult.M2 = x.Z\n\tresult.M3 = -((x.X * eye.X) + (x.Y * eye.Y) + (x.Z * eye.Z))\n\tresult.M4 = y.X\n\tresult.M5 = y.Y\n\tresult.M6 = y.Z\n\tresult.M7 = -((y.X * eye.X) + (y.Y * eye.Y) + (y.Z * eye.Z))\n\tresult.M8 = z.X\n\tresult.M9 = z.Y\n\tresult.M10 = z.Z\n\tresult.M11 = -((z.X * eye.X) + (z.Y * eye.Y) + (z.Z * eye.Z))\n\tresult.M12 = 0.0\n\tresult.M13 = 0.0\n\tresult.M14 = 0.0\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func (m *Mat4) Transpose() *Mat4 {\n\tvar rm = Mat4{\n\t\t{m[0].X, m[1].X, m[2].X, m[3].X},\n\t\t{m[0].Y, m[1].Y, m[2].Y, m[3].Y},\n\t\t{m[0].Z, m[1].Z, m[2].Z, m[3].Z},\n\t\t{m[0].W, m[1].W, m[2].W, m[3].W},\n\t}\n\treturn &rm\n}",
"func NewCube() *Mesh {\n\n\tmesh := NewMesh(\"Cube\",\n\n\t\t// Top\n\n\t\tNewVertex(1, 1, -1, 1, 0),\n\t\tNewVertex(1, 1, 1, 1, 1),\n\t\tNewVertex(-1, 1, -1, 0, 0),\n\n\t\tNewVertex(-1, 1, -1, 0, 0),\n\t\tNewVertex(1, 1, 1, 1, 1),\n\t\tNewVertex(-1, 1, 1, 0, 1),\n\n\t\t// Bottom\n\n\t\tNewVertex(-1, -1, -1, 0, 0),\n\t\tNewVertex(1, -1, 1, 1, 1),\n\t\tNewVertex(1, -1, -1, 1, 0),\n\n\t\tNewVertex(-1, -1, 1, 0, 1),\n\t\tNewVertex(1, -1, 1, 1, 1),\n\t\tNewVertex(-1, -1, -1, 0, 0),\n\n\t\t// Front\n\n\t\tNewVertex(-1, 1, 1, 0, 0),\n\t\tNewVertex(1, -1, 1, 1, 1),\n\t\tNewVertex(1, 1, 1, 1, 0),\n\n\t\tNewVertex(-1, -1, 1, 0, 1),\n\t\tNewVertex(1, -1, 1, 1, 1),\n\t\tNewVertex(-1, 1, 1, 0, 0),\n\n\t\t// Back\n\n\t\tNewVertex(1, 1, -1, 1, 0),\n\t\tNewVertex(1, -1, -1, 1, 1),\n\t\tNewVertex(-1, 1, -1, 0, 0),\n\n\t\tNewVertex(-1, 1, -1, 0, 0),\n\t\tNewVertex(1, -1, -1, 1, 1),\n\t\tNewVertex(-1, -1, -1, 0, 1),\n\n\t\t// Right\n\n\t\tNewVertex(1, 1, -1, 1, 0),\n\t\tNewVertex(1, 1, 1, 1, 1),\n\t\tNewVertex(1, -1, -1, 0, 0),\n\n\t\tNewVertex(1, -1, -1, 0, 0),\n\t\tNewVertex(1, 1, 1, 1, 1),\n\t\tNewVertex(1, -1, 1, 0, 1),\n\n\t\t// Left\n\n\t\tNewVertex(-1, -1, -1, 0, 0),\n\t\tNewVertex(-1, 1, 1, 1, 1),\n\t\tNewVertex(-1, 1, -1, 1, 0),\n\n\t\tNewVertex(-1, -1, 1, 0, 1),\n\t\tNewVertex(-1, 1, 1, 1, 1),\n\t\tNewVertex(-1, -1, -1, 0, 0),\n\t)\n\n\treturn mesh\n\n}",
"func NewMatrix4Orthographic(left, right, top, bottom, near, far float32) *Matrix4 {\n\tma := NewDefaultMatrix4()\n\tma.MakeOrthographic(left, right, top, bottom, near, far)\n\treturn ma\n}",
"func MatrixDeterminant(mat Matrix) float32 {\n\tvar result float32\n\n\ta00 := mat.M0\n\ta01 := mat.M1\n\ta02 := mat.M2\n\ta03 := mat.M3\n\ta10 := mat.M4\n\ta11 := mat.M5\n\ta12 := mat.M6\n\ta13 := mat.M7\n\ta20 := mat.M8\n\ta21 := mat.M9\n\ta22 := mat.M10\n\ta23 := mat.M11\n\ta30 := mat.M12\n\ta31 := mat.M13\n\ta32 := mat.M14\n\ta33 := mat.M15\n\n\tresult = a30*a21*a12*a03 - a20*a31*a12*a03 - a30*a11*a22*a03 + a10*a31*a22*a03 +\n\t\ta20*a11*a32*a03 - a10*a21*a32*a03 - a30*a21*a02*a13 + a20*a31*a02*a13 +\n\t\ta30*a01*a22*a13 - a00*a31*a22*a13 - a20*a01*a32*a13 + a00*a21*a32*a13 +\n\t\ta30*a11*a02*a23 - a10*a31*a02*a23 - a30*a01*a12*a23 + a00*a31*a12*a23 +\n\t\ta10*a01*a32*a23 - a00*a11*a32*a23 - a20*a11*a02*a33 + a10*a21*a02*a33 +\n\t\ta20*a01*a12*a33 - a00*a21*a12*a33 - a10*a01*a22*a33 + a00*a11*a22*a33\n\n\treturn result\n}",
"func NewPlane() *Mesh {\n\n\tmesh := NewMesh(\"Plane\",\n\t\tNewVertex(1, 0, -1, 1, 0),\n\t\tNewVertex(1, 0, 1, 1, 1),\n\t\tNewVertex(-1, 0, -1, 0, 0),\n\n\t\tNewVertex(-1, 0, -1, 0, 0),\n\t\tNewVertex(1, 0, 1, 1, 1),\n\t\tNewVertex(-1, 0, 1, 0, 1),\n\t)\n\n\treturn mesh\n\n}",
"func newMatrix(width, height int) *Matrix {\n\tmat := make([][]qrvalue, width)\n\tfor w := 0; w < width; w++ {\n\t\tmat[w] = make([]qrvalue, height)\n\t}\n\n\tm := &Matrix{\n\t\tmat: mat,\n\t\twidth: width,\n\t\theight: height,\n\t}\n\n\tm.init()\n\treturn m\n}",
"func transMatrix(pState []Pair) ([]float64) {\n var x float64 = 0.0\n tMat := make([]float64, len(pState))\n for i := range pState {\n x = float64(pState[i].cnt) + x\n }\n for i := range pState {\n tMat[i] = float64(pState[i].cnt) / x\n if i >= 1 {\n var y float64 = 0.0\n for j := i; j >= 0; j-- {\n y = float64(pState[j].cnt) + y\n }\n tMat[i] = y / x\n }\n }\n return tMat\n}",
"func MatrixTranspose(mat Matrix) Matrix {\n\tvar result Matrix\n\n\tresult.M0 = mat.M0\n\tresult.M1 = mat.M4\n\tresult.M2 = mat.M8\n\tresult.M3 = mat.M12\n\tresult.M4 = mat.M1\n\tresult.M5 = mat.M5\n\tresult.M6 = mat.M9\n\tresult.M7 = mat.M13\n\tresult.M8 = mat.M2\n\tresult.M9 = mat.M6\n\tresult.M10 = mat.M10\n\tresult.M11 = mat.M14\n\tresult.M12 = mat.M3\n\tresult.M13 = mat.M7\n\tresult.M14 = mat.M11\n\tresult.M15 = mat.M15\n\n\treturn result\n}",
"func ScienceVPPerm(fields [][]int) [][]int {\n\tif fields == nil || len(fields) == 0 {\n\t\treturn [][]int{}\n\t}\n\tif fields[0] == nil || len(fields[0]) == 0 {\n\t\treturn ScienceVPPerm(fields[1:])\n\t}\n\tsubFields := ScienceVPPerm(fields[1:])\n\tif len(subFields) == 0 {\n\t\t// Set subFields to be one empty to create single permutations\n\t\tsubFields = [][]int{{}}\n\t}\n\tperm := [][]int{}\n\tfor _, f := range fields[0] {\n\t\tfor _, sub := range subFields {\n\t\t\tperm = append(perm, append([]int{f}, sub...))\n\t\t}\n\t}\n\treturn perm\n}",
"func NewLookAtMatrix(target, center, up vector.Vector) Matrix4 {\n\tz := target.Sub(center).Unit()\n\tx, _ := up.Cross(z)\n\tx = x.Unit()\n\ty, _ := z.Cross(x)\n\treturn Matrix4{\n\t\t{x[0], x[1], x[2], -x.Dot(target)},\n\t\t{y[0], y[1], y[2], -y.Dot(target)},\n\t\t{z[0], z[1], z[2], -z.Dot(target)},\n\t\t{0, 0, 0, 1},\n\t}\n}",
"func matOrientation(x1, y1, x2, y2, x, y float64) OrientationKind {\n\tif (x1 == x2 && x2 == x) || (y1 == y2 && y2 == y) {\n\t\t// points are horizontally or vertically aligned\n\t\treturn Colinear\n\t}\n\tm := mat.NewDense(3, 3, []float64{x1, y1, 1, x2, y2, 1, x, y, 1})\n\tvar lu mat.LU\n\tlu.Factorize(m)\n\tcond := lu.Cond()\n\tif cond > mat.ConditionTolerance {\n\t\treturn Colinear\n\t}\n\t// Since only the sign is needed LogDet achieves the result in faster time.\n\t_, sign := lu.LogDet()\n\tswitch sign {\n\tcase 1:\n\t\treturn CCW\n\tcase -1:\n\t\treturn CW\n\t}\n\treturn IndeterminateOrientation\n}",
"func (mat Mat) TransposeMatrix() Mat {\n r := mat.Shape[0]\n c := mat.Shape[1]\n var temp [][]float32\n for i := 0; i < c; i++ {\n temp_r := make([]float32, r)\n for j := 0 ; j < r; j++ {\n temp_r[j] = mat.Value[j][i]\n }\n temp = append(temp, temp_r)\n }\n return Mat{temp,[]int{mat.Shape[1], mat.Shape[0]}}\n}",
"func (self *Viewport) Reshape(width int, height int) {\n\tself.selectionDirty = false\n\tself.screenWidth = width\n\tself.screenHeight = height\n\n\tgl.Viewport(0, 0, width, height)\n\n\tviewWidth := float64(self.screenWidth) / float64(SCREEN_SCALE)\n\tviewHeight := float64(self.screenHeight) / float64(SCREEN_SCALE)\n\n\tself.lplane = -viewWidth / 2\n\tself.rplane = viewWidth / 2\n\tself.bplane = -viewHeight / 4\n\tself.tplane = 3 * viewHeight / 4\n\n\tgl.MatrixMode(gl.PROJECTION)\n\tgl.LoadIdentity()\n\tgl.Ortho(self.lplane, self.rplane, self.bplane, self.tplane, -60, 60)\n\n\t// self.Perspective(90, 1, 0.01,1000);\n\n\tgl.MatrixMode(gl.MODELVIEW)\n\tgl.LoadIdentity()\n\tpicker.x = float32(viewport.rplane) - picker.radius + BLOCK_SCALE*0.5\n\tpicker.y = float32(viewport.bplane) + picker.radius - BLOCK_SCALE*0.5\n\n}",
"func dppt01(uplo mat.MatUplo, n int, a, afac, rwork *mat.Vector) (resid float64) {\n\tvar anorm, eps, one, t, zero float64\n\tvar i, k, kc, npp int\n\tvar err error\n\n\tzero = 0.0\n\tone = 1.0\n\n\t// Quick exit if N = 0\n\tif n <= 0 {\n\t\tresid = zero\n\t\treturn\n\t}\n\n\t// Exit with RESID = 1/EPS if ANORM = 0.\n\teps = golapack.Dlamch(Epsilon)\n\tanorm = golapack.Dlansp('1', uplo, n, a, rwork)\n\tif anorm <= zero {\n\t\tresid = one / eps\n\t\treturn\n\t}\n\n\t// Compute the product U'*U, overwriting U.\n\tif uplo == Upper {\n\t\tkc = (n*(n-1))/2 + 1\n\t\tfor k = n; k >= 1; k-- {\n\t\t\t// Compute the (K,K) element of the result.\n\t\t\tt = afac.Off(kc-1).Dot(k, afac.Off(kc-1), 1, 1)\n\t\t\tafac.Set(kc+k-1-1, t)\n\n\t\t\t// Compute the rest of column K.\n\t\t\tif k > 1 {\n\t\t\t\tif err = afac.Off(kc-1).Tpmv(Upper, Trans, NonUnit, k-1, afac, 1); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tkc = kc - (k - 1)\n\t\t\t}\n\t\t}\n\n\t\t// Compute the product L*L', overwriting L.\n\t} else {\n\t\tkc = (n * (n + 1)) / 2\n\t\tfor k = n; k >= 1; k-- {\n\t\t\t// Add a multiple of column K of the factor L to each of\n\t\t\t// columns K+1 through N.\n\t\t\tif k < n {\n\t\t\t\tif err = afac.Off(kc+n-k).Spr(Lower, n-k, one, afac.Off(kc), 1); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Scale column K by the diagonal element.\n\t\t\tt = afac.Get(kc - 1)\n\t\t\tafac.Off(kc-1).Scal(n-k+1, t, 1)\n\n\t\t\tkc = kc - (n - k + 2)\n\t\t}\n\t}\n\n\t// Compute the difference L*L' - A (or U'*U - A).\n\tnpp = n * (n + 1) / 2\n\tfor i = 1; i <= npp; i++ {\n\t\tafac.Set(i-1, afac.Get(i-1)-a.Get(i-1))\n\t}\n\n\t// Compute norm( L*U - A ) / ( N * norm(A) * EPS )\n\tresid = golapack.Dlansp('1', uplo, n, afac, rwork)\n\n\tresid = ((resid / float64(n)) / anorm) / eps\n\n\treturn\n}",
"func (c *Camera) SetupProjection(aspect float32) {\n\tc.ProjMat = types.Perspective4(c.FOV, aspect, 1, 1000)\n\tc.Update()\n}",
"func MatrixDeterminant(scope *Scope, input tf.Output) (output tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"MatrixDeterminant\",\n\t\tInput: []tf.Input{\n\t\t\tinput,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func (c Affine) Mat4() mat4 {\n\tm := mgl32.Scale3D(c.Scale[0], c.Scale[1], c.Scale[2])\n\n\tif c.Rotation[0] != 0 {\n\t\tm = m.Mul4(mgl32.HomogRotate3DX(c.Rotation[0]))\n\t}\n\tif c.Rotation[1] != 0 {\n\t\tm = m.Mul4(mgl32.HomogRotate3DY(c.Rotation[1]))\n\t}\n\tif c.Rotation[2] != 0 {\n\t\tm = m.Mul4(mgl32.HomogRotate3DZ(c.Rotation[2]))\n\t}\n\n\treturn m.Mul4(mgl32.Translate3D(c.Translation[0], c.Translation[1], c.Translation[2]))\n}",
"func (me *Frustum) UpdatePlanesGH(mat *unum.Mat4, normalize bool) {\n\t// Left clipping plane\n\tme.Planes[0].X = mat[12] + mat[0]\n\tme.Planes[0].Y = mat[13] + mat[1]\n\tme.Planes[0].Z = mat[14] + mat[2]\n\tme.Planes[0].W = mat[15] + mat[3]\n\t// Right clipping plane\n\tme.Planes[1].X = mat[12] - mat[0]\n\tme.Planes[1].Y = mat[13] - mat[1]\n\tme.Planes[1].Z = mat[14] - mat[2]\n\tme.Planes[1].W = mat[15] - mat[3]\n\t// Bottom clipping plane\n\tme.Planes[2].X = mat[12] + mat[4]\n\tme.Planes[2].Y = mat[13] + mat[5]\n\tme.Planes[2].Z = mat[14] + mat[6]\n\tme.Planes[2].W = mat[15] + mat[7]\n\t// Top clipping plane\n\tme.Planes[3].X = mat[12] - mat[4]\n\tme.Planes[3].Y = mat[13] - mat[5]\n\tme.Planes[3].Z = mat[14] - mat[6]\n\tme.Planes[3].W = mat[15] - mat[7]\n\t// Near clipping plane\n\tme.Planes[4].X = mat[12] + mat[8]\n\tme.Planes[4].Y = mat[13] + mat[9]\n\tme.Planes[4].Z = mat[14] + mat[10]\n\tme.Planes[4].W = mat[15] + mat[11]\n\t// Far clipping plane\n\tme.Planes[5].X = mat[12] - mat[8]\n\tme.Planes[5].Y = mat[13] - mat[9]\n\tme.Planes[5].Z = mat[14] - mat[10]\n\tme.Planes[5].W = mat[15] - mat[11]\n\tif normalize {\n\t\tfor i := 0; i < len(me.Planes); i++ {\n\t\t\tme.Planes[i].Normalize()\n\t\t}\n\t}\n}",
"func Vperp(v Vect) Vect {\n\treturn goVect(C.cpvperp(v.c()))\n}",
"func (matrix Matrix4) Decompose() (vector.Vector, vector.Vector, Matrix4) {\n\n\tposition := vector.Vector{matrix[3][0], matrix[3][1], matrix[3][2]}\n\n\trotation := NewMatrix4()\n\trotation = rotation.SetRow(0, matrix.Row(0).Unit())\n\trotation = rotation.SetRow(1, matrix.Row(1).Unit())\n\trotation = rotation.SetRow(2, matrix.Row(2).Unit())\n\n\tin := matrix.Mult(rotation.Transposed())\n\n\tscale := vector.Vector{in.Row(0).Magnitude(), in.Row(1).Magnitude(), in.Row(2).Magnitude()}\n\n\treturn position, scale, rotation\n\n}",
"func myVisualiseMatrix(world [][]byte, ImageWidth, ImageHeight int) {\n\tfor i := 0; i < ImageHeight; i++ {\n\t\tfor j := 0; j < ImageWidth; j++ {\n\t\t\tfmt.Print(world[i][j])\n\t\t}\n\t\tfmt.Println()\n\t}\n}",
"func WriteMatrix(pattern []rune, text []rune, matrix [][]int, writer io.Writer) {\n\tfmt.Fprintf(writer, \" \")\n\tfor _, textRune := range text {\n\t\tfmt.Fprintf(writer, \" %c\", textRune)\n\t}\n\tfmt.Fprintf(writer, \"\\n\")\n\tfmt.Fprintf(writer, \" %2d\", matrix[0][0])\n\tfor j := range text {\n\t\tfmt.Fprintf(writer, \" %2d\", matrix[0][j+1])\n\t}\n\tfmt.Fprintf(writer, \"\\n\")\n\tfor i, patternRune := range pattern {\n\t\tfmt.Fprintf(writer, \"%c %2d\", patternRune, matrix[i+1][0])\n\t\tfor j := range text {\n\t\t\tfmt.Fprintf(writer, \" %2d\", matrix[i+1][j+1])\n\t\t}\n\t\tfmt.Fprintf(writer, \"\\n\")\n\t}\n\n}",
"func NewMatrix4() Matrix4 {\n\n\tmat := Matrix4{\n\t\t{1, 0, 0, 0},\n\t\t{0, 1, 0, 0},\n\t\t{0, 0, 1, 0},\n\t\t{0, 0, 0, 1},\n\t}\n\treturn mat\n\n}",
"func newMat(p C.Mat) Mat {\n\tm := Mat{p: p}\n\tMatProfile.Add(p, 1)\n\treturn m\n}",
"func (m Matrix) Project(u Vec) Vec {\n\treturn Vec{m[0]*u.X + m[2]*u.Y + m[4], m[1]*u.X + m[3]*u.Y + m[5]}\n}",
"func MatrixMode(mode uint32) {\n C.glowMatrixMode(gpMatrixMode, (C.GLenum)(mode))\n}",
"func (leg *Leg) Matrix() math3d.Matrix44 {\n\treturn *math3d.MakeMatrix44(*leg.Origin, *math3d.MakeSingularEulerAngle(math3d.RotationHeading, leg.Angle))\n}",
"func NewRotationMatrix(start Vertex, end Vertex, theta float64) *RotationMatrix {\n\t//Create the struct\n\trotMat := &RotationMatrix{\n\t\tstart: start,\n\t\tend: end,\n\t\tlineVec: end.minus(&start),\n\t}\n\n\t//Store the line mag\n\trotMat.lineMag = rotMat.lineVec.mag()\n\n\t//Compute a unit dir vector\n\tuv := end.minus(&start)\n\tuv.norm()\n\n\t//Precompute some values\n\tcos := float32(math.Cos(theta))\n\tsin := float32(math.Sin(theta))\n\toneMinusCos := float32(1.0 - cos)\n\n\t//Now build the rotation vector\n\trotMat.matrix[0][0] = cos + uv[0]*uv[0]*oneMinusCos\n\trotMat.matrix[0][1] = uv[0]*uv[1]*oneMinusCos - uv[2]*sin\n\trotMat.matrix[0][2] = uv[0]*uv[2]*oneMinusCos + uv[1]*sin\n\n\trotMat.matrix[1][0] = uv[1]*uv[0]*oneMinusCos + uv[2]*sin\n\trotMat.matrix[1][1] = cos + uv[1]*uv[1]*oneMinusCos\n\trotMat.matrix[1][2] = uv[1]*uv[2]*oneMinusCos - uv[0]*sin\n\n\trotMat.matrix[2][0] = uv[2]*uv[0]*oneMinusCos - uv[1]*sin\n\trotMat.matrix[2][1] = uv[2]*uv[1]*oneMinusCos + uv[0]*sin\n\trotMat.matrix[2][2] = cos + uv[2]*uv[2]*oneMinusCos\n\n\treturn rotMat\n}",
"func MatrixRotation(matrix [][]int32, r int32) {\n\trows := len(matrix)\n\tcols := len(matrix[0])\n\n\tl := int(math.Round(math.Min(float64(rows), float64(cols))) / 2.0)\n\n\tfor i := 0; r > 0 && i < l; i++ {\n\t\tlayer := make([]int32, 2*(rows-2*i+cols-2*i-2))\n\t\trot := int(r) % len(layer)\n\t\tif rot == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Walk the matrix clockwise starting from top left and collect the values\n\t\t// for each layer.\n\n\t\tc := 0\n\n\t\t// top left to top right\n\t\tfor j := i; j < cols-i-1; j++ {\n\t\t\tlayer[c] = matrix[i][j]\n\t\t\tc++\n\t\t}\n\t\t// top right to bottom right\n\t\tfor j := i; j < rows-i-1; j++ {\n\t\t\tlayer[c] = matrix[j][cols-i-1]\n\t\t\tc++\n\t\t}\n\t\t// bottom right to bottom left\n\t\tfor j := cols - i - 1; j > i; j-- {\n\t\t\tlayer[c] = matrix[rows-i-1][j]\n\t\t\tc++\n\t\t}\n\t\t// bottom left to top left\n\t\tfor j := rows - i - 1; j > i; j-- {\n\t\t\tlayer[c] = matrix[j][i]\n\t\t\tc++\n\t\t}\n\n\t\t// Left shift values.\n\t\tlayer = append(layer[rot:], layer[:rot]...)\n\n\t\t// Populate the matrix with the values from layer.\n\n\t\tc = 0\n\n\t\t// top left to top right\n\t\tfor j := i; j < cols-i-1; j++ {\n\t\t\tmatrix[i][j] = layer[c]\n\t\t\tc++\n\t\t}\n\t\t// top right to bottom right\n\t\tfor j := i; j < rows-i-1; j++ {\n\t\t\tmatrix[j][cols-i-1] = layer[c]\n\t\t\tc++\n\t\t}\n\t\t// bottom right to bottom left\n\t\tfor j := cols - i - 1; j > i; j-- {\n\t\t\tmatrix[rows-i-1][j] = layer[c]\n\t\t\tc++\n\t\t}\n\t\t// bottom left to top left\n\t\tfor j := rows - i - 1; j > i; j-- {\n\t\t\tmatrix[j][i] = layer[c]\n\t\t\tc++\n\t\t}\n\t}\n\n\t// Print the matrix.\n\tfor i := 0; i < rows; i++ {\n\t\tfor j := 0; j < cols; j++ {\n\t\t\tfmt.Print(matrix[i][j])\n\t\t\tif j < cols-1 {\n\t\t\t\tfmt.Print(\" \")\n\t\t\t}\n\t\t}\n\t\tfmt.Print(\"\\n\")\n\t}\n}",
"func MatrixInvert(mat Matrix) Matrix {\n\tvar result Matrix\n\n\ta00 := mat.M0\n\ta01 := mat.M1\n\ta02 := mat.M2\n\ta03 := mat.M3\n\ta10 := mat.M4\n\ta11 := mat.M5\n\ta12 := mat.M6\n\ta13 := mat.M7\n\ta20 := mat.M8\n\ta21 := mat.M9\n\ta22 := mat.M10\n\ta23 := mat.M11\n\ta30 := mat.M12\n\ta31 := mat.M13\n\ta32 := mat.M14\n\ta33 := mat.M15\n\n\tb00 := a00*a11 - a01*a10\n\tb01 := a00*a12 - a02*a10\n\tb02 := a00*a13 - a03*a10\n\tb03 := a01*a12 - a02*a11\n\tb04 := a01*a13 - a03*a11\n\tb05 := a02*a13 - a03*a12\n\tb06 := a20*a31 - a21*a30\n\tb07 := a20*a32 - a22*a30\n\tb08 := a20*a33 - a23*a30\n\tb09 := a21*a32 - a22*a31\n\tb10 := a21*a33 - a23*a31\n\tb11 := a22*a33 - a23*a32\n\n\t// Calculate the invert determinant (inlined to avoid double-caching)\n\tinvDet := 1.0 / (b00*b11 - b01*b10 + b02*b09 + b03*b08 - b04*b07 + b05*b06)\n\n\tresult.M0 = (a11*b11 - a12*b10 + a13*b09) * invDet\n\tresult.M1 = (-a01*b11 + a02*b10 - a03*b09) * invDet\n\tresult.M2 = (a31*b05 - a32*b04 + a33*b03) * invDet\n\tresult.M3 = (-a21*b05 + a22*b04 - a23*b03) * invDet\n\tresult.M4 = (-a10*b11 + a12*b08 - a13*b07) * invDet\n\tresult.M5 = (a00*b11 - a02*b08 + a03*b07) * invDet\n\tresult.M6 = (-a30*b05 + a32*b02 - a33*b01) * invDet\n\tresult.M7 = (a20*b05 - a22*b02 + a23*b01) * invDet\n\tresult.M8 = (a10*b10 - a11*b08 + a13*b06) * invDet\n\tresult.M9 = (-a00*b10 + a01*b08 - a03*b06) * invDet\n\tresult.M10 = (a30*b04 - a31*b02 + a33*b00) * invDet\n\tresult.M11 = (-a20*b04 + a21*b02 - a23*b00) * invDet\n\tresult.M12 = (-a10*b09 + a11*b07 - a12*b06) * invDet\n\tresult.M13 = (a00*b09 - a01*b07 + a02*b06) * invDet\n\tresult.M14 = (-a30*b03 + a31*b01 - a32*b00) * invDet\n\tresult.M15 = (a20*b03 - a21*b01 + a22*b00) * invDet\n\n\treturn result\n}",
"func PrintSpiralMatrix(matrix [][]uint) {\n\tfirstRow := 0\n\tlastRow := len(matrix) - 1\n\tfirstColumn := 0\n\tlastColumn := len(matrix[0]) - 1\n\n\tfor firstRow < lastRow && firstColumn < lastColumn {\n\t\tfor i := firstColumn; i <= lastColumn; i++ {\n\t\t\tfmt.Println(matrix[firstRow][i])\n\t\t}\n\n\t\tfor i := firstRow + 1; i <= lastRow; i++ {\n\t\t\tfmt.Println(matrix[i][lastColumn])\n\t\t}\n\n\t\tfor i := lastColumn - 1; i >= firstColumn; i-- {\n\t\t\tfmt.Println(matrix[lastRow][i])\n\t\t}\n\n\t\tfor i := lastRow - 1; i > firstColumn; i-- {\n\t\t\tfmt.Println(matrix[i][firstColumn])\n\t\t}\n\n\t\tfirstRow++\n\t\tlastRow--\n\t\tfirstColumn++\n\t\tlastColumn--\n\t}\n}",
"func NewMatrix4Rotate(x, y, z, angle float64) Matrix4 {\n\n\tmat := NewMatrix4()\n\tvector := vector.Vector{x, y, z}.Unit()\n\ts := math.Sin(angle)\n\tc := math.Cos(angle)\n\tm := 1 - c\n\n\tmat[0][0] = m*vector[0]*vector[0] + c\n\tmat[0][1] = m*vector[0]*vector[1] + vector[2]*s\n\tmat[0][2] = m*vector[2]*vector[0] - vector[1]*s\n\n\tmat[1][0] = m*vector[0]*vector[1] - vector[2]*s\n\tmat[1][1] = m*vector[1]*vector[1] + c\n\tmat[1][2] = m*vector[1]*vector[2] + vector[0]*s\n\n\tmat[2][0] = m*vector[2]*vector[0] + vector[1]*s\n\tmat[2][1] = m*vector[1]*vector[2] - vector[0]*s\n\tmat[2][2] = m*vector[2]*vector[2] + c\n\n\treturn mat\n\n}",
"func (m *Mat4) Print(s string) {\n\tif s == \"\" {\n\t\ts = \"Debugging Matrix\"\n\t}\n\tdashes := GetDashedHeader(s)\n\tfmt.Fprintf(debugOut, \"%s\\n\", dashes)\n\tfmt.Fprintf(debugOut, \"%9.3f %9.3f %9.3f %9.3f\\n\", m[0].X, m[1].X, m[2].X, m[3].X)\n\tfmt.Fprintf(debugOut, \"%9.3f %9.3f %9.3f %9.3f\\n\", m[0].Y, m[1].Y, m[2].Y, m[3].Y)\n\tfmt.Fprintf(debugOut, \"%9.3f %9.3f %9.3f %9.3f\\n\", m[0].Z, m[1].Z, m[2].Z, m[3].Z)\n\tfmt.Fprintf(debugOut, \"%9.3f %9.3f %9.3f %9.3f\\n\\n\", m[0].W, m[1].W, m[2].W, m[3].W)\n\t//fmt.Fprintf(debugOut, \"\\t------------------------------------------------------------\\n\")\n}",
"func ProgramUniformMatrix2x4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix2x4fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func Mat3Projection(out []float64, width, height float64) []float64 {\n\tout[0] = 2 / width\n\tout[1] = 0\n\tout[2] = 0\n\tout[3] = 0\n\tout[4] = -2 / height\n\tout[5] = 0\n\tout[6] = -1\n\tout[7] = 1\n\tout[8] = 1\n\treturn out\n}",
"func MatrixTranslate(x, y, z float32) Matrix {\n\treturn NewMatrix(\n\t\t1.0, 0.0, 0.0, x,\n\t\t0.0, 1.0, 0.0, y,\n\t\t0.0, 0.0, 1.0, z,\n\t\t0, 0, 0, 1.0)\n}",
"func NewPlane3D(normal, point *Vector3D.Vector3D) *Plane3D {\n\treturn &Plane3D{\n\t\tnormal: normal,\n\t\tpoint: point,\n\t\ttransform: NewTransform3(),\n\t\tisNormalFlipped: false,\n\t}\n}",
"func (t *TriDense) DiagView() Diagonal {\n\tif t.mat.Diag == blas.Unit {\n\t\tpanic(\"mat: cannot take view of Unit diagonal\")\n\t}\n\tn := t.mat.N\n\treturn &DiagDense{\n\t\tmat: blas64.Vector{\n\t\t\tN: n,\n\t\t\tInc: t.mat.Stride + 1,\n\t\t\tData: t.mat.Data[:(n-1)*t.mat.Stride+n],\n\t\t},\n\t}\n}",
"func (m Matrix) Transpose() Matrix {\n\tfor i := 0; i < 3; i++ {\n\t\tfor j := i + 1; j < 4; j++ {\n\t\t\tm[i][j], m[j][i] = m[j][i], m[i][j]\n\t\t}\n\t}\n\treturn m\n}",
"func (q Quat) Mat4f() Mat4f {\n\treturn Mat4f{\n\t\t1 - 2*q.Y*q.Y - 2*q.Z*q.Z, 2*q.X*q.Y + 2*q.W*q.Z, 2*q.X*q.Z - 2*q.W*q.Y, 0,\n\t\t2*q.X*q.Y - 2*q.W*q.Z, 1 - 2*q.X*q.X - 2*q.Z*q.Z, 2*q.Y*q.Z + 2*q.W*q.X, 0,\n\t\t2*q.X*q.Z + 2*q.W*q.Y, 2*q.Y*q.Z - 2*q.W*q.X, 1 - 2*q.X*q.X - 2*q.Y*q.Y, 0,\n\t\t0, 0, 0, 1,\n\t}\n}",
"func (t *TriDense) T() Matrix {\n\treturn Transpose{t}\n}",
"func printThreeByFourMatrix(inputMatrix [3][4]int) {\n\n\trowLength := len(inputMatrix)\n\tcolumnLength := len(inputMatrix[0])\n\n\tfor i := 0; i < rowLength; i++ {\n\t\tfor j := 0; j < columnLength; j++ {\n\t\t\tfmt.Printf(\"%5d \", inputMatrix[i][j])\n\t\t}\n\t\tfmt.Println()\n\t}\n\n}",
"func pToV(point g.Point) p.Vec {\n\treturn p.V(float64(point.X), float64(point.Y))\n}",
"func (matrix Matrix4) Transposed() Matrix4 {\n\n\tnew := NewMatrix4()\n\n\tfor i := 0; i < 4; i++ {\n\t\tfor j := 0; j < 4; j++ {\n\t\t\tnew[i][j] = matrix[j][i]\n\t\t}\n\t}\n\n\treturn new\n\n}",
"func (q1 Quat) Mat4() Mat4 {\n\tw, x, y, z := q1.W, q1.V[0], q1.V[1], q1.V[2]\n\treturn Mat4{\n\t\t1 - 2*y*y - 2*z*z, 2*x*y + 2*w*z, 2*x*z - 2*w*y, 0,\n\t\t2*x*y - 2*w*z, 1 - 2*x*x - 2*z*z, 2*y*z + 2*w*x, 0,\n\t\t2*x*z + 2*w*y, 2*y*z - 2*w*x, 1 - 2*x*x - 2*y*y, 0,\n\t\t0, 0, 0, 1,\n\t}\n}",
"func (v *Vec4) Transform(m *Mat4) {\n\tvar t Vec4\n\tt.Assign(v)\n\n\tv.X = t.X*m[0] + t.Y*m[4] + t.Z*m[8] + t.W*m[12]\n\tv.Y = t.X*m[1] + t.Y*m[5] + t.Z*m[9] + t.W*m[13]\n\tv.Z = t.X*m[2] + t.Y*m[6] + t.Z*m[10] + t.W*m[14]\n\tv.W = t.X*m[3] + t.Y*m[7] + t.Z*m[11] + t.W*m[15]\n}",
"func projectPoint(\n\tx, y, z float64, // 3d point to project\n\tw, h, f float64, // width, height, focal\n\tscale float64, // scale\n) (px, py float64) { // projected point\n\tx, y, z = x*scale*f, y*scale*f, z*scale*f\n\tzz := z + f\n\tif zz == 0 {\n\t\tzz = math.SmallestNonzeroFloat64\n\t}\n\tpx = x*(f/zz) + w/2\n\tpy = y*(f/zz) - h/2\n\tpy *= -1\n\treturn\n}",
"func MatrixMode(mode uint32) {\n\tsyscall.Syscall(gpMatrixMode, 1, uintptr(mode), 0, 0)\n}",
"func MatrixDiagPart(scope *Scope, input tf.Output) (diagonal tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"MatrixDiagPart\",\n\t\tInput: []tf.Input{\n\t\t\tinput,\n\t\t},\n\t}\n\top := scope.AddOperation(opspec)\n\treturn op.Output(0)\n}",
"func invertMatrix(matrix []byte, k int) error {\n\tpivot_searcher := newPivotSearcher(k)\n\tindxc := make([]int, k)\n\tindxr := make([]int, k)\n\tid_row := make([]byte, k)\n\n\tfor col := 0; col < k; col++ {\n\t\ticol, irow, err := pivot_searcher.search(col, matrix)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif irow != icol {\n\t\t\tfor i := 0; i < k; i++ {\n\t\t\t\tswap(&matrix[irow*k+i], &matrix[icol*k+i])\n\t\t\t}\n\t\t}\n\n\t\tindxr[col] = irow\n\t\tindxc[col] = icol\n\t\tpivot_row := matrix[icol*k:][:k]\n\t\tc := pivot_row[icol]\n\n\t\tif c == 0 {\n\t\t\treturn Error.New(\"singular matrix\")\n\t\t}\n\n\t\tif c != 1 {\n\t\t\tc = gf_inverse[c]\n\t\t\tpivot_row[icol] = 1\n\t\t\tmul_c := gf_mul_table[c][:]\n\n\t\t\tfor i := 0; i < k; i++ {\n\t\t\t\tpivot_row[i] = mul_c[pivot_row[i]]\n\t\t\t}\n\t\t}\n\n\t\tid_row[icol] = 1\n\t\tif !bytes.Equal(pivot_row, id_row) {\n\t\t\tp := matrix\n\t\t\tfor i := 0; i < k; i++ {\n\t\t\t\tif i != icol {\n\t\t\t\t\tc = p[icol]\n\t\t\t\t\tp[icol] = 0\n\t\t\t\t\taddmul(p[:k], pivot_row, c)\n\t\t\t\t}\n\t\t\t\tp = p[k:]\n\t\t\t}\n\t\t}\n\n\t\tid_row[icol] = 0\n\t}\n\n\tfor i := 0; i < k; i++ {\n\t\tif indxr[i] != indxc[i] {\n\t\t\tfor row := 0; row < k; row++ {\n\t\t\t\tswap(&matrix[row*k+indxr[i]], &matrix[row*k+indxc[i]])\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func MatInv(m u.Matrix) (u.Matrix, error) {\n\tvar (\n\t\terr = m.SetSize()\n\t\tresultMat u.Matrix\n\t\tmv = m.Value\n\t)\n\tif err != nil {\n\t\treturn u.Matrix{}, err\n\t}\n\n\tif m.ColsNum == 2 && m.RowsNum == 2 {\n\t\tnm := u.Matrix{\n\t\t\tRowsNum: 2,\n\t\t\tColsNum: 2,\n\t\t\tValue: u.MatVal{\n\t\t\t\t{mv[1][1], -mv[0][1]},\n\t\t\t\t{-mv[1][0], mv[0][0]},\n\t\t\t},\n\t\t}\n\n\t\tdetM := mv[0][0]*mv[1][1] - mv[0][1]*mv[1][0]\n\t\tif detM == 0 {\n\t\t\treturn u.Matrix{}, errors.New(\"matrix is singular, does not have an inverse\")\n\t\t}\n\n\t\treturn mult.ScalarMult(1/detM, nm), nil\n\t}\n\n\t// matrix of minors\n\tresultMat = u.PopulateNewMat(u.MatPopConfig{\n\t\tMainMat: m,\n\t\tNewRows: m.RowsNum,\n\t\tNewCols: m.ColsNum,\n\t\tAction: func(mv u.MatVal, r, c int, secMvs []u.MatVal) float64 {\n\t\t\tminor := u.GetMinor(m, r, c)\n\t\t\tdetMinor, _ := det.MatDet(minor) // add error handling in the future\n\n\t\t\treturn math.Pow(-1, float64(r+c)) * detMinor\n\t\t},\n\t})\n\n\tresultMat, err = trans.MatTrans(resultMat)\n\tif err != nil {\n\t\treturn u.Matrix{}, err\n\t}\n\n\tdetM, err := det.MatDet(m)\n\tif err != nil {\n\t\treturn u.Matrix{}, err\n\t} else if detM == 0 {\n\t\treturn u.Matrix{}, errors.New(\"matrix is singular, does not have an inverse\")\n\t}\n\n\tresultMat = mult.ScalarMult(1/detM, resultMat)\n\n\treturn resultMat, nil\n}",
"func (t *Transform) Matrix(a, b, c, d, e, f float64) {\n\tout := fmt.Sprintf(\"matrix(%g,%g,%g,%g,%g,%g)\", a, b, c, d, e, f)\n\n\tt.transforms = append(t.transforms, out)\n}",
"func p(v *Vertex, depth int, showPointer bool) string {\n\tstr := \"\"\n\tlevels := \"\"\n\t// Calculate our depth string\n\t// levels := strings.Repeat(\" \", depth)\n\tfor i := depth; i >= 0; i-- {\n\t\tlevels = fmt.Sprintf(\"%s%s\", levels, \" \")\n\t}\n\tstr = fmt.Sprintf(\"%s\\n\", str)\n\tstr = fmt.Sprintf(\"%s%sDepth : %d\\n\", str, levels, depth)\n\tstr = fmt.Sprintf(\"%s%sName : %s\\n\", str, levels, v.Name)\n\tstr = fmt.Sprintf(\"%s%sValue : %d\\n\", str, levels, v.Value)\n\tif showPointer == true {\n\t\tstr = fmt.Sprintf(\"%s%sLocation : %p\\n\", str, levels, v)\n\t}\n\tstr = fmt.Sprintf(\"%s\\n\", str)\n\treturn str\n}",
"func ProgramUniformMatrix3x4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix3x4fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func GetMatrix(x, y int, v int64) (m *Matrix) {\n\tm = &Matrix{}\n\tm.x = uint64(x)\n\tm.y = uint64(y)\n\tm.Init(v)\n\tm.ResetFocus()\n\treturn\n}",
"func (p *PointProj) FromAffine(p1 *PointAffine) *PointProj {\n\tp.X.Set(&p1.X)\n\tp.Y.Set(&p1.Y)\n\tp.Z.SetOne()\n\treturn p\n}",
"func (matrix Matrix4) Mult(other Matrix4) Matrix4 {\n\n\tnewMat := NewMatrix4()\n\n\tnewMat[0][0] = matrix[0][0]*other[0][0] + matrix[0][1]*other[1][0] + matrix[0][2]*other[2][0] + matrix[0][3]*other[3][0]\n\tnewMat[1][0] = matrix[1][0]*other[0][0] + matrix[1][1]*other[1][0] + matrix[1][2]*other[2][0] + matrix[1][3]*other[3][0]\n\tnewMat[2][0] = matrix[2][0]*other[0][0] + matrix[2][1]*other[1][0] + matrix[2][2]*other[2][0] + matrix[2][3]*other[3][0]\n\tnewMat[3][0] = matrix[3][0]*other[0][0] + matrix[3][1]*other[1][0] + matrix[3][2]*other[2][0] + matrix[3][3]*other[3][0]\n\n\tnewMat[0][1] = matrix[0][0]*other[0][1] + matrix[0][1]*other[1][1] + matrix[0][2]*other[2][1] + matrix[0][3]*other[3][1]\n\tnewMat[1][1] = matrix[1][0]*other[0][1] + matrix[1][1]*other[1][1] + matrix[1][2]*other[2][1] + matrix[1][3]*other[3][1]\n\tnewMat[2][1] = matrix[2][0]*other[0][1] + matrix[2][1]*other[1][1] + matrix[2][2]*other[2][1] + matrix[2][3]*other[3][1]\n\tnewMat[3][1] = matrix[3][0]*other[0][1] + matrix[3][1]*other[1][1] + matrix[3][2]*other[2][1] + matrix[3][3]*other[3][1]\n\n\tnewMat[0][2] = matrix[0][0]*other[0][2] + matrix[0][1]*other[1][2] + matrix[0][2]*other[2][2] + matrix[0][3]*other[3][2]\n\tnewMat[1][2] = matrix[1][0]*other[0][2] + matrix[1][1]*other[1][2] + matrix[1][2]*other[2][2] + matrix[1][3]*other[3][2]\n\tnewMat[2][2] = matrix[2][0]*other[0][2] + matrix[2][1]*other[1][2] + matrix[2][2]*other[2][2] + matrix[2][3]*other[3][2]\n\tnewMat[3][2] = matrix[3][0]*other[0][2] + matrix[3][1]*other[1][2] + matrix[3][2]*other[2][2] + matrix[3][3]*other[3][2]\n\n\tnewMat[0][3] = matrix[0][0]*other[0][3] + matrix[0][1]*other[1][3] + matrix[0][2]*other[2][3] + matrix[0][3]*other[3][3]\n\tnewMat[1][3] = matrix[1][0]*other[0][3] + matrix[1][1]*other[1][3] + matrix[1][2]*other[2][3] + matrix[1][3]*other[3][3]\n\tnewMat[2][3] = matrix[2][0]*other[0][3] + matrix[2][1]*other[1][3] + matrix[2][2]*other[2][3] + matrix[2][3]*other[3][3]\n\tnewMat[3][3] = matrix[3][0]*other[0][3] + matrix[3][1]*other[1][3] + matrix[3][2]*other[2][3] + matrix[3][3]*other[3][3]\n\n\treturn newMat\n\n}",
"func New(mdef MatrixDefiner, amdef AnonymousMatrixDefiner, sdef ScalarDefiner) *E {\n\te := &E{mdef: mdef, amdef: amdef, sdef: sdef}\n\n\tfor r := 'A'; r <= 'Z'; r++ {\n\t\te.mvars = append(e.mvars, matrix.New(3, 3))\n\t}\n\n\tfor r := 'a'; r <= 'z'; r++ {\n\t\te.svars = append(e.svars, matrix.NewScalarFrac(0))\n\t}\n\n\treturn e\n}",
"func newPlane(mk, mdl string) *plane {\n\tp := &plane{}\n\tp.make = mk\n\tp.model = mdl\n\treturn p\n}",
"func (m Matrix) Decompose() []float32 {\n\treturn []float32{\n\t\tm.M0, m.M1, m.M2, m.M3, m.M4, m.M5, m.M6, m.M7, m.M8, m.M9,\n\t\tm.M10, m.M11, m.M12, m.M13, m.M14, m.M15,\n\t}\n}",
"func (m Mat2f) Diag() Vec2f {\n\treturn Vec2f{m[0], m[3]}\n}"
] | [
"0.77690375",
"0.7601805",
"0.7248959",
"0.6781702",
"0.6759459",
"0.6070791",
"0.5912761",
"0.57179683",
"0.55036145",
"0.5496717",
"0.5453301",
"0.54473794",
"0.5251755",
"0.5088271",
"0.50508565",
"0.49665186",
"0.48535895",
"0.48271555",
"0.46785966",
"0.466484",
"0.4612361",
"0.46031985",
"0.45977736",
"0.4594498",
"0.45836926",
"0.45279953",
"0.45083934",
"0.44858357",
"0.44630182",
"0.44393364",
"0.4371735",
"0.42758045",
"0.42671427",
"0.4236779",
"0.42347512",
"0.42249593",
"0.42138064",
"0.42102838",
"0.41796824",
"0.41684362",
"0.41599083",
"0.41366613",
"0.41286582",
"0.41218957",
"0.40933126",
"0.408459",
"0.40843058",
"0.40593246",
"0.40535653",
"0.40415013",
"0.40163738",
"0.40096045",
"0.39952096",
"0.39944288",
"0.3993275",
"0.39840433",
"0.39817804",
"0.3969278",
"0.39679137",
"0.3959563",
"0.3959215",
"0.39574227",
"0.39507017",
"0.3945157",
"0.39192745",
"0.39172098",
"0.38883606",
"0.3884674",
"0.38831732",
"0.38819623",
"0.38818398",
"0.3847306",
"0.38384637",
"0.38382503",
"0.38273084",
"0.38154426",
"0.381159",
"0.3805977",
"0.38037634",
"0.38033545",
"0.38024896",
"0.37995037",
"0.37912276",
"0.37881643",
"0.3782986",
"0.37800467",
"0.37707418",
"0.3757339",
"0.37564668",
"0.37521043",
"0.37455237",
"0.3745508",
"0.373874",
"0.37253416",
"0.37214077",
"0.3718631",
"0.37178695",
"0.37168026",
"0.37115383",
"0.3708433"
] | 0.74354875 | 2 |
Frustum generates a Frustum Matrix. | func Frustum(left, right, bottom, top, near, far float64) Mat4 {
rml, tmb, fmn := (right - left), (top - bottom), (far - near)
A, B, C, D := (right+left)/rml, (top+bottom)/tmb, -(far+near)/fmn, -(2*far*near)/fmn
return Mat4{float64((2. * near) / rml), 0, 0, 0, 0, float64((2. * near) / tmb), 0, 0, float64(A), float64(B), float64(C), -1, 0, 0, float64(D), 0}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func MatrixFrustum(left, right, bottom, top, near, far float32) Matrix {\n\tvar result Matrix\n\n\trl := right - left\n\ttb := top - bottom\n\tfn := far - near\n\n\tresult.M0 = (near * 2.0) / rl\n\tresult.M1 = 0.0\n\tresult.M2 = 0.0\n\tresult.M3 = 0.0\n\n\tresult.M4 = 0.0\n\tresult.M5 = (near * 2.0) / tb\n\tresult.M6 = 0.0\n\tresult.M7 = 0.0\n\n\tresult.M8 = right + left/rl\n\tresult.M9 = top + bottom/tb\n\tresult.M10 = -(far + near) / fn\n\tresult.M11 = -1.0\n\n\tresult.M12 = 0.0\n\tresult.M13 = 0.0\n\tresult.M14 = -(far * near * 2.0) / fn\n\tresult.M15 = 0.0\n\n\treturn result\n}",
"func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tC.glowFrustum(gpFrustum, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tsyscall.Syscall6(gpFrustum, 6, uintptr(math.Float64bits(left)), uintptr(math.Float64bits(right)), uintptr(math.Float64bits(bottom)), uintptr(math.Float64bits(top)), uintptr(math.Float64bits(zNear)), uintptr(math.Float64bits(zFar)))\n}",
"func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n C.glowFrustum(gpFrustum, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func NewMatrixFrustum(left, right, bottom, top, near, far float64) Matrix {\n\trl := (right - left)\n\ttb := (top - bottom)\n\tfn := (far - near)\n\n\treturn Matrix{\n\t\tM0: float32((near * 2) / rl),\n\t\tM1: 0,\n\t\tM2: 0,\n\t\tM3: 0,\n\t\tM4: 0,\n\t\tM5: float32((near * 2) / tb),\n\t\tM6: 0,\n\t\tM7: 0,\n\t\tM8: float32((right + left) / rl),\n\t\tM9: float32((top + bottom) / tb),\n\t\tM10: float32(-(far + near) / fn),\n\t\tM11: -1,\n\t\tM12: 0,\n\t\tM13: 0,\n\t\tM14: float32(-(far * near * 2) / fn),\n\t\tM15: 0,\n\t}\n}",
"func MatrixProjection(fov FovPort, znear, zfar float32, rightHanded bool) Matrix4f {\n\tif rightHanded {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 1))\n\t} else {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 0))\n\t}\n}",
"func MatrixPerspective(fovy, aspect, near, far float32) Matrix {\n\ttop := near * float32(math.Tan(float64(fovy*Pi)/360.0))\n\tright := top * aspect\n\n\treturn MatrixFrustum(-right, right, -top, top, near, far)\n}",
"func (me *Frustum) UpdatePlanesGH(mat *unum.Mat4, normalize bool) {\n\t// Left clipping plane\n\tme.Planes[0].X = mat[12] + mat[0]\n\tme.Planes[0].Y = mat[13] + mat[1]\n\tme.Planes[0].Z = mat[14] + mat[2]\n\tme.Planes[0].W = mat[15] + mat[3]\n\t// Right clipping plane\n\tme.Planes[1].X = mat[12] - mat[0]\n\tme.Planes[1].Y = mat[13] - mat[1]\n\tme.Planes[1].Z = mat[14] - mat[2]\n\tme.Planes[1].W = mat[15] - mat[3]\n\t// Bottom clipping plane\n\tme.Planes[2].X = mat[12] + mat[4]\n\tme.Planes[2].Y = mat[13] + mat[5]\n\tme.Planes[2].Z = mat[14] + mat[6]\n\tme.Planes[2].W = mat[15] + mat[7]\n\t// Top clipping plane\n\tme.Planes[3].X = mat[12] - mat[4]\n\tme.Planes[3].Y = mat[13] - mat[5]\n\tme.Planes[3].Z = mat[14] - mat[6]\n\tme.Planes[3].W = mat[15] - mat[7]\n\t// Near clipping plane\n\tme.Planes[4].X = mat[12] + mat[8]\n\tme.Planes[4].Y = mat[13] + mat[9]\n\tme.Planes[4].Z = mat[14] + mat[10]\n\tme.Planes[4].W = mat[15] + mat[11]\n\t// Far clipping plane\n\tme.Planes[5].X = mat[12] - mat[8]\n\tme.Planes[5].Y = mat[13] - mat[9]\n\tme.Planes[5].Z = mat[14] - mat[10]\n\tme.Planes[5].W = mat[15] - mat[11]\n\tif normalize {\n\t\tfor i := 0; i < len(me.Planes); i++ {\n\t\t\tme.Planes[i].Normalize()\n\t\t}\n\t}\n}",
"func NewMatrixPerspective(fovy, aspect, near, far float64) Matrix {\n\ttop := near * math.Tan(fovy*0.5)\n\tright := top * aspect\n\treturn NewMatrixFrustum(-right, right, -top, top, near, far)\n}",
"func (cam *Camera) SetupViewProjection() {\n\tx_ratio := cam.Width / cam.Height\n\tcam.View = PerspectiveFrustum(cam.YFov, x_ratio, cam.Near, cam.Far)\n\tcam.Projection = cam.View.M44()\n}",
"func NewProjectionPerspective(fovy, near, far, viewWidth, viewHeight float64) Matrix4 {\n\n\taspect := viewWidth / viewHeight\n\n\tt := math.Tan(fovy * math.Pi / 360)\n\tb := -t\n\tr := t * aspect\n\tl := -r\n\n\t// l := -viewWidth / 2\n\t// r := viewWidth / 2\n\t// t := -viewHeight / 2\n\t// b := viewHeight / 2\n\n\treturn Matrix4{\n\t\t{(2 * near) / (r - l), 0, (r + l) / (r - l), 0},\n\t\t{0, (2 * near) / (t - b), (t + b) / (t - b), 0},\n\t\t{0, 0, -((far + near) / (far - near)), -((2 * far * near) / (far - near))},\n\t\t{0, 0, -1, 0},\n\t}\n\n}",
"func (c *Camera) Matrix() mgl32.Mat4 {\n\treturn mgl32.LookAtV(c.pos, c.pos.Add(c.front), c.up)\n}",
"func UniformMatrix2x4fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix2x4fv(gpUniformMatrix2x4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2x4fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix2x4fv(gpUniformMatrix2x4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2x4fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix2x4fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func Perspective(fovy, aspect, zNear, zFar gl.Float) *Mat4 {\n\tf := 1 / (TanGL(fovy / 2.0))\n\tm := IdentMat4()\n\tm[0].X = f / aspect\n\tm[1].Y = f\n\tm[2].Z = (zFar + zNear) / (zNear - zFar)\n\tm[3].W = 0\n\tm[2].W = -1\n\tm[3].Z = (2 * zFar * zNear) / (zNear - zFar)\n\treturn m\n}",
"func UniformMatrix4fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix4fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func (c *Camera) SetPerspective(angle, ratio, zNear, zFar float32) {\n\tglm.PerspectiveIn(angle, ratio, zNear, zFar, &c.Projection)\n}",
"func UniformMatrix3x4fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix3x4fv(gpUniformMatrix3x4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix3x4fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix3x4fv(gpUniformMatrix3x4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func (debugging *debuggingOpenGL) UniformMatrix4fv(location int32, transpose bool, value *[16]float32) {\n\tdebugging.recordEntry(\"UniformMatrix4fv\", location, transpose, value)\n\tdebugging.gl.UniformMatrix4fv(location, transpose, value)\n\tdebugging.recordExit(\"UniformMatrix4fv\")\n}",
"func UniformMatrix4fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix4fv(gpUniformMatrix4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix4fv(gpUniformMatrix4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix3x4fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix3x4fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func UniformMatrix4fv(location UniformLocation, transpose bool, value []float32) {\n\tgl.UniformMatrix4fv(int32(location), 1, transpose, &value[0])\n}",
"func (native *OpenGL) UniformMatrix4fv(location int32, transpose bool, value *[16]float32) {\n\tcount := int32(1)\n\tgl.UniformMatrix4fv(location, count, transpose, &value[0])\n}",
"func UniformMatrix4x2fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix4x2fv(gpUniformMatrix4x2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4x2fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix4x2fv(gpUniformMatrix4x2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4x3fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix4x3fv(gpUniformMatrix4x3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4x3fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix4x3fv(gpUniformMatrix4x3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func NewMatrix4Perspective(left, right, top, bottom, near, far float32) *Matrix4 {\n\tx := 2 * near / (right - left)\n\ty := 2 * near / (top - bottom)\n\n\ta := (right + left) / (right - left)\n\tb := (top + bottom) / (top - bottom)\n\tc := -(far + near) / (far - near)\n\td := -2 * far * near / (far - near)\n\n\treturn &Matrix4{\n\t\telements: [16]float32{\n\t\t\tx, 0, 0, 0,\n\t\t\t0, y, 0, 0,\n\t\t\ta, b, c, -1,\n\t\t\t0, 0, d, 0,\n\t\t},\n\t}\n}",
"func UniformMatrix4x2fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix4x2fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func UniformMatrix3x4fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix3x4fv(gpUniformMatrix3x4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func (native *OpenGL) GLUniformMatrix4fv(location int32, count int32, transpose bool, value *float32) {\n\tgl.UniformMatrix4fv(location, count, transpose, value)\n}",
"func UniformMatrix2x4fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix2x4fv(gpUniformMatrix2x4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix4fv(gpUniformMatrix4fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4x3fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix4x3fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func UniformMatrix4x3fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix4x3fv(gpUniformMatrix4x3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4x2fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix4x2fv(gpUniformMatrix4x2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix4fv(location Int, count Sizei, transpose Boolean, value []Float) {\n\tclocation, _ := (C.GLint)(location), cgoAllocsUnknown\n\tccount, _ := (C.GLsizei)(count), cgoAllocsUnknown\n\tctranspose, _ := (C.GLboolean)(transpose), cgoAllocsUnknown\n\tcvalue, _ := (*C.GLfloat)(unsafe.Pointer((*sliceHeader)(unsafe.Pointer(&value)).Data)), cgoAllocsUnknown\n\tC.glUniformMatrix4fv(clocation, ccount, ctranspose, cvalue)\n}",
"func (gl *WebGL) UniformMatrix4fv(location WebGLUniformLocation, matrix Matrix) {\n\tbuffer := matrix.DecomposePointer()\n\ttypedBuffer := sliceToTypedArray([]float32((*buffer)[:]))\n\tgl.context.Call(\"uniformMatrix4fv\", location, false, typedBuffer)\n}",
"func ProgramUniformMatrix2x4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix2x4fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func (q Quat) Mat4f() Mat4f {\n\treturn Mat4f{\n\t\t1 - 2*q.Y*q.Y - 2*q.Z*q.Z, 2*q.X*q.Y + 2*q.W*q.Z, 2*q.X*q.Z - 2*q.W*q.Y, 0,\n\t\t2*q.X*q.Y - 2*q.W*q.Z, 1 - 2*q.X*q.X - 2*q.Z*q.Z, 2*q.Y*q.Z + 2*q.W*q.X, 0,\n\t\t2*q.X*q.Z + 2*q.W*q.Y, 2*q.Y*q.Z - 2*q.W*q.X, 1 - 2*q.X*q.X - 2*q.Y*q.Y, 0,\n\t\t0, 0, 0, 1,\n\t}\n}",
"func Perspective(fovy, aspect, near, far float64) Mat4 {\n\t// fovy = (fovy * math.Pi) / 180.0 // convert from degrees to radians\n\tnmf, f := near-far, float64(1./math.Tan(float64(fovy)/2.0))\n\n\treturn Mat4{float64(f / aspect), 0, 0, 0, 0, float64(f), 0, 0, 0, 0, float64((near + far) / nmf), -1, 0, 0, float64((2. * far * near) / nmf), 0}\n}",
"func NewCamera(from, to, vup Vec3, vfov, aspect float64) Camera {\n\ttheta := vfov * math.Pi / 180\n\thalfheight := math.Tan(theta / 2)\n\thalfwidth := aspect * halfheight\n\tw := from.Sub(to).Unit()\n\tu := vup.Cross(w).Unit()\n\tv := w.Cross(u)\n\treturn Camera{\n\t\tOrigin: from,\n\t\tBottomLeft: from.Sub(u.ScalarMul(halfwidth)).Sub(v.ScalarMul(halfheight)).Sub(w),\n\t\tHorizontal: u.ScalarMul(2 * halfwidth),\n\t\tVertical: v.ScalarMul(2 * halfheight),\n\t}\n}",
"func (native *OpenGL) UniformMatrix3fv(location int32, count int32, transpose bool, value *float32) {\n\tgl.UniformMatrix3fv(location, count, transpose, value)\n}",
"func UniformMatrix3x2fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix3x2fv(gpUniformMatrix3x2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func NewPerspectiveCamera(fov float64, aspect float64, near float64, far float64) PerspectiveCamera {\n\treturn &perspectiveCameraImp{threejs.NewObject3DFromJSValue(threejs.Threejs(\"PerspectiveCamera\").New(fov, aspect, near, far))}\n}",
"func UniformMatrix2fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix2fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func UniformMatrix2fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix2fv(gpUniformMatrix2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix2fv(gpUniformMatrix2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func ProgramUniformMatrix4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix4fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func (m Mat2f) Mat4f() Mat4f {\n\tcol0, col1 := m.Cols()\n\treturn Mat4fFromCols(\n\t\tcol0.Vec4f(0, 0),\n\t\tcol1.Vec4f(0, 0),\n\t\tVec4f{0, 0, 1, 0},\n\t\tVec4f{0, 0, 0, 1},\n\t)\n}",
"func UniformMatrix2fv(location UniformLocation, transpose bool, value []float32) {\n\tgl.UniformMatrix2fv(int32(location), 1, transpose, &value[0])\n}",
"func UniformMatrix3x2fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix3x2fv(gpUniformMatrix3x2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix3x2fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix3x2fv(gpUniformMatrix3x2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2x3fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix2x3fv(gpUniformMatrix2x3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2x3fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix2x3fv(gpUniformMatrix2x3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix3x2fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix3x2fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func ProgramUniformMatrix3x4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix3x4fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func UniformMatrix4fv(dst Uniform, src []float32) {\n\tgl.UniformMatrix4fv(dst.Value, int32(len(src)/(4*4)), false, &src[0])\n}",
"func UniformMatrix3fv(location UniformLocation, transpose bool, value []float32) {\n\tgl.UniformMatrix3fv(int32(location), 1, transpose, &value[0])\n}",
"func (pc *perspectiveCameraImp) ProjectionMatrixInverse() *threejs.Matrix4 {\n\treturn &threejs.Matrix4{Value: pc.JSValue().Get(\"projectionMatrixInverse\")}\n}",
"func UniformMatrix2fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix2fv(gpUniformMatrix2fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2x3fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix2x3fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func UniformMatrix3fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix3fv(gpUniformMatrix3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix3fv(location int32, count int32, transpose bool, value *float32) {\n\tC.glowUniformMatrix3fv(gpUniformMatrix3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix3fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix3fv(gpUniformMatrix3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2x3fv(location int32, count int32, transpose bool, value *float32) {\n C.glowUniformMatrix2x3fv(gpUniformMatrix2x3fv, (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func (c *Camera) debugUpdate() {\n\tc.State = gfx.NewState()\n\tc.Shader = shader\n\tc.State.FaceCulling = gfx.BackFaceCulling\n\n\tm := gfx.NewMesh()\n\tm.Primitive = gfx.Lines\n\n\tm.Vertices = []gfx.Vec3{}\n\tm.Colors = []gfx.Color{}\n\n\tnear := float32(c.Near)\n\tfar := float32(c.Far)\n\n\tif c.Ortho {\n\t\twidth := float32(c.View.Dx())\n\t\theight := float32(c.View.Dy())\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{width / 2, 0, height / 2},\n\n\t\t\t// Near\n\t\t\t{0, near, 0},\n\t\t\t{width, near, 0},\n\t\t\t{width, near, height},\n\t\t\t{0, near, height},\n\n\t\t\t// Far\n\t\t\t{0, far, 0},\n\t\t\t{width, far, 0},\n\t\t\t{width, far, height},\n\t\t\t{0, far, height},\n\n\t\t\t{width / 2, far, height / 2},\n\n\t\t\t// Up\n\t\t\t{0, near, height},\n\t\t\t{0, near, height},\n\t\t\t{width, near, height},\n\t\t}\n\t} else {\n\t\tratio := float32(c.View.Dx()) / float32(c.View.Dy())\n\t\tfovRad := c.FOV / 180 * math.Pi\n\n\t\thNear := float32(2 * math.Tan(fovRad/2) * c.Near)\n\t\twNear := hNear * ratio\n\n\t\thFar := float32(2 * math.Tan(fovRad/2) * c.Far)\n\t\twFar := hFar * ratio\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{0, 0, 0},\n\n\t\t\t// Near\n\t\t\t{-wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, hNear / 2},\n\t\t\t{-wNear / 2, near, hNear / 2},\n\n\t\t\t// Far\n\t\t\t{-wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, hFar / 2},\n\t\t\t{-wFar / 2, far, hFar / 2},\n\n\t\t\t{0, far, 0},\n\n\t\t\t// Up\n\t\t\t{0, near, hNear},\n\t\t\t{-wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t\t{wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t}\n\t}\n\n\tm.Colors = []gfx.Color{\n\t\t{1, 1, 1, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 1, 1, 1},\n\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t}\n\n\tm.Indices = []uint32{\n\t\t// From 0 to near plane\n\t\t0, 1,\n\t\t0, 2,\n\t\t0, 3,\n\t\t0, 4,\n\n\t\t// Near plane\n\t\t1, 2,\n\t\t2, 3,\n\t\t3, 4,\n\t\t4, 1,\n\n\t\t// Far plane\n\t\t5, 6,\n\t\t6, 7,\n\t\t7, 8,\n\t\t8, 5,\n\n\t\t// Lines from near to far plane\n\t\t1, 5,\n\t\t2, 6,\n\t\t3, 7,\n\t\t4, 8,\n\n\t\t0, 9,\n\n\t\t// Up\n\t\t10, 11,\n\t\t11, 12,\n\t\t12, 10,\n\t}\n\n\tc.Meshes = []*gfx.Mesh{m}\n}",
"func MToF(m Meter) Foot { return Foot(m / 0.3048) }",
"func (renderer *SimpleMatrixRenderer) Render() {\n\trenderer.renderCharacter(\"\\n\")\n\n\tfor row := 0; row < renderer.Matrix.Height; row++ {\n\t\tfor col := 0; col < renderer.Matrix.Width; col++ {\n\t\t\tif !renderer.Matrix.IsFieldOccupied(row, col) {\n\t\t\t\trenderer.renderUnoccupiedField()\n\t\t\t} else {\n\t\t\t\trenderer.renderOccupiedFieldAtCurrentCursorPos(row, col)\n\t\t\t}\n\t\t}\n\n\t\trenderer.renderCharacter(\"\\n\")\n\t}\n\n\trenderer.renderCharacter(\"\\n\")\n}",
"func ProgramUniformMatrix4x2fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix4x2fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func DepthRangef(n Float, f Float) {\n\tcn, _ := (C.GLfloat)(n), cgoAllocsUnknown\n\tcf, _ := (C.GLfloat)(f), cgoAllocsUnknown\n\tC.glDepthRangef(cn, cf)\n}",
"func (c *Camera) updateFrustrum() {\n\tvar v types.Vec4\n\tinvProjViewMat := c.InvViewProjMat()\n\n\tvar yUp float32 = 1.0\n\tif c.InvertY {\n\t\tyUp = -1.0\n\t}\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(-1, yUp, -1, 1))\n\tc.Frustrum[0] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(1, yUp, -1, 1))\n\tc.Frustrum[1] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(-1, -yUp, -1, 1))\n\tc.Frustrum[2] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(1, -yUp, -1, 1))\n\tc.Frustrum[3] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n}",
"func ProgramUniformMatrix2x4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tC.glowProgramUniformMatrix2x4fv(gpProgramUniformMatrix2x4fv, (C.GLuint)(program), (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func ProgramUniformMatrix2x4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tC.glowProgramUniformMatrix2x4fv(gpProgramUniformMatrix2x4fv, (C.GLuint)(program), (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func UniformMatrix2fv(location Int, count Sizei, transpose Boolean, value []Float) {\n\tclocation, _ := (C.GLint)(location), cgoAllocsUnknown\n\tccount, _ := (C.GLsizei)(count), cgoAllocsUnknown\n\tctranspose, _ := (C.GLboolean)(transpose), cgoAllocsUnknown\n\tcvalue, _ := (*C.GLfloat)(unsafe.Pointer((*sliceHeader)(unsafe.Pointer(&value)).Data)), cgoAllocsUnknown\n\tC.glUniformMatrix2fv(clocation, ccount, ctranspose, cvalue)\n}",
"func UniformMatrix3fv(location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpUniformMatrix3fv, 4, uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0, 0)\n}",
"func Mat3Projection(out []float64, width, height float64) []float64 {\n\tout[0] = 2 / width\n\tout[1] = 0\n\tout[2] = 0\n\tout[3] = 0\n\tout[4] = -2 / height\n\tout[5] = 0\n\tout[6] = -1\n\tout[7] = 1\n\tout[8] = 1\n\treturn out\n}",
"func ProgramUniformMatrix4x3fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix4x3fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func NewFzMatrixRef(ref unsafe.Pointer) *FzMatrix {\n\treturn (*FzMatrix)(ref)\n}",
"func UniformMatrix3fv(location Int, count Sizei, transpose Boolean, value []Float) {\n\tclocation, _ := (C.GLint)(location), cgoAllocsUnknown\n\tccount, _ := (C.GLsizei)(count), cgoAllocsUnknown\n\tctranspose, _ := (C.GLboolean)(transpose), cgoAllocsUnknown\n\tcvalue, _ := (*C.GLfloat)(unsafe.Pointer((*sliceHeader)(unsafe.Pointer(&value)).Data)), cgoAllocsUnknown\n\tC.glUniformMatrix3fv(clocation, ccount, ctranspose, cvalue)\n}",
"func (c *Camera) SetupProjection(aspect float32) {\n\tc.ProjMat = types.Perspective4(c.FOV, aspect, 1, 1000)\n\tc.Update()\n}",
"func Project(obj Vec3, modelview, projection Mat4, initialX, initialY, width, height int) (win Vec3) {\n\tobj4 := obj.Vec4(1)\n\n\tvpp := projection.Mul4(modelview).Mul4x1(obj4)\n\tvpp = vpp.Mul(1 / vpp.W())\n\twin[0] = float64(initialX) + (float64(width)*(vpp[0]+1))/2\n\twin[1] = float64(initialY) + (float64(height)*(vpp[1]+1))/2\n\twin[2] = (vpp[2] + 1) / 2\n\n\treturn win\n}",
"func MatrixOrtho(left, right, bottom, top, near, far float32) Matrix {\n\tvar result Matrix\n\n\trl := right - left\n\ttb := top - bottom\n\tfn := far - near\n\n\tresult.M0 = 2.0 / rl\n\tresult.M1 = 0.0\n\tresult.M2 = 0.0\n\tresult.M3 = 0.0\n\tresult.M4 = 0.0\n\tresult.M5 = 2.0 / tb\n\tresult.M6 = 0.0\n\tresult.M7 = 0.0\n\tresult.M8 = 0.0\n\tresult.M9 = 0.0\n\tresult.M10 = -2.0 / fn\n\tresult.M11 = 0.0\n\tresult.M12 = -(left + right) / rl\n\tresult.M13 = -(top + bottom) / tb\n\tresult.M14 = -(far + near) / fn\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func UniformMatrix2fv(dst Uniform, src []float32) {\n\tgl.UniformMatrix2fv(dst.Value, int32(len(src)/(2*2)), false, &src[0])\n}",
"func ProgramUniformMatrix2x4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n C.glowProgramUniformMatrix2x4fv(gpProgramUniformMatrix2x4fv, (C.GLuint)(program), (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func ProgramUniformMatrix2fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix2fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func (s *Shader) SetUniformFMat4(name string, value mgl32.Mat4) {\n\tloc := s.getUniformLocation(name)\n\tif loc != -1 {\n\t\tgl.UniformMatrix4fv(loc, 1, false, &value[0])\n\t}\n}",
"func ProgramUniformMatrix4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tC.glowProgramUniformMatrix4fv(gpProgramUniformMatrix4fv, (C.GLuint)(program), (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func ProgramUniformMatrix4fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tC.glowProgramUniformMatrix4fv(gpProgramUniformMatrix4fv, (C.GLuint)(program), (C.GLint)(location), (C.GLsizei)(count), (C.GLboolean)(boolToInt(transpose)), (*C.GLfloat)(unsafe.Pointer(value)))\n}",
"func RotateZ(fAngDeg gl.Float) *Mat4 {\n\tfAngRad := DegToRad(fAngDeg)\n\tfCos := CosGL(fAngRad)\n\tfSin := SinGL(fAngRad)\n\ttheMat := IdentMat4()\n\ttheMat[0].X = fCos\n\ttheMat[1].X = -fSin\n\ttheMat[0].Y = fSin\n\ttheMat[1].Y = fCos\n\treturn theMat\n}",
"func NewCameraWithTransform(horizontalSize int, verticalSize int, fieldOfView float64,\n\ttransform *matrix.Matrix) *Camera {\n\n\tc := &Camera{\n\t\thorizontalSizeInPixels: horizontalSize,\n\t\tverticalSizeInPixels: verticalSize,\n\t\tfieldOfView: fieldOfView,\n\t\ttransform: transform,\n\t}\n\tc.prepareWorldSpaceUnits()\n\n\t// Cache the inverse of the transform, which never\n\t// changes and is used in rendering routines often.\n\tinverseTransform, _ := matrix.Inverse(transform)\n\tc.inverseTransform = inverseTransform\n\n\treturn c\n}",
"func Zlarfb(side mat.MatSide, trans mat.MatTrans, direct, storev byte, m, n, k int, v, t, c, work *mat.CMatrix) {\n\tvar transt mat.MatTrans\n\tvar one complex128\n\tvar i, j int\n\tvar err error\n\n\tone = (1.0 + 0.0*1i)\n\n\t// Quick return if possible\n\tif m <= 0 || n <= 0 {\n\t\treturn\n\t}\n\n\tif trans == NoTrans {\n\t\ttranst = ConjTrans\n\t} else {\n\t\ttranst = NoTrans\n\t}\n\n\tif storev == 'C' {\n\n\t\tif direct == 'F' {\n\t\t\t// Let V = ( V1 ) (first K rows)\n\t\t\t// ( V2 )\n\t\t\t// where V1 is unit lower triangular.\n\t\t\tif side == Left {\n\t\t\t\t// Form H * C or H**H * C where C = ( C1 )\n\t\t\t\t// ( C2 )\n\t\t\t\t//\n\t\t\t\t// W := C**H * V = (C1**H * V1 + C2**H * V2) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C1**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(n, c.Off(j-1, 0).CVector(), c.Rows, 1)\n\t\t\t\t\tZlacgv(n, work.Off(0, j-1).CVector(), 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1\n\t\t\t\tif err = work.Trmm(Right, Lower, NoTrans, Unit, n, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif m > k {\n\t\t\t\t\t// W := W + C2**H * V2\n\t\t\t\t\tif err = work.Gemm(ConjTrans, NoTrans, n, k, m-k, one, c.Off(k, 0), v.Off(k, 0), one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T**H or W * T\n\t\t\t\tif err = work.Trmm(Right, Upper, transt, NonUnit, n, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - V * W**H\n\t\t\t\tif m > k {\n\t\t\t\t\t// C2 := C2 - V2 * W**H\n\t\t\t\t\tif err = c.Off(k, 0).Gemm(NoTrans, ConjTrans, m-k, n, k, -one, v.Off(k, 0), work, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1**H\n\t\t\t\tif err = work.Trmm(Right, Lower, ConjTrans, Unit, n, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C1 := C1 - W**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= n; i++ {\n\t\t\t\t\t\tc.Set(j-1, i-1, c.Get(j-1, i-1)-work.GetConj(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else if side == Right {\n\t\t\t\t// Form C * H or C * H**H where C = ( C1 C2 )\n\t\t\t\t//\n\t\t\t\t// W := C * V = (C1*V1 + C2*V2) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C1\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(m, c.Off(0, j-1).CVector(), 1, 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1\n\t\t\t\tif err = work.Trmm(Right, Lower, NoTrans, Unit, m, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif n > k {\n\t\t\t\t\t// W := W + C2 * V2\n\t\t\t\t\tif err = work.Gemm(NoTrans, NoTrans, m, k, n-k, one, c.Off(0, k), v.Off(k, 0), one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T or W * T**H\n\t\t\t\tif err = work.Trmm(Right, Upper, trans, NonUnit, m, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - W * V**H\n\t\t\t\tif n > k {\n\t\t\t\t\t// C2 := C2 - W * V2**H\n\t\t\t\t\tif err = c.Off(0, k).Gemm(NoTrans, ConjTrans, m, n-k, k, -one, work, v.Off(k, 0), one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1**H\n\t\t\t\tif err = work.Trmm(Right, Lower, ConjTrans, Unit, m, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C1 := C1 - W\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= m; i++ {\n\t\t\t\t\t\tc.Set(i-1, j-1, c.Get(i-1, j-1)-work.Get(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t} else {\n\t\t\t// Let V = ( V1 )\n\t\t\t// ( V2 ) (last K rows)\n\t\t\t// where V2 is unit upper 
triangular.\n\t\t\tif side == Left {\n\t\t\t\t// Form H * C or H**H * C where C = ( C1 )\n\t\t\t\t// ( C2 )\n\t\t\t\t//\n\t\t\t\t// W := C**H * V = (C1**H * V1 + C2**H * V2) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C2**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(n, c.Off(m-k+j-1, 0).CVector(), c.Rows, 1)\n\t\t\t\t\tZlacgv(n, work.Off(0, j-1).CVector(), 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2\n\t\t\t\tif err = work.Trmm(Right, Upper, NoTrans, Unit, n, k, one, v.Off(m-k, 0)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif m > k {\n\t\t\t\t\t// W := W + C1**H * V1\n\t\t\t\t\tif err = work.Gemm(ConjTrans, NoTrans, n, k, m-k, one, c, v, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T**H or W * T\n\t\t\t\tif err = work.Trmm(Right, Lower, transt, NonUnit, n, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - V * W**H\n\t\t\t\tif m > k {\n\t\t\t\t\t// C1 := C1 - V1 * W**H\n\t\t\t\t\tif err = c.Gemm(NoTrans, ConjTrans, m-k, n, k, -one, v, work, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2**H\n\t\t\t\tif err = work.Trmm(Right, Upper, ConjTrans, Unit, n, k, one, v.Off(m-k, 0)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C2 := C2 - W**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= n; i++ {\n\t\t\t\t\t\tc.Set(m-k+j-1, i-1, c.Get(m-k+j-1, i-1)-work.GetConj(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else if side == Right {\n\t\t\t\t// Form C * H or C * H**H where C = ( C1 C2 )\n\t\t\t\t//\n\t\t\t\t// W := C * V = (C1*V1 + C2*V2) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C2\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(m, c.Off(0, n-k+j-1).CVector(), 1, 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2\n\t\t\t\tif err = work.Trmm(Right, Upper, NoTrans, Unit, m, k, one, v.Off(n-k, 0)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif n > k {\n\t\t\t\t\t// W := W + C1 * V1\n\t\t\t\t\tif err = work.Gemm(NoTrans, NoTrans, m, k, n-k, one, c, v, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T or W * T**H\n\t\t\t\tif err = work.Trmm(Right, Lower, trans, NonUnit, m, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - W * V**H\n\t\t\t\tif n > k {\n\t\t\t\t\t// C1 := C1 - W * V1**H\n\t\t\t\t\tif err = c.Gemm(NoTrans, ConjTrans, m, n-k, k, -one, work, v, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2**H\n\t\t\t\tif err = work.Trmm(Right, Upper, ConjTrans, Unit, m, k, one, v.Off(n-k, 0)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C2 := C2 - W\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= m; i++ {\n\t\t\t\t\t\tc.Set(i-1, n-k+j-1, c.Get(i-1, n-k+j-1)-work.Get(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t} else if storev == 'R' {\n\n\t\tif direct == 'F' {\n\t\t\t// Let V = ( V1 V2 ) (V1: first K columns)\n\t\t\t// where V1 is unit upper triangular.\n\t\t\tif side == Left {\n\t\t\t\t// Form H * C or H**H * C where C = ( C1 )\n\t\t\t\t// ( C2 )\n\t\t\t\t//\n\t\t\t\t// W := C**H * V**H = (C1**H * V1**H + C2**H * V2**H) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C1**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(n, c.Off(j-1, 0).CVector(), c.Rows, 1)\n\t\t\t\t\tZlacgv(n, work.Off(0, j-1).CVector(), 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1**H\n\t\t\t\tif err = work.Trmm(Right, Upper, 
ConjTrans, Unit, n, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif m > k {\n\t\t\t\t\t// W := W + C2**H * V2**H\n\t\t\t\t\tif err = work.Gemm(ConjTrans, ConjTrans, n, k, m-k, one, c.Off(k, 0), v.Off(0, k), one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T**H or W * T\n\t\t\t\tif err = work.Trmm(Right, Upper, transt, NonUnit, n, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - V**H * W**H\n\t\t\t\tif m > k {\n\t\t\t\t\t// C2 := C2 - V2**H * W**H\n\t\t\t\t\tif err = c.Off(k, 0).Gemm(ConjTrans, ConjTrans, m-k, n, k, -one, v.Off(0, k), work, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1\n\t\t\t\tif err = work.Trmm(Right, Upper, NoTrans, Unit, n, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C1 := C1 - W**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= n; i++ {\n\t\t\t\t\t\tc.Set(j-1, i-1, c.Get(j-1, i-1)-work.GetConj(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else if side == Right {\n\t\t\t\t// Form C * H or C * H**H where C = ( C1 C2 )\n\t\t\t\t//\n\t\t\t\t// W := C * V**H = (C1*V1**H + C2*V2**H) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C1\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(m, c.Off(0, j-1).CVector(), 1, 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1**H\n\t\t\t\tif err = work.Trmm(Right, Upper, ConjTrans, Unit, m, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif n > k {\n\t\t\t\t\t// W := W + C2 * V2**H\n\t\t\t\t\tif err = work.Gemm(NoTrans, ConjTrans, m, k, n-k, one, c.Off(0, k), v.Off(0, k), one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T or W * T**H\n\t\t\t\tif err = work.Trmm(Right, Upper, trans, NonUnit, m, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - W * V\n\t\t\t\tif n > k {\n\t\t\t\t\t// C2 := C2 - W * V2\n\t\t\t\t\tif err = c.Off(0, k).Gemm(NoTrans, NoTrans, m, n-k, k, -one, work, v.Off(0, k), one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V1\n\t\t\t\tif err = work.Trmm(Right, Upper, NoTrans, Unit, m, k, one, v); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C1 := C1 - W\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= m; i++ {\n\t\t\t\t\t\tc.Set(i-1, j-1, c.Get(i-1, j-1)-work.Get(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t} else {\n\t\t\t// Let V = ( V1 V2 ) (V2: last K columns)\n\t\t\t// where V2 is unit lower triangular.\n\t\t\tif side == Left {\n\t\t\t\t// Form H * C or H**H * C where C = ( C1 )\n\t\t\t\t// ( C2 )\n\t\t\t\t//\n\t\t\t\t// W := C**H * V**H = (C1**H * V1**H + C2**H * V2**H) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C2**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(n, c.Off(m-k+j-1, 0).CVector(), c.Rows, 1)\n\t\t\t\t\tZlacgv(n, work.Off(0, j-1).CVector(), 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2**H\n\t\t\t\tif err = work.Trmm(Right, Lower, ConjTrans, Unit, n, k, one, v.Off(0, m-k)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif m > k {\n\t\t\t\t\t// W := W + C1**H * V1**H\n\t\t\t\t\tif err = work.Gemm(ConjTrans, ConjTrans, n, k, m-k, one, c, v, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T**H or W * T\n\t\t\t\tif err = work.Trmm(Right, Lower, transt, NonUnit, n, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - V**H * W**H\n\t\t\t\tif m > k 
{\n\t\t\t\t\t// C1 := C1 - V1**H * W**H\n\t\t\t\t\tif err = c.Gemm(ConjTrans, ConjTrans, m-k, n, k, -one, v, work, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2\n\t\t\t\tif err = work.Trmm(Right, Lower, NoTrans, Unit, n, k, one, v.Off(0, m-k)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C2 := C2 - W**H\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= n; i++ {\n\t\t\t\t\t\tc.Set(m-k+j-1, i-1, c.Get(m-k+j-1, i-1)-work.GetConj(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t} else if side == Right {\n\t\t\t\t// Form C * H or C * H**H where C = ( C1 C2 )\n\t\t\t\t//\n\t\t\t\t// W := C * V**H = (C1*V1**H + C2*V2**H) (stored in WORK)\n\t\t\t\t//\n\t\t\t\t// W := C2\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\twork.Off(0, j-1).CVector().Copy(m, c.Off(0, n-k+j-1).CVector(), 1, 1)\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2**H\n\t\t\t\tif err = work.Trmm(Right, Lower, ConjTrans, Unit, m, k, one, v.Off(0, n-k)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\t\t\t\tif n > k {\n\t\t\t\t\t// W := W + C1 * V1**H\n\t\t\t\t\tif err = work.Gemm(NoTrans, ConjTrans, m, k, n-k, one, c, v, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * T or W * T**H\n\t\t\t\tif err = work.Trmm(Right, Lower, trans, NonUnit, m, k, one, t); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C := C - W * V\n\t\t\t\tif n > k {\n\t\t\t\t\t// C1 := C1 - W * V1\n\t\t\t\t\tif err = c.Gemm(NoTrans, NoTrans, m, n-k, k, -one, work, v, one); err != nil {\n\t\t\t\t\t\tpanic(err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\t// W := W * V2\n\t\t\t\tif err = work.Trmm(Right, Lower, NoTrans, Unit, m, k, one, v.Off(0, n-k)); err != nil {\n\t\t\t\t\tpanic(err)\n\t\t\t\t}\n\n\t\t\t\t// C1 := C1 - W\n\t\t\t\tfor j = 1; j <= k; j++ {\n\t\t\t\t\tfor i = 1; i <= m; i++ {\n\t\t\t\t\t\tc.Set(i-1, n-k+j-1, c.Get(i-1, n-k+j-1)-work.Get(i-1, j-1))\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t}\n\n\t\t}\n\t}\n}",
"func (s *Shader) setUniformMatrix(name string, value *mgl32.Mat4) {\n location:=gl.GetUniformLocation(s.idPrograma, gl.Str(name + \"\\x00\"))\n if location != -1 { // Si existe ese nombre de variable\n\n bb := new([16]float32) // Creamos un buffer de floats\n for i:=0; i<4; i++{\n for j:=0; j<4; j++ {\n bb[j+i*4] = float32(value.At(i,j))\n }\n }\n gl.UniformMatrix4fv(location, 1, false, &bb[0]) // Enviar a shader matriz PROJECTION * SCALE\n }\n}",
"func ProgramUniformMatrix3x2fv(program uint32, location int32, count int32, transpose bool, value *float32) {\n\tsyscall.Syscall6(gpProgramUniformMatrix3x2fv, 5, uintptr(program), uintptr(location), uintptr(count), boolToUintptr(transpose), uintptr(unsafe.Pointer(value)), 0)\n}",
"func MatrixLookAt(eye, target, up Vector3) Matrix {\n\tvar result Matrix\n\n\tz := Vector3Subtract(eye, target)\n\tz = Vector3Normalize(z)\n\tx := Vector3CrossProduct(up, z)\n\tx = Vector3Normalize(x)\n\ty := Vector3CrossProduct(z, x)\n\ty = Vector3Normalize(y)\n\n\tresult.M0 = x.X\n\tresult.M1 = x.Y\n\tresult.M2 = x.Z\n\tresult.M3 = -((x.X * eye.X) + (x.Y * eye.Y) + (x.Z * eye.Z))\n\tresult.M4 = y.X\n\tresult.M5 = y.Y\n\tresult.M6 = y.Z\n\tresult.M7 = -((y.X * eye.X) + (y.Y * eye.Y) + (y.Z * eye.Z))\n\tresult.M8 = z.X\n\tresult.M9 = z.Y\n\tresult.M10 = z.Z\n\tresult.M11 = -((z.X * eye.X) + (z.Y * eye.Y) + (z.Z * eye.Z))\n\tresult.M12 = 0.0\n\tresult.M13 = 0.0\n\tresult.M14 = 0.0\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func NewLookAtMatrix(target, center, up vector.Vector) Matrix4 {\n\tz := target.Sub(center).Unit()\n\tx, _ := up.Cross(z)\n\tx = x.Unit()\n\ty, _ := z.Cross(x)\n\treturn Matrix4{\n\t\t{x[0], x[1], x[2], -x.Dot(target)},\n\t\t{y[0], y[1], y[2], -y.Dot(target)},\n\t\t{z[0], z[1], z[2], -z.Dot(target)},\n\t\t{0, 0, 0, 1},\n\t}\n}",
"func make_plane(tWidth, tHeight uint32, vertices []float32, indices []uint32) {\n\t// width and height are the number of triangles across and down\n\t// plus one for the vertices to define them\n\ttWidth++\n\ttHeight++\n\n\tvar makeIsland = true\n\tvar heightMap = make([]float32, tWidth*tHeight)\n\tmake_height_map(tWidth, tHeight, heightMap, makeIsland)\n\tvar x, y uint32\n\tvar scale float32\n\tscale = 2.0 / float32(plane_rows)\n\thScale := scale * 2\n\t//var fbTexScale = float32(cols / width)\n\tvar fbTexScale = float32(1.0)\n\t// Set up vertices\n\tfor y = 0; y < tHeight; y++ {\n\t\tbase := y * tWidth\n\t\tfor x = 0; x < tWidth; x++ {\n\t\t\tindex := base + x\n\t\t\t// Position\n\t\t\tvertices[(8 * index)] = float32(x)*scale - 1.0\n\t\t\tvertices[(8*index)+1] = float32(y)*scale - 1.0\n\t\t\tvertices[(8*index)+2] = heightMap[index] * hScale\n\t\t\t// Colours\n\t\t\tvertices[(8*index)+3] = float32(1.0)\n\t\t\tvertices[(8*index)+4] = float32(1.0)\n\t\t\tvertices[(8*index)+5] = float32(1.0)\n\t\t\t// Texture\n\t\t\tvertices[(8*index)+6] = fbTexScale * float32(x) / float32(tWidth-1)\n\t\t\tvertices[(8*index)+7] = fbTexScale * float32(y) / float32(tHeight-1)\n\t\t\t/*fmt.Printf(\"%d: Ver ( %.2f, %.2f, %.2f ) / Col ( %.2f %.2f %.2f ) / Text ( %.2f, %.2f )\\n\",\n\t\t\tindex, vertices[(8*index)+0], vertices[(8*index)+1], vertices[(8*index)+2],\n\t\t\tvertices[(8*index)+3], vertices[(8*index)+4], vertices[(8*index)+5],\n\t\t\tvertices[(8*index)+6], vertices[(8*index)+7])*/\n\t\t}\n\t}\n\n\t// Set up indices\n\ti := 0\n\ttHeight--\n\tfor y = 0; y < tHeight; y++ {\n\t\tbase := y * tWidth\n\n\t\t//indices[i++] = (uint16)base;\n\t\tfor x = 0; x < tWidth; x++ {\n\t\t\tindices[i] = (uint32)(base + x)\n\t\t\ti += 1\n\t\t\tindices[i] = (uint32)(base + tWidth + x)\n\t\t\ti += 1\n\t\t}\n\t\t// add a degenerate triangle (except in a last row)\n\t\tif y < tHeight-1 {\n\t\t\tindices[i] = (uint32)((y+1)*tWidth + (tWidth - 1))\n\t\t\ti += 1\n\t\t\tindices[i] = (uint32)((y + 1) * tWidth)\n\t\t\ti += 1\n\t\t}\n\t}\n\n\t/*var ind int\n\tfor ind = 0; ind < i; ind++ {\n\t\tfmt.Printf(\"%d \", indices[ind])\n\t}\n\tfmt.Printf(\"\\nIn total %d indices\\n\", ind)*/\n}"
] | [
"0.76970947",
"0.7330446",
"0.7255292",
"0.72207195",
"0.69924295",
"0.58773893",
"0.5802752",
"0.54933745",
"0.5351413",
"0.5217522",
"0.51141834",
"0.4948555",
"0.48751333",
"0.48751333",
"0.48583722",
"0.48525465",
"0.48237985",
"0.48176077",
"0.47767034",
"0.47767034",
"0.4771447",
"0.47664297",
"0.47664297",
"0.47526172",
"0.4751868",
"0.4727682",
"0.472501",
"0.472501",
"0.4710263",
"0.4710263",
"0.47069737",
"0.4706857",
"0.4703184",
"0.4696323",
"0.46847045",
"0.4681396",
"0.46619388",
"0.46357056",
"0.4634042",
"0.4618145",
"0.4547962",
"0.4546378",
"0.45233914",
"0.45041803",
"0.4456889",
"0.44541255",
"0.44430354",
"0.44387293",
"0.44373724",
"0.44263703",
"0.44263703",
"0.44217205",
"0.44193614",
"0.44060743",
"0.44030827",
"0.44030827",
"0.4383526",
"0.4383526",
"0.43821445",
"0.43742183",
"0.43715712",
"0.43713474",
"0.43646494",
"0.43632388",
"0.43545198",
"0.43481174",
"0.43481174",
"0.4341664",
"0.43280673",
"0.43260425",
"0.4324885",
"0.43148354",
"0.4311192",
"0.42909235",
"0.42893118",
"0.42774343",
"0.42774343",
"0.42754045",
"0.42341104",
"0.4233581",
"0.42239878",
"0.4219528",
"0.42156014",
"0.42142457",
"0.42045537",
"0.41910827",
"0.4177033",
"0.41431347",
"0.41380116",
"0.4130242",
"0.41278538",
"0.41278538",
"0.41164434",
"0.41162428",
"0.41157085",
"0.4108239",
"0.4105371",
"0.4100065",
"0.40949455",
"0.4088835"
] | 0.7870303 | 0 |
LookAt generates a transform matrix from world space to the given eye space. | func LookAt(eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ float64) Mat4 {
return LookAtV(Vec3{eyeX, eyeY, eyeZ}, Vec3{centerX, centerY, centerZ}, Vec3{upX, upY, upZ})
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func LookAt(cameraX, cameraY, cameraZ, eyeX, eyeY, eyeZ, orientX, orientY, orientZ gl.Float) *Mat4 {\n\tcamera := &Vec3{ cameraX, cameraY, cameraZ }\n\teye := &Vec3{ eyeX, eyeY, eyeZ }\n\torient := &Vec3{ orientX, orientY, orientZ }\n\treturn LookAtV(camera, eye, orient)\n}",
"func MatrixLookAt(eye, target, up Vector3) Matrix {\n\tvar result Matrix\n\n\tz := Vector3Subtract(eye, target)\n\tz = Vector3Normalize(z)\n\tx := Vector3CrossProduct(up, z)\n\tx = Vector3Normalize(x)\n\ty := Vector3CrossProduct(z, x)\n\ty = Vector3Normalize(y)\n\n\tresult.M0 = x.X\n\tresult.M1 = x.Y\n\tresult.M2 = x.Z\n\tresult.M3 = -((x.X * eye.X) + (x.Y * eye.Y) + (x.Z * eye.Z))\n\tresult.M4 = y.X\n\tresult.M5 = y.Y\n\tresult.M6 = y.Z\n\tresult.M7 = -((y.X * eye.X) + (y.Y * eye.Y) + (y.Z * eye.Z))\n\tresult.M8 = z.X\n\tresult.M9 = z.Y\n\tresult.M10 = z.Z\n\tresult.M11 = -((z.X * eye.X) + (z.Y * eye.Y) + (z.Z * eye.Z))\n\tresult.M12 = 0.0\n\tresult.M13 = 0.0\n\tresult.M14 = 0.0\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func (transform *Transform) LookAt(target mgl32.Vec3) {\n\ttransform.objMatrix = mgl32.LookAtV(transform.GetPosition(), target, mgl32.Vec3{0, 1, 0})\n}",
"func NewMatrixLookAt(eye, target, up Vector3) Matrix {\n\tz := eye.Subtract(target).Normalize()\n\tx := up.CrossProduct(z).Normalize()\n\ty := z.CrossProduct(x).Normalize()\n\treturn Matrix{\n\t\tM0: x.X,\n\t\tM1: x.Y,\n\t\tM2: x.Z,\n\t\tM3: 0,\n\t\tM4: y.X,\n\t\tM5: y.Y,\n\t\tM6: y.Z,\n\t\tM7: 0,\n\t\tM8: z.X,\n\t\tM9: z.Y,\n\t\tM10: z.Z,\n\t\tM11: 0,\n\t\tM12: eye.X,\n\t\tM13: eye.Y,\n\t\tM14: eye.Z,\n\t\tM15: 1,\n\t}\n}",
"func (c *Camera) LookAt(x, y float64) {\n\tc.lookAtX = x\n\tc.lookAtY = y\n\tc.sTop = c.lookAtY + float64(c.screenH/2)*c.zoomInv\n\tc.sBottom = c.lookAtY - float64(c.screenH/2)*c.zoomInv\n\tc.sLeft = c.lookAtX - float64(c.screenW/2)*c.zoomInv\n\tc.sRight = c.lookAtX + float64(c.screenW/2)*c.zoomInv\n\n}",
"func NewLookAtMatrix(target, center, up vector.Vector) Matrix4 {\n\tz := target.Sub(center).Unit()\n\tx, _ := up.Cross(z)\n\tx = x.Unit()\n\ty, _ := z.Cross(x)\n\treturn Matrix4{\n\t\t{x[0], x[1], x[2], -x.Dot(target)},\n\t\t{y[0], y[1], y[2], -y.Dot(target)},\n\t\t{z[0], z[1], z[2], -z.Dot(target)},\n\t\t{0, 0, 0, 1},\n\t}\n}",
"func LookMatrix(o Vec, to Vec) *Mtx {\n\tf, _ := o.Minus(to).Unit() // forward\n\tr, _ := yAxis.Cross(f) // right\n\tu, _ := f.Cross(r) // up\n\torient := NewMat(\n\t\tr.X, u.X, f.X, 0,\n\t\tr.Y, u.Y, f.Y, 0,\n\t\tr.Z, u.Z, f.Z, 0,\n\t\t0, 0, 0, 1,\n\t)\n\treturn Shift(o).Mult(orient)\n}",
"func (c *Camera) LookAtVec(eye, center, up *glm.Vec3) {\n\tc.View = glm.LookAt(eye.X, eye.Y, eye.Z, center.X, center.Y, center.Z, up.X, up.Y, up.Z)\n\tc.Pos = *eye\n}",
"func (c *Camera) LookingAt() (float64, float64) {\n\treturn c.lookAtX, c.lookAtY\n}",
"func (c *Camera) LookAtval(eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ float32) {\n\tc.View = glm.LookAt(eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ)\n\tc.Pos = glm.Vec3{X: eyeX, Y: eyeY, Z: eyeZ}\n}",
"func LookAtV(eye, center, up Vec3) Mat4 {\n\tf := center.Sub(eye).Normalize()\n\ts := f.Cross(up.Normalize()).Normalize()\n\tu := s.Cross(f)\n\n\tM := Mat4{\n\t\ts[0], u[0], -f[0], 0,\n\t\ts[1], u[1], -f[1], 0,\n\t\ts[2], u[2], -f[2], 0,\n\t\t0, 0, 0, 1,\n\t}\n\n\treturn M.Mul4(Translate3D(float64(-eye[0]), float64(-eye[1]), float64(-eye[2])))\n}",
"func LookAtV(cameraLoc, lookTo, orientation *Vec3) *Mat4 {\n\n\tF := lookTo.Sub(cameraLoc)\n\tf := F.Normalize()\n\to := orientation.Normalize()\n\ts := f.Cross(o).Normalize()\n\tu := s.Cross(f)\n\tM := Mat4{\n\t\tVec4{ s.X, u.X, -f.X, 0.0 },\n\t\tVec4{ s.Y, u.Y, -f.Y, 0.0 },\n\t\tVec4{ s.Z, u.Z, -f.Z, 0.0 },\n\t\tVec4{ 0.0, 0.0, 0.0, 1.0, },\n\t}\n\tt := Vec4{-lookTo.X, -lookTo.Y, -lookTo.Z, 1.0}\n\tMR := M.Translate(&t)\n\n\treturn MR\n}",
"func QuatLookAtV(eye, center, up Vec3) Quat {\n\t// http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-17-quaternions/#I_need_an_equivalent_of_gluLookAt__How_do_I_orient_an_object_towards_a_point__\n\t// https://bitbucket.org/sinbad/ogre/src/d2ef494c4a2f5d6e2f0f17d3bfb9fd936d5423bb/OgreMain/src/OgreCamera.cpp?at=default#cl-161\n\n\tdirection := center.Sub(eye).Normalize()\n\n\t// Find the rotation between the front of the object (that we assume towards Z-,\n\t// but this depends on your model) and the desired direction\n\trotDir := QuatBetweenVectors(Vec3{0, 0, -1}, direction)\n\n\t// Recompute up so that it's perpendicular to the direction\n\t// You can skip that part if you really want to force up\n\t//right := direction.Cross(up)\n\t//up = right.Cross(direction)\n\n\t// Because of the 1rst rotation, the up is probably completely screwed up.\n\t// Find the rotation between the \"up\" of the rotated object, and the desired up\n\tupCur := rotDir.Rotate(Vec3{0, 1, 0})\n\trotUp := QuatBetweenVectors(upCur, up)\n\n\trotTarget := rotUp.Mul(rotDir) // remember, in reverse order.\n\treturn rotTarget.Inverse() // camera rotation should be inversed!\n}",
"func (c *camera) point_at(v Vector) {\n c.front = v.Sub(c.pos).Normalize().Add(c.pos)\n}",
"func (c *Camera) Matrix() mgl32.Mat4 {\n\treturn mgl32.LookAtV(c.pos, c.pos.Add(c.front), c.up)\n}",
"func (ray Ray) PointAt(time float64) Vec3D {\n\treturn AddVec3D(ray.origin, ScalarProduct(time, ray.direction))\n}",
"func (r *Ray) PointAt(t float64) Vector {\n\treturn r.Origin.Add(r.Direction.Mult(t))\n}",
"func (k *Kernel) At(x, y int) float64 {\n\treturn k.Matrix[y*k.Width+x]\n}",
"func (m Matrix3) At(row, col int) float64 {\n\tconst size = 3\n\treturn m[row*size+col]\n}",
"func (transform *Transform) GetPosition() mgl32.Vec3 {\n\tpos := transform.objMatrix.Row(3)\n\treturn mgl32.Vec3{pos.X(), pos.Y(), pos.Z()}\n}",
"func NewCamera(lookFrom, lookAt, vup Vec3, vfov, aspect, aperture, focusDistance float64) *Camera {\n\n\ttheta := vfov * math.Pi / 180.0\n\thalfHeight := math.Tan(theta / 2.0)\n\thalfWidth := aspect * halfHeight\n\tw := lookFrom.SubtractVec3(lookAt).UnitVector()\n\tu := vup.Cross(w).UnitVector()\n\tv := w.Cross(u)\n\n\treturn &Camera{\n\t\tLowerLeftCorner: lookFrom.\n\t\t\tSubtractVec3(u.MultiplyScalar(halfWidth * focusDistance)).\n\t\t\tSubtractVec3(v.MultiplyScalar(halfHeight * focusDistance)).\n\t\t\tSubtractVec3(w.MultiplyScalar(focusDistance)),\n\t\tHorizontal: u.MultiplyScalar(halfWidth * focusDistance * 2.0),\n\t\tVertical: v.MultiplyScalar(halfHeight * focusDistance * 2.0),\n\t\tOrigin: lookFrom,\n\t\tLensRadius: aperture / 2.0,\n\t\tU: u,\n\t\tV: v,\n\t}\n}",
"func NewTransform() Transform {\n\tt := transform{\n\t\tmodelView: mgl32.Ident4(),\n\t\trotation: mgl32.Vec3{0, 0, 0},\n\t\ttranslation: mgl32.Vec3{0, 0, 0},\n\t}\n\treturn &t\n}",
"func (r *Ray) At(t float64) Vector {\n\treturn r.Origin.Plus(r.Direction.MultiplyScalar(t))\n}",
"func (o LookupExperienceResultOutput) CreatedAt() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupExperienceResult) string { return v.CreatedAt }).(pulumi.StringOutput)\n}",
"func NewCamera(lookFrom, lookAt, vup Vec3, vfov, aspect, aperture, focusDist float64) Camera {\n\tlensRadius := aperture / 2\n\ttheta := vfov * math.Pi / 180\n\thalfHeight := math.Tan(theta / 2)\n\thalfWidth := aspect * halfHeight\n\torigin := lookFrom\n\tw := lookFrom.Substract(lookAt).UnitVector()\n\tu := vup.Cross(w).UnitVector()\n\tv := w.Cross(u)\n\tlowerLeftCorner := origin.Substract(\n\t\tu.ScalarMultiple(halfWidth * focusDist)).\n\t\tSubstract(v.ScalarMultiple(halfHeight * focusDist)).\n\t\tSubstract(w.ScalarMultiple(focusDist))\n\thorizontal := u.ScalarMultiple(2 * halfWidth * focusDist)\n\tvertical := v.ScalarMultiple(2 * halfHeight * focusDist)\n\treturn Camera{origin, lowerLeftCorner, horizontal, vertical, u, v, w, lensRadius}\n}",
"func (transform *Transform) SetPosition(newPos mgl32.Vec3) {\n\ttransform.objMatrix.SetRow(3, mgl32.Vec4{newPos.X(), newPos.Y(), newPos.Z()})\n}",
"func Translation(t vec.Vec) Matrix {\n\treturn Matrix{1, 0, 0, 1, t.X, t.Y}\n}",
"func (w *worldImg) At(x, y int) color.Color {\n\tp := w.probs[x*w.W+y]\n\tloc := w.World.At(x, y)\n\tmin, max := 0.1, 1.0\n\tf := (p/w.mx)*(max-min) + min\n\tc := colors[loc.Terrain.Char[0]]\n\treturn color.RGBA{\n\t\tR: uint8(float64(c.R) * f),\n\t\tG: uint8(float64(c.G) * f),\n\t\tB: uint8(float64(c.B) * f),\n\t\tA: c.A,\n\t}\n}",
"func (m Matrix) Transform(a vec.Vec) vec.Vec {\n\treturn vec.New(\n\t\ta.X*m[0]+a.Y*m[2]+m[4],\n\t\ta.X*m[1]+a.Y*m[3]+m[5])\n}",
"func (w *RandomWorld) GetBeingAt(location GoWorld.Location) (uuid.UUID, error) {\n\tif w.IsOutOfBounds(location) {\n\t\treturn uuid.Nil, fmt.Errorf(\n\t\t\t\"error providing being at spot: the location (%d, %d) is out of bounds. WorldSize (%v, %v)\",\n\t\t\tlocation.X, location.Y, w.Width, w.Height)\n\t}\n\treturn w.TerrainSpots[location.X][location.Y].Being, nil\n}",
"func (env *Environment) At(pos Position) *Tile {\n\tif pos.X < 0 || pos.X >= env.Size[0] || pos.Y < 0 || pos.Y >= env.Size[1] {\n\t\treturn nil\n\t}\n\n\tfor _, it := range env.Parcels {\n\t\tif pos == it.Position && it.State == PSFree {\n\t\t\treturn &Tile{env: env, Kind: TKParcel, Position: pos, Parcel: it}\n\t\t}\n\t}\n\tfor _, it := range env.Transports {\n\t\tif pos == it.Position {\n\t\t\treturn &Tile{env: env, Kind: TKTransport, Position: pos, Transport: it}\n\t\t}\n\t}\n\n\treturn &Tile{env: env, Kind: TKFree, Position: pos}\n}",
"func NewCameraWithTransform(horizontalSize int, verticalSize int, fieldOfView float64,\n\ttransform *matrix.Matrix) *Camera {\n\n\tc := &Camera{\n\t\thorizontalSizeInPixels: horizontalSize,\n\t\tverticalSizeInPixels: verticalSize,\n\t\tfieldOfView: fieldOfView,\n\t\ttransform: transform,\n\t}\n\tc.prepareWorldSpaceUnits()\n\n\t// Cache the inverse of the transform, which never\n\t// changes and is used in rendering routines often.\n\tinverseTransform, _ := matrix.Inverse(transform)\n\tc.inverseTransform = inverseTransform\n\n\treturn c\n}",
"func (hmd *Hmd) GetEyeTimewarpMatrices(eye EyeType, renderPose Posef) (twmOut [2]Matrix4f) {\n\tC.ovrHmd_GetEyeTimewarpMatrices(hmd.cptr(), C.ovrEyeType(eye), c_posef(renderPose), twmOut[0].cptr())\n\treturn\n}",
"func (r Ray) At(t float64) *Vec3 {\n\treturn r.Direction.SMul(t).AddSet(r.Origin)\n}",
"func LookAt(children ...Element) *CompoundElement { return newCE(\"LookAt\", children) }",
"func (bm Blendmap) View() (float32, float32, float32, float32) {\n\treturn bm.Map.viewport.Min.X, bm.Map.viewport.Min.Y, bm.Map.viewport.Max.X, bm.Map.viewport.Max.Y\n}",
"func (tr *trooper) loc() (x, y, z float64) { return tr.part.At() }",
"func Eye(size int) Matrix {\n\tdiag := make([]float64, size)\n\tfor i := 0; i < size; i++ {\n\t\tdiag[i] = 1.0\n\t}\n\treturn Diag(diag...)\n}",
"func (p *Player) LookPos() m.Pos {\n\tfocus := m.Pos{\n\t\tX: p.Entity.Rect.Origin.X + PlayerEyeDX,\n\t\tY: p.LastGroundPos.Y + p.eyeDY(),\n\t}\n\tif p.LookUp {\n\t\tfocus.Y -= LookDistance\n\t}\n\tif p.LookDown {\n\t\tfocus.Y += LookDistance\n\t}\n\treturn focus\n}",
"func (m *RoleMutation) CreatedAt() (r time.Time, exists bool) {\n\tv := m.createdAt\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}",
"func (c *CSR) At(m, n int) float64 {\n\treturn c.matrix.At(m, n)\n}",
"func MatrixTranslate(x, y, z float32) Matrix {\n\treturn NewMatrix(\n\t\t1.0, 0.0, 0.0, x,\n\t\t0.0, 1.0, 0.0, y,\n\t\t0.0, 0.0, 1.0, z,\n\t\t0, 0, 0, 1.0)\n}",
"func NewCamera(from, to, vup Vec3, vfov, aspect float64) Camera {\n\ttheta := vfov * math.Pi / 180\n\thalfheight := math.Tan(theta / 2)\n\thalfwidth := aspect * halfheight\n\tw := from.Sub(to).Unit()\n\tu := vup.Cross(w).Unit()\n\tv := w.Cross(u)\n\treturn Camera{\n\t\tOrigin: from,\n\t\tBottomLeft: from.Sub(u.ScalarMul(halfwidth)).Sub(v.ScalarMul(halfheight)).Sub(w),\n\t\tHorizontal: u.ScalarMul(2 * halfwidth),\n\t\tVertical: v.ScalarMul(2 * halfheight),\n\t}\n}",
"func (leg *Leg) Matrix() math3d.Matrix44 {\n\treturn *math3d.MakeMatrix44(*leg.Origin, *math3d.MakeSingularEulerAngle(math3d.RotationHeading, leg.Angle))\n}",
"func (d *Dense) At(i, j int) float64 {\n\tidx := i*d.columns + j\n\treturn d.data[idx]\n}",
"func Eye(size int) (matrix Matrix) {\n\tmatrix = NewMatrix(size, size)\n\n\tfor i := 0; i < size; i++ {\n\t\tmatrix[i][i] = 1\n\t}\n\n\treturn matrix\n}",
"func (s *Surface) Transform(a, b, c, d, e, f float64) {\n\ts.Ctx.Call(\"transform\", a, b, c, d, e, f)\n}",
"func (ec *ExperienceCreate) SetCreatedAt(t time.Time) *ExperienceCreate {\n\tec.mutation.SetCreatedAt(t)\n\treturn ec\n}",
"func (A *Matrix) At(i, j int) float64 {\n\treturn A.data[i * A.stride + j]\n}",
"func EyePosition(e world.Entity) mgl64.Vec3 {\n\tpos := e.Position()\n\tif eyed, ok := e.(Eyed); ok {\n\t\tpos = pos.Add(mgl64.Vec3{0, eyed.EyeHeight()})\n\t}\n\treturn pos\n}",
"func (cam *Camera) SetupViewProjection() {\n\tx_ratio := cam.Width / cam.Height\n\tcam.View = PerspectiveFrustum(cam.YFov, x_ratio, cam.Near, cam.Far)\n\tcam.Projection = cam.View.M44()\n}",
"func (m *CvMat) At(x, y int) color.Color {\n\tcolorAtPoint := C.cvMatAt(m.ptr, C.int(x), C.int(y))\n\treturn color.Gray{uint8(colorAtPoint)}\n}",
"func (o LookupOrganizationResultOutput) CreatedAt() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupOrganizationResult) string { return v.CreatedAt }).(pulumi.StringOutput)\n}",
"func CallArgAt(vm *VM, target, locals Interface, msg *Message) Interface {\n\tm := target.(*Call).Msg\n\tv, stop := msg.NumberArgAt(vm, locals, 0)\n\tif stop != nil {\n\t\treturn stop\n\t}\n\tr := m.ArgAt(int(v.Value))\n\tif r != nil {\n\t\treturn r\n\t}\n\treturn vm.Nil\n}",
"func New(pos mgl32.Vec3) *Camera {\n\tc := &Camera{\n\t\tpos: pos,\n\t\tfront: mgl32.Vec3{0, 0, -1},\n\t\trotatey: 0,\n\t\trotatex: -90,\n\t\tSens: 0.14,\n\t\tisFlying: false,\n\t}\n\tc.updateAngles()\n\treturn c\n}",
"func (o LookupExperienceResultOutput) UpdatedAt() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupExperienceResult) string { return v.UpdatedAt }).(pulumi.StringOutput)\n}",
"func (wec *WorkExperienceCreate) SetCreatedAt(t time.Time) *WorkExperienceCreate {\n\twec.mutation.SetCreatedAt(t)\n\treturn wec\n}",
"func (c *Composite) At(x, y int) float32 {\n\treturn c.AtZ(x, y, 0)\n}",
"func (p *Points) RenderSetup(gs *gls.GLS, rinfo *core.RenderInfo) {\n\n\t// Transfer model view projection matrix uniform\n\tmvpm := p.ModelViewProjectionMatrix()\n\tlocation := p.uniMVPm.Location(gs)\n\tgs.UniformMatrix4fv(location, 1, false, &mvpm[0])\n\n\t// Transfer model view matrix uniform\n\tmvm := p.ModelViewMatrix()\n\tlocation = p.uniMVm.Location(gs)\n\tgs.UniformMatrix4fv(location, 1, false, &mvm[0])\n}",
"func (r *RayTracer) buildRayFromEyeToImage(i, j entry, eye *Vec3) *Ray {\n\t// formulas from reference calculations\n\talpha := r.tanX * ((j / r.halfWidth) - ONE)\n\tbeta := r.tanY * (ONE - (i / r.halfHeight))\n\tdir := r.basisU.scale(alpha).plus(r.basisV.scale(beta)).minus(&r.basisW).direction()\n\treturn &Ray{*eye, *dir}\n}",
"func (p *FakeProvider) GetActorAt(ctx context.Context, key block.TipSetKey, addr address.Address) (*types.Actor, error) {\n\tif !key.Equals(p.head) {\n\t\treturn nil, errors.Errorf(\"No such tipset %s, expected %s\", key, p.head)\n\t}\n\ta, ok := p.actors[addr]\n\tif !ok {\n\t\treturn nil, xerrors.Errorf(\"No such address %s\", addr.String())\n\t}\n\treturn a, nil\n}",
"func SetViewTransform(viewID ViewID, view, proj [16]float32) {\n\tC.bgfx_set_view_transform(\n\t\tC.ushort(viewID),\n\t\tunsafe.Pointer(&view[0]),\n\t\tunsafe.Pointer(&proj[0]),\n\t)\n}",
"func (c *CSC) At(m, n int) float64 {\n\treturn c.matrix.At(n, m)\n}",
"func NewMatrixTranslate(x, y, z float32) Matrix {\n\treturn Matrix{\n\t\tM0: 1, M1: 0, M2: 0, M3: x,\n\t\tM4: 0, M5: 1, M6: 0, M7: y,\n\t\tM8: 0, M9: 0, M10: 1, M11: z,\n\t\tM12: 0, M13: 0, M14: 0, M15: 1,\n\t}\n}",
"func (t *Dense) at(coords ...int) (at int, err error) {\n\treturn Ltoi(t.Shape(), t.Strides(), coords...)\n}",
"func (t *Dense) At(coords ...int) (interface{}, error) {\n\tif len(coords) != t.Dims() {\n\t\treturn nil, errors.Errorf(dimMismatch, t.Dims(), len(coords))\n\t}\n\n\tat, err := t.at(coords...)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"At()\")\n\t}\n\n\treturn t.Get(at), nil\n}",
"func (vw *View) UpdatePose() {\n\tUpdatePose(vw.World, vw.Root)\n}",
"func (o EnvironmentLastUpdatedOutput) CreatedAt() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v EnvironmentLastUpdated) *string { return v.CreatedAt }).(pulumi.StringPtrOutput)\n}",
"func (this *Hmd) GetEyePose(eye EyeType) Posef {\n\treturn posef(C.ovrHmd_GetEyePose(this.cptr(), C.ovrEyeType(eye)))\n}",
"func (pl *Plane) NormalAt(point *Tuple) Tuple {\n\tobNormal := Vector(0, 1, 0)\n\twNormal := pl.NormalToWorld(obNormal)\n\treturn *wNormal\n}",
"func (self *TileSprite) SetCameraOffsetA(member *Point) {\n self.Object.Set(\"cameraOffset\", member)\n}",
"func (t *Dense) SetAt(v interface{}, coords ...int) error {\n\tif len(coords) != t.Dims() {\n\t\treturn errors.Errorf(dimMismatch, t.Dims(), len(coords))\n\t}\n\n\tat, err := t.at(coords...)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"SetAt()\")\n\t}\n\tt.Set(at, v)\n\treturn nil\n}",
"func (t TransposeTri) At(i, j int) float64 {\n\treturn t.Triangular.At(j, i)\n}",
"func (mnu *MedicalNoteUpdate) SetAt(t time.Time) *MedicalNoteUpdate {\n\tmnu.at = &t\n\treturn mnu\n}",
"func (uimu *UserIDMappingUpdate) SetCreatedAt(t time.Time) *UserIDMappingUpdate {\n\tuimu.mutation.SetCreatedAt(t)\n\treturn uimu\n}",
"func (w *RandomWorld) MoveBeingToLocation(b *GoWorld.Being, to GoWorld.Location) error {\n\t// Check if location is valid for being to move to\n\t//if ok, err := w.IsHabitable(to); !ok {\n\t//\tpanic(err.Error())\n\t//\treturn err\n\t//}\n\t// Update the terrain spots with the new being\n\tw.TerrainSpots[b.Position.X][b.Position.Y].Being = uuid.Nil\n\tw.TerrainSpots[to.X][to.Y].Being = b.ID\n\n\t// Update being position\n\tb.Position.X = to.X\n\tb.Position.Y = to.Y\n\n\treturn nil\n}",
"func (b *BasicShape) MaterialAt(geometry.Vector) *mat.Material {\n\treturn b.material\n}",
"func (this *Hmd) BeginEyeRender(eye EyeType) Posef {\n\treturn posef(C.ovrHmd_BeginEyeRender(this.cptr(), C.ovrEyeType(eye)))\n}",
"func (c2d *C2DMatrix) Translate(x, y float64) {\n\tvar mat Matrix\n\n\tmat.m11 = 1\n\tmat.m12 = 0\n\tmat.m13 = 0\n\n\tmat.m21 = 0\n\tmat.m22 = 1\n\tmat.m23 = 0\n\n\tmat.m31 = x\n\tmat.m32 = y\n\tmat.m33 = 1\n\n\t//and multiply\n\tc2d.MatrixMultiply(mat)\n}",
"func MessageArgAt(vm *VM, target, locals Interface, msg *Message) Interface {\n\tm := target.(*Message)\n\tn, stop := msg.NumberArgAt(vm, locals, 0)\n\tif stop != nil {\n\t\treturn stop\n\t}\n\tr := m.ArgAt(int(n.Value))\n\tif r != nil {\n\t\treturn r\n\t}\n\treturn vm.Nil\n}",
"func (m IntMatrix2D) At(i, j int) (int, error) {\n\tif i >= m.nrows || j >= m.ncols {\n\t\treturn 0, fmt.Errorf(\"[%dx%d Matrix]: Invalid Location (%d,%d)\",\n\t\t\tm.nrows, m.ncols, i, j)\n\t}\n\treturn m.mat[i][j], nil\n}",
"func GetPosition(date time.Time, lat float64, lng float64) SunPosition {\n\n\tvar lw = rad * -lng\n\tvar phi = rad * lat\n\tvar d = toDays(date)\n\tvar c = sunCoords(d)\n\tvar H = siderealTime(d, lw) - c.rightAscension\n\n\treturn SunPosition{\n\t\tazimuth(H, phi, c.declination),\n\t\taltitude(H, phi, c.declination),\n\t}\n}",
"func (o FaqOutput) CreatedAt() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Faq) pulumi.StringOutput { return v.CreatedAt }).(pulumi.StringOutput)\n}",
"func (t *transform) Set(modelView mgl32.Mat4) {\n\tt.dataLock.Lock()\n\tdefer t.dataLock.Unlock()\n\tt.modelView = modelView\n}",
"func (o LookupAppResultOutput) CreatedAt() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupAppResult) string { return v.CreatedAt }).(pulumi.StringOutput)\n}",
"func (m *Message) EvalArgAt(vm *VM, locals Interface, n int) Interface {\n\treturn m.ArgAt(n).Eval(vm, locals)\n}",
"func (c *Camera) TranslatePosition(x, y float64) (adjustedX, adjustedY float64) {\n\tadjustedOffsetY := float64(c.viewportHeight) * c.OffsetY / 100\n\tadjustedOffsetX := float64(c.viewportHeight) * c.OffsetX / 100\n\tadjustedX = x*c.zoom - float64(c.PosX) + adjustedOffsetX + float64(c.viewportWidth/2)\n\tadjustedY = y*c.zoom - float64(c.PosY) + adjustedOffsetY + float64(c.viewportHeight/2)\n\treturn\n}",
"func QuatFromEuler(yaw, pitch, roll float32) Quat {\n\t// Source: https://en.wikipedia.org/wiki/Conversion_between_quaternions_and_Euler_angles\n\tsinY, cosY := math32.Sincos(yaw * 0.5)\n\tsinP, cosP := math32.Sincos(pitch * 0.5)\n\tsinR, cosR := math32.Sincos(roll * 0.5)\n\treturn Quat{\n\t\tW: cosR*cosP*cosY + sinR*sinP*sinY,\n\t\tX: sinR*cosP*cosY - cosR*sinP*sinY,\n\t\tY: cosR*sinP*cosY + sinR*cosP*sinY,\n\t\tZ: cosR*cosP*sinY - sinR*sinP*cosY,\n\t}\n}",
"func (c *Camera) Move(dir CameraDirection, offset float32) {\n\tvar delta types.Vec3\n\n\tswitch dir {\n\tcase Up:\n\t\tdelta = c.Up.Mul(offset)\n\tcase Down:\n\t\tdelta = c.Up.Mul(-offset)\n\tcase Left:\n\t\tdelta = c.LookAt.Sub(c.Position).Normalize().Cross(c.Up).Mul(-offset)\n\tcase Right:\n\t\tdelta = c.LookAt.Sub(c.Position).Normalize().Cross(c.Up).Mul(offset)\n\tcase Forward:\n\t\tdelta = c.LookAt.Sub(c.Position).Normalize().Mul(offset)\n\tcase Backward:\n\t\tdelta = c.LookAt.Sub(c.Position).Normalize().Mul(-offset)\n\t}\n\n\tc.Position = c.Position.Add(delta)\n\tc.LookAt = c.LookAt.Add(delta)\n\tc.Update()\n}",
"func (t Tower) FireAt(e *Enemy) *Bullet {\n\tpic := DrawableRect(pixel.R(0, 0, 10, 10), colornames.Hotpink)\n\treturn NewBullet(t.Position, pixel.V(50, 50), pic, e.Speed/10, e, t.Damage)\n}",
"func (m Matcher) At(v Var) Matcher {\n\treturn m\n}",
"func (s *Surface) SetTransform(a, b, c, d, e, f float64) {\n\ts.Ctx.Call(\"setTransform\", a, b, c, d, e, f)\n}",
"func (t *transform) Update(translate, rotate mgl32.Vec3) {\n\n\tt.dataLock.Lock()\n\tdefer t.dataLock.Unlock()\n\n\tt.translation = t.translation.Add(translate)\n\ttrans := t.translation\n\n\tt.rotation = t.rotation.Add(rotate)\n\ttotal := t.rotation\n\trotX := mgl32.HomogRotate3DX(total.X())\n\trotY := mgl32.HomogRotate3DY(total.Y())\n\trotZ := mgl32.HomogRotate3DZ(total.Z())\n\trotMatrix := rotZ.Mul4(rotY).Mul4(rotX)\n\ttrans = t.translation\n\tt.modelView = mgl32.Ident4().Mul4(mgl32.Translate3D(trans.X(), trans.Y(), trans.Z())).Mul4(rotMatrix)\n}",
"func NewTransform() *Transform {\n\treturn &Transform{\n\t\tPosition: NewVector(0, 0),\n\t\tRotation: 0,\n\t\tScale: NewVector(1, 1),\n\t\tDim: NewVector(0, 0),\n\t}\n}",
"func (lp *Point) RenderSetup(gs *gls.GLS, rinfo *core.RenderInfo, idx int) {\n\n\t// Calculates light position in camera coordinates and updates uniform\n\tvar pos math32.Vector3\n\tlp.WorldPosition(&pos)\n\tpos4 := math32.Vector4{pos.X, pos.Y, pos.Z, 1.0}\n\tpos4.ApplyMatrix4(&rinfo.ViewMatrix)\n\tlp.udata.position.X = pos4.X\n\tlp.udata.position.Y = pos4.Y\n\tlp.udata.position.Z = pos4.Z\n\n\t// Transfer uniform data\n\tconst vec3count = 3\n\tlocation := lp.uni.LocationIdx(gs, vec3count*int32(idx))\n\tgs.Uniform3fv(location, vec3count, &lp.udata.color.R)\n}",
"func (self *Graphics) SetCameraOffsetA(member *Point) {\n self.Object.Set(\"cameraOffset\", member)\n}",
"func (app *RotatingCubeApp) Update(dt float32) {\n\tapp.rotation = app.rotation + dt/500*5\n\n\t//Update the move matrix\n\t//movMatrix := n.NewMatrixRotate(n.NewVector3Up(), 0.5)\n\t//movMatrix = movMatrix.Multiply(n.NewMatrixRotate(n.NewVector3Forward(), 0.3*app.rotation))\n\t//movMatrix = movMatrix.Multiply(n.NewMatrixRotate(n.NewVector3Right(), 0.2*app.rotation))\n\t//app.moveMatrix = movMatrix\n}",
"func TSWorldMatrix(index int) TRANSFORMSTATETYPE {\n\treturn TRANSFORMSTATETYPE(index + 256)\n}",
"func (sf *QueryFactory) QueryAt(ctx context.Context, height int64) (Query, error) {\n\tstate, err := newImmutableState(ctx, sf.state, height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &epochtimeMockQuerier{state}, nil\n}",
"func (ec *Client) NodeInfoAt(ctx context.Context) (*p2p.NodeInfo, error) {\n\tvar result p2p.NodeInfo\n\terr := ec.c.CallContext(ctx, &result, \"manage_nodeInfo\")\n\treturn (*p2p.NodeInfo)(&result), err\n}"
] | [
"0.7221874",
"0.7156577",
"0.7148838",
"0.7097981",
"0.68202144",
"0.6567185",
"0.6151463",
"0.61138666",
"0.59963095",
"0.59800833",
"0.56573945",
"0.55753505",
"0.5262416",
"0.49019694",
"0.48895672",
"0.4848101",
"0.47955284",
"0.46801323",
"0.45855114",
"0.44959348",
"0.44658354",
"0.443222",
"0.44306004",
"0.4423808",
"0.43974873",
"0.43612996",
"0.43100354",
"0.43007928",
"0.42700985",
"0.42598537",
"0.42479873",
"0.4236775",
"0.42241982",
"0.4217463",
"0.41876262",
"0.40886065",
"0.40803096",
"0.4064444",
"0.4045481",
"0.40388817",
"0.40293238",
"0.40264592",
"0.40203297",
"0.39984292",
"0.3995163",
"0.3994655",
"0.39942095",
"0.398901",
"0.39730856",
"0.39615202",
"0.39471483",
"0.39459386",
"0.39424467",
"0.39392298",
"0.3932383",
"0.39113256",
"0.39027807",
"0.38984454",
"0.38974133",
"0.38938564",
"0.3890775",
"0.38873848",
"0.3883485",
"0.3865192",
"0.38629672",
"0.3858851",
"0.3845559",
"0.38347226",
"0.38344",
"0.38179144",
"0.3815528",
"0.38031405",
"0.38026732",
"0.38019103",
"0.38012096",
"0.37981358",
"0.378299",
"0.3782543",
"0.3777403",
"0.37755537",
"0.37738103",
"0.375819",
"0.37556234",
"0.37545878",
"0.37539944",
"0.37524036",
"0.3749595",
"0.3747231",
"0.37428454",
"0.3734208",
"0.37311712",
"0.3709731",
"0.370353",
"0.37004143",
"0.36992607",
"0.36981642",
"0.36914483",
"0.3689052",
"0.36877415",
"0.36857158"
] | 0.76853 | 0 |
LookAtV generates a transform matrix from world space into the specific eye space. | func LookAtV(eye, center, up Vec3) Mat4 {
f := center.Sub(eye).Normalize()
s := f.Cross(up.Normalize()).Normalize()
u := s.Cross(f)
M := Mat4{
s[0], u[0], -f[0], 0,
s[1], u[1], -f[1], 0,
s[2], u[2], -f[2], 0,
0, 0, 0, 1,
}
return M.Mul4(Translate3D(float64(-eye[0]), float64(-eye[1]), float64(-eye[2])))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func LookAtV(cameraLoc, lookTo, orientation *Vec3) *Mat4 {\n\n\tF := lookTo.Sub(cameraLoc)\n\tf := F.Normalize()\n\to := orientation.Normalize()\n\ts := f.Cross(o).Normalize()\n\tu := s.Cross(f)\n\tM := Mat4{\n\t\tVec4{ s.X, u.X, -f.X, 0.0 },\n\t\tVec4{ s.Y, u.Y, -f.Y, 0.0 },\n\t\tVec4{ s.Z, u.Z, -f.Z, 0.0 },\n\t\tVec4{ 0.0, 0.0, 0.0, 1.0, },\n\t}\n\tt := Vec4{-lookTo.X, -lookTo.Y, -lookTo.Z, 1.0}\n\tMR := M.Translate(&t)\n\n\treturn MR\n}",
"func QuatLookAtV(eye, center, up Vec3) Quat {\n\t// http://www.opengl-tutorial.org/intermediate-tutorials/tutorial-17-quaternions/#I_need_an_equivalent_of_gluLookAt__How_do_I_orient_an_object_towards_a_point__\n\t// https://bitbucket.org/sinbad/ogre/src/d2ef494c4a2f5d6e2f0f17d3bfb9fd936d5423bb/OgreMain/src/OgreCamera.cpp?at=default#cl-161\n\n\tdirection := center.Sub(eye).Normalize()\n\n\t// Find the rotation between the front of the object (that we assume towards Z-,\n\t// but this depends on your model) and the desired direction\n\trotDir := QuatBetweenVectors(Vec3{0, 0, -1}, direction)\n\n\t// Recompute up so that it's perpendicular to the direction\n\t// You can skip that part if you really want to force up\n\t//right := direction.Cross(up)\n\t//up = right.Cross(direction)\n\n\t// Because of the 1rst rotation, the up is probably completely screwed up.\n\t// Find the rotation between the \"up\" of the rotated object, and the desired up\n\tupCur := rotDir.Rotate(Vec3{0, 1, 0})\n\trotUp := QuatBetweenVectors(upCur, up)\n\n\trotTarget := rotUp.Mul(rotDir) // remember, in reverse order.\n\treturn rotTarget.Inverse() // camera rotation should be inversed!\n}",
"func LookAt(eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ float64) Mat4 {\n\treturn LookAtV(Vec3{eyeX, eyeY, eyeZ}, Vec3{centerX, centerY, centerZ}, Vec3{upX, upY, upZ})\n}",
"func (c *Camera) LookAtVec(eye, center, up *glm.Vec3) {\n\tc.View = glm.LookAt(eye.X, eye.Y, eye.Z, center.X, center.Y, center.Z, up.X, up.Y, up.Z)\n\tc.Pos = *eye\n}",
"func LookAt(cameraX, cameraY, cameraZ, eyeX, eyeY, eyeZ, orientX, orientY, orientZ gl.Float) *Mat4 {\n\tcamera := &Vec3{ cameraX, cameraY, cameraZ }\n\teye := &Vec3{ eyeX, eyeY, eyeZ }\n\torient := &Vec3{ orientX, orientY, orientZ }\n\treturn LookAtV(camera, eye, orient)\n}",
"func MatrixLookAt(eye, target, up Vector3) Matrix {\n\tvar result Matrix\n\n\tz := Vector3Subtract(eye, target)\n\tz = Vector3Normalize(z)\n\tx := Vector3CrossProduct(up, z)\n\tx = Vector3Normalize(x)\n\ty := Vector3CrossProduct(z, x)\n\ty = Vector3Normalize(y)\n\n\tresult.M0 = x.X\n\tresult.M1 = x.Y\n\tresult.M2 = x.Z\n\tresult.M3 = -((x.X * eye.X) + (x.Y * eye.Y) + (x.Z * eye.Z))\n\tresult.M4 = y.X\n\tresult.M5 = y.Y\n\tresult.M6 = y.Z\n\tresult.M7 = -((y.X * eye.X) + (y.Y * eye.Y) + (y.Z * eye.Z))\n\tresult.M8 = z.X\n\tresult.M9 = z.Y\n\tresult.M10 = z.Z\n\tresult.M11 = -((z.X * eye.X) + (z.Y * eye.Y) + (z.Z * eye.Z))\n\tresult.M12 = 0.0\n\tresult.M13 = 0.0\n\tresult.M14 = 0.0\n\tresult.M15 = 1.0\n\n\treturn result\n}",
"func (transform *Transform) LookAt(target mgl32.Vec3) {\n\ttransform.objMatrix = mgl32.LookAtV(transform.GetPosition(), target, mgl32.Vec3{0, 1, 0})\n}",
"func NewMatrixLookAt(eye, target, up Vector3) Matrix {\n\tz := eye.Subtract(target).Normalize()\n\tx := up.CrossProduct(z).Normalize()\n\ty := z.CrossProduct(x).Normalize()\n\treturn Matrix{\n\t\tM0: x.X,\n\t\tM1: x.Y,\n\t\tM2: x.Z,\n\t\tM3: 0,\n\t\tM4: y.X,\n\t\tM5: y.Y,\n\t\tM6: y.Z,\n\t\tM7: 0,\n\t\tM8: z.X,\n\t\tM9: z.Y,\n\t\tM10: z.Z,\n\t\tM11: 0,\n\t\tM12: eye.X,\n\t\tM13: eye.Y,\n\t\tM14: eye.Z,\n\t\tM15: 1,\n\t}\n}",
"func NewLookAtMatrix(target, center, up vector.Vector) Matrix4 {\n\tz := target.Sub(center).Unit()\n\tx, _ := up.Cross(z)\n\tx = x.Unit()\n\ty, _ := z.Cross(x)\n\treturn Matrix4{\n\t\t{x[0], x[1], x[2], -x.Dot(target)},\n\t\t{y[0], y[1], y[2], -y.Dot(target)},\n\t\t{z[0], z[1], z[2], -z.Dot(target)},\n\t\t{0, 0, 0, 1},\n\t}\n}",
"func (c *Camera) LookAtval(eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ float32) {\n\tc.View = glm.LookAt(eyeX, eyeY, eyeZ, centerX, centerY, centerZ, upX, upY, upZ)\n\tc.Pos = glm.Vec3{X: eyeX, Y: eyeY, Z: eyeZ}\n}",
"func LookMatrix(o Vec, to Vec) *Mtx {\n\tf, _ := o.Minus(to).Unit() // forward\n\tr, _ := yAxis.Cross(f) // right\n\tu, _ := f.Cross(r) // up\n\torient := NewMat(\n\t\tr.X, u.X, f.X, 0,\n\t\tr.Y, u.Y, f.Y, 0,\n\t\tr.Z, u.Z, f.Z, 0,\n\t\t0, 0, 0, 1,\n\t)\n\treturn Shift(o).Mult(orient)\n}",
"func (c *camera) point_at(v Vector) {\n c.front = v.Sub(c.pos).Normalize().Add(c.pos)\n}",
"func (c *Camera) LookAt(x, y float64) {\n\tc.lookAtX = x\n\tc.lookAtY = y\n\tc.sTop = c.lookAtY + float64(c.screenH/2)*c.zoomInv\n\tc.sBottom = c.lookAtY - float64(c.screenH/2)*c.zoomInv\n\tc.sLeft = c.lookAtX - float64(c.screenW/2)*c.zoomInv\n\tc.sRight = c.lookAtX + float64(c.screenW/2)*c.zoomInv\n\n}",
"func (c *Camera) LookingAt() (float64, float64) {\n\treturn c.lookAtX, c.lookAtY\n}",
"func (c *Camera) Matrix() mgl32.Mat4 {\n\treturn mgl32.LookAtV(c.pos, c.pos.Add(c.front), c.up)\n}",
"func (m Matrix) Transform(a vec.Vec) vec.Vec {\n\treturn vec.New(\n\t\ta.X*m[0]+a.Y*m[2]+m[4],\n\t\ta.X*m[1]+a.Y*m[3]+m[5])\n}",
"func NewCamera(lookFrom, lookAt, vup Vec3, vfov, aspect, aperture, focusDistance float64) *Camera {\n\n\ttheta := vfov * math.Pi / 180.0\n\thalfHeight := math.Tan(theta / 2.0)\n\thalfWidth := aspect * halfHeight\n\tw := lookFrom.SubtractVec3(lookAt).UnitVector()\n\tu := vup.Cross(w).UnitVector()\n\tv := w.Cross(u)\n\n\treturn &Camera{\n\t\tLowerLeftCorner: lookFrom.\n\t\t\tSubtractVec3(u.MultiplyScalar(halfWidth * focusDistance)).\n\t\t\tSubtractVec3(v.MultiplyScalar(halfHeight * focusDistance)).\n\t\t\tSubtractVec3(w.MultiplyScalar(focusDistance)),\n\t\tHorizontal: u.MultiplyScalar(halfWidth * focusDistance * 2.0),\n\t\tVertical: v.MultiplyScalar(halfHeight * focusDistance * 2.0),\n\t\tOrigin: lookFrom,\n\t\tLensRadius: aperture / 2.0,\n\t\tU: u,\n\t\tV: v,\n\t}\n}",
"func V(x, y float64) Vec {\n\treturn Vec{x, y}\n}",
"func (m *Mat4) MulV(v *Vec4) *Vec4 {\n\trv := Vec4{0.0, 0.0, 0.0, 0.0}\n\trv.X = m[0].X*v.X + m[1].X*v.Y + m[2].X*v.Z + m[3].X*v.W\n\trv.Y = m[0].Y*v.X + m[1].Y*v.Y + m[2].Y*v.Z + m[3].Y*v.W\n\trv.Z = m[0].Z*v.X + m[1].Z*v.Y + m[2].Z*v.Z + m[3].Z*v.W\n\trv.W = m[0].W*v.X + m[1].W*v.Y + m[2].W*v.Z + m[3].W*v.W\n\treturn &rv\n}",
"func V(x, y float64) Vect {\n\treturn Vect{x, y}\n}",
"func NewCamera(from, to, vup Vec3, vfov, aspect float64) Camera {\n\ttheta := vfov * math.Pi / 180\n\thalfheight := math.Tan(theta / 2)\n\thalfwidth := aspect * halfheight\n\tw := from.Sub(to).Unit()\n\tu := vup.Cross(w).Unit()\n\tv := w.Cross(u)\n\treturn Camera{\n\t\tOrigin: from,\n\t\tBottomLeft: from.Sub(u.ScalarMul(halfwidth)).Sub(v.ScalarMul(halfheight)).Sub(w),\n\t\tHorizontal: u.ScalarMul(2 * halfwidth),\n\t\tVertical: v.ScalarMul(2 * halfheight),\n\t}\n}",
"func (v *Vec4) Transform(m *Mat4) {\n\tvar t Vec4\n\tt.Assign(v)\n\n\tv.X = t.X*m[0] + t.Y*m[4] + t.Z*m[8] + t.W*m[12]\n\tv.Y = t.X*m[1] + t.Y*m[5] + t.Z*m[9] + t.W*m[13]\n\tv.Z = t.X*m[2] + t.Y*m[6] + t.Z*m[10] + t.W*m[14]\n\tv.W = t.X*m[3] + t.Y*m[7] + t.Z*m[11] + t.W*m[15]\n}",
"func NewCamera(lookFrom, lookAt, vup Vec3, vfov, aspect, aperture, focusDist float64) Camera {\n\tlensRadius := aperture / 2\n\ttheta := vfov * math.Pi / 180\n\thalfHeight := math.Tan(theta / 2)\n\thalfWidth := aspect * halfHeight\n\torigin := lookFrom\n\tw := lookFrom.Substract(lookAt).UnitVector()\n\tu := vup.Cross(w).UnitVector()\n\tv := w.Cross(u)\n\tlowerLeftCorner := origin.Substract(\n\t\tu.ScalarMultiple(halfWidth * focusDist)).\n\t\tSubstract(v.ScalarMultiple(halfHeight * focusDist)).\n\t\tSubstract(w.ScalarMultiple(focusDist))\n\thorizontal := u.ScalarMultiple(2 * halfWidth * focusDist)\n\tvertical := v.ScalarMultiple(2 * halfHeight * focusDist)\n\treturn Camera{origin, lowerLeftCorner, horizontal, vertical, u, v, w, lensRadius}\n}",
"func (_Flopper *FlopperCallerSession) Vat() (common.Address, error) {\n\treturn _Flopper.Contract.Vat(&_Flopper.CallOpts)\n}",
"func V(x, y f.Float) Vect { return Vect{x, y} }",
"func Translation(t vec.Vec) Matrix {\n\treturn Matrix{1, 0, 0, 1, t.X, t.Y}\n}",
"func (_Flopper *FlopperSession) Vat() (common.Address, error) {\n\treturn _Flopper.Contract.Vat(&_Flopper.CallOpts)\n}",
"func NewCameraWithTransform(horizontalSize int, verticalSize int, fieldOfView float64,\n\ttransform *matrix.Matrix) *Camera {\n\n\tc := &Camera{\n\t\thorizontalSizeInPixels: horizontalSize,\n\t\tverticalSizeInPixels: verticalSize,\n\t\tfieldOfView: fieldOfView,\n\t\ttransform: transform,\n\t}\n\tc.prepareWorldSpaceUnits()\n\n\t// Cache the inverse of the transform, which never\n\t// changes and is used in rendering routines often.\n\tinverseTransform, _ := matrix.Inverse(transform)\n\tc.inverseTransform = inverseTransform\n\n\treturn c\n}",
"func (p *Point) V() pixel.Vec {\n\treturn pixel.V(float64(p.X), float64(p.Y))\n}",
"func Vector3Transform(v Vector3, mat Matrix) Vector3 {\n\tresult := Vector3{}\n\n\tx := v.X\n\ty := v.Y\n\tz := v.Z\n\n\tresult.X = mat.M0*x + mat.M4*y + mat.M8*z + mat.M12\n\tresult.Y = mat.M1*x + mat.M5*y + mat.M9*z + mat.M13\n\tresult.Z = mat.M2*x + mat.M6*y + mat.M10*z + mat.M14\n\n\treturn result\n}",
"func (p *Player) V() pixel.Vec {\n\treturn pixel.V(p.X, p.Y)\n}",
"func calcMVP(widthPx, heightPx int, tlx, tly, trx, try, blx, bly float64) f64.Aff3 {\n\t// Convert from pixel coords to vertex shader coords.\n\tinvHalfWidth := +2 / float64(widthPx)\n\tinvHalfHeight := -2 / float64(heightPx)\n\ttlx = tlx*invHalfWidth - 1\n\ttly = tly*invHalfHeight + 1\n\ttrx = trx*invHalfWidth - 1\n\ttry = try*invHalfHeight + 1\n\tblx = blx*invHalfWidth - 1\n\tbly = bly*invHalfHeight + 1\n\n\t// The resultant affine matrix:\n\t//\t- maps (0, 0) to (tlx, tly).\n\t//\t- maps (1, 0) to (trx, try).\n\t//\t- maps (0, 1) to (blx, bly).\n\treturn f64.Aff3{\n\t\ttrx - tlx, blx - tlx, tlx,\n\t\ttry - tly, bly - tly, tly,\n\t}\n}",
"func SetViewTransform(viewID ViewID, view, proj [16]float32) {\n\tC.bgfx_set_view_transform(\n\t\tC.ushort(viewID),\n\t\tunsafe.Pointer(&view[0]),\n\t\tunsafe.Pointer(&proj[0]),\n\t)\n}",
"func Mat3FromTranslation(out, v []float64) []float64 {\n\tout[0] = 1\n\tout[1] = 0\n\tout[2] = 0\n\tout[3] = 0\n\tout[4] = 1\n\tout[5] = 0\n\tout[6] = v[0]\n\tout[7] = v[1]\n\tout[8] = 1\n\treturn out\n}",
"func NewTransform() Transform {\n\tt := transform{\n\t\tmodelView: mgl32.Ident4(),\n\t\trotation: mgl32.Vec3{0, 0, 0},\n\t\ttranslation: mgl32.Vec3{0, 0, 0},\n\t}\n\treturn &t\n}",
"func (o Orbit) V() (V []float64) {\n\treturn o.vVec\n}",
"func (m Matrix) Translate(v vec32.Vector) Matrix {\n\treturn Mul(m, Matrix{\n\t\t{1, 0, 0, 0},\n\t\t{0, 1, 0, 0},\n\t\t{0, 0, 1, 0},\n\t\t{v[0], v[1], v[2], 1},\n\t})\n}",
"func (ray Ray) PointAt(time float64) Vec3D {\n\treturn AddVec3D(ray.origin, ScalarProduct(time, ray.direction))\n}",
"func Shift(v Vec) *Mtx {\n\treturn NewMat(\n\t\t1, 0, 0, v.X,\n\t\t0, 1, 0, v.Y,\n\t\t0, 0, 1, v.Z,\n\t\t0, 0, 0, 1,\n\t)\n}",
"func NewView(world *eve.Group, sc *gi3d.Scene, root *gi3d.Group) *View {\n\tvw := &View{World: world, Scene: sc, Root: root}\n\treturn vw\n}",
"func makeVao(points []float32) uint32 {\n\tvar vbo uint32\n\tgl.GenBuffers(1, &vbo)\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbo)\n\tgl.BufferData(gl.ARRAY_BUFFER, 4*len(points), gl.Ptr(points), gl.STATIC_DRAW)\n\n\tvar vao uint32\n\tgl.GenVertexArrays(1, &vao)\n\tgl.BindVertexArray(vao)\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbo)\n\tvar offset int = 6 * 4\n\tgl.VertexAttribPointer(0, 2, gl.FLOAT, false, 8*4, gl.PtrOffset(offset))\n\tgl.EnableVertexAttribArray(0)\n\t//gl.VertexAttribPointer(0, 3, gl.FLOAT, false, 0, nil)\n\n\treturn vao\n}",
"func VPMOVM2D(k, xyz operand.Op) { ctx.VPMOVM2D(k, xyz) }",
"func Rotate(v Vec) *Mtx {\n\ta := v.Len()\n\tc := math.Cos(a)\n\ts := math.Sin(a)\n\tt := 1 - c\n\tn, _ := v.Unit()\n\tx, y, z := n.X, n.Y, n.Z\n\treturn NewMat(\n\t\tt*x*x+c, t*x*y-z*s, t*x*z+y*s, 0,\n\t\tt*x*y+z*s, t*y*y+c, t*y*z-x*s, 0,\n\t\tt*x*z-y*s, t*y*z+x*s, t*z*z+c, 0,\n\t\t0, 0, 0, 1,\n\t)\n}",
"func (_Flopper *FlopperCaller) Vat(opts *bind.CallOpts) (common.Address, error) {\n\tvar (\n\t\tret0 = new(common.Address)\n\t)\n\tout := ret0\n\terr := _Flopper.contract.Call(opts, out, \"vat\")\n\treturn *ret0, err\n}",
"func updateCamVectors() {\n\tfront := math32.Vector3{}\n\tfront.X = math32.Cos(math32.DegToRad(flyCam.yaw)) * math32.Cos(math32.DegToRad(flyCam.pitch))\n\tfront.Y = math32.Sin(math32.DegToRad(flyCam.pitch))\n\tfront.Z = math32.Sin(math32.DegToRad(flyCam.yaw)) * math32.Cos(math32.DegToRad(flyCam.pitch))\n\tflyCam.front = *front.Normalize()\n\tflyCam.right = *flyCam.front.Clone().Cross(&flyCam.worldUp).Normalize()\n\tflyCam.up = *flyCam.right.Clone().Cross(&flyCam.front).Normalize()\n}",
"func makeVao(points []float32) uint32 {\n\tvar vbo uint32\n\tgl.GenBuffers(1, &vbo)\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbo)\n\tgl.BufferData(gl.ARRAY_BUFFER, 4*len(points), gl.Ptr(points), gl.STATIC_DRAW)\n\n\tvar vao uint32\n\tgl.GenVertexArrays(1, &vao)\n\tgl.BindVertexArray(vao)\n\tgl.EnableVertexAttribArray(0)\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbo)\n\tgl.VertexAttribPointer(0, 3, gl.FLOAT, false, 0, nil)\n\n\treturn vao\n}",
"func VPMOVM2W(k, xyz operand.Op) { ctx.VPMOVM2W(k, xyz) }",
"func main() {\n\tif err := glfw.Init(); err != nil {\n\t\tlog.Fatalln(\"failed to initialize glfw:\", err)\n\t}\n\tdefer glfw.Terminate()\n\n\tglfw.WindowHint(glfw.Resizable, glfw.False)\n\tglfw.WindowHint(glfw.ContextVersionMajor, 2)\n\tglfw.WindowHint(glfw.ContextVersionMinor, 1)\n\twindow, err := glfw.CreateWindow(windowWidth, windowHeight, \"CUBE\", nil, nil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\twindow.MakeContextCurrent()\n\n\t// Set mouse tracking callback\n\twindow.SetCursorPosCallback(cursorPosCallback)\n\n\t// Initialize Gl\n\tif err := gl.Init(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tversion := gl.GoStr(gl.GetString(gl.VERSION))\n\tfmt.Println(\"OpenGL version\", version)\n\n\t// Configure the vertex and fragment shaders\n\tprogram, err := newProgram(vertexShader, fragmentShader)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tgl.UseProgram(program)\n\n\tprojection = mgl32.Perspective(mgl32.DegToRad(45.0), float32(windowWidth)/windowHeight, 0.1, 10.0)\n\tprojectionUniform := gl.GetUniformLocation(program, gl.Str(\"projection\\x00\"))\n\tgl.UniformMatrix4fv(projectionUniform, 1, false, &projection[0])\n\n\tview = mgl32.LookAtV(mgl32.Vec3{3, 3, 3}, mgl32.Vec3{0, 0, 0}, mgl32.Vec3{0, 1, 0})\n\tmodel = mgl32.Ident4()\n\n\tmodelView := view.Mul4(model)\n\tmodelViewUniform := gl.GetUniformLocation(program, gl.Str(\"modelView\\x00\"))\n\tgl.UniformMatrix4fv(modelViewUniform, 1, false, &modelView[0])\n\n\tnormal := (modelView.Inv()).Transpose()\n\tnormalUniform := gl.GetUniformLocation(program, gl.Str(\"normal\\x00\"))\n\tgl.UniformMatrix4fv(normalUniform, 1, false, &normal[0])\n\n\ttextureUniform := gl.GetUniformLocation(program, gl.Str(\"texture\\x00\"))\n\tgl.Uniform1i(textureUniform, 0)\n\n\tselectedTriangle = mgl32.Ident3()\n\tselectedTriangle.SetCol(0, mgl32.Vec3{1, 1, 1})\n\tselectedTriangle.SetCol(1, mgl32.Vec3{-1, 1, 1})\n\tselectedTriangle.SetCol(2, mgl32.Vec3{-1, -1, 1})\n\tselectedTriangleUniform := gl.GetUniformLocation(program, gl.Str(\"selectedTriangle\\x00\"))\n\tgl.UniformMatrix3fv(selectedTriangleUniform, 1, false, &selectedTriangle[0])\n\n\t// Load the texture\n\ttexture, err := newTexture(\"./res/square.png\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\t// Configure the vertex data\n\tvar vao uint32\n\tgl.GenVertexArrays(1, &vao)\n\tgl.BindVertexArray(vao)\n\n\tvar vbo uint32\n\tgl.GenBuffers(1, &vbo)\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbo)\n\tgl.BufferData(gl.ARRAY_BUFFER, len(cubeVertices)*floatSize, gl.Ptr(cubeVertices), gl.STATIC_DRAW)\n\n\tvar ibo uint32\n\tgl.GenBuffers(1, &ibo)\n\tgl.BindBuffer(gl.ELEMENT_ARRAY_BUFFER, ibo)\n\tgl.BufferData(gl.ELEMENT_ARRAY_BUFFER, len(cubeIndices)*floatSize, gl.Ptr(cubeIndices), gl.STATIC_DRAW)\n\n\tvertAttrib := uint32(gl.GetAttribLocation(program, gl.Str(\"vertPosition\\x00\")))\n\tgl.EnableVertexAttribArray(vertAttrib)\n\tgl.VertexAttribPointer(vertAttrib, 3, gl.FLOAT, false, 11*floatSize, gl.PtrOffset(0))\n\n\tnormalAttrib := uint32(gl.GetAttribLocation(program, gl.Str(\"vertNormal\\x00\")))\n\tgl.EnableVertexAttribArray(normalAttrib)\n\tgl.VertexAttribPointer(normalAttrib, 3, gl.FLOAT, false, 11*floatSize, gl.PtrOffset(3))\n\n\ttexCoordAttrib := uint32(gl.GetAttribLocation(program, gl.Str(\"vertTexCoord\\x00\")))\n\tgl.EnableVertexAttribArray(texCoordAttrib)\n\tgl.VertexAttribPointer(texCoordAttrib, 2, gl.FLOAT, false, 11*floatSize, gl.PtrOffset(6*floatSize))\n\n\tcolorAttrib := uint32(gl.GetAttribLocation(program, 
gl.Str(\"vertColor\\x00\")))\n\tgl.EnableVertexAttribArray(colorAttrib)\n\tgl.VertexAttribPointer(colorAttrib, 3, gl.FLOAT, false, 11*floatSize, gl.PtrOffset(8*floatSize))\n\n\tgl.BindVertexArray(0)\n\tgl.UseProgram(0)\n\n\t// Configure global settings\n\tgl.Enable(gl.DEPTH_TEST)\n\tgl.DepthFunc(gl.LEQUAL)\n\tgl.ClearColor(0.7, 0.7, 0.7, 1.0)\n\tgl.ClearStencil(0)\n\tgl.ClearDepth(1.0)\n\n\tangle := 0.0\n\tpreviousTime := glfw.GetTime()\n\n\tfor !window.ShouldClose() {\n\t\tgl.Clear(gl.COLOR_BUFFER_BIT | gl.DEPTH_BUFFER_BIT | gl.STENCIL_BUFFER_BIT)\n\n\t\t// Update\n\t\ttime := glfw.GetTime()\n\t\telapsed := time - previousTime\n\t\tpreviousTime = time\n\t\tangle -= elapsed\n\n\t\tmodel = mgl32.HomogRotate3D(float32(angle), mgl32.Vec3{0, 1, 0})\n\t\tmodelView = view.Mul4(model)\n\t\tnormal = (modelView.Inv()).Transpose()\n\n\t\t// Ray-triangle intersection (with mouse coordinates)\n\t\tif (mouseX >= -1 && mouseX <= 1) && (mouseY >= -1 && mouseY <= 1) {\n\t\t\t// log.Printf(\"[Debug] Mouse position (x y): %v %v\\n\", mouseX, mouseY)\n\n\t\t\tinvProjection := projection.Inv()\n\t\t\tinvView := view.Inv()\n\t\t\tinvModel := model.Inv()\n\n\t\t\tviewP1 := mgl32.TransformCoordinate(mgl32.Vec3{mouseX, mouseY, -1.0}, invProjection)\n\n\t\t\tR0 := mgl32.TransformCoordinate(mgl32.TransformCoordinate(mgl32.Vec3{0, 0, 0}, invView), invModel)\n\t\t\tR1 := mgl32.TransformCoordinate(mgl32.TransformCoordinate(viewP1, invView), invModel)\n\t\t\tD := mgl32.Vec3{R1[0] - R0[0], R1[1] - R0[1], R1[2] - R0[2]}.Normalize()\n\n\t\t\ttriangleIsectIndex := -1\n\t\t\tminDist := float32(100000)\n\t\t\tfor it := 0; it < len(cubeIndices); it += 3 {\n\t\t\t\ttriangle := []int32{cubeIndices[it+0], cubeIndices[it+1], cubeIndices[it+2]}\n\t\t\t\tA := mgl32.Vec3{cubeVertices[triangle[0]*11+0], cubeVertices[triangle[0]*11+1], cubeVertices[triangle[0]*11+2]}\n\t\t\t\tB := mgl32.Vec3{cubeVertices[triangle[1]*11+0], cubeVertices[triangle[1]*11+1], cubeVertices[triangle[1]*11+2]}\n\t\t\t\tC := mgl32.Vec3{cubeVertices[triangle[2]*11+0], cubeVertices[triangle[2]*11+1], cubeVertices[triangle[2]*11+2]}\n\n\t\t\t\tP0 := A\n\t\t\t\tNV := mgl32.Vec3{B[0] - A[0], B[1] - A[1], B[2] - A[2]}.Cross(mgl32.Vec3{C[0] - A[0], C[1] - A[1], C[2] - A[2]}).Normalize()\n\n\t\t\t\tdistIsect := mgl32.Vec3{P0[0] - R0[0], P0[1] - R0[1], P0[2] - R0[2]}.Dot(NV) / D.Dot(NV)\n\t\t\t\tif distIsect < 0.0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tPIsect := mgl32.Vec3{R0[0] + D[0]*distIsect, R0[1] + D[1]*distIsect, R0[2] + D[2]*distIsect}\n\n\t\t\t\tif PointInOrOnTriangle(PIsect, A, B, C) {\n\t\t\t\t\tif distIsect < minDist {\n\t\t\t\t\t\tminDist = distIsect\n\t\t\t\t\t\ttriangleIsectIndex = it / 3\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif triangleIsectIndex >= 0 {\n\t\t\t\t// log.Printf(\"[Debug] Mouse is ON Triangle with Index: %v\\n\", triangleIsectIndex)\n\n\t\t\t\ttriangle := []int32{cubeIndices[triangleIsectIndex*3+0], cubeIndices[triangleIsectIndex*3+1], cubeIndices[triangleIsectIndex*3+2]}\n\t\t\t\tselectedTriangle.SetCol(0, mgl32.Vec3{cubeVertices[triangle[0]*11+0], cubeVertices[triangle[0]*11+1], cubeVertices[triangle[0]*11+2]})\n\t\t\t\tselectedTriangle.SetCol(1, mgl32.Vec3{cubeVertices[triangle[1]*11+0], cubeVertices[triangle[1]*11+1], cubeVertices[triangle[1]*11+2]})\n\t\t\t\tselectedTriangle.SetCol(2, mgl32.Vec3{cubeVertices[triangle[2]*11+0], cubeVertices[triangle[2]*11+1], cubeVertices[triangle[2]*11+2]})\n\t\t\t} else {\n\t\t\t\tselectedTriangle = mgl32.Ident3()\n\t\t\t}\n\t\t}\n\n\t\t// 
Render\n\t\tgl.UseProgram(program)\n\t\tgl.UniformMatrix4fv(modelViewUniform, 1, false, &modelView[0])\n\t\tgl.UniformMatrix4fv(normalUniform, 1, false, &normal[0])\n\t\tgl.UniformMatrix3fv(selectedTriangleUniform, 1, false, &selectedTriangle[0])\n\n\t\tgl.BindVertexArray(vao)\n\n\t\tgl.ActiveTexture(gl.TEXTURE0)\n\t\tgl.BindTexture(gl.TEXTURE_2D, texture)\n\n\t\t// Draw cube\n\t\tgl.DrawElements(gl.TRIANGLES, int32(len(cubeIndices)), gl.UNSIGNED_INT, gl.PtrOffset(0))\n\n\t\t// Draw additional cube\n\t\t//\n\t\t// modelAdditional := model.Mul4(mgl32.Translate3D(2, 0, 0))\n\t\t// modelView = view.Mul4(modelAdditional)\n\t\t// normal = (modelView.Inv()).Transpose()\n\t\t// gl.UniformMatrix4fv(modelViewUniform, 1, false, &modelView[0])\n\t\t// gl.UniformMatrix4fv(normalUniform, 1, false, &normal[0])\n\t\t// gl.DrawElements(gl.TRIANGLES, int32(len(cubeIndices)), gl.UNSIGNED_INT, gl.PtrOffset(0))\n\n\t\tgl.BindVertexArray(0)\n\t\tgl.UseProgram(0)\n\n\t\t// Maintenance\n\t\twindow.SwapBuffers()\n\t\tglfw.PollEvents()\n\t}\n}",
"func (vw *View) UpdatePose() {\n\tUpdatePose(vw.World, vw.Root)\n}",
"func (cam *Camera) SetupViewProjection() {\n\tx_ratio := cam.Width / cam.Height\n\tcam.View = PerspectiveFrustum(cam.YFov, x_ratio, cam.Near, cam.Far)\n\tcam.Projection = cam.View.M44()\n}",
"func makeVao(vertices []float32, textureCoords []float32) uint32 {\n\tvbos := make([]uint32, 2)\n\t// vertices\n\tgl.GenBuffers(1, &vbos[0])\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbos[0])\n\tgl.BufferData(gl.ARRAY_BUFFER, 4*len(vertices), gl.Ptr(vertices), gl.STATIC_DRAW)\n\n\t// texture coords\n\ttexInvertY(textureCoords)\n\tgl.GenBuffers(1, &vbos[1])\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbos[1])\n\tgl.BufferData(gl.ARRAY_BUFFER, 4*len(textureCoords), gl.Ptr(textureCoords), gl.STATIC_DRAW)\n\n\t// create vao\n\tvar vao uint32\n\tgl.GenVertexArrays(1, &vao)\n\tgl.BindVertexArray(vao)\n\n\t// bind vertices\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbos[0])\n\tgl.VertexAttribPointer(0, 3, gl.FLOAT, false, 0, nil)\n\tgl.EnableVertexAttribArray(0)\n\n\t// bind textures\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbos[1])\n\tgl.VertexAttribPointer(1, 2, gl.FLOAT, false, 0, nil)\n\tgl.EnableVertexAttribArray(1)\n\n\treturn vao\n}",
"func (transform *Transform) SetPosition(newPos mgl32.Vec3) {\n\ttransform.objMatrix.SetRow(3, mgl32.Vec4{newPos.X(), newPos.Y(), newPos.Z()})\n}",
"func tsiolkovskyDeltaV(Ve float64, m0 float64, mf float64) float64 {\n\n\t// input validation\n\tif mf == 0 {\n\t\treturn 0\n\t}\n\n\t// calculate the mass ratio, i.e. the different between the initial\n\t// propellant and the final mass w/o propellant\n\tratioOfInitialToDryMass := m0 / mf\n\n\t// determine the natural log of the mass ratio\n\tnlogOfMassRatio := math.Log(ratioOfInitialToDryMass)\n\n\t// figure out the total energy requiring during the change of mass\n\t// over the start and end of the launch, otherwise known as the delta-V\n\tdeltaV := Ve * nlogOfMassRatio\n\n\t// go ahead and return the values\n\treturn deltaV\n}",
"func (m Matrix) Transform(u vec32.Vector) (v vec32.Vector) {\n\tfor i := range v {\n\t\tfor j := range u {\n\t\t\tv[i] += m[i][j] * u[j]\n\t\t}\n\t}\n\treturn\n}",
"func VMOVUPD_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVUPD_Z(mxyz, k, mxyz1) }",
"func (path *PATH) V(y float64) *PATH {\n\treturn path.AddPart(\"V\", y)\n}",
"func (rbm *RBM) P_H_Given_V(hiddenIndex int, v []float64) float64 {\n\tsum := 0.0\n\tfor j := 0; j < rbm.NumVisibleUnits; j++ {\n\t\tsum += rbm.W[hiddenIndex][j] * v[j]\n\t}\n\treturn nnet.Sigmoid(sum + rbm.C[hiddenIndex])\n}",
"func (r *Ray) PointAt(t float64) Vector {\n\treturn r.Origin.Add(r.Direction.Mult(t))\n}",
"func Vrotate(v1, v2 Vect) Vect {\n\treturn goVect(C.cpvrotate(v1.c(), v2.c()))\n}",
"func (transform *Transform) GetPosition() mgl32.Vec3 {\n\tpos := transform.objMatrix.Row(3)\n\treturn mgl32.Vec3{pos.X(), pos.Y(), pos.Z()}\n}",
"func (t *Transform) Mat4() lmath.Mat4 {\n\treturn t.Convert(LocalToWorld)\n}",
"func New(pos mgl32.Vec3) *Camera {\n\tc := &Camera{\n\t\tpos: pos,\n\t\tfront: mgl32.Vec3{0, 0, -1},\n\t\trotatey: 0,\n\t\trotatex: -90,\n\t\tSens: 0.14,\n\t\tisFlying: false,\n\t}\n\tc.updateAngles()\n\treturn c\n}",
"func (t *transform) Set(modelView mgl32.Mat4) {\n\tt.dataLock.Lock()\n\tdefer t.dataLock.Unlock()\n\tt.modelView = modelView\n}",
"func (bm Blendmap) View() (float32, float32, float32, float32) {\n\treturn bm.Map.viewport.Min.X, bm.Map.viewport.Min.Y, bm.Map.viewport.Max.X, bm.Map.viewport.Max.Y\n}",
"func (t *transform) Update(translate, rotate mgl32.Vec3) {\n\n\tt.dataLock.Lock()\n\tdefer t.dataLock.Unlock()\n\n\tt.translation = t.translation.Add(translate)\n\ttrans := t.translation\n\n\tt.rotation = t.rotation.Add(rotate)\n\ttotal := t.rotation\n\trotX := mgl32.HomogRotate3DX(total.X())\n\trotY := mgl32.HomogRotate3DY(total.Y())\n\trotZ := mgl32.HomogRotate3DZ(total.Z())\n\trotMatrix := rotZ.Mul4(rotY).Mul4(rotX)\n\ttrans = t.translation\n\tt.modelView = mgl32.Ident4().Mul4(mgl32.Translate3D(trans.X(), trans.Y(), trans.Z())).Mul4(rotMatrix)\n}",
"func (di *info) ovf() (string, error) {\n\tvar buf bytes.Buffer\n\n\ttmpl, err := template.New(\"ovf\").Parse(ovfenv)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\terr = tmpl.Execute(&buf, di)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn buf.String(), nil\n}",
"func (vec *Vector3) TransformDirection(matrix *Matrix4) {\n\tx := vec.X\n\ty := vec.Y\n\tz := vec.Z\n\n\tvec.X = matrix.elements[0]*x + matrix.elements[4]*y + matrix.elements[8]*z\n\tvec.Y = matrix.elements[1]*x + matrix.elements[5]*y + matrix.elements[9]*z\n\tvec.Z = matrix.elements[2]*x + matrix.elements[6]*y + matrix.elements[10]*z\n\n\tvec.Normalize()\n}",
"func updateV(grads mat.Matrix, state *State, beta2 float64) {\n\tstate.V.ProdScalarInPlace(beta2)\n\tsqGrad := grads.Prod(grads)\n\tstate.Buf2.ProdMatrixScalarInPlace(sqGrad, 1.0-beta2)\n\tstate.V.AddInPlace(state.Buf2)\n}",
"func pToV(point g.Point) p.Vec {\n\treturn p.V(float64(point.X), float64(point.Y))\n}",
"func (m Matrix3) MultiplyV(v Vector3) (r Vector3) {\n\tfor i := range r {\n\t\tr[i] = m.GetRow(i).Dot(v)\n\t}\n\treturn\n}",
"func (path *PATH) Vv(y float64) *PATH {\n\treturn path.AddPart(\"v\", y)\n}",
"func ConcatV(vs ...Matrix) Matrix {\n\tcup := 0\n\tfor _, v := range vs {\n\t\tcup += v.Size()\n\t}\n\tdata := make([]float64, 0, cup)\n\tfor _, v := range vs {\n\t\tif !v.IsVector() {\n\t\t\tpanic(\"mat: required vector, found matrix\")\n\t\t}\n\t\tdata = append(data, v.Data()...)\n\t}\n\treturn NewVecDense(data)\n}",
"func (s *ActorInfo) View() reform.View {\n\treturn ActorInfoView\n}",
"func VToDB(a *VAgent) error {\n\tif a.Agent == \"\" {\n\t\treturn nil\n\t}\n\n\tif len(a.Agent) > 15 {\n\t\tlog.Infow(\"bad agent name from V\", \"gid\", a.Gid, \"name\", a.Agent)\n\t}\n\n\t// telegram, startlat, startlon, distance, fetched are not set on the \"trust\" API call.\n\t// use ON DUPLICATE so as to not overwrite apikey or telegram\n\t// TODO: prune fields we will never use or that V never sends\n\t_, err := db.Exec(\"INSERT INTO v (enlid, gid, vlevel, vpoints, agent, level, quarantine, active, blacklisted, verified, flagged, banned, cellid, startlat, startlon, distance, fetched) VALUES (?,?,?,?,LEFT(?,15),?,?,?,?,?,?,?,?,?,?,?,UTC_TIMESTAMP()) ON DUPLICATE KEY UPDATE agent=LEFT(?, 15), quarantine=?, blacklisted=?, verified=?, flagged=?, banned=?, fetched=UTC_TIMESTAMP()\",\n\t\ta.EnlID, a.Gid, a.Vlevel, a.Vpoints, a.Agent, a.Level, a.Quarantine, a.Active, a.Blacklisted, a.Verified, a.Flagged, a.Banned, a.CellID, a.StartLat, a.StartLon, a.Distance,\n\t\ta.Agent, a.Quarantine, a.Blacklisted, a.Verified, a.Flagged, a.Banned)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn err\n\t}\n\n\tif a.TelegramID != 0 {\n\t\texisting, err := a.Gid.TelegramID()\n\t\tif err != nil {\n\t\t\tlog.Error(err)\n\t\t\treturn err\n\t\t}\n\t\tif existing == 0 {\n\t\t\terr := a.Gid.SetTelegramID(TelegramID(a.TelegramID), a.Telegram)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(err)\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m Matrix) Project(u Vec) Vec {\n\treturn Vec{m[0]*u.X + m[2]*u.Y + m[4], m[1]*u.X + m[3]*u.Y + m[5]}\n}",
"func (p point) v(delta int) point {\n\treturn point{\n\t\tx: p.x,\n\t\ty: p.y + int64(delta),\n\t}\n}",
"func (o Orbit) VNorm() float64 {\n\treturn Norm(o.vVec)\n}",
"func VPMOVDW_Z(xyz, k, mxy operand.Op) { ctx.VPMOVDW_Z(xyz, k, mxy) }",
"func VMOVUPS_Z(mxyz, k, mxyz1 operand.Op) { ctx.VMOVUPS_Z(mxyz, k, mxyz1) }",
"func VPMOVW2M(xyz, k operand.Op) { ctx.VPMOVW2M(xyz, k) }",
"func VPMOVSQW_Z(xyz, k, mx operand.Op) { ctx.VPMOVSQW_Z(xyz, k, mx) }",
"func MatrixProjection(fov FovPort, znear, zfar float32, rightHanded bool) Matrix4f {\n\tif rightHanded {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 1))\n\t} else {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 0))\n\t}\n}",
"func (uni *Uniform4fv) SetPos(pos int, v float32) {\n\n\tuni.v[pos] = v\n}",
"func VPMOVUSDW_Z(xyz, k, mxy operand.Op) { ctx.VPMOVUSDW_Z(xyz, k, mxy) }",
"func Mat3Translate(out, a, v []float64) []float64 {\n\ta00 := a[0]\n\ta01 := a[1]\n\ta02 := a[2]\n\ta10 := a[3]\n\ta11 := a[4]\n\ta12 := a[5]\n\ta20 := a[6]\n\ta21 := a[7]\n\ta22 := a[8]\n\tx := v[0]\n\ty := v[1]\n\n\tout[0] = a00\n\tout[1] = a01\n\tout[2] = a02\n\n\tout[3] = a10\n\tout[4] = a11\n\tout[5] = a12\n\n\tout[6] = x*a00 + y*a10 + a20\n\tout[7] = x*a01 + y*a11 + a21\n\tout[8] = x*a02 + y*a12 + a22\n\treturn out\n}",
"func (c *Context) VPMINUW_Z(mxyz, xyz, k, xyz1 operand.Op) {\n\tc.addinstruction(x86.VPMINUW_Z(mxyz, xyz, k, xyz1))\n}",
"func perspProj(a *vec3.T, cam *Camera) *vec2.T {\n\tm := mkExtrinsicCameraMtx(cam)\n\tprintM4(m)\n\tsp := vec3.From(a)\n\tm.TransformVec3(&sp)\n\treturn &vec2.T{sp[0], sp[1]}\n}",
"func VPMOVQW_Z(xyz, k, mx operand.Op) { ctx.VPMOVQW_Z(xyz, k, mx) }",
"func (rbm *RBM) P_V_Given_H(visibleIndex int, h []float64) float64 {\n\tsum := 0.0\n\tfor i := 0; i < rbm.NumHiddenUnits; i++ {\n\t\tsum += rbm.W[i][visibleIndex] * h[i]\n\t}\n\treturn nnet.Sigmoid(sum + rbm.B[visibleIndex])\n}",
"func vmQemuInstantiate(s *state.State, args db.InstanceArgs) *vmQemu {\n\tvm := &vmQemu{\n\t\tstate: s,\n\t\tid: args.ID,\n\t\tproject: args.Project,\n\t\tname: args.Name,\n\t\tdescription: args.Description,\n\t\tephemeral: args.Ephemeral,\n\t\tarchitecture: args.Architecture,\n\t\tdbType: args.Type,\n\t\tsnapshot: args.Snapshot,\n\t\tcreationDate: args.CreationDate,\n\t\tlastUsedDate: args.LastUsedDate,\n\t\tprofiles: args.Profiles,\n\t\tlocalConfig: args.Config,\n\t\tlocalDevices: args.Devices,\n\t\tstateful: args.Stateful,\n\t\tnode: args.Node,\n\t\texpiryDate: args.ExpiryDate,\n\t}\n\n\t// Cleanup the zero values.\n\tif vm.expiryDate.IsZero() {\n\t\tvm.expiryDate = time.Time{}\n\t}\n\n\tif vm.creationDate.IsZero() {\n\t\tvm.creationDate = time.Time{}\n\t}\n\n\tif vm.lastUsedDate.IsZero() {\n\t\tvm.lastUsedDate = time.Time{}\n\t}\n\n\treturn vm\n}",
"func VPMINUW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMINUW_Z(mxyz, xyz, k, xyz1) }",
"func (a Vec4) Inverse() Vec4 {\n\treturn Vec4{-a.X, -a.Y, -a.Z, -a.W}\n}",
"func (a *Mtx) MultPoint(v Vec) (result Vec) {\n\tresult.X = v.X*a.el[0][0] + v.Y*a.el[1][0] + v.Z*a.el[2][0] + a.el[3][0]\n\tresult.Y = v.X*a.el[0][1] + v.Y*a.el[1][1] + v.Z*a.el[2][1] + a.el[3][1]\n\tresult.Z = v.X*a.el[0][2] + v.Y*a.el[1][2] + v.Z*a.el[2][2] + a.el[3][2]\n\t// final row assumed to be [0,0,0,1]\n\treturn\n}",
"func Vector3MultiplyV(v1, v2 Vector3) Vector3 {\n\tresult := Vector3{}\n\n\tresult.X = v1.X * v2.X\n\tresult.Y = v1.Y * v2.Y\n\tresult.Z = v1.Z * v2.Z\n\n\treturn result\n}",
"func NewV(capHint int) *Gini {\n\tg := &Gini{\n\t\txo: xo.NewSV(capHint)}\n\treturn g\n}",
"func (hmd *Hmd) GetEyeTimewarpMatrices(eye EyeType, renderPose Posef) (twmOut [2]Matrix4f) {\n\tC.ovrHmd_GetEyeTimewarpMatrices(hmd.cptr(), C.ovrEyeType(eye), c_posef(renderPose), twmOut[0].cptr())\n\treturn\n}",
"func VPMOVSXWD_Z(mxy, k, xyz operand.Op) { ctx.VPMOVSXWD_Z(mxy, k, xyz) }",
"func (b *GlogLoggerBuilder) InfoV(v glog.Level) *GlogLoggerBuilder {\n\tb.infoV = v\n\treturn b\n}",
"func Vlerp(v1, v2 Vect, t float64) Vect {\n\treturn goVect(C.cpvlerp(v1.c(), v2.c(), C.cpFloat(t)))\n}",
"func NewV(d int) *V {\n\tif d <= 0 {\n\t\tpanic(ErrDim)\n\t}\n\treturn &V{Data: make([]float64, d)}\n}"
] | [
"0.79384255",
"0.7008268",
"0.6827936",
"0.674036",
"0.66893333",
"0.65252846",
"0.64359015",
"0.6208828",
"0.6147548",
"0.60695577",
"0.57845163",
"0.56114215",
"0.55764824",
"0.50536716",
"0.49189788",
"0.4896015",
"0.48141652",
"0.48135036",
"0.48012808",
"0.47918695",
"0.47876397",
"0.47008792",
"0.47006762",
"0.4685765",
"0.46568754",
"0.46304306",
"0.4611148",
"0.45665818",
"0.4565831",
"0.45482522",
"0.4527038",
"0.45034486",
"0.45026776",
"0.44543886",
"0.44488928",
"0.44475836",
"0.4444265",
"0.4364686",
"0.4360399",
"0.4359988",
"0.4324204",
"0.43168664",
"0.42983302",
"0.4294552",
"0.428176",
"0.42813167",
"0.42648286",
"0.42529327",
"0.42511484",
"0.42478928",
"0.4195533",
"0.41882566",
"0.41865435",
"0.41671905",
"0.41661146",
"0.4163385",
"0.41523907",
"0.41435704",
"0.41135427",
"0.41123834",
"0.40964854",
"0.40956008",
"0.40745142",
"0.40736684",
"0.40605763",
"0.40460482",
"0.40449825",
"0.40254912",
"0.40248576",
"0.40247446",
"0.4018688",
"0.40160146",
"0.40017954",
"0.399579",
"0.3989604",
"0.3989501",
"0.39864388",
"0.39859048",
"0.39822334",
"0.39764005",
"0.39728776",
"0.39596426",
"0.39593875",
"0.39547473",
"0.3948278",
"0.39464414",
"0.39419025",
"0.39377013",
"0.3936321",
"0.393162",
"0.39260033",
"0.3924979",
"0.39225322",
"0.39107922",
"0.39083415",
"0.3904609",
"0.39019558",
"0.3901134",
"0.3898777",
"0.3898057"
] | 0.8124075 | 0 |
Project transforms a set of coordinates from object space (in obj) to window coordinates (with depth). Window coordinates are continuous, not discrete (well, as continuous as an IEEE Floating Point can be), so you won't get exact pixel locations without rounding or similar | func Project(obj Vec3, modelview, projection Mat4, initialX, initialY, width, height int) (win Vec3) {
obj4 := obj.Vec4(1)
vpp := projection.Mul4(modelview).Mul4x1(obj4)
vpp = vpp.Mul(1 / vpp.W())
win[0] = float64(initialX) + (float64(width)*(vpp[0]+1))/2
win[1] = float64(initialY) + (float64(height)*(vpp[1]+1))/2
win[2] = (vpp[2] + 1) / 2
return win
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *Projection) project(wx float64, wy float64) (float64, float64) {\n return ((wx / p.worldWidth) * p.canvasWidth) + (p.canvasWidth * 0.5),\n ((wy / p.worldHeight) * -p.canvasHeight) + (p.canvasHeight * 0.5)\n}",
"func UnProject(win Vec3, modelview, projection Mat4, initialX, initialY, width, height int) (obj Vec3, err error) {\n\tinv := projection.Mul4(modelview).Inv()\n\tvar blank Mat4\n\tif inv == blank {\n\t\treturn Vec3{}, errors.New(\"Could not find matrix inverse (projection times modelview is probably non-singular)\")\n\t}\n\n\tobj4 := inv.Mul4x1(Vec4{\n\t\t(2 * (win[0] - float64(initialX)) / float64(width)) - 1,\n\t\t(2 * (win[1] - float64(initialY)) / float64(height)) - 1,\n\t\t2*win[2] - 1,\n\t\t1.0,\n\t})\n\tobj = obj4.Vec3()\n\n\t//if obj4[3] > MinValue {}\n\tobj[0] /= obj4[3]\n\tobj[1] /= obj4[3]\n\tobj[2] /= obj4[3]\n\n\treturn obj, nil\n}",
"func projectPoint(\n\tx, y, z float64, // 3d point to project\n\tw, h, f float64, // width, height, focal\n\tscale float64, // scale\n) (px, py float64) { // projected point\n\tx, y, z = x*scale*f, y*scale*f, z*scale*f\n\tzz := z + f\n\tif zz == 0 {\n\t\tzz = math.SmallestNonzeroFloat64\n\t}\n\tpx = x*(f/zz) + w/2\n\tpy = y*(f/zz) - h/2\n\tpy *= -1\n\treturn\n}",
"func Render(r *sdl.Renderer, w *World) {\n\twidth := float64(r.GetViewport().W)\n\theight := float64(r.GetViewport().H)\n\n\trenderedVertices := [][2]int{}\n\n\tfor _, obj := range w.Objects {\n\t\tfor _, vertex := range obj.Geometry.Vertices {\n\t\t\trenderCoordinates := AsRelativeToSystem(w.ActiveCamera.ObjSys, ToSystem(obj.ObjSys, vertex.Pos))\n\n\t\t\tZ := Clamp(renderCoordinates.Z, 0.0001, math.NaN())\n\t\t\tratio := w.ActiveCamera.FocalLength / math.Abs(Z)\n\t\t\trenderX := ratio * renderCoordinates.X\n\t\t\trenderY := ratio * renderCoordinates.Y\n\n\t\t\tnormX, normY := NormalizeScreen(\n\t\t\t\twidth,\n\t\t\t\theight,\n\t\t\t\trenderX,\n\t\t\t\trenderY,\n\t\t\t)\n\n\t\t\trasterX := int(math.Floor(normX*width) + width/2)\n\t\t\trasterY := int(math.Floor(normY*height) + height/2)\n\n\t\t\trenderedVertices = append(renderedVertices, [2]int{rasterX, rasterY})\n\n\t\t\tDrawCircle(r, rasterX, rasterY, 5)\n\t\t}\n\t\tfor _, edge := range obj.Geometry.Edges {\n\t\t\tif len(renderedVertices) > edge.From && len(renderedVertices) > edge.To {\n\t\t\t\tr.DrawLine(\n\t\t\t\t\tint32(renderedVertices[edge.From][0]),\n\t\t\t\t\tint32(renderedVertices[edge.From][1]),\n\t\t\t\t\tint32(renderedVertices[edge.To][0]),\n\t\t\t\t\tint32(renderedVertices[edge.To][1]),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\t}\n\n\tr.Present()\n}",
"func flattenCoordinates(destinationBounds image.Rectangle) []complex128 {\n\tflattenedCoordinates := []complex128{}\n\tfor destinationY := destinationBounds.Min.Y; destinationY < destinationBounds.Max.Y; destinationY++ {\n\t\tfor destinationX := destinationBounds.Min.X; destinationX < destinationBounds.Max.X; destinationX++ {\n\t\t\tflattenedCoordinates = append(flattenedCoordinates, complex(float64(destinationX), float64(destinationY)))\n\t\t}\n\t}\n\treturn flattenedCoordinates\n}",
"func world2screen(wx, wy float32) (sx, sy int) {\n\tsx = int((wx * pixel_per_meter) + (float32(WINDOW_X) / 2.0))\n\tsy = int((-wy * pixel_per_meter) + (float32(WINDOW_Y) / 2.0))\n\treturn\n}",
"func (p *Projection) grid(xInterval float64, yInterval float64) {\n l := draw2d.NewGraphicContext(p.img)\n l.SetStrokeColor(color.RGBA{0xEE, 0xEE, 0xEE, 0xFF})\n l.SetLineWidth(0.5)\n\n xCount := p.worldWidth / xInterval\n yCount := p.worldHeight / yInterval\n\n // horizontal lines\n for x := 1.0; x < xCount; x += 1 {\n xx, _ := p.project((x - (xCount / 2)) * xInterval, 0)\n l.MoveTo(xx, 0)\n l.LineTo(xx, p.canvasHeight)\n l.Stroke()\n }\n\n // vertical lines\n for y := 1.0; y < yCount; y += 1 {\n _, yy := p.project(0, (y - (yCount / 2)) * yInterval)\n l.MoveTo(0, yy)\n l.LineTo(p.canvasWidth, yy)\n l.Stroke()\n }\n\n l.SetStrokeColor(color.RGBA{0xAA, 0xAA, 0xAA, 0xFF})\n\n // horiz axis\n l.MoveTo(p.canvasWidth/2, 0)\n l.LineTo(p.canvasWidth/2, p.canvasHeight)\n l.Stroke()\n\n // vert axis\n l.MoveTo(0, p.canvasHeight/2)\n l.LineTo(p.canvasWidth, p.canvasHeight/2)\n l.Stroke()\n}",
"func canvasToViewPort(x, y float64, c Canvas) Vector {\n\tfw, fh := float64(c.Width()), float64(c.Height())\n\treturn Vector{(x - fw/2) / fw, (y - fh/2) / fh, 1}\n}",
"func toPixelVector(win *pixelgl.Window, x float64, y float64) pixel.Vec {\n\tvar (\n\t\tnewY = win.Bounds().Max.Y - y\n\t)\n\n\treturn pixel.Vec{x, newY}\n}",
"func (m Matrix) Project(u Vec) Vec {\n\treturn Vec{m[0]*u.X + m[2]*u.Y + m[4], m[1]*u.X + m[3]*u.Y + m[5]}\n}",
"func (crs Projection) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func image2World(b image.Rectangle, x, y float64) Point {\n\tw := float64(b.Max.X - b.Min.X)\n\th := float64(b.Max.Y - b.Min.Y)\n\treturn Point{\n\t\t(x - float64(b.Min.X)) / w,\n\t\t1.0 - (y-float64(b.Min.Y))/h,\n\t\t0.0,\n\t}\n}",
"func (c *Camera) Project(p3 lmath.Vec3) (p2 lmath.Vec2, ok bool) {\n\tcameraInv, _ := c.Object.Transform.Mat4().Inverse()\n\tcameraInv = cameraInv.Mul(zUpRightToYUpRight)\n\n\tprojection := c.Projection.Mat4()\n\tvp := cameraInv.Mul(projection)\n\n\tp2, ok = vp.Project(p3)\n\treturn\n}",
"func DebugFindPoints(viewImage *image.Image, screenImage *image.Image, width int, height int) {\n\twindow1 := gocv.NewWindow(\"test1\")\n\tdefer window1.Close()\n\n\twindow2 := gocv.NewWindow(\"test2\")\n\tdefer window2.Close()\n\n\tpoints := make([]image.Point, 5)\n\n\tpoints[0] = image.Point{((*viewImage).Bounds().Max.X - 1) / 2, ((*viewImage).Bounds().Max.Y - 1) / 2}\n\tpoints[1] = image.Point{points[0].X - width, points[0].Y - height}\n\tpoints[2] = image.Point{points[0].X + width, points[0].Y - height}\n\tpoints[3] = image.Point{points[0].X - width, points[0].Y + height}\n\tpoints[4] = image.Point{points[0].X + width, points[0].Y + height}\n\n\tmatchedPoints, err := FindPoints(viewImage, screenImage, points)\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\n\tphotoMat, _ := gocv.ImageToMatRGBA(*viewImage)\n\tdefer photoMat.Close()\n\tscreenMat, _ := gocv.ImageToMatRGBA(*screenImage)\n\tdefer screenMat.Close()\n\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tfor _, point := range points {\n\t\tgocv.Circle(&photoMat, point, 5, color.RGBA{255, 0, 0, 0}, -1)\n\t}\n\n\tgocv.Line(&photoMat, points[1], points[2], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&photoMat, points[1], points[3], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&photoMat, points[3], points[4], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&photoMat, points[4], points[2], color.RGBA{0, 255, 0, 0}, 2)\n\n\tfor _, point := range matchedPoints {\n\t\tgocv.Circle(&screenMat, point, 5, color.RGBA{255, 0, 0, 0}, -1)\n\t}\n\n\tgocv.Line(&screenMat, matchedPoints[1], matchedPoints[2], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&screenMat, matchedPoints[1], matchedPoints[3], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&screenMat, matchedPoints[3], matchedPoints[4], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&screenMat, matchedPoints[2], matchedPoints[4], color.RGBA{0, 255, 0, 0}, 2)\n\n\tnewWidth := (matchedPoints[2].X - matchedPoints[1].X + matchedPoints[4].X - matchedPoints[3].X) / 2\n\tnewHeight := (matchedPoints[3].Y - matchedPoints[1].Y + matchedPoints[4].Y - matchedPoints[2].Y) / 2\n\tlog.Println(\"End Calculating\")\n\n\tlog.Printf(\"[DEBUG] Width: %v\\n\", newWidth)\n\tlog.Printf(\"[DEBUG] Height: %v\\n\", newHeight)\n\n\twindow1.IMShow(photoMat)\n\twindow2.IMShow(screenMat)\n\n\tfor {\n\n\t\tif window1.WaitKey(1) >= 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tif window2.WaitKey(1) >= 0 {\n\t\t\tbreak\n\t\t}\n\n\t}\n}",
"func (v *TextView) WindowToBufferCoords(win TextWindowType, window_x, window_y int) (buffer_x, buffer_y int) {\n\tvar bx, by C.gint\n\tC.gtk_text_view_window_to_buffer_coords(v.native(), C.GtkTextWindowType(win), C.gint(window_x), C.gint(window_y), &bx, &by)\n\treturn int(bx), int(by)\n}",
"func transformCoordinatesAndReport(wallpaperCommand *command.CreateSymmetryPattern, scaledCoordinates []complex128) []complex128 {\n\ttransformedCoordinates := transformCoordinatesForFormula(wallpaperCommand, scaledCoordinates)\n\tzMin, zMax := mathutility.GetBoundingBox(transformedCoordinates)\n\tprintln(zMin)\n\tprintln(zMax)\n\treturn transformedCoordinates\n}",
"func (bm Blendmap) View() (float32, float32, float32, float32) {\n\treturn bm.Map.viewport.Min.X, bm.Map.viewport.Min.Y, bm.Map.viewport.Max.X, bm.Map.viewport.Max.Y\n}",
"func coreRatioViewport(fbWidth int, fbHeight int) (x, y, w, h float32) {\n\t// Scale the content to fit in the viewport.\n\tfbw := float32(fbWidth)\n\tfbh := float32(fbHeight)\n\n\t// NXEngine workaround\n\taspectRatio := float32(Geom.AspectRatio)\n\tif aspectRatio == 0 {\n\t\taspectRatio = float32(Geom.BaseWidth) / float32(Geom.BaseHeight)\n\t}\n\n\th = fbh\n\tw = fbh * aspectRatio\n\tif w > fbw {\n\t\th = fbw / aspectRatio\n\t\tw = fbw\n\t}\n\n\t// Place the content in the middle of the window.\n\tx = (fbw - w) / 2\n\ty = (fbh - h) / 2\n\n\tva := vertexArray(x, y, w, h, 1.0)\n\tgl.BindBuffer(gl.ARRAY_BUFFER, vbo)\n\tgl.BufferData(gl.ARRAY_BUFFER, len(va)*4, gl.Ptr(va), gl.STATIC_DRAW)\n\n\treturn\n}",
"func (wf WorldFile) ToMap(p image.Point) (float64, float64) {\n\tfX := float64(p.X)\n\tfY := float64(p.Y)\n\n\tx := wf.A*fX + wf.B*fY + wf.C\n\ty := wf.D*fX + wf.E*fY + wf.F\n\n\treturn x, y\n}",
"func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n C.glowFrustum(gpFrustum, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func (r *Rectangle) ToWorld(position *Vector3) {\n\tr.MinPoint[0] = position[0] - r.HalfSize[0]\n\tr.MaxPoint[0] = position[0] + r.HalfSize[0]\n\tr.MinPoint[1] = position[1] - r.HalfSize[1]\n\tr.MaxPoint[1] = position[1] + r.HalfSize[1]\n\tr.MinPoint[2] = position[2] - r.HalfSize[2]\n\tr.MaxPoint[2] = position[2] + r.HalfSize[2]\n}",
"func Mat3Projection(out []float64, width, height float64) []float64 {\n\tout[0] = 2 / width\n\tout[1] = 0\n\tout[2] = 0\n\tout[3] = 0\n\tout[4] = -2 / height\n\tout[5] = 0\n\tout[6] = -1\n\tout[7] = 1\n\tout[8] = 1\n\treturn out\n}",
"func Project(zone int, south bool, latitude, longitude float64) (float64, float64) {\n\n\t// False northing\n\tfn := 0.\n\tif south {\n\t\tfn = utmSouthernHemisphereFalseNorthing\n\t}\n\n\th1 := n/2 - n2*2/3 + n3*5/16 + n4*41/180\n\th2 := n2*13/48 - n3*3/5 + n4*557/1440\n\th3 := n3*61/240 - n4*103/140\n\th4 := n4 * 49561 / 161280\n\n\tq := math.Asinh(math.Tan(latitude)) - e*math.Atanh(e*math.Sin(latitude))\n\tβ := math.Atan(math.Sinh(q))\n\n\tη0 := math.Atanh(math.Cos(β) * math.Sin(longitude-λO(zone)))\n\tξ0 := math.Asin(math.Sin(β) * math.Cosh(η0))\n\n\tη1 := h1 * math.Cos(2*ξ0) * math.Sinh(2*η0)\n\tη2 := h2 * math.Cos(4*ξ0) * math.Sinh(4*η0)\n\tη3 := h3 * math.Cos(6*ξ0) * math.Sinh(6*η0)\n\tη4 := h4 * math.Cos(8*ξ0) * math.Sinh(8*η0)\n\n\tξ1 := h1 * math.Sin(2*ξ0) * math.Cosh(2*η0)\n\tξ2 := h2 * math.Sin(4*ξ0) * math.Cosh(4*η0)\n\tξ3 := h3 * math.Sin(6*ξ0) * math.Cosh(6*η0)\n\tξ4 := h4 * math.Sin(8*ξ0) * math.Cosh(8*η0)\n\n\tξ := ξ0 + ξ1 + ξ2 + ξ3 + ξ4\n\tη := η0 + η1 + η2 + η3 + η4\n\n\te := fe + kO*b*η\n\tn := fn + kO*b*ξ\n\treturn e, n\n}",
"func (crs AlbersEqualAreaConic) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func (crs WebMercator) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func (l Layout) DebugRender(win *pixelgl.Window) {\n\tfor key := range l.Panels {\n\t\tpanel := l.CreatePanel(key)\n\t\tpanel.Draw(win)\n\t}\n\n\t//temp camera matrix\n\t//cam := pixel.IM.Scaled(l.centerPos, 1.0).Moved(l.centerPos)\n\t//win.SetMatrix(cam)\n}",
"func (crs Projection) FromWGS84(x0, y0, z0 float64) (x, y, z float64) {\n\treturn fromWGS84(crs.GeodeticDatum, x0, y0, z0)\n}",
"func (r *Renderer) Update(t int64) {\n\tdefer r.SwitchBuffer()\n\n\t// any world logic can go in here\n\tframe := r.TargetFrame()\n\tframe.Clear(0) // shouldn't be needed when rendering complete scenes\n\n\tbuf := frame.bmp\n\n\t// Update scene\n\tr.scene.Advance(t)\n\tr.scene.Camera.Position = Vec3{\n\t //math.Cos(r.scene.Time/3)*10, 0, -math.Sin(r.scene.Time/3)*5 + 8,\n\t\t//math.Cos(r.scene.Time/3)*4, 0, -math.Sin(r.scene.Time/3)*4,\n\n\t\t//-5,(math.Sin(r.scene.Time/3)+1.0) * 2.5,-5,\n\t\tmath.Cos(r.scene.Time/3)*4, (math.Sin(r.scene.Time/6)+1.0) * 2.5, -math.Sin(r.scene.Time/3)*4,\n\t}\n\tr.scene.Camera.Yaw = math.Pi + (math.Sin(r.scene.Time)*0.2) // 0 is facing +Z, pi is facing -Z\n\tr.scene.Camera.Pitch = math.Sin(r.scene.Time/6)*0.1\n\n\tr.scene.Camera.Target = Vec3{\n\t\t0,0,0,\n\t\t//0,math.Cos(r.scene.Time), 0,\n\t\t//math.Cos(r.scene.Time/6)*3,1,0,\n\t}\n\n\t// Do the transforms (scene & perspective)\n\tpoints := r.scene.ProjectPoints(float64(frame.Width), float64(frame.Height))\n\n\t// Sort geometry far to near\n\tgeom := r.scene.Geometry\n\ttriangles := make([]*RefTriangle, len(geom))\n\tfor i := 0; i < len(geom); i++ {\n\t\ttriangles[i] = &geom[i]\n\t}\n\tsort.Slice(triangles, func(i, j int) bool {\n\t\ta := triangles[i]; b := triangles[j]\n\n\t\taveA := points[a.A].Z+points[a.B].Z+points[a.C].Z\n\t\taveB := points[b.A].Z+points[b.B].Z+points[b.C].Z\n\t\treturn aveA < aveB\n\t})\n\n\t// Render geometry\n\tend := len(triangles)\n\tfor i := 0; i < end; i++ {\n\t\ttri := triangles[i]\n\n\t\ttex := r.scene.Textures[tri.Tex]\n\t\ta := points[tri.A]\n\t\tb := points[tri.B]\n\t\tc := points[tri.C]\n\n\t\tif a.Z > 0 || b.Z > 0 || c.Z > 0 {\n\t\t\t// Should check if any are in front and do clipping. But not yet\n\t\t\tbreak // we should be drawing in order, so reject anything behind the camera\n\t\t}\n\n\t\tTextureTriangle(a,b,c, tex, &buf, frame.Width, frame.Height)\n\t}\n}",
"func (c *Camera) Viewport() (x float32, y float32, width float32, height float32) {\n\tif c.x > 0 {\n\t\tx = float32(c.x) / float32(c.windowWidth)\n\t}\n\tif c.y > 0 {\n\t\ty = float32(c.y) / float32(c.worldHeight)\n\t}\n\n\tratio := math32.Min(\n\t\tfloat32(c.windowWidth)/float32(c.worldWidth),\n\t\tfloat32(c.windowHeight)/float32(c.worldHeight),\n\t)\n\twidth, height = c.relativeToWindowSize(ratio, ratio)\n\twidth /= float32(c.width) / float32(c.worldWidth)\n\theight /= float32(c.height) / float32(c.worldHeight)\n\treturn x, y, width, height\n}",
"func (r Ray) ProjectX(x float64) float64 {\n\t/*\n\t\tderived by evaluating y form r.Formula\n\t*/\n\treturn (r.V.Y*x - r.V.Y*r.O.X + r.V.X*r.O.Y) / r.V.X\n}",
"func (u Vec) Project(v Vec) Vec {\n\tlen := u.Dot(v) / v.Len()\n\treturn v.Unit().Scaled(len)\n}",
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n C.glowOrtho(gpOrtho, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func FourPointTransform(img gocv.Mat, pts []image.Point, dst *gocv.Mat) {\n\trect := OrderPoints(pts)\n\ttl := rect[0]\n\ttr := rect[1]\n\tbr := rect[2]\n\tbl := rect[3]\n\n\twidthA := math.Sqrt(math.Pow(float64(br.X)-float64(bl.X), 2) +\n\t\tmath.Pow(float64(br.Y)-float64(bl.Y), 2))\n\twidthB := math.Sqrt(math.Pow(float64(tr.X)-float64(tl.X), 2) +\n\t\tmath.Pow(float64(tr.Y)-float64(tl.Y), 2))\n\tmaxWidth := math.Max(widthA, widthB)\n\n\theightA := math.Sqrt(math.Pow(float64(tr.X)-float64(br.X), 2) +\n\t\tmath.Pow(float64(tr.Y)-float64(br.Y), 2))\n\theightB := math.Sqrt(math.Pow(float64(tl.X)-float64(bl.X), 2) +\n\t\tmath.Pow(float64(tl.Y)-float64(bl.Y), 2))\n\tmaxHeight := math.Max(heightA, heightB)\n\n\tdt := []image.Point{\n\t\timage.Pt(0, 0),\n\t\timage.Pt(int(maxWidth)-1, 0),\n\t\timage.Pt(int(maxWidth)-1, int(maxHeight)-1),\n\t\timage.Pt(0, int(maxHeight)-1)}\n\n\tm := gocv.GetPerspectiveTransform(rect, dt)\n\tgocv.WarpPerspective(img, dst, m, image.Pt(int(maxWidth), int(maxHeight)))\n\n}",
"func (r paintingRobot) getGridInfo() (int, int, point) {\n xMin, xMax, yMin, yMax := 0, 0, 0, 0\n for _, p := range r.paintedPoints {\n if p.x > xMax {\n xMax = p.x\n }\n if p.x < xMin {\n xMin = p.x\n }\n if p.y > yMax {\n yMax = p.y\n }\n if p.y < yMin {\n yMin = p.y\n }\n }\n\n return int(math.Abs(float64(xMin))) + xMax + 1, int(math.Abs(float64(yMin))) + yMax + 1,\n point{\n x: int(math.Abs(float64(xMin))),\n y: int(math.Abs(float64(yMin))),\n }\n}",
"func (crs TransverseMercator) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func Project(v, u *Vec) *Vec {\n\tl := v.Dot(u) / u.Magnitude()\n\tresult := Normalize(u)\n\tresult.Multiply(l)\n\treturn result\n}",
"func ProjectMercator(nx, ny float32, radius float32) (x, y, z float32) {\n\tnx = math.Pi / 4 * (2*nx - 1)\n\tny = math.Pi / 4 * (4*ny + 1)\n\tx = radius * cos(ny) * cos(nx)\n\ty = radius * cos(ny) * sin(nx)\n\tz = radius * sin(ny)\n\treturn\n}",
"func Vec3BasisProject(U, V, W, S Vec3) (o Vec3) {\n\to[0] = Vec3Dot(U, S)\n\to[1] = Vec3Dot(V, S)\n\to[2] = Vec3Dot(W, S)\n\treturn\n}",
"func FindPathMat(wrld *WorldStructure, start, end [2]int) (nodes [][2]int) {\n\trx1 := BiggestInt(SmallestInt(start[0], end[0])-5, 0)\n\try1 := BiggestInt(SmallestInt(start[1], end[1])-5, 0)\n\trx2 := BiggestInt(start[0], end[0]) + 5\n\try2 := BiggestInt(start[1], end[1]) + 5\n\tw := rx2 - rx1 + 1\n\th := ry2 - ry1 + 1\n\n\tcollMat := GetMatrix(w, h, 0)\n\tobjctIDs := wrld.GetObjectsInField(rx1, ry1, w, h)\n\n\tfor _, objID := range objctIDs {\n\t\thb := wrld.Objects[objID].Hitbox\n\t\tcollMat.Fill(int(hb.min.X)-rx1, int(hb.min.Y)-ry1, int(math.Ceil(hb.max.X-1))-rx1, int(math.Ceil(hb.max.Y-1))-ry1, 1)\n\t}\n\n\tresult, _ := FindPath(start, end, func(pos [2]int) (nbs [][2]int) {\n\t\tnbsr := GetNeighboursInBounds(pos[0], pos[1], rx1, ry1, rx2, ry2, collMat)\n\t\tnbs = make([][2]int, 0)\n\t\tfor _, nb := range nbsr {\n\t\t\tX := nb[0] + pos[0]\n\t\t\tY := nb[1] + pos[1]\n\t\t\tval, err := collMat.Get(X-rx1, Y-ry1)\n\t\t\tif val <= 0 && err == nil {\n\t\t\t\tnbs = append(nbs, [2]int{X, Y})\n\t\t\t}\n\t\t}\n\t\treturn\n\t})\n\treturn result\n}",
"func (v *TextView) BufferToWindowCoords(win TextWindowType, buffer_x, buffer_y int) (window_x, window_y int) {\n\tvar wx, wy C.gint\n\tC.gtk_text_view_buffer_to_window_coords(v.native(), C.GtkTextWindowType(win), C.gint(buffer_x), C.gint(buffer_y), &wx, &wy)\n\treturn int(wx), int(wy)\n}",
"func drawContours(img gocv.Mat, dst gocv.Mat) {\n\thierarchy := gocv.NewMat()\n\tcontours := gocv.FindContoursWithParams(img, &hierarchy, gocv.RetrievalTree, gocv.ChainApproxSimple)\n\tprintln(hierarchy.Type().String())\n\thierarchyDims := hierarchy.Size()\n\n\tcounter := 0\n\tvar hierarchyMatrix [][]int32\n\tfor j := 0; j < hierarchyDims[1]*4; j += 4 {\n\t\thierarchyMatrix = append(hierarchyMatrix, []int32{})\n\t\tfor count := 0; count < 4; count++ {\n\t\t\thierarchyMatrix[j/4] = append(hierarchyMatrix[j/4], hierarchy.GetIntAt(0, j+count))\n\t\t\tfmt.Printf(\"%3v\", hierarchyMatrix[j/4][count])\n\t\t}\n\t\tcounter++\n\t\tprintln()\n\t}\n\tprintln(counter)\n\n\terr := dst.Close()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tsizes := img.Size()\n\tdst = gocv.NewMatWithSizes(sizes, gocv.MatTypeCV64FC4)\n\tconPoly := gocv.NewPointsVector()\n\tobjectType := \"\"\n\trand.Seed(1400)\n\tfor i := 0; i < contours.Size(); i++ {\n\t\tarea := gocv.ContourArea(contours.At(i))\n\n\t\tparent := hierarchyMatrix[i][3]\n\t\tif area > 1000 && parent == 1 {\n\t\t\tperi := gocv.ArcLength(contours.At(i), true)\n\t\t\tconPoly.Append(gocv.ApproxPolyDP(contours.At(i), 0.02*peri, true))\n\t\t\tgocv.DrawContours(&dst, conPoly, conPoly.Size()-1, color.RGBA{B: 69, R: 255}, 1)\n\t\t\trect := gocv.BoundingRect(conPoly.At(conPoly.Size() - 1))\n\n\t\t\tgocv.Rectangle(&dst, rect, color.RGBA{G: 255, A: 5}, 1)\n\t\t\tswitch conPoly.At(conPoly.Size() - 1).Size() {\n\t\t\tcase 3:\n\t\t\t\tobjectType = \"Tri\"\n\t\t\tcase 4:\n\t\t\t\taspRatio := float32(rect.Dx()) / float32(rect.Dy())\n\t\t\t\tif aspRatio > 0.95 && aspRatio < 1.05 {\n\t\t\t\t\tobjectType = \"Square\"\n\t\t\t\t} else {\n\t\t\t\t\tobjectType = \"Rect\"\n\t\t\t\t}\n\t\t\tdefault:\n\t\t\t\tobjectType = \"Circle\"\n\t\t\t}\n\t\t\t//println(objectType)\n\t\t\tgocv.PutText(&dst, objectType, rect.Min, gocv.FontHersheyPlain, 1, color.RGBA{R: 255, G: 255, B: 255}, 1)\n\t\t}\n\t}\n\tfmt.Printf(\"%v significant contours found\", conPoly.Size())\n}",
"func (r Rect) Projection() *Projection {\n\treturn NewProjection(r.x0, r.y0, r.x1, r.y1)\n}",
"func Coords(x, y, r uint32) ([]uint8, bool) {\n\tif r >= uint32(len(CoordOffset)) {\n\t\t// log.Printf(\"Coord radius too large: %d\\n\", r)\n\t\treturn []uint8{0, 0, 0}, false\n\t}\n\tdx := CoordOffset[r][0]\n\tdy := CoordOffset[r][1]\n\tif int(x)+dx < 0 || int(x)+dx >= WindowImage.Rect.Max.X || int(y)+dy < 0 || int(y)+dy >= WindowImage.Rect.Max.Y {\n\t\treturn []uint8{0, 0, 0}, false\n\t}\n\ti := (int(x)+dx)*4 + (int(y)+dy)*WindowImage.Stride\n\treturn WindowImage.Pix[i : i+3], true\n}",
"func scaleDestinationPixels(destinationBounds image.Rectangle, destinationCoordinates []complex128, viewPortMin complex128, viewPortMax complex128) []complex128 {\n\tscaledCoordinates := []complex128{}\n\tfor _, destinationCoordinate := range destinationCoordinates {\n\t\tdestinationScaledX := mathutility.ScaleValueBetweenTwoRanges(\n\t\t\treal(destinationCoordinate),\n\t\t\tfloat64(destinationBounds.Min.X),\n\t\t\tfloat64(destinationBounds.Max.X),\n\t\t\treal(viewPortMin),\n\t\t\treal(viewPortMax),\n\t\t)\n\t\tdestinationScaledY := mathutility.ScaleValueBetweenTwoRanges(\n\t\t\timag(destinationCoordinate),\n\t\t\tfloat64(destinationBounds.Min.Y),\n\t\t\tfloat64(destinationBounds.Max.Y),\n\t\t\timag(viewPortMin),\n\t\t\timag(viewPortMax),\n\t\t)\n\t\tscaledCoordinates = append(scaledCoordinates, complex(destinationScaledX, destinationScaledY))\n\t}\n\treturn scaledCoordinates\n}",
"func DrawToCanvas(u Universe, canvasWidth int) image.Image {\n\tc := canvas.CreateNewCanvas(canvasWidth, canvasWidth)\n\tc.SetFillColor(canvas.MakeColor(0, 0, 0))\n\tc.ClearRect(0, 0, canvasWidth, canvasWidth)\n\tc.Fill()\n\n\tfor i, b := range u.bodies {\n\t\tc.SetFillColor(canvas.MakeColor(b.red, b.green, b.blue))\n\t\tcx := (b.position.x / u.width) * float64(canvasWidth)\n\t\tcy := (b.position.y / u.width) * float64(canvasWidth)\n\t\tr := (b.radius / u.width) * float64(canvasWidth)\n\t\tif i == 0 { // For Jupiter, don't scale it\n\n\t\t\tc.Circle(cx, cy, r)\n\t\t} else { //Scale Moons by a factor of 10 to be visible\n\t\t\tr *= 10\n\t\t\tc.Circle(cx, cy, r)\n\t\t}\n\t\tc.Fill()\n\t}\n\treturn c.GetImage()\n}",
"func (c *Camera) debugUpdate() {\n\tc.State = gfx.NewState()\n\tc.Shader = shader\n\tc.State.FaceCulling = gfx.BackFaceCulling\n\n\tm := gfx.NewMesh()\n\tm.Primitive = gfx.Lines\n\n\tm.Vertices = []gfx.Vec3{}\n\tm.Colors = []gfx.Color{}\n\n\tnear := float32(c.Near)\n\tfar := float32(c.Far)\n\n\tif c.Ortho {\n\t\twidth := float32(c.View.Dx())\n\t\theight := float32(c.View.Dy())\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{width / 2, 0, height / 2},\n\n\t\t\t// Near\n\t\t\t{0, near, 0},\n\t\t\t{width, near, 0},\n\t\t\t{width, near, height},\n\t\t\t{0, near, height},\n\n\t\t\t// Far\n\t\t\t{0, far, 0},\n\t\t\t{width, far, 0},\n\t\t\t{width, far, height},\n\t\t\t{0, far, height},\n\n\t\t\t{width / 2, far, height / 2},\n\n\t\t\t// Up\n\t\t\t{0, near, height},\n\t\t\t{0, near, height},\n\t\t\t{width, near, height},\n\t\t}\n\t} else {\n\t\tratio := float32(c.View.Dx()) / float32(c.View.Dy())\n\t\tfovRad := c.FOV / 180 * math.Pi\n\n\t\thNear := float32(2 * math.Tan(fovRad/2) * c.Near)\n\t\twNear := hNear * ratio\n\n\t\thFar := float32(2 * math.Tan(fovRad/2) * c.Far)\n\t\twFar := hFar * ratio\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{0, 0, 0},\n\n\t\t\t// Near\n\t\t\t{-wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, hNear / 2},\n\t\t\t{-wNear / 2, near, hNear / 2},\n\n\t\t\t// Far\n\t\t\t{-wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, hFar / 2},\n\t\t\t{-wFar / 2, far, hFar / 2},\n\n\t\t\t{0, far, 0},\n\n\t\t\t// Up\n\t\t\t{0, near, hNear},\n\t\t\t{-wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t\t{wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t}\n\t}\n\n\tm.Colors = []gfx.Color{\n\t\t{1, 1, 1, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 1, 1, 1},\n\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t}\n\n\tm.Indices = []uint32{\n\t\t// From 0 to near plane\n\t\t0, 1,\n\t\t0, 2,\n\t\t0, 3,\n\t\t0, 4,\n\n\t\t// Near plane\n\t\t1, 2,\n\t\t2, 3,\n\t\t3, 4,\n\t\t4, 1,\n\n\t\t// Far plane\n\t\t5, 6,\n\t\t6, 7,\n\t\t7, 8,\n\t\t8, 5,\n\n\t\t// Lines from near to far plane\n\t\t1, 5,\n\t\t2, 6,\n\t\t3, 7,\n\t\t4, 8,\n\n\t\t0, 9,\n\n\t\t// Up\n\t\t10, 11,\n\t\t11, 12,\n\t\t12, 10,\n\t}\n\n\tc.Meshes = []*gfx.Mesh{m}\n}",
"func Ortho(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tsyscall.Syscall6(gpOrtho, 6, uintptr(math.Float64bits(left)), uintptr(math.Float64bits(right)), uintptr(math.Float64bits(bottom)), uintptr(math.Float64bits(top)), uintptr(math.Float64bits(zNear)), uintptr(math.Float64bits(zFar)))\n}",
"func (p *Projection) cross(wx float64, wy float64, col color.RGBA) {\n cx, cy := p.project(wx, wy)\n\n c := draw2d.NewGraphicContext(p.img)\n c.SetStrokeColor(col)\n c.SetLineWidth(1)\n size := 2.0\n\n // top left -> bottom right\n c.MoveTo(cx - size, cy - size)\n c.LineTo(cx + size, cy + size)\n c.Stroke()\n\n // top right -> bottom left\n c.MoveTo(cx + size, cy - size)\n c.LineTo(cx - size, cy + size)\n c.Stroke()\n}",
"func Project(lhs Vector, rhs Vector) float64 {\n\treturn DotProduct(lhs, rhs) / Magnitude(rhs)\n}",
"func Render(width int, height int, samples int) image.Image {\n\tcanvas := image.NewNRGBA(image.Rect(0, 0, width, height))\n\n\tfrom := mgl64.Vec3{13, 2, 3}\n\tat := mgl64.Vec3{0.0, 0.0, 0.0}\n\tfov := 20.0\n\taperture := 0.1\n\tfocusDistance := from.Sub(at).Len()\n\tcam := NewCamera(\n\t\tfrom,\n\t\tat,\n\t\tmgl64.Vec3{0, 1.0, 0.0},\n\t\tfov,\n\t\tfloat64(width)/float64(height),\n\t\taperture,\n\t\tfocusDistance)\n\n\tworld := CreateRandomScene()\n\n\tfor j := 0; j < height; j++ {\n\t\tfor i := 0; i < width; i++ {\n\t\t\tcol := mgl64.Vec3{}\n\t\t\tfor s := 0; s < samples; s++ {\n\t\t\t\tu := (float64(i) + rand.Float64()) / float64(width)\n\t\t\t\t// Note that we flip the vertical axis here.\n\t\t\t\tv := (float64(height-j) + rand.Float64()) / float64(height)\n\t\t\t\tr := cam.GetRay(u, v)\n\t\t\t\tcol = col.Add(getColor(r, world, 0))\n\t\t\t}\n\t\t\tcol = col.Mul(1.0 / float64(samples))\n\t\t\t// Gamma-correct the colors:\n\t\t\tcol = mgl64.Vec3{math.Sqrt(col[0]), math.Sqrt(col[1]), math.Sqrt(col[2])}\n\t\t\tir, ig, ib := uint8(255.99*col.X()), uint8(255.99*col.Y()), uint8(255.99*col.Z())\n\t\t\tcanvas.Set(i, j, color.NRGBA{R: ir, G: ig, B: ib, A: 255})\n\t\t}\n\t}\n\treturn canvas\n}",
"func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tsyscall.Syscall6(gpFrustum, 6, uintptr(math.Float64bits(left)), uintptr(math.Float64bits(right)), uintptr(math.Float64bits(bottom)), uintptr(math.Float64bits(top)), uintptr(math.Float64bits(zNear)), uintptr(math.Float64bits(zFar)))\n}",
"func (gm *GraphicsManager) RenderAllFromPerspective(id component.GOiD, sm component.SceneManager) (*common.Vector, *common.Vector) {\n\terrs := common.MakeVector()\n\tcompsToSend := common.MakeVector()\n\tcomps := gm.compList.Array()\n\n\tperspLoc, err := sm.GetObjectLocation(id)\n\tif err != nil {\n\t\terrs.Insert(fmt.Errorf(\"requesting location from scene manager failed in perspective render, error %s\", err.Error()))\n\t\treturn nil, errs\n\t}\n\tcompsNearPerspective := sm.GetObjectsInLocationRadius(perspLoc, 5.0).Array()\n\n\tfor i := range comps {\n\t\tif comps[i] == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif comps[i].(component.GOiD) == id || comps[i].(component.GOiD) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor j := range compsNearPerspective {\n\t\t\tif comps[i].(component.GOiD) == compsNearPerspective[j].(component.GOiD) {\n\t\t\t\tcompsToSend.Insert(comps[i].(component.GOiD))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn compsToSend, errs\n}",
"func ColorSamplingPointsForStillColorsVideo(videoW, videoH int) map[string]image.Point {\n\touterCorners := map[string]image.Point{\n\t\t\"outer_top_left\": {1, 1},\n\t\t\"outer_top_right\": {(videoW - 1) - 1, 1},\n\t\t\"outer_bottom_right\": {videoW - 1, videoH - 1},\n\t\t\"outer_bottom_left\": {1, (videoH - 1) - 1},\n\t}\n\tedgeOffset := 5\n\tstencilW := 5\n\tinnerCorners := map[string]image.Point{\n\t\t\"inner_top_left_00\": {edgeOffset, edgeOffset},\n\t\t\"inner_top_left_01\": {edgeOffset, edgeOffset + stencilW},\n\t\t\"inner_top_left_10\": {edgeOffset + stencilW, edgeOffset},\n\t\t\"inner_top_left_11\": {edgeOffset + stencilW, edgeOffset + stencilW},\n\t\t\"inner_top_right_00\": {(videoW - 1) - edgeOffset, edgeOffset},\n\t\t\"inner_top_right_01\": {(videoW - 1) - edgeOffset, edgeOffset + stencilW},\n\t\t\"inner_top_right_10\": {(videoW - 1) - edgeOffset - stencilW, edgeOffset},\n\t\t\"inner_top_right_11\": {(videoW - 1) - edgeOffset - stencilW, edgeOffset + stencilW},\n\t\t\"inner_bottom_right_00\": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset},\n\t\t\"inner_bottom_right_01\": {(videoW - 1) - edgeOffset, (videoH - 1) - edgeOffset - stencilW},\n\t\t\"inner_bottom_right_10\": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset},\n\t\t\"inner_bottom_right_11\": {(videoW - 1) - edgeOffset - stencilW, (videoH - 1) - edgeOffset - stencilW},\n\t\t\"inner_bottom_left_00\": {edgeOffset, (videoH - 1) - edgeOffset},\n\t\t\"inner_bottom_left_01\": {edgeOffset, (videoH - 1) - edgeOffset - stencilW},\n\t\t\"inner_bottom_left_10\": {edgeOffset + stencilW, (videoH - 1) - edgeOffset},\n\t\t\"inner_bottom_left_11\": {edgeOffset + stencilW, (videoH - 1) - edgeOffset - stencilW},\n\t}\n\tsamples := map[string]image.Point{}\n\tfor k, v := range innerCorners {\n\t\tsamples[k] = v\n\t}\n\tfor k, v := range outerCorners {\n\t\tsamples[k] = v\n\t}\n\treturn samples\n}",
"func (c *imageBinaryChannel) dev2nRect(x1, y1, x2, y2 int) float64 {\n\treturn c.integralImage.dev2nRect(x1, y1, x2, y2)\n}",
"func Make_Layer_Rect_Xmap(tile m.TileID, finds []l.Polygon) Tile_Xmap {\n\tnewlist := []l.Polygon{}\n\n\t// getting rectangle\n\tfirst := l.Polygon{Polygon: Make_Tile_Poly(tile)}\n\t//first.Polygon.Add(val)\n\tval := first.Polygon[0]\n\tval = pc.Contour{val[0], val[1], val[2], val[3], val[2], val[1], val[0]}\n\tfirst.Polygon.Add(val)\n\t// iterating through each found area\n\tfor _, i := range finds {\n\t\ti.Polygon.Add(val)\n\t\t//if IsReachable(first, i, \"INTERSECTION\") == true {\n\t\tresult := first.Polygon.Construct(pc.INTERSECTION, i.Polygon)\n\t\t//}\n\n\t\t// adding the the result to newlist if possible\n\t\tif len(result) != 0 {\n\t\t\tamap := map[string]string{}\n\t\t\tamap[i.Layer] = i.Area\n\t\t\tamap[\"tile\"] = m.Tilestr(tile)\n\n\t\t\t//fmt.Print(amap, \"\\n\")\n\t\t\ti.Polygon = result\n\t\t\ti.Layers = amap\n\t\t\tnewlist = append(newlist, i)\n\t\t} else {\n\t\t\t//\tfmt.Print(\"here\\n\", first.Polystring, \"\\n\", i.Polystring, \"\\n\")\n\t\t\t//fmt.Print(\"here\\n\")\n\t\t}\n\n\t}\n\t// linting the output polygons\n\tstringlist := Lint_Layer_Polygons(newlist)\n\n\t// iterating through each value in newlist\n\txmaptotal := map[string][]Yrow{}\n\tfor _, i := range stringlist {\n\t\txmap := Make_Xmap_Total(get_coords_json(i[1]), i[0], tile)\n\t\tfor k, v := range xmap {\n\t\t\txmaptotal[k] = append(xmaptotal[k], v...)\n\t\t}\n\t}\n\tvalbool := false\n\n\tif valbool == true {\n\t\tstringlist2 := []string{\"LONG,LAT,AREA\"}\n\t\tfor k, v := range xmaptotal {\n\t\t\tx := Get_Middle(k)[0]\n\t\t\tfor _, vv := range v {\n\t\t\t\tarea := strings.Replace(vv.Area, \",\", \"\", -1)\n\t\t\t\tstringlist2 = append(stringlist2, fmt.Sprintf(\"%f,%f,%s\", x, vv.Range[0], area))\n\t\t\t\tstringlist2 = append(stringlist2, fmt.Sprintf(\"%f,%f,%s\", x, vv.Range[1], area))\n\t\t\t}\n\n\t\t}\n\t\t//fmt.Print(xmap, \"\\n\")\n\t\tbds := m.Bounds(tile)\n\t\tcount := 0\n\t\tlatconst := bds.N\n\n\t\tfor count < 100000 {\n\t\t\tcount += 1\n\t\t\tpt := RandomPt(bds)\n\t\t\tareat := strings.Replace(Pip_Simple(pt, xmaptotal, latconst), \",\", \"\", -1)\n\t\t\tfmt.Print(areat)\n\t\t\tif areat != \"\" {\n\t\t\t\tfmt.Print(\"Here\\n\")\n\t\t\t\tstringlist2 = append(stringlist2, fmt.Sprintf(\"%f,%f,%s\", pt[0], pt[1], areat))\n\t\t\t}\n\t\t}\n\n\t\ta := strings.Join(stringlist2, \"\\n\")\n\t\tff, _ := os.Create(\"d.csv\")\n\t\tff.WriteString(a)\n\t\tfmt.Print(a, \"\\n\")\n\t}\n\t//ff, _ := os.Create(\"d.csv\")\n\t//ff.WriteString(a)\n\treturn Tile_Xmap{Tile: tile, Xmap: xmaptotal}\n}",
"func (grid *SquareGrid) ToWorld(c, r float64) (float64, float64) {\n\tworld := grid.toWorldMat.Mul2x1(mgl64.Vec2{c, r})\n\treturn world.X(), world.Y()\n}",
"func assignCoordinates(layers [][]*vertex, ro *renderConfig) {\n\tmaxWidth := rowWidth(layers, ro)\n\tfor _, l := range layers {\n\t\tboxCenterOffset := maxWidth / (len(l) + 1)\n\t\tfor j := 0; j < len(l); j++ {\n\t\t\tl[j].rowOffset = (j + 1) * boxCenterOffset\n\t\t}\n\t}\n}",
"func DepthRangeIndexed(index uint32, n float64, f float64) {\n C.glowDepthRangeIndexed(gpDepthRangeIndexed, (C.GLuint)(index), (C.GLdouble)(n), (C.GLdouble)(f))\n}",
"func Render(c *Camera, w *world.World) (*canvas.Canvas, error) {\n\timage := canvas.NewCanvas(c.horizontalSizeInPixels, c.verticalSizeInPixels)\n\n\t// For each pixel of the camera\n\tfor y := 0; y < c.verticalSizeInPixels; y++ {\n\t\tfor x := 0; x < c.horizontalSizeInPixels; x++ {\n\t\t\t// Compute the ray for the current pixel\n\t\t\tr, err := RayForPixel(c, x, y)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Intersect the ray with the world to get the color at the intersection\n\t\t\tc, err := world.ColorAt(w, r)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\t// Write the color to the canvas at the current pixel\n\t\t\terr = image.WritePixel(x, y, *c)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\t}\n\n\treturn image, nil\n}",
"func Fwd(proj *Proj, long, lat float64) (x, y float64, err error) {\n\tif !proj.opened {\n\t\treturn math.NaN(), math.NaN(), errors.New(\"projection is closed\")\n\t}\n\tx1 := C.double(long)\n\ty1 := C.double(lat)\n\te := C.fwd(proj.pj, &x1, &y1)\n\tif e != nil {\n\t\treturn math.NaN(), math.NaN(), errors.New(C.GoString(e))\n\t}\n\treturn float64(x1), float64(y1), nil\n}",
"func (c *Camera) WtoS(x, y float64) (float64, float64) {\n\tvar sx, sy float64\n\tsx = (x-c.lookAtX)*c.zoom + float64(c.screenW)/2.0\n\tsy = (c.lookAtY-y)*c.zoom + float64(c.screenH)/2.0\n\treturn sx, sy\n}",
"func normal2imageCoordinate(x, y int) (int, int) {\n\tx = x + maxX/3\n\ty = y + maxY/2\n\treturn x, y\n}",
"func perspProj(a *vec3.T, cam *Camera) *vec2.T {\n\tm := mkExtrinsicCameraMtx(cam)\n\tprintM4(m)\n\tsp := vec3.From(a)\n\tm.TransformVec3(&sp)\n\treturn &vec2.T{sp[0], sp[1]}\n}",
"func (c *Camera) ScreenToWorld(posX, posY int) (float64, float64) {\n\tinverseMatrix := c.worldMatrix()\n\tif inverseMatrix.IsInvertible() {\n\t\tinverseMatrix.Invert()\n\t\treturn inverseMatrix.Apply(float64(posX), float64(posY))\n\t}\n\t// When scaling it can happend that matrix is not invertable\n\treturn math.NaN(), math.NaN()\n}",
"func (c *Camera) StoW(sx, sy int) (float64, float64) {\n\tvar x, y float64\n\tx = (float64(sx)-float64(c.screenW)/2.0)/c.zoom + c.lookAtX\n\ty = c.lookAtY - (float64(sy)-float64(c.screenH)/2.0)/c.zoom\n\treturn x, y\n}",
"func (p *G2Jac) ToProjFromJac() *G2Jac {\n\t// memalloc\n\tvar buf e2\n\tbuf.Square(&p.Z)\n\n\tp.X.Mul(&p.X, &p.Z)\n\tp.Z.Mul(&p.Z, &buf)\n\n\treturn p\n}",
"func XYWHTo4points(x, y, w, h, fbh float32) (x1, y1, x2, y2, x3, y3, x4, y4 float32) {\n\tx1 = x\n\tx2 = x\n\tx3 = x + w\n\tx4 = x + w\n\ty1 = fbh - (y + h)\n\ty2 = fbh - y\n\ty3 = fbh - (y + h)\n\ty4 = fbh - y\n\treturn\n}",
"func DepthRange(near float64, far float64) {\n C.glowDepthRange(gpDepthRange, (C.GLdouble)(near), (C.GLdouble)(far))\n}",
"func OfGeomPoints(points ...geom.Point) Winding { return Order{}.OfGeomPoints(points...) }",
"func OfPoints(pts ...[2]float64) Winding { return Order{}.OfPoints(pts...) }",
"func ScreenXY(value Vec2) *SimpleElement { return newSEVec2(\"screenXY\", value) }",
"func (crs XYZ) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func (m Matrix) Unproject(u Vec) Vec {\n\tdet := m[0]*m[3] - m[2]*m[1]\n\treturn Vec{\n\t\t(m[3]*(u.X-m[4]) - m[2]*(u.Y-m[5])) / det,\n\t\t(-m[1]*(u.X-m[4]) + m[0]*(u.Y-m[5])) / det,\n\t}\n}",
"func (c *Camera) Update(window *pixelgl.Window) {\n\tc.pixelPosition.X = c.chaseObject.GetPosition().X - window.Bounds().Max.X / 2\n\tc.pixelPosition.Y = c.chaseObject.GetPosition().Y - window.Bounds().Max.Y / 2\n\n\tc.matrixPosition = pixel.IM.Moved(c.pixelPosition.Scaled(-1))\n\twindow.SetMatrix(c.matrixPosition)\n\n\tc.zoom *= math.Pow(c.zoomSpeed, window.MouseScroll().Y)\n\n\tif c.zoom > c.maxZoom {\n\t\tc.zoom = c.maxZoom\n\t} else if c.zoom < c.minZoom {\n\t\tc.zoom = c.minZoom\n\t}\n}",
"func Frustum(left, right, bottom, top, near, far float64) Mat4 {\n\trml, tmb, fmn := (right - left), (top - bottom), (far - near)\n\tA, B, C, D := (right+left)/rml, (top+bottom)/tmb, -(far+near)/fmn, -(2*far*near)/fmn\n\n\treturn Mat4{float64((2. * near) / rml), 0, 0, 0, 0, float64((2. * near) / tmb), 0, 0, float64(A), float64(B), float64(C), -1, 0, 0, float64(D), 0}\n}",
"func (self *Viewport) Reshape(width int, height int) {\n\tself.selectionDirty = false\n\tself.screenWidth = width\n\tself.screenHeight = height\n\n\tgl.Viewport(0, 0, width, height)\n\n\tviewWidth := float64(self.screenWidth) / float64(SCREEN_SCALE)\n\tviewHeight := float64(self.screenHeight) / float64(SCREEN_SCALE)\n\n\tself.lplane = -viewWidth / 2\n\tself.rplane = viewWidth / 2\n\tself.bplane = -viewHeight / 4\n\tself.tplane = 3 * viewHeight / 4\n\n\tgl.MatrixMode(gl.PROJECTION)\n\tgl.LoadIdentity()\n\tgl.Ortho(self.lplane, self.rplane, self.bplane, self.tplane, -60, 60)\n\n\t// self.Perspective(90, 1, 0.01,1000);\n\n\tgl.MatrixMode(gl.MODELVIEW)\n\tgl.LoadIdentity()\n\tpicker.x = float32(viewport.rplane) - picker.radius + BLOCK_SCALE*0.5\n\tpicker.y = float32(viewport.bplane) + picker.radius - BLOCK_SCALE*0.5\n\n}",
"func (crs LambertConformalConic2SP) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func (T triangle) rasterize( C canvas) {\n\tmask:=make([][]bool,C.yres,C.yres)\n\tfor i := 0; i < C.yres; i++ {\n\t\tmaskpre:=make([]bool,C.xres,C.xres )\n\t\tmask[i]=maskpre\n\t}\n\tp0:=[2]float64{T.p0.x/T.p0.z*C.res,T.p0.y/T.p0.z*C.res}\n\tp1:=[2]float64{T.p1.x/T.p1.z*C.res,T.p1.y/T.p1.z*C.res}\n\tp2:=[2]float64{T.p2.x/T.p2.z*C.res,T.p2.y/T.p2.z*C.res}\n\tedges:=[3][2][2]float64{[2][2]float64{p0,p1},[2][2]float64{p1,p2},[2][2]float64{p2,p0}}\n\n\tminy,maxy:=2147483647,0\n\tfor _,edge := range edges {\n\t\tif edge[0][1]>edge[1][1] {\n\t\t\tedge[0],edge[1]=edge[1],edge[0]\n\t\t}\n\t\ty0:=int( edge[0][1]-0.5)-C.y0\n\t\ty1:=int( edge[1][1]-0.5)-C.y0\n\t\tif y0 < 0 {\n\t\t\ty0=0\n\t\t} else if y0 >= C.yres {\n\t\t\ty0=C.yres-1\n\t\t}\n\t\tif y1 <0 {\n\t\t\ty1=0\n\t\t} else if y1 >= C.yres {\n\t\t\ty1=C.yres-1\n\t\t}\n\t\tif y0<miny {\n\t\t\tminy=y0\n\t\t}\n\t\tif y1>maxy {\n\t\t\tmaxy=y1\n\t\t}\n\t\tfor i := y0; i <= y1; i++ {\n\t\t\tfloati:=float64(i+C.y0)+0.5\n\t \t\tslopeinv:= (edge[0][0]-edge[1][0])/(edge[0][1]-edge[1][1])\n\t\t\tcrosx:=edge[0][0]+slopeinv*(floati-edge[0][1])\n\t\t\txindex:=int(crosx+0.5)-C.x0\n\t\t\tif xindex<C.xres {\n\t\t\t\tif xindex>=0 {\n\t\t\t\t\tmask[i][xindex]= !mask[i][xindex]\n\t\t\t\t} else{\n\t\t\t\t\tmask[i][0]= !mask[i][0]\n\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\tfor i :=miny ; i < maxy ; i++ {\n\t\tinside:=false\n\t\tfor j := 0; j < C.xres; j++ {\n\t\t\tif mask[i][j]{\n\t\t\t\tinside= !inside\n\t\t\t\tif !inside{\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif inside {\n\t\t\t\tz:=T.p0dotn/T.n.dot(vec3{float64(j+C.x0)/C.res,float64(i+C.y0)/C.res,1.})\n\t\t\t\tif z<C.z[i][j] {\n\t\t\t\t\tC.z[i][j]=z\n\t\t\t\t\tC.pic[i][j]=T.color\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}",
"func (o *Grid) boundaries() {\n\tn0 := o.npts[0]\n\tn1 := o.npts[1]\n\tif o.ndim == 2 {\n\t\to.edge = make([][]int, 4) // xmin,xmax,ymin,ymax\n\t\to.edge[0] = make([]int, n1) // xmin\n\t\to.edge[1] = make([]int, n1) // xmax\n\t\to.edge[2] = make([]int, n0) // ymin\n\t\to.edge[3] = make([]int, n0) // ymax\n\t\tfor n := 0; n < n1; n++ {\n\t\t\to.edge[0][n] = n * n0 // xmin\n\t\t\to.edge[1][n] = n*n0 + n0 - 1 // xmax\n\t\t}\n\t\tfor m := 0; m < n0; m++ {\n\t\t\to.edge[2][m] = m // ymin\n\t\t\to.edge[3][m] = m + n0*(n1-1) // ymax\n\t\t}\n\t\treturn\n\t}\n\tn2 := o.npts[2]\n\to.face = make([][]int, 6) // xmin,xmax,ymin,ymax,zmin,zmax\n\to.face[0] = make([]int, n1*n2) // xmin\n\to.face[1] = make([]int, n1*n2) // xmax\n\to.face[2] = make([]int, n0*n2) // ymin\n\to.face[3] = make([]int, n0*n2) // ymax\n\to.face[4] = make([]int, n0*n1) // zmin\n\to.face[5] = make([]int, n0*n1) // zmax\n\tt := 0\n\tfor p := 0; p < n2; p++ { // loop over z\n\t\tfor n := 0; n < n1; n++ { // loop over y\n\t\t\to.face[0][t] = n*n0 + (n0*n1)*p // xmin\n\t\t\to.face[1][t] = n*n0 + (n0*n1)*p + (n0 - 1) // xmax\n\t\t\tt++\n\t\t}\n\t}\n\tt = 0\n\tfor p := 0; p < n2; p++ { // loop over z\n\t\tfor m := 0; m < n0; m++ { // loop over x\n\t\t\to.face[2][t] = m + (n0*n1)*p // ymin\n\t\t\to.face[3][t] = m + (n0*n1)*p + n0*(n1-1) // ymax\n\t\t\tt++\n\t\t}\n\t}\n\tt = 0\n\tfor n := 0; n < n1; n++ { // loop over y\n\t\tfor m := 0; m < n0; m++ { // loop over x\n\t\t\to.face[4][t] = m + n0*n // zmin\n\t\t\to.face[5][t] = m + n0*n + (n0*n1)*(n2-1) // zmax\n\t\t\tt++\n\t\t}\n\t}\n}",
"func (c *canvas) findObjects() {\n\tp := Point{}\n\n\t// Find any new paths by starting with a point that wasn't yet visited, beginning at the top\n\t// left of the grid.\n\tfor y := 0; y < c.size.Y; y++ {\n\t\tp.Y = y\n\t\tfor x := 0; x < c.size.X; x++ {\n\t\t\tp.X = x\n\t\t\tif c.isVisited(p) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ch := c.at(p); ch.isPathStart() {\n\t\t\t\t// Found the start of a one or multiple connected paths. Traverse all\n\t\t\t\t// connecting points. This will generate multiple objects if multiple\n\t\t\t\t// paths (either open or closed) are found.\n\t\t\t\tc.visit(p)\n\t\t\t\tobjs := c.scanPath([]Point{p})\n\t\t\t\tfor _, obj := range objs {\n\t\t\t\t\t// For all points in all objects found, mark the points as visited.\n\t\t\t\t\tfor _, p := range obj.Points() {\n\t\t\t\t\t\tc.visit(p)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tc.objects = append(c.objects, objs...)\n\t\t\t}\n\t\t}\n\t}\n\n\t// A second pass through the grid attempts to identify any text within the grid.\n\tfor y := 0; y < c.size.Y; y++ {\n\t\tp.Y = y\n\t\tfor x := 0; x < c.size.X; x++ {\n\t\t\tp.X = x\n\t\t\tif c.isVisited(p) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif ch := c.at(p); ch.isTextStart() {\n\t\t\t\tobj := c.scanText(p)\n\n\t\t\t\t// scanText will return nil if the text at this area is simply\n\t\t\t\t// setting options on a container object.\n\t\t\t\tif obj == nil {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfor _, p := range obj.Points() {\n\t\t\t\t\tc.visit(p)\n\t\t\t\t}\n\t\t\t\tc.objects = append(c.objects, obj)\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Sort(c.objects)\n}",
"func (outer outer) Shape() []pos.Rel {\r\n\tl := make([]pos.Rel, 2*(outer.Xlen+outer.Ylen))\r\n\tfor i := 0; i < outer.Xlen; i++ {\r\n\t\tl[i] = pos.Rel{Z: i, W: -1}\r\n\t\tl[outer.Xlen+i] = pos.Rel{Z: i, W: outer.Ylen}\r\n\t}\r\n\tfor j := 0; j < outer.Ylen; j++ {\r\n\t\tl[2*outer.Xlen+j] = pos.Rel{Z: -1, W: j}\r\n\t\tl[2*outer.Xlen+outer.Ylen+j] = pos.Rel{Z: outer.Xlen, W: j}\r\n\t}\r\n\treturn l\r\n}",
"func (o *WObj) Bounds() (float64, float64) {\n\tbnds := o.Hitbox.Bounds()\n\treturn bnds.X, bnds.Y\n}",
"func (grid *SquareGrid) ToGrid(x, y float64) (float64, float64) {\n\tg := grid.toWorldMat.Inv().Mul2x1(mgl64.Vec2{x, y})\n\treturn g.X(), g.Y()\n}",
"func Frustum(left float64, right float64, bottom float64, top float64, zNear float64, zFar float64) {\n\tC.glowFrustum(gpFrustum, (C.GLdouble)(left), (C.GLdouble)(right), (C.GLdouble)(bottom), (C.GLdouble)(top), (C.GLdouble)(zNear), (C.GLdouble)(zFar))\n}",
"func (self *Graphics) CameraOffset() *Point{\n return &Point{self.Object.Get(\"cameraOffset\")}\n}",
"func (crs LonLat) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func toPointGrid(r model.Quadrilateral, roughSize float64) [][]model.Vector {\n\txlen := float64(model.VectorFromTo(r.P1, r.P2).Length())\n\tylen := float64(model.VectorFromTo(r.P1, r.P4).Length())\n\tnumDivisionsX := math.Ceil(xlen / roughSize)\n\tnumDivisionsY := math.Ceil(ylen / roughSize)\n\tpointSizeX := xlen / numDivisionsX\n\tpointSizeY := ylen / numDivisionsY\n\txVector := model.VectorFromTo(r.P1, r.P2).Normalize().Times(float32(pointSizeX))\n\tyVector := model.VectorFromTo(r.P1, r.P4).Normalize().Times(float32(pointSizeY))\n\n\tnumPointsX := int(numDivisionsX) + 1\n\tnumPointsY := int(numDivisionsY) + 1\n\n\tgrid := make([][]model.Vector, numPointsY)\n\tfor y := 0; y < numPointsY; y++ {\n\t\trow := make([]model.Vector, numPointsX)\n\t\tfor x := 0; x < numPointsX; x++ {\n\t\t\trow[x] = r.P1.Add(xVector.Times(float32(x))).Add(yVector.Times(float32(y)))\n\t\t}\n\t\tgrid[y] = row\n\t}\n\n\treturn grid\n}",
"func reshape(window *glfw.Window, width, height int) {\n\tgl.Viewport(0, 0, int32(width), int32(height));\n\taspect_ratio = (float32(width) / 640.0 * 4.0) / (float32(height) / 480.0 * 3.0);\n}",
"func (crs WebMercator) FromWGS84(x0, y0, z0 float64) (x, y, z float64) {\n\treturn fromWGS84(crs.GeodeticDatum, x0, y0, z0)\n}",
"func PointsRect(r pixel.Rect) (bottomleft, bottomright, topleft, topright pixel.Vec) {\n\tbottomleft = pixel.V(r.Min.X, r.Min.Y)\n\tbottomright = pixel.V(r.Max.X, r.Min.Y)\n\ttopleft = pixel.V(r.Min.X, r.Max.Y)\n\ttopright = pixel.V(r.Max.X, r.Max.Y)\n\treturn\n}",
"func (g game) WindowDeep() (baseWindow *glfw.Window) {\r\n\treturn *(**glfw.Window)(unsafe.Pointer(reflect.Indirect(reflect.ValueOf(g.window)).FieldByName(\"window\").UnsafeAddr()))\r\n}",
"func Project(v1, v2 Vect) Vect {\n\treturn Mult(v2, Dot(v1, v2)/Dot(v2, v2))\n}",
"func AllElementsScene(frame int) (*render.Scene, error) {\n\tcamera, err := render.NewCamera(geometry.Ray{geometry.Point{10, 10, 5}, geometry.Vector{-10, -10, -5}},\n\t\tgeometry.Vector{-10, -10, 40}, 30, 0, 1, 1, 2)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tscene := render.Scene{Camera: camera, BackgroundColor: shading.Color{0.1, 0.8, 1}}\n\n\tyzPlane, err := surface.NewPlane(\n\t\tgeometry.Point{0, 0, 0},\n\t\tgeometry.Vector{0, 4, 0},\n\t\tgeometry.Vector{0, 0, 2},\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.CheckerboardTexture{shading.Color{0.9, 0.1, 0.1}, shading.Color{0.8, 0.8, 0.8}, 1,\n\t\t\t\t0.5},\n\t\t\tOpacity: 1,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddSurface(yzPlane)\n\n\txzPlane, err := surface.NewPlane(\n\t\tgeometry.Point{0, 0, 0},\n\t\tgeometry.Vector{4, 0, 0},\n\t\tgeometry.Vector{0, 0, 2},\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.CheckerboardTexture{shading.Color{0.2, 0.5, 1}, shading.Color{0, 0, 0}, 0.1, 0.1},\n\t\t\tOpacity: 1,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddSurface(xzPlane)\n\n\txyPlane, err := surface.NewPlane(\n\t\tgeometry.Point{0, 0, 0},\n\t\tgeometry.Vector{4, 0, 0},\n\t\tgeometry.Vector{0, 10, 0},\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.CheckerboardTexture{shading.Color{0.9, 0.9, 0.9}, shading.Color{0.2, 0.2, 0.2}, 0.3,\n\t\t\t\t0.3},\n\t\t\tOpacity: 1,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddSurface(xyPlane)\n\n\tmirrorSphere, err := surface.NewSphere(\n\t\tgeometry.Point{1.5, 1.5, 0.75},\n\t\t0.5,\n\t\tgeometry.Vector{0, 0, 1},\n\t\tgeometry.Vector{1, 0, 0},\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.SolidTexture{shading.Color{0.5, 0.5, 0.5}},\n\t\t\tSpecularExponent: 100,\n\t\t\tSpecularIntensity: 0.5,\n\t\t\tOpacity: 1,\n\t\t\tReflectivity: 0.8,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddSurface(mirrorSphere)\n\n\tcheckerboardSphere, err := surface.NewSphere(\n\t\tgeometry.Point{1, 4.4, 1},\n\t\t0.3,\n\t\tgeometry.Vector{0, 1, 0},\n\t\tgeometry.Vector{1, 0, 0},\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.CheckerboardTexture{\n\t\t\t\tColor1: shading.Color{1, 1, 1},\n\t\t\t\tColor2: shading.Color{0, 0, 1},\n\t\t\t\tUPitch: math.Pi / 2,\n\t\t\t\tVPitch: math.Pi / 4,\n\t\t\t},\n\t\t\tSpecularExponent: 100,\n\t\t\tSpecularIntensity: 0.5,\n\t\t\tOpacity: 1,\n\t\t\tReflectivity: 0.3,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddSurface(checkerboardSphere)\n\n\tcheckerboardDisc, err := surface.NewDisc(\n\t\tgeometry.Point{3, 1, 0.5},\n\t\tgeometry.Vector{0.5, 0, 0},\n\t\tgeometry.Vector{0, 0.5, 0},\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.CheckerboardTexture{\n\t\t\t\tColor1: shading.Color{0.9, 0.8, 0.4},\n\t\t\t\tColor2: shading.Color{0.3, 0.3, 0},\n\t\t\t\tUPitch: 0.125,\n\t\t\t\tVPitch: math.Pi / 2,\n\t\t\t},\n\t\t\tOpacity: 1,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddSurface(checkerboardDisc)\n\n\tmirrorDisc, err := surface.NewDisc(\n\t\tgeometry.Point{2, 2, 0.1},\n\t\tgeometry.Vector{1.5, 0, 0},\n\t\tgeometry.Vector{0, 1.5, 0},\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.SolidTexture{shading.Color{0, 0, 0}},\n\t\t\tSpecularExponent: 100,\n\t\t\tSpecularIntensity: 0.5,\n\t\t\tOpacity: 1,\n\t\t\tReflectivity: 0.7,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddSurface(mirrorDisc)\n\n\tgoldCubePlanes, err := 
surface.NewBox(\n\t\tgeometry.Point{1, 3, 0.75},\n\t\tgeometry.Vector{0, 0.5, 0.5},\n\t\tgeometry.Vector{0, -0.5, 0.5},\n\t\t0.5,\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.SolidTexture{shading.Color{0.9, 0.6, 0.2}},\n\t\t\tSpecularExponent: 100,\n\t\t\tSpecularIntensity: 0.5,\n\t\t\tOpacity: 1,\n\t\t\tReflectivity: 0.1,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, plane := range goldCubePlanes {\n\t\tscene.AddSurface(plane)\n\t}\n\n\tboxPlanes, err := surface.NewBox(\n\t\tgeometry.Point{2.5, 4.3, 0.1},\n\t\tgeometry.Vector{-0.8, 0.6, 0},\n\t\tgeometry.Vector{0, 0, 2},\n\t\t0.05,\n\t\tshading.ShadingProperties{\n\t\t\tDiffuseTexture: shading.SolidTexture{shading.Color{0, 1, 0}},\n\t\t\tSpecularExponent: 100,\n\t\t\tSpecularIntensity: 0.5,\n\t\t\tOpacity: 0.1,\n\t\t\tReflectivity: 0.5,\n\t\t\tRefractiveIndex: 1.1,\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor _, plane := range boxPlanes {\n\t\tscene.AddSurface(plane)\n\t}\n\n\tlight1, err := light.NewDistantLight(\n\t\tgeometry.Vector{-10, -10, -20},\n\t\tshading.Color{1, 1, 1},\n\t\t0.75,\n\t\t0,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddLight(light1)\n\n\tlight2, err := light.NewDistantLight(\n\t\tgeometry.Vector{-10, -10, -25},\n\t\tshading.Color{1, 1, 1},\n\t\t0.75,\n\t\t0,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddLight(light2)\n\n\tlight3, err := light.NewDistantLight(\n\t\tgeometry.Vector{-11, -9, -20},\n\t\tshading.Color{1, 1, 1},\n\t\t0.75,\n\t\t0,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddLight(light3)\n\n\tlight4, err := light.NewPointLight(\n\t\tgeometry.Point{5, 1, 10},\n\t\tshading.Color{1, 1, 1},\n\t\t1000,\n\t\t0,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tscene.AddLight(light4)\n\n\treturn &scene, nil\n}",
"func (ds *DrawStack) DrawToScreen(world draw.Image, view *intgeom.Point2, w, h int) {\n\tfor _, a := range ds.as {\n\t\t// If we had concurrent operations, we'd do it here\n\t\t// in that case each draw call would return to us something\n\t\t// to composite onto the window / world\n\t\ta.DrawToScreen(world, view, w, h)\n\t}\n}",
"func BitmapToVector(floor [][]bool) []Line {\n\tp_array := createPointArray(floor)\n\n\t// Converts Points Array into a Line Array. Possibly could be turned into stand alone function \"pointsToLines(points []Point) []Line\"\n\tl_array := pointsToLines(p_array[0:])\n\n\t// Adds a Slope to each Line object.\n\tfor i := 0; i < len(l_array); i++ {\n\t\tl_array[i].Slope, l_array[i].Vertical = l_array[i].getSlope()\n\t}\n\n\t// Joins up any lines that have Points In Common and Equal Slope.\n\tfor i := 0; i < len(l_array); i++ {\n\t\tfor j := i + 1; j < len(l_array); j++ {\n\t\t\tif Debug {\n\t\t\t\tfmt.Println(\"== Line Array ==\", i, j)\n\t\t\t\tPrintLineArray(l_array[0:])\n\t\t\t}\n\t\t\tif l_array[i].canJoin(l_array[j]) {\n\t\t\t\tl_array[i] = l_array[i].joinTo(l_array[j])\n\t\t\t\tl_array_temp := l_array[0:j]\n\t\t\t\tl_array = append(l_array_temp, l_array[j+1:]...)\n\t\t\t\ti, j = 0, 1\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n\n\treturn l_array\n}",
"func roundRect(p *Path, size f32.Point, se, sw, nw, ne float32) {\n\t// https://pomax.github.io/bezierinfo/#circles_cubic.\n\tw, h := size.X, size.Y\n\tconst c = 0.55228475 // 4*(sqrt(2)-1)/3\n\tp.Move(f32.Point{X: w, Y: h - se})\n\tp.Cube(f32.Point{X: 0, Y: se * c}, f32.Point{X: -se + se*c, Y: se}, f32.Point{X: -se, Y: se}) // SE\n\tp.Line(f32.Point{X: sw - w + se, Y: 0})\n\tp.Cube(f32.Point{X: -sw * c, Y: 0}, f32.Point{X: -sw, Y: -sw + sw*c}, f32.Point{X: -sw, Y: -sw}) // SW\n\tp.Line(f32.Point{X: 0, Y: nw - h + sw})\n\tp.Cube(f32.Point{X: 0, Y: -nw * c}, f32.Point{X: nw - nw*c, Y: -nw}, f32.Point{X: nw, Y: -nw}) // NW\n\tp.Line(f32.Point{X: w - ne - nw, Y: 0})\n\tp.Cube(f32.Point{X: ne * c, Y: 0}, f32.Point{X: ne, Y: ne - ne*c}, f32.Point{X: ne, Y: ne}) // NE\n\tp.Line(f32.Point{X: 0, Y: -(ne - h + se)})\n}",
"func (c *Camera) prepareWorldSpaceUnits() {\n\t// Compute the width of half of the canvas by taking the tangent of half of the field of view.\n\t// Cutting the field of view in half creates a right triangle on the canvas, which is 1 unit\n\t// away from the camera. The adjacent is 1 and the opposite is half of the canvas.\n\thalfView := math.Tan(c.fieldOfView / 2)\n\n\t// Compute the aspect ratio\n\tc.aspectRatio = float64(c.horizontalSizeInPixels) / float64(c.verticalSizeInPixels)\n\n\t// Compute half of the width and half of the height of the canvas.\n\t// This is different than the number of horizontal or vertical pixels.\n\tif c.aspectRatio >= 1 {\n\t\t// The horizontal size is greater than or equal to the vertical size\n\t\tc.halfWidth = halfView\n\t\tc.halfHeight = halfView / c.aspectRatio\n\t} else {\n\t\t// The vertical size is greater than the horizontal size\n\t\tc.halfWidth = halfView * c.aspectRatio\n\t\tc.halfHeight = halfView\n\t}\n\n\t// Divide half of the width * 2 by the number of horizontal pixels to get\n\t// the pixel size. Note that the assumption here is that the pixels are\n\t// square, so there is no need to compute the vertical size of the pixel.\n\tc.pixelSize = (c.halfWidth * 2) / float64(c.horizontalSizeInPixels)\n}",
"func (r *Renderer) Render() {\n\tsx, sz, sy := r.c.Bounds()\n\tsxf := float64(sx - 1)\n\tsyf := float64(sy - 1)\n\tszf := float64(sz - 1)\n\tfor z := 0; z < sz; z++ {\n\t\tfor y := 0; y < sy; y++ {\n\t\t\tfor x := 0; x < sx; x++ {\n\t\t\t\tvar lr, lg, lb, _ uint32\n\t\t\t\tn := uint32(0)\n\t\t\t\tfor _, o := range r.objects {\n\t\t\t\t\tr, g, b, _ := o.At(float64(x)/sxf, float64(y)/syf, float64(z)/szf).RGBA()\n\t\t\t\t\tlr += r >> 8\n\t\t\t\t\tlg += g >> 8\n\t\t\t\t\tlb += b >> 8\n\t\t\t\t\tn++\n\t\t\t\t}\n\t\t\t\tr.c.Set(x, y, z, color.RGBA{uint8(lr/n), uint8(lg/n), uint8(lb/n), 255})\n\t\t\t}\n\t\t}\n\t}\n\tr.c.Render()\n}",
"func zOrder(x, y, minX, minY, invSize float64) int {\n\t// coords are transformed into non-negative 15-bit integer range\n\tix := 32767 * int((x-minX)*invSize)\n\tiy := 32767 * int((y-minY)*invSize)\n\n\tix = (ix | (ix << 8)) & 0x00FF00FF\n\tix = (ix | (ix << 4)) & 0x0F0F0F0F\n\tix = (ix | (ix << 2)) & 0x33333333\n\tix = (ix | (ix << 1)) & 0x55555555\n\n\tiy = (iy | (iy << 8)) & 0x00FF00FF\n\tiy = (iy | (iy << 4)) & 0x0F0F0F0F\n\tiy = (iy | (iy << 2)) & 0x33333333\n\tiy = (iy | (iy << 1)) & 0x55555555\n\n\treturn ix | (iy << 1)\n}",
"func (v *Viewport) Apply() {\n\tgl.Viewport(v.x, v.y, v.width, v.height)\n}"
] | [
"0.63158274",
"0.5481058",
"0.54486936",
"0.54296905",
"0.52340007",
"0.5148154",
"0.50325346",
"0.4956897",
"0.49509454",
"0.49372965",
"0.4893283",
"0.4836452",
"0.47983438",
"0.4735956",
"0.46727222",
"0.46138215",
"0.4606523",
"0.45490903",
"0.45301574",
"0.4525165",
"0.45135495",
"0.450603",
"0.45058045",
"0.45014602",
"0.44808874",
"0.44575742",
"0.44565976",
"0.4448889",
"0.44436902",
"0.44409922",
"0.442643",
"0.44048595",
"0.43946472",
"0.4392946",
"0.43729866",
"0.4360219",
"0.43598512",
"0.433291",
"0.43313664",
"0.43127823",
"0.42977884",
"0.42943326",
"0.42840427",
"0.42693695",
"0.42646915",
"0.42619506",
"0.42613465",
"0.42566118",
"0.4255659",
"0.4250041",
"0.42458174",
"0.42441788",
"0.42335668",
"0.42198643",
"0.42181036",
"0.4211534",
"0.42030987",
"0.41994378",
"0.41983736",
"0.41878277",
"0.41852805",
"0.4184032",
"0.41816446",
"0.41804576",
"0.4179619",
"0.41654035",
"0.41621402",
"0.41599646",
"0.4157643",
"0.4154848",
"0.41460297",
"0.41230565",
"0.41157067",
"0.4103304",
"0.40945917",
"0.40912172",
"0.40878695",
"0.40875036",
"0.4079097",
"0.4073954",
"0.40693393",
"0.40689602",
"0.40620688",
"0.40612158",
"0.4060494",
"0.40603608",
"0.40462607",
"0.40374258",
"0.40369636",
"0.40350893",
"0.40221196",
"0.40197864",
"0.4019548",
"0.40017512",
"0.3999966",
"0.39967132",
"0.3986341",
"0.3978121",
"0.39719343",
"0.3971177"
] | 0.66940176 | 0 |
UnProject transforms a set of window coordinates to object space. If your MVP (projection.Mul(modelview)) matrix is not invertible, this will return an error. Note that the projection may not be perfect if you use strict pixel locations rather than the exact values given by Projectf. (It's still unlikely to be perfect due to precision errors, but it will be closer) | func UnProject(win Vec3, modelview, projection Mat4, initialX, initialY, width, height int) (obj Vec3, err error) {
	// Inv yields the zero matrix when the combined MVP matrix is singular,
	// which the comparison against blank below detects.
	inv := projection.Mul4(modelview).Inv()
var blank Mat4
if inv == blank {
return Vec3{}, errors.New("Could not find matrix inverse (projection times modelview is probably non-singular)")
}
	// Map the window coordinates back into normalized device coordinates
	// ([-1, 1] on each axis) before applying the inverse MVP transform.
	obj4 := inv.Mul4x1(Vec4{
(2 * (win[0] - float64(initialX)) / float64(width)) - 1,
(2 * (win[1] - float64(initialY)) / float64(height)) - 1,
2*win[2] - 1,
1.0,
})
obj = obj4.Vec3()
	// Perspective divide: scale by 1/w to recover the affine object-space point.
	// obj4[3] is assumed to be non-zero here; a w of zero indicates a degenerate
	// projection for this point.
	obj[0] /= obj4[3]
	obj[1] /= obj4[3]
	obj[2] /= obj4[3]
return obj, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (m Matrix) Unproject(u Vec) Vec {\n\tdet := m[0]*m[3] - m[2]*m[1]\n\treturn Vec{\n\t\t(m[3]*(u.X-m[4]) - m[2]*(u.Y-m[5])) / det,\n\t\t(-m[1]*(u.X-m[4]) + m[0]*(u.Y-m[5])) / det,\n\t}\n}",
"func Project(obj Vec3, modelview, projection Mat4, initialX, initialY, width, height int) (win Vec3) {\n\tobj4 := obj.Vec4(1)\n\n\tvpp := projection.Mul4(modelview).Mul4x1(obj4)\n\tvpp = vpp.Mul(1 / vpp.W())\n\twin[0] = float64(initialX) + (float64(width)*(vpp[0]+1))/2\n\twin[1] = float64(initialY) + (float64(height)*(vpp[1]+1))/2\n\twin[2] = (vpp[2] + 1) / 2\n\n\treturn win\n}",
"func (m Matrix) Project(u Vec) Vec {\n\treturn Vec{m[0]*u.X + m[2]*u.Y + m[4], m[1]*u.X + m[3]*u.Y + m[5]}\n}",
"func (p *Projection) project(wx float64, wy float64) (float64, float64) {\n return ((wx / p.worldWidth) * p.canvasWidth) + (p.canvasWidth * 0.5),\n ((wy / p.worldHeight) * -p.canvasHeight) + (p.canvasHeight * 0.5)\n}",
"func (c *Camera) Project(p3 lmath.Vec3) (p2 lmath.Vec2, ok bool) {\n\tcameraInv, _ := c.Object.Transform.Mat4().Inverse()\n\tcameraInv = cameraInv.Mul(zUpRightToYUpRight)\n\n\tprojection := c.Projection.Mat4()\n\tvp := cameraInv.Mul(projection)\n\n\tp2, ok = vp.Project(p3)\n\treturn\n}",
"func (p *PointAffine) FromProj(p1 *PointProj) *PointAffine {\n\tp.X.Div(&p1.X, &p1.Z)\n\tp.Y.Div(&p1.Y, &p1.Z)\n\treturn p\n}",
"func Project(v, u *Vec) *Vec {\n\tl := v.Dot(u) / u.Magnitude()\n\tresult := Normalize(u)\n\tresult.Multiply(l)\n\treturn result\n}",
"func (p *PointProj) Neg(p1 *PointProj) *PointProj {\n\tp.Set(p1)\n\tp.X.Neg(&p.X)\n\treturn p\n}",
"func ProjectUnpause(p project.APIProject, c *cli.Context) error {\n\terr := p.Unpause(context.Background(), c.Args()...)\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\treturn nil\n}",
"func projectPoint(\n\tx, y, z float64, // 3d point to project\n\tw, h, f float64, // width, height, focal\n\tscale float64, // scale\n) (px, py float64) { // projected point\n\tx, y, z = x*scale*f, y*scale*f, z*scale*f\n\tzz := z + f\n\tif zz == 0 {\n\t\tzz = math.SmallestNonzeroFloat64\n\t}\n\tpx = x*(f/zz) + w/2\n\tpy = y*(f/zz) - h/2\n\tpy *= -1\n\treturn\n}",
"func eraseProjection(ctx *sql.Context, a *Analyzer, node sql.Node, scope *plan.Scope, sel RuleSelector) (sql.Node, transform.TreeIdentity, error) {\n\tspan, ctx := ctx.Span(\"erase_projection\")\n\tdefer span.End()\n\n\tif !node.Resolved() {\n\t\treturn node, transform.SameTree, nil\n\t}\n\n\treturn transform.Node(node, func(node sql.Node) (sql.Node, transform.TreeIdentity, error) {\n\t\tproject, ok := node.(*plan.Project)\n\t\tif ok && project.Schema().CaseSensitiveEquals(project.Child.Schema()) {\n\t\t\ta.Log(\"project erased\")\n\t\t\treturn project.Child, transform.NewTree, nil\n\t\t}\n\n\t\treturn node, transform.SameTree, nil\n\t})\n}",
"func (m MappedImageSurface) Unmap() error {\n\terr := m.Err()\n\tmismux.Lock()\n\tdefer mismux.Unlock()\n\tfrom := mis[m.id()]\n\tC.cairo_surface_unmap_image(from, m.s)\n\tm.s = nil\n\treturn err\n}",
"func Project(zone int, south bool, latitude, longitude float64) (float64, float64) {\n\n\t// False northing\n\tfn := 0.\n\tif south {\n\t\tfn = utmSouthernHemisphereFalseNorthing\n\t}\n\n\th1 := n/2 - n2*2/3 + n3*5/16 + n4*41/180\n\th2 := n2*13/48 - n3*3/5 + n4*557/1440\n\th3 := n3*61/240 - n4*103/140\n\th4 := n4 * 49561 / 161280\n\n\tq := math.Asinh(math.Tan(latitude)) - e*math.Atanh(e*math.Sin(latitude))\n\tβ := math.Atan(math.Sinh(q))\n\n\tη0 := math.Atanh(math.Cos(β) * math.Sin(longitude-λO(zone)))\n\tξ0 := math.Asin(math.Sin(β) * math.Cosh(η0))\n\n\tη1 := h1 * math.Cos(2*ξ0) * math.Sinh(2*η0)\n\tη2 := h2 * math.Cos(4*ξ0) * math.Sinh(4*η0)\n\tη3 := h3 * math.Cos(6*ξ0) * math.Sinh(6*η0)\n\tη4 := h4 * math.Cos(8*ξ0) * math.Sinh(8*η0)\n\n\tξ1 := h1 * math.Sin(2*ξ0) * math.Cosh(2*η0)\n\tξ2 := h2 * math.Sin(4*ξ0) * math.Cosh(4*η0)\n\tξ3 := h3 * math.Sin(6*ξ0) * math.Cosh(6*η0)\n\tξ4 := h4 * math.Sin(8*ξ0) * math.Cosh(8*η0)\n\n\tξ := ξ0 + ξ1 + ξ2 + ξ3 + ξ4\n\tη := η0 + η1 + η2 + η3 + η4\n\n\te := fe + kO*b*η\n\tn := fn + kO*b*ξ\n\treturn e, n\n}",
"func perspProj(a *vec3.T, cam *Camera) *vec2.T {\n\tm := mkExtrinsicCameraMtx(cam)\n\tprintM4(m)\n\tsp := vec3.From(a)\n\tm.TransformVec3(&sp)\n\treturn &vec2.T{sp[0], sp[1]}\n}",
"func (o *Grid) U(m, n, p int) la.Vector {\n\treturn o.mtr[p][n][m].U\n}",
"func (pc *perspectiveCameraImp) ProjectionMatrixInverse() *threejs.Matrix4 {\n\treturn &threejs.Matrix4{Value: pc.JSValue().Get(\"projectionMatrixInverse\")}\n}",
"func Project(lhs Vector, rhs Vector) float64 {\n\treturn DotProduct(lhs, rhs) / Magnitude(rhs)\n}",
"func (crs Projection) FromWGS84(x0, y0, z0 float64) (x, y, z float64) {\n\treturn fromWGS84(crs.GeodeticDatum, x0, y0, z0)\n}",
"func Vec3BasisProject(U, V, W, S Vec3) (o Vec3) {\n\to[0] = Vec3Dot(U, S)\n\to[1] = Vec3Dot(V, S)\n\to[2] = Vec3Dot(W, S)\n\treturn\n}",
"func Mat3Projection(out []float64, width, height float64) []float64 {\n\tout[0] = 2 / width\n\tout[1] = 0\n\tout[2] = 0\n\tout[3] = 0\n\tout[4] = -2 / height\n\tout[5] = 0\n\tout[6] = -1\n\tout[7] = 1\n\tout[8] = 1\n\treturn out\n}",
"func (bm Blendmap) View() (float32, float32, float32, float32) {\n\treturn bm.Map.viewport.Min.X, bm.Map.viewport.Min.Y, bm.Map.viewport.Max.X, bm.Map.viewport.Max.Y\n}",
"func (u Vec) Project(v Vec) Vec {\n\tlen := u.Dot(v) / v.Len()\n\treturn v.Unit().Scaled(len)\n}",
"func (img *Image) Unmap() error {\n\treturn devUnmap(img)\n}",
"func NewProjectionPerspective(fovy, near, far, viewWidth, viewHeight float64) Matrix4 {\n\n\taspect := viewWidth / viewHeight\n\n\tt := math.Tan(fovy * math.Pi / 360)\n\tb := -t\n\tr := t * aspect\n\tl := -r\n\n\t// l := -viewWidth / 2\n\t// r := viewWidth / 2\n\t// t := -viewHeight / 2\n\t// b := viewHeight / 2\n\n\treturn Matrix4{\n\t\t{(2 * near) / (r - l), 0, (r + l) / (r - l), 0},\n\t\t{0, (2 * near) / (t - b), (t + b) / (t - b), 0},\n\t\t{0, 0, -((far + near) / (far - near)), -((2 * far * near) / (far - near))},\n\t\t{0, 0, -1, 0},\n\t}\n\n}",
"func Project(v1, v2 Vect) Vect {\n\treturn Mult(v2, Dot(v1, v2)/Dot(v2, v2))\n}",
"func InverseWindow(windowType WindowType, input VectorComplex) VectorComplex {\n\tswitch windowType {\n\tcase WindowTypeHann:\n\t\treturn Hann(input)\n\tcase WindowTypeHamming:\n\t\treturn Hamming(input)\n\tcase WindowTypeNuttal:\n\t\treturn Nuttal(input)\n\t}\n\treturn nil\n}",
"func (render *Renderer_impl)Unmap() fundations.Result{\n\n}",
"func (pc *perspectiveCameraImp) MatrixWorldInverse() *threejs.Matrix4 {\n\treturn &threejs.Matrix4{Value: pc.JSValue().Get(\"matrixWorldInverse\")}\n}",
"func (a *Vec4) Invert() {\n\ta.X = -a.X\n\ta.Y = -a.Y\n\ta.Z = -a.Z\n\ta.W = -a.W\n}",
"func (p *G2Jac) ToProjFromJac() *G2Jac {\n\t// memalloc\n\tvar buf e2\n\tbuf.Square(&p.Z)\n\n\tp.X.Mul(&p.X, &p.Z)\n\tp.Z.Mul(&p.Z, &buf)\n\n\treturn p\n}",
"func MatrixProjection(fov FovPort, znear, zfar float32, rightHanded bool) Matrix4f {\n\tif rightHanded {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 1))\n\t} else {\n\t\treturn matrix4f(C.ovrMatrix4f_Projection(c_fovPort(fov), C.float(znear), C.float(zfar), 0))\n\t}\n}",
"func (r *Window) UnFullscreen() {\n\tr.renderer.UnFullscreen()\n}",
"func planIsNullProjectionOp(\n\tctx context.Context,\n\tevalCtx *tree.EvalContext,\n\toutputType *types.T,\n\texpr tree.TypedExpr,\n\tcolumnTypes []*types.T,\n\tinput colexecop.Operator,\n\tacc *mon.BoundAccount,\n\tnegate bool,\n\tfactory coldata.ColumnFactory,\n) (op colexecop.Operator, resultIdx int, typs []*types.T, err error) {\n\top, resultIdx, typs, err = planProjectionOperators(\n\t\tctx, evalCtx, expr, columnTypes, input, acc, factory,\n\t)\n\tif err != nil {\n\t\treturn op, resultIdx, typs, err\n\t}\n\toutputIdx := len(typs)\n\tisTupleNull := typs[resultIdx].Family() == types.TupleFamily\n\top = colexec.NewIsNullProjOp(\n\t\tcolmem.NewAllocator(ctx, acc, factory), op, resultIdx, outputIdx, negate, isTupleNull,\n\t)\n\ttyps = appendOneType(typs, outputType)\n\treturn op, outputIdx, typs, nil\n}",
"func Inv(proj *Proj, x, y float64) (long, lat float64, err error) {\n\tif !proj.opened {\n\t\treturn math.NaN(), math.NaN(), errors.New(\"projection is closed\")\n\t}\n\tx2 := C.double(x)\n\ty2 := C.double(y)\n\te := C.inv(proj.pj, &x2, &y2)\n\tif e != nil {\n\t\treturn math.NaN(), math.NaN(), errors.New(C.GoString(e))\n\t}\n\treturn float64(x2), float64(y2), nil\n}",
"func UnmarshalProjectCollection(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(ProjectCollection)\n\terr = core.UnmarshalPrimitive(m, \"limit\", &obj.Limit)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"total_count\", &obj.TotalCount)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"first\", &obj.First, UnmarshalPaginationLink)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"last\", &obj.Last, UnmarshalPaginationLink)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"previous\", &obj.Previous, UnmarshalPaginationLink)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"next\", &obj.Next, UnmarshalPaginationLink)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"projects\", &obj.Projects, UnmarshalProjectCollectionMemberWithMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func toPixelVector(win *pixelgl.Window, x float64, y float64) pixel.Vec {\n\tvar (\n\t\tnewY = win.Bounds().Max.Y - y\n\t)\n\n\treturn pixel.Vec{x, newY}\n}",
"func (r Rect) Projection() *Projection {\n\treturn NewProjection(r.x0, r.y0, r.x1, r.y1)\n}",
"func unbox(b bbox) (tlx, tly, brx, bry int) {\n\ttlx = b.x\n\ttly = b.y\n\tbrx = b.x + b.w\n\tbry = b.y + b.h\n\treturn\n}",
"func (w *windowImpl) DeActivate() {\n\tglfw.DetachCurrentContext()\n}",
"func (v *Viewport) Apply() {\n\tgl.Viewport(v.x, v.y, v.width, v.height)\n}",
"func UnmarshalProject(m map[string]json.RawMessage, result interface{}) (err error) {\n\tobj := new(Project)\n\terr = core.UnmarshalPrimitive(m, \"name\", &obj.Name)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"description\", &obj.Description)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"id\", &obj.ID)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalPrimitive(m, \"crn\", &obj.Crn)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"configs\", &obj.Configs, UnmarshalProjectConfig)\n\tif err != nil {\n\t\treturn\n\t}\n\terr = core.UnmarshalModel(m, \"metadata\", &obj.Metadata, UnmarshalProjectMetadata)\n\tif err != nil {\n\t\treturn\n\t}\n\treflect.ValueOf(result).Elem().Set(reflect.ValueOf(obj))\n\treturn\n}",
"func (e *Ellipsoid) Inverse(\n\tlat1, lon1, lat2, lon2 float64,\n\ts12, azi1, azi2 *float64,\n) {\n\tC.geod_inverse(&e.g,\n\t\tC.double(lat1), C.double(lon1), C.double(lat2), C.double(lon2),\n\t\t(*C.double)(s12), (*C.double)(azi1), (*C.double)(azi2))\n}",
"func (c *Camera) StoW(sx, sy int) (float64, float64) {\n\tvar x, y float64\n\tx = (float64(sx)-float64(c.screenW)/2.0)/c.zoom + c.lookAtX\n\ty = c.lookAtY - (float64(sy)-float64(c.screenH)/2.0)/c.zoom\n\treturn x, y\n}",
"func (crs TransverseMercator) FromWGS84(x0, y0, z0 float64) (x, y, z float64) {\n\treturn fromWGS84(crs.GeodeticDatum, x0, y0, z0)\n}",
"func UnmapBuffer(target gl.Enum) bool {\n\treturn gl.GoBool(gl.UnmapBuffer(gl.Enum(target)))\n}",
"func (p *PJ) Trans(direction Direction, u1, v1, w1, t1 float64) (u2, v2, w2, t2 float64, err error) {\n\tif !p.opened {\n\t\treturn 0, 0, 0, 0, errProjectionClosed\n\t}\n\n\tvar u, v, w, t C.double\n\tC.trans(p.pj, C.PJ_DIRECTION(direction), C.double(u1), C.double(v1), C.double(w1), C.double(t1), &u, &v, &w, &t)\n\n\te := C.proj_errno(p.pj)\n\tif e != 0 {\n\t\treturn 0, 0, 0, 0, errors.New(C.GoString(C.proj_errno_string(e)))\n\t}\n\n\treturn float64(u), float64(v), float64(w), float64(t), nil\n}",
"func (ip IP) Unmap() IP {\n\tif ip.Is4in6() {\n\t\tip.z = z4\n\t}\n\treturn ip\n}",
"func (cam *Camera) SetupViewProjection() {\n\tx_ratio := cam.Width / cam.Height\n\tcam.View = PerspectiveFrustum(cam.YFov, x_ratio, cam.Near, cam.Far)\n\tcam.Projection = cam.View.M44()\n}",
"func (self *Viewport) Reshape(width int, height int) {\n\tself.selectionDirty = false\n\tself.screenWidth = width\n\tself.screenHeight = height\n\n\tgl.Viewport(0, 0, width, height)\n\n\tviewWidth := float64(self.screenWidth) / float64(SCREEN_SCALE)\n\tviewHeight := float64(self.screenHeight) / float64(SCREEN_SCALE)\n\n\tself.lplane = -viewWidth / 2\n\tself.rplane = viewWidth / 2\n\tself.bplane = -viewHeight / 4\n\tself.tplane = 3 * viewHeight / 4\n\n\tgl.MatrixMode(gl.PROJECTION)\n\tgl.LoadIdentity()\n\tgl.Ortho(self.lplane, self.rplane, self.bplane, self.tplane, -60, 60)\n\n\t// self.Perspective(90, 1, 0.01,1000);\n\n\tgl.MatrixMode(gl.MODELVIEW)\n\tgl.LoadIdentity()\n\tpicker.x = float32(viewport.rplane) - picker.radius + BLOCK_SCALE*0.5\n\tpicker.y = float32(viewport.bplane) + picker.radius - BLOCK_SCALE*0.5\n\n}",
"func (p *PJ) Dist3(u1, v1, w1, u2, v2, w2 float64) (float64, error) {\n\tif !p.opened {\n\t\treturn 0, errProjectionClosed\n\t}\n\ta := C.uvwt(C.double(u1), C.double(v1), C.double(w1), 0)\n\tb := C.uvwt(C.double(u2), C.double(v2), C.double(w2), 0)\n\td := C.proj_lpz_dist(p.pj, a, b)\n\te := C.proj_errno(p.pj)\n\tif e != 0 {\n\t\treturn 0, errors.New(C.GoString(C.proj_errno_string(e)))\n\t}\n\treturn float64(d), nil\n}",
"func Frustum(left, right, bottom, top, near, far float64) Mat4 {\n\trml, tmb, fmn := (right - left), (top - bottom), (far - near)\n\tA, B, C, D := (right+left)/rml, (top+bottom)/tmb, -(far+near)/fmn, -(2*far*near)/fmn\n\n\treturn Mat4{float64((2. * near) / rml), 0, 0, 0, 0, float64((2. * near) / tmb), 0, 0, float64(A), float64(B), float64(C), -1, 0, 0, float64(D), 0}\n}",
"func (c *Camera) ScreenToWorld(posX, posY int) (float64, float64) {\n\tinverseMatrix := c.worldMatrix()\n\tif inverseMatrix.IsInvertible() {\n\t\tinverseMatrix.Invert()\n\t\treturn inverseMatrix.Apply(float64(posX), float64(posY))\n\t}\n\t// When scaling it can happend that matrix is not invertable\n\treturn math.NaN(), math.NaN()\n}",
"func UnmapBuffer(target uint32) bool {\n\tret := C.glowUnmapBuffer(gpUnmapBuffer, (C.GLenum)(target))\n\treturn ret == TRUE\n}",
"func UnmapBuffer(target uint32) bool {\n\tret := C.glowUnmapBuffer(gpUnmapBuffer, (C.GLenum)(target))\n\treturn ret == TRUE\n}",
"func (crs WebMercator) FromWGS84(x0, y0, z0 float64) (x, y, z float64) {\n\treturn fromWGS84(crs.GeodeticDatum, x0, y0, z0)\n}",
"func projectCleanup(env environment.Environment, macro *model.Macro) error {\n\tpartialComparisonService := env.ServiceFactory().MustPartialComparisonService()\n\treturn partialComparisonService.DeleteFrom(macro)\n}",
"func (s *Statement) ResetProjection() {\n\ts.workingProjection = &Projection{}\n}",
"func (a Vec4) Inverse() Vec4 {\n\treturn Vec4{-a.X, -a.Y, -a.Z, -a.W}\n}",
"func (c *Camera) updateFrustrum() {\n\tvar v types.Vec4\n\tinvProjViewMat := c.InvViewProjMat()\n\n\tvar yUp float32 = 1.0\n\tif c.InvertY {\n\t\tyUp = -1.0\n\t}\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(-1, yUp, -1, 1))\n\tc.Frustrum[0] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(1, yUp, -1, 1))\n\tc.Frustrum[1] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(-1, -yUp, -1, 1))\n\tc.Frustrum[2] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n\n\tv = invProjViewMat.Mul4x1(types.XYZW(1, -yUp, -1, 1))\n\tc.Frustrum[3] = v.Mul(1.0 / v[3]).Vec3().Sub(c.Position).Vec4(0)\n}",
"func Vproject(v1, v2 Vect) Vect {\n\treturn goVect(C.cpvproject(v1.c(), v2.c()))\n}",
"func (c *Camera) Update(window *pixelgl.Window) {\n\tc.pixelPosition.X = c.chaseObject.GetPosition().X - window.Bounds().Max.X / 2\n\tc.pixelPosition.Y = c.chaseObject.GetPosition().Y - window.Bounds().Max.Y / 2\n\n\tc.matrixPosition = pixel.IM.Moved(c.pixelPosition.Scaled(-1))\n\twindow.SetMatrix(c.matrixPosition)\n\n\tc.zoom *= math.Pow(c.zoomSpeed, window.MouseScroll().Y)\n\n\tif c.zoom > c.maxZoom {\n\t\tc.zoom = c.maxZoom\n\t} else if c.zoom < c.minZoom {\n\t\tc.zoom = c.minZoom\n\t}\n}",
"func ProjectDown(p project.APIProject, c *cli.Context) error {\n\toptions := options.Down{\n\t\tRemoveVolume: c.Bool(\"volumes\"),\n\t\tRemoveImages: options.ImageType(c.String(\"rmi\")),\n\t\tRemoveOrphans: c.Bool(\"remove-orphans\"),\n\t}\n\terr := p.Down(context.Background(), options, c.Args()...)\n\tif err != nil {\n\t\treturn cli.NewExitError(err.Error(), 1)\n\t}\n\treturn nil\n}",
"func (a *Vec4) Subtract(b Vec4) {\n\ta.X -= b.X\n\ta.Y -= b.Y\n\ta.Z -= b.Z\n\ta.W -= b.W\n}",
"func Trim(p projection) *trimFunc {\n\treturn &trimFunc{\n\t\tsubject: p.(element),\n\t\tsel: p.from(),\n\t\tlocation: TRIM_BOTH,\n\t}\n}",
"func (crs Projection) ToWGS84(x, y, z float64) (x0, y0, z0 float64) {\n\treturn toWGS84(crs.GeodeticDatum, x, y, z)\n}",
"func (img *Image) UnmountAndUnmap(mountPoint string) error {\n\treturn devUnmountAndUnmap(img, mountPoint)\n}",
"func (vm *C8VM) NullifyPixels() {\n\tfor w := 0; w < ScreenWidth; w++ {\n\t\tfor h := 0; h < ScreenHeight; h++ {\n\t\t\tvm.pixels[w][h] = 0\n\t\t}\n\t}\n}",
"func UnassignMatchup( w http.ResponseWriter, r *http.Request ) {\n\n\tvars := mux.Vars(r)\n\n\t//----------------------------------------------------------------------------\t\t\n\t// Retrieve the id params\n\t//----------------------------------------------------------------------------\t\t\n\tgameId,_ := strconv.ParseUint( vars[\"parentId\"], 10, 64)\n\n\t//----------------------------------------------------------------------------\t\t\n\t// Delegate to the Game DAO\n\t//----------------------------------------------------------------------------\t\t\n\trequestResult := GameDAO.UnassignMatchup(gameId)\n\n\t//----------------------------------------------------------------------------\n\t// Marshal the model into a JSON object\n\t//----------------------------------------------------------------------------\n\tres, _ := json.Marshal(requestResult)\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(res)\n\n}",
"func uplink_free_project_result(result C.UplinkProjectResult) {\n\tuplink_free_error(result.error)\n\tfreeProject(result.project)\n}",
"func calculateProjectiles(gamestate *gamestate) {\n\tprojLock.Lock()\n\tfor _, projectile := range gamestate.Projectiles {\n\t\tprojectile.X = projectile.X + projectile.XVelocity\n\t\tprojectile.Y = projectile.Y + projectile.YVelocity\n\t\t//projectile.XVelocity = math.floor(projectile.XVelocity - projectile.XVelocity^2 * someConstant + wind) //where someConstant has a value that makes the function not do crazy things\n\t\tprojectile.YVelocity = projectile.YVelocity - g //g should be tuned to fit the tick system so the function does not do crazy things\n\t\tif projectile.Y > heightOfMap || (projectile.X > mapSize) || (projectile.X < 0) {\n\t\t\tdelete(gamestate.Projectiles, projectile.ID)\n\t\t} else if projectile.Y > gamestate.Terrain[int(projectile.X)].Y {\n\t\t\tcalculateExplosion(int(projectile.X), int(gamestate.Terrain[int(projectile.X)].Y), explosionSize, gamestate)\n\t\t\tdelete(gamestate.Projectiles, projectile.ID)\n\t\t}\n\t}\n\tprojLock.Unlock()\n}",
"func (l Layout) DebugRender(win *pixelgl.Window) {\n\tfor key := range l.Panels {\n\t\tpanel := l.CreatePanel(key)\n\t\tpanel.Draw(win)\n\t}\n\n\t//temp camera matrix\n\t//cam := pixel.IM.Scaled(l.centerPos, 1.0).Moved(l.centerPos)\n\t//win.SetMatrix(cam)\n}",
"func (o *NewWindowOptions) Fixup() {\n\tsc := TheApp.Screen(0)\n\tscsz := sc.Geometry.Size() // window coords size\n\n\tif o.Size.X <= 0 {\n\t\to.StdPixels = false\n\t\to.Size.X = int(0.8 * float32(scsz.X) * sc.DevicePixelRatio)\n\t}\n\tif o.Size.Y <= 0 {\n\t\to.StdPixels = false\n\t\to.Size.Y = int(0.8 * float32(scsz.Y) * sc.DevicePixelRatio)\n\t}\n\n\to.Size, o.Pos = sc.ConstrainWinGeom(o.Size, o.Pos)\n\tif o.Pos.X == 0 && o.Pos.Y == 0 {\n\t\twsz := sc.WinSizeFmPix(o.Size)\n\t\tdialog, modal, _, _ := WindowFlagsToBool(o.Flags)\n\t\tnw := TheApp.NWindows()\n\t\tif nw > 0 {\n\t\t\tlastw := TheApp.Window(nw - 1)\n\t\t\tlsz := lastw.WinSize()\n\t\t\tlp := lastw.Position()\n\n\t\t\tnwbig := wsz.X > lsz.X || wsz.Y > lsz.Y\n\n\t\t\tif modal || dialog || !nwbig { // place centered on top of current\n\t\t\t\tctrx := lp.X + (lsz.X / 2)\n\t\t\t\tctry := lp.Y + (lsz.Y / 2)\n\t\t\t\to.Pos.X = ctrx - wsz.X/2\n\t\t\t\to.Pos.Y = ctry - wsz.Y/2\n\t\t\t} else { // cascade to right\n\t\t\t\to.Pos.X = lp.X + lsz.X // tile to right -- could depend on orientation\n\t\t\t\to.Pos.Y = lp.Y + 72 // and move down a bit\n\t\t\t}\n\t\t} else { // center in screen\n\t\t\to.Pos.X = scsz.X/2 - wsz.X/2\n\t\t\to.Pos.Y = scsz.Y/2 - wsz.Y/2\n\t\t}\n\t\to.Size, o.Pos = sc.ConstrainWinGeom(o.Size, o.Pos) // make sure ok\n\t}\n}",
"func SunsetView(buildings []Building) []Building {\n\tif len(buildings) == 0 {\n\t\treturn nil\n\t}\n\n\tstack := InitStack()\n\tstack.Push(buildings[0])\n\n\t// Traverse from east to west. Remove buildings east to the current one\n\t// that are smaller in height. Push the current building in the stack\n\t// afterward.\n\tfor i := 1; i < len(buildings); i++ {\n\t\tfor !stack.Empty() &&\n\t\t\tstack.Peek().(Building).Height <= buildings[i].Height {\n\t\t\tstack.Pop()\n\t\t}\n\n\t\tstack.Push(buildings[i])\n\t}\n\n\t// The stack maintains the list of buildings that can view sunset.\n\tview := make([]Building, 0)\n\tfor !stack.Empty() {\n\t\tview = append(view, stack.Pop().(Building))\n\t}\n\n\t// Return the buildings in west-to-east order. The height of the\n\t// buildings in west-to-east order must be increasing.\n\treturn view\n}",
"func VPMOVUSDW_Z(xyz, k, mxy operand.Op) { ctx.VPMOVUSDW_Z(xyz, k, mxy) }",
"func (n *BufferView) UnfocusBuffers() {\n\t// clear focus from buffers\n\tfor _, buffPane := range n.buffers {\n\t\tbuffPane.SetFocus(false)\n\t}\n}",
"func (v *Vec4) Subtract(x *Vec4) {\n\tv.X -= x.X\n\tv.Y -= x.Y\n\tv.Z -= x.Z\n\tv.W -= v.W\n}",
"func MapUnstage(scope *Scope, key tf.Output, indices tf.Output, dtypes []tf.DataType, optional ...MapUnstageAttr) (values []tf.Output) {\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tattrs := map[string]interface{}{\"dtypes\": dtypes}\n\tfor _, a := range optional {\n\t\ta(attrs)\n\t}\n\topspec := tf.OpSpec{\n\t\tType: \"MapUnstage\",\n\t\tInput: []tf.Input{\n\t\t\tkey, indices,\n\t\t},\n\t\tAttrs: attrs,\n\t}\n\top := scope.AddOperation(opspec)\n\tif scope.Err() != nil {\n\t\treturn\n\t}\n\tvar idx int\n\tvar err error\n\tif values, idx, err = makeOutputList(op, idx, \"values\"); err != nil {\n\t\tscope.UpdateErr(\"MapUnstage\", err)\n\t\treturn\n\t}\n\treturn values\n}",
"func MatrixOrthoSubProjection(projection Matrix4f, orthoScale Vector2f, orthoDistance, eyeViewAdjustX float32) Matrix4f {\n\treturn matrix4f(C.ovrMatrix4f_OrthoSubProjection(c_matrix4f(projection), c_vector2f(orthoScale), C.float(orthoDistance), C.float(eyeViewAdjustX)))\n}",
"func VPMULLW_Z(mxyz, xyz, k, xyz1 operand.Op) { ctx.VPMULLW_Z(mxyz, xyz, k, xyz1) }",
"func (gm *GraphicsManager) RenderAllFromPerspective(id component.GOiD, sm component.SceneManager) (*common.Vector, *common.Vector) {\n\terrs := common.MakeVector()\n\tcompsToSend := common.MakeVector()\n\tcomps := gm.compList.Array()\n\n\tperspLoc, err := sm.GetObjectLocation(id)\n\tif err != nil {\n\t\terrs.Insert(fmt.Errorf(\"requesting location from scene manager failed in perspective render, error %s\", err.Error()))\n\t\treturn nil, errs\n\t}\n\tcompsNearPerspective := sm.GetObjectsInLocationRadius(perspLoc, 5.0).Array()\n\n\tfor i := range comps {\n\t\tif comps[i] == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif comps[i].(component.GOiD) == id || comps[i].(component.GOiD) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor j := range compsNearPerspective {\n\t\t\tif comps[i].(component.GOiD) == compsNearPerspective[j].(component.GOiD) {\n\t\t\t\tcompsToSend.Insert(comps[i].(component.GOiD))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn compsToSend, errs\n}",
"func (r Ray) ProjectX(x float64) float64 {\n\t/*\n\t\tderived by evaluating y form r.Formula\n\t*/\n\treturn (r.V.Y*x - r.V.Y*r.O.X + r.V.X*r.O.Y) / r.V.X\n}",
"func (c *Camera) Reset() {\n\tc.Object.Reset()\n\tc.Projection = ConvertMat4(lmath.Mat4Identity)\n}",
"func DebugFindPoints(viewImage *image.Image, screenImage *image.Image, width int, height int) {\n\twindow1 := gocv.NewWindow(\"test1\")\n\tdefer window1.Close()\n\n\twindow2 := gocv.NewWindow(\"test2\")\n\tdefer window2.Close()\n\n\tpoints := make([]image.Point, 5)\n\n\tpoints[0] = image.Point{((*viewImage).Bounds().Max.X - 1) / 2, ((*viewImage).Bounds().Max.Y - 1) / 2}\n\tpoints[1] = image.Point{points[0].X - width, points[0].Y - height}\n\tpoints[2] = image.Point{points[0].X + width, points[0].Y - height}\n\tpoints[3] = image.Point{points[0].X - width, points[0].Y + height}\n\tpoints[4] = image.Point{points[0].X + width, points[0].Y + height}\n\n\tmatchedPoints, err := FindPoints(viewImage, screenImage, points)\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn\n\t}\n\n\tphotoMat, _ := gocv.ImageToMatRGBA(*viewImage)\n\tdefer photoMat.Close()\n\tscreenMat, _ := gocv.ImageToMatRGBA(*screenImage)\n\tdefer screenMat.Close()\n\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\tfor _, point := range points {\n\t\tgocv.Circle(&photoMat, point, 5, color.RGBA{255, 0, 0, 0}, -1)\n\t}\n\n\tgocv.Line(&photoMat, points[1], points[2], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&photoMat, points[1], points[3], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&photoMat, points[3], points[4], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&photoMat, points[4], points[2], color.RGBA{0, 255, 0, 0}, 2)\n\n\tfor _, point := range matchedPoints {\n\t\tgocv.Circle(&screenMat, point, 5, color.RGBA{255, 0, 0, 0}, -1)\n\t}\n\n\tgocv.Line(&screenMat, matchedPoints[1], matchedPoints[2], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&screenMat, matchedPoints[1], matchedPoints[3], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&screenMat, matchedPoints[3], matchedPoints[4], color.RGBA{0, 255, 0, 0}, 2)\n\tgocv.Line(&screenMat, matchedPoints[2], matchedPoints[4], color.RGBA{0, 255, 0, 0}, 2)\n\n\tnewWidth := (matchedPoints[2].X - matchedPoints[1].X + matchedPoints[4].X - matchedPoints[3].X) / 2\n\tnewHeight := (matchedPoints[3].Y - matchedPoints[1].Y + matchedPoints[4].Y - matchedPoints[2].Y) / 2\n\tlog.Println(\"End Calculating\")\n\n\tlog.Printf(\"[DEBUG] Width: %v\\n\", newWidth)\n\tlog.Printf(\"[DEBUG] Height: %v\\n\", newHeight)\n\n\twindow1.IMShow(photoMat)\n\twindow2.IMShow(screenMat)\n\n\tfor {\n\n\t\tif window1.WaitKey(1) >= 0 {\n\t\t\tbreak\n\t\t}\n\n\t\tif window2.WaitKey(1) >= 0 {\n\t\t\tbreak\n\t\t}\n\n\t}\n}",
"func (scaler *MinMaxScaler) InverseTransform(X, Y *mat.Dense) (Xout, Yout *mat.Dense) {\n\tif X == nil {\n\t\treturn X, Y\n\t}\n\tnSamples, nFeatures := X.Dims()\n\tXout = mat.NewDense(nSamples, nFeatures, nil)\n\tXout.Apply(func(i int, j int, x float64) float64 {\n\t\treturn (x - scaler.Min.At(0, j)) / scaler.Scale.At(0, j)\n\t}, X)\n\treturn Xout, Y\n}",
"func (poly *PolynomialFeatures) InverseTransform(X, Y *mat.Dense) (Xout, Yout *mat.Dense) {\n\tif X == nil {\n\t\treturn X, Y\n\t}\n\ttype jt struct{ jorig, jpoly int }\n\tvar jts []jt\n\tnSamples, _ := X.Dims()\n\tXoutCols := 0\n\n\tintsum := func(a []int) int {\n\t\tvar s int\n\t\tfor _, v := range a {\n\t\t\ts += v\n\t\t}\n\t\treturn s\n\t}\n\tintmaxidx := func(a []int) int {\n\t\ti := 0\n\t\tfor j, v := range a {\n\t\t\tif v > a[i] {\n\t\t\t\ti = j\n\t\t\t}\n\t\t}\n\t\treturn i\n\t}\n\tfor ioutput, p := range poly.Powers {\n\t\tvar jMax = intmaxidx(p)\n\t\tvar sumpj = intsum(p)\n\t\tif sumpj == 1 {\n\t\t\t//fmt.Println(ioutput, \"p\", p, \"sumpj\", sumpj, \"jWith1\", jMax)\n\t\t\tjts = append(jts, jt{jMax, ioutput})\n\t\t\tif jMax >= XoutCols {\n\t\t\t\tXoutCols = jMax + 1\n\t\t\t}\n\t\t}\n\t}\n\tXout = mat.NewDense(nSamples, XoutCols, nil)\n\n\tfor _, pair := range jts {\n\t\t//fmt.Println(jts)\n\t\tfor i := 0; i < nSamples; i++ {\n\t\t\tXout.Set(i, pair.jorig, X.At(i, pair.jpoly))\n\t\t}\n\t}\n\treturn\n}",
"func FromProjection(h dogma.ProjectionMessageHandler) RichProjection {\n\tcfg := &projection{\n\t\tentity: entity{\n\t\t\trt: reflect.TypeOf(h),\n\t\t},\n\t\timpl: h,\n\t\tdeliveryPolicy: dogma.UnicastProjectionDeliveryPolicy{},\n\t}\n\n\tc := &projectionConfigurer{\n\t\thandlerConfigurer: handlerConfigurer{\n\t\t\tentityConfigurer: entityConfigurer{\n\t\t\t\tentity: &cfg.entity,\n\t\t\t},\n\t\t},\n\t}\n\n\th.Configure(c)\n\n\tc.validate()\n\tc.mustConsume(message.EventRole)\n\n\tif c.deliveryPolicy != nil {\n\t\tcfg.deliveryPolicy = c.deliveryPolicy\n\t}\n\n\treturn cfg\n}",
"func FourPointTransform(img gocv.Mat, pts []image.Point, dst *gocv.Mat) {\n\trect := OrderPoints(pts)\n\ttl := rect[0]\n\ttr := rect[1]\n\tbr := rect[2]\n\tbl := rect[3]\n\n\twidthA := math.Sqrt(math.Pow(float64(br.X)-float64(bl.X), 2) +\n\t\tmath.Pow(float64(br.Y)-float64(bl.Y), 2))\n\twidthB := math.Sqrt(math.Pow(float64(tr.X)-float64(tl.X), 2) +\n\t\tmath.Pow(float64(tr.Y)-float64(tl.Y), 2))\n\tmaxWidth := math.Max(widthA, widthB)\n\n\theightA := math.Sqrt(math.Pow(float64(tr.X)-float64(br.X), 2) +\n\t\tmath.Pow(float64(tr.Y)-float64(br.Y), 2))\n\theightB := math.Sqrt(math.Pow(float64(tl.X)-float64(bl.X), 2) +\n\t\tmath.Pow(float64(tl.Y)-float64(bl.Y), 2))\n\tmaxHeight := math.Max(heightA, heightB)\n\n\tdt := []image.Point{\n\t\timage.Pt(0, 0),\n\t\timage.Pt(int(maxWidth)-1, 0),\n\t\timage.Pt(int(maxWidth)-1, int(maxHeight)-1),\n\t\timage.Pt(0, int(maxHeight)-1)}\n\n\tm := gocv.GetPerspectiveTransform(rect, dt)\n\tgocv.WarpPerspective(img, dst, m, image.Pt(int(maxWidth), int(maxHeight)))\n\n}",
"func PruneProject(baseDir string, lp LockedProject, options PruneOptions, logger *log.Logger) error {\n\tprojectDir := filepath.Join(baseDir, string(lp.Ident().ProjectRoot))\n\n\tif (options & PruneNestedVendorDirs) != 0 {\n\t\tif err := pruneNestedVendorDirs(projectDir); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif (options & PruneUnusedPackages) != 0 {\n\t\tif err := pruneUnusedPackages(lp, projectDir, logger); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to prune unused packages\")\n\t\t}\n\t}\n\n\tif (options & PruneNonGoFiles) != 0 {\n\t\tif err := pruneNonGoFiles(projectDir, logger); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to prune non-Go files\")\n\t\t}\n\t}\n\n\tif (options & PruneGoTestFiles) != 0 {\n\t\tif err := pruneGoTestFiles(projectDir, logger); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to prune Go test files\")\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (obj *GenericDimension) UnPublish(ctx context.Context) error {\n\terr := obj.RPC(ctx, \"UnPublish\", nil)\n\treturn err\n}",
"func (m *Normalizer) InverseTransform(X, Y *mat.Dense) (Xout, Yout *mat.Dense) {\n\tNSamples, NFeatures := X.Dims()\n\tif m.Axis == 0 {\n\t\ttmp := make([]float64, NSamples)\n\t\tfor i := 0; i < NFeatures; i++ {\n\t\t\tmat.Col(tmp, i, X)\n\t\t\tnrm := m.nrmValues[i]\n\t\t\tfloats.Scale(nrm, tmp)\n\t\t\tXout.SetCol(i, tmp)\n\t\t}\n\t} else {\n\t\ttmp := make([]float64, NFeatures)\n\t\tfor i := 0; i < NSamples; i++ {\n\t\t\tmat.Row(tmp, i, X)\n\t\t\tnrm := m.nrmValues[i]\n\t\t\tfloats.Scale(nrm, tmp)\n\t\t\tXout.SetRow(i, tmp)\n\t\t}\n\t}\n\treturn\n}",
"func (p *PointProj) FromAffine(p1 *PointAffine) *PointProj {\n\tp.X.Set(&p1.X)\n\tp.Y.Set(&p1.Y)\n\tp.Z.SetOne()\n\treturn p\n}",
"func (c *Camera) SetupProjection(aspect float32) {\n\tc.ProjMat = types.Perspective4(c.FOV, aspect, 1, 1000)\n\tc.Update()\n}",
"func (p *point) unassignCenter() int {\n\tif p.Center == nil {\n\t\treturn 0\n\t}\n\tif p.Center.Members > 1 {\n\t\t//update members\n\t\tp.Center.Members--\n\t\t//get rid of the old value\n\t\tp.Center.X -= (int64(p.X) - p.Center.X) / p.Center.Members\n\t\tp.Center.Y -= (int64(p.Y) - p.Center.Y) / p.Center.Members\n\t\tp.Center.Z -= (int64(p.Z) - p.Center.Z) / p.Center.Members\n\t} else {\n\t\t//we cannot adjust the location any lower\n\t\tp.Center.Members--\n\t}\n\t//update the points p.Center\n\tp.Center = nil\n\t//TODO return distance that it moved as per optimizations\n\treturn 0\n}",
"func (c *Camera) debugUpdate() {\n\tc.State = gfx.NewState()\n\tc.Shader = shader\n\tc.State.FaceCulling = gfx.BackFaceCulling\n\n\tm := gfx.NewMesh()\n\tm.Primitive = gfx.Lines\n\n\tm.Vertices = []gfx.Vec3{}\n\tm.Colors = []gfx.Color{}\n\n\tnear := float32(c.Near)\n\tfar := float32(c.Far)\n\n\tif c.Ortho {\n\t\twidth := float32(c.View.Dx())\n\t\theight := float32(c.View.Dy())\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{width / 2, 0, height / 2},\n\n\t\t\t// Near\n\t\t\t{0, near, 0},\n\t\t\t{width, near, 0},\n\t\t\t{width, near, height},\n\t\t\t{0, near, height},\n\n\t\t\t// Far\n\t\t\t{0, far, 0},\n\t\t\t{width, far, 0},\n\t\t\t{width, far, height},\n\t\t\t{0, far, height},\n\n\t\t\t{width / 2, far, height / 2},\n\n\t\t\t// Up\n\t\t\t{0, near, height},\n\t\t\t{0, near, height},\n\t\t\t{width, near, height},\n\t\t}\n\t} else {\n\t\tratio := float32(c.View.Dx()) / float32(c.View.Dy())\n\t\tfovRad := c.FOV / 180 * math.Pi\n\n\t\thNear := float32(2 * math.Tan(fovRad/2) * c.Near)\n\t\twNear := hNear * ratio\n\n\t\thFar := float32(2 * math.Tan(fovRad/2) * c.Far)\n\t\twFar := hFar * ratio\n\n\t\tm.Vertices = []gfx.Vec3{\n\t\t\t{0, 0, 0},\n\n\t\t\t// Near\n\t\t\t{-wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, -hNear / 2},\n\t\t\t{wNear / 2, near, hNear / 2},\n\t\t\t{-wNear / 2, near, hNear / 2},\n\n\t\t\t// Far\n\t\t\t{-wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, -hFar / 2},\n\t\t\t{wFar / 2, far, hFar / 2},\n\t\t\t{-wFar / 2, far, hFar / 2},\n\n\t\t\t{0, far, 0},\n\n\t\t\t// Up\n\t\t\t{0, near, hNear},\n\t\t\t{-wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t\t{wNear / 2 * 0.7, near, hNear / 2 * 1.1},\n\t\t}\n\t}\n\n\tm.Colors = []gfx.Color{\n\t\t{1, 1, 1, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 0.67, 0, 1},\n\t\t{1, 1, 1, 1},\n\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t\t{0, 0.67, 1, 1},\n\t}\n\n\tm.Indices = []uint32{\n\t\t// From 0 to near plane\n\t\t0, 1,\n\t\t0, 2,\n\t\t0, 3,\n\t\t0, 4,\n\n\t\t// Near plane\n\t\t1, 2,\n\t\t2, 3,\n\t\t3, 4,\n\t\t4, 1,\n\n\t\t// Far plane\n\t\t5, 6,\n\t\t6, 7,\n\t\t7, 8,\n\t\t8, 5,\n\n\t\t// Lines from near to far plane\n\t\t1, 5,\n\t\t2, 6,\n\t\t3, 7,\n\t\t4, 8,\n\n\t\t0, 9,\n\n\t\t// Up\n\t\t10, 11,\n\t\t11, 12,\n\t\t12, 10,\n\t}\n\n\tc.Meshes = []*gfx.Mesh{m}\n}",
"func optimizePlan(plan logicalPlan) {\n\tfor _, lp := range plan.Inputs() {\n\t\toptimizePlan(lp)\n\t}\n\n\tthis, ok := plan.(*simpleProjection)\n\tif !ok {\n\t\treturn\n\t}\n\n\tinput, ok := this.input.(*simpleProjection)\n\tif !ok {\n\t\treturn\n\t}\n\n\tfor i, col := range this.eSimpleProj.Cols {\n\t\tthis.eSimpleProj.Cols[i] = input.eSimpleProj.Cols[col]\n\t}\n\tthis.input = input.input\n}",
"func (wou *WorkOrderUpdate) ClearProject() *WorkOrderUpdate {\n\twou.clearedProject = true\n\treturn wou\n}",
"func canvasToViewPort(x, y float64, c Canvas) Vector {\n\tfw, fh := float64(c.Width()), float64(c.Height())\n\treturn Vector{(x - fw/2) / fw, (y - fh/2) / fh, 1}\n}",
"func (u UDim) Neg() UDim {\n\treturn UDim{\n\t\tScale: -u.Scale,\n\t\tOffset: -u.Offset,\n\t}\n}",
"func (a Vec4) Dehomogenized() Vec3 {\n\treturn Vec3{a.X / a.W, a.Y / a.W, a.Z / a.W}\n}",
"func (p *Projection) grid(xInterval float64, yInterval float64) {\n l := draw2d.NewGraphicContext(p.img)\n l.SetStrokeColor(color.RGBA{0xEE, 0xEE, 0xEE, 0xFF})\n l.SetLineWidth(0.5)\n\n xCount := p.worldWidth / xInterval\n yCount := p.worldHeight / yInterval\n\n // horizontal lines\n for x := 1.0; x < xCount; x += 1 {\n xx, _ := p.project((x - (xCount / 2)) * xInterval, 0)\n l.MoveTo(xx, 0)\n l.LineTo(xx, p.canvasHeight)\n l.Stroke()\n }\n\n // vertical lines\n for y := 1.0; y < yCount; y += 1 {\n _, yy := p.project(0, (y - (yCount / 2)) * yInterval)\n l.MoveTo(0, yy)\n l.LineTo(p.canvasWidth, yy)\n l.Stroke()\n }\n\n l.SetStrokeColor(color.RGBA{0xAA, 0xAA, 0xAA, 0xFF})\n\n // horiz axis\n l.MoveTo(p.canvasWidth/2, 0)\n l.LineTo(p.canvasWidth/2, p.canvasHeight)\n l.Stroke()\n\n // vert axis\n l.MoveTo(0, p.canvasHeight/2)\n l.LineTo(p.canvasWidth, p.canvasHeight/2)\n l.Stroke()\n}"
] | [
"0.76688826",
"0.6347564",
"0.57971174",
"0.57515115",
"0.54273236",
"0.52502817",
"0.51743394",
"0.48640013",
"0.48253465",
"0.48199877",
"0.47961015",
"0.47573304",
"0.47210604",
"0.47186816",
"0.4718039",
"0.46531895",
"0.4639835",
"0.46380505",
"0.46234328",
"0.4603798",
"0.4593653",
"0.45770988",
"0.45645157",
"0.45298624",
"0.45182312",
"0.445036",
"0.4449817",
"0.44045874",
"0.43733147",
"0.43700266",
"0.4361479",
"0.43515489",
"0.431227",
"0.42830497",
"0.42564753",
"0.42375195",
"0.42351118",
"0.42241234",
"0.4217979",
"0.41806388",
"0.4174569",
"0.41718194",
"0.41688654",
"0.4157075",
"0.41457266",
"0.4145468",
"0.4125434",
"0.41206005",
"0.41189858",
"0.41114566",
"0.40958866",
"0.40825954",
"0.40774146",
"0.40774146",
"0.40654352",
"0.40619192",
"0.40611282",
"0.40604407",
"0.4060362",
"0.40365228",
"0.4035732",
"0.4031252",
"0.40239927",
"0.40239698",
"0.40216565",
"0.40206975",
"0.40001598",
"0.399326",
"0.39929566",
"0.39915723",
"0.39898708",
"0.397459",
"0.39713874",
"0.39686817",
"0.39655644",
"0.3963481",
"0.3950984",
"0.39506498",
"0.39439797",
"0.39363033",
"0.39342764",
"0.39335135",
"0.39203078",
"0.39186674",
"0.39091113",
"0.39084494",
"0.39042878",
"0.39015952",
"0.3898068",
"0.38968498",
"0.38959885",
"0.38927752",
"0.38919643",
"0.38884854",
"0.38748524",
"0.38738146",
"0.38669446",
"0.38634408",
"0.3861864",
"0.3849426"
] | 0.83639854 | 0 |
ComputeDomain computes a domain. | func ComputeDomain(domainType DomainType, forkVersion []byte, genesisValidatorsRoot []byte) ([]byte, error) {
if len(forkVersion) != 4 {
return nil, errors.New("fork version must be 4 bytes in length")
}
if len(genesisValidatorsRoot) != 32 {
return nil, errors.New("genesis validators root must be 32 bytes in length")
}
// Generate fork data root from fork version and genesis validators root.
forkData := &ForkData{
CurrentVersion: forkVersion,
GenesisValidatorsRoot: genesisValidatorsRoot,
}
forkDataRoot, err := forkData.HashTreeRoot()
if err != nil {
return nil, errors.Wrap(err, "failed to generate fork data hash tree root")
}
res := make([]byte, 32)
copy(res[0:4], domainType[:])
copy(res[4:32], forkDataRoot[:])
return res, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GetDomain(fork Fork, epoch Epoch, dom BLSDomain) BLSDomain {\n\t// combine fork version with domain.\n\tv := fork.GetVersion(epoch)\n\treturn BLSDomain(v[0] << 24 | v[1] << 16 | v[2] << 8 | v[3]) + dom\n}",
"func (o GetDomainsResultOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GetDomainsResult) *string { return v.Domain }).(pulumi.StringPtrOutput)\n}",
"func Domain(domainType DomainType, forkVersion []byte, genesisValidatorsRoot []byte) []byte {\n\t// Generate fork data root from fork version and genesis validators root.\n\tforkData := &ForkData{\n\t\tCurrentVersion: forkVersion,\n\t\tGenesisValidatorsRoot: genesisValidatorsRoot,\n\t}\n\tforkDataRoot, err := forkData.HashTreeRoot()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tres := make([]byte, 32)\n\tcopy(res[0:4], domainType[:])\n\tcopy(res[4:32], forkDataRoot[:])\n\n\treturn res\n}",
"func NormaliseDomain(start, end []byte, reverse bool) ([]byte, []byte) {\n\tif reverse {\n\t\tif len(end) == 0 {\n\t\t\tend = []byte{}\n\t\t}\n\t} else {\n\t\tif len(start) == 0 {\n\t\t\tstart = []byte{}\n\t\t}\n\t}\n\treturn start, end\n}",
"func (spec *MachineSpec) Domain() string {\n\treturn fmt.Sprintf(\"%s.%s.%s\", spec.Machine.Uid, spec.Machine.Credential, dnsZones[spec.env()])\n}",
"func NewDomain(m, depth uint64, precomputeReversedTable bool) *Domain {\n\n\t// generator of the largest 2-adic subgroup\n\tvar rootOfUnity fr.Element\n\n\trootOfUnity.SetString(\"4991787701895089137426454739366935169846548798279261157172811661565882460884369603588700158257\")\n\tconst maxOrderRoot uint64 = 20\n\n\tdomain := &Domain{}\n\tx := ecc.NextPowerOfTwo(m)\n\tdomain.Cardinality = uint64(x)\n\tdomain.Depth = depth\n\tif precomputeReversedTable {\n\t\tdomain.PrecomputeReversedTable = 1\n\t}\n\n\t// find generator for Z/2^(log(m))Z and Z/2^(log(m)+cosets)Z\n\tlogx := uint64(bits.TrailingZeros64(x))\n\tif logx > maxOrderRoot {\n\t\tpanic(fmt.Sprintf(\"m (%d) is too big: the required root of unity does not exist\", m))\n\t}\n\tlogGen := logx + depth\n\tif logGen > maxOrderRoot {\n\t\tpanic(\"log(m) + cosets is too big: the required root of unity does not exist\")\n\t}\n\n\texpo := uint64(1 << (maxOrderRoot - logGen))\n\tbExpo := new(big.Int).SetUint64(expo)\n\tdomain.FinerGenerator.Exp(rootOfUnity, bExpo)\n\tdomain.FinerGeneratorInv.Inverse(&domain.FinerGenerator)\n\n\t// Generator = FinerGenerator^2 has order x\n\texpo = uint64(1 << (maxOrderRoot - logx))\n\tbExpo.SetUint64(expo)\n\tdomain.Generator.Exp(rootOfUnity, bExpo) // order x\n\tdomain.GeneratorInv.Inverse(&domain.Generator)\n\tdomain.CardinalityInv.SetUint64(uint64(x)).Inverse(&domain.CardinalityInv)\n\n\t// twiddle factors\n\tdomain.preComputeTwiddles()\n\n\t// store the bit reversed coset tables if needed\n\tif depth > 0 && precomputeReversedTable {\n\t\tdomain.reverseCosetTables()\n\t}\n\n\treturn domain\n}",
"func (d *Domain) Validate(v validate.Validator) validate.Error {\n\t//func IsDomain(p []byte) (res validate.Result) {\n\t// Domain rules:\n\t// - 255 character total length max\n\t// - 63 character label max\n\t// - 127 sub-domains\n\t// - Characters a-z, A-Z, 0-9, and -\n\t// - Labels may not start or end with -\n\t// - TLD may not be all numeric\n\n\t// Check for max length.\n\t// NOTE: Invalid unicode will count as a 1 byte rune, but we'll catch that\n\t// later.\n\n\tp := d.domain\n\t// If a max length was specified, use it\n\tif d.checks[\"maxlength\"] != nil &&\n\t\tutf8.RuneCount(p) > d.checks[\"maxlength\"].(int) {\n\t\treturn ErrDomainLength\n\n\t} else if utf8.RuneCount(p) > 255 {\n\t\treturn ErrDomainLength\n\t}\n\n\t// First we split by label\n\tdomain := bytes.Split(p, []byte(\".\"))\n\t// 127 sub-domains max (not including TLD)\n\tif len(domain) > 128 {\n\t\treturn ErrDomainLength\n\t}\n\n\tif d.checks[\"minsubs\"] != nil && len(domain) < d.checks[\"minsubs\"].(int)+1 {\n\t\treturn ErrDomainLength\n\t}\n\tif d.checks[\"maxsubs\"] != nil && len(domain) > d.checks[\"maxsubs\"].(int)+1 {\n\t\treturn ErrDomainLength\n\t}\n\t// Check each domain for valid characters\n\tfor _, subDomain := range domain {\n\t\tlength := len(subDomain)\n\t\t// Check for a domain with two periods next to eachother.\n\t\tif length < 1 {\n\t\t\treturn ErrFormatting\n\t\t}\n\n\t\t// Check 63 character max.\n\t\tif length > 63 {\n\t\t\treturn ErrDomainLength\n\t\t}\n\n\t\t// Check that label doesn't start or end with hyphen.\n\t\tr, size := utf8.DecodeRune(subDomain)\n\t\tif r == utf8.RuneError && size == 1 {\n\t\t\t// Invalid rune\n\t\t\treturn validate.ErrInvalidUTF8\n\t\t}\n\n\t\tif r == '-' {\n\t\t\treturn ErrFormatting\n\t\t}\n\n\t\tr, size = utf8.DecodeLastRune(subDomain)\n\t\tif r == utf8.RuneError && size == 1 {\n\t\t\t// Invalid rune\n\t\t\treturn validate.ErrInvalidUTF8\n\t\t}\n\n\t\tif r == '-' {\n\t\t\treturn ErrFormatting\n\t\t}\n\n\t\t// Now we check each rune individually to make sure its valid unicode\n\t\t// and an acceptable character.\n\t\tfor i := 0; i < length; {\n\t\t\tif subDomain[i] < utf8.RuneSelf {\n\t\t\t\t// Check if it's a valid domain character\n\t\t\t\tif !unicode.Is(domainTable, rune(subDomain[i])) {\n\t\t\t\t\treturn ErrFormatting\n\t\t\t\t}\n\t\t\t\ti++\n\t\t\t} else {\n\t\t\t\tr, size := utf8.DecodeRune(subDomain[i:])\n\t\t\t\tif size == 1 {\n\t\t\t\t\t// All valid runes of size 1 (those\n\t\t\t\t\t// below RuneSelf) were handled above.\n\t\t\t\t\t// This must be a RuneError.\n\t\t\t\t\treturn validate.ErrInvalidUTF8\n\t\t\t\t}\n\t\t\t\t// Check if it's a valid domain character\n\t\t\t\tif !unicode.Is(domainTable, r) {\n\t\t\t\t\treturn ErrFormatting\n\t\t\t\t}\n\t\t\t\ti += size\n\t\t\t}\n\t\t}\n\t}\n\n\t// We have all valid unicode characters, now make sure the TLD is real.\n\t// TODO(inhies): Add check for an all numeric TLD.\n\tdomainTLD := domain[len(domain)-1]\n\tif tld.Valid(domainTLD) {\n\t\treturn nil\n\t}\n\n\t// Not sure how we got here, but lets return false just in case.\n\treturn ErrUnknown\n}",
"func (o LookupDatasetGroupResultOutput) Domain() DatasetGroupDomainPtrOutput {\n\treturn o.ApplyT(func(v LookupDatasetGroupResult) *DatasetGroupDomain { return v.Domain }).(DatasetGroupDomainPtrOutput)\n}",
"func (r *DomainIdentity) Domain() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"domain\"])\n}",
"func (c *DomainController) Domain() types.Domain {\n\tif err := c.requireDomain(); err != nil {\n\t\tpanic(\"get domain without running existence checks is not allowed\")\n\t}\n\treturn *c.domain\n}",
"func (pub *Publisher) Domain() string {\n\tident, _ := pub.registeredIdentity.GetFullIdentity()\n\treturn ident.Domain\n}",
"func (o DnsRecordOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DnsRecord) pulumi.StringOutput { return v.Domain }).(pulumi.StringOutput)\n}",
"func Domain(named Named) string {\n\tif r, ok := named.(namedRepository); ok {\n\t\treturn r.Domain()\n\t}\n\tdomain, _ := splitDomain(named.Name())\n\treturn domain\n}",
"func RunDomainCreate(ns string, config doit.Config, out io.Writer, args []string) error {\n\tif len(args) != 1 {\n\t\treturn doit.NewMissingArgsErr(ns)\n\t}\n\tdomainName := args[0]\n\n\tclient := config.GetGodoClient()\n\n\tipAddress, err := config.GetString(ns, \"ip-address\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treq := &godo.DomainCreateRequest{\n\t\tName: domainName,\n\t\tIPAddress: ipAddress,\n\t}\n\n\td, _, err := client.Domains.Create(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn displayOutput(&domain{domains: domains{*d}}, out)\n}",
"func (l *Libvirt) DomainCreate(Dom Domain) (err error) {\n\tvar buf []byte\n\n\targs := DomainCreateArgs {\n\t\tDom: Dom,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(9, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func (r *Reader) Domain(ipAddress net.IP) (*Domain, error) {\n\tif isDomain&r.databaseType == 0 {\n\t\treturn nil, InvalidMethodError{\"Domain\", r.Metadata().DatabaseType}\n\t}\n\tvar val Domain\n\terr := r.mmdbReader.Lookup(ipAddress, &val)\n\treturn &val, err\n}",
"func (p *prod) ManagedDomain(domain string) (string, error) {\n\tif domain == \"\" ||\n\t\tstrings.HasPrefix(domain, \".\") ||\n\t\tstrings.HasSuffix(domain, \".\") {\n\t\t// belt and braces: validation should already prevent this\n\t\treturn \"\", fmt.Errorf(\"invalid domain %q\", domain)\n\t}\n\n\tdomain = strings.TrimSuffix(domain, \".\"+p.Domain())\n\tif strings.ContainsRune(domain, '.') {\n\t\treturn \"\", nil\n\t}\n\treturn domain + \".\" + p.Domain(), nil\n}",
"func (o ObjectAccessControlResponseOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlResponse) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func (o AccountActiveDirectoryOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AccountActiveDirectory) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func NewDomain(domainMatrix *mat64.Dense) *Domain {\n\n\t// get domain size\n\trows, cols := domainMatrix.Dims()\n\n\t// compute band count\n\tbandCount := 2 + (int(math.Floor(math.Sqrt(math.Pow(float64(rows), 2.0)+math.Pow(float64(cols), 2.0)))) / 142)\n\n\t//return output\n\treturn &Domain{\n\t\tRows: rows,\n\t\tCols: cols,\n\t\tMatrix: domainMatrix,\n\t\tBndCnt: bandCount,\n\t}\n}",
"func GetDomain(err error) Domain { return domains.GetDomain(err) }",
"func GetDomain(err error) Domain { return domains.GetDomain(err) }",
"func (m *cassandraMetadataPersistence) CreateDomain(request *p.InternalCreateDomainRequest) (*p.CreateDomainResponse, error) {\n\tquery := m.session.Query(templateCreateDomainQuery, request.Info.ID, request.Info.Name)\n\tapplied, err := query.MapScanCAS(make(map[string]interface{}))\n\tif err != nil {\n\t\treturn nil, &workflow.InternalServiceError{\n\t\t\tMessage: fmt.Sprintf(\"CreateDomain operation failed. Inserting into domains table. Error: %v\", err),\n\t\t}\n\t}\n\tif !applied {\n\t\treturn nil, &workflow.InternalServiceError{\n\t\t\tMessage: fmt.Sprintf(\"CreateDomain operation failed because of uuid collision.\"),\n\t\t}\n\t}\n\n\tquery = m.session.Query(templateCreateDomainByNameQuery,\n\t\trequest.Info.Name,\n\t\trequest.Info.ID,\n\t\trequest.Info.Name,\n\t\trequest.Info.Status,\n\t\trequest.Info.Description,\n\t\trequest.Info.OwnerEmail,\n\t\trequest.Info.Data,\n\t\trequest.Config.Retention,\n\t\trequest.Config.EmitMetric,\n\t\trequest.Config.ArchivalBucket,\n\t\trequest.Config.ArchivalStatus,\n\t\trequest.Config.HistoryArchivalStatus,\n\t\trequest.Config.HistoryArchivalURI,\n\t\trequest.Config.VisibilityArchivalStatus,\n\t\trequest.Config.VisibilityArchivalURI,\n\t\trequest.Config.BadBinaries.Data,\n\t\tstring(request.Config.BadBinaries.GetEncoding()),\n\t\trequest.ReplicationConfig.ActiveClusterName,\n\t\tp.SerializeClusterConfigs(request.ReplicationConfig.Clusters),\n\t\trequest.IsGlobalDomain,\n\t\trequest.ConfigVersion,\n\t\trequest.FailoverVersion,\n\t)\n\n\tprevious := make(map[string]interface{})\n\tapplied, err = query.MapScanCAS(previous)\n\n\tif err != nil {\n\t\treturn nil, &workflow.InternalServiceError{\n\t\t\tMessage: fmt.Sprintf(\"CreateDomain operation failed. Inserting into domains_by_name table. Error: %v\", err),\n\t\t}\n\t}\n\n\tif !applied {\n\t\t// Domain already exist. Delete orphan domain record before returning back to user\n\t\tif errDelete := m.session.Query(templateDeleteDomainQuery, request.Info.ID).Exec(); errDelete != nil {\n\t\t\tm.logger.Warn(\"Unable to delete orphan domain record\", tag.Error(errDelete))\n\t\t}\n\n\t\tif domain, ok := previous[\"domain\"].(map[string]interface{}); ok {\n\t\t\tmsg := fmt.Sprintf(\"Domain already exists. DomainId: %v\", domain[\"id\"])\n\t\t\treturn nil, &workflow.DomainAlreadyExistsError{\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\treturn nil, &workflow.DomainAlreadyExistsError{\n\t\t\tMessage: fmt.Sprintf(\"CreateDomain operation failed because of conditional failure.\"),\n\t\t}\n\t}\n\n\treturn &p.CreateDomainResponse{ID: request.Info.ID}, nil\n}",
"func AnalyzeDomain(domain string) []finding.Finding {\n\tvar findings []finding.Finding\n\n\trdapDomainTests(domain, &findings)\n\treturn findings\n}",
"func (input *BeegoInput) Domain() string {\n\treturn input.Host()\n}",
"func (p *PrincipalMock) Domain() string {\n\treturn p.DomainFunc()\n}",
"func (r commonResult) ExtractDomain() (*Domain, error) {\n\tvar s struct {\n\t\tDomain *Domain `json:\"domain\"`\n\t}\n\terr := r.ExtractInto(&s)\n\treturn s.Domain, err\n}",
"func NewDomain(s string) E {\n\treturn newInfrastructure(s, 1)\n}",
"func (o AppMonitorOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AppMonitor) pulumi.StringOutput { return v.Domain }).(pulumi.StringOutput)\n}",
"func (m *Mutator) MutateDomain(awsCluster infrastructurev1alpha3.AWSCluster) ([]mutator.PatchOperation, error) {\n\tvar result []mutator.PatchOperation\n\tif awsCluster.Spec.Cluster.DNS.Domain == \"\" {\n\t\t// If the dns domain is not set, we default here\n\t\tm.Log(\"level\", \"debug\", \"message\", fmt.Sprintf(\"AWSCluster %s DNS domain is not set and will be defaulted to %s\",\n\t\t\tawsCluster.ObjectMeta.Name,\n\t\t\tm.dnsDomain),\n\t\t)\n\t\tpatch := mutator.PatchAdd(\"/spec/cluster/dns/domain\", m.dnsDomain)\n\t\tresult = append(result, patch)\n\t}\n\treturn result, nil\n}",
"func (o BucketAccessControlResponseOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketAccessControlResponse) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func (d *WindowsDesktopV3) GetDomain() string {\n\treturn d.Spec.Domain\n}",
"func (db *cdb) SelectDomain(\n\tctx context.Context,\n\tdomainID *string,\n\tdomainName *string,\n) (*nosqlplugin.DomainRow, error) {\n\tif domainID != nil && domainName != nil {\n\t\treturn nil, fmt.Errorf(\"GetDomain operation failed. Both ID and Name specified in request\")\n\t} else if domainID == nil && domainName == nil {\n\t\treturn nil, fmt.Errorf(\"GetDomain operation failed. Both ID and Name are empty\")\n\t}\n\n\tvar query gocql.Query\n\tvar err error\n\tif domainID != nil {\n\t\tquery = db.session.Query(templateGetDomainQuery, domainID).WithContext(ctx)\n\t\terr = query.Scan(&domainName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tinfo := &p.DomainInfo{}\n\tconfig := &nosqlplugin.NoSQLInternalDomainConfig{}\n\treplicationConfig := &p.DomainReplicationConfig{}\n\n\t// because of encoding/types, we can't directly read from config struct\n\tvar badBinariesData []byte\n\tvar badBinariesDataEncoding string\n\tvar replicationClusters []map[string]interface{}\n\n\tvar failoverNotificationVersion int64\n\tvar notificationVersion int64\n\tvar failoverVersion int64\n\tvar previousFailoverVersion int64\n\tvar failoverEndTime int64\n\tvar lastUpdatedTime int64\n\tvar configVersion int64\n\tvar isGlobalDomain bool\n\tvar retentionDays int32\n\n\tquery = db.session.Query(templateGetDomainByNameQueryV2, constDomainPartition, domainName).WithContext(ctx)\n\terr = query.Scan(\n\t\t&info.ID,\n\t\t&info.Name,\n\t\t&info.Status,\n\t\t&info.Description,\n\t\t&info.OwnerEmail,\n\t\t&info.Data,\n\t\t&retentionDays,\n\t\t&config.EmitMetric,\n\t\t&config.ArchivalBucket,\n\t\t&config.ArchivalStatus,\n\t\t&config.HistoryArchivalStatus,\n\t\t&config.HistoryArchivalURI,\n\t\t&config.VisibilityArchivalStatus,\n\t\t&config.VisibilityArchivalURI,\n\t\t&badBinariesData,\n\t\t&badBinariesDataEncoding,\n\t\t&replicationConfig.ActiveClusterName,\n\t\t&replicationClusters,\n\t\t&isGlobalDomain,\n\t\t&configVersion,\n\t\t&failoverVersion,\n\t\t&failoverNotificationVersion,\n\t\t&previousFailoverVersion,\n\t\t&failoverEndTime,\n\t\t&lastUpdatedTime,\n\t\t¬ificationVersion,\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.BadBinaries = p.NewDataBlob(badBinariesData, common.EncodingType(badBinariesDataEncoding))\n\tconfig.Retention = common.DaysToDuration(retentionDays)\n\treplicationConfig.Clusters = p.DeserializeClusterConfigs(replicationClusters)\n\n\tdr := &nosqlplugin.DomainRow{\n\t\tInfo: info,\n\t\tConfig: config,\n\t\tReplicationConfig: replicationConfig,\n\t\tConfigVersion: configVersion,\n\t\tFailoverVersion: failoverVersion,\n\t\tFailoverNotificationVersion: failoverNotificationVersion,\n\t\tPreviousFailoverVersion: previousFailoverVersion,\n\t\tNotificationVersion: notificationVersion,\n\t\tLastUpdatedTime: time.Unix(0, lastUpdatedTime),\n\t\tIsGlobalDomain: isGlobalDomain,\n\t}\n\tif failoverEndTime > emptyFailoverEndTime {\n\t\tdr.FailoverEndTime = common.TimePtr(time.Unix(0, failoverEndTime))\n\t}\n\n\treturn dr, nil\n}",
"func NewDomain(domain string) *Domain {\n\td := Domain{\n\t\tdomain: []byte(domain),\n\t\tchecks: make(map[string]interface{}),\n\t}\n\treturn &d\n}",
"func (s *PollForDecisionTaskInput) SetDomain(v string) *PollForDecisionTaskInput {\n\ts.Domain = &v\n\treturn s\n}",
"func NewDomain(ctx *pulumi.Context,\n\tname string, args *DomainArgs, opts ...pulumi.ResourceOption) (*Domain, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.DomainName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'DomainName'\")\n\t}\n\tif args.Sources == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Sources'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Domain\n\terr := ctx.RegisterResource(\"alicloud:dcdn/domain:Domain\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (c *CompanyAPI) Domain(value string, webhook *Webhook) (*CompanyResponse, error) {\n\tif !strings.Contains(value, \".\") { // Instead of a regex let's at least check if there's at least a period in the domain\n\t\treturn nil, fmt.Errorf(\"%s. Invalid domain: %s\", errLibrary, value)\n\t}\n\tr, err := c.get(\"domain\", value, \"company/lookup\", webhook)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.do(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar response CompanyResponse\n\tdecoder := json.NewDecoder(resp.Body)\n\tdefer resp.Body.Close()\n\terr = decoder.Decode(&response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response, nil\n}",
"func (l *Libvirt) DomainGetVcpus(Dom Domain, Maxinfo int32, Maplen int32) (rInfo []VcpuInfo, rCpumaps []byte, err error) {\n\tvar buf []byte\n\n\targs := DomainGetVcpusArgs {\n\t\tDom: Dom,\n\t\tMaxinfo: Maxinfo,\n\t\tMaplen: Maplen,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar r response\n\tr, err = l.requestStream(20, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Return value unmarshaling\n\ttpd := typedParamDecoder{}\n\tct := map[string]xdr.TypeDecoder{\"libvirt.TypedParam\": tpd}\n\trdr := bytes.NewReader(r.Payload)\n\tdec := xdr.NewDecoderCustomTypes(rdr, 0, ct)\n\t// Info: []VcpuInfo\n\t_, err = dec.Decode(&rInfo)\n\tif err != nil {\n\t\treturn\n\t}\n\t// Cpumaps: []byte\n\t_, err = dec.Decode(&rCpumaps)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func WithDomain(err error, domain Domain) error { return domains.WithDomain(err, domain) }",
"func WithDomain(err error, domain Domain) error { return domains.WithDomain(err, domain) }",
"func (v *MatchingListTaskListPartitionsRequest) GetDomain() (o string) {\n\tif v != nil && v.Domain != nil {\n\t\treturn *v.Domain\n\t}\n\treturn\n}",
"func NewDomain(name string) (*Domain, error) {\n\tvar domain Domain\n\n\tdomain.Name = name\n\tdomain.Components = map[string]*Component{}\n\tdomain.ComponentsX = sync.RWMutex{}\n\tdomain.Architectures = map[string]*Architecture{}\n\tdomain.ArchitecturesX = sync.RWMutex{}\n\tdomain.Solutions = map[string]*Solution{}\n\tdomain.SolutionsX = sync.RWMutex{}\n\tdomain.Tasks = map[string]*Task{}\n\tdomain.TasksX = sync.RWMutex{}\n\tdomain.Events = map[string]*Event{}\n\tdomain.EventsX = sync.RWMutex{}\n\n\t// success\n\treturn &domain, nil\n}",
"func NewDomain(value string) *Domain {\n\tthis := Domain{}\n\tthis.Value = value\n\treturn &this\n}",
"func NewDomain(dom string) *Domain {\n\treturn &Domain{dom}\n}",
"func (a *Client) DomainCreate(params *DomainCreateParams) (*DomainCreateOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDomainCreateParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"domainCreate\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/domain\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &DomainCreateReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*DomainCreateOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for domainCreate: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (a *app) NewDomain(name string, delegate Delegate) Domain {\n\tDebug(\"Creating domain %s\", name)\n\td := &domain{\n\t\tapp: a,\n\t\tDelegate: delegate,\n\t\tname: name,\n\t\tjoined: false,\n\t\tsubscriptions: make(map[uint]*boundEndpoint),\n\t\tregistrations: make(map[uint]*boundEndpoint),\n\t}\n\n\t// TODO: trigger onJoin if the superdomain has joined\n\n\ta.domains = append(a.domains, d)\n\treturn d\n}",
"func (m MySqlStorage) UpdateDomain(d Domain) (dom Domain, err error) {\n\t_, err = m.db.Exec(\"UPDATE ZNS_domains SET d_zid=?, d_name=?, d_txt=? WHERE d_id=? LIMIT 1\", d.ZoneID, d.Name, d.Txt, d.Id)\n\tif err != nil {\n\t\treturn Domain{}, err\n\t}\n\n\treturn m.r.fetchDomainById(d.Id)\n}",
"func (x WorkloadEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func CmdCreateDomain(c *cli.Context) error {\n\tctx := c.Context\n\tsess := edgegrid.GetSession(ctx)\n\tclient := gtm.Client(sess)\n\n\t// tfWorkPath is a target directory for generated terraform resources\n\tvar tfWorkPath = \"./\"\n\tif c.IsSet(\"tfworkpath\") {\n\t\ttfWorkPath = c.String(\"tfworkpath\")\n\t}\n\n\tdatacentersPath := filepath.Join(tfWorkPath, \"datacenters.tf\")\n\tdomainPath := filepath.Join(tfWorkPath, \"domain.tf\")\n\timportPath := filepath.Join(tfWorkPath, \"import.sh\")\n\tmapsPath := filepath.Join(tfWorkPath, \"maps.tf\")\n\tpropertiesPath := filepath.Join(tfWorkPath, \"properties.tf\")\n\tresourcesPath := filepath.Join(tfWorkPath, \"resources.tf\")\n\tvariablesPath := filepath.Join(tfWorkPath, \"variables.tf\")\n\n\ttemplateToFile := map[string]string{\n\t\t\"datacenters.tmpl\": datacentersPath,\n\t\t\"domain.tmpl\": domainPath,\n\t\t\"imports.tmpl\": importPath,\n\t\t\"maps.tmpl\": mapsPath,\n\t\t\"properties.tmpl\": propertiesPath,\n\t\t\"resources.tmpl\": resourcesPath,\n\t\t\"variables.tmpl\": variablesPath,\n\t}\n\n\terr := tools.CheckFiles(datacentersPath, domainPath, importPath, mapsPath, propertiesPath, resourcesPath, variablesPath)\n\tif err != nil {\n\t\treturn cli.Exit(color.RedString(err.Error()), 1)\n\t}\n\n\tprocessor := templates.FSTemplateProcessor{\n\t\tTemplatesFS: templateFiles,\n\t\tTemplateTargets: templateToFile,\n\t\tAdditionalFuncs: template.FuncMap{\n\t\t\t\"normalize\": normalizeResourceName,\n\t\t\t\"toUpper\": strings.ToUpper,\n\t\t\t\"isDefaultDC\": isDefaultDatacenter,\n\t\t\t\"escapeString\": tools.EscapeQuotedStringLit,\n\t\t},\n\t}\n\n\tdomainName := c.Args().First()\n\tsection := edgegrid.GetEdgercSection(c)\n\tif err := createDomain(ctx, client, domainName, section, processor); err != nil {\n\t\treturn cli.Exit(color.RedString(fmt.Sprintf(\"Error exporting domain HCL: %s\", err)), 1)\n\t}\n\treturn nil\n}",
"func (o DataSourceAmazonElasticsearchParametersOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DataSourceAmazonElasticsearchParameters) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func (this *NurbsCurve) Domain() (min, max float64) {\n\tmin = this.knots[0]\n\tmax = this.knots[len(this.knots)-1]\n\treturn\n}",
"func (s *Server) Domain(domainName string) (Domain, error) {\n\tdomain := Domain{}\n\n\tdomainExists, err := s.domainExists(domainName)\n\tif err != nil {\n\t\treturn domain, err\n\t}\n\tif !domainExists {\n\t\treturn domain, fmt.Errorf(\"Domain %s doesn't exist\", domainName)\n\t}\n\n\tdomaines, err := s.domainQuery(domainQueryByDomain, domainName)\n\tif err != nil {\n\t\treturn domain, err\n\t}\n\n\tif len(domaines) == 0 {\n\t\treturn domain, fmt.Errorf(\"Domain not found\")\n\t}\n\n\treturn domaines[0], nil\n}",
"func (*XMLDocument) Domain() (domain string) {\n\tmacro.Rewrite(\"$_.domain\")\n\treturn domain\n}",
"func (o ApplicationOutput) GcrDomain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Application) pulumi.StringOutput { return v.GcrDomain }).(pulumi.StringOutput)\n}",
"func (l *Libvirt) DomainDestroy(Dom Domain) (err error) {\n\tvar buf []byte\n\n\targs := DomainDestroyArgs {\n\t\tDom: Dom,\n\t}\n\n\tbuf, err = encode(&args)\n\tif err != nil {\n\t\treturn\n\t}\n\n\n\t_, err = l.requestStream(12, constants.Program, buf, nil, nil)\n\tif err != nil {\n\t\treturn\n\t}\n\n\treturn\n}",
"func NewDomain(name ...string) *Domain {\n\td := new(Domain)\n\tif len(name) > 0 {\n\t\td.SetName(name[0])\n\t}\n\treturn d\n}",
"func GetDomain(store kv.Storage) (*domain.Domain, error) {\n\treturn domap.Get(store)\n}",
"func (c *FakeAthenzDomains) Create(athenzDomain *athenz_v1.AthenzDomain) (result *athenz_v1.AthenzDomain, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewCreateAction(athenzdomainsResource, c.ns, athenzDomain), &athenz_v1.AthenzDomain{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*athenz_v1.AthenzDomain), err\n}",
"func (s *RecommendationJobContainerConfig) SetDomain(v string) *RecommendationJobContainerConfig {\n\ts.Domain = &v\n\treturn s\n}",
"func (ctx *Context) Domain() string {\r\n\tparts := strings.Split(ctx.Host(), \".\")\r\n\tif len(parts) >= 3 {\r\n\t\treturn strings.Join(parts[len(parts)-2:], \".\")\r\n\t}\r\n\treturn \"localhost\"\r\n}",
"func NewDomain(domain string) (Domain, error) {\n\tvar p *idna.Profile\n\tp = idna.New()\n\tidnDomain, e := p.ToASCII(domain)\n\tif e != nil {\n\t\treturn Domain{}, e\n\t}\n\n\teTLD, icann := publicsuffix.PublicSuffix(idnDomain)\n\tif icann == false {\n\t\treturn Domain{}, errors.New(\"Domain not valid ICANN\")\n\t}\n\n\tdt := Domain{}\n\n\tdt.ASCII, _ = publicsuffix.EffectiveTLDPlusOne(idnDomain)\n\tdt.TLDASCII = eTLD\n\n\tdt.Unicode, _ = p.ToUnicode(dt.ASCII)\n\tdt.TLDUnicode, _ = p.ToUnicode(eTLD)\n\n\treturn dt, nil\n}",
"func (o ServerGroupHealthCheckOutput) HealthCheckDomain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ServerGroupHealthCheck) *string { return v.HealthCheckDomain }).(pulumi.StringPtrOutput)\n}",
"func (c *FakeAthenzDomains) Update(athenzDomain *athenz_v1.AthenzDomain) (result *athenz_v1.AthenzDomain, err error) {\n\tobj, err := c.Fake.\n\t\tInvokes(testing.NewUpdateAction(athenzdomainsResource, c.ns, athenzDomain), &athenz_v1.AthenzDomain{})\n\n\tif obj == nil {\n\t\treturn nil, err\n\t}\n\treturn obj.(*athenz_v1.AthenzDomain), err\n}",
"func Query(args ...interface{}) interface{} {\n\n\tdomain := args[0].(string)\n\tstate := args[1].(*helper.State)\n\n\t// Make a http request to CertDB\n\tresp, err := helper.GetHTTPResponse(\"https://www.threatminer.org/getData.php?e=subdomains_container&q=\"+domain+\"&t=0&rt=10&p=1\", state.Timeout)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nthreatminer: %v\\n\", err)\n\t\treturn subdomains\n\t}\n\n\t// Get the response body\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tfmt.Printf(\"\\nthreatminer: %v\\n\", err)\n\t\treturn subdomains\n\t}\n\n\tsrc := string(body)\n\n\t// Parse Subdomains found\n\tRegex, _ := regexp.Compile(\"\\\"domain\\\\.php\\\\?q=([a-zA-Z0-9\\\\*_.-]+\\\\.\" + domain + \")\")\n\tmatch := Regex.FindAllStringSubmatch(src, -1)\n\n\tfor _, m := range match {\n\n\t\t// First Capturing group\n\t\tsubdomain := m[1]\n\n\t\tif state.Verbose == true {\n\t\t\tif state.Color == true {\n\t\t\t\tfmt.Printf(\"\\n[%sTHREATMINER%s] %s\", helper.Red, helper.Reset, subdomain)\n\t\t\t} else {\n\t\t\t\tfmt.Printf(\"\\n[THREATMINER] %s\", subdomains)\n\t\t\t}\n\t\t}\n\n\t\tsubdomains = append(subdomains, subdomain)\n\t}\n\n\treturn subdomains\n}",
"func (d *DB) Get_domain(ipaddress string) (IP2Locationrecord, error) {\n\treturn d.query(ipaddress, domain)\n}",
"func (rp *ResolverPool) SubdomainToDomain(name string) string {\n\trp.domainLock.Lock()\n\tdefer rp.domainLock.Unlock()\n\n\tvar domain string\n\t// Obtain all parts of the subdomain name\n\tlabels := strings.Split(strings.TrimSpace(name), \".\")\n\t// Check the cache for all parts of the name\n\tfor i := len(labels); i >= 0; i-- {\n\t\tsub := strings.Join(labels[i:], \".\")\n\n\t\tif _, ok := rp.domainCache[sub]; ok {\n\t\t\tdomain = sub\n\t\t\tbreak\n\t\t}\n\t}\n\tif domain != \"\" {\n\t\treturn domain\n\t}\n\t// Check the DNS for all parts of the name\n\tfor i := 0; i < len(labels)-1; i++ {\n\t\tsub := strings.Join(labels[i:], \".\")\n\n\t\tif ns, _, err := rp.Resolve(context.TODO(), sub, \"NS\", PriorityHigh); err == nil {\n\t\t\tpieces := strings.Split(ns[0].Data, \",\")\n\t\t\trp.domainCache[pieces[0]] = struct{}{}\n\t\t\tdomain = pieces[0]\n\t\t\tbreak\n\t\t}\n\t}\n\treturn domain\n}",
"func (s *Handler) CreateDomain(d string) error {\n\n\tcollection := s.DB.Database(\"go-telegram-bot-base-bot\").Collection(\"domains\")\n\n\ts.SetDomain(d)\n\tinsertResult, err := collection.InsertOne(context.TODO(), s.Domain)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tfmt.Println(\"Inserted multiple documents: \", insertResult)\n\n\treturn nil\n}",
"func (r *Bucket) WebsiteDomain() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"websiteDomain\"])\n}",
"func (d *DHCPv4) DomainSearch() *rfc1035label.Labels {\n\tv := d.Options.Get(OptionDNSDomainSearchList)\n\tif v == nil {\n\t\treturn nil\n\t}\n\tlabels, err := rfc1035label.FromBytes(v)\n\tif err != nil {\n\t\treturn nil\n\t}\n\treturn labels\n}",
"func (s *CreateModelPackageInput) SetDomain(v string) *CreateModelPackageInput {\n\ts.Domain = &v\n\treturn s\n}",
"func NewDomain(domain string) Domain {\n\treturn Domain{\n\t\tDomainName: domain,\n\t\tServersChanged: \"\",\n\t\tSslGrade: \"\",\n\t\tPreviousSslGrade: \"\",\n\t\tLogo: \"\",\n\t\tTitle: \"\",\n\t\tIsDown: false,\n\t\tState: \"I\"}\n}",
"func (d *Domain) Create() error {\n\treq := libvirt.RemoteDomainCreateReq{Domain: d.RemoteDomain}\n\n\tbuf, err := encode(&req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tresp, err := d.l.send(libvirt.RemoteProcDomainCreate, 0, libvirt.MessageTypeCall, libvirt.RemoteProgram, libvirt.MessageStatusOK, &buf)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := <-resp\n\tif r.Header.Status != libvirt.MessageStatusOK {\n\t\treturn decodeError(r.Payload)\n\t}\n\n\treturn nil\n}",
"func (s *DescribeModelPackageOutput) SetDomain(v string) *DescribeModelPackageOutput {\n\ts.Domain = &v\n\treturn s\n}",
"func (x GenericInfrastructureEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func (r Dns_Domain) GetByDomainName(name *string) (resp []datatypes.Dns_Domain, err error) {\n\tparams := []interface{}{\n\t\tname,\n\t}\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain\", \"getByDomainName\", params, &r.Options, &resp)\n\treturn\n}",
"func (x SyntheticMonitorEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func (o ObjectAccessControlTypeOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlType) *string { return v.Domain }).(pulumi.StringPtrOutput)\n}",
"func (x InfrastructureHostEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func normalizeDomain(domain string) (string, error) {\n\tdomain = strings.Trim(strings.ToLower(domain), \" \")\n\t// not checking if it belongs to icann\n\tsuffix, _ := publicsuffix.PublicSuffix(domain)\n\tif domain != \"\" && suffix == domain { // input is publicsuffix\n\t\treturn \"\", errors.New(\"domain [\" + domain + \"] is public suffix\")\n\t}\n\tif !strings.HasPrefix(domain, \"http\") {\n\t\tdomain = fmt.Sprintf(\"http://%s\", domain)\n\t}\n\turl, err := url.Parse(domain)\n\tif nil == err && url.Host != \"\" {\n\t\treturn strings.Replace(url.Host, \"www.\", \"\", 1), nil\n\t}\n\treturn \"\", err\n}",
"func (o NetworkOutput) DnsDomain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Network) pulumi.StringOutput { return v.DnsDomain }).(pulumi.StringOutput)\n}",
"func (a *Client) DomainUpdate(params *DomainUpdateParams) (*DomainUpdateOK, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewDomainUpdateParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"domainUpdate\",\n\t\tMethod: \"PUT\",\n\t\tPathPattern: \"/domain/{name}\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &DomainUpdateReader{formats: a.formats},\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*DomainUpdateOK)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for domainUpdate: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func domainConfig(cfg *admin.CertificateAuthorityConfig, domain string) *admin.DomainConfig {\n\tfor _, domainCfg := range cfg.KnownDomains {\n\t\tfor _, domainInCfg := range domainCfg.Domain {\n\t\t\tif domainInCfg == domain || strings.HasSuffix(domain, \".\"+domainInCfg) {\n\t\t\t\treturn domainCfg\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (this *DoClient) CreateDomain(domainName, ipAddress string) (interface{}, error) {\n\treturn this.client.CreateDomain(domainName, ipAddress)\n}",
"func (o AccountActiveDirectoryPtrOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AccountActiveDirectory) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Domain\n\t}).(pulumi.StringPtrOutput)\n}",
"func (s *ModelPackage) SetDomain(v string) *ModelPackage {\n\ts.Domain = &v\n\treturn s\n}",
"func (o GetServerGroupsGroupHealthCheckOutput) HealthCheckDomain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetServerGroupsGroupHealthCheck) string { return v.HealthCheckDomain }).(pulumi.StringOutput)\n}",
"func GetDomain(site, UserAgent string) string {\n\treturn getAPIResponse(site, UserAgent).Domain\n}",
"func CreateDomain(domain string) (*DomainEntry, error) {\n\treturn CreateDomainDetail(domain, \"\", \"\")\n}",
"func Domain() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"domain\",\n\t\tShort: \"domain commands\",\n\t\tLong: \"domain is used to access domain commands\",\n\t}\n\n\tcmdDomainCreate := cmdBuilder(RunDomainCreate, \"create\", \"create domain\", writer, aliasOpt(\"c\"))\n\tcmd.AddCommand(cmdDomainCreate)\n\taddStringFlag(cmdDomainCreate, doit.ArgIPAddress, \"\", \"IP address\", requiredOpt())\n\n\tcmdDomainList := cmdBuilder(RunDomainList, \"list\", \"list comains\", writer, aliasOpt(\"ls\"))\n\tcmd.AddCommand(cmdDomainList)\n\n\tcmdDomainGet := cmdBuilder(RunDomainGet, \"get\", \"get domain\", writer, aliasOpt(\"g\"))\n\tcmd.AddCommand(cmdDomainGet)\n\n\tcmdDomainDelete := cmdBuilder(RunDomainDelete, \"delete\", \"delete droplet\", writer, aliasOpt(\"g\"))\n\tcmd.AddCommand(cmdDomainDelete)\n\n\tcmdRecord := &cobra.Command{\n\t\tUse: \"records\",\n\t\tShort: \"domain record commands\",\n\t\tLong: \"commands for interacting with an individual domain\",\n\t}\n\tcmd.AddCommand(cmdRecord)\n\n\tcmdRecordList := cmdBuilder(RunRecordList, \"list\", \"list records\", writer, aliasOpt(\"ls\"))\n\tcmdRecord.AddCommand(cmdRecordList)\n\taddStringFlag(cmdRecordList, doit.ArgDomainName, \"\", \"Domain name\")\n\n\tcmdRecordCreate := cmdBuilder(RunRecordCreate, \"create\", \"create record\", writer, aliasOpt(\"c\"))\n\tcmdRecord.AddCommand(cmdRecordCreate)\n\taddStringFlag(cmdRecordCreate, doit.ArgRecordType, \"\", \"Record type\")\n\taddStringFlag(cmdRecordCreate, doit.ArgRecordName, \"\", \"Record name\")\n\taddStringFlag(cmdRecordCreate, doit.ArgRecordData, \"\", \"Record data\")\n\taddIntFlag(cmdRecordCreate, doit.ArgRecordPriority, 0, \"Record priority\")\n\taddIntFlag(cmdRecordCreate, doit.ArgRecordPort, 0, \"Record port\")\n\taddIntFlag(cmdRecordCreate, doit.ArgRecordWeight, 0, \"Record weight\")\n\n\tcmdRecordDelete := cmdBuilder(RunRecordDelete, \"delete <domain> <record id...>\", \"delete record\", writer, aliasOpt(\"d\"))\n\tcmdRecord.AddCommand(cmdRecordDelete)\n\n\tcmdRecordUpdate := cmdBuilder(RunRecordUpdate, \"update\", \"update record\", writer, aliasOpt(\"u\"))\n\tcmdRecord.AddCommand(cmdRecordUpdate)\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordID, 0, \"Record ID\")\n\taddStringFlag(cmdRecordUpdate, doit.ArgRecordType, \"\", \"Record type\")\n\taddStringFlag(cmdRecordUpdate, doit.ArgRecordName, \"\", \"Record name\")\n\taddStringFlag(cmdRecordUpdate, doit.ArgRecordData, \"\", \"Record data\")\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordPriority, 0, \"Record priority\")\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordPort, 0, \"Record port\")\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordWeight, 0, \"Record weight\")\n\n\treturn cmd\n}",
"func (o DataSourceAmazonElasticsearchParametersPtrOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DataSourceAmazonElasticsearchParameters) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Domain\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o DataSourceAmazonOpenSearchParametersOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DataSourceAmazonOpenSearchParameters) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func (rp *ResolverPool) SubdomainToDomain(name string) string {\n\tch := make(chan string, 2)\n\n\trp.domainCacheChan <- &domainReq{\n\t\tName: name,\n\t\tCh: ch,\n\t}\n\n\treturn <-ch\n}",
"func (client IdentityClient) GetDomain(ctx context.Context, request GetDomainRequest) (response GetDomainResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getDomain, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetDomainResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetDomainResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetDomainResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetDomainResponse\")\n\t}\n\treturn\n}",
"func (hg *HostGroup) AddDomain(ctx context.Context, strDomain string, nType int64) ([]byte, error) {\n\tparams := []byte(fmt.Sprintf(`{\"strDomain\": \"%s\", \"nType\" : %d }`, strDomain, nType))\n\treturn hg.client.PostInOut(ctx, \"/api/v1.0/HostGroup.AddDomain\", params, nil)\n}",
"func DomainAddress(domain string) Address {\n\treturn domainAddress(domain)\n}",
"func NewWorkerDomain(ctx *pulumi.Context,\n\tname string, args *WorkerDomainArgs, opts ...pulumi.ResourceOption) (*WorkerDomain, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.AccountId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'AccountId'\")\n\t}\n\tif args.Hostname == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Hostname'\")\n\t}\n\tif args.Service == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Service'\")\n\t}\n\tif args.ZoneId == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'ZoneId'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource WorkerDomain\n\terr := ctx.RegisterResource(\"cloudflare:index/workerDomain:WorkerDomain\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (jid JID) Domain() JID {\n\ts := 0\n\n\tif i := strings.Index(string(jid), \"@\"); i != -1 {\n\t\ts = i + 1\n\t}\n\n\te := len(jid)\n\n\tif i := strings.Index(string(jid), \"/\"); i != -1 {\n\t\te = i\n\t}\n\n\treturn jid[s:e]\n}",
"func (r Dns_Secondary) GetDomain() (resp datatypes.Dns_Domain, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Secondary\", \"getDomain\", nil, &r.Options, &resp)\n\treturn\n}",
"func WrapAsDomain(err error, message string) E {\n\treturn wrapInfrastructure(err, message, 1)\n}",
"func (o DatasetAccessTypeOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasetAccessType) *string { return v.Domain }).(pulumi.StringPtrOutput)\n}"
] | [
"0.59415394",
"0.5931642",
"0.5741345",
"0.56521034",
"0.56398815",
"0.5539767",
"0.5539069",
"0.5498731",
"0.5496328",
"0.5477649",
"0.5436799",
"0.5431728",
"0.5416263",
"0.53798914",
"0.5355684",
"0.5344459",
"0.53300226",
"0.53053075",
"0.5295649",
"0.5276572",
"0.52718955",
"0.52718955",
"0.5261124",
"0.52459234",
"0.52400756",
"0.5238499",
"0.52192646",
"0.52049106",
"0.52021855",
"0.518459",
"0.5148863",
"0.513293",
"0.512319",
"0.5105388",
"0.51025695",
"0.5095364",
"0.5091922",
"0.509001",
"0.508895",
"0.508895",
"0.50861317",
"0.50671107",
"0.50648975",
"0.5058551",
"0.5050226",
"0.504652",
"0.5040907",
"0.5031686",
"0.50312686",
"0.50261426",
"0.502263",
"0.5016303",
"0.5005953",
"0.4995372",
"0.49930567",
"0.4966068",
"0.49645028",
"0.49633184",
"0.4955369",
"0.49507087",
"0.49379742",
"0.49324617",
"0.49089763",
"0.4908287",
"0.49063167",
"0.489822",
"0.4887851",
"0.4887462",
"0.48825616",
"0.48789284",
"0.48788965",
"0.48782375",
"0.48779568",
"0.48765066",
"0.48763168",
"0.48744977",
"0.48733836",
"0.4871503",
"0.48676756",
"0.48655745",
"0.48641807",
"0.48603314",
"0.48529005",
"0.48511153",
"0.4848381",
"0.4847596",
"0.48469564",
"0.48365209",
"0.4833359",
"0.48317823",
"0.48314136",
"0.48285604",
"0.48255655",
"0.48247272",
"0.4823569",
"0.4821924",
"0.48181242",
"0.48097754",
"0.48083368",
"0.48079792"
] | 0.72771716 | 0 |
Domain returns a complete domain. Deprecated: due to panicking on error. Use ComputeDomain() instead. | func Domain(domainType DomainType, forkVersion []byte, genesisValidatorsRoot []byte) []byte {
// Generate fork data root from fork version and genesis validators root.
forkData := &ForkData{
CurrentVersion: forkVersion,
GenesisValidatorsRoot: genesisValidatorsRoot,
}
forkDataRoot, err := forkData.HashTreeRoot()
if err != nil {
panic(err)
}
res := make([]byte, 32)
copy(res[0:4], domainType[:])
copy(res[4:32], forkDataRoot[:])
return res
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func GetDomain(err error) Domain { return domains.GetDomain(err) }",
"func GetDomain(err error) Domain { return domains.GetDomain(err) }",
"func (o GetDomainsResultOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v GetDomainsResult) *string { return v.Domain }).(pulumi.StringPtrOutput)\n}",
"func (c *DomainController) Domain() types.Domain {\n\tif err := c.requireDomain(); err != nil {\n\t\tpanic(\"get domain without running existence checks is not allowed\")\n\t}\n\treturn *c.domain\n}",
"func GetDomain(fork Fork, epoch Epoch, dom BLSDomain) BLSDomain {\n\t// combine fork version with domain.\n\tv := fork.GetVersion(epoch)\n\treturn BLSDomain(v[0] << 24 | v[1] << 16 | v[2] << 8 | v[3]) + dom\n}",
"func WithDomain(err error, domain Domain) error { return domains.WithDomain(err, domain) }",
"func WithDomain(err error, domain Domain) error { return domains.WithDomain(err, domain) }",
"func (o LookupDatasetGroupResultOutput) Domain() DatasetGroupDomainPtrOutput {\n\treturn o.ApplyT(func(v LookupDatasetGroupResult) *DatasetGroupDomain { return v.Domain }).(DatasetGroupDomainPtrOutput)\n}",
"func NewDomain(s string) E {\n\treturn newInfrastructure(s, 1)\n}",
"func ToDomain(gymData *Gym) gyms.Domain {\n\treturn gyms.Domain{\n\t\tID: gymData.ID,\n\t\tName: gymData.Name,\n\t\tAddress: gymData.Address,\n\t\tCreatedAt: gymData.CreatedAt,\n\t\tUpdatedAt: gymData.UpdatedAt,\n\t\tDeletedAt: gymData.DeletedAt,\n\t}\n}",
"func (r *DomainIdentity) Domain() pulumi.StringOutput {\n\treturn (pulumi.StringOutput)(r.s.State[\"domain\"])\n}",
"func GetDomain(store kv.Storage) (*domain.Domain, error) {\n\treturn domap.Get(store)\n}",
"func NewDomain(dom string) *Domain {\n\treturn &Domain{dom}\n}",
"func (s *Server) Domain(domainName string) (Domain, error) {\n\tdomain := Domain{}\n\n\tdomainExists, err := s.domainExists(domainName)\n\tif err != nil {\n\t\treturn domain, err\n\t}\n\tif !domainExists {\n\t\treturn domain, fmt.Errorf(\"Domain %s doesn't exist\", domainName)\n\t}\n\n\tdomaines, err := s.domainQuery(domainQueryByDomain, domainName)\n\tif err != nil {\n\t\treturn domain, err\n\t}\n\n\tif len(domaines) == 0 {\n\t\treturn domain, fmt.Errorf(\"Domain not found\")\n\t}\n\n\treturn domaines[0], nil\n}",
"func (o DnsRecordOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *DnsRecord) pulumi.StringOutput { return v.Domain }).(pulumi.StringOutput)\n}",
"func NamedDomain(domainName string) Domain { return domains.NamedDomain(domainName) }",
"func NamedDomain(domainName string) Domain { return domains.NamedDomain(domainName) }",
"func (spec *MachineSpec) Domain() string {\n\treturn fmt.Sprintf(\"%s.%s.%s\", spec.Machine.Uid, spec.Machine.Credential, dnsZones[spec.env()])\n}",
"func NewDomain(value string) *Domain {\n\tthis := Domain{}\n\tthis.Value = value\n\treturn &this\n}",
"func NewDomain(name ...string) *Domain {\n\td := new(Domain)\n\tif len(name) > 0 {\n\t\td.SetName(name[0])\n\t}\n\treturn d\n}",
"func NewDomain(domain string) Domain {\n\treturn Domain{\n\t\tDomainName: domain,\n\t\tServersChanged: \"\",\n\t\tSslGrade: \"\",\n\t\tPreviousSslGrade: \"\",\n\t\tLogo: \"\",\n\t\tTitle: \"\",\n\t\tIsDown: false,\n\t\tState: \"I\"}\n}",
"func (pub *Publisher) Domain() string {\n\tident, _ := pub.registeredIdentity.GetFullIdentity()\n\treturn ident.Domain\n}",
"func WrapAsDomain(err error, message string) E {\n\treturn wrapInfrastructure(err, message, 1)\n}",
"func (d *WindowsDesktopV3) GetDomain() string {\n\treturn d.Spec.Domain\n}",
"func NewDomain(domain string) *Domain {\n\td := Domain{\n\t\tdomain: []byte(domain),\n\t\tchecks: make(map[string]interface{}),\n\t}\n\treturn &d\n}",
"func (p *PrincipalMock) Domain() string {\n\treturn p.DomainFunc()\n}",
"func (c *Client) Domain(method string, id interface{}, data interface{}) System {\n\tvar system System\n\n\tswitch method {\n\tcase \"GET\":\n\t\tendpoint := fmt.Sprintf(\"systems/%s\", id)\n\t\tc.invokeAPI(\"GET\", endpoint, nil, &system)\n\tcase \"CREATE\":\n\t\tendpoint := \"systems\"\n\t\tc.invokeAPI(\"POST\", endpoint, data, &system)\n\tcase \"UPDATE\":\n\t\tendpoint := fmt.Sprintf(\"systems/%s\", id)\n\t\tc.invokeAPI(\"PUT\", endpoint, data, &system)\n\tcase \"DELETE\":\n\t\tendpoint := fmt.Sprintf(\"systems/%s\", id)\n\t\tc.invokeAPI(\"DELETE\", endpoint, nil, nil)\n\t}\n\n\treturn system\n}",
"func (*XMLDocument) Domain() (domain string) {\n\tmacro.Rewrite(\"$_.domain\")\n\treturn domain\n}",
"func NewDomain(name string) (*Domain, error) {\n\tvar domain Domain\n\n\tdomain.Name = name\n\tdomain.Components = map[string]*Component{}\n\tdomain.ComponentsX = sync.RWMutex{}\n\tdomain.Architectures = map[string]*Architecture{}\n\tdomain.ArchitecturesX = sync.RWMutex{}\n\tdomain.Solutions = map[string]*Solution{}\n\tdomain.SolutionsX = sync.RWMutex{}\n\tdomain.Tasks = map[string]*Task{}\n\tdomain.TasksX = sync.RWMutex{}\n\tdomain.Events = map[string]*Event{}\n\tdomain.EventsX = sync.RWMutex{}\n\n\t// success\n\treturn &domain, nil\n}",
"func (o AccountActiveDirectoryOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v AccountActiveDirectory) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func Domain(named Named) string {\n\tif r, ok := named.(namedRepository); ok {\n\t\treturn r.Domain()\n\t}\n\tdomain, _ := splitDomain(named.Name())\n\treturn domain\n}",
"func PackageDomain() Domain { return domains.PackageDomainAtDepth(1) }",
"func PackageDomain() Domain { return domains.PackageDomainAtDepth(1) }",
"func NewDomain(domainMatrix *mat64.Dense) *Domain {\n\n\t// get domain size\n\trows, cols := domainMatrix.Dims()\n\n\t// compute band count\n\tbandCount := 2 + (int(math.Floor(math.Sqrt(math.Pow(float64(rows), 2.0)+math.Pow(float64(cols), 2.0)))) / 142)\n\n\t//return output\n\treturn &Domain{\n\t\tRows: rows,\n\t\tCols: cols,\n\t\tMatrix: domainMatrix,\n\t\tBndCnt: bandCount,\n\t}\n}",
"func (l *Locale) GetDomain() string {\n\tl.RLock()\n\tdom := l.defaultDomain\n\tl.RUnlock()\n\treturn dom\n}",
"func (o AppMonitorOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *AppMonitor) pulumi.StringOutput { return v.Domain }).(pulumi.StringOutput)\n}",
"func (o DatasetAccessTypeOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v DatasetAccessType) *string { return v.Domain }).(pulumi.StringPtrOutput)\n}",
"func (a *app) NewDomain(name string, delegate Delegate) Domain {\n\tDebug(\"Creating domain %s\", name)\n\td := &domain{\n\t\tapp: a,\n\t\tDelegate: delegate,\n\t\tname: name,\n\t\tjoined: false,\n\t\tsubscriptions: make(map[uint]*boundEndpoint),\n\t\tregistrations: make(map[uint]*boundEndpoint),\n\t}\n\n\t// TODO: trigger onJoin if the superdomain has joined\n\n\ta.domains = append(a.domains, d)\n\treturn d\n}",
"func (d *DB) Get_domain(ipaddress string) (IP2Locationrecord, error) {\n\treturn d.query(ipaddress, domain)\n}",
"func (o ObjectAccessControlResponseOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlResponse) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func Get_domain(ipaddress string) IP2Locationrecord {\n\treturn handleError(defaultDB.query(ipaddress, domain))\n}",
"func (o AccountActiveDirectoryPtrOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *AccountActiveDirectory) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Domain\n\t}).(pulumi.StringPtrOutput)\n}",
"func GetDomain(site, UserAgent string) string {\n\treturn getAPIResponse(site, UserAgent).Domain\n}",
"func (c *CompanyAPI) Domain(value string, webhook *Webhook) (*CompanyResponse, error) {\n\tif !strings.Contains(value, \".\") { // Instead of a regex let's at least check if there's at least a period in the domain\n\t\treturn nil, fmt.Errorf(\"%s. Invalid domain: %s\", errLibrary, value)\n\t}\n\tr, err := c.get(\"domain\", value, \"company/lookup\", webhook)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp, err := c.do(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar response CompanyResponse\n\tdecoder := json.NewDecoder(resp.Body)\n\tdefer resp.Body.Close()\n\terr = decoder.Decode(&response)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &response, nil\n}",
"func (j JID) Domain() JID {\n\treturn JID{\n\t\tdomainlen: j.domainlen,\n\t\tdata: j.data[j.locallen : j.domainlen+j.locallen],\n\t}\n}",
"func (ctx *Context) Domain() string {\r\n\tparts := strings.Split(ctx.Host(), \".\")\r\n\tif len(parts) >= 3 {\r\n\t\treturn strings.Join(parts[len(parts)-2:], \".\")\r\n\t}\r\n\treturn \"localhost\"\r\n}",
"func (r Dns_Secondary) GetDomain() (resp datatypes.Dns_Domain, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Secondary\", \"getDomain\", nil, &r.Options, &resp)\n\treturn\n}",
"func (m *MockAll) Domain() Domain {\n\tret := m.ctrl.Call(m, \"Domain\")\n\tret0, _ := ret[0].(Domain)\n\treturn ret0\n}",
"func (s *RegistrarAPI) GetDomain(req *RegistrarAPIGetDomainRequest, opts ...scw.RequestOption) (*Domain, error) {\n\tvar err error\n\n\tif fmt.Sprint(req.Domain) == \"\" {\n\t\treturn nil, errors.New(\"field Domain cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2beta1/domains/\" + fmt.Sprint(req.Domain) + \"\",\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp Domain\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (r *Reader) Domain(ipAddress net.IP) (*Domain, error) {\n\tif isDomain&r.databaseType == 0 {\n\t\treturn nil, InvalidMethodError{\"Domain\", r.Metadata().DatabaseType}\n\t}\n\tvar val Domain\n\terr := r.mmdbReader.Lookup(ipAddress, &val)\n\treturn &val, err\n}",
"func NewDomain(domain string) (Domain, error) {\n\tvar p *idna.Profile\n\tp = idna.New()\n\tidnDomain, e := p.ToASCII(domain)\n\tif e != nil {\n\t\treturn Domain{}, e\n\t}\n\n\teTLD, icann := publicsuffix.PublicSuffix(idnDomain)\n\tif icann == false {\n\t\treturn Domain{}, errors.New(\"Domain not valid ICANN\")\n\t}\n\n\tdt := Domain{}\n\n\tdt.ASCII, _ = publicsuffix.EffectiveTLDPlusOne(idnDomain)\n\tdt.TLDASCII = eTLD\n\n\tdt.Unicode, _ = p.ToUnicode(dt.ASCII)\n\tdt.TLDUnicode, _ = p.ToUnicode(eTLD)\n\n\treturn dt, nil\n}",
"func NewDomain(ctx *pulumi.Context,\n\tname string, args *DomainArgs, opts ...pulumi.ResourceOption) (*Domain, error) {\n\tif args == nil {\n\t\targs = &DomainArgs{}\n\t}\n\tvar resource Domain\n\terr := ctx.RegisterResource(\"aws:elasticsearch/domain:Domain\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func GetDomain(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *DomainState, opts ...pulumi.ResourceOption) (*Domain, error) {\n\tvar resource Domain\n\terr := ctx.ReadResource(\"aws:elasticsearch/domain:Domain\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func NewDomain(name string) (*Domain, error) {\n\td := Domain{name: name}\n\treturn &d, nil\n}",
"func (i Internet) Domain() string {\n\tdomain := strings.ToLower(i.Faker.Lexify(\"???\"))\n\treturn domain + \".\" + i.TLD()\n}",
"func (s *API) GetDomain(req *GetDomainRequest, opts ...scw.RequestOption) (*GetDomainResponse, error) {\n\tvar err error\n\n\tif fmt.Sprint(req.Domain) == \"\" {\n\t\treturn nil, errors.New(\"field Domain cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"GET\",\n\t\tPath: \"/domain/v2alpha2/domains/\" + fmt.Sprint(req.Domain) + \"\",\n\t\tHeaders: http.Header{},\n\t}\n\n\tvar resp GetDomainResponse\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (req *Admins) ToDomain() *admins.Domain {\n\treturn &admins.Domain{\n\t\tUsername: req.Username,\n\t\tEmail: req.Email,\n\t\tPassword: req.Password,\n\t}\n}",
"func NewDomain(ctx *pulumi.Context,\n\tname string, args *DomainArgs, opts ...pulumi.ResourceOption) (*Domain, error) {\n\tif args == nil {\n\t\treturn nil, errors.New(\"missing one or more required arguments\")\n\t}\n\n\tif args.DomainName == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'DomainName'\")\n\t}\n\tif args.Sources == nil {\n\t\treturn nil, errors.New(\"invalid value for required argument 'Sources'\")\n\t}\n\topts = internal.PkgResourceDefaultOpts(opts)\n\tvar resource Domain\n\terr := ctx.RegisterResource(\"alicloud:dcdn/domain:Domain\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (x ApmExternalServiceEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func (v *MatchingListTaskListPartitionsRequest) GetDomain() (o string) {\n\tif v != nil && v.Domain != nil {\n\t\treturn *v.Domain\n\t}\n\treturn\n}",
"func (db *cdb) SelectDomain(\n\tctx context.Context,\n\tdomainID *string,\n\tdomainName *string,\n) (*nosqlplugin.DomainRow, error) {\n\tif domainID != nil && domainName != nil {\n\t\treturn nil, fmt.Errorf(\"GetDomain operation failed. Both ID and Name specified in request\")\n\t} else if domainID == nil && domainName == nil {\n\t\treturn nil, fmt.Errorf(\"GetDomain operation failed. Both ID and Name are empty\")\n\t}\n\n\tvar query gocql.Query\n\tvar err error\n\tif domainID != nil {\n\t\tquery = db.session.Query(templateGetDomainQuery, domainID).WithContext(ctx)\n\t\terr = query.Scan(&domainName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tinfo := &p.DomainInfo{}\n\tconfig := &nosqlplugin.NoSQLInternalDomainConfig{}\n\treplicationConfig := &p.DomainReplicationConfig{}\n\n\t// because of encoding/types, we can't directly read from config struct\n\tvar badBinariesData []byte\n\tvar badBinariesDataEncoding string\n\tvar replicationClusters []map[string]interface{}\n\n\tvar failoverNotificationVersion int64\n\tvar notificationVersion int64\n\tvar failoverVersion int64\n\tvar previousFailoverVersion int64\n\tvar failoverEndTime int64\n\tvar lastUpdatedTime int64\n\tvar configVersion int64\n\tvar isGlobalDomain bool\n\tvar retentionDays int32\n\n\tquery = db.session.Query(templateGetDomainByNameQueryV2, constDomainPartition, domainName).WithContext(ctx)\n\terr = query.Scan(\n\t\t&info.ID,\n\t\t&info.Name,\n\t\t&info.Status,\n\t\t&info.Description,\n\t\t&info.OwnerEmail,\n\t\t&info.Data,\n\t\t&retentionDays,\n\t\t&config.EmitMetric,\n\t\t&config.ArchivalBucket,\n\t\t&config.ArchivalStatus,\n\t\t&config.HistoryArchivalStatus,\n\t\t&config.HistoryArchivalURI,\n\t\t&config.VisibilityArchivalStatus,\n\t\t&config.VisibilityArchivalURI,\n\t\t&badBinariesData,\n\t\t&badBinariesDataEncoding,\n\t\t&replicationConfig.ActiveClusterName,\n\t\t&replicationClusters,\n\t\t&isGlobalDomain,\n\t\t&configVersion,\n\t\t&failoverVersion,\n\t\t&failoverNotificationVersion,\n\t\t&previousFailoverVersion,\n\t\t&failoverEndTime,\n\t\t&lastUpdatedTime,\n\t\t¬ificationVersion,\n\t)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.BadBinaries = p.NewDataBlob(badBinariesData, common.EncodingType(badBinariesDataEncoding))\n\tconfig.Retention = common.DaysToDuration(retentionDays)\n\treplicationConfig.Clusters = p.DeserializeClusterConfigs(replicationClusters)\n\n\tdr := &nosqlplugin.DomainRow{\n\t\tInfo: info,\n\t\tConfig: config,\n\t\tReplicationConfig: replicationConfig,\n\t\tConfigVersion: configVersion,\n\t\tFailoverVersion: failoverVersion,\n\t\tFailoverNotificationVersion: failoverNotificationVersion,\n\t\tPreviousFailoverVersion: previousFailoverVersion,\n\t\tNotificationVersion: notificationVersion,\n\t\tLastUpdatedTime: time.Unix(0, lastUpdatedTime),\n\t\tIsGlobalDomain: isGlobalDomain,\n\t}\n\tif failoverEndTime > emptyFailoverEndTime {\n\t\tdr.FailoverEndTime = common.TimePtr(time.Unix(0, failoverEndTime))\n\t}\n\n\treturn dr, nil\n}",
"func (jid JID) Domain() JID {\n\ts := 0\n\n\tif i := strings.Index(string(jid), \"@\"); i != -1 {\n\t\ts = i + 1\n\t}\n\n\te := len(jid)\n\n\tif i := strings.Index(string(jid), \"/\"); i != -1 {\n\t\te = i\n\t}\n\n\treturn jid[s:e]\n}",
"func NewDomain(ctx *pulumi.Context,\n\tname string, args *DomainArgs, opts ...pulumi.ResourceOption) (*Domain, error) {\n\tif args == nil || args.DomainName == nil {\n\t\treturn nil, errors.New(\"missing required argument 'DomainName'\")\n\t}\n\tif args == nil {\n\t\targs = &DomainArgs{}\n\t}\n\tvar resource Domain\n\terr := ctx.RegisterResource(\"aws:lightsail/domain:Domain\", name, args, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func GetDomain(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *DomainState, opts ...pulumi.ResourceOption) (*Domain, error) {\n\tvar resource Domain\n\terr := ctx.ReadResource(\"aws:lightsail/domain:Domain\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (this *NurbsCurve) Domain() (min, max float64) {\n\tmin = this.knots[0]\n\tmax = this.knots[len(this.knots)-1]\n\treturn\n}",
"func (p *prod) ManagedDomain(domain string) (string, error) {\n\tif domain == \"\" ||\n\t\tstrings.HasPrefix(domain, \".\") ||\n\t\tstrings.HasSuffix(domain, \".\") {\n\t\t// belt and braces: validation should already prevent this\n\t\treturn \"\", fmt.Errorf(\"invalid domain %q\", domain)\n\t}\n\n\tdomain = strings.TrimSuffix(domain, \".\"+p.Domain())\n\tif strings.ContainsRune(domain, '.') {\n\t\treturn \"\", nil\n\t}\n\treturn domain + \".\" + p.Domain(), nil\n}",
"func (input *BeegoInput) Domain() string {\n\treturn input.Host()\n}",
"func (r Dns_Domain_ResourceRecord_MxType) GetDomain() (resp datatypes.Dns_Domain, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord_MxType\", \"getDomain\", nil, &r.Options, &resp)\n\treturn\n}",
"func (x DashboardEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func GetDomain(ctx *pulumi.Context,\n\tname string, id pulumi.IDInput, state *DomainState, opts ...pulumi.ResourceOption) (*Domain, error) {\n\tvar resource Domain\n\terr := ctx.ReadResource(\"alicloud:dcdn/domain:Domain\", name, id, state, &resource, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resource, nil\n}",
"func (o DataSourceAmazonElasticsearchParametersPtrOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DataSourceAmazonElasticsearchParameters) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Domain\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o *LaunchpadClicks) GetDomain() string {\n\tif o == nil || o.Domain == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Domain\n}",
"func (o BucketAccessControlResponseOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketAccessControlResponse) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func Domain() *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"domain\",\n\t\tShort: \"domain commands\",\n\t\tLong: \"domain is used to access domain commands\",\n\t}\n\n\tcmdDomainCreate := cmdBuilder(RunDomainCreate, \"create\", \"create domain\", writer, aliasOpt(\"c\"))\n\tcmd.AddCommand(cmdDomainCreate)\n\taddStringFlag(cmdDomainCreate, doit.ArgIPAddress, \"\", \"IP address\", requiredOpt())\n\n\tcmdDomainList := cmdBuilder(RunDomainList, \"list\", \"list comains\", writer, aliasOpt(\"ls\"))\n\tcmd.AddCommand(cmdDomainList)\n\n\tcmdDomainGet := cmdBuilder(RunDomainGet, \"get\", \"get domain\", writer, aliasOpt(\"g\"))\n\tcmd.AddCommand(cmdDomainGet)\n\n\tcmdDomainDelete := cmdBuilder(RunDomainDelete, \"delete\", \"delete droplet\", writer, aliasOpt(\"g\"))\n\tcmd.AddCommand(cmdDomainDelete)\n\n\tcmdRecord := &cobra.Command{\n\t\tUse: \"records\",\n\t\tShort: \"domain record commands\",\n\t\tLong: \"commands for interacting with an individual domain\",\n\t}\n\tcmd.AddCommand(cmdRecord)\n\n\tcmdRecordList := cmdBuilder(RunRecordList, \"list\", \"list records\", writer, aliasOpt(\"ls\"))\n\tcmdRecord.AddCommand(cmdRecordList)\n\taddStringFlag(cmdRecordList, doit.ArgDomainName, \"\", \"Domain name\")\n\n\tcmdRecordCreate := cmdBuilder(RunRecordCreate, \"create\", \"create record\", writer, aliasOpt(\"c\"))\n\tcmdRecord.AddCommand(cmdRecordCreate)\n\taddStringFlag(cmdRecordCreate, doit.ArgRecordType, \"\", \"Record type\")\n\taddStringFlag(cmdRecordCreate, doit.ArgRecordName, \"\", \"Record name\")\n\taddStringFlag(cmdRecordCreate, doit.ArgRecordData, \"\", \"Record data\")\n\taddIntFlag(cmdRecordCreate, doit.ArgRecordPriority, 0, \"Record priority\")\n\taddIntFlag(cmdRecordCreate, doit.ArgRecordPort, 0, \"Record port\")\n\taddIntFlag(cmdRecordCreate, doit.ArgRecordWeight, 0, \"Record weight\")\n\n\tcmdRecordDelete := cmdBuilder(RunRecordDelete, \"delete <domain> <record id...>\", \"delete record\", writer, aliasOpt(\"d\"))\n\tcmdRecord.AddCommand(cmdRecordDelete)\n\n\tcmdRecordUpdate := cmdBuilder(RunRecordUpdate, \"update\", \"update record\", writer, aliasOpt(\"u\"))\n\tcmdRecord.AddCommand(cmdRecordUpdate)\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordID, 0, \"Record ID\")\n\taddStringFlag(cmdRecordUpdate, doit.ArgRecordType, \"\", \"Record type\")\n\taddStringFlag(cmdRecordUpdate, doit.ArgRecordName, \"\", \"Record name\")\n\taddStringFlag(cmdRecordUpdate, doit.ArgRecordData, \"\", \"Record data\")\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordPriority, 0, \"Record priority\")\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordPort, 0, \"Record port\")\n\taddIntFlag(cmdRecordUpdate, doit.ArgRecordWeight, 0, \"Record weight\")\n\n\treturn cmd\n}",
"func ComputeDomain(domainType DomainType, forkVersion []byte, genesisValidatorsRoot []byte) ([]byte, error) {\n\tif len(forkVersion) != 4 {\n\t\treturn nil, errors.New(\"fork version must be 4 bytes in length\")\n\t}\n\tif len(genesisValidatorsRoot) != 32 {\n\t\treturn nil, errors.New(\"genesis validators root must be 32 bytes in length\")\n\t}\n\n\t// Generate fork data root from fork version and genesis validators root.\n\tforkData := &ForkData{\n\t\tCurrentVersion: forkVersion,\n\t\tGenesisValidatorsRoot: genesisValidatorsRoot,\n\t}\n\tforkDataRoot, err := forkData.HashTreeRoot()\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to generate fork data hash tree root\")\n\t}\n\n\tres := make([]byte, 32)\n\tcopy(res[0:4], domainType[:])\n\tcopy(res[4:32], forkDataRoot[:])\n\n\treturn res, nil\n}",
"func (s *API) TradeDomain(req *TradeDomainRequest, opts ...scw.RequestOption) (*Domain, error) {\n\tvar err error\n\n\tif fmt.Sprint(req.Domain) == \"\" {\n\t\treturn nil, errors.New(\"field Domain cannot be empty in request\")\n\t}\n\n\tscwReq := &scw.ScalewayRequest{\n\t\tMethod: \"POST\",\n\t\tPath: \"/domain/v2alpha2/domains/\" + fmt.Sprint(req.Domain) + \"/trade\",\n\t\tHeaders: http.Header{},\n\t}\n\n\terr = scwReq.SetBody(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar resp Domain\n\n\terr = s.client.Do(scwReq, &resp, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &resp, nil\n}",
"func (o ObjectAccessControlTypeOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ObjectAccessControlType) *string { return v.Domain }).(pulumi.StringPtrOutput)\n}",
"func (x GenericInfrastructureEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func (x WorkloadEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func Domain() string {\n\tif domain := os.Getenv(DomainEnv); domain != \"\" {\n\t\treturn domain\n\t}\n\treturn \"\"\n}",
"func (c *Client) GetDomain(customerID, domain string) (*Domain, error) {\n\tdomainURL := fmt.Sprintf(pathDomains, c.baseURL, domain)\n\treq, err := http.NewRequest(http.MethodGet, domainURL, nil)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\td := new(Domain)\n\tif err := c.execute(customerID, req, &d); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn d, nil\n}",
"func (o DataSourceAmazonOpenSearchParametersPtrOutput) Domain() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *DataSourceAmazonOpenSearchParameters) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Domain\n\t}).(pulumi.StringPtrOutput)\n}",
"func (actor Actor) GetDomain(domainGUID string) (Domain, Warnings, error) {\n\tvar allWarnings Warnings\n\n\tdomain, warnings, err := actor.GetSharedDomain(domainGUID)\n\tallWarnings = append(allWarnings, warnings...)\n\tswitch err.(type) {\n\tcase nil:\n\t\treturn domain, allWarnings, nil\n\tcase actionerror.DomainNotFoundError:\n\tdefault:\n\t\treturn Domain{}, allWarnings, err\n\t}\n\n\tdomain, warnings, err = actor.GetPrivateDomain(domainGUID)\n\tallWarnings = append(allWarnings, warnings...)\n\tswitch err.(type) {\n\tcase nil:\n\t\treturn domain, allWarnings, nil\n\tdefault:\n\t\treturn Domain{}, allWarnings, err\n\t}\n}",
"func (m *GraphBaseServiceClient) Domains()(*i957076b10ba162b23efec7b94dd26b84c6475d285449c1cbc9c5b85910d36a12.DomainsRequestBuilder) {\n return i957076b10ba162b23efec7b94dd26b84c6475d285449c1cbc9c5b85910d36a12.NewDomainsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}",
"func (m *GraphBaseServiceClient) Domains()(*i957076b10ba162b23efec7b94dd26b84c6475d285449c1cbc9c5b85910d36a12.DomainsRequestBuilder) {\n return i957076b10ba162b23efec7b94dd26b84c6475d285449c1cbc9c5b85910d36a12.NewDomainsRequestBuilderInternal(m.pathParameters, m.requestAdapter);\n}",
"func (o DataSourceAmazonOpenSearchParametersOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DataSourceAmazonOpenSearchParameters) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func (client IdentityClient) GetDomain(ctx context.Context, request GetDomainRequest) (response GetDomainResponse, err error) {\n\tvar ociResponse common.OCIResponse\n\tpolicy := common.NoRetryPolicy()\n\tif client.RetryPolicy() != nil {\n\t\tpolicy = *client.RetryPolicy()\n\t}\n\tif request.RetryPolicy() != nil {\n\t\tpolicy = *request.RetryPolicy()\n\t}\n\tociResponse, err = common.Retry(ctx, request, client.getDomain, policy)\n\tif err != nil {\n\t\tif ociResponse != nil {\n\t\t\tif httpResponse := ociResponse.HTTPResponse(); httpResponse != nil {\n\t\t\t\topcRequestId := httpResponse.Header.Get(\"opc-request-id\")\n\t\t\t\tresponse = GetDomainResponse{RawResponse: httpResponse, OpcRequestId: &opcRequestId}\n\t\t\t} else {\n\t\t\t\tresponse = GetDomainResponse{}\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\tif convertedResponse, ok := ociResponse.(GetDomainResponse); ok {\n\t\tresponse = convertedResponse\n\t} else {\n\t\terr = fmt.Errorf(\"failed to convert OCIResponse into GetDomainResponse\")\n\t}\n\treturn\n}",
"func (cs *RegistrarStatus) Domain() string {\n\treturn cs.domain\n}",
"func (o DataSourceAmazonElasticsearchParametersOutput) Domain() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DataSourceAmazonElasticsearchParameters) string { return v.Domain }).(pulumi.StringOutput)\n}",
"func (r Dns_Domain_ResourceRecord) GetDomain() (resp datatypes.Dns_Domain, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord\", \"getDomain\", nil, &r.Options, &resp)\n\treturn\n}",
"func (app *App) Domain(domain string) *Router {\n\tapp.domainListLock.Lock()\n\tif app.domains[domain] == nil {\n\t\tapp.domains[domain] = &Router{\n\t\t\trouter: newRouter(),\n\t\t\tapp: app,\n\t\t}\n\t}\n\tapp.domainListLock.Unlock()\n\treturn app.domains[domain]\n}",
"func (g *Gandi) GetDomain(fqdn string) (domain Domain, err error) {\n\t_, err = g.askGandi(mGET, \"domains/\"+fqdn, nil, &domain)\n\treturn\n}",
"func (r Dns_Domain_ResourceRecord_SrvType) GetDomain() (resp datatypes.Dns_Domain, err error) {\n\terr = r.Session.DoRequest(\"SoftLayer_Dns_Domain_ResourceRecord_SrvType\", \"getDomain\", nil, &r.Options, &resp)\n\treturn\n}",
"func (x BrowserApplicationEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func (x UnavailableEntity) GetDomain() string {\n\treturn x.Domain\n}",
"func (user *UserID) Domain() ServerName {\n\treturn ServerName(user.domain)\n}",
"func (_options *UpdateEnterpriseOptions) SetDomain(domain string) *UpdateEnterpriseOptions {\n\t_options.Domain = core.StringPtr(domain)\n\treturn _options\n}",
"func (_options *CreateEnterpriseOptions) SetDomain(domain string) *CreateEnterpriseOptions {\n\t_options.Domain = core.StringPtr(domain)\n\treturn _options\n}",
"func NormaliseDomain(start, end []byte, reverse bool) ([]byte, []byte) {\n\tif reverse {\n\t\tif len(end) == 0 {\n\t\t\tend = []byte{}\n\t\t}\n\t} else {\n\t\tif len(start) == 0 {\n\t\t\tstart = []byte{}\n\t\t}\n\t}\n\treturn start, end\n}",
"func (d *Domains) GetDomain(name string) (*domain.Domain, error) {\n\treturn d.source(name).GetDomain(name)\n}"
] | [
"0.7280717",
"0.7280717",
"0.71779233",
"0.710203",
"0.69977224",
"0.68014425",
"0.68014425",
"0.6780974",
"0.6719316",
"0.6703464",
"0.6622421",
"0.66126794",
"0.66029614",
"0.65618205",
"0.656128",
"0.6482219",
"0.6482219",
"0.6480019",
"0.6478523",
"0.6469457",
"0.64446455",
"0.64300895",
"0.64246315",
"0.64220136",
"0.6418779",
"0.6387456",
"0.6377809",
"0.6359925",
"0.6357184",
"0.6346082",
"0.6341727",
"0.6332642",
"0.6332642",
"0.63185936",
"0.6312752",
"0.6302582",
"0.6299008",
"0.627553",
"0.62740016",
"0.62581366",
"0.6246076",
"0.6238674",
"0.6237591",
"0.6223424",
"0.6209909",
"0.62085146",
"0.6200711",
"0.61690277",
"0.6166977",
"0.6161913",
"0.6154343",
"0.61535543",
"0.61499715",
"0.6147302",
"0.61390513",
"0.61264884",
"0.6123106",
"0.6115228",
"0.61146337",
"0.61096644",
"0.6096107",
"0.608564",
"0.60800046",
"0.60688025",
"0.6062971",
"0.6060121",
"0.60580385",
"0.60571456",
"0.60569966",
"0.6052372",
"0.6049895",
"0.6049788",
"0.6042449",
"0.6042054",
"0.6037785",
"0.6033324",
"0.6031549",
"0.60306793",
"0.6023022",
"0.6005925",
"0.59933645",
"0.59901375",
"0.599005",
"0.59824324",
"0.59824324",
"0.5982356",
"0.5982244",
"0.5961787",
"0.5961629",
"0.59543943",
"0.5944001",
"0.59177023",
"0.5915518",
"0.5915037",
"0.5904929",
"0.5900902",
"0.5896992",
"0.58814347",
"0.58795196",
"0.5875652"
] | 0.6336992 | 31 |
True should only be used for testing purposes, it says that everyone is authenticated! | func True(baseURL string) Strategy {
return authTrue{
baseURL: baseURL,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (a Anonymous) Authenticated() bool { return false }",
"func isAuthenticated(w http.ResponseWriter, r *http.Request) {\n\tisLoggedIn := isLoggedIn(r)\n\n\tresp := map[string]interface{}{\n\t\t\"success\": isLoggedIn,\n\t}\n\tapiResponse(resp, w)\n}",
"func (recv *receiver) isAuthenticated() bool {\n\tif recv.mode == receiverModeClient && recv.authenticated {\n\t\treturn true\n\t}\n\treturn false\n}",
"func isAuthenticated(r *http.Request) bool {\n\ts, _ := Store.Get(r, \"sessid\")\n\tval, ok := s.Values[\"authenticated\"].(bool)\n\treturn ok && val\n}",
"func (o *Operator) authenticated(r *http.Request) bool {\n\tlog.Print(o.config.Auth.Password)\n\n\t_, password, _ := r.BasicAuth()\n\tif o.config.Auth.Password != \"\" && o.config.Auth.Password != password {\n\t\treturn false\n\t}\n\treturn true\n}",
"func isAuthenticated(req *http.Request) bool {\n\tif _, err := sessionStore.Get(req, sessionName); err == nil {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (app *application) isAuthenticated(r *http.Request) bool {\n\tisAuthenticated, ok := r.Context().Value(contextKeyIsAuthenticated).(bool)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn isAuthenticated\n}",
"func IsAuthenticated(r *http.Request) bool {\n\t//todo write logic here\n\treturn true\n}",
"func IsAuthenticated(r *http.Request) bool {\n\tsession, err := app.Store.Get(r, \"auth-session\")\n\tif err != nil {\n\t\toptions := sessions.Options{MaxAge: -1}\n\t\tsessions.NewCookie(\"auth-session\", \"_\", &options)\n\t\tlog.Println(err)\n\t\treturn false\n\t}\n\n\t_, ok := session.Values[\"profile\"]\n\treturn ok\n}",
"func IsAuthenticated(r *http.Request) bool {\n\texists := app.Session.Exists(r.Context(), \"user_id\")\n\treturn exists\n}",
"func IsAuthenticated(cloudAddr string) bool {\n\tcreds := MustLoadDefaultCredentials()\n\tclient := http.Client{}\n\treq, err := http.NewRequest(\"GET\", fmt.Sprintf(\"https://%s/api/authorized\", cloudAddr), nil)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treq.Header.Add(\"Authorization\", fmt.Sprintf(\"Bearer %s\", creds.Token))\n\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tbody, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn string(body) == \"OK\"\n}",
"func (s *Server) isAuth() bool {\n\treturn s.credential != \"\"\n}",
"func (u *AnonUser) IsAuthenticated() bool {\n\treturn false\n}",
"func (u *AuthUser) IsAuthenticated() bool {\n\treturn u.id != 0\n}",
"func isAuthSession(r *http.Request, w http.ResponseWriter) bool {\n\tloggedIn, loggedInMat := loggedIn(r)\n\tloggedInUser, err := user.FromMatrikel(loggedInMat)\n\n\tif !loggedIn || loggedInUser.Usertype == user.STUDENT || err != nil {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (s *server) isAuthentication(authRequired bool, loginStatus bool) bool {\n\treturn !(authRequired) || loginStatus\n}",
"func (u User) IsAuthenticated() bool {\n\treturn u.Email != \"\"\n}",
"func IsUserAuthenticated(r *http.Request) bool {\n\tval := r.Context().Value(authUserAuthenticatedKey)\n\tswitch val.(type) {\n\tcase bool:\n\t\treturn val.(bool)\n\tdefault:\n\t\treturn false\n\t}\n}",
"func (p *hcAutonomywww) isLoggedIn(f http.HandlerFunc) http.HandlerFunc {\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tlog.Debugf(\"isLoggedIn: %v %v %v %v\", remoteAddr(r), r.Method,\n\t\t\tr.URL, r.Proto)\n\n\t\temail, err := p.getSessionEmail(r)\n\t\tif err != nil {\n\t\t\tutil.RespondWithJSON(w, http.StatusUnauthorized, v1.ErrorReply{\n\t\t\t\tErrorCode: int64(v1.ErrorStatusNotLoggedIn),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\t// Check if user is authenticated\n\t\tif email == \"\" {\n\t\t\tutil.RespondWithJSON(w, http.StatusUnauthorized, v1.ErrorReply{\n\t\t\t\tErrorCode: int64(v1.ErrorStatusNotLoggedIn),\n\t\t\t})\n\t\t\treturn\n\t\t}\n\n\t\tf(w, r)\n\t}\n}",
"func RequireAuthentication(ctx *web.Context) bool {\n\tsession, _ := CookieStore.Get(ctx.Request, \"monet-session\")\n\n\tif session.Values[\"authenticated\"] != true {\n\t\tctx.Redirect(302, \"/admin/login/\")\n\t\treturn true\n\t}\n\treturn false\n}",
"func isAuthorized(w http.ResponseWriter, r *http.Request) bool {\n\tusername, err := r.Cookie(\"username\")\n\tif err == nil {\n\t\tsessionID, err := r.Cookie(\"sessionID\")\n\t\tif err == nil {\n\t\t\tif sessionID.Value != \"\" && gostuff.SessionManager[username.Value] == sessionID.Value {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\tgostuff.Show404Page(w, r)\n\treturn false\n}",
"func (RequestAuthenticationToken *TRequestAuthenticationToken) IsAuthenticated() bool {\n\treturn RequestAuthenticationToken.Authenticated\n}",
"func IsAuthenticated(r *http.Request) *Session {\n\tkey := getSession(r)\n\n\tif strings.HasPrefix(key, \"nouser:\") {\n\t\treturn nil\n\t}\n\n\ts := getSessionByKey(key)\n\tif isValidSession(r, s) {\n\t\treturn s\n\t}\n\treturn nil\n}",
"func verifyLogin(r *http.Request) bool {\n\tsession, err := store.Get(r, sessionName)\n\tif err != nil {\n\t\tfmt.Printf(\"Failed to get session: %s\", err)\n\t\treturn false\n\t}\n\tif session.Values[\"LoggedIn\"] != \"yes\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (ctx *RequestContext) IsAuthenticated() bool {\n\treturn ctx.principal != nil\n}",
"func isAuthenticated(next http.HandlerFunc) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tisLogged,_ := CheckLoginStatus(w, r) //first check if the user is logged in\n\t\t\tif isLogged { //if they are serve the function inside\n\t\t\t\tnext.ServeHTTP(w,r)\n\t\t\t}else { //otherwise redirect to the unauthorized page\n\t\t\t\thttp.Redirect(w,r,\"/unauthorized\",http.StatusSeeOther)\n\t\t\t}\n\t\t})\n}",
"func (ih *ImapHandler) isAuthenticated() bool {\n\treturn ih.client.State() != imap.NotAuthenticatedState\n}",
"func (netgear *Netgear) IsLoggedIn() bool {\n return netgear.loggedIn\n}",
"func (it IssueTracker) IsAuthenticated() bool {\n\treturn it.OAuthTransport.Token != nil\n}",
"func (s *Subject) IsAuthenticated() bool {\n\tif s.Session == nil {\n\t\treturn false\n\t}\n\treturn s.Session.IsAuthenticated\n}",
"func loginWithBasicAuthOK(r *rest.Request) bool {\n\tcookie, err := r.Request.Cookie(sessionCookie)\n\tif err != nil {\n\t\tglog.V(1).Info(\"Error getting cookie \", err)\n\t\treturn false\n\t}\n\tsessionsLock.Lock()\n\tdefer sessionsLock.Unlock()\n\tvalue, err := url.QueryUnescape(strings.Replace(cookie.Value, \"+\", url.QueryEscape(\"+\"), -1))\n\tif err != nil {\n\t\tglog.Warning(\"Unable to decode session \", cookie.Value)\n\t\treturn false\n\t}\n\tsession, err := findsessionT(value)\n\tif err != nil {\n\t\tglog.Info(\"Unable to find session \", value)\n\t\treturn false\n\t}\n\tsession.access = time.Now()\n\tglog.V(2).Infof(\"sessionT %s used\", session.ID)\n\treturn true\n}",
"func IsLoggedIn(r *http.Request) (bool, error) {\n\tsession, err := getSession(r)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tt := session.Values[\"accessToken\"]\n\tif t == nil {\n\t\treturn false, nil\n\t}\n\tstoredToken, ok := t.(string)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"bad type of %q value in session: %v\", \"accessToken\", err)\n\t}\n\tgp := session.Values[\"gplusID\"]\n\tif t == nil {\n\t\treturn false, nil\n\t}\n\tgplusId, ok := gp.(string)\n\tif !ok {\n\t\treturn false, fmt.Errorf(\"bad type of %q value in session: %v\", \"gplusID\", err)\n\t}\n\treturn storedToken != \"\" && isAllowed(gplusId), nil\n}",
"func IsAuthenticated(nextHandler http.HandlerFunc) http.HandlerFunc {\n\t// token format \"Authorization\": \"Bearer token\"\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Add(\"content-type\", \"application/json\")\n\n\t\tif r.Header[\"Authorization\"] == nil {\n\t\t\tutils.GetError(NoAuthToken, http.StatusUnauthorized, w)\n\t\t\treturn\n\t\t}\n\t\t\t\t\n\t\tSECRET_KEY, _ := os.LookupEnv(\"AUTH_SECRET_KEY\")\n\t\tif SECRET_KEY == \"\" { SECRET_KEY = secretKey }\n\t\t\n\t\tauthToken := strings.Split(r.Header[\"Authorization\"][0], \" \")[1]\n\t\t\n\t\ttoken, err := jwt.ParseWithClaims(authToken, &MyCustomClaims{}, func(token *jwt.Token) (interface{}, error) {\n\t\t\treturn []byte(SECRET_KEY), nil\n\t\t})\n\n\t\tif claims, ok := token.Claims.(*MyCustomClaims); ok && token.Valid {\n\t\t\t// Extract user information and add it to request context.\n\t\t\tctx := context.WithValue(r.Context(), \"user\", claims.User)\n\t\t\tnextHandler.ServeHTTP(w, r.WithContext(ctx))\n\t\t} else {\n\t\t\tfmt.Print(err)\n\t\t\tutils.GetError(NotAuthorized, http.StatusUnauthorized, w)\n\t\t\treturn\n\t\t}\n\t}\n}",
"func Authorized() gin.HandlerFunc {\n // Build handler function\n return func(c *gin.Context) {\n // Get session field\n user := utils.SessionGet(c, \"user\")\n\n // Check value\n if user != \"\" {\n // Update status\n c.Set(\"user\", user)\n } else {\n // Set status\n c.Redirect(302, \"/login\")\n\n // End the request\n c.Abort()\n return\n }\n\n // Jump to next function\n c.Next()\n }\n}",
"func IsAuthenticated(next http.HandlerFunc) http.HandlerFunc {\n\treturn func(response http.ResponseWriter, request *http.Request) {\n\n\t\t//has cookies?\n\t\tfor _, cookie := range request.Cookies() {\n\t\t\tif cookie.Name == \"Token\" {\n\t\t\t\tok, err := validateCookie(cookie.Value)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Println(err)\n\t\t\t\t}\n\t\t\t\tif ok {\n\t\t\t\t\tlog.Println(\"serve because of cookies\")\n\t\t\t\t\tnext.ServeHTTP(response, request)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\t//has token?\n\t\tok, err := hasJWT(request)\n\t\tif err != nil {\n\t\t\tlog.Fatal(\"there was a error finding token\")\n\t\t}\n\t\tif ok {\n\t\t\tlog.Println(\"serve because of has jwt token\")\n\t\t\tnext.ServeHTTP(response, request)\n\t\t\treturn\n\t\t}\n\t\t//basic auth\n\t\tbasicAuth := hasBasicAuth(request)\n\t\tfmt.Println(basicAuth)\n\t\tif basicAuth {\n\t\t\ttoken, err := GenerateJWT()\n\t\t\tif err != nil {\n\t\t\t\tlog.Fatal(err)\n\t\t\t}\n\t\t\tcookie := http.Cookie{\n\t\t\t\tName: \"Token\",\n\t\t\t\tValue: token,\n\t\t\t\tPath: \"/\",\n\t\t\t}\n\t\t\thttp.SetCookie(response, &cookie)\n\t\t\tfmt.Println(token)\n\t\t\tnext.ServeHTTP(response, request)\n\t\t\treturn\n\n\t\t}\n\t\tresponse.WriteHeader(http.StatusUnauthorized) //unauthorized\n\t\treturn\n\t}\n}",
"func (parser *PdfParser) IsAuthenticated() bool {\n\treturn parser.crypter.authenticated\n}",
"func auth(u *user) bool {\n\tfor _, id := range admins {\n\t\tif u.id == id || u.addr == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func IsAuthenticated(handler httprouter.Handle) httprouter.Handle {\n\treturn func(res http.ResponseWriter, req *http.Request, params httprouter.Params) {\n\t\ttoken := Session.Get(req)\n\t\tif token == \"\" {\n\t\t\thttp.Redirect(res, req, \"/login\", http.StatusSeeOther)\n\t\t} else {\n\t\t\tuser := User{}\n\t\t\tdb.Get().First(&user, \"token = ?\", token)\n\t\t\tif user == (User{}) {\n\t\t\t\thttp.Redirect(res, req, \"/login\", http.StatusSeeOther)\n\t\t\t}\n\t\t\tj := jwt.Jwt{UID: user.ID, Name: user.Name, Username: user.Username}\n\t\t\tisValid := j.ValidateToken(token)\n\t\t\tif !isValid {\n\t\t\t\thttp.Redirect(res, req, \"/login\", http.StatusSeeOther)\n\t\t\t}\n\t\t\thandler(res, req, params)\n\t\t}\n\t}\n}",
"func Authorized(c *gin.Context) {\n\t_, exists := c.Get(\"user\")\n\tif !exists {\n\t\tc.AbortWithStatusJSON(401, gin.H{\n\t\t\t\"status\": false,\n\t\t\t\"message\": \"Unauthorization!!\",\n\t\t})\n\n\t\treturn\n\t}\n}",
"func (err *UnauthorizedError) isLoggedIn() bool {\n\treturn err.SessionId != 0 // SessionId is 0 for non-logged in users\n}",
"func CheckIfAuthenticated(uuid string) bool {\n\t// Make sure a session exists for the extracted UUID\n\t_, sessionFound := sessions[uuid]\n\treturn sessionFound\n}",
"func verifyAuth(w http.ResponseWriter, r *http.Request) {\n\thttpJSON(w, httpMessageReturn{Message: \"OK\"}, http.StatusOK, nil)\n}",
"func CheckAuth(c *gin.Context) {\n\n}",
"func verifyLoggedIn(resp *http.Response) bool {\n\tif resp.Request != nil && resp.Request.URL != nil {\n\t\treturn strings.HasPrefix(resp.Request.URL.String(), loggedinURLPrefix)\n\t}\n\treturn false\n}",
"func isAuthorized(endpoint func(http.ResponseWriter, *http.Request)) http.Handler {\r\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\r\n\t\r\n\t\r\n\tvar N = len(idlist) //Size of the users list\r\n\t\r\n\tif r.Header[\"Clientid\"] != nil { //Let's first check the user filled the 'Clientid' header key\r\n\t\t//We verify whether or not the ID submitted is valid\r\n\t\tvar verif string = \"not ok\"\r\n\t\tfor i:=0;i<N;i++{\r\n\t\t\tif r.Header[\"Clientid\"][0]==idlist[i]{\r\n\t\t\t\tverif=\"ok\" \r\n\t\t\t}\r\n\t\t}\r\n\t\t\r\n\t\t//Verification of the encrypted password submitted by the user\r\n\t\tif verif==\"ok\"{ //The Client ID is valid, let's verify the password is correct and matches the client ID\r\n\t\t\tif r.Header[\"Clientid\"][0]==\"1\" { //First case: the user signed in as user 1\r\n\t\t\t\tif r.Header[\"Token\"] != nil { //Once again, a token is required to continue the authentication process\r\n\t\t\t\t\r\n\t\t\t\t\t\ttoken, err := jwt.Parse(r.Header[\"Token\"][0], func(token *jwt.Token) (interface{}, error) {\r\n\t\t\t\t\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\r\n\t\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"There was an error\")\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\treturn mySigningKeyone, nil\t\r\n\t\t\t\t\t\t})\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\t\tfmt.Fprintf(w, err.Error())\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\t//if the 'token' is valid, user 1 has now signed in\r\n\t\t\t\t\t\tif token.Valid {\r\n\t\t\t\t\t\t\tclientnum=r.Header[\"Clientid\"][0] \r\n\t\t\t\t\t\t\tendpoint(w, r)\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\telse {\r\n\t\t\t\t\t\t\tfmt.Fprintf(w, \"Not Authorized\")\r\n\t\t\t\t\t}\r\n\t\t\t}\telse { //Second case: the user signed in as user 2\r\n\t\t\t\tif r.Header[\"Token\"] != nil { //Once again, a token is required to continue the authentication process\r\n\t\t\t\t\r\n\t\t\t\t\t\ttoken, err := jwt.Parse(r.Header[\"Token\"][0], func(token *jwt.Token) (interface{}, error) {\r\n\t\t\t\t\t\t\tif _, ok := token.Method.(*jwt.SigningMethodHMAC); !ok {\r\n\t\t\t\t\t\t\t\treturn nil, fmt.Errorf(\"There was an error\")\r\n\t\t\t\t\t\t\t}\r\n\t\t\t\t\t\t\t\treturn mySigningKeytwo, nil\t\r\n\t\t\t\t\t\t})\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif err != nil {\r\n\t\t\t\t\t\t\tfmt.Fprintf(w, err.Error())\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t\t//if the 'token' is valid, user 2 has now signed in\r\n\t\t\t\t\t\tif token.Valid {\r\n\t\t\t\t\t\t\tclientnum=r.Header[\"Clientid\"][0] \r\n\t\t\t\t\t\t\tendpoint(w, r)\r\n\t\t\t\t\t\t}\r\n\t\t\t\t\t}\telse {\r\n\t\t\t\t\r\n\t\t\t\t\t\t\tfmt.Fprintf(w, \"Not Authorized\")\r\n\t\t\t\t\t}\r\n\t\t\t}\r\n\t\t}\telse { \r\n\t\t\t\tfmt.Fprintf(w, \"This ID does not exist\")\r\n\t\t}\r\n\t\t\t\t\t\r\n\t\t} else {\r\n\t\t\tfmt.Fprintf(w, \"Insert ID\")\r\n\t\t}\r\n\t})\r\n}",
"func (p *Pub) Auth(secret string) bool {\n\treturn p.appsecret == secret\n}",
"func (a BasicAuthenticator) Auth(r *http.Request) bool {\n\t// Retrieve Authorization header\n\tauth := r.Header.Get(\"Authorization\")\n\n\t// No header provided\n\tif auth == \"\" {\n\t\treturn false\n\t}\n\n\t// Ensure format is valid\n\tbasic := strings.Split(auth, \" \")\n\tif basic[0] != \"Basic\" {\n\t\treturn false\n\t}\n\n\t// Decode base64'd user:password pair\n\tbuf, err := base64.URLEncoding.DecodeString(basic[1])\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn false\n\t}\n\n\t// Split into username/password\n\tcredentials := strings.Split(string(buf), \":\")\n\n\t// Load user by username, verify user exists\n\tuser := new(data.UserRecord).Load(credentials[0], \"username\")\n\tif user == (data.UserRecord{}) {\n\t\treturn false\n\t}\n\n\t// Load user's API key\n\tkey := new(data.APIKey).Load(user.ID, \"user_id\")\n\tif key == (data.APIKey{}) {\n\t\treturn false\n\t}\n\n\t// Hash input password\n\tsha := sha1.New()\n\tif _, err = sha.Write([]byte(credentials[1] + key.Salt)); err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn false\n\t}\n\n\thash := fmt.Sprintf(\"%x\", sha.Sum(nil))\n\n\t// Verify hashes match, using timing-attack resistant method\n\t// If function returns 1, hashes match\n\treturn subtle.ConstantTimeCompare([]byte(hash), []byte(key.Key)) == 1\n}",
"func (sa *staticAuthenticator) Authenticate(user string, pass string) bool {\n\tpassword, found := sa.credentials[user]\n\tif !found {\n\t\treturn false\n\t}\n\treturn pass == password\n}",
"func EnsureLoggedIn(c *gin.Context) bool {\n\tgeneral := c.GetStringMapString(\"general\")\n\n\tif general[\"isloggedin\"] != \"true\" {\n\t\tSendHTML(http.StatusForbidden, c, \"blocked\", nil)\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (handler *SimpleHandler) Authenticate(login, password string) bool {\n\tif _, ok := handler.Users[login]; !ok {\n\t\treturn false\n\t}\n\n\treturn handler.Hash(password) == handler.Users[login]\n}",
"func (auth *AuthCookie) IsAuthenticated() bool {\n\treturn auth.ClaimsIdentity() != \"\"\n}",
"func (a AuthNone) Authenticate(metric *string) (bool, error) {\n\treturn true, nil\n}",
"func (u User) IsAnonymous() bool { return u == \"\" }",
"func verifyLogin(req *restful.Request, resp *restful.Response ) bool {\n\tcookie, err := req.Request.Cookie(\"session-id\")\n\tif cookie.Value != \"\" {\n\t\t_, exists := sessions[cookie.Value]\n\t\tif !exists {\n\t\t\thttp.Redirect(resp.ResponseWriter, req.Request, \"/\", 302)\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t} else if err != nil {\n\t\tfmt.Println(err.Error())\n\t\thttp.Redirect(resp.ResponseWriter, req.Request, \"/\", 302)\n\t\treturn false\n\t} else {\n\t\thttp.Redirect(resp.ResponseWriter, req.Request, \"/\", 302)\n\t\treturn false\n\t}\n}",
"func (e AuthService) Check() (bool, error) {\n\turl := \"/authentication\"\n\n\tresp, err := e.client.MakeRequest(\n\t\t\"GET\",\n\t\turl,\n\t\t0,\n\t\tnil,\n\t)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer resp.Body.Close()\n\n\tswitch resp.StatusCode {\n\tcase http.StatusOK:\n\t\treturn true, nil\n\tcase http.StatusUnauthorized:\n\t\tfallthrough\n\tcase http.StatusForbidden:\n\t\treturn false, nil\n\tdefault:\n\t\treturn false, e.client.handleUnexpectedResponse(resp)\n\t}\n}",
"func (q *Query) IsAuthenticated() bool {\n\treturn q.WalletID != \"\"\n}",
"func IsAuthenticated(messageReceived socket.RawMessageReceived) bool {\n\tdefer mutex.Unlock()\n\tmutex.Lock()\n\n\tif messageReceived.Payload.MessageType == byte(socket.MessageType.ServerConnection) {\n\n\t\tbytes := messageReceived.Payload.Bytes\n\t\ttoken := string(bytes)\n\n\t\tif session, ok := tokenAvailable[token]; ok {\n\n\t\t\tif hasUserSession(session.userID) {\n\t\t\t\tlog.Printf(\"[Auth] -> Connection already exists dropping %s\", messageReceived.SocketID)\n\t\t\t\tsendAuthResponse(false, messageReceived.SocketID)\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\tmodel.DB().Create(&model.Session{\n\t\t\t\tUserID: session.userID,\n\t\t\t\tSessionToken: token,\n\t\t\t\tSocketID: messageReceived.SocketID,\n\t\t\t})\n\n\t\t\tcbroadcast.Broadcast(broadcast.BCreateConnection, session.userID)\n\n\t\t\tsessionCache[messageReceived.SocketID] = session //Set the value in the cache so pacquets are routed fast\n\t\t\tuserCache[session.userID] = messageReceived.SocketID\n\t\t\tlog.Printf(\"[Auth] -> Connection made socket:%s userid:%s\", messageReceived.SocketID, session.userID)\n\t\t\tsendAuthResponse(true, messageReceived.SocketID)\n\t\t\tcbroadcast.Broadcast(socket.BSocketAuthConnected, messageReceived.SocketID) //Broadcast only when the auth is connected\n\t\t\treturn true\n\t\t}\n\t\tsendAuthResponse(false, messageReceived.SocketID)\n\t\treturn false\n\t}\n\n\t_, ok := sessionCache[messageReceived.SocketID]\n\tif !ok {\n\t\t//Send a fail message any time if the socket is not correct or if the user was forced disconnected\n\t\tsendAuthResponse(false, messageReceived.SocketID)\n\t}\n\treturn ok\n}",
"func IsLoggedIn(r *http.Request) bool {\n\tsession, err := loggedUserSession.Get(r, \"authenticated-user-session\")\n\tif err != nil || session.Values[\"username\"] != \"admin\" {\n\t\treturn false\n\t}\n\treturn true\n}",
"func IsLoggedIn(w http.ResponseWriter, r *http.Request) {\n\tsession, err := Store.Get(r, \"session\")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif session.Values[\"loggedin\"] == \"true\" {\n\t\t_, _ = w.Write([]byte(\"true\"))\n\t\treturn\n\t}\n\t_, _ = w.Write([]byte(\"false\"))\n\treturn\n}",
"func checkAuth(w http.ResponseWriter, r *http.Request, s *MemorySessionStore) bool {\n\tauth := r.Header.Get(\"Authorization\")\n\tif auth == \"\" {\n\t\treturnHTTP(w, http.StatusUnauthorized, nil)\n\t\treturn false\n\t}\n\n\tmatch := authRegexp.FindStringSubmatch(auth)\n\tif len(match) != 2 {\n\t\treturnHTTP(w, http.StatusBadRequest, nil)\n\t\treturn false\n\t}\n\n\tid := match[1]\n\tif !s.Check(id) {\n\t\treturnHTTP(w, http.StatusUnauthorized, nil)\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func CheckAuthorizedUser(store JSonStore, req *http.Request) (string, bool) {\n\tvar credentials = req.Header[\"Authorization\"][0];\n\tvar decoded, decodedErr = base64.StdEncoding.DecodeString(credentials)\n\tif decodedErr != nil {\n\t\tlog.Println(\"error could not decode credentials\");\n\t\treturn \"\", false;\n\t}\n\tvar s = string(decoded);\n\tvar parts = strings.Split(s, \":\");\n\tvar username = parts[0];\n\tvar password = parts[1];\n\n\tvar res []map[string]string = store.GetJSonBlobs(map[string]string{\"type\":\"user\", \"username\": username, \"password\": password});\n\n\tif (len(res) == 0) {\n\t\tlog.Println(\"User authenticated.\");\n\t\treturn username, true;\n\t} else {\n\t\tlog.Println(\"User access Denied: \" + username);\n\t\treturn username, false;\n\t}\n}",
"func Authorized(c *gin.Context) {\n\tuserRaw, exists := c.Get(\"user\")\n\tif !exists {\n\t\tc.AbortWithStatus(401)\n\t\treturn\n\t}\n\tsession := sessions.Default(c)\n\n\tuser := userRaw.(models.User)\n\tredisToken := session.Get(user.Email)\n\ttokenString, err := c.Cookie(\"token\")\n\tif err != nil {\n\t\t// try reading HTTP Header\n\t\tauthorization := c.Request.Header.Get(\"Authorization\")\n\t\tif authorization == \"\" {\n\t\t\tc.AbortWithStatus(401)\n\t\t\treturn\n\t\t}\n\t\tsp := strings.Split(authorization, \"Bearer \")\n\t\t// invalid token\n\t\tif len(sp) < 1 {\n\t\t\tc.AbortWithStatus(401)\n\t\t\treturn\n\t\t}\n\t\ttokenString = sp[1]\n\t}\n\n\tif redisToken != tokenString {\n\t\tc.AbortWithStatus(401)\n\t\treturn\n\t}\n}",
"func isloggedin(ctx context.Context) error {\n\tuser, ok := controllers.IsLoggedIn(ctx)\n\tif ok {\n\t\tsession, _ := core.GetSession(ctx.HttpRequest())\n\t\tuserInfo := struct {\n\t\t\tSessionID string `json:\"sessionid\"`\n\t\t\tId string `json:\"id\"`\n\t\t\tName string `json:\"name\"`\n\t\t\tEmail string `json:\"email\"`\n\t\t\tApiToken string `json:\"apitoken\"`\n\t\t}{\n\t\t\tSessionID: session.ID,\n\t\t\tId: user.Id.Hex(),\n\t\t\tName: user.Name,\n\t\t\tEmail: user.Email,\n\t\t\tApiToken: user.Person.ApiToken,\n\t\t}\n\t\tdata, err := json.Marshal(userInfo)\n\t\tif err != nil {\n\t\t\tlog.Error(\"Unable to marshal: \", err)\n\t\t\treturn goweb.Respond.WithStatus(ctx, http.StatusInternalServerError)\n\t\t}\n\t\tctx.HttpResponseWriter().Header().Set(\"Content-Type\", \"application/json\")\n\t\treturn goweb.Respond.With(ctx, http.StatusOK, data)\n\t}\n\treturn goweb.Respond.WithStatus(ctx, http.StatusUnauthorized)\n}",
"func (client *Client) IsLoggedIn() bool {\n\treturn len(client.currentUser) > 0\n}",
"func isOIDCTokenAuth(req *http.Request) bool {\n\treturn req.URL.Path == \"/konnect/v1/token\"\n}",
"func isLoggedIn(req *http.Request) bool {\n\tloginCookie, err := req.Cookie(\"loginCookie\")\n\tif err != nil {\n\t\treturn false\n\t}\n\tusername := mapSessions[loginCookie.Value]\n\t_, ok := mapUsers[username]\n\treturn ok\n}",
"func ServeAuthCheck(ctx *fasthttp.RequestCtx) {\n\tif !security.IsAuthorized(ctx) {\n\t\treturn\n\t}\n\tresponse.SendNothing(ctx)\n}",
"func (a HMACAuthenticator) Auth(r *http.Request) bool {\n\treturn true\n}",
"func IsAuthenticatedQuick(socketID uuid.UUID) bool {\n\tdefer mutex.Unlock()\n\tmutex.Lock()\n\n\t_, ok := sessionCache[socketID]\n\treturn ok\n}",
"func (p *Provider) SessionAuthenticated() bool {\n\tp.sessionLock.RLock()\n\tdefer p.sessionLock.RUnlock()\n\treturn p.sessionAuth\n}",
"func ReplyAuthOk() *Reply { return &Reply{235, []string{\"Authentication successful\"}, nil} }",
"func (c *client) IsAuthencated() bool {\n\treturn c.authencated && c.ws != nil\n}",
"func (s *Server) Authenticate(wg *sync.WaitGroup) {\n\tdefer wg.Done()\n\ts.Session = grequests.NewSession(nil)\n\tresp, err := s.Session.Get(s.URL+loginURL, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to get csrf: \" + s.Name)\n\t}\n\n\tdoc, err := goquery.NewDocumentFromReader(strings.NewReader((resp.String())))\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to parse html: \" + s.Name)\n\t}\n\n\tcsrfToken := \"\"\n\tdoc.Find(\"meta\").Each(func(i int, s *goquery.Selection) {\n\t\tif name, _ := s.Attr(\"name\"); name == \"csrf-token\" {\n\t\t\tcsrfToken, _ = s.Attr(\"content\")\n\t\t\treturn\n\t\t}\n\t})\n\n\tresp, err = s.Session.Get(s.URL+verifyURL, nil)\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to verify: \" + s.Name)\n\t}\n\n\ts.Headers = &map[string]string{\n\t\t\"csrf-token\": csrfToken,\n\t\t\"Connection\": \"keep-alive\",\n\t\t\"Content-Type\": \"application/json\",\n\t\t\"Accept\": \"application/x-www-form-urlencoded; charset=utf-8\",\n\t}\n\n\tresp, err = s.Session.Post(s.URL+passwordURL, &grequests.RequestOptions{\n\t\tJSON: map[string]string{\n\t\t\t\"username\": s.Username,\n\t\t\t\"password\": s.Password,\n\t\t},\n\t\t// Cookies: cookies.Cookies,\n\t\tHeaders: *s.Headers,\n\t})\n\tif err != nil {\n\t\tlog.Println(err)\n\t\tlog.Fatalln(\"Failed to login: \" + s.Name)\n\t}\n\n\t// pretty.Println(resp.String())\n\tif !s.IsThirdParty {\n\t\t// log.Println(\"TOTP logic here...\")\n\t\ttotp := gotp.NewDefaultTOTP(s.Seed)\n\t\t// log.Println(totp.Now())\n\t\tresp, err = s.Session.Post(s.URL+challengeURL, &grequests.RequestOptions{\n\t\t\tJSON: map[string]string{\n\t\t\t\t\"username\": s.Username,\n\t\t\t\t\"password\": s.Password,\n\t\t\t\t\"challenge\": totp.Now(),\n\t\t\t},\n\t\t\t// Cookies: cookies.Cookies,\n\t\t\tHeaders: *s.Headers,\n\t\t})\n\t\tif err != nil {\n\t\t\tlog.Println(err)\n\t\t\tlog.Fatalln(\"Failed to login: \" + s.Name)\n\t\t}\n\t\t// pretty.Println(resp.String())\n\t}\n}",
"func (ba *BasicAuth) IsOK(w http.ResponseWriter, r *http.Request) bool {\n\tusername, password, ok := r.BasicAuth()\n\tif !ok {\n\t\tif DEBUG {\n\t\t\tfmt.Printf(\"r.BasicAuth() failed.\\n\")\n\t\t}\n\t\tgoto end\n\t}\n\n\tif username != ba.UserName || password != ba.Password {\n\t\tok = false\n\t\tgoto end\n\t}\n\nend:\n\tif !ok {\n\t\tw.Header().Set(\"WWW-Authenticate\", fmt.Sprintf(`Basic realm=\"%v\"`, ba.Args.BasicRealmStr))\n\t\tw.WriteHeader(401)\n\t}\n\treturn ok\n}",
"func IsAuthorized(request *http.Request) bool {\n\tauthHeaderValue, exist := request.Header[\"Authorization\"]\n\n\tif !exist && len(authHeaderValue) != 1 {\n\t\treturn false\n\t}\n\n\tsplitBearer := strings.Split(authHeaderValue[0], \"Bearer \")\n\n\tif len(splitBearer) != 2 {\n\t\treturn false\n\t}\n\n\tvar token string = splitBearer[1]\n\n\tvar queryResult string\n\terr := database.Con.QueryRow(\"select token from users where token = ?\", token).Scan(&queryResult)\n\n\tif err == sql.ErrNoRows {\n\t\treturn false\n\t}\n\n\tif err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n\n\treturn true\n}",
"func (auth *AdminAuth) Auth() (bool, error) {\n\tif auth.Bearer != \"\" && auth.AdminID != \"\" {\n\t\tadminModel := &orm.Admin{\n\t\t\tAdminID: auth.AdminID,\n\t\t}\n\t\tif err := adminModel.GetSingle(); err != nil {\n\t\t\treturn false, err\n\t\t}\n\t\tbearer := generateBearerToken(adminModel.Token, adminModel.AdminID, auth.Req)\n\t\tif bearer != auth.Bearer {\n\t\t\treturn false, errors.New(\"bearer token not match\")\n\t\t}\n\t\treturn true, nil\n\t}\n\treturn false, errors.New(\"authenticate parameters invalid\")\n}",
"func RequestIsAuth(r * http.Request) bool {\n\tif r.FormValue(\"username\") != \"\" && r.FormValue(\"key\") != \"\" {\n\t\tuser := UserForName(r.FormValue(\"username\"))\n\t\tif IsUser(user) {\n\t\t\tfor i := 0 ; i < len(Keys); i++ {\n\t\t\t\tif Keys[i].User == user.ID && Keys[i].Key == r.FormValue(\"key\") {\n\t\t\t\t\ttimeNow := time.Now()\n\t\t\t\t\tif timeNow.After(Keys[i].StartTime) && timeNow.Before(Keys[i].EndTime) {\n\t\t\t\t\t\treturn true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}",
"func (g *GitHubImpl) CheckAuth() (bool, error) {\n\n\tURL := fmt.Sprintf(g.URLNoEsc(urls.userRepo))\n\n\treq, _ := http.NewRequest(\"GET\", URL, nil)\n\tq := req.URL.Query()\n\tq.Add(\"access_token\", g.token)\n\treq.URL.RawQuery = q.Encode()\n\n\tclient := http.DefaultClient\n\tres, err := client.Do(req)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tif res.StatusCode != http.StatusOK {\n\t\treturn false, errors.New(strconv.Itoa(res.StatusCode))\n\t}\n\treturn true, nil\n}",
"func (a *App) CheckAuth() {\n\tsynchronizer := NewSynchronizer()\n\tsynchronizer.CheckAuth()\n}",
"func (c *UCSClient) IsLoggedIn() bool {\n\treturn len(c.cookie) > 0\n}",
"func GetAuth() bool {\n\tveryFlagInput()\n\treturn auth\n}",
"func IsAuthorised(ctx context.Context, r *http.Request) bool {\n\trequestAuthorised := r.Header.Get(constant.HeaderAuthorised)\n\tlogger.Debugf(ctx, \"security util\", \"checking if request has been authorised, authorised %s\", requestAuthorised)\n\n\tif requestAuthorised != \"\" && requestAuthorised == \"true\" {\n\t\tlogger.Debugf(ctx, \"security util\", \"request contains authorization information, authorised %s\", requestAuthorised)\n\t\treturn true\n\t}\n\n\tlogger.Debugf(ctx, \"security util\", \"request not authorised\")\n\treturn false\n}",
"func (u *User) Authorize(password string) bool {\n\treturn u.password == sha256.Sum256([]byte(password))\n}",
"func isAuthorizationDataCorrect(authInf AuthInf, responseWriter http.ResponseWriter) bool {\n\tauthInf.Login = html.EscapeString(authInf.Login)\n\tauthInf.Password = GeneratePasswordHash(authInf.Password)\n\tvar count int\n\terr := src.Connection.QueryRow(\"SELECT COUNT(id) as count FROM users WHERE \" +\n\t\t\"login=? AND password=?\", authInf.Login, authInf.Password).Scan(&count)\n\tif err != nil {\n\t\treturn conf.ErrDatabaseQueryFailed.Print(responseWriter)\n\t}\n\tif count > 0 {\n\t\treturn true\n\t} else {\n\t\treturn conf.ErrAuthDataIncorrect.Print(responseWriter)\n\t}\n}",
"func MiddleWareAuth(w http.ResponseWriter, r *http.Request) (bool, int) {\n\tusername := r.FormValue(\"username\")\n\tuserpass := r.FormValue(\"userpass\")\n\tvar dbPass string\n\tvar dbSalt string\n\tvar DbUID int\n\n\tuer := database.QueryRow(\"select user_password ,user_salt,user_id from users where user_nickname = ?\",\n\t\tusername).Scan(&dbPass, &dbSalt, DbUID)\n\n\tif uer != nil {\n\n\t}\n\texpectedPassword := password.GenerateHash(dbSalt, userpass)\n\n\tif dbPass == expectedPassword {\n\t\treturn true, DbUID\n\t}\n\treturn false, 0\n\n}",
"func (ptr *PwkTreeReader) Authenticate() bool {\r\n\tmapKey := GetMapKey(ptr.UsernameHashed, ptr.PasswordHashed)\r\n\treturn ptr.Tree.Has(mapKey)\r\n}",
"func IsAuthenticate(s string) bool {\n\tswitch s {\n\tcase\n\t\t\"all\",\n\t\t\"authenticate\":\n\t\treturn true\n\t}\n\treturn false\n}",
"func (p *OAuthProxy) AuthenticateOnly(rw http.ResponseWriter, req *http.Request) {\n\tlogger := log.NewLogEntry()\n\terr := p.Authenticate(rw, req)\n\tif err != nil {\n\t\tp.StatsdClient.Incr(\"application_error\", []string{\"action:auth\", \"error:unauthorized_request\"}, 1.0)\n\t\tlogger.Error(err, \"error authenticating\")\n\t\thttp.Error(rw, \"unauthorized request\", http.StatusUnauthorized)\n\t}\n\trw.WriteHeader(http.StatusAccepted)\n}",
"func authOk(sucProb byte) error {\n\t/* Get a random number */\n\tb := make([]byte, 1)\n\tif _, err := rand.Read(b); nil != err {\n\t\treturn fmt.Errorf(\"random read: %v\", err)\n\t}\n\t/* See if it's a winner */\n\tif b[0] <= sucProb {\n\t\treturn nil\n\t}\n\treturn errors.New(\"permission denied\")\n}",
"func TestValidAuth(t *testing.T) {\n\tt.Parallel()\n\ta, err := getAuth()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tif !ValidAuth(a) {\n\t\tt.Error(ErrInvalidAuth)\n\t}\n}",
"func IsUnauthenticated(err error) bool {\n\tif err == nil {\n\t\treturn false\n\t}\n\n\tif errors.As(err, &gophercloud.ErrDefault401{}) {\n\t\treturn true\n\t}\n\n\tvar e gophercloud.Err401er\n\treturn errors.As(err, &e)\n}",
"func MustLogin(res http.ResponseWriter, req *http.Request) bool {\r\n\t_, err := GetUserFromSession(req)\r\n\tif err != nil {\r\n\t\tpath := strings.Replace(req.URL.Path[1:], \"%2f\", \"/\", -1)\r\n\t\thttp.Redirect(res, req, PATH_AUTH_Login+\"?redirect=\"+path, http.StatusSeeOther)\r\n\t\treturn true\r\n\t}\r\n\treturn false\r\n}",
"func (app *MgmtApp) defaultUserAuthenticator(client Client, username string, password string) bool {\n return true\n}",
"func (g *Gonf) MetaBasicAuth() bool {\n\treturn g.metaBasicAuth\n}",
"func Login(registry string) (bool, error) {\n\n\tlogrus.Info(\"log in: \", registry)\n\n\tif strings.Contains(registry, \"docker\") {\n\t\tregistry = \"https://\" + registry + \"/v2\"\n\n\t} else {\n\t\tregistry = \"http://\" + registry + \"/v2\"\n\t}\n\n\tclient := httpclient.Get()\n\trequest, err := http.NewRequest(\"GET\", registry, nil)\n\tresponse, err := client.Do(request)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"log in %v: %v\", registry, err)\n\t}\n\tauthorized := response.StatusCode != http.StatusUnauthorized\n\tif !authorized {\n\t\tlogrus.Info(\"Unauthorized access\")\n\t\terr := AuthenticateResponse(response, request)\n\n\t\tif err != nil {\n\t\t\tif err == xerrors.Unauthorized {\n\t\t\t\tauthorized = false\n\t\t\t}\n\t\t\treturn false, err\n\t\t} else {\n\t\t\tauthorized = true\n\t\t}\n\t}\n\n\treturn authorized, nil\n}",
"func Authenticate(usrPassword string) bool {\n\treturn blockchainhelpers.HashStr(usrPassword) != model.PasswordHash\n}",
"func (c *Credentials) AuthUser(name, password string) bool {\n\tif password == \"\" {\n\t\treturn false\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tpw, has := c.users[name]\n\tif !has {\n\t\treturn false\n\t}\n\treturn pw == password\n}",
"func Authenticate(u string) bool {\n\tres, err := http.Get(\"https://reqres.in/api/users\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tvar r map[string][]map[string]interface{}\n\tb, err := io.ReadAll(res.Body)\n\tdefer res.Body.Close()\n\n\tjson.Unmarshal(b, &r)\n\tfor _,v := range r[\"data\"] {\n\t\tif v[\"email\"] == u {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func TestAuthOK(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.Auth(\"method\", func(r res.AuthRequest) {\n\t\t\tr.OK(mock.Result)\n\t\t}))\n\t}, func(s *restest.Session) {\n\t\ts.Auth(\"test.model\", \"method\", nil).\n\t\t\tResponse().\n\t\t\tAssertResult(mock.Result)\n\t})\n}",
"func (c *SQLiteConn) AuthEnabled() (exists bool) {\n\t// NOOP\n\treturn false\n}"
] | [
"0.77358335",
"0.7350411",
"0.71787417",
"0.7091597",
"0.7025409",
"0.7012067",
"0.6959855",
"0.6881702",
"0.68452483",
"0.6821381",
"0.6777624",
"0.6702661",
"0.66996557",
"0.6635247",
"0.661886",
"0.66089845",
"0.6597713",
"0.65401727",
"0.6521271",
"0.65150636",
"0.65101177",
"0.64531845",
"0.64329726",
"0.6401082",
"0.6378416",
"0.63551444",
"0.63356036",
"0.6333912",
"0.6289813",
"0.62610364",
"0.62542385",
"0.6231116",
"0.62191814",
"0.62060714",
"0.61809134",
"0.618076",
"0.61426616",
"0.61405855",
"0.61359024",
"0.61317736",
"0.6130689",
"0.61277187",
"0.6123673",
"0.61201817",
"0.6116548",
"0.6115226",
"0.61071765",
"0.6106851",
"0.6095152",
"0.6074544",
"0.60724497",
"0.60579735",
"0.60064024",
"0.6000818",
"0.59770674",
"0.5969168",
"0.59672755",
"0.5956794",
"0.59522253",
"0.5947747",
"0.5936917",
"0.5930064",
"0.5929622",
"0.5919272",
"0.5917829",
"0.58920383",
"0.5873408",
"0.58699316",
"0.584135",
"0.58401096",
"0.5839208",
"0.58382547",
"0.5837471",
"0.58198637",
"0.5818315",
"0.581295",
"0.5807512",
"0.5806132",
"0.58061296",
"0.5800197",
"0.57997084",
"0.57865405",
"0.5786309",
"0.577199",
"0.57591426",
"0.57575864",
"0.57457274",
"0.5730328",
"0.5722149",
"0.5721098",
"0.5710991",
"0.5700802",
"0.5700608",
"0.56939113",
"0.56807727",
"0.5679904",
"0.56777513",
"0.56765455",
"0.56749743",
"0.5664911"
] | 0.58030546 | 79 |
Fetch retrieves a phone number resource. See for more details. Context is defaulted to Background. See for more information. | func (c Client) Fetch() (*FetchIncomingPhoneNumberResponse, error) {
return c.FetchWithContext(context.Background())
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c Client) FetchWithContext(context context.Context) (*FetchIncomingPhoneNumberResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Accounts/{accountSid}/IncomingPhoneNumbers/{sid}.json\",\n\t\tPathParams: map[string]string{\n\t\t\t\"accountSid\": c.accountSid,\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tresponse := &FetchIncomingPhoneNumberResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (h *Harness) Phone(id string) string { return h.phoneCCG.Get(id) }",
"func GetPhoneNumberEndPoint(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tphonenumber := vars[\"phonenumber\"]\n\tusername := vars[\"username\"]\n\n\tperson := &util.Person{\n\t\tName: username,\n\t\tPhonenumber: phonenumber,\n\t\tAddress: \"\",\n\t}\n\n\tperson = query.CheckAndFetch(person)\n\tif person == nil {\n\t\tperson = &util.Person{\n\t\t\tName: \"Does Not Match Records\",\n\t\t\tPhonenumber: phonenumber,\n\t\t\tAddress: \"Possible captcha violation, visit truepeoplesearch.com and prove you are not a robot.\",\n\t\t}\n\t\t//fmt.Fprintf(w, \"\")\n\t}\n\tdata, err := json.Marshal(person)\n\tif err != nil {\n\t\tfmt.Printf(\"Error in Marshalling JSON: %s\", err)\n\t}\n\n\tfmt.Println(string(data))\n\tfmt.Fprintf(w, \"%s\", data)\n}",
"func (s *identityServer) Retrieve(ctx context.Context, in *identityPb.RetrieveRequest) (*identityPb.RetrieveReply, error) {\n\tfmt.Printf(\"Identity Retrieve Called %s\\n\", data[in.Id].PhoneNumber)\n\treturn &identityPb.RetrieveReply{PhoneNumber: data[in.Id].PhoneNumber}, nil\n}",
"func (__receiver_AService *AvailablePhoneNumberService) Get() *AvailablePhoneNumberService {\n\tif len(__receiver_AService.ResourceID) == 0 {\n\t\t__receiver_AService.action = types.BULKREAD\n\t\t__receiver_AService.data = resources.AvailablePhoneNumberFilter{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.BULKREAD]\n\t} else {\n\t\t__receiver_AService.data = struct{}{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.READ]\n\t\t__receiver_AService.action = types.READ\n\t}\n\treturn __receiver_AService\n}",
"func (m *BookingBusiness) GetPhone()(*string) {\n val, err := m.GetBackingStore().Get(\"phone\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (o *RelationshipManager) GetPhoneNumber() string {\n\tif o == nil || o.PhoneNumber.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.PhoneNumber.Get()\n}",
"func (s *SmartContract) QueryPhone(ctx contractapi.TransactionContextInterface, id string) (*Phone, error) {\n phoneAsBytes, err := ctx.GetStub().GetState(id)\n\n if err != nil {\n return nil, fmt.Errorf(\"Failed to read from world state. %s\", err.Error())\n }\n\n if phoneAsBytes == nil {\n return nil, fmt.Errorf(\"%s does not exist\", id)\n }\n\n f := new(Phone)\n _ = json.Unmarshal(phoneAsBytes, f)\n \n return f, nil\n}",
"func (pv *Client) FetchNumbers(url string, page int) []models.PhoneNumber {\n\tnumbers := make([]models.PhoneNumber, 0)\n\t// Load the HTML document\n\tdoc, err := models.FetchPage(httpClient, url, setDefaultHeaders)\n\tif err != nil {\n\t\treturn numbers\n\t}\n\n\tdoc.Find(\"div.number-boxes div.number-boxes-item\").Each(func(i int, s *goquery.Selection) {\n\t\tstatus := \"online\"\n\t\tnumberURL := s.Find(\"a\").AttrOr(\"href\", \"nil\")\n\t\tnumber := s.Find(\".number-boxes-itemm-number\").Text()\n\t\tid := strings.Replace(numberURL, \"/sms/\", \"\", 1)\n\n\t\tif len(number) == 0 || strings.Contains(numberURL, \"register\") {\n\t\t\treturn\n\t\t}\n\n\t\tnum, err := libphonenumber.Parse(number, \"\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to parse number: %s, error: %+v\", number, err)\n\t\t}\n\n\t\tregionNumber := libphonenumber.GetRegionCodeForNumber(num)\n\t\tcountryCode := libphonenumber.GetCountryCodeForRegion(regionNumber)\n\t\tnationalNum := libphonenumber.GetNationalSignificantNumber(num)\n\n\t\tnumbers = append(numbers, models.PhoneNumber{\n\t\t\tProvider: pv.Name(),\n\t\t\tProviderID: id,\n\t\t\tRawNumber: number,\n\t\t\tNumber: nationalNum,\n\t\t\tCountry: regionNumber,\n\t\t\tCountryCode: countryCode,\n\t\t\tCountryName: utils.FindCountryName(regionNumber),\n\t\t\tCountrySlug: slug.Make(utils.FindCountryName(regionNumber)),\n\t\t\tStatus: status,\n\t\t})\n\t})\n\n\treturn numbers\n}",
"func (o *WhatsAppPhoneWhatsAppApiContent) GetPhone() string {\n\tif o == nil || o.Phone == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func (m *RelatedContact) GetMobilePhone()(*string) {\n val, err := m.GetBackingStore().Get(\"mobilePhone\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func LookupPhoneNumber(ctx *pulumi.Context, args *LookupPhoneNumberArgs, opts ...pulumi.InvokeOption) (*LookupPhoneNumberResult, error) {\n\topts = internal.PkgInvokeDefaultOpts(opts)\n\tvar rv LookupPhoneNumberResult\n\terr := ctx.Invoke(\"aws-native:connect:getPhoneNumber\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}",
"func TaskNumGET(g *gin.Context) {\n\tg.JSON(http.StatusOK, gin.H{\"message\": \"ok\", \"num\": TaskNum})\n}",
"func (o *UserDisco) GetPhone() string {\n\tif o == nil || o.Phone == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func (o *CustomerInfoResponse) GetPhone() string {\n\tif o == nil || IsNil(o.Phone) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func (h *Harness) PhoneCC(cc, id string) string { return h.phoneCCG.GetWithArg(cc, id) }",
"func (o *EntityWatchlistScreeningSearchTerms) GetPhoneNumber() string {\n\tif o == nil || o.PhoneNumber.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn *o.PhoneNumber.Get()\n}",
"func (o *CustomerInfo) GetPhone() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Phone\n}",
"func (v *VerifyPhoneNumberService) Get(ctx context.Context, verifyServiceID string, sid string) (*VerifyPhoneNumber, error) {\n\tverify := new(VerifyPhoneNumber)\n\terr := v.client.GetResource(ctx, servicesPathPart+\"/\"+verifyServiceID+\"/\"+verificationsPathPart, sid, verify)\n\treturn verify, err\n}",
"func (m *CommunicationsIdentitySet) GetPhone()(Identityable) {\n val, err := m.GetBackingStore().Get(\"phone\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(Identityable)\n }\n return nil\n}",
"func (obj *MessengerUser) PhoneNumber() string {\n\tproxyResult := /*pr4*/ C.vssq_messenger_user_phone_number(obj.cCtx)\n\n\truntime.KeepAlive(obj)\n\n\treturn C.GoString(C.vsc_str_chars(proxyResult)) /* r5.1 */\n}",
"func (u *User) GetPhone() domain.PhoneNumber {\n\treturn \"\"\n}",
"func (c *Client) AvailablePhoneNumber() *AvailablePhoneNumberService {\n\tavailablephonenumberService := AvailablePhoneNumberService{Client: *c}\n\tavailablephonenumberService.validActions = types.READ | types.BULKREAD | 0x00\n\treturn &availablephonenumberService\n}",
"func (__receiver_AService *AvailablePhoneNumberService) ID(id string) *AvailablePhoneNumberService {\n\t__receiver_AService.ResourceID = id\n\tswitch __receiver_AService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_AService.data = struct{}{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.READ]\n\t\t__receiver_AService.action = types.READ\n\n\t}\n\treturn __receiver_AService\n}",
"func (me *AccountRepository) GetByPhone(ctx context.Context, phone string) (*account.Account, error) {\n\tstatement := dbx.NewStatement(\"SELECT * FROM account WHERE phone = :phone\")\n\tstatement.AddParameter(\"phone\", phone)\n\n\trows, err := me.db.QueryStatementContext(ctx, statement)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar retrievedAccount *account.Account\n\n\tfor rows.Next() {\n\t\tretrievedAccount = &account.Account{}\n\t\terr := rows.Scan(&retrievedAccount.ID, &retrievedAccount.Name, &retrievedAccount.Email, &retrievedAccount.Phone, &retrievedAccount.Password, &retrievedAccount.CreatedAt, &retrievedAccount.UpdatedAt, &retrievedAccount.DeletedAt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn retrievedAccount, nil\n\t}\n\n\treturn nil, nil\n}",
"func (d UserData) Phone() string {\n\tval := d.ModelData.Get(models.NewFieldName(\"Phone\", \"phone\"))\n\tif !d.Has(models.NewFieldName(\"Phone\", \"phone\")) {\n\t\treturn *new(string)\n\t}\n\treturn val.(string)\n}",
"func Phone() string { return phone(globalFaker.Rand) }",
"func (o *DepositSwitchTargetUser) GetPhone() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Phone\n}",
"func Fetch(c *gin.Context) {\n\tvar recipe model.Recipe\n\tid, err := strconv.ParseUint(c.Param(\"id\"), 10, 64)\n\tif err != nil {\n\t\tc.JSON(http.StatusBadRequest, gin.H{\"message\": \"invalid id-format: \" + c.Param(\"id\")})\n\t\treturn\n\t}\n\trecipe.ID = uint(id)\n\terr = recipe.Read()\n\tif err != nil {\n\t\tc.JSON(http.StatusNotFound, gin.H{\"message\": err.Error()})\n\t\treturn\n\t}\n\n\tc.JSON(http.StatusOK, gin.H{\"message\": \"found\", \"data\": recipe.Description()})\n}",
"func (p Phone) Number() string {\n\tnumber := p.Faker.RandomStringElement(phoneFormats)\n\n\t// {{areaCode}}\n\tnumber = strings.Replace(number, \"{{areaCode}}\", p.AreaCode(), 1)\n\n\t// {{exchangeCode}}\n\tnumber = strings.Replace(number, \"{{exchangeCode}}\", p.ExchangeCode(), 1)\n\n\treturn p.Faker.Numerify(number)\n}",
"func (u *User) Phone() string { return u.userData.Phone }",
"func (o *Credit1099Payer) GetTelephoneNumber() string {\n\tif o == nil || o.TelephoneNumber.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.TelephoneNumber.Get()\n}",
"func (o *MicrosoftGraphEducationSchool) GetPhone() string {\n\tif o == nil || o.Phone == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func getConsumber(c *cli.Context) error {\n\tid := c.String(\"id\")\n\tusername := c.String(\"username\")\n\n\tvar requestURL string\n\tif id != \"\" {\n\t\trequestURL = fmt.Sprintf(\"%s/%s\", CONSUMER_RESOURCE_OBJECT, id)\n\t} else if username != \"\" {\n\t\trequestURL = fmt.Sprintf(\"%s/%s\", CONSUMER_RESOURCE_OBJECT, username)\n\t} else {\n\t\treturn fmt.Errorf(\"username and id invalid.\")\n\t}\n\n\tctx, cannel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cannel()\n\n\tserverResponse, err := client.GatewayClient.Get(ctx, requestURL, nil, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbody, err := ioutil.ReadAll(serverResponse.Body)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttools.IndentFromBody(body)\n\n\treturn nil\n}",
"func GetPhoneNumbers(mctx libkb.MetaContext) ([]keybase1.UserPhoneNumber, error) {\n\targ := libkb.APIArg{\n\t\tEndpoint: \"user/phone_numbers\",\n\t\tSessionType: libkb.APISessionTypeREQUIRED,\n\t}\n\tvar resp phoneNumbersResponse\n\terr := mctx.G().API.GetDecode(mctx, arg, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.PhoneNumbers, nil\n}",
"func (s UserSet) Phone() string {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Phone\", \"phone\")).(string)\n\treturn res\n}",
"func GetExampleNumber(regionCode string) *PhoneNumber {\n\treturn GetExampleNumberForType(regionCode, FIXED_LINE)\n}",
"func (c *Client) Get(ctx context.Context, number int) (Comic, error) {\n\treturn c.do(ctx, fmt.Sprintf(\"/%d/info.0.json\", number))\n}",
"func (s *SavedPhoneContact) GetPhone() (value string) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.Phone\n}",
"func (c client) FetchContact(id string) (*Contact, error) {\n\tparams := make(url.Values)\n\tparams.Set(\"user_id\", id)\n\treturn c.FetchContactWithParams(params)\n}",
"func (f *Faker) Phone() string { return phone(f.Rand) }",
"func GetNumberContactsHandler(c *gin.Context) {\n\tphonenr := c.Param(\"number\")\n\n\tDBUser, DBPass, DBName := GetSettings()\n\tdb, err := sql.Open(\"mysql\", DBUser+\":\"+DBPass+DBName)\n\tcheckErr(err)\n\tdefer db.Close() //Close DB after function has returned a val\n\n\tstmtOut, err := db.Prepare(\"SELECT DISTINCT name, phonenumber FROM user INNER JOIN groupmember ON phonenumber = user_number WHERE group_id IN (SELECT group_id FROM groupmember WHERE user_number = ?) AND NOT phonenumber = ?\")\n\tcheckErr(err)\n\tdefer stmtOut.Close()\n\n\trows, err := stmtOut.Query(phonenr, phonenr)\n\tcheckErr(err)\n\n\tvar contacts []*Contacts\n\tfor rows.Next() {\n\t\tp := new(Contacts)\n\t\tif err := rows.Scan(&p.Name, &p.Phonenumber); err != nil {\n\t\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t\t}\n\t\tcontacts = append(contacts, p)\n\t}\n\tif err := rows.Err(); err != nil {\n\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t}\n\tc.JSON(http.StatusAccepted, contacts)\n\n}",
"func (b *Builder) Phone(s string) *Builder {\n\treturn b.appendMessage(s, func(offset, limit int) tg.MessageEntityClass {\n\t\treturn &tg.MessageEntityPhone{Offset: offset, Length: limit}\n\t})\n}",
"func (i *Invoice) GetPhoneToProvider() (value bool) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.Flags.Has(6)\n}",
"func (*Person_PhoneNumber) Descriptor() ([]byte, []int) {\n\treturn file_address_book_addressbook_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (a *TelariaAdapter) FetchEndpoint() string {\n\treturn a.URI\n}",
"func (c *Client) GetUserByPhoneNumber(ctx context.Context, phone string) (*UserRecord, error) {\n\tif err := validatePhone(phone); err != nil {\n\t\treturn nil, err\n\t}\n\trequest := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{\n\t\tPhoneNumber: []string{phone},\n\t}\n\treturn c.getUser(ctx, request)\n}",
"func (o *Port) Fetch() *bambou.Error {\n\n\treturn bambou.CurrentSession().FetchEntity(o)\n}",
"func (o *GroupReplaceRequest) GetPhoneNumber() string {\n\tif o == nil || o.PhoneNumber == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.PhoneNumber\n}",
"func (o GetUsersUserOutput) Phone() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetUsersUser) string { return v.Phone }).(pulumi.StringOutput)\n}",
"func (re *stubRegistrationService) FetchEntry(ctx context.Context, request pb.RegistrationEntryID) (common.RegistrationEntry, error) {\n\treply, err := re.registration.FetchEntry(request.Id)\n\tif err != nil {\n\t\treturn common.RegistrationEntry{}, err\n\t}\n\treturn *reply, err\n}",
"func decodeUserInfoByPhoneRequest(_ context.Context, r interface{}) (interface{}, error) {\n\treq := r.(*pb.UserInfoByPhoneRequest)\n\treturn endpoint.UserInfoByPhoneRequest{\n\t\tPhone:req.Phone,\n\t},nil\n}",
"func (*PhoneNumber) Descriptor() ([]byte, []int) {\n\treturn file_contacts_phones_proto_rawDescGZIP(), []int{0}\n}",
"func (b *GroupsEditBuilder) Phone(v string) *GroupsEditBuilder {\n\tb.Params[\"phone\"] = v\n\treturn b\n}",
"func (s *PhoneNumbersService) GetPhoneNumbers(params GetPhoneNumbersParams) (*GetPhoneNumbersReturn, *structure.VError, error) {\n\treq, err := s.client.NewRequest(\"POST\", \"GetPhoneNumbers\", params)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tresponse := &GetPhoneNumbersReturn{}\n\tverr, err := s.client.MakeResponse(req, response)\n\tif verr != nil || err != nil {\n\t\treturn nil, verr, err\n\t}\n\treturn response, nil, nil\n}",
"func (v *VerifyPhoneNumberService) Check(ctx context.Context, verifyServiceID string, data url.Values) (*CheckPhoneNumber, error) {\n\tcheck := new(CheckPhoneNumber)\n\terr := v.client.CreateResource(ctx, servicesPathPart+\"/\"+verifyServiceID+\"/\"+verificationCheckPart, data, check)\n\treturn check, err\n}",
"func AuthPhoneNumber(ctx context.Context, phoneNumber, pin string) (string, error) {\n\tidentity, err := phoneAuthHandler.Authenticate(ctx, phoneNumber, pin)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// TODO map to graphql viewer object and expose graphql viewer here by default.\n\treturn identity.Token, nil\n}",
"func GetExampleNumberForNonGeoEntity(countryCallingCode int) *PhoneNumber {\n\tvar metadata *PhoneMetadata = getMetadataForNonGeographicalRegion(countryCallingCode)\n\tif metadata == nil {\n\t\treturn nil\n\t}\n\n\t// For geographical entities, fixed-line data is always present. However, for non-geographical\n\t// entities, this is not the case, so we have to go through different types to find the\n\t// example number.\n\tdescPriority := []*PhoneNumberDesc{metadata.GetMobile(), metadata.GetTollFree(),\n\t\tmetadata.GetSharedCost(), metadata.GetVoip(), metadata.GetVoicemail(), metadata.GetUan(), metadata.GetPremiumRate()}\n\n\tfor _, desc := range descPriority {\n\t\tif desc != nil && desc.GetExampleNumber() != \"\" {\n\t\t\tnum, err := Parse(\"+\"+strconv.Itoa(countryCallingCode)+desc.GetExampleNumber(), \"ZZ\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn num\n\t\t}\n\t}\n\treturn nil\n}",
"func ClientTelGet(ct models.ClientTel, m *models.Message) {\n\tif ct.ID <= 0 {\n\t\tm.Code = http.StatusBadRequest\n\t\tm.Message = \"especifique telefono de cliente\"\n\t\treturn\n\t}\n\tdb := configuration.GetConnection()\n\tdefer db.Close()\n\terr := getClientTel(&ct, db)\n\tif err != nil {\n\t\tm.Code = http.StatusBadRequest\n\t\tm.Message = \"no se encotro telefono de cliente\"\n\t\treturn\n\t}\n\tm.Code = http.StatusOK\n\tm.Message = \"telefono de cliente encontrado\"\n\tm.Data = ct\n}",
"func (m *User) GetMobilePhone()(*string) {\n return m.mobilePhone\n}",
"func (c Client) Fetch() (*FetchAlphaSenderResponse, error) {\n\treturn c.FetchWithContext(context.Background())\n}",
"func (u *User) GetPhone() (value string, ok bool) {\n\tif u == nil {\n\t\treturn\n\t}\n\tif !u.Flags.Has(4) {\n\t\treturn value, false\n\t}\n\treturn u.Phone, true\n}",
"func Parse(numberToParse, defaultRegion string) (*PhoneNumber, error) {\n\tvar phoneNumber *PhoneNumber = &PhoneNumber{}\n\terr := ParseToNumber(numberToParse, defaultRegion, phoneNumber)\n\treturn phoneNumber, err\n}",
"func Number(phoneNum string) (string, error) {\n\tre, err := regexp.Compile(`[[:digit:]]*`)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"error regexp\")\n\t}\n\tnums := strings.Join(re.FindAllString(phoneNum, -1), \"\")\n\n\tif len(nums) < 10 || len(nums) > 11 || (len(nums) == 11 && nums[:1] != \"1\") {\n\t\treturn \"\", errors.New(\"Error digits\")\n\t} else if len(nums) == 11 {\n\t\treturn nums[1:], nil\n\t}\n\treturn nums, nil\n}",
"func (c *Client) ModifyPhoneNumber(phone, token string) (*ModifyPhoneNumberResponse, error) {\n\tp := modifyPhoneNumberParams{\n\t\tCellNum: phone,\n\t}\n\tparamMap, err := toMap(p, map[string]string{\n\t\t\"token\": token,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := httpRequest(c, p.URI(), paramMap, nil, func() interface{} {\n\t\treturn &ModifyPhoneNumberResponse{}\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsp := ret.(*ModifyPhoneNumberResponse)\n\n\tif err = checkErr(rsp.Code, rsp.SubCode, rsp.Message); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rsp, nil\n}",
"func (a *AccountClient) Fetch(id string) (*Resource, error) {\n\n\t// Validate the account ID\n\t_, err := uuid.FromString(id)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"account ID must be a valid UUID\")\n\t}\n\n\tresp, err := a.client.R().\n\t\tSetResult(&Resource{}).\n\t\tSetPathParams(map[string]string{\"account.id\": id}).\n\t\tGet(\"/v1/organisation/accounts/{account.id}\")\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"fetch account for ID %s failed: %s\", id, err)\n\t}\n\n\tif resp.Error() != nil {\n\t\treturn nil, getAPIError(resp)\n\t}\n\treturn resp.Result().(*Resource), nil\n}",
"func Contact(id string, token string) (ContactReturn, error) {\n\n\t// Set config for new request\n\tr := Request{\"/contacts/\" + id, \"GET\", token, nil}\n\n\t// Send new request\n\tresponse, err := r.Send()\n\tif err != nil {\n\t\treturn ContactReturn{}, err\n\t}\n\n\t// Close response body after function ends\n\tdefer response.Body.Close()\n\n\t// Decode data\n\tvar decode ContactReturn\n\n\terr = json.NewDecoder(response.Body).Decode(&decode)\n\tif err != nil {\n\t\treturn ContactReturn{}, err\n\t}\n\n\t// Return data\n\treturn decode, nil\n\n}",
"func (service *ContrailService) RESTGetUser(c echo.Context) error {\n\tid := c.Param(\"id\")\n\trequest := &models.GetUserRequest{\n\t\tID: id,\n\t}\n\tctx := c.Request().Context()\n\tresponse, err := service.GetUser(ctx, request)\n\tif err != nil {\n\t\treturn common.ToHTTPError(err)\n\t}\n\treturn c.JSON(http.StatusOK, response)\n}",
"func (*InputPhoneNumber) Descriptor() ([]byte, []int) {\n\treturn file_contacts_phones_proto_rawDescGZIP(), []int{1}\n}",
"func Phone(phone string) (string, error) {\n\tvar validPhone string\n\terrorInvalid := errors.New(\"invalid phone format\")\n\tif len(phone) > 20 || len(phone) < 10 {\n\t\treturn validPhone, errorInvalid\n\t}\n\treg := regexp.MustCompile(`^[0-9\\s+.-]+$`)\n\tif !reg.MatchString(phone) {\n\t\treturn validPhone, errorInvalid\n\t}\n\tvalidPhone = strings.TrimSpace(phone)\n\treturn validPhone, nil\n}",
"func (u *UserModel) GetByPhone(phone string) (*models.User, error) {\n\treg := regexp.MustCompile(\"[^0-9]\")\n\tphone = reg.ReplaceAllString(phone, \"\")\n\n\tstmt := `SELECT u.id, u.uuid, u.first_name, u.last_name, u.email, u.phone, s.slug, u.created\n\t\t\t FROM user AS u\n\t\t LEFT JOIN ref_user_status AS s ON u.status_id = s.id\n\t\t\t WHERE REGEXP_REPLACE(u.phone, '[^0-9]', '') = ?`\n\n\tuser := &models.User{}\n\terr := u.DB.QueryRow(stmt, NormalizePhone(phone)).Scan(&user.ID, &user.UUID, &user.FirstName, &user.LastName, &user.Email, &user.Phone, &user.Status, &user.Created)\n\n\tif err == sql.ErrNoRows {\n\t\treturn nil, models.ErrNoRecord\n\t} else if err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn user, nil\n}",
"func (twilio *Twilio) GetAvailablePhoneNumbers(numberType PhoneNumberType, country string, options AvailablePhoneNumbersOptions) ([]*AvailablePhoneNumber, *Exception, error) {\n\t// build initial request\n\tresourceName := country + \"/\" + numberType.String() + \".json\"\n\treq, err := http.NewRequest(http.MethodGet, twilio.buildUrl(\"AvailablePhoneNumbers/\"+resourceName), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// authenticate\n\treq.SetBasicAuth(twilio.getBasicAuthCredentials())\n\n\t// set query string\n\tqueryValues, err := query.Values(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.URL.RawQuery = queryValues.Encode()\n\n\t// perform request\n\tres, err := twilio.do(req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\tif res.StatusCode != http.StatusOK {\n\t\texception := new(Exception)\n\t\terr = decoder.Decode(exception)\n\t\treturn nil, exception, err\n\t}\n\n\t// decode response\n\tavailablePhoneNumberResponse := new(struct {\n\t\tAvailablePhoneNumbers []*AvailablePhoneNumber `json:\"available_phone_numbers\"`\n\t})\n\tdecoder.Decode(availablePhoneNumberResponse)\n\treturn availablePhoneNumberResponse.AvailablePhoneNumbers, nil, nil\n}",
"func (s *Standalone) _fetchPid(ctx context.Context, address string) (*upid.UPID, error) {\n\t//TODO(jdef) need SSL support\n\turi := fmt.Sprintf(\"http://%s/state.json\", address)\n\treq, err := http.NewRequest(\"GET\", uri, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar pid *upid.UPID\n\terr = s.httpDo(ctx, req, func(res *http.Response, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer res.Body.Close()\n\t\tif res.StatusCode != 200 {\n\t\t\treturn fmt.Errorf(\"HTTP request failed with code %d: %v\", res.StatusCode, res.Status)\n\t\t}\n\t\tblob, err1 := ioutil.ReadAll(res.Body)\n\t\tif err1 != nil {\n\t\t\treturn err1\n\t\t}\n\t\tlog.V(3).Infof(\"Got mesos state, content length %v\", len(blob))\n\t\ttype State struct {\n\t\t\tLeader string `json:\"leader\"` // ex: master(1)@10.22.211.18:5050\n\t\t}\n\t\tstate := &State{}\n\t\terr = json.Unmarshal(blob, state)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tpid, err = upid.Parse(state.Leader)\n\t\treturn err\n\t})\n\treturn pid, err\n}",
"func (o *CustomerInfo) GetPhoneOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn &o.Phone, true\n}",
"func GetPersonAddress(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tperson, err := models.LoadPersonByAddress(id)\n\tif err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"error\": \"cannot find person: \" + id,\n\t\t})\n\t\treturn\n\t}\n\tif len(person) == 0 {\n\t\texplain := \"person \" + id + \" does not exist.\"\n\t\tc.JSON(400, gin.H{\n\t\t\t\"error\": explain,\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(200, person)\n}",
"func (r *DeviceManagementPartnerRequest) Get(ctx context.Context) (resObj *DeviceManagementPartner, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func GetCustomerbyPhone(details string) (customer Customers, err error) {\r\n\tvar rows *sql.Rows\r\n\tif rows, err = Get(fmt.Sprintf(`select * from customers where %s and deleted_at is null;`, details)); err != nil {\r\n\t\tCheckError(\"Error getting Customer details.\", err, false)\r\n\t\treturn Customers{}, err\r\n\t}\r\n\r\n\tdefer rows.Close()\r\n\tfor rows.Next() {\r\n\t\tif err = rows.Scan(&customer.ID, &customer.CardCode, &customer.CardName, &customer.Address, &customer.Phone, &customer.Phone1, &customer.City, &customer.Email, &customer.Synced, &customer.CreatedBy, &customer.CreatedAt, &customer.UpdatedAt, &customer.DeletedAt); err != nil {\r\n\t\t\tCheckError(\"Error Scanning Customers.\", err, false)\r\n\t\t}\r\n\t}\r\n\r\n\treturn\r\n}",
"func (s *server) Fetch(ctx context.Context, req *discovery.DiscoveryRequest) (*discovery.DiscoveryResponse, error) {\n\treturn s.rest.Fetch(ctx, req)\n}",
"func (o *UserDisco) GetPhoneOk() (*string, bool) {\n\tif o == nil || o.Phone == nil {\n\t\treturn nil, false\n\t}\n\treturn o.Phone, true\n}",
"func restGet(room string, client *http.Client, ip string, port string) (*http.Response, error) {\n\treturn client.Get(fmt.Sprintf(\"http://%v/rest/%v\", net.JoinHostPort(ip, port), room))\n}",
"func GetAccount(r *http.Request) (string, error) {\n\tif r.Context().Value(identity.Key) != nil {\n\t\tident := identity.Get(r.Context())\n\t\tif ident.Identity.AccountNumber != \"\" {\n\t\t\treturn ident.Identity.AccountNumber, nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"cannot find account number\")\n\n}",
"func (*Person_PhoneNumber) Descriptor() ([]byte, []int) {\n\treturn file_protomessage_proto_rawDescGZIP(), []int{0, 0}\n}",
"func GetExampleNumberForType(regionCode string, typ PhoneNumberType) *PhoneNumber {\n\t// Check the region code is valid.\n\tif !isValidRegionCode(regionCode) {\n\t\treturn nil\n\t}\n\t//PhoneNumberDesc (pointer?)\n\tvar desc = getNumberDescByType(getMetadataForRegion(regionCode), typ)\n\texNum := desc.GetExampleNumber()\n\tif len(exNum) > 0 {\n\t\tnum, err := Parse(exNum, regionCode)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn num\n\t}\n\treturn nil\n}",
"func (o LookupPhoneNumberResultOutput) Address() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v LookupPhoneNumberResult) *string { return v.Address }).(pulumi.StringPtrOutput)\n}",
"func (h *Harness) TwilioNumber(id string) string {\n\tif id != \"\" {\n\t\tid = \":\" + id\n\t}\n\tnum := h.phoneCCG.Get(\"twilio\" + id)\n\n\terr := h.tw.RegisterSMSCallback(num, h.URL()+\"/v1/twilio/sms/messages\")\n\tif err != nil {\n\t\th.t.Fatalf(\"failed to init twilio (SMS callback): %v\", err)\n\t}\n\terr = h.tw.RegisterVoiceCallback(num, h.URL()+\"/v1/twilio/voice/call\")\n\tif err != nil {\n\t\th.t.Fatalf(\"failed to init twilio (voice callback): %v\", err)\n\t}\n\n\treturn num\n}",
"func (o *RelationshipManager) GetPhoneNumberOk() (*string, bool) {\n\tif o == nil {\n\t\treturn nil, false\n\t}\n\treturn o.PhoneNumber.Get(), o.PhoneNumber.IsSet()\n}",
"func (app *Application) QueryPhoneHandler(w http.ResponseWriter, r *http.Request) {\n\tvar data map[string]interface{}\n\tdata = make(map[string]interface{})\n\tvar keymap map[string]string\n\tkeymap = make(map[string]string)\n\tvar batch []string\n\toName := \"smartphone\"\n\tuName := \"wzx\"\n\tvar binfo webutil.CompanyInfo\n\tvar dinfo webutil.CompanyInfo\n\tvar cinfo webutil.CompanyInfo\n\tvar asinfo webutil.AssemblyInfo\n\tvar trinfo webutil.TransitInfo\n\tvar saInfo webutil.SalesInfo\n\tif r.FormValue(\"submitted\") == \"true\" {\n\t\tkey := r.FormValue(\"snumber\")\n\t\t//according snumber to find a batch key\n\t\tfor k, _ := range webutil.Orgnization {\n\t\t\tif k == \"smartphone\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbatch = app.GetPhoneBatchInfo(k, uName)\n\t\t\tfor _, vb := range batch {\n\t\t\t\tif key > vb {\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tkeymap[k] = vb\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tbinfo = app.GetPhoneSupplierInfo(oName, uName, \"GetBatteryInfo\", keymap[\"battery\"])\n\t\tdinfo = app.GetPhoneSupplierInfo(oName, uName, \"GetDisplayInfo\", keymap[\"display\"])\n\t\tcinfo = app.GetPhoneSupplierInfo(oName, uName, \"GetCpuInfo\", keymap[\"cpu\"])\n\t\tasinfo = app.GetPhoneAssemblyInfo(oName, uName, keymap[\"assembly\"])\n\t\ttrinfo = app.GetPhoneLogisticsInfo(oName, uName, keymap[\"logistics\"])\n\t\tsaInfo = app.GetPhoneSalesInfo(oName, uName, key)\n\n\t\tdata[\"BatteryInfo\"] = binfo\n\t\tdata[\"DisplayInfo\"] = dinfo\n\t\tdata[\"CpuInfo\"] = cinfo\n\t\tdata[\"AssemblyInfo\"] = asinfo\n\t\tdata[\"LogisticsInfo\"] = trinfo.ConcreteTransitInfo\n\t\tdata[\"SalesInfo\"] = saInfo\n\t\tfmt.Println(\"all info is\", binfo, dinfo, cinfo, asinfo, trinfo, saInfo)\n\t}\n\tqueryphoneTemplate(w, r, \"queryphoneinfo.html\", data)\n}",
"func (a *paymentUsecase) Fetch(c context.Context, cursor string, num int64) ([]*models.Payment, string, error) {\n\tif num == 0 {\n\t\tnum = 10\n\t}\n\n\tctx, cancel := context.WithTimeout(c, a.contextTimeout)\n\tdefer cancel()\n\n\tlistPayment, err := a.repo.Fetch(ctx, cursor, num)\n\tif err != nil {\n\t\treturn nil, \"\", err\n\t}\n\n\tnextCursor := \"\"\n\n\tif size := len(listPayment); size == int(num) {\n\t\tlastID := listPayment[num-1].ID\n\t\tnextCursor = strconv.Itoa(int(lastID))\n\t}\n\n\treturn listPayment, nextCursor, nil\n}",
"func (p *PullRequest) GetNumber() int {\n\tif p == nil || p.Number == nil {\n\t\treturn 0\n\t}\n\treturn *p.Number\n}",
"func (p *NoteStoreClient) GetResource(ctx context.Context, authenticationToken string, guid GUID, withData bool, withRecognition bool, withAttributes bool, withAlternateData bool) (r *Resource, err error) {\n var _args135 NoteStoreGetResourceArgs\n _args135.AuthenticationToken = authenticationToken\n _args135.GUID = guid\n _args135.WithData = withData\n _args135.WithRecognition = withRecognition\n _args135.WithAttributes = withAttributes\n _args135.WithAlternateData = withAlternateData\n var _result136 NoteStoreGetResourceResult\n if err = p.Client_().Call(ctx, \"getResource\", &_args135, &_result136); err != nil {\n return\n }\n switch {\n case _result136.UserException!= nil:\n return r, _result136.UserException\n case _result136.SystemException!= nil:\n return r, _result136.SystemException\n case _result136.NotFoundException!= nil:\n return r, _result136.NotFoundException\n }\n\n return _result136.GetSuccess(), nil\n}",
"func (c *Client) CallResource(ctx context.Context, callSid string) (*CallResource, error) {\n\tctx, span := trace.StartSpan(ctx, \"twilio.Client.CallResource()\")\n\tdefer span.End()\n\n\turl := fmt.Sprintf(\"%s/Accounts/%s/Calls/%s.json\", baseURL, c.accountSid, callSid)\n\n\treq, err := c.newRequest(ctx, http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, errors.WithMessage(err, \"twilio.Client.CallResource()\")\n\t}\n\n\tres, err := c.httpClient.Do(req)\n\tif err != nil {\n\t\treturn nil, errors.WithMessage(err, \"twilio.Client.CallResource(): http.Do(\")\n\t}\n\tdefer res.Body.Close()\n\n\tif res.StatusCode != http.StatusOK {\n\t\treturn nil, errors.WithMessage(decodeError(res.Body), \"twilio.Client.CallResource()\")\n\t}\n\n\tcallResource := &CallResource{}\n\n\tif err := json.NewDecoder(res.Body).Decode(callResource); err != nil {\n\t\treturn nil, errors.WithMessage(err, \"twilio.Client.CallResource(): json.Decoder.Decode()\")\n\t}\n\n\treturn callResource, nil\n}",
"func Fetch(settings *Settings, taskSettings *TaskSettings) {\n\ttoken := fetchToken(settings, taskSettings)\n\tprintToken(token, taskSettings.Format, settings)\n}",
"func (c Client) Fetch() (*FetchRecordingResponse, error) {\n\treturn c.FetchWithContext(context.Background())\n}",
"func (*PhoneNumber) Descriptor() ([]byte, []int) {\n\treturn file_examples_documents_example_proto_rawDescGZIP(), []int{26}\n}",
"func (b *Buffer) RetrieveInt32() {\n\tb.Retrieve(4)\n}",
"func (h *Handler) Fetch(c echo.Context) error {\n\tidP, err := strconv.Atoi(c.Param(\"id\"))\n\tif err != nil {\n\t\treturn c.JSON(http.StatusNotFound, err)\n\t}\n\n\tid := uint(idP)\n\n\trec, err := h.Store.Fetch(id)\n\tif err != nil {\n\t\treturn c.JSON(http.StatusInternalServerError, err)\n\t}\n\n\treturn c.JSON(http.StatusOK, rec)\n}",
"func (app *Application) ContactRead(w http.ResponseWriter, r *http.Request) {\n\tid := r.URL.Query().Get(\"id\")\n\tc, err := contactRead(app.db, id)\n\n\tif err != nil {\n\t\tfail(w, r, err)\n\t\treturn\n\t}\n\n\twrite(w, r, Response{Ok: true, Data: c})\n}",
"func (m *User) GetFaxNumber()(*string) {\n return m.faxNumber\n}",
"func (ps PersonService) Get(n int) (Person, error) {\n\tp := ps.a.Retrieve(n)\n\tif p.First == \"\" {\n\t\treturn Person{}, fmt.Errorf(\"no person with n of %d\", n)\n\t}\n\treturn p, nil\n}",
"func PhoneFormatted() string { return phoneFormatted(globalFaker.Rand) }"
] | [
"0.60177976",
"0.5824734",
"0.5673012",
"0.5629686",
"0.5505302",
"0.5459282",
"0.54561484",
"0.5361664",
"0.53290045",
"0.5169997",
"0.5153889",
"0.5087201",
"0.5074009",
"0.5051332",
"0.5050677",
"0.5016039",
"0.50082606",
"0.49904212",
"0.49762186",
"0.49649933",
"0.49638295",
"0.49628758",
"0.4919978",
"0.4895562",
"0.48848724",
"0.48598343",
"0.48562056",
"0.48395488",
"0.48172778",
"0.4810342",
"0.48018098",
"0.47506043",
"0.47497833",
"0.47352102",
"0.47235638",
"0.47216648",
"0.4720436",
"0.4719688",
"0.47160432",
"0.4680658",
"0.4649608",
"0.46396762",
"0.46304476",
"0.462833",
"0.46150243",
"0.461381",
"0.460772",
"0.4605441",
"0.45984313",
"0.458405",
"0.45787475",
"0.45522314",
"0.4548475",
"0.45300785",
"0.45231372",
"0.45143825",
"0.45086506",
"0.4506732",
"0.44990575",
"0.44943115",
"0.4475327",
"0.44752187",
"0.4467139",
"0.4461393",
"0.44540465",
"0.44129807",
"0.44074917",
"0.44068393",
"0.44045195",
"0.4402559",
"0.44016242",
"0.43992427",
"0.4396913",
"0.43901664",
"0.43888187",
"0.43803394",
"0.43777558",
"0.43689886",
"0.43657145",
"0.43626124",
"0.43610924",
"0.43533447",
"0.43461454",
"0.43454012",
"0.43428102",
"0.43415987",
"0.43294895",
"0.43280843",
"0.43279174",
"0.43131188",
"0.4305338",
"0.4301081",
"0.4288131",
"0.42880407",
"0.4285071",
"0.42850184",
"0.4280159",
"0.42786586",
"0.42725012",
"0.4271897"
] | 0.7438931 | 0 |
FetchWithContext retrieves a phone number resource. See for more details | func (c Client) FetchWithContext(context context.Context) (*FetchIncomingPhoneNumberResponse, error) {
op := client.Operation{
Method: http.MethodGet,
URI: "/Accounts/{accountSid}/IncomingPhoneNumbers/{sid}.json",
PathParams: map[string]string{
"accountSid": c.accountSid,
"sid": c.sid,
},
}
response := &FetchIncomingPhoneNumberResponse{}
if err := c.client.Send(context, op, nil, response); err != nil {
return nil, err
}
return response, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c Client) Fetch() (*FetchIncomingPhoneNumberResponse, error) {\n\treturn c.FetchWithContext(context.Background())\n}",
"func (_obj *DataService) HasPhoneWithContext(tarsCtx context.Context, phone string, phoneExist *bool, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(phone, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_bool((*phoneExist), 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"hasPhone\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_bool(&(*phoneExist), 2, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func GetPhoneNumberEndPoint(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\tphonenumber := vars[\"phonenumber\"]\n\tusername := vars[\"username\"]\n\n\tperson := &util.Person{\n\t\tName: username,\n\t\tPhonenumber: phonenumber,\n\t\tAddress: \"\",\n\t}\n\n\tperson = query.CheckAndFetch(person)\n\tif person == nil {\n\t\tperson = &util.Person{\n\t\t\tName: \"Does Not Match Records\",\n\t\t\tPhonenumber: phonenumber,\n\t\t\tAddress: \"Possible captcha violation, visit truepeoplesearch.com and prove you are not a robot.\",\n\t\t}\n\t\t//fmt.Fprintf(w, \"\")\n\t}\n\tdata, err := json.Marshal(person)\n\tif err != nil {\n\t\tfmt.Printf(\"Error in Marshalling JSON: %s\", err)\n\t}\n\n\tfmt.Println(string(data))\n\tfmt.Fprintf(w, \"%s\", data)\n}",
"func (h *Harness) Phone(id string) string { return h.phoneCCG.Get(id) }",
"func (s *SmartContract) QueryPhone(ctx contractapi.TransactionContextInterface, id string) (*Phone, error) {\n phoneAsBytes, err := ctx.GetStub().GetState(id)\n\n if err != nil {\n return nil, fmt.Errorf(\"Failed to read from world state. %s\", err.Error())\n }\n\n if phoneAsBytes == nil {\n return nil, fmt.Errorf(\"%s does not exist\", id)\n }\n\n f := new(Phone)\n _ = json.Unmarshal(phoneAsBytes, f)\n \n return f, nil\n}",
"func (m *BookingBusiness) GetPhone()(*string) {\n val, err := m.GetBackingStore().Get(\"phone\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (_obj *DataService) HasPhoneOneWayWithContext(tarsCtx context.Context, phone string, phoneExist *bool, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(phone, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_bool((*phoneExist), 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 1, \"hasPhone\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (o *RelationshipManager) GetPhoneNumber() string {\n\tif o == nil || o.PhoneNumber.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.PhoneNumber.Get()\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchParticipantResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Rooms/{roomSid}/Participants/{sid}\",\n\t\tPathParams: map[string]string{\n\t\t\t\"roomSid\": c.roomSid,\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tresponse := &FetchParticipantResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (s *identityServer) Retrieve(ctx context.Context, in *identityPb.RetrieveRequest) (*identityPb.RetrieveReply, error) {\n\tfmt.Printf(\"Identity Retrieve Called %s\\n\", data[in.Id].PhoneNumber)\n\treturn &identityPb.RetrieveReply{PhoneNumber: data[in.Id].PhoneNumber}, nil\n}",
"func decodeUserInfoByPhoneRequest(_ context.Context, r interface{}) (interface{}, error) {\n\treq := r.(*pb.UserInfoByPhoneRequest)\n\treturn endpoint.UserInfoByPhoneRequest{\n\t\tPhone:req.Phone,\n\t},nil\n}",
"func (m *RelatedContact) GetMobilePhone()(*string) {\n val, err := m.GetBackingStore().Get(\"mobilePhone\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(*string)\n }\n return nil\n}",
"func (m *CommunicationsIdentitySet) GetPhone()(Identityable) {\n val, err := m.GetBackingStore().Get(\"phone\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.(Identityable)\n }\n return nil\n}",
"func (obj *MessengerUser) PhoneNumber() string {\n\tproxyResult := /*pr4*/ C.vssq_messenger_user_phone_number(obj.cCtx)\n\n\truntime.KeepAlive(obj)\n\n\treturn C.GoString(C.vsc_str_chars(proxyResult)) /* r5.1 */\n}",
"func (i *Invoice) GetPhoneToProvider() (value bool) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.Flags.Has(6)\n}",
"func (c *Client) ModifyPhoneNumber(phone, token string) (*ModifyPhoneNumberResponse, error) {\n\tp := modifyPhoneNumberParams{\n\t\tCellNum: phone,\n\t}\n\tparamMap, err := toMap(p, map[string]string{\n\t\t\"token\": token,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret, err := httpRequest(c, p.URI(), paramMap, nil, func() interface{} {\n\t\treturn &ModifyPhoneNumberResponse{}\n\t})\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\trsp := ret.(*ModifyPhoneNumberResponse)\n\n\tif err = checkErr(rsp.Code, rsp.SubCode, rsp.Message); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn rsp, nil\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchRecordingResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Recordings/{sid}\",\n\t\tPathParams: map[string]string{\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tresponse := &FetchRecordingResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (h *Harness) PhoneCC(cc, id string) string { return h.phoneCCG.GetWithArg(cc, id) }",
"func (list *BillingInfoList) FetchWithContext(ctx context.Context) error {\n\tresources := &billingInfoList{}\n\terr := list.client.Call(ctx, http.MethodGet, list.nextPagePath, nil, nil, list.requestOptions, resources)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// copy over properties from the response\n\tlist.nextPagePath = resources.Next\n\tlist.hasMore = resources.HasMore\n\tlist.data = resources.Data\n\treturn nil\n}",
"func GetNumberContactsHandler(c *gin.Context) {\n\tphonenr := c.Param(\"number\")\n\n\tDBUser, DBPass, DBName := GetSettings()\n\tdb, err := sql.Open(\"mysql\", DBUser+\":\"+DBPass+DBName)\n\tcheckErr(err)\n\tdefer db.Close() //Close DB after function has returned a val\n\n\tstmtOut, err := db.Prepare(\"SELECT DISTINCT name, phonenumber FROM user INNER JOIN groupmember ON phonenumber = user_number WHERE group_id IN (SELECT group_id FROM groupmember WHERE user_number = ?) AND NOT phonenumber = ?\")\n\tcheckErr(err)\n\tdefer stmtOut.Close()\n\n\trows, err := stmtOut.Query(phonenr, phonenr)\n\tcheckErr(err)\n\n\tvar contacts []*Contacts\n\tfor rows.Next() {\n\t\tp := new(Contacts)\n\t\tif err := rows.Scan(&p.Name, &p.Phonenumber); err != nil {\n\t\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t\t}\n\t\tcontacts = append(contacts, p)\n\t}\n\tif err := rows.Err(); err != nil {\n\t\tc.AbortWithStatus(http.StatusInternalServerError)\n\t}\n\tc.JSON(http.StatusAccepted, contacts)\n\n}",
"func (rc *Client) FetchWithContext(ctx context.Context, url string) ([]*gofeed.Item, error) {\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tfeed, err := rc.Parser.Parse(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn feed.Items, nil\n}",
"func AuthPhoneNumber(ctx context.Context, phoneNumber, pin string) (string, error) {\n\tidentity, err := phoneAuthHandler.Authenticate(ctx, phoneNumber, pin)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t// TODO map to graphql viewer object and expose graphql viewer here by default.\n\treturn identity.Token, nil\n}",
"func (c *Client) GetUserByPhoneNumber(ctx context.Context, phone string) (*UserRecord, error) {\n\tif err := validatePhone(phone); err != nil {\n\t\treturn nil, err\n\t}\n\trequest := &identitytoolkit.IdentitytoolkitRelyingpartyGetAccountInfoRequest{\n\t\tPhoneNumber: []string{phone},\n\t}\n\treturn c.getUser(ctx, request)\n}",
"func (o *EntityWatchlistScreeningSearchTerms) GetPhoneNumber() string {\n\tif o == nil || o.PhoneNumber.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn *o.PhoneNumber.Get()\n}",
"func (c *Client) AvailablePhoneNumber() *AvailablePhoneNumberService {\n\tavailablephonenumberService := AvailablePhoneNumberService{Client: *c}\n\tavailablephonenumberService.validActions = types.READ | types.BULKREAD | 0x00\n\treturn &availablephonenumberService\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchRevisionResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Flows/{flowSid}/Revisions/{revisionNumber}\",\n\t\tPathParams: map[string]string{\n\t\t\t\"flowSid\": c.flowSid,\n\t\t\t\"revisionNumber\": strconv.Itoa(c.revisionNumber),\n\t\t},\n\t}\n\n\tresponse := &FetchRevisionResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (list *SubscriptionAddOnList) FetchWithContext(ctx context.Context) error {\n\tresources := &subscriptionAddOnList{}\n\terr := list.client.Call(ctx, http.MethodGet, list.nextPagePath, nil, nil, list.requestOptions, resources)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// copy over properties from the response\n\tlist.nextPagePath = resources.Next\n\tlist.hasMore = resources.HasMore\n\tlist.data = resources.Data\n\treturn nil\n}",
"func LookupPhoneNumber(ctx *pulumi.Context, args *LookupPhoneNumberArgs, opts ...pulumi.InvokeOption) (*LookupPhoneNumberResult, error) {\n\topts = internal.PkgInvokeDefaultOpts(opts)\n\tvar rv LookupPhoneNumberResult\n\terr := ctx.Invoke(\"aws-native:connect:getPhoneNumber\", args, &rv, opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &rv, nil\n}",
"func GetExampleNumber(regionCode string) *PhoneNumber {\n\treturn GetExampleNumberForType(regionCode, FIXED_LINE)\n}",
"func (u *User) Phone() string { return u.userData.Phone }",
"func (__receiver_AService *AvailablePhoneNumberService) Get() *AvailablePhoneNumberService {\n\tif len(__receiver_AService.ResourceID) == 0 {\n\t\t__receiver_AService.action = types.BULKREAD\n\t\t__receiver_AService.data = resources.AvailablePhoneNumberFilter{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.BULKREAD]\n\t} else {\n\t\t__receiver_AService.data = struct{}{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.READ]\n\t\t__receiver_AService.action = types.READ\n\t}\n\treturn __receiver_AService\n}",
"func GetPhoneNumbers(mctx libkb.MetaContext) ([]keybase1.UserPhoneNumber, error) {\n\targ := libkb.APIArg{\n\t\tEndpoint: \"user/phone_numbers\",\n\t\tSessionType: libkb.APISessionTypeREQUIRED,\n\t}\n\tvar resp phoneNumbersResponse\n\terr := mctx.G().API.GetDecode(mctx, arg, &resp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.PhoneNumbers, nil\n}",
"func (o *CustomerInfo) GetPhone() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Phone\n}",
"func Phone() string { return phone(globalFaker.Rand) }",
"func (v *VerifyPhoneNumberService) Get(ctx context.Context, verifyServiceID string, sid string) (*VerifyPhoneNumber, error) {\n\tverify := new(VerifyPhoneNumber)\n\terr := v.client.GetResource(ctx, servicesPathPart+\"/\"+verifyServiceID+\"/\"+verificationsPathPart, sid, verify)\n\treturn verify, err\n}",
"func (p fullProvider) FetchDSNWithContext(_ context.Context, dsn string) (string, error) {\n\treturn p.FetchDSN(dsn)\n}",
"func (p Phone) Number() string {\n\tnumber := p.Faker.RandomStringElement(phoneFormats)\n\n\t// {{areaCode}}\n\tnumber = strings.Replace(number, \"{{areaCode}}\", p.AreaCode(), 1)\n\n\t// {{exchangeCode}}\n\tnumber = strings.Replace(number, \"{{exchangeCode}}\", p.ExchangeCode(), 1)\n\n\treturn p.Faker.Numerify(number)\n}",
"func (o *CustomerInfoResponse) GetPhone() string {\n\tif o == nil || IsNil(o.Phone) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func (u *User) GetPhone() domain.PhoneNumber {\n\treturn \"\"\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchBalanceResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Accounts/{accountSid}/Balance.json\",\n\t\tPathParams: map[string]string{\n\t\t\t\"accountSid\": c.accountSid,\n\t\t},\n\t}\n\n\tresponse := &FetchBalanceResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (c *Consumer) RetrieveFnWithContext(ctx context.Context, fn MessageProcessor) error {\n\tmsg, err := c.RetrieveWithContext(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif fn != nil {\n\t\tstart := time.Now()\n\t\tif err := fn(msg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tc.Stats.UpdateProcessedDuration(time.Since(start))\n\t\tc.Stats.AddProcessed(1)\n\n\t}\n\treturn nil\n}",
"func (list *AccountList) FetchWithContext(ctx context.Context) error {\n\tresources := &accountList{}\n\terr := list.client.Call(ctx, http.MethodGet, list.nextPagePath, nil, nil, list.requestOptions, resources)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// copy over properties from the response\n\tlist.nextPagePath = resources.Next\n\tlist.hasMore = resources.HasMore\n\tlist.data = resources.Data\n\treturn nil\n}",
"func (b *GroupsEditBuilder) Phone(v string) *GroupsEditBuilder {\n\tb.Params[\"phone\"] = v\n\treturn b\n}",
"func GetRequstByEmployeeId(c *gin.Context) {}",
"func (d UserData) Phone() string {\n\tval := d.ModelData.Get(models.NewFieldName(\"Phone\", \"phone\"))\n\tif !d.Has(models.NewFieldName(\"Phone\", \"phone\")) {\n\t\treturn *new(string)\n\t}\n\treturn val.(string)\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchAlphaSenderResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Services/{serviceSid}/AlphaSenders/{sid}\",\n\t\tPathParams: map[string]string{\n\t\t\t\"serviceSid\": c.serviceSid,\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tresponse := &FetchAlphaSenderResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func AuthPhoneExtendToken(ctx context.Context, token string) (string, error) {\n\treturn phoneAuthHandler.ExtendTokenExpiration(token)\n}",
"func (app *Application) QueryPhoneHandler(w http.ResponseWriter, r *http.Request) {\n\tvar data map[string]interface{}\n\tdata = make(map[string]interface{})\n\tvar keymap map[string]string\n\tkeymap = make(map[string]string)\n\tvar batch []string\n\toName := \"smartphone\"\n\tuName := \"wzx\"\n\tvar binfo webutil.CompanyInfo\n\tvar dinfo webutil.CompanyInfo\n\tvar cinfo webutil.CompanyInfo\n\tvar asinfo webutil.AssemblyInfo\n\tvar trinfo webutil.TransitInfo\n\tvar saInfo webutil.SalesInfo\n\tif r.FormValue(\"submitted\") == \"true\" {\n\t\tkey := r.FormValue(\"snumber\")\n\t\t//according snumber to find a batch key\n\t\tfor k, _ := range webutil.Orgnization {\n\t\t\tif k == \"smartphone\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tbatch = app.GetPhoneBatchInfo(k, uName)\n\t\t\tfor _, vb := range batch {\n\t\t\t\tif key > vb {\n\t\t\t\t\tcontinue\n\t\t\t\t} else {\n\t\t\t\t\tkeymap[k] = vb\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tbinfo = app.GetPhoneSupplierInfo(oName, uName, \"GetBatteryInfo\", keymap[\"battery\"])\n\t\tdinfo = app.GetPhoneSupplierInfo(oName, uName, \"GetDisplayInfo\", keymap[\"display\"])\n\t\tcinfo = app.GetPhoneSupplierInfo(oName, uName, \"GetCpuInfo\", keymap[\"cpu\"])\n\t\tasinfo = app.GetPhoneAssemblyInfo(oName, uName, keymap[\"assembly\"])\n\t\ttrinfo = app.GetPhoneLogisticsInfo(oName, uName, keymap[\"logistics\"])\n\t\tsaInfo = app.GetPhoneSalesInfo(oName, uName, key)\n\n\t\tdata[\"BatteryInfo\"] = binfo\n\t\tdata[\"DisplayInfo\"] = dinfo\n\t\tdata[\"CpuInfo\"] = cinfo\n\t\tdata[\"AssemblyInfo\"] = asinfo\n\t\tdata[\"LogisticsInfo\"] = trinfo.ConcreteTransitInfo\n\t\tdata[\"SalesInfo\"] = saInfo\n\t\tfmt.Println(\"all info is\", binfo, dinfo, cinfo, asinfo, trinfo, saInfo)\n\t}\n\tqueryphoneTemplate(w, r, \"queryphoneinfo.html\", data)\n}",
"func (o GetUsersUserOutput) Phone() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetUsersUser) string { return v.Phone }).(pulumi.StringOutput)\n}",
"func (me *AccountRepository) GetByPhone(ctx context.Context, phone string) (*account.Account, error) {\n\tstatement := dbx.NewStatement(\"SELECT * FROM account WHERE phone = :phone\")\n\tstatement.AddParameter(\"phone\", phone)\n\n\trows, err := me.db.QueryStatementContext(ctx, statement)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar retrievedAccount *account.Account\n\n\tfor rows.Next() {\n\t\tretrievedAccount = &account.Account{}\n\t\terr := rows.Scan(&retrievedAccount.ID, &retrievedAccount.Name, &retrievedAccount.Email, &retrievedAccount.Phone, &retrievedAccount.Password, &retrievedAccount.CreatedAt, &retrievedAccount.UpdatedAt, &retrievedAccount.DeletedAt)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn retrievedAccount, nil\n\t}\n\n\treturn nil, nil\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchSyncListItemResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Services/{serviceSid}/Lists/{syncListSid}/Items/{index}\",\n\t\tPathParams: map[string]string{\n\t\t\t\"serviceSid\": c.serviceSid,\n\t\t\t\"syncListSid\": c.syncListSid,\n\t\t\t\"index\": strconv.Itoa(c.index),\n\t\t},\n\t}\n\n\tresponse := &FetchSyncListItemResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (c *Connect) ListPhoneNumbersPagesWithContext(ctx aws.Context, input *ListPhoneNumbersInput, fn func(*ListPhoneNumbersOutput, bool) bool, opts ...request.Option) error {\n\tp := request.Pagination{\n\t\tNewRequest: func() (*request.Request, error) {\n\t\t\tvar inCpy *ListPhoneNumbersInput\n\t\t\tif input != nil {\n\t\t\t\ttmp := *input\n\t\t\t\tinCpy = &tmp\n\t\t\t}\n\t\t\treq, _ := c.ListPhoneNumbersRequest(inCpy)\n\t\t\treq.SetContext(ctx)\n\t\t\treq.ApplyOptions(opts...)\n\t\t\treturn req, nil\n\t\t},\n\t}\n\n\tfor p.Next() {\n\t\tif !fn(p.Page().(*ListPhoneNumbersOutput), !p.HasNextPage()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn p.Err()\n}",
"func (b *Builder) Phone(s string) *Builder {\n\treturn b.appendMessage(s, func(offset, limit int) tg.MessageEntityClass {\n\t\treturn &tg.MessageEntityPhone{Offset: offset, Length: limit}\n\t})\n}",
"func (o *Credit1099Payer) GetTelephoneNumber() string {\n\tif o == nil || o.TelephoneNumber.Get() == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.TelephoneNumber.Get()\n}",
"func (f *Faker) Phone() string { return phone(f.Rand) }",
"func (e Employee) printPhone(){\n\tfmt.Println(\"The employee is: \", e.phone)\n}",
"func (pv *Client) FetchNumbers(url string, page int) []models.PhoneNumber {\n\tnumbers := make([]models.PhoneNumber, 0)\n\t// Load the HTML document\n\tdoc, err := models.FetchPage(httpClient, url, setDefaultHeaders)\n\tif err != nil {\n\t\treturn numbers\n\t}\n\n\tdoc.Find(\"div.number-boxes div.number-boxes-item\").Each(func(i int, s *goquery.Selection) {\n\t\tstatus := \"online\"\n\t\tnumberURL := s.Find(\"a\").AttrOr(\"href\", \"nil\")\n\t\tnumber := s.Find(\".number-boxes-itemm-number\").Text()\n\t\tid := strings.Replace(numberURL, \"/sms/\", \"\", 1)\n\n\t\tif len(number) == 0 || strings.Contains(numberURL, \"register\") {\n\t\t\treturn\n\t\t}\n\n\t\tnum, err := libphonenumber.Parse(number, \"\")\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Failed to parse number: %s, error: %+v\", number, err)\n\t\t}\n\n\t\tregionNumber := libphonenumber.GetRegionCodeForNumber(num)\n\t\tcountryCode := libphonenumber.GetCountryCodeForRegion(regionNumber)\n\t\tnationalNum := libphonenumber.GetNationalSignificantNumber(num)\n\n\t\tnumbers = append(numbers, models.PhoneNumber{\n\t\t\tProvider: pv.Name(),\n\t\t\tProviderID: id,\n\t\t\tRawNumber: number,\n\t\t\tNumber: nationalNum,\n\t\t\tCountry: regionNumber,\n\t\t\tCountryCode: countryCode,\n\t\t\tCountryName: utils.FindCountryName(regionNumber),\n\t\t\tCountrySlug: slug.Make(utils.FindCountryName(regionNumber)),\n\t\t\tStatus: status,\n\t\t})\n\t})\n\n\treturn numbers\n}",
"func (_obj *DataService) HasPhone(phone string, phoneExist *bool, _opt ...map[string]string) (ret int32, err error) {\n\n\tvar length int32\n\tvar have bool\n\tvar ty byte\n\t_os := codec.NewBuffer()\n\terr = _os.Write_string(phone, 1)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _os.Write_bool((*phoneExist), 2)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tvar _status map[string]string\n\tvar _context map[string]string\n\tif len(_opt) == 1 {\n\t\t_context = _opt[0]\n\t} else if len(_opt) == 2 {\n\t\t_context = _opt[0]\n\t\t_status = _opt[1]\n\t}\n\t_resp := new(requestf.ResponsePacket)\n\ttarsCtx := context.Background()\n\n\terr = _obj.s.Tars_invoke(tarsCtx, 0, \"hasPhone\", _os.ToBytes(), _status, _context, _resp)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\t_is := codec.NewReader(tools.Int8ToByte(_resp.SBuffer))\n\terr = _is.Read_int32(&ret, 0, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\terr = _is.Read_bool(&(*phoneExist), 2, true)\n\tif err != nil {\n\t\treturn ret, err\n\t}\n\n\tif len(_opt) == 1 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t} else if len(_opt) == 2 {\n\t\tfor k := range _context {\n\t\t\tdelete(_context, k)\n\t\t}\n\t\tfor k, v := range _resp.Context {\n\t\t\t_context[k] = v\n\t\t}\n\t\tfor k := range _status {\n\t\t\tdelete(_status, k)\n\t\t}\n\t\tfor k, v := range _resp.Status {\n\t\t\t_status[k] = v\n\t\t}\n\n\t}\n\t_ = length\n\t_ = have\n\t_ = ty\n\treturn ret, nil\n}",
"func (m *User) GetMobilePhone()(*string) {\n return m.mobilePhone\n}",
"func (o *WhatsAppPhoneWhatsAppApiContent) GetPhone() string {\n\tif o == nil || o.Phone == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func encodeUserInfoByPhoneResponse(_ context.Context, r interface{}) (interface{}, error) {\n\tresponse := r.(endpoint.UserInfoByPhoneResponse)\n\tvar user = &pb.UserInfo{\n\t\tId:response.U0.Id,\n\t\tPhone:response.U0.Phone,\n\t\tPassword:response.U0.Password,\n\t\tAge:response.U0.Age,\n\t}\n\treturn &pb.UserInfoByPhoneReply{\n\t\tUser:user,\n\t},nil\n}",
"func (o *DepositSwitchTargetUser) GetPhone() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Phone\n}",
"func (m *UserMutation) PhoneNumber() (r string, exists bool) {\n\tv := m.phone_number\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}",
"func (f DSNProviderWCFunc) FetchDSNWithContext(ctx context.Context, dsn string) (string, error) {\n\treturn f(ctx, dsn)\n}",
"func (s UserSet) Phone() string {\n\tres, _ := s.RecordCollection.Get(models.NewFieldName(\"Phone\", \"phone\")).(string)\n\treturn res\n}",
"func (*Person_PhoneNumber) Descriptor() ([]byte, []int) {\n\treturn file_address_book_addressbook_proto_rawDescGZIP(), []int{1, 0}\n}",
"func (o *UserDisco) GetPhone() string {\n\tif o == nil || o.Phone == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func (p applicationPackager) telephone(data map[string]interface{}) (template.HTML, error) {\n\treturn p.xmlTemplate(\"telephone.xml\", data)\n}",
"func PhoneFormatted() string { return phoneFormatted(globalFaker.Rand) }",
"func (__receiver_AService *AvailablePhoneNumberService) ID(id string) *AvailablePhoneNumberService {\n\t__receiver_AService.ResourceID = id\n\tswitch __receiver_AService.action {\n\tcase types.BULKREAD:\n\t\t__receiver_AService.data = struct{}{}\n\t\t__receiver_AService.url = resources.AvailablePhoneNumberURLS[types.READ]\n\t\t__receiver_AService.action = types.READ\n\n\t}\n\treturn __receiver_AService\n}",
"func GetExampleNumberForType(regionCode string, typ PhoneNumberType) *PhoneNumber {\n\t// Check the region code is valid.\n\tif !isValidRegionCode(regionCode) {\n\t\treturn nil\n\t}\n\t//PhoneNumberDesc (pointer?)\n\tvar desc = getNumberDescByType(getMetadataForRegion(regionCode), typ)\n\texNum := desc.GetExampleNumber()\n\tif len(exNum) > 0 {\n\t\tnum, err := Parse(exNum, regionCode)\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn num\n\t}\n\treturn nil\n}",
"func (o *MicrosoftGraphEducationSchool) GetPhone() string {\n\tif o == nil || o.Phone == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Phone\n}",
"func PatientPhoneGTE(v int) predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldPatientPhone), v))\n\t})\n}",
"func (o *GroupReplaceRequest) GetPhoneNumber() string {\n\tif o == nil || o.PhoneNumber == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.PhoneNumber\n}",
"func (c Client) CreateWithContext(context context.Context, input *CreateIncomingPhoneNumberInput) (*CreateIncomingPhoneNumberResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodPost,\n\t\tURI: \"/Accounts/{accountSid}/IncomingPhoneNumbers.json\",\n\t\tContentType: client.URLEncoded,\n\t\tPathParams: map[string]string{\n\t\t\t\"accountSid\": c.accountSid,\n\t\t},\n\t}\n\n\tif input == nil {\n\t\tinput = &CreateIncomingPhoneNumberInput{}\n\t}\n\n\tresponse := &CreateIncomingPhoneNumberResponse{}\n\tif err := c.client.Send(context, op, input, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func PhoneGTE(v string) predicate.User {\n\treturn predicate.User(sql.FieldGTE(FieldPhone, v))\n}",
"func Number(phoneNum string) (string, error) {\n\tre, err := regexp.Compile(`[[:digit:]]*`)\n\tif err != nil {\n\t\treturn \"\", errors.New(\"error regexp\")\n\t}\n\tnums := strings.Join(re.FindAllString(phoneNum, -1), \"\")\n\n\tif len(nums) < 10 || len(nums) > 11 || (len(nums) == 11 && nums[:1] != \"1\") {\n\t\treturn \"\", errors.New(\"Error digits\")\n\t} else if len(nums) == 11 {\n\t\treturn nums[1:], nil\n\t}\n\treturn nums, nil\n}",
"func (m *InviteeMutation) Phone() (r string, exists bool) {\n\tv := m.phone\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}",
"func GetExampleNumberForNonGeoEntity(countryCallingCode int) *PhoneNumber {\n\tvar metadata *PhoneMetadata = getMetadataForNonGeographicalRegion(countryCallingCode)\n\tif metadata == nil {\n\t\treturn nil\n\t}\n\n\t// For geographical entities, fixed-line data is always present. However, for non-geographical\n\t// entities, this is not the case, so we have to go through different types to find the\n\t// example number.\n\tdescPriority := []*PhoneNumberDesc{metadata.GetMobile(), metadata.GetTollFree(),\n\t\tmetadata.GetSharedCost(), metadata.GetVoip(), metadata.GetVoicemail(), metadata.GetUan(), metadata.GetPremiumRate()}\n\n\tfor _, desc := range descPriority {\n\t\tif desc != nil && desc.GetExampleNumber() != \"\" {\n\t\t\tnum, err := Parse(\"+\"+strconv.Itoa(countryCallingCode)+desc.GetExampleNumber(), \"ZZ\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn num\n\t\t}\n\t}\n\treturn nil\n}",
"func AddPhoneNumber(mctx libkb.MetaContext, phoneNumber keybase1.PhoneNumber, visibility keybase1.IdentityVisibility) error {\n\t// First try to delete if we have a superseded item for this phone number already\n\tnums, err := GetPhoneNumbers(mctx)\n\tif err == nil {\n\t\tfor _, num := range nums {\n\t\t\tif num.Superseded && num.PhoneNumber == phoneNumber {\n\t\t\t\terr = DeletePhoneNumber(mctx, num.PhoneNumber)\n\t\t\t\tif err != nil {\n\t\t\t\t\tmctx.Warning(\"error deleting superseded number on add: %s\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t} else {\n\t\tmctx.Warning(\"error fetching numbers on add: %s\", err)\n\t}\n\n\tpayload := make(libkb.JSONPayload)\n\tpayload[\"phone_number\"] = phoneNumber\n\tpayload[\"visibility\"] = visibility\n\n\targ := libkb.APIArg{\n\t\tEndpoint: \"user/phone_numbers\",\n\t\tJSONPayload: payload,\n\t\tSessionType: libkb.APISessionTypeREQUIRED,\n\t}\n\n\t_, err = mctx.G().API.PostJSON(mctx, arg)\n\treturn err\n}",
"func PhoneGTE(v string) predicate.Patient {\n\treturn predicate.Patient(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldPhone), v))\n\t})\n}",
"func CallFHIREndpoint() {\n\tresp, err := http.Get(\"https://fhir-open.sandboxcerner.com/r4/0b8a0111-e8e6-4c26-a91c-5069cbc6b1ca/Patient?_id=4342009\")\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\tlog.Println(string(body))\n}",
"func (service *ContrailService) RESTGetUser(c echo.Context) error {\n\tid := c.Param(\"id\")\n\trequest := &models.GetUserRequest{\n\t\tID: id,\n\t}\n\tctx := c.Request().Context()\n\tresponse, err := service.GetUser(ctx, request)\n\tif err != nil {\n\t\treturn common.ToHTTPError(err)\n\t}\n\treturn c.JSON(http.StatusOK, response)\n}",
"func (r *DeviceManagementPartnerRequest) Get(ctx context.Context) (resObj *DeviceManagementPartner, err error) {\n\tvar query string\n\tif r.query != nil {\n\t\tquery = \"?\" + r.query.Encode()\n\t}\n\terr = r.JSONRequest(ctx, \"GET\", query, nil, &resObj)\n\treturn\n}",
"func (twilio *Twilio) GetAvailablePhoneNumbers(numberType PhoneNumberType, country string, options AvailablePhoneNumbersOptions) ([]*AvailablePhoneNumber, *Exception, error) {\n\t// build initial request\n\tresourceName := country + \"/\" + numberType.String() + \".json\"\n\treq, err := http.NewRequest(http.MethodGet, twilio.buildUrl(\"AvailablePhoneNumbers/\"+resourceName), nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t// authenticate\n\treq.SetBasicAuth(twilio.getBasicAuthCredentials())\n\n\t// set query string\n\tqueryValues, err := query.Values(options)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\treq.URL.RawQuery = queryValues.Encode()\n\n\t// perform request\n\tres, err := twilio.do(req)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdecoder := json.NewDecoder(res.Body)\n\tif res.StatusCode != http.StatusOK {\n\t\texception := new(Exception)\n\t\terr = decoder.Decode(exception)\n\t\treturn nil, exception, err\n\t}\n\n\t// decode response\n\tavailablePhoneNumberResponse := new(struct {\n\t\tAvailablePhoneNumbers []*AvailablePhoneNumber `json:\"available_phone_numbers\"`\n\t})\n\tdecoder.Decode(availablePhoneNumberResponse)\n\treturn availablePhoneNumberResponse.AvailablePhoneNumbers, nil, nil\n}",
"func (list *TransactionPaymentGatewayList) FetchWithContext(ctx context.Context) error {\n\tresources := &transactionPaymentGatewayList{}\n\terr := list.client.Call(ctx, http.MethodGet, list.nextPagePath, nil, nil, list.requestOptions, resources)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// copy over properties from the response\n\tlist.nextPagePath = resources.Next\n\tlist.hasMore = resources.HasMore\n\tlist.data = resources.Data\n\treturn nil\n}",
"func TaskNumGET(g *gin.Context) {\n\tg.JSON(http.StatusOK, gin.H{\"message\": \"ok\", \"num\": TaskNum})\n}",
"func (m *BookingBusiness) SetPhone(value *string)() {\n err := m.GetBackingStore().Set(\"phone\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (c *MobileCarriersGetCall) Context(ctx context.Context) *MobileCarriersGetCall {\n\tc.ctx_ = ctx\n\treturn c\n}",
"func (m *User) GetFaxNumber()(*string) {\n return m.faxNumber\n}",
"func PhoneGTE(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldPhone), v))\n\t})\n}",
"func PhoneGTE(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldPhone), v))\n\t})\n}",
"func PhoneGTE(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.GTE(s.C(FieldPhone), v))\n\t})\n}",
"func GetPersonAddress(c *gin.Context) {\n\tid := c.Param(\"id\")\n\tperson, err := models.LoadPersonByAddress(id)\n\tif err != nil {\n\t\tc.JSON(400, gin.H{\n\t\t\t\"error\": \"cannot find person: \" + id,\n\t\t})\n\t\treturn\n\t}\n\tif len(person) == 0 {\n\t\texplain := \"person \" + id + \" does not exist.\"\n\t\tc.JSON(400, gin.H{\n\t\t\t\"error\": explain,\n\t\t})\n\t\treturn\n\t}\n\tc.JSON(200, person)\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchFeedbackSummaryResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Accounts/{accountSid}/Calls/FeedbackSummary/{sid}.json\",\n\t\tPathParams: map[string]string{\n\t\t\t\"accountSid\": c.accountSid,\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tresponse := &FetchFeedbackSummaryResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func Phone(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPhone), v))\n\t})\n}",
"func Phone(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPhone), v))\n\t})\n}",
"func Phone(v string) predicate.User {\n\treturn predicate.User(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldPhone), v))\n\t})\n}",
"func (c Client) FetchWithContext(context context.Context) (*FetchVersionResponse, error) {\n\top := client.Operation{\n\t\tMethod: http.MethodGet,\n\t\tURI: \"/Services/{serviceSid}/Functions/{functionSid}/Versions/{sid}\",\n\t\tPathParams: map[string]string{\n\t\t\t\"serviceSid\": c.serviceSid,\n\t\t\t\"functionSid\": c.functionSid,\n\t\t\t\"sid\": c.sid,\n\t\t},\n\t}\n\n\tresponse := &FetchVersionResponse{}\n\tif err := c.client.Send(context, op, nil, response); err != nil {\n\t\treturn nil, err\n\t}\n\treturn response, nil\n}",
"func (c *PostalCodesGetCall) Context(ctx context.Context) *PostalCodesGetCall {\n\tc.ctx_ = ctx\n\treturn c\n}"
] | [
"0.6585963",
"0.5658682",
"0.5600527",
"0.53562325",
"0.5331787",
"0.52035147",
"0.5187026",
"0.51413673",
"0.51345986",
"0.5115097",
"0.5114629",
"0.5031288",
"0.50233984",
"0.5000459",
"0.49247894",
"0.48535225",
"0.4846251",
"0.4827607",
"0.48153108",
"0.48106825",
"0.47828165",
"0.47544348",
"0.474893",
"0.47483438",
"0.47178298",
"0.4699584",
"0.46890056",
"0.46848997",
"0.46844015",
"0.46708506",
"0.46532077",
"0.46361858",
"0.46243626",
"0.45676297",
"0.4561737",
"0.45513287",
"0.4551145",
"0.45488954",
"0.45454133",
"0.454476",
"0.4541477",
"0.454068",
"0.45362723",
"0.45295113",
"0.45245993",
"0.45176715",
"0.45136327",
"0.45053726",
"0.45016003",
"0.4492466",
"0.44891286",
"0.447684",
"0.445616",
"0.44311252",
"0.44226423",
"0.44204304",
"0.44160485",
"0.44150484",
"0.44002187",
"0.43925825",
"0.43922138",
"0.4390347",
"0.43782452",
"0.43666923",
"0.43666568",
"0.4363914",
"0.43590438",
"0.43501163",
"0.43483147",
"0.43353698",
"0.43327138",
"0.43302923",
"0.43257496",
"0.43252108",
"0.43184304",
"0.43060738",
"0.42836797",
"0.42710245",
"0.42683315",
"0.4267817",
"0.4266518",
"0.4260796",
"0.42509082",
"0.4245099",
"0.42417154",
"0.42412516",
"0.42398366",
"0.42389905",
"0.42380825",
"0.42351732",
"0.42335248",
"0.42335248",
"0.42335248",
"0.4224131",
"0.4221469",
"0.4219322",
"0.4219322",
"0.4219322",
"0.42125225",
"0.4212312"
] | 0.7211016 | 0 |
Execute executes the root command | func Execute() {
if err := rootCmd.Execute(); err != nil {
fmt.Println(err)
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error {\n\treturn rootCmd.Execute()\n}",
"func Execute() error { return rootCmd.Execute() }",
"func Execute() {\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\r\n\tif err := rootCmd.Execute(); err != nil {\r\n\t\tfmt.Println(err)\r\n\t\tos.Exit(1)\r\n\t}\r\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Fatalln(err)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(-1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func Execute() {\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}"
] | [
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8431804",
"0.8393209",
"0.835623",
"0.8344712",
"0.83392686",
"0.83392686",
"0.83212906",
"0.83194506",
"0.83194506",
"0.83194506",
"0.8315363",
"0.8315363",
"0.8315363",
"0.8315363",
"0.8311743",
"0.8311743",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991",
"0.8306991"
] | 0.83705604 | 31 |
NewValidateCreateAnomalyDetectionDiskEventParams creates a new ValidateCreateAnomalyDetectionDiskEventParams object with the default values initialized. | func NewValidateCreateAnomalyDetectionDiskEventParams() *ValidateCreateAnomalyDetectionDiskEventParams {
var ()
return &ValidateCreateAnomalyDetectionDiskEventParams{
timeout: cr.DefaultTimeout,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewCreateEventAlertConditionParams() *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (a *Client) ValidateCreateAnomalyDetectionMetricEvent(params *ValidateCreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*ValidateCreateAnomalyDetectionMetricEventNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewValidateCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"validateCreateAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents/validator\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ValidateCreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ValidateCreateAnomalyDetectionMetricEventNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for validateCreateAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func NewEventsOnDisk(file string, clock Clock) (*EventsOnDisk, error) {\n\treturn &EventsOnDisk{\n\t\tfilename: filepath.Clean(file),\n\t\tclock: clock,\n\t}, nil\n}",
"func (m *VMAddDiskParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateWhere(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *VMVolumeCreationParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateClusterID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateElfStoragePolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSharing(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSizeUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *MountNewCreateDisksParamsVMVolume) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateElfStoragePolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSizeUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func New(cfg FactoryConfig) (AnomalyDetector, error) {\n\tswitch {\n\tcase cfg.BreakerStrategyConfig.DiscreteValueOutOfList != nil:\n\t\treturn newDiscreteValueOutOfListAnalyser(cfg.Config)\n\tcase cfg.BreakerStrategyConfig.ContinuousValueDeviation != nil:\n\t\treturn newContinuousValueDeviation(cfg.Config)\n\tcase cfg.BreakerStrategyConfig.CustomService != \"\":\n\t\treturn newCustomAnalyser(cfg.Config)\n\tcase cfg.customFactory != nil:\n\t\treturn cfg.customFactory(cfg)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"no anomaly detection could be built, missing definition\")\n\t}\n}",
"func (m *MountNewCreateDisksParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateBoot(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxIopsPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVMVolume(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewValidateParams() *ValidateParams {\n\treturn &ValidateParams{\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (x *fastReflection_EventCreateBatch) New() protoreflect.Message {\n\treturn new(fastReflection_EventCreateBatch)\n}",
"func (m *VMAddDiskParamsData) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIoPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxIopsPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVMDisks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (x *fastReflection_EventCreateClass) New() protoreflect.Message {\n\treturn new(fastReflection_EventCreateClass)\n}",
"func New(eventType Type, srv server.Server, bootType, script string, params map[string]interface{}) Event {\n\tvar event Event\n\n\tevent.Type = eventType\n\tevent.Date = time.Now()\n\tevent.Server = srv\n\tevent.BootType = bootType\n\tevent.Script = script\n\tevent.Params = params\n\n\tevent.setMessage()\n\n\treturn event\n}",
"func (x *fastReflection_ValidatorSlashEventRecord) New() protoreflect.Message {\n\treturn new(fastReflection_ValidatorSlashEventRecord)\n}",
"func NewHostsAnomalyDetectionConfig(connectionLostDetection ConnectionLostDetectionConfig, highCpuSaturationDetection HighCpuSaturationDetectionConfig, highMemoryDetection HighMemoryDetectionConfig, highGcActivityDetection HighGcActivityDetectionConfig, outOfMemoryDetection OutOfMemoryDetectionConfig, outOfThreadsDetection OutOfThreadsDetectionConfig, networkDroppedPacketsDetection NetworkDroppedPacketsDetectionConfig, networkErrorsDetection NetworkErrorsDetectionConfig, highNetworkDetection HighNetworkDetectionConfig, networkTcpProblemsDetection NetworkTcpProblemsDetectionConfig, networkHighRetransmissionDetection NetworkHighRetransmissionDetectionConfig, diskLowSpaceDetection DiskLowSpaceDetectionConfig, diskSlowWritesAndReadsDetection DiskSlowWritesAndReadsDetectionConfig, diskLowInodesDetection DiskLowInodesDetectionConfig, ) *HostsAnomalyDetectionConfig {\n\tthis := HostsAnomalyDetectionConfig{}\n\tthis.ConnectionLostDetection = connectionLostDetection\n\tthis.HighCpuSaturationDetection = highCpuSaturationDetection\n\tthis.HighMemoryDetection = highMemoryDetection\n\tthis.HighGcActivityDetection = highGcActivityDetection\n\tthis.OutOfMemoryDetection = outOfMemoryDetection\n\tthis.OutOfThreadsDetection = outOfThreadsDetection\n\tthis.NetworkDroppedPacketsDetection = networkDroppedPacketsDetection\n\tthis.NetworkErrorsDetection = networkErrorsDetection\n\tthis.HighNetworkDetection = highNetworkDetection\n\tthis.NetworkTcpProblemsDetection = networkTcpProblemsDetection\n\tthis.NetworkHighRetransmissionDetection = networkHighRetransmissionDetection\n\tthis.DiskLowSpaceDetection = diskLowSpaceDetection\n\tthis.DiskSlowWritesAndReadsDetection = diskSlowWritesAndReadsDetection\n\tthis.DiskLowInodesDetection = diskLowInodesDetection\n\treturn &this\n}",
"func (b EmployeeCreatedEvent) ValidateEmployeeCreatedEvent() error {\n\tvar validate *validator.Validate\n\tvalidate = validator.New()\n\terr := validate.Struct(b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
"func NewEvent(x, y float64, button, event string) Event {\n\treturn Event{\n\t\tPoint2: floatgeom.Point2{x, y},\n\t\tButton: button,\n\t\tEvent: event,\n\t}\n}",
"func NewAnomalyDetectionLimiter(numWorkloads int, numEventsAllowedPerPeriod int, period time.Duration) (*AnomalyDetectionLimiter, error) {\n\tlimiter, err := utils.NewLimiter[string](numWorkloads, numEventsAllowedPerPeriod, period)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &AnomalyDetectionLimiter{\n\t\tlimiter: limiter,\n\t}, nil\n}",
"func (a *Client) CreateAnomalyDetectionMetricEvent(params *CreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*CreateAnomalyDetectionMetricEventCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateAnomalyDetectionMetricEventCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (m *HostsAnomalyDetectionConfig) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConnectionLostDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDiskLowInodesDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDiskLowSpaceDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDiskSlowWritesAndReadsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighCPUSaturationDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighGcActivityDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighMemoryDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighNetworkDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMetadata(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkDroppedPacketsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkErrorsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkHighRetransmissionDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkTCPProblemsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutOfMemoryDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutOfThreadsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (x *fastReflection_DuplicateVoteEvidence) New() protoreflect.Message {\n\treturn new(fastReflection_DuplicateVoteEvidence)\n}",
"func NewEventsLoader(roomVer RoomVersion, keyRing JSONVerifier, stateProvider StateProvider, provider EventProvider, performSoftFailCheck bool) *EventsLoader {\n\treturn &EventsLoader{\n\t\troomVer: roomVer,\n\t\tkeyRing: keyRing,\n\t\tprovider: provider,\n\t\tstateProvider: stateProvider,\n\t\tperformSoftFailCheck: performSoftFailCheck,\n\t}\n}",
"func NewBaseEvent( e EEventType ) (event *BaseEvent, err error) {\n event = new(BaseEvent)\n\n // setting these to true will force serialisation\n event.part2JsonExtracted = true\n event.sysParamsExtracted = true\n event.execParamsExtracted = true\n event.verbose = Verbose // pick up the global flag\n\n err = event.SetEventType(e)\n return\n}",
"func (m *VMSnapshotCreationParamsData) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConsistentType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVMID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetLogsByEventIDDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateMessage(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewEvent(x, y float64, button Button, event string) Event {\n\treturn Event{\n\t\tPoint2: floatgeom.Point2{x, y},\n\t\tButton: button,\n\t\tEvent: event,\n\t}\n}",
"func NewCreateEventAlertConditionParamsWithHTTPClient(client *http.Client) *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PostAPIV2EventsParamsBodyAttributesHeaders) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func NewGeneralAlterEventRequestWithoutParam() *GeneralAlterEventRequest {\n\n return &GeneralAlterEventRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/event:generalAlter\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}",
"func NewEvent(timestampMs int64, message string) *Event {\n\tevent := &Event{\n\t\tInputLogEvent: &cloudwatchlogs.InputLogEvent{\n\t\t\tTimestamp: aws.Int64(timestampMs),\n\t\t\tMessage: aws.String(message)},\n\t}\n\treturn event\n}",
"func NewGetInstancesEventByEventIDParams() *GetInstancesEventByEventIDParams {\n\tvar ()\n\treturn &GetInstancesEventByEventIDParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func New(devEntries []PodDevicesEntry,\n\tdevices map[string][]string) DeviceManagerCheckpoint {\n\treturn &Data{\n\t\tData: checkpointData{\n\t\t\tPodDeviceEntries: devEntries,\n\t\t\tRegisteredDevices: devices,\n\t\t},\n\t}\n}",
"func NewEvent(action string, version int8, parent uuid.UUID, key []byte, data []byte) Event {\n\tid := uuid.Must(uuid.NewV4())\n\tif key == nil {\n\t\tkey = id.Bytes()\n\t}\n\n\t// Fix: unexpected end of JSON input\n\tif len(data) == 0 {\n\t\tdata = []byte(\"null\")\n\t}\n\n\tevent := Event{\n\t\tParent: parent,\n\t\tID: id,\n\t\tHeaders: make(map[string]string),\n\t\tAction: action,\n\t\tData: data,\n\t\tKey: key,\n\t\tStatus: StatusOK,\n\t\tVersion: version,\n\t\tCtx: context.Background(),\n\t}\n\n\treturn event\n}",
"func (e *Event) Validate() error {\n\tif e.Message == \"\" || e.MessageOffset == \"\" || e.Time == nil || e.Type == \"\" {\n\t\treturn errs.ErrMissingParameters\n\t}\n\treturn nil\n}",
"func ValidateCreateEvent(payload *model.CreateEventReq) error {\n\tif payload == nil {\n\t\terr := errors.New(\"invalid payload\")\n\t\treturn err\n\t}\n\n\tif strings.TrimSpace(payload.Title) == \"\" {\n\t\terr := errors.New(\"invalid title\")\n\t\treturn err\n\t}\n\n\tif payload.LocationID == 0 {\n\t\terr := errors.New(\"invalid location id\")\n\t\treturn err\n\t}\n\n\tif payload.StartDate.UTC().IsZero() {\n\t\terr := errors.New(\"invalid time. time format is YYYY:MM:dd HH:mm:ss\")\n\t\treturn err\n\t}\n\n\tif payload.EndDate.UTC().IsZero() {\n\t\terr := errors.New(\"invalid time. time format is YYYY:MM:dd HH:mm:ss\")\n\t\treturn err\n\t}\n\n\tif len(payload.TicketID) == 0 {\n\t\terr := errors.New(\"invalid ticket id\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func NewFromProto(pbSceneEvent *pb.SceneEvent) SceneEvent {\n\treturn SceneEvent{\n\t\tDownChangeData: pbSceneEvent.GetDownChangeData(),\n\t\tDownVersion: pbSceneEvent.GetDownVersion(),\n\t\tId: pbSceneEvent.GetId(),\n\t\tSceneId: pbSceneEvent.GetSceneId(),\n\t\tUpChangeData: pbSceneEvent.GetUpChangeData(),\n\t\tUpVersion: pbSceneEvent.GetUpVersion(),\n\t}\n}",
"func NewEvent(data map[string]interface{}) (Event, error) {\n\treturn parseToEventType(data)\n}",
"func NewGetAuditEventsParams() *GetAuditEventsParams {\n\tvar (\n\t\tpageDefault = int32(0)\n\t\tsizeDefault = int32(100)\n\t)\n\treturn &GetAuditEventsParams{\n\t\tPage: &pageDefault,\n\t\tSize: &sizeDefault,\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *PostAPIV2EventsParamsBodyAttributes) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateCallback(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateHeaders(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewEvtFailureDetector(id int, nodeIDs []int, sr SuspectRestorer, delta time.Duration, hbSend chan<- Heartbeat) *EvtFailureDetector {\n\tsuspected := make(map[int]bool)\n\talive := make(map[int]bool)\n\n\t// TODO(student): perform any initialization necessary\n\tfor _, node := range nodeIDs {\n\t\talive[node] = true //assumes all provided node IDs is alive (when init fd)\n\t}\n\n\treturn &EvtFailureDetector{\n\t\tid: id,\n\t\tnodeIDs: nodeIDs,\n\t\talive: alive,\n\t\tsuspected: suspected,\n\n\t\tsr: sr,\n\n\t\tdelay: delta,\n\t\tdelta: delta,\n\n\t\thbSend: hbSend,\n\t\thbIn: make(chan Heartbeat, 30000),\n\t\tstop: make(chan struct{}),\n\n\t\ttestingHook: func() {}, // DO NOT REMOVE THIS LINE. A no-op when not testing.\n\t}\n}",
"func NewDescribeSnapshotPolicyDiskRelationsRequestWithoutParam() *DescribeSnapshotPolicyDiskRelationsRequest {\n\n return &DescribeSnapshotPolicyDiskRelationsRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/snapshotPolicyDiskRelations:describe\",\n Method: \"POST\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}",
"func (o *HostsAnomalyDetectionConfig) SetOutOfMemoryDetection(v OutOfMemoryDetectionConfig) {\n\to.OutOfMemoryDetection = v\n}",
"func NewCreateEventAlertConditionParamsWithTimeout(timeout time.Duration) *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewE201(msg, value, path string, finder errorCondition) error {\n\tf, err := os.ReadFile(path)\n\tif err != nil {\n\t\treturn NewE100(\"NewE201\", errors.New(msg))\n\t}\n\n\tctx, err := annotate(f, value, finder)\n\tif err != nil {\n\t\treturn NewE100(\"NewE201/annotate\", err)\n\t}\n\n\ttitle := fmt.Sprintf(\n\t\t\"Invalid value [%s:%d:%d]:\",\n\t\tfilepath.ToSlash(path),\n\t\tctx.line,\n\t\tctx.span[0])\n\n\treturn NewError(\n\t\t\"E201\",\n\t\ttitle,\n\t\tfmt.Sprintf(\"%s\\n%s\", ctx.content, msg))\n}",
"func (x *fastReflection_Evidence) New() protoreflect.Message {\n\treturn new(fastReflection_Evidence)\n}",
"func New(config *Config) *Validate {\n\n\tv := &Validate{\n\t\ttagName: config.TagName,\n\t\tfieldNameTag: config.FieldNameTag,\n\t\ttagCache: &tagCacheMap{m: map[string]*cachedTag{}},\n\t\tstructCache: &structCacheMap{m: map[reflect.Type]*cachedStruct{}},\n\t\terrsPool: &sync.Pool{New: func() interface{} {\n\t\t\treturn ValidationErrors{}\n\t\t}}}\n\n\tif len(v.aliasValidators) == 0 {\n\t\t// must copy alias validators for separate validations to be used in each validator instance\n\t\tv.aliasValidators = map[string]string{}\n\t\tfor k, val := range bakedInAliasValidators {\n\t\t\tv.RegisterAliasValidation(k, val)\n\t\t}\n\t}\n\n\tif len(v.validationFuncs) == 0 {\n\t\t// must copy validators for separate validations to be used in each instance\n\t\tv.validationFuncs = map[string]Func{}\n\t\tfor k, val := range bakedInValidators {\n\t\t\tv.RegisterValidation(k, val)\n\t\t}\n\t}\n\n\treturn v\n}",
"func NewDeviceManagementTroubleshootingEvent()(*DeviceManagementTroubleshootingEvent) {\n m := &DeviceManagementTroubleshootingEvent{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewEvent(et EventType, ev string, path string, filename string) *Event {\n\treturn &Event{\n\t\tEventType: et,\n\t\tEvent: ev,\n\t\tPath: path,\n\t\tFilename: filename,\n\t}\n}",
"func newEvents(t *testing.T, s string) Events {\n\tt.Helper()\n\n\tvar (\n\t\tlines = strings.Split(s, \"\\n\")\n\t\tgroups = []string{\"\"}\n\t\tevents = make(map[string]Events)\n\t)\n\tfor no, line := range lines {\n\t\tif i := strings.IndexByte(line, '#'); i > -1 {\n\t\t\tline = line[:i]\n\t\t}\n\t\tline = strings.TrimSpace(line)\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasSuffix(line, \":\") {\n\t\t\tgroups = strings.Split(strings.TrimRight(line, \":\"), \",\")\n\t\t\tfor i := range groups {\n\t\t\t\tgroups[i] = strings.TrimSpace(groups[i])\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\tfields := strings.Fields(line)\n\t\tif len(fields) < 2 {\n\t\t\tif strings.ToUpper(fields[0]) == \"EMPTY\" {\n\t\t\t\tfor _, g := range groups {\n\t\t\t\t\tevents[g] = Events{}\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tt.Fatalf(\"newEvents: line %d has less than 2 fields: %s\", no, line)\n\t\t}\n\n\t\tpath := strings.Trim(fields[len(fields)-1], `\"`)\n\n\t\tvar op Op\n\t\tfor _, e := range fields[:len(fields)-1] {\n\t\t\tif e == \"|\" {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, ee := range strings.Split(e, \"|\") {\n\t\t\t\tswitch strings.ToUpper(ee) {\n\t\t\t\tcase \"CREATE\":\n\t\t\t\t\top |= Create\n\t\t\t\tcase \"WRITE\":\n\t\t\t\t\top |= Write\n\t\t\t\tcase \"REMOVE\":\n\t\t\t\t\top |= Remove\n\t\t\t\tcase \"RENAME\":\n\t\t\t\t\top |= Rename\n\t\t\t\tcase \"CHMOD\":\n\t\t\t\t\top |= Chmod\n\t\t\t\tdefault:\n\t\t\t\t\tt.Fatalf(\"newEvents: line %d has unknown event %q: %s\", no, ee, line)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, g := range groups {\n\t\t\tevents[g] = append(events[g], Event{Name: path, Op: op})\n\t\t}\n\t}\n\n\tif e, ok := events[runtime.GOOS]; ok {\n\t\treturn e\n\t}\n\tswitch runtime.GOOS {\n\t// kqueue shortcut\n\tcase \"freebsd\", \"netbsd\", \"openbsd\", \"dragonfly\", \"darwin\":\n\t\tif e, ok := events[\"kqueue\"]; ok {\n\t\t\treturn e\n\t\t}\n\t// fen shortcut\n\tcase \"solaris\", \"illumos\":\n\t\tif e, ok := events[\"fen\"]; ok {\n\t\t\treturn e\n\t\t}\n\t}\n\treturn events[\"\"]\n}",
"func NewSystemEventsParams() *SystemEventsParams {\n\tvar ()\n\treturn &SystemEventsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewListEventsParams() *ListEventsParams {\n\tvar ()\n\treturn &ListEventsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func New(config Config) (spec.Eventer, error) {\n\t// Settings.\n\tif config.Flag == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"flag must not be empty\")\n\t}\n\tif config.Viper == nil {\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"viper must not be empty\")\n\t}\n\n\tvar err error\n\n\tvar newEventer spec.Eventer\n\tswitch config.Type {\n\tcase github.GithubEventerType:\n\t\tgithubConfig := github.DefaultConfig()\n\n\t\tgithubConfig.HTTPClient = config.HTTPClient\n\t\tgithubConfig.Logger = config.Logger\n\n\t\tgithubConfig.Environment = config.Viper.GetString(config.Flag.Service.Deployer.Environment)\n\t\tgithubConfig.OAuthToken = config.Viper.GetString(config.Flag.Service.Deployer.Eventer.GitHub.OAuthToken)\n\t\tgithubConfig.Organisation = config.Viper.GetString(config.Flag.Service.Deployer.Eventer.GitHub.Organisation)\n\t\tgithubConfig.PollInterval = config.Viper.GetDuration(config.Flag.Service.Deployer.Eventer.GitHub.PollInterval)\n\t\tgithubConfig.Provider = config.Viper.GetString(config.Flag.Service.Deployer.Provider)\n\n\t\t{\n\t\t\tprojectList := configuration.GetProjectList(githubConfig.Provider, githubConfig.Environment)\n\t\t\tgithubConfig.ProjectList = projectList\n\t\t}\n\n\t\tnewEventer, err = github.New(githubConfig)\n\t\tif err != nil {\n\t\t\treturn nil, microerror.Mask(err)\n\t\t}\n\n\tdefault:\n\t\treturn nil, microerror.Maskf(invalidConfigError, \"eventer type not implemented\")\n\t}\n\n\treturn newEventer, nil\n}",
"func NewKdfParams(n, p, r int) *KdfParams {\n\n\treturn &KdfParams{\n\t\tN: n,\n\t\tP: p,\n\t\tR: r,\n\t}\n}",
"func (m *VMAddDiskParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateData(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateWhere(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (c controller) CreateMultiEntityAnomalyDetector(ctx context.Context, request entity.CreateDetectorRequest, interactive bool, display bool) ([]string, error) {\n\tif request.PartitionField == nil || len(*request.PartitionField) < 1 {\n\t\tresult, err := c.CreateAnomalyDetector(ctx, request)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treturn []string{*result}, err\n\t}\n\tfilterValues, err := getFilterValues(ctx, request, c)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(filterValues) < 1 {\n\t\treturn nil, fmt.Errorf(\n\t\t\t\"failed to get values for partition field: %s, check whether any data is available in index %s\",\n\t\t\t*request.PartitionField,\n\t\t\trequest.Index,\n\t\t)\n\t}\n\tproceed := true\n\tif interactive {\n\t\tproceed = c.askForConfirmation(\n\t\t\tcmapper.StringToStringPtr(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t\"esad will create %d detector(s). Do you want to proceed? please type (y)es or (n)o and then press enter:\",\n\t\t\t\t\tlen(filterValues),\n\t\t\t\t),\n\t\t\t),\n\t\t)\n\t}\n\tif !proceed {\n\t\treturn nil, nil\n\t}\n\tvar bar *pb.ProgressBar\n\tif display {\n\t\tbar = createProgressBar(len(filterValues))\n\t}\n\tvar detectors []string\n\tname := request.Name\n\tfilter := request.Filter\n\tvar createdDetectors []entity.Detector\n\tfor _, value := range filterValues {\n\t\trequest.Filter = buildCompoundQuery(*request.PartitionField, value, filter)\n\t\trequest.Name = fmt.Sprintf(\"%s-%s\", name, value)\n\t\tresult, err := c.CreateAnomalyDetector(ctx, request)\n\t\tif err != nil {\n\t\t\tc.cleanupCreatedDetectors(ctx, createdDetectors)\n\t\t\treturn nil, err\n\t\t}\n\t\tcreatedDetectors = append(createdDetectors, entity.Detector{\n\t\t\tID: *result,\n\t\t\tName: request.Name,\n\t\t})\n\t\tdetectors = append(detectors, request.Name)\n\t\tif bar != nil {\n\t\t\tbar.Increment()\n\t\t}\n\t}\n\tif bar != nil {\n\t\tbar.Finish()\n\t}\n\treturn detectors, nil\n}",
"func (m *NfsExportCreationParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateClusterID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateReplicaNum(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateThinProvision(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (e VulnEvent) Validate() error {\n\tif e.SourceName == \"\" {\n\t\treturn fmt.Errorf(\"must set SourceName in event\")\n\t}\n\tif e.Asset.IPAddress == \"\" && e.Asset.Hostname == \"\" {\n\t\treturn fmt.Errorf(\"must set IPAddress or Hostname in event\")\n\t}\n\treturn nil\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (m *VMAddDiskParamsDataVMDisks) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateMountDisks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMountNewCreateDisks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewDescribeAuditLogRequestWithoutParam() *DescribeAuditLogRequest {\n\n return &DescribeAuditLogRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/instances/{insId}/logs/{logId}\",\n Method: \"GET\",\n Header: nil,\n Version: \"v1\",\n },\n }\n}",
"func NewEvent(msg string) *Event {\n\treturn &Event{\n\t\tCreated: time.Now(),\n\t\tDescription: msg,\n\t}\n}",
"func NewCreateApplicationDetectionRuleBadRequest() *CreateApplicationDetectionRuleBadRequest {\n\treturn &CreateApplicationDetectionRuleBadRequest{}\n}",
"func (*EventNotificationsV1) NewRules(eventTypeFilter string) (_model *Rules, err error) {\n\t_model = &Rules{\n\t\tEventTypeFilter: core.StringPtr(eventTypeFilter),\n\t}\n\terr = core.ValidateStruct(_model, \"required parameters\")\n\treturn\n}",
"func (r *ProjectsLocationsProcessesRunsLineageEventsService) Create(parent string, googleclouddatacataloglineagev1lineageevent *GoogleCloudDatacatalogLineageV1LineageEvent) *ProjectsLocationsProcessesRunsLineageEventsCreateCall {\n\tc := &ProjectsLocationsProcessesRunsLineageEventsCreateCall{s: r.s, urlParams_: make(gensupport.URLParams)}\n\tc.parent = parent\n\tc.googleclouddatacataloglineagev1lineageevent = googleclouddatacataloglineagev1lineageevent\n\treturn c\n}",
"func NewFault(name, id, condition, specificProblem string, priority EventPriority, severity Severity, sourceType SourceType, status VfStatus, sourceName string) *EventFault {\n\tfault := new(EventFault)\n\n\tfault.AlarmCondition = condition\n\tfault.SpecificProblem = specificProblem\n\tfault.EventSeverity = severity\n\tfault.EventSourceType = sourceType\n\tfault.VfStatus = status\n\tfault.FaultFieldsVersion = 2.0\n\n\tfault.Domain = DomainFault\n\tfault.SourceName = sourceName\n\tfault.EventName = name\n\tfault.EventID = id\n\tfault.Version = 3.0\n\tfault.Priority = priority\n\n\tfault.StartEpochMicrosec = time.Now().UnixNano() / 1000\n\tfault.LastEpochMicrosec = fault.StartEpochMicrosec\n\n\treturn fault\n}",
"func New(agentConfig config.AgentConfig, maxRetries int, apicClient apic.Client) *EventProcessor {\n\tclient = apicClient\n\tep := &EventProcessor{\n\t\ttenantID: agentConfig.Central.GetTenantID(),\n\t\tdeployment: agentConfig.Central.GetAPICDeployment(),\n\t\tenvironment: agentConfig.Central.GetEnvironmentName(),\n\t\tenvironmentID: agentConfig.Central.GetEnvironmentID(),\n\t\tteamID: agentConfig.Central.GetTeamID(),\n\t\t//\tapiWatcher: apimanager.GetWatcher(),\n\t\tmaxRetries: maxRetries,\n\t\teventGenerator: transaction.NewEventGenerator(),\n\t}\n\t//environment = agentConfig.Central.GetEnvironmentName()\n\tenvironmentURL = agentConfig.Central.GetEnvironmentURL()\n\tdebugf(\"Event Processor Created with EnvironmentID: %s\", ep.environmentID)\n\treturn ep\n}",
"func (c *Client) NewEventRequest(method, path string, body interface{}) (*http.Request, error) {\n\treturn newRequest(c.EventsURL, method, path, body)\n}",
"func NewUpdateEventParams() *UpdateEventParams {\n\tvar ()\n\treturn &UpdateEventParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewDatabaseConnectionFailureDetectionConfig(enabled bool, ) *DatabaseConnectionFailureDetectionConfig {\n\tthis := DatabaseConnectionFailureDetectionConfig{}\n\tthis.Enabled = enabled\n\treturn &this\n}",
"func (m *MountNewCreateDisksParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateBus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMVolume(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewEvent(event interface{}, source string, sourcetype string, index string) (Event) {\n\thostname, _ := os.Hostname()\n\te := Event{Time: time.Now().Unix(), Host: hostname, Source: source, SourceType: sourcetype, Index: index, Event: event}\n\n\treturn e\n}",
"func NewEvent(id, eventType string, version int, payload []byte) *Event {\n\tpayloadStr := string(payload)\n\treturn &Event{\n\t\tAggregateID: id,\n\t\tEventType: eventType,\n\t\tVersion: version,\n\t\tEventAt: time.Now(),\n\t\tPayload: &payloadStr,\n\t}\n}",
"func NewMachineValidatingWebhookConfiguration() *admissionregistrationv1.ValidatingWebhookConfiguration {\n\tvalidatingWebhookConfiguration := &admissionregistrationv1.ValidatingWebhookConfiguration{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: machineWebhookConfigurationName,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\t\"service.beta.openshift.io/inject-cabundle\": \"true\",\n\t\t\t},\n\t\t},\n\t\tWebhooks: []admissionregistrationv1.ValidatingWebhook{\n\t\t\tMachineValidatingWebhook(),\n\t\t\tMachineSetValidatingWebhook(),\n\t\t},\n\t}\n\n\t// Setting group version is required for testEnv to create unstructured objects, as the new structure sets it on empty strings\n\t// Usual way to populate those values, is to create the resource in the cluster first, which we can't yet do.\n\tvalidatingWebhookConfiguration.SetGroupVersionKind(admissionregistrationv1.SchemeGroupVersion.WithKind(\"ValidatingWebhookConfiguration\"))\n\treturn validatingWebhookConfiguration\n}",
"func NewListAlertableEventTypeParams() *ListAlertableEventTypeParams {\n\tvar ()\n\treturn &ListAlertableEventTypeParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewEventLogger(root, base string, size int64) *EventLogger {\n\troot = strings.TrimRight(root, string(os.PathSeparator))\n\treturn &EventLogger{\n\t\troot: root,\n\t\tbase: base,\n\t\tmaxSize: size,\n\t\tcache: make(map[string]*IndexedLogfile),\n\t}\n}",
"func (tr *Repository) NewEvent(t Transaction, eventType string, violations []string) es.Event {\n\ttime := time.Now()\n\n\tevent := es.Event{\n\t\tTimestamp: time,\n\t\tName: eventType,\n\t\tPayload: t,\n\t\tViolations: violations,\n\t}\n\n\treturn event\n}",
"func NewCreateanewPbxDeviceConfigRequest(server string, body CreateanewPbxDeviceConfigJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewPbxDeviceConfigRequestWithBody(server, \"application/json\", bodyReader)\n}",
"func NewCreateanewEmergencyMappingContainerRequest(server string, body CreateanewEmergencyMappingContainerJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewEmergencyMappingContainerRequestWithBody(server, \"application/json\", bodyReader)\n}",
"func (f *ForkParams) Validate() error {\n\treturn nil\n}",
"func CreateGetHealthMonitorLogsRequest() (request *GetHealthMonitorLogsRequest) {\n\trequest = &GetHealthMonitorLogsRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"EHPC\", \"2018-04-12\", \"GetHealthMonitorLogs\", \"\", \"\")\n\trequest.Method = requests.GET\n\treturn\n}",
"func NewPDFAlertEvent() *PDFAlertEvent {\n\treturn (*PDFAlertEvent)(allocPDFAlertEventMemory(1))\n}",
"func NewDescribeAlarmHistoryRequestWithoutParam() *DescribeAlarmHistoryRequest {\n\n return &DescribeAlarmHistoryRequest{\n JDCloudRequest: core.JDCloudRequest{\n URL: \"/regions/{regionId}/describeAlarmHistory\",\n Method: \"GET\",\n Header: nil,\n Version: \"v2\",\n },\n }\n}",
"func NewEmptyEvent(et EventType) *Event {\n\treturn &Event{\n\t\tEventType: et,\n\t}\n}",
"func (x *fastReflection_EventReceive) New() protoreflect.Message {\n\treturn new(fastReflection_EventReceive)\n}",
"func NewEventFromBytes(ctx context.Context, acker Acknowledger, data []byte) *Event {\n\treturn &Event{\n\t\tctx: ctx,\n\t\tacker: acker,\n\t\tencoded: data,\n\t}\n}",
"func NewUserExperienceAnalyticsAnomalyDevice()(*UserExperienceAnalyticsAnomalyDevice) {\n m := &UserExperienceAnalyticsAnomalyDevice{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewValidateCreateServiceRequestNamingParams() *ValidateCreateServiceRequestNamingParams {\n\tvar ()\n\treturn &ValidateCreateServiceRequestNamingParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewInvalidArgument(parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(nil, DefaultInvalidArgument, wparams.NewParamStorer(parameters...))\n}",
"func NewCheckpointCommand(dockerCli command.Cli) *cobra.Command {\n\tcmd := &cobra.Command{\n\t\tUse: \"checkpoint\",\n\t\tShort: \"Manage checkpoints\",\n\t\tArgs: cli.NoArgs,\n\t\tRunE: command.ShowHelp(dockerCli.Err()),\n\t\tAnnotations: map[string]string{\n\t\t\t\"experimental\": \"\",\n\t\t\t\"ostype\": \"linux\",\n\t\t\t\"version\": \"1.25\",\n\t\t},\n\t}\n\tcmd.AddCommand(\n\t\tnewCreateCommand(dockerCli),\n\t\tnewListCommand(dockerCli),\n\t\tnewRemoveCommand(dockerCli),\n\t)\n\treturn cmd\n}",
"func NewEvent(subject string, eventType EventType, obj interface{}) Event {\n\treturn eventImpl{\n\t\tsubject: subject,\n\t\ttime: time.Now(),\n\t\teventType: eventType,\n\t\tobject: obj,\n\t}\n}",
"func (nofe NodeOpenFailedEvent) AsPartitionHealthReportCreatedEvent() (*PartitionHealthReportCreatedEvent, bool) {\n\treturn nil, false\n}",
"func (m *VMAddDiskParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateIoPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}"
] | [
"0.7977251",
"0.7609439",
"0.7068783",
"0.6903687",
"0.66981363",
"0.66625434",
"0.64095527",
"0.5028895",
"0.49809852",
"0.4936524",
"0.4910335",
"0.4879529",
"0.48306087",
"0.47612834",
"0.46680626",
"0.46357358",
"0.46251673",
"0.45927116",
"0.45336115",
"0.45284343",
"0.44969893",
"0.44969243",
"0.44957474",
"0.44423446",
"0.44158396",
"0.4406344",
"0.43810228",
"0.43269363",
"0.43202305",
"0.42957672",
"0.42888954",
"0.4281061",
"0.42799658",
"0.4270298",
"0.4228603",
"0.42220846",
"0.4218164",
"0.4213197",
"0.42127684",
"0.41866463",
"0.41842347",
"0.41801715",
"0.41488674",
"0.41465566",
"0.41455144",
"0.414005",
"0.41272083",
"0.41219303",
"0.41215006",
"0.41192028",
"0.4105325",
"0.41023406",
"0.40945035",
"0.40944877",
"0.40939116",
"0.40932253",
"0.4091673",
"0.4086973",
"0.40772066",
"0.4076352",
"0.40757957",
"0.40552554",
"0.4034605",
"0.40265465",
"0.40227565",
"0.40222883",
"0.40191606",
"0.40176705",
"0.4013833",
"0.40131244",
"0.39943197",
"0.39937675",
"0.39918554",
"0.39896116",
"0.398898",
"0.39728245",
"0.3967452",
"0.39639232",
"0.39625773",
"0.39586344",
"0.39539474",
"0.39419192",
"0.39414778",
"0.39349404",
"0.39258826",
"0.3924971",
"0.39225593",
"0.39213315",
"0.39163497",
"0.39143768",
"0.3908966",
"0.390708",
"0.39026484",
"0.38997075",
"0.38955078",
"0.38872188",
"0.38821858",
"0.38813022",
"0.38777986",
"0.38676655"
] | 0.8647856 | 0 |
NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout creates a new ValidateCreateAnomalyDetectionDiskEventParams object with the default values initialized, and the ability to set a timeout on a request | func NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {
var ()
return &ValidateCreateAnomalyDetectionDiskEventParams{
timeout: timeout,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParams() *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func NewCreateEventAlertConditionParamsWithTimeout(timeout time.Duration) *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetAuditEventsParamsWithTimeout(timeout time.Duration) *GetAuditEventsParams {\n\tvar (\n\t\tpageDefault = int32(0)\n\t\tsizeDefault = int32(100)\n\t)\n\treturn &GetAuditEventsParams{\n\t\tPage: &pageDefault,\n\t\tSize: &sizeDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewSystemEventsParamsWithTimeout(timeout time.Duration) *SystemEventsParams {\n\tvar ()\n\treturn &SystemEventsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *FileInfoCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateEventAlertConditionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewValidateParamsWithTimeout(timeout time.Duration) *ValidateParams {\n\treturn &ValidateParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetInstancesEventByEventIDParamsWithTimeout(timeout time.Duration) *GetInstancesEventByEventIDParams {\n\tvar ()\n\treturn &GetInstancesEventByEventIDParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *CreateEventAlertConditionParams) WithTimeout(timeout time.Duration) *CreateEventAlertConditionParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *ValidateParams) WithTimeout(timeout time.Duration) *ValidateParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewTimeout(parameters ...wparams.ParamStorer) Error {\n\treturn newGenericError(nil, DefaultTimeout, wparams.NewParamStorer(parameters...))\n}",
"func NewVolumeDeleteParamsWithTimeout(timeout time.Duration) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteNodeParamsWithTimeout(timeout time.Duration) *DeleteNodeParams {\n\tvar ()\n\treturn &DeleteNodeParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewDeleteVersionControlRequestParamsWithTimeout(timeout time.Duration) *DeleteVersionControlRequestParams {\n\treturn &DeleteVersionControlRequestParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (c *OrganizationsEnvironmentsApisRevisionsDebugsessionsCreateCall) Timeout(timeout int64) *OrganizationsEnvironmentsApisRevisionsDebugsessionsCreateCall {\n\tc.urlParams_.Set(\"timeout\", fmt.Sprint(timeout))\n\treturn c\n}",
"func (o *CreateLifecycleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetEventLogsUsingGETParamsWithTimeout(timeout time.Duration) *GetEventLogsUsingGETParams {\n\treturn &GetEventLogsUsingGETParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *SystemEventsParams) WithTimeout(timeout time.Duration) *SystemEventsParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewListEventsParamsWithTimeout(timeout time.Duration) *ListEventsParams {\n\tvar ()\n\treturn &ListEventsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewFileInfoCreateParamsWithTimeout(timeout time.Duration) *FileInfoCreateParams {\n\treturn &FileInfoCreateParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *CreateCrossConnectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewDeleteaspecificEmergencyMappingContainerParamsWithTimeout(timeout time.Duration) *DeleteaspecificEmergencyMappingContainerParams {\n\tvar ()\n\treturn &DeleteaspecificEmergencyMappingContainerParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PetCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *BackupsCreateStatusParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRunbookRunCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetPrivateToggleDepositAddressCreationParamsWithTimeout(timeout time.Duration) *GetPrivateToggleDepositAddressCreationParams {\n\tvar ()\n\treturn &GetPrivateToggleDepositAddressCreationParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewCreateDashboardRenderTaskParamsWithTimeout(timeout time.Duration) *CreateDashboardRenderTaskParams {\n\tvar ()\n\treturn &CreateDashboardRenderTaskParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *EntryServiceDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewCreateEventAlertConditionParams() *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *CreateTokenParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CloudTargetCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateDrgAttachmentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteVersionControlRequestParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SystemEventsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetInstancesEventByEventIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetEventLogsUsingGETParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewUpdateEventParamsWithTimeout(timeout time.Duration) *UpdateEventParams {\n\tvar ()\n\treturn &UpdateEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *VolumeDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteLTENetworkIDNetworkProbeTasksTaskIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewDeleteProtectedEntityParamsWithTimeout(timeout time.Duration) *DeleteProtectedEntityParams {\n\treturn &DeleteProtectedEntityParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetAuditEventsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewValidateUpdateSymfilePinningParamsWithTimeout(timeout time.Duration) *ValidateUpdateSymfilePinningParams {\n\tvar ()\n\treturn &ValidateUpdateSymfilePinningParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *DeleteSubscribedEventParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewListAlertableEventTypeParamsWithTimeout(timeout time.Duration) *ListAlertableEventTypeParams {\n\tvar ()\n\treturn &ListAlertableEventTypeParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewCreateDrgAttachmentParamsWithTimeout(timeout time.Duration) *CreateDrgAttachmentParams {\n\tvar ()\n\treturn &CreateDrgAttachmentParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *DeleteConditionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetJobEventsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteDebugRequestParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewSizeParamsWithTimeout(timeout time.Duration) *SizeParams {\n\tvar ()\n\treturn &SizeParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *EmsEventCollectionGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetFileSystemParametersInternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewDeleteTagParamsWithTimeout(timeout time.Duration) *DeleteTagParams {\n\tvar ()\n\treturn &DeleteTagParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PostHostStorageSectorsDeleteMerklerootParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreatePolicyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteaspecificEmergencyMappingContainerParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewSystemEventsParams() *SystemEventsParams {\n\tvar ()\n\treturn &SystemEventsParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *GetFileSystemParametersInternalParams) WithTimeout(timeout time.Duration) *GetFileSystemParametersInternalParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (f MLFindFileStructure) WithTimeout(v time.Duration) func(*MLFindFileStructureRequest) {\n\treturn func(r *MLFindFileStructureRequest) {\n\t\tr.Timeout = v\n\t}\n}",
"func (o *CreateDashboardRenderTaskParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetJobEventsParamsWithTimeout(timeout time.Duration) *GetJobEventsParams {\n\tvar ()\n\treturn &GetJobEventsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *GetEventLogsUsingGETParams) WithTimeout(timeout time.Duration) *GetEventLogsUsingGETParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *CreateDrgAttachmentParams) WithTimeout(timeout time.Duration) *CreateDrgAttachmentParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewV2ListEventsParamsWithTimeout(timeout time.Duration) *V2ListEventsParams {\n\treturn &V2ListEventsParams{\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteSubnetParamsWithTimeout(timeout time.Duration) *DeleteSubnetParams {\n\tvar ()\n\treturn &DeleteSubnetParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteConditionParamsWithTimeout(timeout time.Duration) *DeleteConditionParams {\n\tvar ()\n\treturn &DeleteConditionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewPostAutoDiscoveryPingsweepParamsWithTimeout(timeout time.Duration) *PostAutoDiscoveryPingsweepParams {\n\tvar ()\n\treturn &PostAutoDiscoveryPingsweepParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetFileSystemParametersInternalParamsWithTimeout(timeout time.Duration) *GetFileSystemParametersInternalParams {\n\tvar (\n\t\tattachedClusterDefault = bool(false)\n\t\tsecureDefault = bool(false)\n\t)\n\treturn &GetFileSystemParametersInternalParams{\n\t\tAttachedCluster: &attachedClusterDefault,\n\t\tSecure: &secureDefault,\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *FileInfoCreateParams) WithTimeout(timeout time.Duration) *FileInfoCreateParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *DeleteNodeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRackTopoesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SizeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNetworkExternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AlertDefinitionAddOperatorToEscalationLevelParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CacheServiceMetricsKeySizeGetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRepoNotificationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteProtectedEntityParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SafeContactCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewCreateCrossConnectParamsWithTimeout(timeout time.Duration) *CreateCrossConnectParams {\n\tvar ()\n\treturn &CreateCrossConnectParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewMonitorCheckGetScreenshotsParamsWithTimeout(timeout time.Duration) *MonitorCheckGetScreenshotsParams {\n\tvar ()\n\treturn &MonitorCheckGetScreenshotsParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ValidateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteTagParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewGetNetworkExternalParamsWithTimeout(timeout time.Duration) *GetNetworkExternalParams {\n\n\treturn &GetNetworkExternalParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewValidateCreateServiceRequestNamingParamsWithTimeout(timeout time.Duration) *ValidateCreateServiceRequestNamingParams {\n\tvar ()\n\treturn &ValidateCreateServiceRequestNamingParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewDeleteSubscribedEventParamsWithTimeout(timeout time.Duration) *DeleteSubscribedEventParams {\n\tvar ()\n\treturn &DeleteSubscribedEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *AddVMParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBlockLatestParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateAccessPolicyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostV1IncidentsIncidentIDRelatedChangeEventsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *StreamsDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewRejectLogoutRequestParamsWithTimeout(timeout time.Duration) *RejectLogoutRequestParams {\n\tvar ()\n\treturn &RejectLogoutRequestParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func NewGetRackTopoesParamsWithTimeout(timeout time.Duration) *GetRackTopoesParams {\n\treturn &GetRackTopoesParams{\n\t\ttimeout: timeout,\n\t}\n}"
] | [
"0.8055414",
"0.7582346",
"0.7013783",
"0.6802064",
"0.6197475",
"0.602131",
"0.57749593",
"0.5656192",
"0.5575306",
"0.55126685",
"0.5423514",
"0.5379903",
"0.53752625",
"0.5371916",
"0.5364046",
"0.5356103",
"0.53343153",
"0.52897924",
"0.52777815",
"0.5271008",
"0.5241991",
"0.52203006",
"0.5181054",
"0.51731795",
"0.51683927",
"0.51616055",
"0.5159571",
"0.5134556",
"0.5121291",
"0.5106953",
"0.5102717",
"0.50970495",
"0.5095474",
"0.5069895",
"0.5069687",
"0.5064404",
"0.5062009",
"0.50581676",
"0.5057093",
"0.5055905",
"0.5048325",
"0.5047936",
"0.50368345",
"0.50329775",
"0.5022075",
"0.5012254",
"0.5006111",
"0.49941334",
"0.49739254",
"0.4973229",
"0.49692166",
"0.49585593",
"0.49518153",
"0.4942741",
"0.49404407",
"0.49330902",
"0.49140847",
"0.49097893",
"0.49066374",
"0.490547",
"0.49046078",
"0.4904197",
"0.49002045",
"0.48952848",
"0.48857284",
"0.4877529",
"0.48774615",
"0.4875318",
"0.48726216",
"0.4871263",
"0.48669448",
"0.48661023",
"0.4861577",
"0.48513874",
"0.4843016",
"0.484027",
"0.4839861",
"0.48396724",
"0.4824334",
"0.48237",
"0.4822868",
"0.48219714",
"0.4816542",
"0.4798384",
"0.47919306",
"0.47906584",
"0.47884452",
"0.47791186",
"0.47655556",
"0.47619",
"0.47597283",
"0.47585115",
"0.47554103",
"0.47508815",
"0.4742763",
"0.4739419",
"0.47347376",
"0.4730044",
"0.47293386",
"0.47286826"
] | 0.8468644 | 0 |
NewValidateCreateAnomalyDetectionDiskEventParamsWithContext creates a new ValidateCreateAnomalyDetectionDiskEventParams object with the default values initialized, and the ability to set a context for a request | func NewValidateCreateAnomalyDetectionDiskEventParamsWithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {
var ()
return &ValidateCreateAnomalyDetectionDiskEventParams{
Context: ctx,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewValidateCreateAnomalyDetectionDiskEventParams() *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (m *VMAddDiskParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateData(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateWhere(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (a *Client) ValidateCreateAnomalyDetectionMetricEvent(params *ValidateCreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*ValidateCreateAnomalyDetectionMetricEventNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewValidateCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"validateCreateAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents/validator\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ValidateCreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ValidateCreateAnomalyDetectionMetricEventNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for validateCreateAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (m *VMAddDiskParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateWhere(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (b EmployeeCreatedEvent) ValidateEmployeeCreatedEvent() error {\n\tvar validate *validator.Validate\n\tvalidate = validator.New()\n\terr := validate.Struct(b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
"func NewCreateEventAlertConditionParamsWithContext(ctx context.Context) *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (b EmployeeDeletedEvent) ValidateEmployeeDeletedEvent() error {\n\tvar validate *validator.Validate\n\tvalidate = validator.New()\n\terr := validate.Struct(b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
"func (m *VMAddDiskParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateIoPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *VMAddDiskParamsDataVMDisks) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateMountDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMountNewCreateDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *MountNewCreateDisksParamsVMVolume) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateElfStoragePolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSizeUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *VMVolumeCreationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateElfStoragePolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSizeUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewEventsOnDisk(file string, clock Clock) (*EventsOnDisk, error) {\n\treturn &EventsOnDisk{\n\t\tfilename: filepath.Clean(file),\n\t\tclock: clock,\n\t}, nil\n}",
"func NewEventsLoader(roomVer RoomVersion, keyRing JSONVerifier, stateProvider StateProvider, provider EventProvider, performSoftFailCheck bool) *EventsLoader {\n\treturn &EventsLoader{\n\t\troomVer: roomVer,\n\t\tkeyRing: keyRing,\n\t\tprovider: provider,\n\t\tstateProvider: stateProvider,\n\t\tperformSoftFailCheck: performSoftFailCheck,\n\t}\n}",
"func NewValidateUpdateSymfilePinningParamsWithContext(ctx context.Context) *ValidateUpdateSymfilePinningParams {\n\tvar ()\n\treturn &ValidateUpdateSymfilePinningParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (m *VMAddDiskParamsData) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIoPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxIopsPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVMDisks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *MountNewCreateDisksParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateBus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMVolume(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (e VulnEvent) Validate() error {\n\tif e.SourceName == \"\" {\n\t\treturn fmt.Errorf(\"must set SourceName in event\")\n\t}\n\tif e.Asset.IPAddress == \"\" && e.Asset.Hostname == \"\" {\n\t\treturn fmt.Errorf(\"must set IPAddress or Hostname in event\")\n\t}\n\treturn nil\n}",
"func NewGetInstancesEventByEventIDParamsWithContext(ctx context.Context) *GetInstancesEventByEventIDParams {\n\tvar ()\n\treturn &GetInstancesEventByEventIDParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (m *AuditLogEvent) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateIdentity(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateRequestContext(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateResults(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewListAlertableEventTypeParamsWithContext(ctx context.Context) *ListAlertableEventTypeParams {\n\tvar ()\n\treturn &ListAlertableEventTypeParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewCreateApplicationDetectionRuleBadRequest() *CreateApplicationDetectionRuleBadRequest {\n\treturn &CreateApplicationDetectionRuleBadRequest{}\n}",
"func (e *Event) Validate() error {\n\tif e.Message == \"\" || e.MessageOffset == \"\" || e.Time == nil || e.Type == \"\" {\n\t\treturn errs.ErrMissingParameters\n\t}\n\treturn nil\n}",
"func (m *MountNewCreateDisksParamsVMVolume) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateElfStoragePolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSizeUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewAnomalyDetectionLimiter(numWorkloads int, numEventsAllowedPerPeriod int, period time.Duration) (*AnomalyDetectionLimiter, error) {\n\tlimiter, err := utils.NewLimiter[string](numWorkloads, numEventsAllowedPerPeriod, period)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &AnomalyDetectionLimiter{\n\t\tlimiter: limiter,\n\t}, nil\n}",
"func (e Event) Validate() error {\n\tif e.Title == \"\" {\n\t\treturn errors.New(\"title cannot be empty\")\n\t}\n\tif e.Description == \"\" {\n\t\treturn errors.New(\"description cannot be empty\")\n\t}\n\treturn nil\n}",
"func NewEvtFailureDetector(id int, nodeIDs []int, sr SuspectRestorer, delta time.Duration, hbSend chan<- Heartbeat) *EvtFailureDetector {\n\tsuspected := make(map[int]bool)\n\talive := make(map[int]bool)\n\n\t// TODO(student): perform any initialization necessary\n\tfor _, node := range nodeIDs {\n\t\talive[node] = true //assumes all provided node IDs is alive (when init fd)\n\t}\n\n\treturn &EvtFailureDetector{\n\t\tid: id,\n\t\tnodeIDs: nodeIDs,\n\t\talive: alive,\n\t\tsuspected: suspected,\n\n\t\tsr: sr,\n\n\t\tdelay: delta,\n\t\tdelta: delta,\n\n\t\thbSend: hbSend,\n\t\thbIn: make(chan Heartbeat, 30000),\n\t\tstop: make(chan struct{}),\n\n\t\ttestingHook: func() {}, // DO NOT REMOVE THIS LINE. A no-op when not testing.\n\t}\n}",
"func NewHostsAnomalyDetectionConfig(connectionLostDetection ConnectionLostDetectionConfig, highCpuSaturationDetection HighCpuSaturationDetectionConfig, highMemoryDetection HighMemoryDetectionConfig, highGcActivityDetection HighGcActivityDetectionConfig, outOfMemoryDetection OutOfMemoryDetectionConfig, outOfThreadsDetection OutOfThreadsDetectionConfig, networkDroppedPacketsDetection NetworkDroppedPacketsDetectionConfig, networkErrorsDetection NetworkErrorsDetectionConfig, highNetworkDetection HighNetworkDetectionConfig, networkTcpProblemsDetection NetworkTcpProblemsDetectionConfig, networkHighRetransmissionDetection NetworkHighRetransmissionDetectionConfig, diskLowSpaceDetection DiskLowSpaceDetectionConfig, diskSlowWritesAndReadsDetection DiskSlowWritesAndReadsDetectionConfig, diskLowInodesDetection DiskLowInodesDetectionConfig, ) *HostsAnomalyDetectionConfig {\n\tthis := HostsAnomalyDetectionConfig{}\n\tthis.ConnectionLostDetection = connectionLostDetection\n\tthis.HighCpuSaturationDetection = highCpuSaturationDetection\n\tthis.HighMemoryDetection = highMemoryDetection\n\tthis.HighGcActivityDetection = highGcActivityDetection\n\tthis.OutOfMemoryDetection = outOfMemoryDetection\n\tthis.OutOfThreadsDetection = outOfThreadsDetection\n\tthis.NetworkDroppedPacketsDetection = networkDroppedPacketsDetection\n\tthis.NetworkErrorsDetection = networkErrorsDetection\n\tthis.HighNetworkDetection = highNetworkDetection\n\tthis.NetworkTcpProblemsDetection = networkTcpProblemsDetection\n\tthis.NetworkHighRetransmissionDetection = networkHighRetransmissionDetection\n\tthis.DiskLowSpaceDetection = diskLowSpaceDetection\n\tthis.DiskSlowWritesAndReadsDetection = diskSlowWritesAndReadsDetection\n\tthis.DiskLowInodesDetection = diskLowInodesDetection\n\treturn &this\n}",
"func (m *VMVolumeCreationParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateClusterID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateElfStoragePolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSharing(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSize(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateSizeUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *TopMetricsSvmDirectoryExcludedVolumeInlineReason) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateCode(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMessage(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *ElfDataStore) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateCluster(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIscsiTarget(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNfsExport(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNvmfSubsystem(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateType(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *HostsAnomalyDetectionConfig) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConnectionLostDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDiskLowInodesDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDiskLowSpaceDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDiskSlowWritesAndReadsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighCPUSaturationDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighGcActivityDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighMemoryDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateHighNetworkDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMetadata(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkDroppedPacketsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkErrorsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkHighRetransmissionDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateNetworkTCPProblemsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutOfMemoryDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateOutOfThreadsDetection(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (event Event) Validate() (err error) {\n\treturn\n}",
"func (m *VMAddDiskParamsDataVMDisks) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateMountDisks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMountNewCreateDisks(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *VMSnapshotCreationParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateConsistentType(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *DeviceStateChangedEvent) Validate() error {\n\tif err := m.Device.Validate(); err != nil {\n\t\treturn err\n\t}\n\n\tif m.Device == nil {\n\t\treturn oops.BadRequest(\"field 'device' is required\")\n\t}\n\treturn nil\n}",
"func (m *DeviceEvent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateDevice(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *AlertableEventType) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCategory(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateScope(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *TopMetricsSvmDirectoryExcludedVolumeInlineVolumeInlineLinks) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateSelf(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (a *Client) ValidateUpdateAnomalyDetectionMetricEvent(params *ValidateUpdateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*ValidateUpdateAnomalyDetectionMetricEventNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewValidateUpdateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"validateUpdateAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents/{id}/validator\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ValidateUpdateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ValidateUpdateAnomalyDetectionMetricEventNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for validateUpdateAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (m *TopMetricsSvmDirectoryExcludedVolumeInlineVolume) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateLinks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewKeyspaceEventWatcher(ctx context.Context, topoServer srvtopo.Server, hc HealthCheck, localCell string) *KeyspaceEventWatcher {\n\tkew := &KeyspaceEventWatcher{\n\t\thc: hc,\n\t\tts: topoServer,\n\t\tlocalCell: localCell,\n\t\tkeyspaces: make(map[string]*keyspaceState),\n\t\tsubs: make(map[chan *KeyspaceEvent]struct{}),\n\t}\n\tkew.run(ctx)\n\tlog.Infof(\"started watching keyspace events in %q\", localCell)\n\treturn kew\n}",
"func NewAlertDefinitionAddOperatorToEscalationLevelParamsWithContext(ctx context.Context) *AlertDefinitionAddOperatorToEscalationLevelParams {\n\tvar ()\n\treturn &AlertDefinitionAddOperatorToEscalationLevelParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (o *GetLogsByEventIDDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateMessage(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *MountNewCreateDisksParams) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateBoot(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateBus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxBandwidthUnit(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMaxIopsPolicy(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVMVolume(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (a *Client) CreateAnomalyDetectionMetricEvent(params *CreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*CreateAnomalyDetectionMetricEventCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateAnomalyDetectionMetricEventCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (m *ElfImageWhereInput) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateAND(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNOT(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateOR(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateCluster(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateContentLibraryImage(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusNot(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusNotIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLabelsEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLabelsNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLabelsSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisksEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisksNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisksSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMSnapshotsEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMSnapshotsNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMSnapshotsSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMTemplatesEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMTemplatesNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMTemplatesSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *TopMetricsSvmDirectoryExcludedVolume) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateReason(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVolume(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (b *Event) Validate() []error {\n\terrorsList := []error{}\n\tif b.VisitorID == \"\" {\n\t\terrorsList = append(errorsList, errors.New(\"Visitor ID should not by empty\"))\n\t}\n\tif b.Type != \"CONTEXT\" {\n\t\terrorsList = append(errorsList, fmt.Errorf(\"Type %s, is not handled\", b.Type))\n\t}\n\n\tcontextErrs := b.Data.Validate()\n\tfor _, e := range contextErrs {\n\t\terrorsList = append(errorsList, e)\n\t}\n\treturn errorsList\n}",
"func (s *OpenconfigOfficeAp_System_Aaa_Authorization_Events_Event) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Aaa_Authorization_Events_Event\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (o *GetAuditEventsParams) WithSize(size *int32) *GetAuditEventsParams {\n\to.SetSize(size)\n\treturn o\n}",
"func (o *HostsAnomalyDetectionConfig) SetOutOfMemoryDetection(v OutOfMemoryDetectionConfig) {\n\to.OutOfMemoryDetection = v\n}",
"func (b EmployeeUpdatedEvent) ValidateEmployeeUpdatedEvent() error {\n\tvar validate *validator.Validate\n\tvalidate = validator.New()\n\terr := validate.Struct(b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
"func (m *DeviceEvent) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateAction(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateDevice(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (s *OpenconfigOfficeAp_System_Aaa_Accounting_Events_Event) Validate() error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigOfficeAp_System_Aaa_Accounting_Events_Event\"], s); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (t *OpenconfigSystem_System_Aaa_Authorization_Events_Event) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Aaa_Authorization_Events_Event\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (m *AlertConfigurationThreshold) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMSnapshotCreationParamsData) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateConsistentType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateVMID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewValidateByteCodeBadRequest() *ValidateByteCodeBadRequest {\n\treturn &ValidateByteCodeBadRequest{}\n}",
"func (m DiskType) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *HostsAnomalyDetectionConfig) SetDiskLowSpaceDetection(v DiskLowSpaceDetectionConfig) {\n\to.DiskLowSpaceDetection = v\n}",
"func NewManagedTenantsManagedTenantAlertRuleDefinitionsItemAlertRulesRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*ManagedTenantsManagedTenantAlertRuleDefinitionsItemAlertRulesRequestBuilder) {\n m := &ManagedTenantsManagedTenantAlertRuleDefinitionsItemAlertRulesRequestBuilder{\n BaseRequestBuilder: *i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewBaseRequestBuilder(requestAdapter, \"{+baseurl}/tenantRelationships/managedTenants/managedTenantAlertRuleDefinitions/{managedTenantAlertRuleDefinition%2Did}/alertRules{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}\", pathParameters),\n }\n return m\n}",
"func NewListEventsBadRequest() *ListEventsBadRequest {\n\treturn &ListEventsBadRequest{}\n}",
"func (m *ElevationPoint) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateLocation(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateCreateEvent(payload *model.CreateEventReq) error {\n\tif payload == nil {\n\t\terr := errors.New(\"invalid payload\")\n\t\treturn err\n\t}\n\n\tif strings.TrimSpace(payload.Title) == \"\" {\n\t\terr := errors.New(\"invalid title\")\n\t\treturn err\n\t}\n\n\tif payload.LocationID == 0 {\n\t\terr := errors.New(\"invalid location id\")\n\t\treturn err\n\t}\n\n\tif payload.StartDate.UTC().IsZero() {\n\t\terr := errors.New(\"invalid time. time format is YYYY:MM:dd HH:mm:ss\")\n\t\treturn err\n\t}\n\n\tif payload.EndDate.UTC().IsZero() {\n\t\terr := errors.New(\"invalid time. time format is YYYY:MM:dd HH:mm:ss\")\n\t\treturn err\n\t}\n\n\tif len(payload.TicketID) == 0 {\n\t\terr := errors.New(\"invalid ticket id\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (t *OpenconfigSystem_System_Aaa_Accounting_Events_Event) Validate(opts ...ygot.ValidationOption) error {\n\tif err := ytypes.Validate(SchemaTree[\"OpenconfigSystem_System_Aaa_Accounting_Events_Event\"], t, opts...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (m *VlanVds) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func NewEventLogger(root, base string, size int64) *EventLogger {\n\troot = strings.TrimRight(root, string(os.PathSeparator))\n\treturn &EventLogger{\n\t\troot: root,\n\t\tbase: base,\n\t\tmaxSize: size,\n\t\tcache: make(map[string]*IndexedLogfile),\n\t}\n}",
"func (m *Event) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCommandPatch(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateType(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *PerformanceNvmeMetricInlineSvm) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DeleteEmployeeResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for EmpNo\n\n\treturn nil\n}",
"func (o *PostAPIV2EventsParamsBodyAttributes) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateCallback(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateHeaders(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostAPIV2EventsBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateAttributes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEntityID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEvent(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *DeleteEmployeeRequest) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\tif m.GetEmpNo() < 1 {\n\t\treturn DeleteEmployeeRequestValidationError{\n\t\t\tfield: \"EmpNo\",\n\t\t\treason: \"value must be greater than or equal to 1\",\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (m *StoragePoolInlineHealth) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateIsHealthy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateState(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateUnhealthyReason(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewCreateEventAlertConditionParams() *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewListAlertableEventTypeBadRequest() *ListAlertableEventTypeBadRequest {\n\treturn &ListAlertableEventTypeBadRequest{}\n}",
"func (m *TopMetricsSvmDirectoryExcludedVolumeInlineReason) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func Validate(validateStopOnFirstErr, validateRecursive, validateUsingTags bool) ConfigOptions {\n\treturn func(h *Config) {\n\t\th.validateStopOnFirstErr = validateStopOnFirstErr\n\t\th.validateRecursive = validateRecursive\n\t\th.validateUsingTags = validateUsingTags\n\t}\n}",
"func NewGetInstancesEventByEventIDParams() *GetInstancesEventByEventIDParams {\n\tvar ()\n\treturn &GetInstancesEventByEventIDParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func NewValidateCreateServiceRequestNamingParamsWithContext(ctx context.Context) *ValidateCreateServiceRequestNamingParams {\n\tvar ()\n\treturn &ValidateCreateServiceRequestNamingParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func (m *ConsoleLogEvent) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with RealtimeBuildEvent\n\tif err := m.RealtimeBuildEvent.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateStepRecordID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTimelineID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTimelineRecordID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostAPIV2EventsParamsBodyAttributesHeaders) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *Event) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateEventType(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateMedium(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateTimestamp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewEvent(x, y float64, button, event string) Event {\n\treturn Event{\n\t\tPoint2: floatgeom.Point2{x, y},\n\t\tButton: button,\n\t\tEvent: event,\n\t}\n}",
"func (m *CreateEmployeeResponse) Validate() error {\n\tif m == nil {\n\t\treturn nil\n\t}\n\n\t// no validation rules for EmpNo\n\n\treturn nil\n}",
"func NewV2ListEventsParamsWithContext(ctx context.Context) *V2ListEventsParams {\n\treturn &V2ListEventsParams{\n\t\tContext: ctx,\n\t}\n}",
"func NewEventFromBytes(ctx context.Context, acker Acknowledger, data []byte) *Event {\n\treturn &Event{\n\t\tctx: ctx,\n\t\tacker: acker,\n\t\tencoded: data,\n\t}\n}",
"func (m *IPInterfaceSvmInlineLocationInlineHomeNodeInlineLinks) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateSelf(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *DockerAttributes) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetPastUsageNotFoundBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (dn *Daemon) validateOnDiskState(currentConfig *mcfgv1.MachineConfig) bool {\n\t// Be sure we're booted into the OS we expect\n\tosMatch, err := dn.checkOS(currentConfig.Spec.OSImageURL)\n\tif err != nil {\n\t\tglog.Errorf(\"%s\", err);\n\t\treturn false\n\t}\n\tif !osMatch {\n\t\tglog.Errorf(\"Expected target osImageURL %s\", currentConfig.Spec.OSImageURL)\n\t\treturn false\n\t}\n\t// And the rest of the disk state\n\tif !checkFiles(currentConfig.Spec.Config.Storage.Files) {\n\t\treturn false\n\t}\n\tif !checkUnits(currentConfig.Spec.Config.Systemd.Units) {\n\t\treturn false\n\t}\n\treturn true\n}",
"func (params PullToDirectoryParams) Validate() error {\n\tvar merr = multierror.NewPrefixed(\"invalid instance config pull params\")\n\tif params.API == nil {\n\t\tmerr = merr.Append(apierror.ErrMissingAPI)\n\t}\n\n\tif params.Directory == \"\" {\n\t\tmerr = merr.Append(errors.New(\"folder not specified and is required for the operation\"))\n\t}\n\n\tif err := ec.RequireRegionSet(params.Region); err != nil {\n\t\tmerr = merr.Append(err)\n\t}\n\n\treturn merr.ErrorOrNil()\n}",
"func (m *PerformanceLunMetricInlineIops) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}"
] | [
"0.78989136",
"0.76160055",
"0.70350945",
"0.69921076",
"0.68241984",
"0.615827",
"0.599709",
"0.47806025",
"0.47231936",
"0.4633743",
"0.45910293",
"0.44896173",
"0.43707833",
"0.4338113",
"0.43370208",
"0.42844075",
"0.42407042",
"0.42394692",
"0.42143732",
"0.42122546",
"0.40977114",
"0.40946996",
"0.40742743",
"0.40685222",
"0.40421256",
"0.40396982",
"0.40219977",
"0.40047228",
"0.3994164",
"0.3993104",
"0.39876157",
"0.39725482",
"0.3966029",
"0.39426753",
"0.39272052",
"0.39262545",
"0.39155298",
"0.3897172",
"0.38819107",
"0.3868123",
"0.38322228",
"0.383191",
"0.38172126",
"0.38038096",
"0.3796118",
"0.37918773",
"0.3790518",
"0.37736502",
"0.3762119",
"0.37531078",
"0.37503982",
"0.37466547",
"0.37465978",
"0.3745681",
"0.3745377",
"0.37440556",
"0.37415117",
"0.3726908",
"0.37262607",
"0.3725579",
"0.37224206",
"0.3720157",
"0.3717227",
"0.37075844",
"0.3694836",
"0.36941615",
"0.36929852",
"0.3691203",
"0.36795875",
"0.36748293",
"0.3673153",
"0.36718124",
"0.36665538",
"0.36536017",
"0.3653213",
"0.36522686",
"0.364787",
"0.3647682",
"0.36345494",
"0.36254147",
"0.36194342",
"0.36189038",
"0.36158058",
"0.36153066",
"0.36142567",
"0.36105347",
"0.36029547",
"0.36015844",
"0.36015186",
"0.35975084",
"0.35922027",
"0.35852414",
"0.3583913",
"0.35834217",
"0.35822687",
"0.35770842",
"0.35643616",
"0.3562092",
"0.35593745",
"0.3556717"
] | 0.84938425 | 0 |
NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient creates a new ValidateCreateAnomalyDetectionDiskEventParams object with the default values initialized, and the ability to set a custom HTTPClient for a request | func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {
var ()
return &ValidateCreateAnomalyDetectionDiskEventParams{
HTTPClient: client,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParams() *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *CreateEventAlertConditionParams) WithHTTPClient(client *http.Client) *CreateEventAlertConditionParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewCreateEventAlertConditionParamsWithHTTPClient(client *http.Client) *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func NewSystemEventsParamsWithHTTPClient(client *http.Client) *SystemEventsParams {\n\tvar ()\n\treturn &SystemEventsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetAuditEventsParamsWithHTTPClient(client *http.Client) *GetAuditEventsParams {\n\tvar (\n\t\tpageDefault = int32(0)\n\t\tsizeDefault = int32(100)\n\t)\n\treturn &GetAuditEventsParams{\n\t\tPage: &pageDefault,\n\t\tSize: &sizeDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateEventAlertConditionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateParams) WithHTTPClient(client *http.Client) *ValidateParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\tContext: ctx,\n\t}\n}",
"func NewListEventsParamsWithHTTPClient(client *http.Client) *ListEventsParams {\n\tvar ()\n\treturn &ListEventsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetEventLogsUsingGETParamsWithHTTPClient(client *http.Client) *GetEventLogsUsingGETParams {\n\treturn &GetEventLogsUsingGETParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetJobEventsParamsWithHTTPClient(client *http.Client) *GetJobEventsParams {\n\tvar ()\n\treturn &GetJobEventsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewGetInstancesEventByEventIDParamsWithHTTPClient(client *http.Client) *GetInstancesEventByEventIDParams {\n\tvar ()\n\treturn &GetInstancesEventByEventIDParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetEventLogsUsingGETParams) WithHTTPClient(client *http.Client) *GetEventLogsUsingGETParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *SystemEventsParams) WithHTTPClient(client *http.Client) *SystemEventsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetInstancesEventByEventIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewValidateParamsWithHTTPClient(client *http.Client) *ValidateParams {\n\treturn &ValidateParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func NewUpdateEventParamsWithHTTPClient(client *http.Client) *UpdateEventParams {\n\tvar ()\n\treturn &UpdateEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetJobEventsParams) WithHTTPClient(client *http.Client) *GetJobEventsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *V2ListEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListEventsParams) WithHTTPClient(client *http.Client) *ListEventsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetJobEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAuditEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetInstancesEventByEventIDParams) WithHTTPClient(client *http.Client) *GetInstancesEventByEventIDParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *FileInfoCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAuditEventsParams) WithHTTPClient(client *http.Client) *GetAuditEventsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *ListAlertableEventTypeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SystemEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewVolumeDeleteParamsWithHTTPClient(client *http.Client) *VolumeDeleteParams {\n\tvar (\n\t\tforceDefault = bool(false)\n\t)\n\treturn &VolumeDeleteParams{\n\t\tForce: &forceDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateDrgAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteSubscribedEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewDeleteVersionControlRequestParamsWithHTTPClient(client *http.Client) *DeleteVersionControlRequestParams {\n\treturn &DeleteVersionControlRequestParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *ListEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *VolumeDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewDeleteNodeParamsWithHTTPClient(client *http.Client) *DeleteNodeParams {\n\tvar ()\n\treturn &DeleteNodeParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *UpdateEventParams) WithHTTPClient(client *http.Client) *UpdateEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewListAlertableEventTypeParamsWithHTTPClient(client *http.Client) *ListAlertableEventTypeParams {\n\tvar ()\n\treturn &ListAlertableEventTypeParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateLifecycleParams) WithHTTPClient(client *http.Client) *CreateLifecycleParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetEventLogsUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EmsEventCollectionGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateLifecycleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateDrgAttachmentParams) WithHTTPClient(client *http.Client) *CreateDrgAttachmentParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *EntryServiceDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostV1PostMortemsReportsReportIDEventsNotesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListAlertableEventTypeParams) WithHTTPClient(client *http.Client) *ListAlertableEventTypeParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewDeleteSubscribedEventParamsWithHTTPClient(client *http.Client) *DeleteSubscribedEventParams {\n\tvar ()\n\treturn &DeleteSubscribedEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetBootVolumeAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRunbookRunCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *LogRequestDownloadParams) WithHTTPClient(client *http.Client) *LogRequestDownloadParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *V2ListEventsParams) WithHTTPClient(client *http.Client) *V2ListEventsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteVersionControlRequestParams) WithHTTPClient(client *http.Client) *DeleteVersionControlRequestParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *GetFileSystemParametersInternalParams) WithHTTPClient(client *http.Client) *GetFileSystemParametersInternalParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteVersionControlRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *VolumeDeleteParams) WithHTTPClient(client *http.Client) *VolumeDeleteParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *PostV1IncidentsIncidentIDRelatedChangeEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewDeleteTagParamsWithHTTPClient(client *http.Client) *DeleteTagParams {\n\tvar ()\n\treturn &DeleteTagParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateVolumeBackupParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *LedgerVoucherAttachmentDeleteAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *FieldHistogramKeywordParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewCreateDashboardRenderTaskParamsWithHTTPClient(client *http.Client) *CreateDashboardRenderTaskParams {\n\tvar ()\n\treturn &CreateDashboardRenderTaskParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateCrossConnectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewMonitorCheckGetScreenshotsParamsWithHTTPClient(client *http.Client) *MonitorCheckGetScreenshotsParams {\n\tvar ()\n\treturn &MonitorCheckGetScreenshotsParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *PostAttendanceHourlyPaidLeaveParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CloudTargetCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewGetLogsParamsWithHTTPClient(client *http.Client) *GetLogsParams {\n\tvar (\n\t\tpageDefault = int64(1)\n\t\tpageSizeDefault = int64(10)\n\t)\n\treturn &GetLogsParams{\n\t\tPage: &pageDefault,\n\t\tPageSize: &pageSizeDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *MonitorCheckGetScreenshotsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PetCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BackupsCreateStatusParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewCreateWidgetParamsWithHTTPClient(client *http.Client) *CreateWidgetParams {\n\tvar (\n\t\tacceptDefault = string(\"application/json\")\n\t\tcontentTypeDefault = string(\"application/json\")\n\t)\n\treturn &CreateWidgetParams{\n\t\tAccept: &acceptDefault,\n\t\tContentType: &contentTypeDefault,\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateTokenParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewCreateLifecycleParamsWithHTTPClient(client *http.Client) *CreateLifecycleParams {\n\tvar ()\n\treturn &CreateLifecycleParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *DeleteConditionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AlertDefinitionAddOperatorToEscalationLevelParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateDashboardRenderTaskParams) WithHTTPClient(client *http.Client) *CreateDashboardRenderTaskParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *CreatePolicyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostHostStorageSectorsDeleteMerklerootParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteaspecificEmergencyMappingContainerParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewDeleteConditionParamsWithHTTPClient(client *http.Client) *DeleteConditionParams {\n\tvar ()\n\treturn &DeleteConditionParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *UpdateEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewLogRequestDownloadParamsWithHTTPClient(client *http.Client) *LogRequestDownloadParams {\n\tvar ()\n\treturn &LogRequestDownloadParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *DeleteLTENetworkIDNetworkProbeTasksTaskIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteSubscribedEventParams) WithHTTPClient(client *http.Client) *DeleteSubscribedEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteKeyPairsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSearchEmployeesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateGUIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteDebugRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAutoDiscoveryPingsweepParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SafeContactCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (a *Client) ValidateCreateAnomalyDetectionMetricEvent(params *ValidateCreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*ValidateCreateAnomalyDetectionMetricEventNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewValidateCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"validateCreateAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents/validator\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ValidateCreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ValidateCreateAnomalyDetectionMetricEventNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for validateCreateAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (o *DeleteGerritListenerByIDUsingDELETEParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteTagParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateAccessPolicyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateCrossConnectParams) WithHTTPClient(client *http.Client) *CreateCrossConnectParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func NewCreateDrgAttachmentParamsWithHTTPClient(client *http.Client) *CreateDrgAttachmentParams {\n\tvar ()\n\treturn &CreateDrgAttachmentParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *MonitorDeleteMaintenancePeriodFromMonitorParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] | [
"0.8327069",
"0.70185035",
"0.67619973",
"0.6500562",
"0.60969204",
"0.59961534",
"0.59404904",
"0.59183556",
"0.5709208",
"0.5660249",
"0.56007624",
"0.55978113",
"0.55552834",
"0.54664594",
"0.537748",
"0.53703725",
"0.53691846",
"0.5349769",
"0.5288904",
"0.52809405",
"0.5261928",
"0.5258647",
"0.52581906",
"0.52236223",
"0.5204796",
"0.5174636",
"0.5172846",
"0.5168083",
"0.5122784",
"0.5122384",
"0.5100927",
"0.5099568",
"0.50943357",
"0.5090917",
"0.5069809",
"0.50635535",
"0.5050391",
"0.50406486",
"0.50029474",
"0.4993689",
"0.4984893",
"0.49800682",
"0.49705833",
"0.49689224",
"0.4965535",
"0.49542555",
"0.49408838",
"0.49356627",
"0.49275383",
"0.4925237",
"0.49230346",
"0.49040413",
"0.49037614",
"0.48831558",
"0.48793814",
"0.48704252",
"0.4868109",
"0.48637992",
"0.48542535",
"0.48477456",
"0.48424265",
"0.4816716",
"0.48161137",
"0.4808486",
"0.48082536",
"0.47995737",
"0.47972754",
"0.47945085",
"0.47914743",
"0.4784957",
"0.47830716",
"0.4780197",
"0.47700283",
"0.4766712",
"0.4760846",
"0.47559223",
"0.4753687",
"0.4750151",
"0.4746712",
"0.47322",
"0.4731379",
"0.47136095",
"0.47133538",
"0.47080576",
"0.46936104",
"0.46855655",
"0.46790546",
"0.4674314",
"0.46731994",
"0.46685687",
"0.46684653",
"0.46664992",
"0.46611753",
"0.4660869",
"0.46578783",
"0.46545714",
"0.4649318",
"0.46418583",
"0.46394494",
"0.4631598"
] | 0.8368145 | 0 |
WithTimeout adds the timeout to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {
o.SetTimeout(timeout)
return o
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *FileInfoCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRunbookRunCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateDrgAttachmentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateEventAlertConditionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CloudTargetCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateLifecycleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateParams) WithTimeout(timeout time.Duration) *ValidateParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *CreateCrossConnectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SafeContactCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (c *OrganizationsEnvironmentsApisRevisionsDebugsessionsCreateCall) Timeout(timeout int64) *OrganizationsEnvironmentsApisRevisionsDebugsessionsCreateCall {\n\tc.urlParams_.Set(\"timeout\", fmt.Sprint(timeout))\n\treturn c\n}",
"func (o *BackupsCreateStatusParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PetCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateCreateServiceRequestNamingParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateEventAlertConditionParams) WithTimeout(timeout time.Duration) *CreateEventAlertConditionParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *CreatePolicyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParams() *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *CreateDashboardRenderTaskParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout int) CreateDBOpFn {\n\treturn func(op *CreateDBOp) {\n\t\tif 0 == timeout {\n\t\t\treturn\n\t\t}\n\t\top.timeout = &timeout\n\t\top.set = true\n\t}\n}",
"func (o *CreateTokenParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AlertDefinitionAddOperatorToEscalationLevelParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (b *taskBuilder) timeout(timeout time.Duration) {\n\tb.Spec.ExecutionTimeout = timeout\n\tb.Spec.IoTimeout = timeout // With kitchen, step logs don't count toward IoTimeout.\n}",
"func (o *FileInfoCreateParams) WithTimeout(timeout time.Duration) *FileInfoCreateParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *CreateDatabaseOnServerParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CloudNFSExportAddParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRoomParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateScriptParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRoutingInstanceUsingPOSTParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateGUIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDatalakeDbConfigParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteaspecificEmergencyMappingContainerParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (e *InvalidArgumentError) Timeout() bool { return false }",
"func (o *GetInstancesEventByEventIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetFileSystemParametersInternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateWidgetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPrivateToggleDepositAddressCreationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateBlueprintInWorkspaceInternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *IgroupInitiatorCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateDrgAttachmentParams) WithTimeout(timeout time.Duration) *CreateDrgAttachmentParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *EmployeeEntitlementGrantEntitlementsByTemplateGrantEntitlementsByTemplateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RegenerateDeployKeyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRackTopoesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteSystemObjectDefinitionsByIDAttributeGroupsByIDAttributeDefinitionsByIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (m *monitor) withTimeout(timeout time.Duration) *monitor {\n\tm.timeout = timeout\n\treturn m\n}",
"func (o *ValidateUpdateSymfilePinningParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *IndicatorCreateV1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateAccessPolicyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AddVMParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostAutoDiscoveryPingsweepParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateTenantParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SizeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *TestEmailConfigurationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(o *options) {\n\t\to.timeout = timeout\n\t}\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(o *options) {\n\t\to.timeout = timeout\n\t}\n}",
"func (o *CreateRepoNotificationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (f MLFindFileStructure) WithTimeout(v time.Duration) func(*MLFindFileStructureRequest) {\n\treturn func(r *MLFindFileStructureRequest) {\n\t\tr.Timeout = v\n\t}\n}",
"func (o *GetProvidersFileByIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout time.Duration) BuilderOptionFunc {\n\treturn func(b *Builder) error {\n\t\tb.timeout = timeout\n\t\treturn nil\n\t}\n}",
"func (o *ReadStorageV1alpha1VolumeAttachmentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateVolumeBackupParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteLTENetworkIDNetworkProbeTasksTaskIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *EmployeesByIDContractrulePutParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (f Factory) WithTimeoutHeight(height uint64) Factory {\n\tf.timeoutHeight = height\n\treturn f\n}",
"func validateTimeout(timeout time.Duration) error {\n\tif timeout < time.Millisecond {\n\t\treturn nosqlerr.NewIllegalArgument(\"Timeout must be greater than or equal to 1 millisecond\")\n\t}\n\n\treturn nil\n}",
"func (o *AddOrUpdateNodePoolConfigItemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(timeout time.Duration) Option {\n\treturn func(opts *Opts) error {\n\t\topts.Timeout = timeout\n\t\treturn nil\n\t}\n}",
"func (o *PostLTENetworkIDDNSRecordsDomainParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *UploadTaskFileParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *StartPostgreSQLShowCreateTableActionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNetworkExternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func WithTimeout(timeout time.Duration) configF {\n\treturn func(c *config) *config {\n\t\tc.defaultTimeout = timeout\n\t\treturn c\n\t}\n}",
"func NewCreateEventAlertConditionParamsWithTimeout(timeout time.Duration) *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *PostDeviceUpsertParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteSubscribedEventParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func WithTimeout(duration time.Duration) Option {\n\treturn wrappedOption{oconf.WithTimeout(duration)}\n}",
"func (o *CreatePublicIPAdressUsingPOSTParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateCreateServiceRequestNamingParams) WithTimeout(timeout time.Duration) *ValidateCreateServiceRequestNamingParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *ColumnFamilyCrcCheckChanceByNamePostParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SaveTemplateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBootVolumeAttachmentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostDeviceRackParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateListParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateInstantPaymentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CatalogProductTierPriceManagementV1AddPostParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRestoreDatalakeStatusParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRuntimeMapParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *V2GetPresignedForClusterFilesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SharedCatalogSharedCatalogRepositoryV1SavePostParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostHostStorageSectorsDeleteMerklerootParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func Timeout(timeout time.Duration, timeoutFunction OnTimeout) crOption {\n\treturn func(cr *ConsumerRegistration) *ConsumerRegistration {\n\t\tcr.timeout = timeout\n\t\tcr.onTimeout = timeoutFunction\n\t\treturn cr\n\t}\n}",
"func (o *CreatePackageRepositoryDeltaUploadParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *UploadDeployFileParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *VolumeDeleteParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] | [
"0.7189499",
"0.69476575",
"0.6387285",
"0.6127074",
"0.592812",
"0.59120953",
"0.5909155",
"0.58984953",
"0.583984",
"0.5824878",
"0.581385",
"0.5808678",
"0.58043957",
"0.5803733",
"0.5779184",
"0.57258874",
"0.57109934",
"0.5698315",
"0.56639814",
"0.5615885",
"0.56069446",
"0.5603765",
"0.5597737",
"0.55824375",
"0.5575835",
"0.5558424",
"0.5551569",
"0.55449766",
"0.55439675",
"0.55401695",
"0.5520202",
"0.5510616",
"0.54891926",
"0.547677",
"0.5475856",
"0.5468699",
"0.5463268",
"0.5461841",
"0.54570067",
"0.5456439",
"0.5454853",
"0.54272544",
"0.5425916",
"0.54206437",
"0.5419628",
"0.5413017",
"0.5410313",
"0.5405909",
"0.5386625",
"0.53857434",
"0.53804576",
"0.5369532",
"0.53695077",
"0.5365477",
"0.5358028",
"0.535643",
"0.5349919",
"0.5349919",
"0.5346837",
"0.53330606",
"0.53330606",
"0.532766",
"0.5327615",
"0.53244996",
"0.5318331",
"0.5311775",
"0.53090906",
"0.53063315",
"0.5305876",
"0.5303686",
"0.5303678",
"0.5303327",
"0.53017306",
"0.530169",
"0.5292084",
"0.52844065",
"0.52779776",
"0.5274153",
"0.5271958",
"0.52715635",
"0.52712464",
"0.5270182",
"0.5264964",
"0.5264305",
"0.5261619",
"0.52589077",
"0.52574617",
"0.5257379",
"0.5248467",
"0.524449",
"0.52428174",
"0.5239762",
"0.5237674",
"0.5234707",
"0.52338916",
"0.5228141",
"0.5226942",
"0.5224754",
"0.5223903",
"0.5219531"
] | 0.7611753 | 0 |
SetTimeout adds the timeout to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetTimeout(timeout time.Duration) {
o.timeout = timeout
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *FileInfoCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateLifecycleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRunbookRunCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CloudTargetCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateDashboardRenderTaskParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateEventAlertConditionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateDrgAttachmentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateCrossConnectParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *BackupsCreateStatusParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateCreateServiceRequestNamingParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateScriptParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRackTopoesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetFileSystemParametersInternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *SafeContactCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreatePolicyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetInstancesEventByEventIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateWidgetParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PetCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetDatalakeDbConfigParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateDatabaseOnServerParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateUpdateSymfilePinningParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetPrivateToggleDepositAddressCreationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AlertDefinitionAddOperatorToEscalationLevelParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateAccessPolicyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *IgroupInitiatorCreateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateGUIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateBlueprintInWorkspaceInternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRoutingInstanceUsingPOSTParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SizeParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostLTENetworkIDDNSRecordsDomainParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SystemEventsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CloudNFSExportAddParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteSystemObjectDefinitionsByIDAttributeGroupsByIDAttributeDefinitionsByIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRoomParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AddVMParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteaspecificEmergencyMappingContainerParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *DeleteLTENetworkIDNetworkProbeTasksTaskIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNetworkExternalParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ValidateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostAutoDiscoveryPingsweepParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *IndicatorCreateV1Params) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateTokenParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetRestoreDatalakeStatusParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RegenerateDeployKeyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRuntimeMapParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AddOrUpdateNodePoolConfigItemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *EmployeeEntitlementGrantEntitlementsByTemplateGrantEntitlementsByTemplateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *TestEmailConfigurationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateVolumeBackupParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostDeviceRackParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreatePackageRepositoryDeltaUploadParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *UploadTaskFileParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostSecdefSearchParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SaveTemplateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *SharedCatalogSharedCatalogRepositoryV1SavePostParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ReadStorageV1alpha1VolumeAttachmentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostAutoDiscoveryDNSParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetActionTemplateLogoVersionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ListHetznerSizesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetProvidersFileByIDParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetBootVolumeAttachmentParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CatalogProductTierPriceManagementV1AddPostParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *StartPostgreSQLShowCreateTableActionParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostMultiNodeDeviceParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetAuditEventsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostHyperflexAutoSupportPoliciesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PutClusterForAutoscaleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *UploadDeployFileParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (options *CreateLoadBalancerMonitorOptions) SetTimeout(timeout int64) *CreateLoadBalancerMonitorOptions {\n\toptions.Timeout = core.Int64Ptr(timeout)\n\treturn options\n}",
"func (o *CreateTenantParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetGCParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateNetworkGroupPolicyParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AllLookmlTestsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateRepoNotificationParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostHostStorageSectorsDeleteMerklerootParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetCustomRuleParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetWorkflowBuildTaskMetaMoidParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreateIscsiLunSnapshotParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostAPI24PoliciesNfsMembersParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreatePublicIPAdressUsingPOSTParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostV1PostMortemsReportsReportIDEventsNotesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *UploadWorkflowTemplateParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetOperatingSystemsParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *CreatePolicyResetItemParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *ColumnFamilyCrcCheckChanceByNamePostParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetTenantTagTestSpacesParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *PostDeviceUpsertParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *EmployeesByIDContractrulePutParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *RecoverySubmitParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetCreationTasksParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetAPI24ArraysNtpTestParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *GetNetworkAppliancePortParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *AdminCreateJusticeUserParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}",
"func (o *MonitorCheckGetWaterfallInfoParams) SetTimeout(timeout time.Duration) {\n\to.timeout = timeout\n}"
] | [
"0.7360465",
"0.7286983",
"0.70200294",
"0.6936747",
"0.6879686",
"0.6790714",
"0.6787776",
"0.6773603",
"0.67690927",
"0.67641747",
"0.6741514",
"0.6710848",
"0.66715544",
"0.66256624",
"0.66121364",
"0.66099334",
"0.6607404",
"0.6592908",
"0.65900815",
"0.6586507",
"0.6586065",
"0.6581403",
"0.65691614",
"0.65628195",
"0.6548574",
"0.65158486",
"0.6499149",
"0.6494937",
"0.64833516",
"0.6473403",
"0.6465468",
"0.6462259",
"0.6459391",
"0.6456811",
"0.6453469",
"0.6450584",
"0.64437085",
"0.6443357",
"0.64388233",
"0.64375776",
"0.6431041",
"0.6429999",
"0.6427767",
"0.6421373",
"0.6409643",
"0.6382825",
"0.6378013",
"0.63775784",
"0.63670003",
"0.6360424",
"0.63550794",
"0.63549477",
"0.6343536",
"0.6332319",
"0.63303924",
"0.6328395",
"0.63260615",
"0.6325692",
"0.6312166",
"0.6312067",
"0.63074106",
"0.62927663",
"0.6292182",
"0.6287614",
"0.62874925",
"0.6282269",
"0.62752396",
"0.62705904",
"0.6269086",
"0.62665445",
"0.6264649",
"0.6264377",
"0.62634575",
"0.6257256",
"0.62492377",
"0.6242649",
"0.6236936",
"0.62199587",
"0.62140745",
"0.6212768",
"0.6207504",
"0.62070537",
"0.6201964",
"0.6201215",
"0.61982906",
"0.618639",
"0.618454",
"0.618004",
"0.6176877",
"0.6169352",
"0.61671764",
"0.6155658",
"0.61447656",
"0.6139208",
"0.61390007",
"0.61386764",
"0.61382955",
"0.61375993",
"0.6134803",
"0.6134276"
] | 0.82259035 | 0 |
WithContext adds the context to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {
o.SetContext(ctx)
return o
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (m *VMVolumeCreationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateElfStoragePolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSizeUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *NfsExportCreationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *CreateTicketPayload) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DockerAttributes) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ShadowcopyAddFiles) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetPastUsageNotFoundBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMSnapshotCreationParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateConsistentType(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *CreateSignal) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMAddDiskParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateData(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateWhere(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetPastUsageBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *LogInlineRetention) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMTemplateCreationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *CreateManifestParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *EmsSyslogInlineFormat) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *WeightAllotment) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VolumeGroupStorageDetails) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostSilencesOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VolumeEfficiencyPolicyInlineSchedule) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *PTXServiceDTOBusSpecificationV2N1Estimate) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *SnaplockLitigationFile) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *MountNewCreateDisksParamsVMVolume) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateElfStoragePolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSizeUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *FwmgrMsaDateRangeSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *Logger) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostWalletSiafundsOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMAddDiskParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateIoPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *OBWriteDomesticResponse5DataInitiationRemittanceInformation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UtilTestBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *AuditSchedule) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *notification) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ClusterLicenseUpdationParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m VMFirmware) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *MountNewCreateDisksParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateBus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMVolume(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m Alerts) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostPunchInOKBodyResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.contextValidateAttendance(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m MetricDatapoint) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetLighthouseOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *InstallVmtoolsParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VlanVds) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *IoArgoprojWorkflowV1alpha1TemplateRef) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func ValidateContext(ctx *scheduler.Context, errChan ...*chan error) {\n\tdefer func() {\n\t\tif len(errChan) > 0 {\n\t\t\tclose(*errChan[0])\n\t\t}\n\t}()\n\tginkgo.Describe(fmt.Sprintf(\"For validation of %s app\", ctx.App.Key), func() {\n\t\tvar timeout time.Duration\n\t\tappScaleFactor := time.Duration(Inst().GlobalScaleFactor)\n\t\tif ctx.ReadinessTimeout == time.Duration(0) {\n\t\t\ttimeout = appScaleFactor * defaultTimeout\n\t\t} else {\n\t\t\ttimeout = appScaleFactor * ctx.ReadinessTimeout\n\t\t}\n\n\t\tStep(fmt.Sprintf(\"validate %s app's volumes\", ctx.App.Key), func() {\n\t\t\tif !ctx.SkipVolumeValidation {\n\t\t\t\tValidateVolumes(ctx, errChan...)\n\t\t\t}\n\t\t})\n\n\t\tStep(fmt.Sprintf(\"wait for %s app to start running\", ctx.App.Key), func() {\n\t\t\terr := Inst().S.WaitForRunning(ctx, timeout, defaultRetryInterval)\n\t\t\tif err != nil {\n\t\t\t\tprocessError(err, errChan...)\n\t\t\t\treturn\n\t\t\t}\n\t\t})\n\n\t\tStep(fmt.Sprintf(\"validate if %s app's volumes are setup\", ctx.App.Key), func() {\n\t\t\tif ctx.SkipVolumeValidation {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvols, err := Inst().S.GetVolumes(ctx)\n\t\t\tprocessError(err, errChan...)\n\n\t\t\tfor _, vol := range vols {\n\t\t\t\tStep(fmt.Sprintf(\"validate if %s app's volume: %v is setup\", ctx.App.Key, vol), func() {\n\t\t\t\t\terr := Inst().V.ValidateVolumeSetup(vol)\n\t\t\t\t\tprocessError(err, errChan...)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n}",
"func (m *OBWriteInternational3DataInitiationRemittanceInformation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m SupportedIeeeStandards) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteDomesticStandingOrderConsent5DataAuthorisation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostOauth2TokenOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetPastUsageOKBodyItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *CreateProductUnprocessableEntityBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteInternationalScheduledConsent5DataAuthorisation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *DeleteVMTempNotFoundBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ElfDataStore) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateCluster(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIscsiTarget(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNfsExport(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNvmfSubsystem(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateType(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetHelloOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *AlertConfigurationThreshold) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteInternationalScheduledConsent5DataInitiationRemittanceInformation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ElfImageWhereInput) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateAND(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNOT(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateOR(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateCluster(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateContentLibraryImage(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusNot(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusNotIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLabelsEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLabelsNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLabelsSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisksEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisksNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisksSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMSnapshotsEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMSnapshotsNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMSnapshotsSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMTemplatesEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMTemplatesNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMTemplatesSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *FalconxMITREAttackParent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteDomesticResponse5DataMultiAuthorisation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *DeleteOrgNotFoundBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DomainBatchInitSessionRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *BrickTopoUpdationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateData(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateWhere(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *UniverseResourceDetails) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *FirmwareShelf) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateInProgressCount(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m PartitionUsage) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *SoftwareDataEncryption) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *BackupWPA) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *RunnersSummary) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *PostUpdatePushTokenPayload) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ParsedOVF) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateCPU(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateFirmware(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNics(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *PacketAnalyzerConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *Annotation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *AlertNotifierWhereInput) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateAND(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNOT(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateOR(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateClustersEvery(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateClustersNone(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateClustersSome(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusNot(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatusNotIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLanguageCode(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLanguageCodeIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLanguageCodeNot(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLanguageCodeNotIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSecurityMode(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSecurityModeIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSecurityModeNot(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSecurityModeNotIn(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSMTPServerConfig(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *VMAddDiskParamsDataVMDisks) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateMountDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMountNewCreateDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *TogglePacketGeneratorsBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ClusterHostRequirementsDetails) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m VMVolumeOrderByInput) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AcceptInvitationBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DeviceapiUpdateDeviceTagsRequestV1) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *DeleteVMTempOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *MmdsConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *MountNfs) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ConfirmDownloadPayload) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DomainAPIVulnerabilityCVECISAInfo) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *JobJobFilament) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *DeleteOrgOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *WireguardSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ClusterLicenseUpdationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateData(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateWhere(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *PTXServiceDTORailSpecificationV2MetroMRTAlertListAlertScopeTrain) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *LoyaltyEventFilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateDateTimeFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLocationFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLoyaltyAccountFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateOrderFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateTypeFilter(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *TableReporterParamsFilter) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateAlerts(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateClusters(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateConsistencyGroups(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateContentLibraryImages(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateContentLibraryVMTemplates(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateDatacenters(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateElfDataStores(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateElfImages(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateGlobalAlertRules(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateHosts(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIscsiConnections(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIscsiLunSnapshots(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIscsiLuns(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIscsiTargets(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNamespaceGroups(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNfsExports(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNics(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNvmfNamespaceSnapshots(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNvmfNamespaces(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNvmfSubsystems(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSnapshotPlans(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSystemAuditLogs(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateTasks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateUsbDevices(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateUserAuditLogs(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateUsers(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVdses(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVlans(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMEntityFilters(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMPlacementGroups(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMTemplates(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := 
m.contextValidateVMVolumes(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVms(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *UserLastReleaseNotesSeen) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteInternational3DataInitiationExchangeRateInformation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (m *AccumulateLoyaltyPointsRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateAccumulatePoints(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *SAPCreate) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateInstances(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNetworks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidatePinPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateStorageAffinity(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *PTXAPIRailModelV2THSRODAvailableSeatStatusWrapperPTXServiceDTORailSpecificationV2THSRODAvailableSeat) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateAvailableSeats(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m OBFeeFrequency1Code3) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m VMStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *SentEmail) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *SovrenParserModelSovrenDateWithParts) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m TenderCardDetailsStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *AlertNotifier) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateClusters(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateEntityAsyncStatus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateLanguageCode(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSecurityMode(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSMTPServerConfig(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}"
] | [
"0.5943772",
"0.58909607",
"0.58349013",
"0.57999563",
"0.57827944",
"0.5740681",
"0.5737816",
"0.571996",
"0.56811744",
"0.5654252",
"0.5654151",
"0.56178325",
"0.56175965",
"0.56051695",
"0.5603444",
"0.5560031",
"0.55424637",
"0.55214834",
"0.5504825",
"0.5498485",
"0.5489761",
"0.5478404",
"0.5474744",
"0.5463156",
"0.54568595",
"0.54532236",
"0.5444814",
"0.5432237",
"0.5425734",
"0.5422464",
"0.5421435",
"0.5412953",
"0.54088056",
"0.54016006",
"0.5400883",
"0.54002404",
"0.53997684",
"0.53907365",
"0.53728104",
"0.53705394",
"0.5360483",
"0.53526485",
"0.5341557",
"0.5337829",
"0.53327936",
"0.5328158",
"0.532341",
"0.531029",
"0.5305958",
"0.5302828",
"0.53012484",
"0.52999306",
"0.5291119",
"0.5288362",
"0.52838475",
"0.52768",
"0.52719104",
"0.527104",
"0.5269029",
"0.52661073",
"0.52645695",
"0.52618265",
"0.5261352",
"0.52579004",
"0.5254741",
"0.52440554",
"0.52359444",
"0.5234746",
"0.52305096",
"0.5225396",
"0.5224965",
"0.5219033",
"0.52124685",
"0.52059484",
"0.52033496",
"0.5197617",
"0.51944464",
"0.51929367",
"0.51885957",
"0.518679",
"0.5185538",
"0.5185399",
"0.51839113",
"0.51801634",
"0.5179999",
"0.5177215",
"0.5164404",
"0.51581615",
"0.51545185",
"0.5151662",
"0.5150418",
"0.5146342",
"0.5141553",
"0.5140299",
"0.5137987",
"0.5136463",
"0.51363695",
"0.5133806",
"0.51321113",
"0.51320195"
] | 0.66061366 | 0 |
SetContext adds the context to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetContext(ctx context.Context) {
o.Context = ctx
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (m *NfsExportCreationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (rec *RawEventCreate) SetContext(m map[string]interface{}) *RawEventCreate {\n\trec.mutation.SetContext(m)\n\treturn rec\n}",
"func (m *CreateTicketPayload) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMTemplateCreationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMVolumeCreationParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateElfStoragePolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSizeUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *VMSnapshotCreationParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateConsistentType(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *ShadowcopyAddFiles) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *FileInfoCreateParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *CreateSignal) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteDomesticResponse5DataInitiationRemittanceInformation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *PTXServiceDTOBusSpecificationV2N1Estimate) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *CreateManifestParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UtilTestBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostSilencesOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *SystemEventsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CreateEventAlertConditionParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *PacketAnalyzerConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DockerAttributes) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMAddDiskParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateIoPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *InstallVmtoolsParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VMAddDiskParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateData(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateWhere(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m VMFirmware) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *MountNewCreateDisksParams) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateBus(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxBandwidthUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMaxIopsPolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateVMVolume(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *VolumeGroupStorageDetails) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteInternationalScheduledConsent5DataInitiationRemittanceInformation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *SnaplockLitigationFile) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PostV1PostMortemsReportsReportIDEventsNotesParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *PostUpdatePushTokenPayload) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *LogInlineRetention) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m MetricDatapoint) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteDomesticResponse5DataMultiAuthorisation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetPastUsageBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *CreateDatabaseOnServerParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CreateLifecycleParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *MmdsConfig) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *JVMSystemDiagnosticsSnapshotDTO) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateContentRepositoryStorageUsage(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateFlowFileRepositoryStorageUsage(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateGarbageCollectionDiagnostics(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateProvenanceRepositoryStorageUsage(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *DeviceapiUpdateDeviceTagsRequestV1) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteDomesticStandingOrderConsent5DataAuthorisation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *AlertConfigurationThreshold) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *FwmgrMsaDateRangeSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *MountNewCreateDisksParamsVMVolume) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateElfStoragePolicy(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateSizeUnit(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *EmsSyslogInlineFormat) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *SentEmail) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostOauth2TokenOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ClusterHostRequirementsDetails) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m PartitionUsage) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteInternational3DataInitiationRemittanceInformation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *DeleteVMTempOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *CreateDashboardRenderTaskParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *VolumeEfficiencyPolicyInlineSchedule) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostWalletSiafundsOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *AuditSchedule) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *OBWriteInternationalScheduledConsent5DataAuthorisation) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *VlanVds) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *TogglePacketGeneratorsBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m TenderCardDetailsStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *ParsedOVF) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateCPU(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateFirmware(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNics(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CreateRunbookRunCreateParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetAuditEventsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *RemoteProcessGroupStatusSnapshotDTO) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *CreateScriptParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ValidateCreateServiceRequestNamingParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ListAlertableEventTypeParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *SoftwareDataEncryption) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m VMStatus) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (f *framework) createContext() context.Context {\n\treturn context.WithValue(context.Background(), epochKey, f.epoch)\n}",
"func (o *PostPunchInOKBodyResponse) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.contextValidateAttendance(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (m *ElfDataStore) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateCluster(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateIscsiTarget(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNfsExport(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateNvmfSubsystem(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateType(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CreateDrgAttachmentParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *WeightAllotment) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UpdateEventParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PostAPI24ProtectionGroupSnapshotsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *ValidateUpdateSymfilePinningParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m Alerts) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AlertDefinitionAddOperatorToEscalationLevelParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetJobEventsParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *ClusterLicenseUpdationParamsData) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *WireguardSpec) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *FalconxMITREAttackParent) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func ValidateContext(ctx *scheduler.Context, errChan ...*chan error) {\n\tdefer func() {\n\t\tif len(errChan) > 0 {\n\t\t\tclose(*errChan[0])\n\t\t}\n\t}()\n\tginkgo.Describe(fmt.Sprintf(\"For validation of %s app\", ctx.App.Key), func() {\n\t\tvar timeout time.Duration\n\t\tappScaleFactor := time.Duration(Inst().GlobalScaleFactor)\n\t\tif ctx.ReadinessTimeout == time.Duration(0) {\n\t\t\ttimeout = appScaleFactor * defaultTimeout\n\t\t} else {\n\t\t\ttimeout = appScaleFactor * ctx.ReadinessTimeout\n\t\t}\n\n\t\tStep(fmt.Sprintf(\"validate %s app's volumes\", ctx.App.Key), func() {\n\t\t\tif !ctx.SkipVolumeValidation {\n\t\t\t\tValidateVolumes(ctx, errChan...)\n\t\t\t}\n\t\t})\n\n\t\tStep(fmt.Sprintf(\"wait for %s app to start running\", ctx.App.Key), func() {\n\t\t\terr := Inst().S.WaitForRunning(ctx, timeout, defaultRetryInterval)\n\t\t\tif err != nil {\n\t\t\t\tprocessError(err, errChan...)\n\t\t\t\treturn\n\t\t\t}\n\t\t})\n\n\t\tStep(fmt.Sprintf(\"validate if %s app's volumes are setup\", ctx.App.Key), func() {\n\t\t\tif ctx.SkipVolumeValidation {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvols, err := Inst().S.GetVolumes(ctx)\n\t\t\tprocessError(err, errChan...)\n\n\t\t\tfor _, vol := range vols {\n\t\t\t\tStep(fmt.Sprintf(\"validate if %s app's volume: %v is setup\", ctx.App.Key, vol), func() {\n\t\t\t\t\terr := Inst().V.ValidateVolumeSetup(vol)\n\t\t\t\t\tprocessError(err, errChan...)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t})\n}",
"func (o *DeleteVMTempNotFoundBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m DiskType) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *DomainBatchInitSessionRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *Logger) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *CloudNFSExportAddParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *IgroupInitiatorCreateParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *VMAddDiskParamsDataVMDisks) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.contextValidateMountDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.contextValidateMountNewCreateDisks(ctx, formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetInstancesEventByEventIDParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *GetPastUsageNotFoundBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostAutoDiscoveryPingsweepParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (o *PostLTENetworkIDDNSRecordsDomainParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}",
"func (m *UniverseResourceDetails) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *JobJobFilament) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (m *notification) ContextValidate(ctx context.Context, formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (o *PostDeviceRackParams) SetContext(ctx context.Context) {\n\to.Context = ctx\n}"
] | [
"0.70760375",
"0.5917966",
"0.5904085",
"0.58512914",
"0.5800697",
"0.57943416",
"0.5761121",
"0.57570463",
"0.5737906",
"0.5687909",
"0.5654442",
"0.5629598",
"0.5559078",
"0.55423695",
"0.553606",
"0.5504134",
"0.55035645",
"0.5488992",
"0.54832834",
"0.5477848",
"0.5464404",
"0.54632384",
"0.5437233",
"0.5407198",
"0.54049575",
"0.5399056",
"0.5397599",
"0.5388862",
"0.53828716",
"0.53791344",
"0.537491",
"0.5368125",
"0.53564405",
"0.535163",
"0.5350494",
"0.53485656",
"0.5346604",
"0.5345522",
"0.53396285",
"0.5339131",
"0.5332466",
"0.5329805",
"0.5328041",
"0.53270274",
"0.53252894",
"0.5319614",
"0.53191936",
"0.53118926",
"0.5297427",
"0.5289656",
"0.5288377",
"0.5288313",
"0.52776843",
"0.5272397",
"0.5271942",
"0.5267981",
"0.5264723",
"0.5264581",
"0.52634376",
"0.52610576",
"0.52590716",
"0.5252995",
"0.5252802",
"0.5250655",
"0.52466995",
"0.52446926",
"0.5242161",
"0.5239335",
"0.5238385",
"0.52363825",
"0.52339154",
"0.52321494",
"0.5231431",
"0.52309847",
"0.5227234",
"0.52256787",
"0.5214219",
"0.5213739",
"0.52131385",
"0.52069134",
"0.5205588",
"0.52012444",
"0.5199839",
"0.5198273",
"0.5196846",
"0.518822",
"0.518784",
"0.51866025",
"0.5180941",
"0.5180166",
"0.5171011",
"0.51705873",
"0.51703995",
"0.51685876",
"0.5166733",
"0.5162551",
"0.5160551",
"0.5154871",
"0.5149077",
"0.5146725"
] | 0.677753 | 1 |
WithHTTPClient adds the HTTPClient to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {
o.SetHTTPClient(client)
return o
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *CreateEventAlertConditionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *FileInfoCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateParams) WithHTTPClient(client *http.Client) *ValidateParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *CreateEventAlertConditionParams) WithHTTPClient(client *http.Client) *CreateEventAlertConditionParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *CreateDrgAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAuditEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SystemEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetInstancesEventByEventIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRunbookRunCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListAlertableEventTypeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateLifecycleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateGUIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CloudTargetCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAttendanceHourlyPaidLeaveParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SafeContactCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateDashboardRenderTaskParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetJobEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CloudNFSExportAddParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *V2ListEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BudgetAddParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAutoDiscoveryPingsweepParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRoomParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteSubscribedEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPrivateToggleDepositAddressCreationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBootVolumeAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AlertDefinitionAddOperatorToEscalationLevelParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostV1PostMortemsReportsReportIDEventsNotesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateWidgetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateCrossConnectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PetCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateScriptParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreatePolicyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateDatabaseOnServerParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BackupsCreateStatusParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetEventLogsUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateCreateServiceRequestNamingParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateAccessPolicyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MonitorCheckGetWaterfallInfoParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EmsEventCollectionGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateTokenParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteaspecificEmergencyMappingContainerParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *TestEmailConfigurationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDatalakeDbConfigParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateVolumeBackupParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAutoDiscoveryDNSParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SystemEventsParams) WithHTTPClient(client *http.Client) *SystemEventsParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *TravelExpenseAttachmentDownloadAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MonitorCheckGetScreenshotsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EmployeesByIDContractrulePutParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRackTopoesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetFileSystemParametersInternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MultiDeleteIssueAttachmentOfIssueParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *FieldHistogramKeywordParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *IgroupInitiatorCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *IndicatorCreateV1Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateUpdateSymfilePinningParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAlertsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostLTENetworkIDDNSRecordsDomainParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteAttributeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteSystemObjectDefinitionsByIDAttributeGroupsByIDAttributeDefinitionsByIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *StartPostgreSQLShowCreateTableActionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostDeviceRackParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteVersionControlRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetCustomRuleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateListParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateTenantParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DownloadFlowFileContentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetaspecificPbxDeviceFirmwareBinaryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkExternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EmployeeEntitlementGrantEntitlementsByTemplateGrantEntitlementsByTemplateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetCreationTasksParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetKeyBlockByHeightParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OptionsTodoTodoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetIntrusionDetectionReportsIntrusionReportIDDownloadPdfParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateInstantPaymentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostHyperflexAutoSupportPoliciesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostV1IncidentsIncidentIDRelatedChangeEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func HTTPClient(client *http.Client) HTTPOption {\n\treturn func(c *HTTPCollector) { c.client = client }\n}",
"func (o *ReadStorageV1alpha1VolumeAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteDebugRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func NewCreateEventAlertConditionParamsWithHTTPClient(client *http.Client) *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *GetRestoreDatalakeStatusParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *LedgerVoucherPutParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostDeviceUpsertParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAPI24ArraysNtpTestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBundleByKeyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EntryServiceDeleteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostConditionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetApplianceUpgradePoliciesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostDocumentMergeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *LogRequestDownloadParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] | [
"0.73106545",
"0.6677077",
"0.6381298",
"0.6102895",
"0.6074314",
"0.60508007",
"0.6032167",
"0.6009698",
"0.59386265",
"0.59197384",
"0.58717716",
"0.58530694",
"0.5832113",
"0.578593",
"0.5780251",
"0.57785845",
"0.57711965",
"0.57654274",
"0.5756823",
"0.57354456",
"0.5712473",
"0.5699998",
"0.56966877",
"0.5696515",
"0.56909406",
"0.56860214",
"0.56764865",
"0.5661819",
"0.5655145",
"0.56422436",
"0.563808",
"0.5637446",
"0.56333566",
"0.5617703",
"0.56159174",
"0.56148434",
"0.5614657",
"0.56124914",
"0.56050587",
"0.5604562",
"0.56034327",
"0.5591362",
"0.5590451",
"0.5544499",
"0.5540638",
"0.5533643",
"0.5523353",
"0.5523035",
"0.5517515",
"0.5503667",
"0.54971796",
"0.54844344",
"0.5477382",
"0.5470218",
"0.54652214",
"0.54645646",
"0.54567474",
"0.54486126",
"0.54414505",
"0.5434542",
"0.5429157",
"0.5417951",
"0.54115707",
"0.5403231",
"0.54006356",
"0.539907",
"0.539719",
"0.5394187",
"0.5392651",
"0.53873414",
"0.53865564",
"0.53822374",
"0.5379408",
"0.53787136",
"0.5378246",
"0.5376592",
"0.537548",
"0.5373801",
"0.5373124",
"0.5365467",
"0.53572303",
"0.5352581",
"0.53522044",
"0.5344921",
"0.534027",
"0.53401893",
"0.5339817",
"0.53387463",
"0.533668",
"0.5336669",
"0.53328735",
"0.5331215",
"0.53259605",
"0.5319926",
"0.5313081",
"0.5308342",
"0.53068197",
"0.5304038",
"0.530358",
"0.53034806"
] | 0.75251514 | 0 |
SetHTTPClient adds the HTTPClient to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetHTTPClient(client *http.Client) {
o.HTTPClient = client
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *CreateEventAlertConditionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *FileInfoCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateLifecycleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetAuditEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SystemEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *IntegrationsManualHTTPSCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateDrgAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateDashboardRenderTaskParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetInstancesEventByEventIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateCrossConnectParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CloudTargetCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetJobEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateDatabaseOnServerParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CloudNFSExportAddParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateAccessPolicyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateWidgetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateScriptParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAutoDiscoveryPingsweepParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRunbookRunCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateUpdateSymfilePinningParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListAlertableEventTypeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateGUIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetPrivateToggleDepositAddressCreationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetDatalakeDbConfigParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ListEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *SafeContactCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreatePolicyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRackTopoesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateGitWebhookUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostV1PostMortemsReportsReportIDEventsNotesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *IgroupInitiatorCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetEventLogsUsingGETParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *AlertDefinitionAddOperatorToEscalationLevelParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAutoDiscoveryDNSParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRoomParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MonitorCheckGetWaterfallInfoParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetFileSystemParametersInternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostHyperflexAutoSupportPoliciesParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *ValidateCreateServiceRequestNamingParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *TestEmailConfigurationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkExternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *V2ListEventsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BudgetAddParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetCustomRuleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MonitorCheckGetScreenshotsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *IndicatorCreateV1Params) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostLTENetworkIDDNSRecordsDomainParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreatePackageRepositoryDeltaUploadParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostDeviceRackParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *BackupsCreateStatusParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteSubscribedEventParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateTokenParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAttendanceHourlyPaidLeaveParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PetCreateParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *OptionsTodoTodoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateVolumeBackupParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DownloadFlowFileContentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreatePolicyResetItemParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteVersionControlRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetApplianceUpgradePoliciesMoidParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EmsEventCollectionGetParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetCreationTasksParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteaspecificEmergencyMappingContainerParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PublicWebLinkPlatformEstablishParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UploadTaskFileParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *StartPostgreSQLShowCreateTableActionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetSimulationActivityParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetBootVolumeAttachmentParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RegenerateDeployKeyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetTagAuditLogsWithHistoryParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetRestoreDatalakeStatusParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *FieldHistogramKeywordParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *NotifyStateChangedParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *LineRouteParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *EmployeesByIDContractrulePutParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *GetNetworkAppliancePortParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateBlueprintInWorkspaceInternalParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostDeviceUpsertParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRoutingInstanceUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteSystemObjectDefinitionsByIDAttributeGroupsByIDAttributeDefinitionsByIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateNetworkGroupPolicyParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *MultiDeleteIssueAttachmentOfIssueParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostApplyManifestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostConditionParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteDebugRequestParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostDocumentMergeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *UpdateAutoTagParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeprecatedCycleParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *LogRequestDownloadParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteAttributeParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreatePublicIPAdressUsingPOSTParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *PostAlertsParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateIscsiLunSnapshotParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *StartPacketCaptureParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *CreateRepoNotificationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *DeleteLTENetworkIDNetworkProbeTasksTaskIDParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}",
"func (o *RegisterApplicationParams) SetHTTPClient(client *http.Client) {\n\to.HTTPClient = client\n}"
] | [
"0.7470085",
"0.7298815",
"0.7279338",
"0.7270448",
"0.72573",
"0.7251727",
"0.72381556",
"0.7171434",
"0.7168642",
"0.71093386",
"0.7044503",
"0.70371735",
"0.7037165",
"0.70271677",
"0.69839877",
"0.6973847",
"0.697179",
"0.6959221",
"0.69567066",
"0.6947108",
"0.6944572",
"0.69308525",
"0.69291633",
"0.6928894",
"0.69286567",
"0.69251525",
"0.69177455",
"0.6916849",
"0.69127625",
"0.68914855",
"0.6886646",
"0.68841094",
"0.68805057",
"0.6878669",
"0.68700445",
"0.68682355",
"0.6866208",
"0.6843273",
"0.6834413",
"0.6831564",
"0.68314236",
"0.6827806",
"0.6806296",
"0.6804413",
"0.6804052",
"0.6799136",
"0.6784862",
"0.6782257",
"0.67789465",
"0.6771779",
"0.6770831",
"0.6764356",
"0.676039",
"0.67556965",
"0.67509216",
"0.67492825",
"0.6742352",
"0.6739626",
"0.6736658",
"0.67315775",
"0.6731205",
"0.67297345",
"0.6729649",
"0.67223155",
"0.6720777",
"0.67125255",
"0.6712246",
"0.67114854",
"0.6708859",
"0.6705695",
"0.6703086",
"0.66905427",
"0.6687407",
"0.66869336",
"0.6685327",
"0.66794413",
"0.66733176",
"0.6673176",
"0.66730416",
"0.66695446",
"0.66650486",
"0.66630644",
"0.66614693",
"0.6656429",
"0.66562235",
"0.6655956",
"0.665545",
"0.66547537",
"0.66531646",
"0.66524786",
"0.66494846",
"0.6649103",
"0.664667",
"0.6646223",
"0.6641746",
"0.6639622",
"0.66372675",
"0.6632968",
"0.6630937",
"0.6627841"
] | 0.84539205 | 0 |
WithBody adds the body to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {
o.SetBody(body)
return o
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewValidateCreateAnomalyDetectionDiskEventParams() *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetBody(body *dynatrace.DiskEventAnomalyDetectionConfig) {\n\to.Body = body\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\t\tHTTPClient: client,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewValidateCreateAnomalyDetectionDiskEventParamsWithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\tvar ()\n\treturn &ValidateCreateAnomalyDetectionDiskEventParams{\n\n\t\ttimeout: timeout,\n\t}\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithTimeout(timeout time.Duration) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetTimeout(timeout)\n\treturn o\n}",
"func (o *VolumeCreateBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithContext(ctx context.Context) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetContext(ctx)\n\treturn o\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithHTTPClient(client *http.Client) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetHTTPClient(client)\n\treturn o\n}",
"func (o *DeleteDatasetCreatedBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostAPIV2EventsParamsBodyAttributes) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateCallback(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateHeaders(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ServiceCreateCreatedBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostAPIV2EventsParamsBodyAttributesHeaders) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (s *server) createEvent(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tdefer r.Body.Close()\n\n\t// Read the body out into a buffer.\n\tbuf, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// Read the body as generic JSON, so we can perform JDDF validation on it.\n\t//\n\t// If the request body is invalid JSON, send the user a 400 Bad Request.\n\tvar eventRaw interface{}\n\tif err := json.Unmarshal(buf, &eventRaw); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// Validate the event (in eventRaw) against our schema for JDDF events.\n\t//\n\t// In practice, there will never be errors arising here -- see the jddf-go\n\t// docs for details, but basically jddf.Validator.Validate can only error if\n\t// you use \"ref\" in a cyclic manner in your schemas.\n\t//\n\t// Therefore, we ignore the possibility of an error here.\n\tvalidator := jddf.Validator{}\n\tvalidationResult, _ := validator.Validate(s.EventSchema, eventRaw)\n\n\t// If there were validation errors, then we write them out to the response\n\t// body, and send the user a 400 Bad Request.\n\tif len(validationResult.Errors) != 0 {\n\t\tencoder := json.NewEncoder(w)\n\t\tif err := encoder.Encode(validationResult.Errors); err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"%s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// If we made it here, the request body contained JSON that passed our schema.\n\t// Let's now write it into the database.\n\t//\n\t// The events table has a \"payload\" column of type \"jsonb\". In Golang-land,\n\t// you can send that to Postgres by just using []byte. The user's request\n\t// payload is already in that format, so we'll use that.\n\t_, err = s.DB.ExecContext(r.Context(), `\n\t\tinsert into events (payload) values ($1)\n\t`, buf)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// We're done!\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintf(w, \"%s\", buf)\n}",
"func (a *Client) ValidateCreateAnomalyDetectionMetricEvent(params *ValidateCreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*ValidateCreateAnomalyDetectionMetricEventNoContent, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewValidateCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"validateCreateAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents/validator\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &ValidateCreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*ValidateCreateAnomalyDetectionMetricEventNoContent)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for validateCreateAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func (o *PostAPIV2EventsBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateAttributes(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEntityID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEvent(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostLeasesIDAuthCreatedBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func ValidateEventPayloadRequestBody(body *EventPayloadRequestBody) (err error) {\n\terr = goa.MergeErrors(err, goa.ValidateFormat(\"body.start\", body.Start, goa.FormatDateTime))\n\n\tif body.End != nil {\n\t\terr = goa.MergeErrors(err, goa.ValidateFormat(\"body.end\", *body.End, goa.FormatDateTime))\n\t}\n\treturn\n}",
"func (o *CreateDatasetBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateAccess(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateComp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateCompressMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEnc(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateMetadata(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateProject(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validatePushMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func ValidateCreateEvent(payload *model.CreateEventReq) error {\n\tif payload == nil {\n\t\terr := errors.New(\"invalid payload\")\n\t\treturn err\n\t}\n\n\tif strings.TrimSpace(payload.Title) == \"\" {\n\t\terr := errors.New(\"invalid title\")\n\t\treturn err\n\t}\n\n\tif payload.LocationID == 0 {\n\t\terr := errors.New(\"invalid location id\")\n\t\treturn err\n\t}\n\n\tif payload.StartDate.UTC().IsZero() {\n\t\terr := errors.New(\"invalid time. time format is YYYY:MM:dd HH:mm:ss\")\n\t\treturn err\n\t}\n\n\tif payload.EndDate.UTC().IsZero() {\n\t\terr := errors.New(\"invalid time. time format is YYYY:MM:dd HH:mm:ss\")\n\t\treturn err\n\t}\n\n\tif len(payload.TicketID) == 0 {\n\t\terr := errors.New(\"invalid ticket id\")\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (body *CreateResponseBody) Validate() (err error) {\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Color == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"color\", \"body\"))\n\t}\n\tif body.Cron == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"cron\", \"body\"))\n\t}\n\tif body.Name != nil {\n\t\tif utf8.RuneCountInString(*body.Name) > 100 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(\"body.name\", *body.Name, utf8.RuneCountInString(*body.Name), 100, false))\n\t\t}\n\t}\n\tif body.Color != nil {\n\t\tif !(*body.Color == \"red\" || *body.Color == \"yellow\" || *body.Color == \"green\" || *body.Color == \"off\") {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"body.color\", *body.Color, []interface{}{\"red\", \"yellow\", \"green\", \"off\"}))\n\t\t}\n\t}\n\treturn\n}",
"func (o *GenerateClusterISOCreatedBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateImageID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ValidateParams) WithBody(body *models.APIValidationRequestV1) *ValidateParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (m *UserExperienceAnalyticsAnomalyRequestBuilder) Post(ctx context.Context, body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.UserExperienceAnalyticsAnomalyable, requestConfiguration *UserExperienceAnalyticsAnomalyRequestBuilderPostRequestConfiguration)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.UserExperienceAnalyticsAnomalyable, error) {\n requestInfo, err := m.ToPostRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateUserExperienceAnalyticsAnomalyFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.UserExperienceAnalyticsAnomalyable), nil\n}",
"func (m *GaragesCreateBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCentreID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLocations(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewCreateanewEmergencyMappingRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/emergencymappings\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}",
"func (c *ClientWithResponses) CreateanewEmergencyMappingWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader) (*CreateanewEmergencyMappingResponse, error) {\n\trsp, err := c.CreateanewEmergencyMappingWithBody(ctx, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseCreateanewEmergencyMappingResponse(rsp)\n}",
"func (payload *CreateOutputPayload) Validate() (err error) {\n\tif payload.Alias == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"alias\"))\n\t}\n\tif payload.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"name\"))\n\t}\n\treturn\n}",
"func (o *AddExternalBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (payload *PutEventPayload) Validate() (err error) {\n\tif payload.Etype == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"etype\"))\n\t}\n\tif payload.Action == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"action\"))\n\t}\n\tif payload.From == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"from\"))\n\t}\n\treturn\n}",
"func ValidateRequestBody(r http.Header, body []byte, signingSecret string) error {\n\tsv, err := slack.NewSecretsVerifier(r, signingSecret)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := sv.Write(body); err != nil {\n\t\treturn err\n\t}\n\tif err := sv.Ensure(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func NewCreateanewEmergencyMappingContainerRequestWithBody(server string, contentType string, body io.Reader) (*http.Request, error) {\n\tvar err error\n\n\tqueryUrl, err := url.Parse(server)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbasePath := fmt.Sprintf(\"/emergencymappingcontainers\")\n\tif basePath[0] == '/' {\n\t\tbasePath = basePath[1:]\n\t}\n\n\tqueryUrl, err = queryUrl.Parse(basePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq, err := http.NewRequest(\"POST\", queryUrl.String(), body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treq.Header.Add(\"Content-Type\", contentType)\n\treturn req, nil\n}",
"func Create(rw http.ResponseWriter, r *http.Request) {\n\tuserID := r.Header.Get(\"userid\")\n\n\t// Get event data from body\n\tbody, err := ioutil.ReadAll(r.Body)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading request body: %s\\n\", err)\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\trw.Write([]byte(\"Error reading request body\"))\n\t\treturn\n\t}\n\n\te := Event{}\n\terr = json.Unmarshal(body, &e)\n\tif err != nil {\n\t\tlog.Printf(\"Error reading request body: %s\\n\", err)\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\trw.Write([]byte(\"Error reading request body\"))\n\t\treturn\n\t}\n\n\t// Validate event data \n\tif strings.TrimSpace(e.Title) == \"\" || strings.TrimSpace(e.StartTime) == \"\" || strings.TrimSpace(e.EndTime) == \"\" {\n\t\tlog.Printf(\"Invalid params: title, startDate or endDate is missing\\n\")\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\trw.Write([]byte(\"Missing title, startDate or endDate\"))\n\t\treturn\n\t}\n\n\tif _, err := time.Parse(time.RFC3339, e.StartTime); err != nil {\n\t\tlog.Printf(\"Invalid value for startDate\\n\")\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\trw.Write([]byte(\"Invalid startDate\"))\n\t\treturn\n\t}\n\n\tif _, err := time.Parse(time.RFC3339, e.EndTime); err != nil {\n\t\tlog.Printf(\"Invalid value for endDate\\n\")\n\t\trw.WriteHeader(http.StatusBadRequest)\n\t\trw.Write([]byte(\"Invalid endDate\"))\n\t\treturn\n\t}\n\n\t// Insert into DB\n\teventID := -1\n\tquery := `INSERT INTO events\n\t\t(title, \"start_time\", \"end_time\", location, notes, \"owner_id\")\n\t\tVALUES ($1, $2, $3, $4, $5, $6)\n\t\tRETURNING id`\n\n\terr = conn.DB.QueryRow(query, e.Title, e.StartTime, e.EndTime, e.Location, e.Notes, userID).Scan(&eventID)\n\tif err != nil {\n\t\tlog.Printf(\"Error creating event: %s\\n\", err)\n\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\trw.Write([]byte(\"Error creating event\"))\n\t\treturn\n\t}\n\n\tlog.Printf(\"Event %s created\\n\", e.Title)\n\trw.WriteHeader(http.StatusOK)\n\trw.Write([]byte(fmt.Sprintf(\"Event %s created\", e.Title)))\n}",
"func (o *EavAttributeSetManagementV1CreatePostBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateAttributeSet(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEntityTypeCode(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateSkeletonID(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (payload *putEventPayload) Validate() (err error) {\n\tif payload.Etype == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"etype\"))\n\t}\n\tif payload.Action == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"action\"))\n\t}\n\tif payload.From == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"from\"))\n\t}\n\treturn\n}",
"func (o *ValidateCreateServiceRequestNamingParams) SetBody(body *dynatrace.RequestNaming) {\n\to.Body = body\n}",
"func (o *CreateChannelCreatedBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ValidateCreateServiceRequestNamingParams) WithBody(body *dynatrace.RequestNaming) *ValidateCreateServiceRequestNamingParams {\n\to.SetBody(body)\n\treturn o\n}",
"func ValidateCreateBadRequestResponseBody(body *CreateBadRequestResponseBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Message == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"message\", \"body\"))\n\t}\n\tif body.Temporary == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"temporary\", \"body\"))\n\t}\n\tif body.Timeout == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"timeout\", \"body\"))\n\t}\n\tif body.Fault == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"fault\", \"body\"))\n\t}\n\treturn\n}",
"func (o *ServiceCreateBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\t// validation for a type composition with models.ServiceSpec\n\tif err := o.ServiceSpec.Validate(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\t// validation for a type composition with ServiceCreateParamsBodyAllOf1\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (c controller) CreateAnomalyDetector(ctx context.Context, r entity.CreateDetectorRequest) (*string, error) {\n\n\tif err := validateCreateRequest(r); err != nil {\n\t\treturn nil, err\n\t}\n\tpayload, err := mapper.MapToCreateDetector(r)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse, err := c.gateway.CreateDetector(ctx, payload)\n\tif err != nil {\n\t\treturn nil, processEntityError(err)\n\t}\n\tvar data map[string]interface{}\n\t_ = json.Unmarshal(response, &data)\n\n\tdetectorID := fmt.Sprintf(\"%s\", data[\"_id\"])\n\tif !r.Start {\n\t\treturn cmapper.StringToStringPtr(detectorID), nil\n\t}\n\n\terr = c.StartDetector(ctx, detectorID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"detector is created with id: %s, but failed to start due to %v\", detectorID, err)\n\t}\n\treturn cmapper.StringToStringPtr(detectorID), nil\n}",
"func (o *DeleteAppointmentBadRequestBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *PostSiteCreatedBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (ut *addFieldNotePayload) Validate() (err error) {\n\tif ut.Created == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"created\"))\n\t}\n\treturn\n}",
"func (o *CreateDashboardRenderTaskParams) WithBody(body *models.CreateDashboardRenderTask) *CreateDashboardRenderTaskParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (payload *createOutputPayload) Validate() (err error) {\n\tif payload.Alias == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"alias\"))\n\t}\n\tif payload.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"name\"))\n\t}\n\treturn\n}",
"func (o *UpdateEventParams) WithBody(body *models.Event) *UpdateEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (c *ClientWithResponses) CreateanewEmergencyMappingContainerWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader) (*CreateanewEmergencyMappingContainerResponse, error) {\n\trsp, err := c.CreateanewEmergencyMappingContainerWithBody(ctx, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseCreateanewEmergencyMappingContainerResponse(rsp)\n}",
"func (o *GetPastUsageNotFoundBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *AddRemoteRDSNodeBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (b EmployeeCreatedEvent) ValidateEmployeeCreatedEvent() error {\n\tvar validate *validator.Validate\n\tvalidate = validator.New()\n\terr := validate.Struct(b)\n\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn err\n}",
"func (command HelloWorldResource) Create(ctx context.Context, awsConfig awsv2.Config,\n\tevent *CloudFormationLambdaEvent,\n\tlogger *zerolog.Logger) (map[string]interface{}, error) {\n\trequest := HelloWorldResourceRequest{}\n\trequestPropsErr := json.Unmarshal(event.ResourceProperties, &request)\n\tif requestPropsErr != nil {\n\t\treturn nil, requestPropsErr\n\t}\n\tlogger.Info().Msgf(\"create: Hello %s\", request.Message)\n\treturn map[string]interface{}{\n\t\t\"Resource\": \"Created message: \" + request.Message,\n\t}, nil\n}",
"func NewCreateanewEmergencyMappingRequest(server string, body CreateanewEmergencyMappingJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewEmergencyMappingRequestWithBody(server, \"application/json\", bodyReader)\n}",
"func NewCreateEventAlertConditionParams() *CreateEventAlertConditionParams {\n\tvar ()\n\treturn &CreateEventAlertConditionParams{\n\n\t\ttimeout: cr.DefaultTimeout,\n\t}\n}",
"func CreateDescribeCheckWarningDetailRequest() (request *DescribeCheckWarningDetailRequest) {\n\trequest = &DescribeCheckWarningDetailRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Sas\", \"2018-12-03\", \"DescribeCheckWarningDetail\", \"sas\", \"openAPI\")\n\trequest.Method = requests.POST\n\treturn\n}",
"func (m *ParticipantCreateBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateKind(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateKindID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (a AddEventRequest) Validate() error {\n\tif err := v2.Validate(a); err != nil {\n\t\treturn err\n\t}\n\n\t// BaseReading has the skip(\"-\") validation annotation for BinaryReading and SimpleReading\n\t// Otherwise error will occur as only one of them exists\n\t// Therefore, need to validate the nested BinaryReading and SimpleReading struct here\n\tfor _, r := range a.Event.Readings {\n\t\tif err := r.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func (o *CreateChannelBadRequestBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (a *Client) CreateAnomalyDetectionMetricEvent(params *CreateAnomalyDetectionMetricEventParams, authInfo runtime.ClientAuthInfoWriter) (*CreateAnomalyDetectionMetricEventCreated, error) {\n\t// TODO: Validate the params before sending\n\tif params == nil {\n\t\tparams = NewCreateAnomalyDetectionMetricEventParams()\n\t}\n\n\tresult, err := a.transport.Submit(&runtime.ClientOperation{\n\t\tID: \"createAnomalyDetectionMetricEvent\",\n\t\tMethod: \"POST\",\n\t\tPathPattern: \"/anomalyDetection/metricEvents\",\n\t\tProducesMediaTypes: []string{\"application/json\"},\n\t\tConsumesMediaTypes: []string{\"application/json\"},\n\t\tSchemes: []string{\"https\"},\n\t\tParams: params,\n\t\tReader: &CreateAnomalyDetectionMetricEventReader{formats: a.formats},\n\t\tAuthInfo: authInfo,\n\t\tContext: params.Context,\n\t\tClient: params.HTTPClient,\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuccess, ok := result.(*CreateAnomalyDetectionMetricEventCreated)\n\tif ok {\n\t\treturn success, nil\n\t}\n\t// unexpected success response\n\t// safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue\n\tmsg := fmt.Sprintf(\"unexpected success response for createAnomalyDetectionMetricEvent: API contract not enforced by server. Client expected to get an error, but got: %T\", result)\n\tpanic(msg)\n}",
"func ValidateCreateRequestBody(body *CreateRequestBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.Code == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"code\", \"body\"))\n\t}\n\tif body.Address == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"address\", \"body\"))\n\t}\n\tif body.Type == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"type\", \"body\"))\n\t}\n\tif body.FounderID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"founder_id\", \"body\"))\n\t}\n\tif body.Type != nil {\n\t\tif !(*body.Type == 1 || *body.Type == 2 || *body.Type == 3) {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"body.type\", *body.Type, []interface{}{1, 2, 3}))\n\t\t}\n\t}\n\treturn\n}",
"func (w *BareMetalDiscoveryWebhook) ValidateCreate(obj runtime.Object) error {\n\tr := obj.(*baremetalv1alpha1.BareMetalDiscovery)\n\n\tbaremetaldiscoverylog.Info(\"validate create\", \"name\", r.Name)\n\n\t// TODO: make sure there is only 1 discovery for a given systemUUID\n\treturn nil\n}",
"func (o *GetEventsOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateEvents(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (r ApiApiProjectsIdDeploymentsPostRequest) Body(body RequestsCreateProjectDeploymentRequest) ApiApiProjectsIdDeploymentsPostRequest {\n\tr.body = &body\n\treturn r\n}",
"func (o *PostSiteBadRequestBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (payload *CreateVerificationPayload) Validate() (err error) {\n\n\treturn\n}",
"func (o *ResourceInfoNotFoundBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UpdateEventParams) SetBody(body *models.Event) {\n\to.Body = body\n}",
"func (payload *CreateFilterPayload) Validate() (err error) {\n\tif payload.Alias == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"alias\"))\n\t}\n\tif payload.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"name\"))\n\t}\n\treturn\n}",
"func (r *ScanRequest) Create(evt *cloudformationevt.Event, ctx *runtime.Context) (string, interface{}, error) {\n // TODO: export each property as an environment variable\n evt.PhysicalResourceID = customres.NewPhysicalResourceID(evt)\n return r.Update(evt, ctx)\n}",
"func (webhook *VSphereFailureDomainWebhook) ValidateCreate(_ context.Context, raw runtime.Object) (admission.Warnings, error) {\n\tvar allErrs field.ErrorList\n\n\tobj, ok := raw.(*infrav1.VSphereFailureDomain)\n\tif !ok {\n\t\treturn nil, apierrors.NewBadRequest(fmt.Sprintf(\"expected a VSphereFailureDomain but got a %T\", raw))\n\t}\n\tif obj.Spec.Topology.ComputeCluster == nil && obj.Spec.Topology.Hosts != nil {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\", \"Topology\", \"ComputeCluster\"), \"cannot be empty if Hosts is not empty\"))\n\t}\n\n\tif obj.Spec.Region.Type == infrav1.HostGroupFailureDomain {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\", \"Region\", \"Type\"), fmt.Sprintf(\"region's Failure Domain type cannot be %s\", obj.Spec.Region.Type)))\n\t}\n\n\tif obj.Spec.Zone.Type == infrav1.HostGroupFailureDomain && obj.Spec.Topology.Hosts == nil {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\", \"Topology\", \"Hosts\"), fmt.Sprintf(\"cannot be nil if zone's Failure Domain type is %s\", obj.Spec.Zone.Type)))\n\t}\n\n\tif obj.Spec.Region.Type == infrav1.ComputeClusterFailureDomain && obj.Spec.Topology.ComputeCluster == nil {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\", \"Topology\", \"ComputeCluster\"), fmt.Sprintf(\"cannot be nil if region's Failure Domain type is %s\", obj.Spec.Region.Type)))\n\t}\n\n\tif obj.Spec.Zone.Type == infrav1.ComputeClusterFailureDomain && obj.Spec.Topology.ComputeCluster == nil {\n\t\tallErrs = append(allErrs, field.Forbidden(field.NewPath(\"spec\", \"Topology\", \"ComputeCluster\"), fmt.Sprintf(\"cannot be nil if zone's Failure Domain type is %s\", obj.Spec.Zone.Type)))\n\t}\n\n\treturn nil, aggregateObjErrors(obj.GroupVersionKind().GroupKind(), obj.Name, allErrs)\n}",
"func (o *CreateEventAlertConditionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.EventAlertCondition != nil {\n\t\tif err := r.SetBodyParam(o.EventAlertCondition); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func validateCreateTestExonerationRequest(req *pb.CreateTestExonerationRequest, requireInvocation bool) error {\n\tif requireInvocation || req.Invocation != \"\" {\n\t\tif err := pbutil.ValidateInvocationName(req.Invocation); err != nil {\n\t\t\treturn errors.Annotate(err, \"invocation\").Err()\n\t\t}\n\t}\n\n\tex := req.GetTestExoneration()\n\tif err := pbutil.ValidateTestID(ex.GetTestId()); err != nil {\n\t\treturn errors.Annotate(err, \"test_exoneration: test_id\").Err()\n\t}\n\tif err := pbutil.ValidateVariant(ex.GetVariant()); err != nil {\n\t\treturn errors.Annotate(err, \"test_exoneration: variant\").Err()\n\t}\n\n\thasVariant := len(ex.GetVariant().GetDef()) != 0\n\thasVariantHash := ex.VariantHash != \"\"\n\tif hasVariant && hasVariantHash {\n\t\tcomputedHash := pbutil.VariantHash(ex.GetVariant())\n\t\tif computedHash != ex.VariantHash {\n\t\t\treturn errors.Reason(\"computed and supplied variant hash don't match\").Err()\n\t\t}\n\t}\n\n\tif err := pbutil.ValidateRequestID(req.RequestId); err != nil {\n\t\treturn errors.Annotate(err, \"request_id\").Err()\n\t}\n\n\tif ex.ExplanationHtml == \"\" {\n\t\treturn errors.Reason(\"test_exoneration: explanation_html: unspecified\").Err()\n\t}\n\tif ex.Reason == pb.ExonerationReason_EXONERATION_REASON_UNSPECIFIED {\n\t\treturn errors.Reason(\"test_exoneration: reason: unspecified\").Err()\n\t}\n\treturn nil\n}",
"func (o *DeleteOrgNotFoundBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *IndicatorCreateV1Params) SetBody(body *models.APIIndicatorCreateReqsV1) {\n\to.Body = body\n}",
"func (o *AddExternalDefaultBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateDetails(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostIPAMIpsOKBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *SaveBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateHash(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateKeyPair(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func NewCreateanewEmergencyMappingContainerRequest(server string, body CreateanewEmergencyMappingContainerJSONRequestBody) (*http.Request, error) {\n\tvar bodyReader io.Reader\n\tbuf, err := json.Marshal(body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tbodyReader = bytes.NewReader(buf)\n\treturn NewCreateanewEmergencyMappingContainerRequestWithBody(server, \"application/json\", bodyReader)\n}",
"func (o *AddExternalOKBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateExternalExporter(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateService(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostSilencesOKBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (r *SearchAuditLogsOptionalParameters) WithBody(body AuditLogsSearchEventsRequest) *SearchAuditLogsOptionalParameters {\n\tr.Body = &body\n\treturn r\n}",
"func (payload *CreateItemPayload) Validate() (err error) {\n\tif payload.Name == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"name\"))\n\t}\n\tif payload.Description == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"description\"))\n\t}\n\n\tif payload.Image1 == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"image1\"))\n\t}\n\treturn\n}",
"func (o *PostDocumentFieldextractOKBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (c *ClientWithResponses) CreateElasticIpWithBodyWithResponse(ctx context.Context, contentType string, body io.Reader) (*CreateElasticIpResponse, error) {\n\trsp, err := c.CreateElasticIpWithBody(ctx, contentType, body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn ParseCreateElasticIpResponse(rsp)\n}",
"func (payload *postEventPayload) Validate() (err error) {\n\tif payload.Etype == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"etype\"))\n\t}\n\tif payload.Action == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"action\"))\n\t}\n\tif payload.From == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"from\"))\n\t}\n\treturn\n}",
"func (a AddEventRequest) Validate() error {\n\tif err := contracts.Validate(a); err != nil {\n\t\treturn err\n\t}\n\n\t// BaseReading has the skip(\"-\") validation annotation for BinaryReading and SimpleReading\n\t// Otherwise error will occur as only one of them exists\n\t// Therefore, need to validate the nested BinaryReading and SimpleReading struct here\n\tfor _, r := range a.Event.Readings {\n\t\tif err := r.Validate(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func EventCreate(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\teventId, _ := strconv.ParseInt(vars[\"eventId\"], 10, 64)\n\n\tevent, err := event.EventGetById(userId)\n\n\tif err == nil {\n\t\tresponse.Success(w, event)\n\t} else {\n\t\tresponse.Fail(w, http.StatusNotFound, err.Error())\n\t}\n}",
"func (m *MonitoringAlertRecordsRequestBuilder) Post(ctx context.Context, body i2edb12705e6a63a8a0fb3f8c7a11f4ab12f4be764e61fa1094f401595fb171bf.AlertRecordable, requestConfiguration *MonitoringAlertRecordsRequestBuilderPostRequestConfiguration)(i2edb12705e6a63a8a0fb3f8c7a11f4ab12f4be764e61fa1094f401595fb171bf.AlertRecordable, error) {\n requestInfo, err := m.ToPostRequestInformation(ctx, body, requestConfiguration);\n if err != nil {\n return nil, err\n }\n errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {\n \"4XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n \"5XX\": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,\n }\n res, err := m.BaseRequestBuilder.RequestAdapter.Send(ctx, requestInfo, i2edb12705e6a63a8a0fb3f8c7a11f4ab12f4be764e61fa1094f401595fb171bf.CreateAlertRecordFromDiscriminatorValue, errorMapping)\n if err != nil {\n return nil, err\n }\n if res == nil {\n return nil, nil\n }\n return res.(i2edb12705e6a63a8a0fb3f8c7a11f4ab12f4be764e61fa1094f401595fb171bf.AlertRecordable), nil\n}",
"func (ut *eventPayload) Validate() (err error) {\n\tif ut.SportID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"sportId\"))\n\t}\n\tif ut.EventID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"eventId\"))\n\t}\n\tif ut.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"name\"))\n\t}\n\tif ut.SubTitle == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"subTitle\"))\n\t}\n\tif ut.StartDtTm == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"startDtTm\"))\n\t}\n\tif ut.EndDtTm == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"endDtTm\"))\n\t}\n\tif ut.LocationID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"locationId\"))\n\t}\n\tif ut.TeamAdvanceMethod == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`request`, \"teamAdvanceMethod\"))\n\t}\n\treturn\n}",
"func ValidateCreateWaitlistEntryConflictResponseBody(body *CreateWaitlistEntryConflictResponseBody) (err error) {\n\tif body.Name == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"name\", \"body\"))\n\t}\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Message == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"message\", \"body\"))\n\t}\n\tif body.Temporary == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"temporary\", \"body\"))\n\t}\n\tif body.Timeout == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"timeout\", \"body\"))\n\t}\n\tif body.Fault == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"fault\", \"body\"))\n\t}\n\treturn\n}",
"func (o *CreateRepoNotificationParams) SetBody(body *models.NotificationCreateRequest) {\n\to.Body = body\n}",
"func (payload *PostEventPayload) Validate() (err error) {\n\tif payload.Etype == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"etype\"))\n\t}\n\tif payload.Action == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"action\"))\n\t}\n\tif payload.From == \"\" {\n\t\terr = goa.MergeErrors(err, goa.MissingAttributeError(`raw`, \"from\"))\n\t}\n\treturn\n}",
"func (o *JudgeInternalServerErrorBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *GetHelloOKBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UtilTestBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateCommand(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateResponse(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateStatus(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (options *CreateLoadBalancerMonitorOptions) SetExpectedBody(expectedBody string) *CreateLoadBalancerMonitorOptions {\n\toptions.ExpectedBody = core.StringPtr(expectedBody)\n\treturn options\n}",
"func (client *AvailabilityGroupListenersClient) createOrUpdateCreateRequest(ctx context.Context, resourceGroupName string, sqlVirtualMachineGroupName string, availabilityGroupListenerName string, parameters AvailabilityGroupListener, options *AvailabilityGroupListenersClientBeginCreateOrUpdateOptions) (*policy.Request, error) {\n\turlPath := \"/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.SqlVirtualMachine/sqlVirtualMachineGroups/{sqlVirtualMachineGroupName}/availabilityGroupListeners/{availabilityGroupListenerName}\"\n\tif resourceGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter resourceGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{resourceGroupName}\", url.PathEscape(resourceGroupName))\n\tif sqlVirtualMachineGroupName == \"\" {\n\t\treturn nil, errors.New(\"parameter sqlVirtualMachineGroupName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{sqlVirtualMachineGroupName}\", url.PathEscape(sqlVirtualMachineGroupName))\n\tif availabilityGroupListenerName == \"\" {\n\t\treturn nil, errors.New(\"parameter availabilityGroupListenerName cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{availabilityGroupListenerName}\", url.PathEscape(availabilityGroupListenerName))\n\tif client.subscriptionID == \"\" {\n\t\treturn nil, errors.New(\"parameter client.subscriptionID cannot be empty\")\n\t}\n\turlPath = strings.ReplaceAll(urlPath, \"{subscriptionId}\", url.PathEscape(client.subscriptionID))\n\treq, err := runtime.NewRequest(ctx, http.MethodPut, runtime.JoinPaths(client.internal.Endpoint(), urlPath))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treqQP := req.Raw().URL.Query()\n\treqQP.Set(\"api-version\", \"2022-08-01-preview\")\n\treq.Raw().URL.RawQuery = reqQP.Encode()\n\treq.Raw().Header[\"Accept\"] = []string{\"application/json\"}\n\treturn req, runtime.MarshalAsJSON(req, parameters)\n}",
"func (h *Handlers) CreateEventBatch(w http.ResponseWriter, r *http.Request) {\n\tctx := r.Context()\n\tvar rec []events.AWSEvent\n\n\tif err := io.DecodeJSONBody(w, r, &rec); err != nil {\n\t\tio.RespondError(ctx, h.Log, w, err)\n\t\treturn\n\t}\n\n\th.Log.With(\"events\", rec).Info(\"received events\")\n\n\tadvisor := recommendations.NewAdvisor()\n\n\tvar res CreateEventBatchResponse\n\tfor _, e := range rec {\n\t\t// censor info if in demo mode\n\t\tif h.Demo {\n\t\t\te.Identity.User = \"iamzero-test-user\"\n\t\t\te.Identity.Account = \"123456789\"\n\t\t}\n\n\t\tadvice, err := advisor.Advise(e)\n\t\tif err != nil {\n\t\t\tio.RespondError(ctx, h.Log, w, err)\n\t\t\treturn\n\t\t} else {\n\t\t\th.Log.With(\"advice\", advice).Info(\"matched advisor recommendation\")\n\t\t}\n\n\t\talert := recommendations.AWSAlert{\n\t\t\tID: uuid.NewString(),\n\t\t\tEvent: e,\n\t\t\tStatus: events.AlertActive,\n\t\t\tTime: time.Now(),\n\t\t\tHasRecommendations: false,\n\t\t}\n\n\t\tres.AlertIDs = append(res.AlertIDs, alert.ID)\n\n\t\tif len(advice) > 0 {\n\t\t\talert.HasRecommendations = true\n\t\t\talert.Recommendations = advice\n\t\t}\n\n\t\th.Log.With(\"alert\", alert).Info(\"adding alert\")\n\t\th.Storage.Add(alert)\n\t}\n\n\tio.RespondJSON(ctx, h.Log, w, res, http.StatusAccepted)\n}",
"func CreatePutMetricAlarmRequest() (request *PutMetricAlarmRequest) {\n\trequest = &PutMetricAlarmRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"Cms\", \"2018-03-08\", \"PutMetricAlarm\", \"cms\", \"openAPI\")\n\treturn\n}",
"func CreateModifyOfficeSiteAttributeRequest() (request *ModifyOfficeSiteAttributeRequest) {\n\trequest = &ModifyOfficeSiteAttributeRequest{\n\t\tRpcRequest: &requests.RpcRequest{},\n\t}\n\trequest.InitWithApiInfo(\"ecd\", \"2020-09-30\", \"ModifyOfficeSiteAttribute\", \"\", \"\")\n\trequest.Method = requests.POST\n\treturn\n}"
] | [
"0.6171052",
"0.6097584",
"0.60782963",
"0.57583094",
"0.569712",
"0.5620338",
"0.5547921",
"0.54595846",
"0.53731334",
"0.5274153",
"0.52110165",
"0.514304",
"0.5132416",
"0.49500784",
"0.49387568",
"0.4919972",
"0.48995632",
"0.48722732",
"0.4861128",
"0.4841427",
"0.48275897",
"0.4815935",
"0.48025185",
"0.4780285",
"0.47438708",
"0.47418737",
"0.4725626",
"0.46842453",
"0.46739987",
"0.46536398",
"0.46103436",
"0.4592331",
"0.45911446",
"0.45464307",
"0.45374793",
"0.45262957",
"0.45049113",
"0.45034686",
"0.44980597",
"0.44946322",
"0.44933152",
"0.4485352",
"0.44729775",
"0.447106",
"0.44642216",
"0.44529542",
"0.44526508",
"0.4420052",
"0.4395368",
"0.43946376",
"0.43793768",
"0.4367411",
"0.4357745",
"0.43562233",
"0.43517298",
"0.43475512",
"0.4342053",
"0.43388063",
"0.43373367",
"0.43169433",
"0.43093264",
"0.43074355",
"0.4304827",
"0.43014705",
"0.4301006",
"0.4297893",
"0.42958504",
"0.4294681",
"0.42928144",
"0.42901933",
"0.4288087",
"0.4284829",
"0.42839053",
"0.42661768",
"0.42659456",
"0.42600873",
"0.42587167",
"0.42558298",
"0.42556775",
"0.42528936",
"0.42474753",
"0.42398027",
"0.42374593",
"0.4235658",
"0.42354342",
"0.423503",
"0.42325106",
"0.42296463",
"0.42276216",
"0.42215204",
"0.42198282",
"0.42197976",
"0.4217353",
"0.42056343",
"0.41974148",
"0.41935763",
"0.41933048",
"0.41926494",
"0.41912663",
"0.41865787"
] | 0.698079 | 0 |
SetBody adds the body to the validate create anomaly detection disk event params | func (o *ValidateCreateAnomalyDetectionDiskEventParams) SetBody(body *dynatrace.DiskEventAnomalyDetectionConfig) {
o.Body = body
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *ValidateCreateServiceRequestNamingParams) SetBody(body *dynatrace.RequestNaming) {\n\to.Body = body\n}",
"func (o *UpdateEventParams) SetBody(body *models.Event) {\n\to.Body = body\n}",
"func (o *CreateDashboardRenderTaskParams) SetBody(body *models.CreateDashboardRenderTask) {\n\to.Body = body\n}",
"func (o *ValidateCreateAnomalyDetectionDiskEventParams) WithBody(body *dynatrace.DiskEventAnomalyDetectionConfig) *ValidateCreateAnomalyDetectionDiskEventParams {\n\to.SetBody(body)\n\treturn o\n}",
"func (o *IndicatorCreateV1Params) SetBody(body *models.APIIndicatorCreateReqsV1) {\n\to.Body = body\n}",
"func (o *ValidateParams) SetBody(body *models.APIValidationRequestV1) {\n\to.Body = body\n}",
"func (o *CreateRepoNotificationParams) SetBody(body *models.NotificationCreateRequest) {\n\to.Body = body\n}",
"func (o *CreateDatabaseOnServerParams) SetBody(body *model.CreateDatabaseV4Request) {\n\to.Body = body\n}",
"func (o *TestEmailConfigurationParams) SetBody(body string) {\n\to.Body = body\n}",
"func (o *UpdateBuildPropertiesParams) SetBody(body *models.JSONPatchDocument) {\n\to.Body = body\n}",
"func (o *BudgetAddParams) SetBody(body *models.BudgetAddRequest) {\n\to.Body = body\n}",
"func (body *CreateResponseBody) Validate() (err error) {\n\tif body.ID == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"id\", \"body\"))\n\t}\n\tif body.Color == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"color\", \"body\"))\n\t}\n\tif body.Cron == nil {\n\t\terr = goa.MergeErrors(err, goa.MissingFieldError(\"cron\", \"body\"))\n\t}\n\tif body.Name != nil {\n\t\tif utf8.RuneCountInString(*body.Name) > 100 {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidLengthError(\"body.name\", *body.Name, utf8.RuneCountInString(*body.Name), 100, false))\n\t\t}\n\t}\n\tif body.Color != nil {\n\t\tif !(*body.Color == \"red\" || *body.Color == \"yellow\" || *body.Color == \"green\" || *body.Color == \"off\") {\n\t\t\terr = goa.MergeErrors(err, goa.InvalidEnumValueError(\"body.color\", *body.Color, []interface{}{\"red\", \"yellow\", \"green\", \"off\"}))\n\t\t}\n\t}\n\treturn\n}",
"func (o *CreateBlueprintInWorkspaceInternalParams) SetBody(body *model.BlueprintV4Request) {\n\to.Body = body\n}",
"func (o *RTRExecuteActiveResponderCommandParams) SetBody(body *models.DomainCommandExecuteRequest) {\n\to.Body = body\n}",
"func (o *VolumeCreateBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *SwarmUpdateParams) SetBody(body *models.SwarmSpec) {\n\to.Body = body\n}",
"func (c *baseClient) SetBody(params interface{}) *baseClient {\n\tc.body = params\n\treturn c\n}",
"func (o *CreateTenantParams) SetBody(body *kbmodel.Tenant) {\n\to.Body = body\n}",
"func ValidateRequestBody(r http.Header, body []byte, signingSecret string) error {\n\tsv, err := slack.NewSecretsVerifier(r, signingSecret)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif _, err := sv.Write(body); err != nil {\n\t\treturn err\n\t}\n\tif err := sv.Ensure(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (o *PostHyperflexAutoSupportPoliciesParams) SetBody(body *models.HyperflexAutoSupportPolicy) {\n\to.Body = body\n}",
"func (o *PatchAssetDeviceConfigurationsMoidParams) SetBody(body *models.AssetDeviceConfiguration) {\n\to.Body = body\n}",
"func (o *ValidateUpdateSymfilePinningParams) SetBody(body *dynatrace.SymbolFilePinning) {\n\to.Body = body\n}",
"func (o *UpdateTableMetadataParams) SetBody(body string) {\n\to.Body = body\n}",
"func (_options *SendNotificationsOptions) SetBody(body *NotificationCreate) *SendNotificationsOptions {\n\t_options.Body = body\n\treturn _options\n}",
"func (o *PetCreateParams) SetBody(body *models.Pet) {\n\to.Body = body\n}",
"func (o *CreateAccessPolicyParams) SetBody(body *models.AccessPolicyEntity) {\n\to.Body = body\n}",
"func (options *CreateLoadBalancerMonitorOptions) SetExpectedBody(expectedBody string) *CreateLoadBalancerMonitorOptions {\n\toptions.ExpectedBody = core.StringPtr(expectedBody)\n\treturn options\n}",
"func (o *PcloudIkepoliciesPutParams) SetBody(body *models.IKEPolicyUpdate) {\n\to.Body = body\n}",
"func (o *PutOrganizationProjectApisBuildDefinitionsDefinitionIDParams) SetBody(body *models.BuildDefinition) {\n\to.Body = body\n}",
"func (o *RejectLogoutRequestParams) SetBody(body *models.RejectRequest) {\n\to.Body = body\n}",
"func (o *AddVMParams) SetBody(body *models.VM) {\n\to.Body = body\n}",
"func (r *Request) SetBody(b []byte) {\n\tr.body = b\n}",
"func (o *SaveTemplateParams) SetBody(body *models.BuildDefinitionTemplate) {\n\to.Body = body\n}",
"func (o *UpdateStockReceiptParams) SetBody(body *models.StockReceipt) {\n\to.Body = body\n}",
"func (o *FetchIntegrationFormParams) SetBody(body map[string]string) {\n\to.Body = body\n}",
"func ValidateEventPayloadRequestBody(body *EventPayloadRequestBody) (err error) {\n\terr = goa.MergeErrors(err, goa.ValidateFormat(\"body.start\", body.Start, goa.FormatDateTime))\n\n\tif body.End != nil {\n\t\terr = goa.MergeErrors(err, goa.ValidateFormat(\"body.end\", *body.End, goa.FormatDateTime))\n\t}\n\treturn\n}",
"func (o *NotifyStateChangedParams) SetBody(body *kbmodel.PaymentTransaction) {\n\to.Body = body\n}",
"func (o *DeleteBlueprintsInWorkspaceParams) SetBody(body []string) {\n\to.Body = body\n}",
"func (o *UpdateAutoTagParams) SetBody(body *dynatrace.AutoTag) {\n\to.Body = body\n}",
"func (o *ResendFaxParams) SetBody(body *models.ResendFaxRequest) {\n\to.Body = body\n}",
"func (r ApiApiProjectsIdDeploymentsPostRequest) Body(body RequestsCreateProjectDeploymentRequest) ApiApiProjectsIdDeploymentsPostRequest {\n\tr.body = &body\n\treturn r\n}",
"func (o *DeleteDatasetCreatedBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *UpdateFlowParams) SetBody(body *models.VersionedFlow) {\n\to.Body = body\n}",
"func (o *PostDocumentMergeParams) SetBody(body PostDocumentMergeBody) {\n\to.Body = body\n}",
"func (o *BarParams) SetBody(body BarBody) {\n\to.Body = body\n}",
"func (options *EditLoadBalancerMonitorOptions) SetExpectedBody(expectedBody string) *EditLoadBalancerMonitorOptions {\n\toptions.ExpectedBody = core.StringPtr(expectedBody)\n\treturn options\n}",
"func (o *ServiceCreateCreatedBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *StartPostgreSQLShowCreateTableActionParams) SetBody(body StartPostgreSQLShowCreateTableActionBody) {\n\to.Body = body\n}",
"func (o *UserPasswordCheckParams) SetBody(body *models.UserPasswordCheckRequest) {\n\to.Body = body\n}",
"func (o *PublicInteractiveLoginCredentialParams) SetBody(body *model.CredentialRequest) {\n\to.Body = body\n}",
"func (o *PcloudPvminstancesSnapshotsRestorePostParams) SetBody(body *models.SnapshotRestore) {\n\to.Body = body\n}",
"func (o *ScheduledPlanRunOnceByIDParams) SetBody(body *models.WriteScheduledPlan) {\n\to.Body = body\n}",
"func (o *GetPointsByQueryParams) SetBody(body *models.TestPointsQuery) {\n\to.Body = body\n}",
"func (o *PatchAddonParams) SetBody(body *models.Addon) {\n\to.Body = body\n}",
"func (o *PostSiteCreatedBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateData(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AddExternalBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *DeleteKeyPairsParams) SetBody(body *models.OpenpitrixDeleteKeyPairsRequest) {\n\to.Body = body\n}",
"func (o *PcloudV1CloudinstancesCosimagesPostParams) SetBody(body *models.CreateCosImageImportJob) {\n\to.Body = body\n}",
"func (o *AddServerGroupInUpstreamParams) SetBody(body *vproxy_client_model.ServerGroupInUpstreamCreate) {\n\to.Body = body\n}",
"func (o *RepoUpdateTopicsParams) SetBody(body *models.RepoTopicOptions) {\n\to.Body = body\n}",
"func (o *UploadPluginParams) SetBody(body dynatrace.InputStream) {\n\to.Body = body\n}",
"func (o *ServiceBindingBindingParams) SetBody(body *models.ServiceBindingRequest) {\n\to.Body = body\n}",
"func (s *server) createEvent(w http.ResponseWriter, r *http.Request, _ httprouter.Params) {\n\tdefer r.Body.Close()\n\n\t// Read the body out into a buffer.\n\tbuf, err := ioutil.ReadAll(r.Body)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// Read the body as generic JSON, so we can perform JDDF validation on it.\n\t//\n\t// If the request body is invalid JSON, send the user a 400 Bad Request.\n\tvar eventRaw interface{}\n\tif err := json.Unmarshal(buf, &eventRaw); err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// Validate the event (in eventRaw) against our schema for JDDF events.\n\t//\n\t// In practice, there will never be errors arising here -- see the jddf-go\n\t// docs for details, but basically jddf.Validator.Validate can only error if\n\t// you use \"ref\" in a cyclic manner in your schemas.\n\t//\n\t// Therefore, we ignore the possibility of an error here.\n\tvalidator := jddf.Validator{}\n\tvalidationResult, _ := validator.Validate(s.EventSchema, eventRaw)\n\n\t// If there were validation errors, then we write them out to the response\n\t// body, and send the user a 400 Bad Request.\n\tif len(validationResult.Errors) != 0 {\n\t\tencoder := json.NewEncoder(w)\n\t\tif err := encoder.Encode(validationResult.Errors); err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tfmt.Fprintf(w, \"%s\", err)\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\t// If we made it here, the request body contained JSON that passed our schema.\n\t// Let's now write it into the database.\n\t//\n\t// The events table has a \"payload\" column of type \"jsonb\". In Golang-land,\n\t// you can send that to Postgres by just using []byte. The user's request\n\t// payload is already in that format, so we'll use that.\n\t_, err = s.DB.ExecContext(r.Context(), `\n\t\tinsert into events (payload) values ($1)\n\t`, buf)\n\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tfmt.Fprintf(w, \"%s\", err)\n\t\treturn\n\t}\n\n\t// We're done!\n\tw.WriteHeader(http.StatusOK)\n\tfmt.Fprintf(w, \"%s\", buf)\n}",
"func (m *GaragesCreateBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := m.validateCentreID(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateLocations(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif err := m.validateName(formats); err != nil {\n\t\t// prop\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *NotificationConfig) SetBody(v string) {\n\to.Body = &v\n}",
"func (o *AutoscaleStopInstancesByCrnParams) SetBody(body []string) {\n\to.Body = body\n}",
"func (o *CreateInstantPaymentParams) SetBody(body *kbmodel.InvoicePayment) {\n\to.Body = body\n}",
"func (zr *ZRequest) SetBody(body interface{}) *ZRequest {\n\tif zr.ended {\n\t\treturn zr\n\t}\n\tzr.body = body\n\treturn zr\n}",
"func (o *CreateDatasetBody) Validate(formats strfmt.Registry) error {\n\tvar res []error\n\n\tif err := o.validateAccess(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateComp(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateCompressMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateEnc(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateMetadata(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validateProject(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif err := o.validatePushMethod(formats); err != nil {\n\t\tres = append(res, err)\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SendJobCommandParams) SetBody(body SendJobCommandBody) {\n\to.Body = body\n}",
"func (o *HandleReturnParams) SetBody(body *models.ReturnRequest) {\n\to.Body = body\n}",
"func (o *VerifyCallerIDParams) SetBody(body *models.VerifySmsCallerIDRequest) {\n\to.Body = body\n}",
"func (o *CreateScheduledPlanParams) SetBody(body *models.ScheduledPlan) {\n\to.Body = body\n}",
"func (o *PutNmsUpdateParams) SetBody(body *models.Model113) {\n\to.Body = body\n}",
"func (x *NetworkInfoRequest) SetBody(v *NetworkInfoRequest_Body) {\n\tif x != nil {\n\t\tx.Body = v\n\t}\n}",
"func (o *UpdateOrganizationTeamParams) SetBody(body *models.TeamDescription) {\n\to.Body = body\n}",
"func (s *SendApiAssetInput) SetBody(v string) *SendApiAssetInput {\n\ts.Body = &v\n\treturn s\n}",
"func (o *PostLeasesIDAuthCreatedBody) Validate(formats strfmt.Registry) error {\n\treturn nil\n}",
"func (o *SetBuildQueuePositionParams) SetBody(body *models.Build) {\n\to.Body = body\n}",
"func (s *SendApiAssetOutput) SetBody(v string) *SendApiAssetOutput {\n\ts.Body = &v\n\treturn s\n}",
"func (o *SetFinishedTimeParams) SetBody(body string) {\n\to.Body = body\n}",
"func (o *TradingTableUnsubscribeTradingTableParams) SetBody(body *models.UnsubscribeTradingTablesDefinition) {\n\to.Body = body\n}",
"func (o *SetUserAttributeGroupValuesParams) SetBody(body []*models.UserAttributeGroupValue) {\n\to.Body = body\n}",
"func (o *SearchWorkspacesParams) SetBody(body *models.RestSearchWorkspaceRequest) {\n\to.Body = body\n}",
"func (h *HTTP) SetBody(body []byte) {\n\th.body = body\n}",
"func (o *PostAPIV10PeerReviewsParams) SetBody(body *models.PeerReviewTask) {\n\to.Body = body\n}",
"func (o *UpdateFolderParams) SetBody(body *models.UpdateFolder) {\n\to.Body = body\n}",
"func (o *CustomerGatewayUpdateOwnershipParams) SetBody(body *models.V1UpdateOwnershipRequest) {\n\to.Body = body\n}",
"func (o *AddBranchParams) SetBody(body AddBranchBody) {\n\to.Body = body\n}",
"func (o *SayParams) SetBody(body string) {\n\to.Body = body\n}",
"func (o *CreateIscsiLunSnapshotParams) SetRequestBody(requestBody []*models.IscsiLunSnapshotCreationParams) {\n\to.RequestBody = requestBody\n}",
"func (o *PutClusterForAutoscaleParams) SetBody(body *model.UpdateClusterV4Request) {\n\to.Body = body\n}",
"func (o *TradingTableSubscribeTradingTableParams) SetBody(body *models.SubscribeTradingTablesDefinition) {\n\to.Body = body\n}",
"func (s *BaseEvent) ParseBody( body []byte, parseAll bool ) error {\n s.part1.EventType = EV_UNKNOWN;\n var numSections,part1Size,part2Size,sysSize,execSize uint\n\n if numParams,err:=fmt.Sscanf(string(body), \"%02d,1,%06d,1,%06d,1,%06d,1,%06d\\n\",&numSections,&part1Size,&part2Size,&sysSize,&execSize); numParams!=5 {\n s.errString = fmt.Sprintf( \"ParseBody error: only parsed %d parameters from frame:'%s' - err:\", numParams, body ) + err.Error()\n return s\n } // if\n if numSections != 4 {\n s.errString = fmt.Sprintf( \"ParseBody error expected 4 sections found %d\", numSections )\n return s\n } // if\n if part1Size == 0 {\n s.errString = \"ParseBody error part1 cannot be empty\"\n return s\n } // if\n\n startOffset := uint(BLOCK_HEADER_LEN);\n s.part1Json = body[startOffset:startOffset+part1Size]\n startOffset += part1Size\n if err:=s.parsePart1(); err != nil { return err }\n \n if part2Size > 0 {\n s.part2Json = body[startOffset:startOffset+part2Size]\n startOffset += part2Size\n if parseAll {\n if err:=s.parsePart2(); err != nil { return err }\n s.part2JsonExtracted = true\n } else {\n s.part2JsonExtracted = false\n } // else parseAll\n\n } // if\n if sysSize > 0 {\n s.sysParamsJson = body[startOffset:startOffset+sysSize]\n startOffset += sysSize\n if parseAll {\n if err:=s.parseSysParams(); err != nil { return err }\n s.sysParamsExtracted = true\n } else {\n s.sysParamsExtracted = false\n } // else parseAll\n } // if\n if execSize > 0 {\n s.execParamsJson = body[startOffset:startOffset+execSize]\n startOffset += execSize\n if parseAll {\n if err:=s.parseExecParams(); err != nil { return err }\n s.execParamsExtracted = true\n } else {\n s.execParamsExtracted = false\n } // else parseAll\n } // if\n\n Log.Printf( \"parseBody: sections:%d part1Size:%d part2Size:%d sysSize:%d execSize:%d\",numSections,part1Size,part2Size,sysSize,execSize )\n return nil\n}",
"func (o *UpdateRowParams) SetBody(body *models.RowUpdate) {\n\to.Body = body\n}",
"func Body(data ...interface{}) AdditionalAttribute {\n return func(rb *Builder) error {\n rb.SetBody(data...)\n return nil\n }\n}",
"func (o *UpdateCredentialParams) SetBody(body dynatrace.Credentials) {\n\to.Body = body\n}",
"func (HTTPOperation) SetRequestBody(time time.Time, inputType api.InputTypeEnum, location int, numberAvailable int, numberTotal *int, tags *string, vaccine *int) error {\n\tbody.Date = time\n\tbody.InputType = inputType\n\tbody.Location = location\n\tbody.NumberAvailable = numberAvailable\n\tbody.NumberTotal = numberTotal\n\tbody.Tags = tags\n\tbody.Vaccine = vaccine\n\treturn nil\n}",
"func (s *ExecuteScriptInput) SetBody(v string) *ExecuteScriptInput {\n\ts.Body = &v\n\treturn s\n}",
"func (o *PatchStorageVirtualDriveExtensionsMoidParams) SetBody(body *models.StorageVirtualDriveExtension) {\n\to.Body = body\n}"
] | [
"0.6159029",
"0.6080955",
"0.60065275",
"0.5994568",
"0.59874004",
"0.59266406",
"0.5898191",
"0.5773824",
"0.5687035",
"0.56381845",
"0.5631992",
"0.56269395",
"0.561802",
"0.5585539",
"0.5528684",
"0.5472717",
"0.5429385",
"0.53950125",
"0.53935003",
"0.5375957",
"0.5372451",
"0.5370695",
"0.5337932",
"0.53163034",
"0.53097993",
"0.527805",
"0.52511406",
"0.5239485",
"0.5228316",
"0.5227484",
"0.5214057",
"0.51978064",
"0.51960015",
"0.5195108",
"0.51890755",
"0.51890016",
"0.51878333",
"0.5181444",
"0.5172751",
"0.51712054",
"0.5164762",
"0.5135019",
"0.5126265",
"0.51214856",
"0.5119929",
"0.5119633",
"0.51182187",
"0.5117592",
"0.50929576",
"0.5085647",
"0.50851125",
"0.50816524",
"0.50729036",
"0.5062903",
"0.50554305",
"0.5054014",
"0.50509113",
"0.5042391",
"0.5034209",
"0.5032498",
"0.503236",
"0.50304127",
"0.50210553",
"0.50140226",
"0.50032365",
"0.50017285",
"0.49990448",
"0.49966133",
"0.4996201",
"0.49951887",
"0.49947202",
"0.49932906",
"0.49913877",
"0.49846867",
"0.49846753",
"0.49786624",
"0.49761444",
"0.49748626",
"0.4972666",
"0.49669454",
"0.49584696",
"0.49519864",
"0.49510106",
"0.49449995",
"0.49326253",
"0.49199146",
"0.4919167",
"0.49187502",
"0.49139538",
"0.4913694",
"0.49122205",
"0.4911358",
"0.49098912",
"0.490531",
"0.48926648",
"0.48921922",
"0.48900414",
"0.48871443",
"0.48809248",
"0.48690256"
] | 0.7403387 | 0 |
WriteToRequest writes these params to a swagger request | func (o *ValidateCreateAnomalyDetectionDiskEventParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
if err := r.SetTimeout(o.timeout); err != nil {
return err
}
var res []error
if o.Body != nil {
if err := r.SetBodyParam(o.Body); err != nil {
return err
}
}
if len(res) > 0 {
return errors.CompositeValidationError(res...)
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (o *FileInfoCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ByteOffset != nil {\n\n\t\t// query param byte_offset\n\t\tvar qrByteOffset int64\n\n\t\tif o.ByteOffset != nil {\n\t\t\tqrByteOffset = *o.ByteOffset\n\t\t}\n\t\tqByteOffset := swag.FormatInt64(qrByteOffset)\n\t\tif qByteOffset != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"byte_offset\", qByteOffset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif o.Info != nil {\n\t\tif err := r.SetBodyParam(o.Info); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Overwrite != nil {\n\n\t\t// query param overwrite\n\t\tvar qrOverwrite bool\n\n\t\tif o.Overwrite != nil {\n\t\t\tqrOverwrite = *o.Overwrite\n\t\t}\n\t\tqOverwrite := swag.FormatBool(qrOverwrite)\n\t\tif qOverwrite != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"overwrite\", qOverwrite); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param path\n\tif err := r.SetPathParam(\"path\", o.Path); err != nil {\n\t\treturn err\n\t}\n\n\tif o.ReturnRecords != nil {\n\n\t\t// query param return_records\n\t\tvar qrReturnRecords bool\n\n\t\tif o.ReturnRecords != nil {\n\t\t\tqrReturnRecords = *o.ReturnRecords\n\t\t}\n\t\tqReturnRecords := swag.FormatBool(qrReturnRecords)\n\t\tif qReturnRecords != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"return_records\", qReturnRecords); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.StreamName != nil {\n\n\t\t// query param stream_name\n\t\tvar qrStreamName string\n\n\t\tif o.StreamName != nil {\n\t\t\tqrStreamName = *o.StreamName\n\t\t}\n\t\tqStreamName := qrStreamName\n\t\tif qStreamName != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"stream_name\", qStreamName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param volume.uuid\n\tif err := r.SetPathParam(\"volume.uuid\", o.VolumeUUID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Device-Id\n\tif err := r.SetHeaderParam(\"Device-Id\", o.DeviceID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.DeviceOS != nil {\n\n\t\t// header param Device-OS\n\t\tif err := r.SetHeaderParam(\"Device-OS\", *o.DeviceOS); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// path param fiscalDocumentNumber\n\tif err := r.SetPathParam(\"fiscalDocumentNumber\", swag.FormatUint64(o.FiscalDocumentNumber)); err != nil {\n\t\treturn err\n\t}\n\n\t// path param fiscalDriveNumber\n\tif err := r.SetPathParam(\"fiscalDriveNumber\", swag.FormatUint64(o.FiscalDriveNumber)); err != nil {\n\t\treturn err\n\t}\n\n\t// query param fiscalSign\n\tqrFiscalSign := o.FiscalSign\n\tqFiscalSign := swag.FormatUint64(qrFiscalSign)\n\tif qFiscalSign != \"\" {\n\t\tif err := r.SetQueryParam(\"fiscalSign\", qFiscalSign); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.SendToEmail != nil {\n\n\t\t// query param sendToEmail\n\t\tvar qrSendToEmail string\n\t\tif o.SendToEmail != nil {\n\t\t\tqrSendToEmail = *o.SendToEmail\n\t\t}\n\t\tqSendToEmail := qrSendToEmail\n\t\tif qSendToEmail != \"\" {\n\t\t\tif err := r.SetQueryParam(\"sendToEmail\", qSendToEmail); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *StartV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Environment != nil {\n\n\t\t// query param environment\n\t\tvar qrEnvironment string\n\t\tif o.Environment != nil {\n\t\t\tqrEnvironment = *o.Environment\n\t\t}\n\t\tqEnvironment := qrEnvironment\n\t\tif qEnvironment != \"\" {\n\t\t\tif err := r.SetQueryParam(\"environment\", qEnvironment); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdateAutoTagParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetIntrospectionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\tif o.ResponseAsJwt != nil {\n\n\t\t// query param response_as_jwt\n\t\tvar qrResponseAsJwt bool\n\t\tif o.ResponseAsJwt != nil {\n\t\t\tqrResponseAsJwt = *o.ResponseAsJwt\n\t\t}\n\t\tqResponseAsJwt := swag.FormatBool(qrResponseAsJwt)\n\t\tif qResponseAsJwt != \"\" {\n\t\t\tif err := r.SetQueryParam(\"response_as_jwt\", qResponseAsJwt); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param token\n\tqrToken := o.Token\n\tqToken := qrToken\n\tif qToken != \"\" {\n\t\tif err := r.SetQueryParam(\"token\", qToken); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.TokenTypeHint != nil {\n\n\t\t// query param token_type_hint\n\t\tvar qrTokenTypeHint string\n\t\tif o.TokenTypeHint != nil {\n\t\t\tqrTokenTypeHint = *o.TokenTypeHint\n\t\t}\n\t\tqTokenTypeHint := qrTokenTypeHint\n\t\tif qTokenTypeHint != \"\" {\n\t\t\tif err := r.SetQueryParam(\"token_type_hint\", qTokenTypeHint); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostContextsAddPhpParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param name\n\tqrName := o.Name\n\tqName := qrName\n\tif qName != \"\" {\n\n\t\tif err := r.SetQueryParam(\"name\", qName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Private != nil {\n\n\t\t// query param private\n\t\tvar qrPrivate int64\n\n\t\tif o.Private != nil {\n\t\t\tqrPrivate = *o.Private\n\t\t}\n\t\tqPrivate := swag.FormatInt64(qrPrivate)\n\t\tif qPrivate != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"private\", qPrivate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetInstancesDocsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\tif o.OperationID != nil {\n\n\t\t// query param operationId\n\t\tvar qrOperationID string\n\t\tif o.OperationID != nil {\n\t\t\tqrOperationID = *o.OperationID\n\t\t}\n\t\tqOperationID := qrOperationID\n\t\tif qOperationID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"operationId\", qOperationID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Version != nil {\n\n\t\t// query param version\n\t\tvar qrVersion string\n\t\tif o.Version != nil {\n\t\t\tqrVersion = *o.Version\n\t\t}\n\t\tqVersion := qrVersion\n\t\tif qVersion != \"\" {\n\t\t\tif err := r.SetQueryParam(\"version\", qVersion); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CloudTargetCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.CheckOnly != nil {\n\n\t\t// query param check_only\n\t\tvar qrCheckOnly bool\n\n\t\tif o.CheckOnly != nil {\n\t\t\tqrCheckOnly = *o.CheckOnly\n\t\t}\n\t\tqCheckOnly := swag.FormatBool(qrCheckOnly)\n\t\tif qCheckOnly != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"check_only\", qCheckOnly); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.IgnoreWarnings != nil {\n\n\t\t// query param ignore_warnings\n\t\tvar qrIgnoreWarnings bool\n\n\t\tif o.IgnoreWarnings != nil {\n\t\t\tqrIgnoreWarnings = *o.IgnoreWarnings\n\t\t}\n\t\tqIgnoreWarnings := swag.FormatBool(qrIgnoreWarnings)\n\t\tif qIgnoreWarnings != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ignore_warnings\", qIgnoreWarnings); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\tif o.Info != nil {\n\t\tif err := r.SetBodyParam(o.Info); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.ReturnRecords != nil {\n\n\t\t// query param return_records\n\t\tvar qrReturnRecords bool\n\n\t\tif o.ReturnRecords != nil {\n\t\t\tqrReturnRecords = *o.ReturnRecords\n\t\t}\n\t\tqReturnRecords := swag.FormatBool(qrReturnRecords)\n\t\tif qReturnRecords != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"return_records\", qReturnRecords); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.ReturnTimeout != nil {\n\n\t\t// query param return_timeout\n\t\tvar qrReturnTimeout int64\n\n\t\tif o.ReturnTimeout != nil {\n\t\t\tqrReturnTimeout = *o.ReturnTimeout\n\t\t}\n\t\tqReturnTimeout := swag.FormatInt64(qrReturnTimeout)\n\t\tif qReturnTimeout != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"return_timeout\", qReturnTimeout); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SayParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Sinkid != nil {\n\n\t\t// query param sinkid\n\t\tvar qrSinkid string\n\t\tif o.Sinkid != nil {\n\t\t\tqrSinkid = *o.Sinkid\n\t\t}\n\t\tqSinkid := qrSinkid\n\t\tif qSinkid != \"\" {\n\t\t\tif err := r.SetQueryParam(\"sinkid\", qSinkid); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Voiceid != nil {\n\n\t\t// query param voiceid\n\t\tvar qrVoiceid string\n\t\tif o.Voiceid != nil {\n\t\t\tqrVoiceid = *o.Voiceid\n\t\t}\n\t\tqVoiceid := qrVoiceid\n\t\tif qVoiceid != \"\" {\n\t\t\tif err := r.SetQueryParam(\"voiceid\", qVoiceid); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *HandleGetAboutUsingGETParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\t// query param apiVersion\n\tqrAPIVersion := o.APIVersion\n\tqAPIVersion := qrAPIVersion\n\n\tif err := r.SetQueryParam(\"apiVersion\", qAPIVersion); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetFileSystemParametersInternalParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AccountID != nil {\n\n\t\t// query param accountId\n\t\tvar qrAccountID string\n\t\tif o.AccountID != nil {\n\t\t\tqrAccountID = *o.AccountID\n\t\t}\n\t\tqAccountID := qrAccountID\n\t\tif qAccountID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"accountId\", qAccountID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.AccountName != nil {\n\n\t\t// query param accountName\n\t\tvar qrAccountName string\n\t\tif o.AccountName != nil {\n\t\t\tqrAccountName = *o.AccountName\n\t\t}\n\t\tqAccountName := qrAccountName\n\t\tif qAccountName != \"\" {\n\t\t\tif err := r.SetQueryParam(\"accountName\", qAccountName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.AttachedCluster != nil {\n\n\t\t// query param attachedCluster\n\t\tvar qrAttachedCluster bool\n\t\tif o.AttachedCluster != nil {\n\t\t\tqrAttachedCluster = *o.AttachedCluster\n\t\t}\n\t\tqAttachedCluster := swag.FormatBool(qrAttachedCluster)\n\t\tif qAttachedCluster != \"\" {\n\t\t\tif err := r.SetQueryParam(\"attachedCluster\", qAttachedCluster); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param blueprintName\n\tqrBlueprintName := o.BlueprintName\n\tqBlueprintName := qrBlueprintName\n\tif qBlueprintName != \"\" {\n\t\tif err := r.SetQueryParam(\"blueprintName\", qBlueprintName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// query param clusterName\n\tqrClusterName := o.ClusterName\n\tqClusterName := qrClusterName\n\tif qClusterName != \"\" {\n\t\tif err := r.SetQueryParam(\"clusterName\", qClusterName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// query param fileSystemType\n\tqrFileSystemType := o.FileSystemType\n\tqFileSystemType := qrFileSystemType\n\tif qFileSystemType != \"\" {\n\t\tif err := r.SetQueryParam(\"fileSystemType\", qFileSystemType); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Secure != nil {\n\n\t\t// query param secure\n\t\tvar qrSecure bool\n\t\tif o.Secure != nil {\n\t\t\tqrSecure = *o.Secure\n\t\t}\n\t\tqSecure := swag.FormatBool(qrSecure)\n\t\tif qSecure != \"\" {\n\t\t\tif err := r.SetQueryParam(\"secure\", qSecure); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param storageName\n\tqrStorageName := o.StorageName\n\tqStorageName := qrStorageName\n\tif qStorageName != \"\" {\n\t\tif err := r.SetQueryParam(\"storageName\", qStorageName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param workspaceId\n\tif err := r.SetPathParam(\"workspaceId\", swag.FormatInt64(o.WorkspaceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ServeBuildFieldShortParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param btLocator\n\tif err := r.SetPathParam(\"btLocator\", o.BtLocator); err != nil {\n\t\treturn err\n\t}\n\n\t// path param buildLocator\n\tif err := r.SetPathParam(\"buildLocator\", o.BuildLocator); err != nil {\n\t\treturn err\n\t}\n\n\t// path param field\n\tif err := r.SetPathParam(\"field\", o.Field); err != nil {\n\t\treturn err\n\t}\n\n\t// path param projectLocator\n\tif err := r.SetPathParam(\"projectLocator\", o.ProjectLocator); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetRequestDetailsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param extra\n\tif err := r.SetPathParam(\"extra\", o.Extra); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetWorkItemParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.DollarExpand != nil {\n\n\t\t// query param $expand\n\t\tvar qrNrDollarExpand string\n\t\tif o.DollarExpand != nil {\n\t\t\tqrNrDollarExpand = *o.DollarExpand\n\t\t}\n\t\tqNrDollarExpand := qrNrDollarExpand\n\t\tif qNrDollarExpand != \"\" {\n\t\t\tif err := r.SetQueryParam(\"$expand\", qNrDollarExpand); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param api-version\n\tqrAPIVersion := o.APIVersion\n\tqAPIVersion := qrAPIVersion\n\tif qAPIVersion != \"\" {\n\t\tif err := r.SetQueryParam(\"api-version\", qAPIVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.AsOf != nil {\n\n\t\t// query param asOf\n\t\tvar qrAsOf strfmt.DateTime\n\t\tif o.AsOf != nil {\n\t\t\tqrAsOf = *o.AsOf\n\t\t}\n\t\tqAsOf := qrAsOf.String()\n\t\tif qAsOf != \"\" {\n\t\t\tif err := r.SetQueryParam(\"asOf\", qAsOf); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt32(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\t// path param organization\n\tif err := r.SetPathParam(\"organization\", o.Organization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param project\n\tif err := r.SetPathParam(\"project\", o.Project); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *IntegrationsManualHTTPSCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Data); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ValidateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostSecdefSearchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Symbol); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UserShowV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt64(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\t// path param name\n\tif err := r.SetPathParam(\"name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetLogsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.From != nil {\n\n\t\t// query param from\n\t\tvar qrFrom string\n\t\tif o.From != nil {\n\t\t\tqrFrom = *o.From\n\t\t}\n\t\tqFrom := qrFrom\n\t\tif qFrom != \"\" {\n\t\t\tif err := r.SetQueryParam(\"from\", qFrom); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.IncludeFields != nil {\n\n\t\t// query param include_fields\n\t\tvar qrIncludeFields bool\n\t\tif o.IncludeFields != nil {\n\t\t\tqrIncludeFields = *o.IncludeFields\n\t\t}\n\t\tqIncludeFields := swag.FormatBool(qrIncludeFields)\n\t\tif qIncludeFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"include_fields\", qIncludeFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.IncludeTotals != nil {\n\n\t\t// query param include_totals\n\t\tvar qrIncludeTotals bool\n\t\tif o.IncludeTotals != nil {\n\t\t\tqrIncludeTotals = *o.IncludeTotals\n\t\t}\n\t\tqIncludeTotals := swag.FormatBool(qrIncludeTotals)\n\t\tif qIncludeTotals != \"\" {\n\t\t\tif err := r.SetQueryParam(\"include_totals\", qIncludeTotals); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Page != nil {\n\n\t\t// query param page\n\t\tvar qrPage int64\n\t\tif o.Page != nil {\n\t\t\tqrPage = *o.Page\n\t\t}\n\t\tqPage := swag.FormatInt64(qrPage)\n\t\tif qPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.PerPage != nil {\n\n\t\t// query param per_page\n\t\tvar qrPerPage int64\n\t\tif o.PerPage != nil {\n\t\t\tqrPerPage = *o.PerPage\n\t\t}\n\t\tqPerPage := swag.FormatInt64(qrPerPage)\n\t\tif qPerPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"per_page\", qPerPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Q != nil {\n\n\t\t// query param q\n\t\tvar qrQ string\n\t\tif o.Q != nil {\n\t\t\tqrQ = *o.Q\n\t\t}\n\t\tqQ := qrQ\n\t\tif qQ != \"\" {\n\t\t\tif err := r.SetQueryParam(\"q\", qQ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Sort != nil {\n\n\t\t// query param sort\n\t\tvar qrSort string\n\t\tif o.Sort != nil {\n\t\t\tqrSort = *o.Sort\n\t\t}\n\t\tqSort := qrSort\n\t\tif qSort != \"\" {\n\t\t\tif err := r.SetQueryParam(\"sort\", qSort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Take != nil {\n\n\t\t// query param take\n\t\tvar qrTake int64\n\t\tif o.Take != nil {\n\t\t\tqrTake = *o.Take\n\t\t}\n\t\tqTake := swag.FormatInt64(qrTake)\n\t\tif qTake != \"\" {\n\t\t\tif err := r.SetQueryParam(\"take\", qTake); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostGetOneParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt64(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\tvaluesRelated := o.Related\n\n\tjoinedRelated := swag.JoinByFormat(valuesRelated, \"\")\n\t// query array param related\n\tif err := r.SetQueryParam(\"related\", joinedRelated...); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *BarParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ConfigGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Option != nil {\n\n\t\t// binding items for option\n\t\tjoinedOption := o.bindParamOption(reg)\n\n\t\t// query array param option\n\t\tif err := r.SetQueryParam(\"option\", joinedOption...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.ProjectID != nil {\n\n\t\t// query param project_id\n\t\tvar qrProjectID int64\n\n\t\tif o.ProjectID != nil {\n\t\t\tqrProjectID = *o.ProjectID\n\t\t}\n\t\tqProjectID := swag.FormatInt64(qrProjectID)\n\t\tif qProjectID != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"project_id\", qProjectID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.UserID != nil {\n\n\t\t// query param user_id\n\t\tvar qrUserID int64\n\n\t\tif o.UserID != nil {\n\t\t\tqrUserID = *o.UserID\n\t\t}\n\t\tqUserID := swag.FormatInt64(qrUserID)\n\t\tif qUserID != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"user_id\", qUserID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetSsoParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param code\n\tqrCode := o.Code\n\tqCode := qrCode\n\tif qCode != \"\" {\n\t\tif err := r.SetQueryParam(\"code\", qCode); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// query param resource_id\n\tqrResourceID := o.ResourceID\n\tqResourceID := qrResourceID\n\tif qResourceID != \"\" {\n\t\tif err := r.SetQueryParam(\"resource_id\", qResourceID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AllLookmlTestsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.FileID != nil {\n\n\t\t// query param file_id\n\t\tvar qrFileID string\n\t\tif o.FileID != nil {\n\t\t\tqrFileID = *o.FileID\n\t\t}\n\t\tqFileID := qrFileID\n\t\tif qFileID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"file_id\", qFileID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param project_id\n\tif err := r.SetPathParam(\"project_id\", o.ProjectID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *APIServiceHaltsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Height != nil {\n\n\t\t// query param height\n\t\tvar qrHeight string\n\t\tif o.Height != nil {\n\t\t\tqrHeight = *o.Height\n\t\t}\n\t\tqHeight := qrHeight\n\t\tif qHeight != \"\" {\n\t\t\tif err := r.SetQueryParam(\"height\", qHeight); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetUsersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Connection != nil {\n\n\t\t// query param connection\n\t\tvar qrConnection string\n\t\tif o.Connection != nil {\n\t\t\tqrConnection = *o.Connection\n\t\t}\n\t\tqConnection := qrConnection\n\t\tif qConnection != \"\" {\n\t\t\tif err := r.SetQueryParam(\"connection\", qConnection); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.IncludeFields != nil {\n\n\t\t// query param include_fields\n\t\tvar qrIncludeFields bool\n\t\tif o.IncludeFields != nil {\n\t\t\tqrIncludeFields = *o.IncludeFields\n\t\t}\n\t\tqIncludeFields := swag.FormatBool(qrIncludeFields)\n\t\tif qIncludeFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"include_fields\", qIncludeFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.IncludeTotals != nil {\n\n\t\t// query param include_totals\n\t\tvar qrIncludeTotals bool\n\t\tif o.IncludeTotals != nil {\n\t\t\tqrIncludeTotals = *o.IncludeTotals\n\t\t}\n\t\tqIncludeTotals := swag.FormatBool(qrIncludeTotals)\n\t\tif qIncludeTotals != \"\" {\n\t\t\tif err := r.SetQueryParam(\"include_totals\", qIncludeTotals); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Page != nil {\n\n\t\t// query param page\n\t\tvar qrPage int64\n\t\tif o.Page != nil {\n\t\t\tqrPage = *o.Page\n\t\t}\n\t\tqPage := swag.FormatInt64(qrPage)\n\t\tif qPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.PerPage != nil {\n\n\t\t// query param per_page\n\t\tvar qrPerPage int64\n\t\tif o.PerPage != nil {\n\t\t\tqrPerPage = *o.PerPage\n\t\t}\n\t\tqPerPage := swag.FormatInt64(qrPerPage)\n\t\tif qPerPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"per_page\", qPerPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Q != nil {\n\n\t\t// query param q\n\t\tvar qrQ string\n\t\tif o.Q != nil {\n\t\t\tqrQ = *o.Q\n\t\t}\n\t\tqQ := qrQ\n\t\tif qQ != \"\" {\n\t\t\tif err := r.SetQueryParam(\"q\", qQ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.SearchEngine != nil {\n\n\t\t// query param search_engine\n\t\tvar qrSearchEngine string\n\t\tif o.SearchEngine != nil {\n\t\t\tqrSearchEngine = *o.SearchEngine\n\t\t}\n\t\tqSearchEngine := qrSearchEngine\n\t\tif qSearchEngine != \"\" {\n\t\t\tif err := r.SetQueryParam(\"search_engine\", qSearchEngine); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Sort != nil {\n\n\t\t// query param sort\n\t\tvar qrSort string\n\t\tif o.Sort != nil {\n\t\t\tqrSort = *o.Sort\n\t\t}\n\t\tqSort := qrSort\n\t\tif qSort != \"\" {\n\t\t\tif err := r.SetQueryParam(\"sort\", qSort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetBlockGeneratorResultParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CreateRuntimeMapParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.FileUpload != nil {\n\n\t\tif o.FileUpload != nil {\n\n\t\t\t// form file param file_upload\n\t\t\tif err := r.SetFileParam(\"file_upload\", o.FileUpload); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetPlatformsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Extended != nil {\n\n\t\t// query param extended\n\t\tvar qrExtended bool\n\t\tif o.Extended != nil {\n\t\t\tqrExtended = *o.Extended\n\t\t}\n\t\tqExtended := swag.FormatBool(qrExtended)\n\t\tif qExtended != \"\" {\n\t\t\tif err := r.SetQueryParam(\"extended\", qExtended); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetUserUsageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Cloud != nil {\n\n\t\t// query param cloud\n\t\tvar qrCloud string\n\t\tif o.Cloud != nil {\n\t\t\tqrCloud = *o.Cloud\n\t\t}\n\t\tqCloud := qrCloud\n\t\tif qCloud != \"\" {\n\t\t\tif err := r.SetQueryParam(\"cloud\", qCloud); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Filterenddate != nil {\n\n\t\t// query param filterenddate\n\t\tvar qrFilterenddate int64\n\t\tif o.Filterenddate != nil {\n\t\t\tqrFilterenddate = *o.Filterenddate\n\t\t}\n\t\tqFilterenddate := swag.FormatInt64(qrFilterenddate)\n\t\tif qFilterenddate != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filterenddate\", qFilterenddate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Since != nil {\n\n\t\t// query param since\n\t\tvar qrSince int64\n\t\tif o.Since != nil {\n\t\t\tqrSince = *o.Since\n\t\t}\n\t\tqSince := swag.FormatInt64(qrSince)\n\t\tif qSince != \"\" {\n\t\t\tif err := r.SetQueryParam(\"since\", qSince); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Zone != nil {\n\n\t\t// query param zone\n\t\tvar qrZone string\n\t\tif o.Zone != nil {\n\t\t\tqrZone = *o.Zone\n\t\t}\n\t\tqZone := qrZone\n\t\tif qZone != \"\" {\n\t\t\tif err := r.SetQueryParam(\"zone\", qZone); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetOrderParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.MerchantID != nil {\n\n\t\t// query param merchantId\n\t\tvar qrMerchantID int64\n\t\tif o.MerchantID != nil {\n\t\t\tqrMerchantID = *o.MerchantID\n\t\t}\n\t\tqMerchantID := swag.FormatInt64(qrMerchantID)\n\t\tif qMerchantID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"merchantId\", qMerchantID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetPropertyDescriptorParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\t// query param propertyName\n\tqrPropertyName := o.PropertyName\n\tqPropertyName := qrPropertyName\n\tif qPropertyName != \"\" {\n\n\t\tif err := r.SetQueryParam(\"propertyName\", qPropertyName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetCurrentGenerationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CreateGitWebhookUsingPOSTParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\t// query param apiVersion\n\tqrAPIVersion := o.APIVersion\n\tqAPIVersion := qrAPIVersion\n\n\tif err := r.SetQueryParam(\"apiVersion\", qAPIVersion); err != nil {\n\t\treturn err\n\t}\n\tif err := r.SetBodyParam(o.GitWebhookSpec); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ViewsGetByIDParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param identifier\n\tif err := r.SetPathParam(\"identifier\", o.Identifier); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdateDeviceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param deviceId\n\tif err := r.SetPathParam(\"deviceId\", o.DeviceID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.UpdateDeviceRequest != nil {\n\t\tif err := r.SetBodyParam(o.UpdateDeviceRequest); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SaveTemplateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param api-version\n\tqrAPIVersion := o.APIVersion\n\tqAPIVersion := qrAPIVersion\n\tif qAPIVersion != \"\" {\n\t\tif err := r.SetQueryParam(\"api-version\", qAPIVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param organization\n\tif err := r.SetPathParam(\"organization\", o.Organization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param project\n\tif err := r.SetPathParam(\"project\", o.Project); err != nil {\n\t\treturn err\n\t}\n\n\t// path param templateId\n\tif err := r.SetPathParam(\"templateId\", o.TemplateID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SystemEventsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Filters != nil {\n\n\t\t// query param filters\n\t\tvar qrFilters string\n\t\tif o.Filters != nil {\n\t\t\tqrFilters = *o.Filters\n\t\t}\n\t\tqFilters := qrFilters\n\t\tif qFilters != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filters\", qFilters); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Since != nil {\n\n\t\t// query param since\n\t\tvar qrSince string\n\t\tif o.Since != nil {\n\t\t\tqrSince = *o.Since\n\t\t}\n\t\tqSince := qrSince\n\t\tif qSince != \"\" {\n\t\t\tif err := r.SetQueryParam(\"since\", qSince); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Until != nil {\n\n\t\t// query param until\n\t\tvar qrUntil string\n\t\tif o.Until != nil {\n\t\t\tqrUntil = *o.Until\n\t\t}\n\t\tqUntil := qrUntil\n\t\tif qUntil != \"\" {\n\t\t\tif err := r.SetQueryParam(\"until\", qUntil); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ConvertParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param from.currency_code\n\tif err := r.SetPathParam(\"from.currency_code\", o.FromCurrencyCode); err != nil {\n\t\treturn err\n\t}\n\n\tif o.FromNanos != nil {\n\n\t\t// query param from.nanos\n\t\tvar qrFromNanos int32\n\t\tif o.FromNanos != nil {\n\t\t\tqrFromNanos = *o.FromNanos\n\t\t}\n\t\tqFromNanos := swag.FormatInt32(qrFromNanos)\n\t\tif qFromNanos != \"\" {\n\t\t\tif err := r.SetQueryParam(\"from.nanos\", qFromNanos); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.FromUnits != nil {\n\n\t\t// query param from.units\n\t\tvar qrFromUnits string\n\t\tif o.FromUnits != nil {\n\t\t\tqrFromUnits = *o.FromUnits\n\t\t}\n\t\tqFromUnits := qrFromUnits\n\t\tif qFromUnits != \"\" {\n\t\t\tif err := r.SetQueryParam(\"from.units\", qFromUnits); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param to_code\n\tif err := r.SetPathParam(\"to_code\", o.ToCode); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetBundleByKeyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Audit != nil {\n\n\t\t// query param audit\n\t\tvar qrAudit string\n\n\t\tif o.Audit != nil {\n\t\t\tqrAudit = *o.Audit\n\t\t}\n\t\tqAudit := qrAudit\n\t\tif qAudit != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"audit\", qAudit); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// query param externalKey\n\tqrExternalKey := o.ExternalKey\n\tqExternalKey := qrExternalKey\n\tif qExternalKey != \"\" {\n\n\t\tif err := r.SetQueryParam(\"externalKey\", qExternalKey); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.IncludedDeleted != nil {\n\n\t\t// query param includedDeleted\n\t\tvar qrIncludedDeleted bool\n\n\t\tif o.IncludedDeleted != nil {\n\t\t\tqrIncludedDeleted = *o.IncludedDeleted\n\t\t}\n\t\tqIncludedDeleted := swag.FormatBool(qrIncludedDeleted)\n\t\tif qIncludedDeleted != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"includedDeleted\", qIncludedDeleted); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// header param WithProfilingInfo\n\tif o.WithProfilingInfo != nil && len(*o.WithProfilingInfo) > 0 {\n\t\tif err := r.SetHeaderParam(\"X-Killbill-Profiling-Req\", *o.WithProfilingInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// header param withStackTrace\n\tif o.WithStackTrace != nil && *o.WithStackTrace {\n\t\tif err := r.SetQueryParam(\"withStackTrace\", \"true\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SwarmUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.RotateManagerToken != nil {\n\n\t\t// query param rotateManagerToken\n\t\tvar qrRotateManagerToken bool\n\t\tif o.RotateManagerToken != nil {\n\t\t\tqrRotateManagerToken = *o.RotateManagerToken\n\t\t}\n\t\tqRotateManagerToken := swag.FormatBool(qrRotateManagerToken)\n\t\tif qRotateManagerToken != \"\" {\n\t\t\tif err := r.SetQueryParam(\"rotateManagerToken\", qRotateManagerToken); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.RotateManagerUnlockKey != nil {\n\n\t\t// query param rotateManagerUnlockKey\n\t\tvar qrRotateManagerUnlockKey bool\n\t\tif o.RotateManagerUnlockKey != nil {\n\t\t\tqrRotateManagerUnlockKey = *o.RotateManagerUnlockKey\n\t\t}\n\t\tqRotateManagerUnlockKey := swag.FormatBool(qrRotateManagerUnlockKey)\n\t\tif qRotateManagerUnlockKey != \"\" {\n\t\t\tif err := r.SetQueryParam(\"rotateManagerUnlockKey\", qRotateManagerUnlockKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.RotateWorkerToken != nil {\n\n\t\t// query param rotateWorkerToken\n\t\tvar qrRotateWorkerToken bool\n\t\tif o.RotateWorkerToken != nil {\n\t\t\tqrRotateWorkerToken = *o.RotateWorkerToken\n\t\t}\n\t\tqRotateWorkerToken := swag.FormatBool(qrRotateWorkerToken)\n\t\tif qRotateWorkerToken != \"\" {\n\t\t\tif err := r.SetQueryParam(\"rotateWorkerToken\", qRotateWorkerToken); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param version\n\tqrVersion := o.Version\n\tqVersion := swag.FormatInt64(qrVersion)\n\tif qVersion != \"\" {\n\t\tif err := r.SetQueryParam(\"version\", qVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ServiceInstanceGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.XBrokerAPIOriginatingIdentity != nil {\n\n\t\t// header param X-Broker-API-Originating-Identity\n\t\tif err := r.SetHeaderParam(\"X-Broker-API-Originating-Identity\", *o.XBrokerAPIOriginatingIdentity); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// header param X-Broker-API-Version\n\tif err := r.SetHeaderParam(\"X-Broker-API-Version\", o.XBrokerAPIVersion); err != nil {\n\t\treturn err\n\t}\n\n\t// path param instance_id\n\tif err := r.SetPathParam(\"instance_id\", o.InstanceID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ShowPackageParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param media_type\n\tif err := r.SetPathParam(\"media_type\", o.MediaType); err != nil {\n\t\treturn err\n\t}\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\t// path param package\n\tif err := r.SetPathParam(\"package\", o.Package); err != nil {\n\t\treturn err\n\t}\n\n\t// path param release\n\tif err := r.SetPathParam(\"release\", o.Release); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SizeParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Parameters); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetOutagesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param count\n\tqrCount := o.Count\n\tqCount := swag.FormatFloat64(qrCount)\n\tif qCount != \"\" {\n\n\t\tif err := r.SetQueryParam(\"count\", qCount); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.DeviceID != nil {\n\n\t\t// query param deviceId\n\t\tvar qrDeviceID string\n\n\t\tif o.DeviceID != nil {\n\t\t\tqrDeviceID = *o.DeviceID\n\t\t}\n\t\tqDeviceID := qrDeviceID\n\t\tif qDeviceID != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"deviceId\", qDeviceID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.InProgress != nil {\n\n\t\t// query param inProgress\n\t\tvar qrInProgress bool\n\n\t\tif o.InProgress != nil {\n\t\t\tqrInProgress = *o.InProgress\n\t\t}\n\t\tqInProgress := swag.FormatBool(qrInProgress)\n\t\tif qInProgress != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"inProgress\", qInProgress); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// query param page\n\tqrPage := o.Page\n\tqPage := swag.FormatFloat64(qrPage)\n\tif qPage != \"\" {\n\n\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Period != nil {\n\n\t\t// query param period\n\t\tvar qrPeriod float64\n\n\t\tif o.Period != nil {\n\t\t\tqrPeriod = *o.Period\n\t\t}\n\t\tqPeriod := swag.FormatFloat64(qrPeriod)\n\t\tif qPeriod != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"period\", qPeriod); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Query != nil {\n\n\t\t// query param query\n\t\tvar qrQuery string\n\n\t\tif o.Query != nil {\n\t\t\tqrQuery = *o.Query\n\t\t}\n\t\tqQuery := qrQuery\n\t\tif qQuery != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"query\", qQuery); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Type != nil {\n\n\t\t// query param type\n\t\tvar qrType string\n\n\t\tif o.Type != nil {\n\t\t\tqrType = *o.Type\n\t\t}\n\t\tqType := qrType\n\t\tif qType != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"type\", qType); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *TerminateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param extractorId\n\tif err := r.SetPathParam(\"extractorId\", o.ExtractorID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param inputId\n\tif err := r.SetPathParam(\"inputId\", o.InputID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ServeFieldParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param field\n\tif err := r.SetPathParam(\"field\", o.Field); err != nil {\n\t\treturn err\n\t}\n\n\t// path param vcsRootLocator\n\tif err := r.SetPathParam(\"vcsRootLocator\", o.VcsRootLocator); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PostV1DevicesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// form param device_identifier\n\tfrDeviceIdentifier := o.DeviceIdentifier\n\tfDeviceIdentifier := frDeviceIdentifier\n\tif fDeviceIdentifier != \"\" {\n\t\tif err := r.SetFormParam(\"device_identifier\", fDeviceIdentifier); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// form param kind\n\tfrKind := o.Kind\n\tfKind := frKind\n\tif fKind != \"\" {\n\t\tif err := r.SetFormParam(\"kind\", fKind); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// form param name\n\tfrName := o.Name\n\tfName := frName\n\tif fName != \"\" {\n\t\tif err := r.SetFormParam(\"name\", fName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.NotificationIdentifier != nil {\n\n\t\t// form param notification_identifier\n\t\tvar frNotificationIdentifier string\n\t\tif o.NotificationIdentifier != nil {\n\t\t\tfrNotificationIdentifier = *o.NotificationIdentifier\n\t\t}\n\t\tfNotificationIdentifier := frNotificationIdentifier\n\t\tif fNotificationIdentifier != \"\" {\n\t\t\tif err := r.SetFormParam(\"notification_identifier\", fNotificationIdentifier); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.SubscribeNotification != nil {\n\n\t\t// form param subscribe_notification\n\t\tvar frSubscribeNotification bool\n\t\tif o.SubscribeNotification != nil {\n\t\t\tfrSubscribeNotification = *o.SubscribeNotification\n\t\t}\n\t\tfSubscribeNotification := swag.FormatBool(frSubscribeNotification)\n\t\tif fSubscribeNotification != \"\" {\n\t\t\tif err := r.SetFormParam(\"subscribe_notification\", fSubscribeNotification); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *QueryFirewallFieldsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Limit != nil {\n\n\t\t// query param limit\n\t\tvar qrLimit int64\n\n\t\tif o.Limit != nil {\n\t\t\tqrLimit = *o.Limit\n\t\t}\n\t\tqLimit := swag.FormatInt64(qrLimit)\n\t\tif qLimit != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"limit\", qLimit); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Offset != nil {\n\n\t\t// query param offset\n\t\tvar qrOffset string\n\n\t\tif o.Offset != nil {\n\t\t\tqrOffset = *o.Offset\n\t\t}\n\t\tqOffset := qrOffset\n\t\tif qOffset != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"offset\", qOffset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.PlatformID != nil {\n\n\t\t// query param platform_id\n\t\tvar qrPlatformID string\n\n\t\tif o.PlatformID != nil {\n\t\t\tqrPlatformID = *o.PlatformID\n\t\t}\n\t\tqPlatformID := qrPlatformID\n\t\tif qPlatformID != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"platform_id\", qPlatformID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetCatalogXMLParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AccountID != nil {\n\n\t\t// query param accountId\n\t\tvar qrAccountID strfmt.UUID\n\n\t\tif o.AccountID != nil {\n\t\t\tqrAccountID = *o.AccountID\n\t\t}\n\t\tqAccountID := qrAccountID.String()\n\t\tif qAccountID != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"accountId\", qAccountID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.RequestedDate != nil {\n\n\t\t// query param requestedDate\n\t\tvar qrRequestedDate strfmt.DateTime\n\n\t\tif o.RequestedDate != nil {\n\t\t\tqrRequestedDate = *o.RequestedDate\n\t\t}\n\t\tqRequestedDate := qrRequestedDate.String()\n\t\tif qRequestedDate != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"requestedDate\", qRequestedDate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// header param WithProfilingInfo\n\tif o.WithProfilingInfo != nil && len(*o.WithProfilingInfo) > 0 {\n\t\tif err := r.SetHeaderParam(\"X-Killbill-Profiling-Req\", *o.WithProfilingInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// header param withStackTrace\n\tif o.WithStackTrace != nil && *o.WithStackTrace {\n\t\tif err := r.SetQueryParam(\"withStackTrace\", \"true\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetClockParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param X-Killbill-ApiKey\n\tif err := r.SetHeaderParam(\"X-Killbill-ApiKey\", o.XKillbillAPIKey); err != nil {\n\t\treturn err\n\t}\n\n\t// header param X-Killbill-ApiSecret\n\tif err := r.SetHeaderParam(\"X-Killbill-ApiSecret\", o.XKillbillAPISecret); err != nil {\n\t\treturn err\n\t}\n\n\t// header param WithProfilingInfo\n\tif o.WithProfilingInfo != nil && len(*o.WithProfilingInfo) > 0 {\n\t\tif err := r.SetHeaderParam(\"X-Killbill-Profiling-Req\", *o.WithProfilingInfo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// header param withStackTrace\n\tif o.WithStackTrace != nil && *o.WithStackTrace {\n\t\tif err := r.SetQueryParam(\"withStackTrace\", \"true\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AdminCreateJusticeUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\t// path param targetNamespace\n\tif err := r.SetPathParam(\"targetNamespace\", o.TargetNamespace); err != nil {\n\t\treturn err\n\t}\n\n\t// path param userId\n\tif err := r.SetPathParam(\"userId\", o.UserID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdateWidgetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Accept != nil {\n\n\t\t// header param Accept\n\t\tif err := r.SetHeaderParam(\"Accept\", *o.Accept); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif o.ContentType != nil {\n\n\t\t// header param Content-Type\n\t\tif err := r.SetHeaderParam(\"Content-Type\", *o.ContentType); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// path param uuid\n\tif err := r.SetPathParam(\"uuid\", o.UUID.String()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.WidgetBody); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *TestEndpointParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetLogsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.XRequestID != nil {\n\n\t\t// header param X-Request-Id\n\t\tif err := r.SetHeaderParam(\"X-Request-Id\", *o.XRequestID); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif o.Page != nil {\n\n\t\t// query param page\n\t\tvar qrPage int64\n\t\tif o.Page != nil {\n\t\t\tqrPage = *o.Page\n\t\t}\n\t\tqPage := swag.FormatInt64(qrPage)\n\t\tif qPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.PageSize != nil {\n\n\t\t// query param page_size\n\t\tvar qrPageSize int64\n\t\tif o.PageSize != nil {\n\t\t\tqrPageSize = *o.PageSize\n\t\t}\n\t\tqPageSize := swag.FormatInt64(qrPageSize)\n\t\tif qPageSize != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page_size\", qPageSize); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param project_name\n\tif err := r.SetPathParam(\"project_name\", o.ProjectName); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Q != nil {\n\n\t\t// query param q\n\t\tvar qrQ string\n\t\tif o.Q != nil {\n\t\t\tqrQ = *o.Q\n\t\t}\n\t\tqQ := qrQ\n\t\tif qQ != \"\" {\n\t\t\tif err := r.SetQueryParam(\"q\", qQ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ListSourceFileOfProjectVersionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param parentId\n\tif err := r.SetPathParam(\"parentId\", swag.FormatInt64(o.ParentID)); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Q != nil {\n\n\t\t// query param q\n\t\tvar qrQ string\n\t\tif o.Q != nil {\n\t\t\tqrQ = *o.Q\n\t\t}\n\t\tqQ := qrQ\n\t\tif qQ != \"\" {\n\t\t\tif err := r.SetQueryParam(\"q\", qQ); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetDrgParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param drgId\n\tif err := r.SetPathParam(\"drgId\", o.DrgID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdateFlowParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param bucketId\n\tif err := r.SetPathParam(\"bucketId\", o.BucketID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param flowId\n\tif err := r.SetPathParam(\"flowId\", o.FlowID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CreateWidgetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Accept != nil {\n\n\t\t// header param Accept\n\t\tif err := r.SetHeaderParam(\"Accept\", *o.Accept); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif o.ContentType != nil {\n\n\t\t// header param Content-Type\n\t\tif err := r.SetHeaderParam(\"Content-Type\", *o.ContentType); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif err := r.SetBodyParam(o.WidgetBody); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetBodyResourceByDatePeriodParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param date\n\tif err := r.SetPathParam(\"date\", o.Date.String()); err != nil {\n\t\treturn err\n\t}\n\n\t// path param period\n\tif err := r.SetPathParam(\"period\", o.Period); err != nil {\n\t\treturn err\n\t}\n\n\t// path param resource-path\n\tif err := r.SetPathParam(\"resource-path\", o.ResourcePath); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetAboutUserParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tvaluesSelect := o.Select\n\n\tjoinedSelect := swag.JoinByFormat(valuesSelect, \"csv\")\n\t// query array param select\n\tif err := r.SetQueryParam(\"select\", joinedSelect...); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ExtractionListV1Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param id\n\tqrID := o.ID\n\tqID := qrID\n\tif qID != \"\" {\n\n\t\tif err := r.SetQueryParam(\"id\", qID); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Limit != nil {\n\n\t\t// query param limit\n\t\tvar qrLimit int64\n\n\t\tif o.Limit != nil {\n\t\t\tqrLimit = *o.Limit\n\t\t}\n\t\tqLimit := swag.FormatInt64(qrLimit)\n\t\tif qLimit != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"limit\", qLimit); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Offset != nil {\n\n\t\t// query param offset\n\t\tvar qrOffset string\n\n\t\tif o.Offset != nil {\n\t\t\tqrOffset = *o.Offset\n\t\t}\n\t\tqOffset := qrOffset\n\t\tif qOffset != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"offset\", qOffset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetAuditEventsParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Page != nil {\n\n\t\t// query param page\n\t\tvar qrPage int32\n\t\tif o.Page != nil {\n\t\t\tqrPage = *o.Page\n\t\t}\n\t\tqPage := swag.FormatInt32(qrPage)\n\t\tif qPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param resourceCrn\n\tqrResourceCrn := o.ResourceCrn\n\tqResourceCrn := qrResourceCrn\n\tif qResourceCrn != \"\" {\n\t\tif err := r.SetQueryParam(\"resourceCrn\", qResourceCrn); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Size != nil {\n\n\t\t// query param size\n\t\tvar qrSize int32\n\t\tif o.Size != nil {\n\t\t\tqrSize = *o.Size\n\t\t}\n\t\tqSize := swag.FormatInt32(qrSize)\n\t\tif qSize != \"\" {\n\t\t\tif err := r.SetQueryParam(\"size\", qSize); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PcloudSystempoolsGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param cloud_instance_id\n\tif err := r.SetPathParam(\"cloud_instance_id\", o.CloudInstanceID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *WaitListParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param address\n\tif err := r.SetPathParam(\"address\", o.Address); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Height != nil {\n\n\t\t// query param height\n\t\tvar qrHeight uint64\n\n\t\tif o.Height != nil {\n\t\t\tqrHeight = *o.Height\n\t\t}\n\t\tqHeight := swag.FormatUint64(qrHeight)\n\t\tif qHeight != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"height\", qHeight); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.PublicKey != nil {\n\n\t\t// query param public_key\n\t\tvar qrPublicKey string\n\n\t\tif o.PublicKey != nil {\n\t\t\tqrPublicKey = *o.PublicKey\n\t\t}\n\t\tqPublicKey := qrPublicKey\n\t\tif qPublicKey != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"public_key\", qPublicKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *BudgetAddParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetGCParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param gc_id\n\tif err := r.SetPathParam(\"gc_id\", swag.FormatInt64(o.GcID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PartialUpdateAppParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\t// path param app_id\n\tif err := r.SetPathParam(\"app_id\", swag.FormatInt64(o.AppID)); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\treturn err\n\t}\n\n\t// path param team_id\n\tif err := r.SetPathParam(\"team_id\", swag.FormatInt64(o.TeamID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *StartPacketCaptureParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *TaskSchemasIDGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param identifier\n\tif err := r.SetPathParam(\"identifier\", o.Identifier); err != nil {\n\t\treturn err\n\t}\n\n\tif o.ResolveRef != nil {\n\n\t\t// query param resolveRef\n\t\tvar qrResolveRef bool\n\t\tif o.ResolveRef != nil {\n\t\t\tqrResolveRef = *o.ResolveRef\n\t\t}\n\t\tqResolveRef := swag.FormatBool(qrResolveRef)\n\t\tif qResolveRef != \"\" {\n\t\t\tif err := r.SetQueryParam(\"resolveRef\", qResolveRef); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UploadTaskFileParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Description != nil {\n\n\t\t// form param description\n\t\tvar frDescription string\n\t\tif o.Description != nil {\n\t\t\tfrDescription = *o.Description\n\t\t}\n\t\tfDescription := frDescription\n\t\tif fDescription != \"\" {\n\t\t\tif err := r.SetFormParam(\"description\", fDescription); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.File != nil {\n\n\t\tif o.File != nil {\n\t\t\t// form file param file\n\t\t\tif err := r.SetFileParam(\"file\", o.File); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt64(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PetCreateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetInstrumentsInstrumentOrderBookParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AcceptDatetimeFormat != nil {\n\n\t\t// header param Accept-Datetime-Format\n\t\tif err := r.SetHeaderParam(\"Accept-Datetime-Format\", *o.AcceptDatetimeFormat); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param instrument\n\tif err := r.SetPathParam(\"instrument\", o.Instrument); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Time != nil {\n\n\t\t// query param time\n\t\tvar qrTime string\n\t\tif o.Time != nil {\n\t\t\tqrTime = *o.Time\n\t\t}\n\t\tqTime := qrTime\n\t\tif qTime != \"\" {\n\t\t\tif err := r.SetQueryParam(\"time\", qTime); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *QueryChangesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Filter != nil {\n\n\t\t// query param filter\n\t\tvar qrFilter string\n\n\t\tif o.Filter != nil {\n\t\t\tqrFilter = *o.Filter\n\t\t}\n\t\tqFilter := qrFilter\n\t\tif qFilter != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"filter\", qFilter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Limit != nil {\n\n\t\t// query param limit\n\t\tvar qrLimit int64\n\n\t\tif o.Limit != nil {\n\t\t\tqrLimit = *o.Limit\n\t\t}\n\t\tqLimit := swag.FormatInt64(qrLimit)\n\t\tif qLimit != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"limit\", qLimit); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Offset != nil {\n\n\t\t// query param offset\n\t\tvar qrOffset int64\n\n\t\tif o.Offset != nil {\n\t\t\tqrOffset = *o.Offset\n\t\t}\n\t\tqOffset := swag.FormatInt64(qrOffset)\n\t\tif qOffset != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"offset\", qOffset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Sort != nil {\n\n\t\t// query param sort\n\t\tvar qrSort string\n\n\t\tif o.Sort != nil {\n\t\t\tqrSort = *o.Sort\n\t\t}\n\t\tqSort := qrSort\n\t\tif qSort != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"sort\", qSort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetScopeConfigurationParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param scope_id\n\tif err := r.SetPathParam(\"scope_id\", o.ScopeID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param site_id\n\tif err := r.SetPathParam(\"site_id\", o.SiteID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param stack_id\n\tif err := r.SetPathParam(\"stack_id\", o.StackID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdateEventParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param eventId\n\tif err := r.SetPathParam(\"eventId\", o.EventID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param koronaAccountId\n\tif err := r.SetPathParam(\"koronaAccountId\", o.KoronaAccountID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ContainerUpdateParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetBodyParam(o.Update); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetV1FunctionalitiesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Impacted != nil {\n\n\t\t// query param impacted\n\t\tvar qrImpacted string\n\n\t\tif o.Impacted != nil {\n\t\t\tqrImpacted = *o.Impacted\n\t\t}\n\t\tqImpacted := qrImpacted\n\t\tif qImpacted != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"impacted\", qImpacted); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Labels != nil {\n\n\t\t// query param labels\n\t\tvar qrLabels string\n\n\t\tif o.Labels != nil {\n\t\t\tqrLabels = *o.Labels\n\t\t}\n\t\tqLabels := qrLabels\n\t\tif qLabels != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"labels\", qLabels); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Lite != nil {\n\n\t\t// query param lite\n\t\tvar qrLite bool\n\n\t\tif o.Lite != nil {\n\t\t\tqrLite = *o.Lite\n\t\t}\n\t\tqLite := swag.FormatBool(qrLite)\n\t\tif qLite != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"lite\", qLite); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Name != nil {\n\n\t\t// query param name\n\t\tvar qrName string\n\n\t\tif o.Name != nil {\n\t\t\tqrName = *o.Name\n\t\t}\n\t\tqName := qrName\n\t\tif qName != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"name\", qName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Owner != nil {\n\n\t\t// query param owner\n\t\tvar qrOwner string\n\n\t\tif o.Owner != nil {\n\t\t\tqrOwner = *o.Owner\n\t\t}\n\t\tqOwner := qrOwner\n\t\tif qOwner != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"owner\", qOwner); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Page != nil {\n\n\t\t// query param page\n\t\tvar qrPage int32\n\n\t\tif o.Page != nil {\n\t\t\tqrPage = *o.Page\n\t\t}\n\t\tqPage := swag.FormatInt32(qrPage)\n\t\tif qPage != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.PerPage != nil {\n\n\t\t// query param per_page\n\t\tvar qrPerPage int32\n\n\t\tif o.PerPage != nil {\n\t\t\tqrPerPage = *o.PerPage\n\t\t}\n\t\tqPerPage := swag.FormatInt32(qrPerPage)\n\t\tif qPerPage != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"per_page\", qPerPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Query != nil {\n\n\t\t// query param query\n\t\tvar qrQuery string\n\n\t\tif o.Query != nil {\n\t\t\tqrQuery = *o.Query\n\t\t}\n\t\tqQuery := qrQuery\n\t\tif qQuery != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"query\", qQuery); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetPointsByQueryParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.DollarSkip != nil {\n\n\t\t// query param $skip\n\t\tvar qrNrDollarSkip int32\n\t\tif o.DollarSkip != nil {\n\t\t\tqrNrDollarSkip = *o.DollarSkip\n\t\t}\n\t\tqNrDollarSkip := swag.FormatInt32(qrNrDollarSkip)\n\t\tif qNrDollarSkip != \"\" {\n\t\t\tif err := r.SetQueryParam(\"$skip\", qNrDollarSkip); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.DollarTop != nil {\n\n\t\t// query param $top\n\t\tvar qrNrDollarTop int32\n\t\tif o.DollarTop != nil {\n\t\t\tqrNrDollarTop = *o.DollarTop\n\t\t}\n\t\tqNrDollarTop := swag.FormatInt32(qrNrDollarTop)\n\t\tif qNrDollarTop != \"\" {\n\t\t\tif err := r.SetQueryParam(\"$top\", qNrDollarTop); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param api-version\n\tqrAPIVersion := o.APIVersion\n\tqAPIVersion := qrAPIVersion\n\tif qAPIVersion != \"\" {\n\t\tif err := r.SetQueryParam(\"api-version\", qAPIVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param organization\n\tif err := r.SetPathParam(\"organization\", o.Organization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param project\n\tif err := r.SetPathParam(\"project\", o.Project); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SyncStatusUsingGETParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param namespaceSelfLinkId\n\tif err := r.SetPathParam(\"namespaceSelfLinkId\", o.NamespaceSelfLinkID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param requestId\n\tif err := r.SetPathParam(\"requestId\", o.RequestID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ResolveBatchParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Account != nil {\n\n\t\t// query param account\n\t\tvar qrAccount string\n\t\tif o.Account != nil {\n\t\t\tqrAccount = *o.Account\n\t\t}\n\t\tqAccount := qrAccount\n\t\tif qAccount != \"\" {\n\t\t\tif err := r.SetQueryParam(\"account\", qAccount); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Environment != nil {\n\n\t\t// query param environment\n\t\tvar qrEnvironment string\n\t\tif o.Environment != nil {\n\t\t\tqrEnvironment = *o.Environment\n\t\t}\n\t\tqEnvironment := qrEnvironment\n\t\tif qEnvironment != \"\" {\n\t\t\tif err := r.SetQueryParam(\"environment\", qEnvironment); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.From != nil {\n\n\t\t// query param from\n\t\tvar qrFrom string\n\t\tif o.From != nil {\n\t\t\tqrFrom = *o.From\n\t\t}\n\t\tqFrom := qrFrom\n\t\tif qFrom != \"\" {\n\t\t\tif err := r.SetQueryParam(\"from\", qFrom); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Page != nil {\n\n\t\t// query param page\n\t\tvar qrPage int32\n\t\tif o.Page != nil {\n\t\t\tqrPage = *o.Page\n\t\t}\n\t\tqPage := swag.FormatInt32(qrPage)\n\t\tif qPage != \"\" {\n\t\t\tif err := r.SetQueryParam(\"page\", qPage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Region != nil {\n\n\t\t// query param region\n\t\tvar qrRegion string\n\t\tif o.Region != nil {\n\t\t\tqrRegion = *o.Region\n\t\t}\n\t\tqRegion := qrRegion\n\t\tif qRegion != \"\" {\n\t\t\tif err := r.SetQueryParam(\"region\", qRegion); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.To != nil {\n\n\t\t// query param to\n\t\tvar qrTo string\n\t\tif o.To != nil {\n\t\t\tqrTo = *o.To\n\t\t}\n\t\tqTo := qrTo\n\t\tif qTo != \"\" {\n\t\t\tif err := r.SetQueryParam(\"to\", qTo); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetAccountParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tr.SetTimeout(o.timeout)\n\tvar res []error\n\n\tif o.Authorization != nil {\n\n\t\t// header param Authorization\n\t\tif err := r.SetHeaderParam(\"Authorization\", *o.Authorization); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\tif o.Country != nil {\n\n\t\t// query param country\n\t\tvar qrCountry string\n\t\tif o.Country != nil {\n\t\t\tqrCountry = *o.Country\n\t\t}\n\t\tqCountry := qrCountry\n\t\tif qCountry != \"\" {\n\t\t\tif err := r.SetQueryParam(\"country\", qCountry); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Email != nil {\n\n\t\t// query param email\n\t\tvar qrEmail string\n\t\tif o.Email != nil {\n\t\t\tqrEmail = *o.Email\n\t\t}\n\t\tqEmail := qrEmail\n\t\tif qEmail != \"\" {\n\t\t\tif err := r.SetQueryParam(\"email\", qEmail); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tvaluesFields := o.Fields\n\n\tjoinedFields := swag.JoinByFormat(valuesFields, \"csv\")\n\t// query array param fields\n\tif err := r.SetQueryParam(\"fields\", joinedFields...); err != nil {\n\t\treturn err\n\t}\n\n\tif o.PersonID != nil {\n\n\t\t// query param person_id\n\t\tvar qrPersonID string\n\t\tif o.PersonID != nil {\n\t\t\tqrPersonID = *o.PersonID\n\t\t}\n\t\tqPersonID := qrPersonID\n\t\tif qPersonID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"person_id\", qPersonID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetDeviceHealthParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// header param Authorization\n\tif err := r.SetHeaderParam(\"Authorization\", o.Authorization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param deviceId\n\tif err := r.SetPathParam(\"deviceId\", o.DeviceID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdatePatientParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param ID\n\tif err := r.SetPathParam(\"ID\", o.ID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Patient != nil {\n\t\tif err := r.SetBodyParam(o.Patient); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *UpdateCustomIDPParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.CustomIDP != nil {\n\t\tif err := r.SetBodyParam(o.CustomIDP); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param aid\n\tif err := r.SetPathParam(\"aid\", o.Aid); err != nil {\n\t\treturn err\n\t}\n\n\t// path param iid\n\tif err := r.SetPathParam(\"iid\", o.Iid); err != nil {\n\t\treturn err\n\t}\n\n\t// path param tid\n\tif err := r.SetPathParam(\"tid\", o.Tid); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetSeriesIDFilterParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AcceptLanguage != nil {\n\n\t\t// header param Accept-Language\n\t\tif err := r.SetHeaderParam(\"Accept-Language\", *o.AcceptLanguage); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t}\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt64(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\t// query param keys\n\tqrKeys := o.Keys\n\tqKeys := qrKeys\n\tif qKeys != \"\" {\n\t\tif err := r.SetQueryParam(\"keys\", qKeys); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CreateBlueprintInWorkspaceInternalParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.AccountID != nil {\n\n\t\t// query param accountId\n\t\tvar qrAccountID string\n\t\tif o.AccountID != nil {\n\t\t\tqrAccountID = *o.AccountID\n\t\t}\n\t\tqAccountID := qrAccountID\n\t\tif qAccountID != \"\" {\n\t\t\tif err := r.SetQueryParam(\"accountId\", qAccountID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param workspaceId\n\tif err := r.SetPathParam(\"workspaceId\", swag.FormatInt64(o.WorkspaceID)); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *OrgGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param org\n\tif err := r.SetPathParam(\"org\", o.Org); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *ExtrasGraphsReadParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param id\n\tif err := r.SetPathParam(\"id\", swag.FormatInt64(o.ID)); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Name != nil {\n\n\t\t// query param name\n\t\tvar qrName string\n\t\tif o.Name != nil {\n\t\t\tqrName = *o.Name\n\t\t}\n\t\tqName := qrName\n\t\tif qName != \"\" {\n\t\t\tif err := r.SetQueryParam(\"name\", qName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Type != nil {\n\n\t\t// query param type\n\t\tvar qrType string\n\t\tif o.Type != nil {\n\t\t\tqrType = *o.Type\n\t\t}\n\t\tqType := qrType\n\t\tif qType != \"\" {\n\t\t\tif err := r.SetQueryParam(\"type\", qType); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetVersioningPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Description != nil {\n\n\t\t// query param Description\n\t\tvar qrDescription string\n\t\tif o.Description != nil {\n\t\t\tqrDescription = *o.Description\n\t\t}\n\t\tqDescription := qrDescription\n\t\tif qDescription != \"\" {\n\t\t\tif err := r.SetQueryParam(\"Description\", qDescription); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.IgnoreFilesGreaterThan != nil {\n\n\t\t// query param IgnoreFilesGreaterThan\n\t\tvar qrIgnoreFilesGreaterThan string\n\t\tif o.IgnoreFilesGreaterThan != nil {\n\t\t\tqrIgnoreFilesGreaterThan = *o.IgnoreFilesGreaterThan\n\t\t}\n\t\tqIgnoreFilesGreaterThan := qrIgnoreFilesGreaterThan\n\t\tif qIgnoreFilesGreaterThan != \"\" {\n\t\t\tif err := r.SetQueryParam(\"IgnoreFilesGreaterThan\", qIgnoreFilesGreaterThan); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.MaxSizePerFile != nil {\n\n\t\t// query param MaxSizePerFile\n\t\tvar qrMaxSizePerFile string\n\t\tif o.MaxSizePerFile != nil {\n\t\t\tqrMaxSizePerFile = *o.MaxSizePerFile\n\t\t}\n\t\tqMaxSizePerFile := qrMaxSizePerFile\n\t\tif qMaxSizePerFile != \"\" {\n\t\t\tif err := r.SetQueryParam(\"MaxSizePerFile\", qMaxSizePerFile); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.MaxTotalSize != nil {\n\n\t\t// query param MaxTotalSize\n\t\tvar qrMaxTotalSize string\n\t\tif o.MaxTotalSize != nil {\n\t\t\tqrMaxTotalSize = *o.MaxTotalSize\n\t\t}\n\t\tqMaxTotalSize := qrMaxTotalSize\n\t\tif qMaxTotalSize != \"\" {\n\t\t\tif err := r.SetQueryParam(\"MaxTotalSize\", qMaxTotalSize); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Name != nil {\n\n\t\t// query param Name\n\t\tvar qrName string\n\t\tif o.Name != nil {\n\t\t\tqrName = *o.Name\n\t\t}\n\t\tqName := qrName\n\t\tif qName != \"\" {\n\t\t\tif err := r.SetQueryParam(\"Name\", qName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param Uuid\n\tif err := r.SetPathParam(\"Uuid\", o.UUID); err != nil {\n\t\treturn err\n\t}\n\n\tif o.VersionsDataSourceBucket != nil {\n\n\t\t// query param VersionsDataSourceBucket\n\t\tvar qrVersionsDataSourceBucket string\n\t\tif o.VersionsDataSourceBucket != nil {\n\t\t\tqrVersionsDataSourceBucket = *o.VersionsDataSourceBucket\n\t\t}\n\t\tqVersionsDataSourceBucket := qrVersionsDataSourceBucket\n\t\tif qVersionsDataSourceBucket != \"\" {\n\t\t\tif err := r.SetQueryParam(\"VersionsDataSourceBucket\", qVersionsDataSourceBucket); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.VersionsDataSourceName != nil {\n\n\t\t// query param VersionsDataSourceName\n\t\tvar qrVersionsDataSourceName string\n\t\tif o.VersionsDataSourceName != nil {\n\t\t\tqrVersionsDataSourceName = *o.VersionsDataSourceName\n\t\t}\n\t\tqVersionsDataSourceName := qrVersionsDataSourceName\n\t\tif qVersionsDataSourceName != \"\" {\n\t\t\tif err := r.SetQueryParam(\"VersionsDataSourceName\", qVersionsDataSourceName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetBuildPropertiesParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param api-version\n\tqrAPIVersion := o.APIVersion\n\tqAPIVersion := qrAPIVersion\n\tif qAPIVersion != \"\" {\n\t\tif err := r.SetQueryParam(\"api-version\", qAPIVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param buildId\n\tif err := r.SetPathParam(\"buildId\", swag.FormatInt32(o.BuildID)); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Filter != nil {\n\n\t\t// query param filter\n\t\tvar qrFilter string\n\t\tif o.Filter != nil {\n\t\t\tqrFilter = *o.Filter\n\t\t}\n\t\tqFilter := qrFilter\n\t\tif qFilter != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filter\", qFilter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// path param organization\n\tif err := r.SetPathParam(\"organization\", o.Organization); err != nil {\n\t\treturn err\n\t}\n\n\t// path param project\n\tif err := r.SetPathParam(\"project\", o.Project); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *AdminGetBannedDevicesV4Params) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param namespace\n\tif err := r.SetPathParam(\"namespace\", o.Namespace); err != nil {\n\t\treturn err\n\t}\n\n\tif o.DeviceType != nil {\n\n\t\t// query param deviceType\n\t\tvar qrDeviceType string\n\t\tif o.DeviceType != nil {\n\t\t\tqrDeviceType = *o.DeviceType\n\t\t}\n\t\tqDeviceType := qrDeviceType\n\t\tif qDeviceType != \"\" {\n\t\t\tif err := r.SetQueryParam(\"deviceType\", qDeviceType); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.EndDate != nil {\n\n\t\t// query param endDate\n\t\tvar qrEndDate string\n\t\tif o.EndDate != nil {\n\t\t\tqrEndDate = *o.EndDate\n\t\t}\n\t\tqEndDate := qrEndDate\n\t\tif qEndDate != \"\" {\n\t\t\tif err := r.SetQueryParam(\"endDate\", qEndDate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Limit != nil {\n\n\t\t// query param limit\n\t\tvar qrLimit int64\n\t\tif o.Limit != nil {\n\t\t\tqrLimit = *o.Limit\n\t\t}\n\t\tqLimit := swag.FormatInt64(qrLimit)\n\t\tif qLimit != \"\" {\n\t\t\tif err := r.SetQueryParam(\"limit\", qLimit); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Offset != nil {\n\n\t\t// query param offset\n\t\tvar qrOffset int64\n\t\tif o.Offset != nil {\n\t\t\tqrOffset = *o.Offset\n\t\t}\n\t\tqOffset := swag.FormatInt64(qrOffset)\n\t\tif qOffset != \"\" {\n\t\t\tif err := r.SetQueryParam(\"offset\", qOffset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.StartDate != nil {\n\n\t\t// query param startDate\n\t\tvar qrStartDate string\n\t\tif o.StartDate != nil {\n\t\t\tqrStartDate = *o.StartDate\n\t\t}\n\t\tqStartDate := qrStartDate\n\t\tif qStartDate != \"\" {\n\t\t\tif err := r.SetQueryParam(\"startDate\", qStartDate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// setting the default header value\n\tif err := r.SetHeaderParam(\"User-Agent\", utils.UserAgentGen()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := r.SetHeaderParam(\"X-Amzn-Trace-Id\", utils.AmazonTraceIDGen()); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\n\treturn nil\n}",
"func (o *BikePointGetAllParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *DecryptParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif err := r.SetBodyParam(o.Parameters); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *DeleteRequestsRequestNameParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// query param api-version\n\tqrAPIVersion := o.APIVersion\n\tqAPIVersion := qrAPIVersion\n\tif qAPIVersion != \"\" {\n\t\tif err := r.SetQueryParam(\"api-version\", qAPIVersion); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param organization\n\tif err := r.SetPathParam(\"organization\", o.Organization); err != nil {\n\t\treturn err\n\t}\n\n\t// query param requestName\n\tqrRequestName := o.RequestName\n\tqRequestName := qrRequestName\n\tif qRequestName != \"\" {\n\t\tif err := r.SetQueryParam(\"requestName\", qRequestName); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Synchronous != nil {\n\n\t\t// query param synchronous\n\t\tvar qrSynchronous bool\n\t\tif o.Synchronous != nil {\n\t\t\tqrSynchronous = *o.Synchronous\n\t\t}\n\t\tqSynchronous := swag.FormatBool(qrSynchronous)\n\t\tif qSynchronous != \"\" {\n\t\t\tif err := r.SetQueryParam(\"synchronous\", qSynchronous); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *SearchAbsoluteParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Decorate != nil {\n\n\t\t// query param decorate\n\t\tvar qrDecorate bool\n\t\tif o.Decorate != nil {\n\t\t\tqrDecorate = *o.Decorate\n\t\t}\n\t\tqDecorate := swag.FormatBool(qrDecorate)\n\t\tif qDecorate != \"\" {\n\t\t\tif err := r.SetQueryParam(\"decorate\", qDecorate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Fields != nil {\n\n\t\t// query param fields\n\t\tvar qrFields string\n\t\tif o.Fields != nil {\n\t\t\tqrFields = *o.Fields\n\t\t}\n\t\tqFields := qrFields\n\t\tif qFields != \"\" {\n\t\t\tif err := r.SetQueryParam(\"fields\", qFields); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Filter != nil {\n\n\t\t// query param filter\n\t\tvar qrFilter string\n\t\tif o.Filter != nil {\n\t\t\tqrFilter = *o.Filter\n\t\t}\n\t\tqFilter := qrFilter\n\t\tif qFilter != \"\" {\n\t\t\tif err := r.SetQueryParam(\"filter\", qFilter); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param from\n\tqrFrom := o.From\n\tqFrom := qrFrom\n\tif qFrom != \"\" {\n\t\tif err := r.SetQueryParam(\"from\", qFrom); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Limit != nil {\n\n\t\t// query param limit\n\t\tvar qrLimit int64\n\t\tif o.Limit != nil {\n\t\t\tqrLimit = *o.Limit\n\t\t}\n\t\tqLimit := swag.FormatInt64(qrLimit)\n\t\tif qLimit != \"\" {\n\t\t\tif err := r.SetQueryParam(\"limit\", qLimit); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\tif o.Offset != nil {\n\n\t\t// query param offset\n\t\tvar qrOffset int64\n\t\tif o.Offset != nil {\n\t\t\tqrOffset = *o.Offset\n\t\t}\n\t\tqOffset := swag.FormatInt64(qrOffset)\n\t\tif qOffset != \"\" {\n\t\t\tif err := r.SetQueryParam(\"offset\", qOffset); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param query\n\tqrQuery := o.Query\n\tqQuery := qrQuery\n\tif qQuery != \"\" {\n\t\tif err := r.SetQueryParam(\"query\", qQuery); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif o.Sort != nil {\n\n\t\t// query param sort\n\t\tvar qrSort string\n\t\tif o.Sort != nil {\n\t\t\tqrSort = *o.Sort\n\t\t}\n\t\tqSort := qrSort\n\t\tif qSort != \"\" {\n\t\t\tif err := r.SetQueryParam(\"sort\", qSort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t}\n\n\t// query param to\n\tqrTo := o.To\n\tqTo := qrTo\n\tif qTo != \"\" {\n\t\tif err := r.SetQueryParam(\"to\", qTo); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *GetCountersParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.ClusterNodeID != nil {\n\n\t\t// query param clusterNodeId\n\t\tvar qrClusterNodeID string\n\n\t\tif o.ClusterNodeID != nil {\n\t\t\tqrClusterNodeID = *o.ClusterNodeID\n\t\t}\n\t\tqClusterNodeID := qrClusterNodeID\n\t\tif qClusterNodeID != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"clusterNodeId\", qClusterNodeID); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Nodewise != nil {\n\n\t\t// query param nodewise\n\t\tvar qrNodewise bool\n\n\t\tif o.Nodewise != nil {\n\t\t\tqrNodewise = *o.Nodewise\n\t\t}\n\t\tqNodewise := swag.FormatBool(qrNodewise)\n\t\tif qNodewise != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"nodewise\", qNodewise); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *MetroclusterInterconnectGetParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\t// path param adapter\n\tif err := r.SetPathParam(\"adapter\", o.Adapter); err != nil {\n\t\treturn err\n\t}\n\n\tif o.Fields != nil {\n\n\t\t// binding items for fields\n\t\tjoinedFields := o.bindParamFields(reg)\n\n\t\t// query array param fields\n\t\tif err := r.SetQueryParam(\"fields\", joinedFields...); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param node.uuid\n\tif err := r.SetPathParam(\"node.uuid\", o.NodeUUID); err != nil {\n\t\treturn err\n\t}\n\n\t// path param partner_type\n\tif err := r.SetPathParam(\"partner_type\", o.PartnerType); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *PutParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.Item != nil {\n\t\tif err := r.SetBodyParam(o.Item); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// path param itemId\n\tif err := r.SetPathParam(\"itemId\", o.ItemID); err != nil {\n\t\treturn err\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *CreateAccessPolicyParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\tif o.Body != nil {\n\t\tif err := r.SetBodyParam(o.Body); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}",
"func (o *DeleteDataSourceParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {\n\n\tif err := r.SetTimeout(o.timeout); err != nil {\n\t\treturn err\n\t}\n\tvar res []error\n\n\tif o.APIKey != nil {\n\n\t\t// query param ApiKey\n\t\tvar qrAPIKey string\n\n\t\tif o.APIKey != nil {\n\t\t\tqrAPIKey = *o.APIKey\n\t\t}\n\t\tqAPIKey := qrAPIKey\n\t\tif qAPIKey != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ApiKey\", qAPIKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.APISecret != nil {\n\n\t\t// query param ApiSecret\n\t\tvar qrAPISecret string\n\n\t\tif o.APISecret != nil {\n\t\t\tqrAPISecret = *o.APISecret\n\t\t}\n\t\tqAPISecret := qrAPISecret\n\t\tif qAPISecret != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ApiSecret\", qAPISecret); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.CreationDate != nil {\n\n\t\t// query param CreationDate\n\t\tvar qrCreationDate int32\n\n\t\tif o.CreationDate != nil {\n\t\t\tqrCreationDate = *o.CreationDate\n\t\t}\n\t\tqCreationDate := swag.FormatInt32(qrCreationDate)\n\t\tif qCreationDate != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"CreationDate\", qCreationDate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Disabled != nil {\n\n\t\t// query param Disabled\n\t\tvar qrDisabled bool\n\n\t\tif o.Disabled != nil {\n\t\t\tqrDisabled = *o.Disabled\n\t\t}\n\t\tqDisabled := swag.FormatBool(qrDisabled)\n\t\tif qDisabled != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"Disabled\", qDisabled); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.EncryptionKey != nil {\n\n\t\t// query param EncryptionKey\n\t\tvar qrEncryptionKey string\n\n\t\tif o.EncryptionKey != nil {\n\t\t\tqrEncryptionKey = *o.EncryptionKey\n\t\t}\n\t\tqEncryptionKey := qrEncryptionKey\n\t\tif qEncryptionKey != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"EncryptionKey\", qEncryptionKey); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.EncryptionMode != nil {\n\n\t\t// query param EncryptionMode\n\t\tvar qrEncryptionMode string\n\n\t\tif o.EncryptionMode != nil {\n\t\t\tqrEncryptionMode = *o.EncryptionMode\n\t\t}\n\t\tqEncryptionMode := qrEncryptionMode\n\t\tif qEncryptionMode != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"EncryptionMode\", qEncryptionMode); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.FlatStorage != nil {\n\n\t\t// query param FlatStorage\n\t\tvar qrFlatStorage bool\n\n\t\tif o.FlatStorage != nil {\n\t\t\tqrFlatStorage = *o.FlatStorage\n\t\t}\n\t\tqFlatStorage := swag.FormatBool(qrFlatStorage)\n\t\tif qFlatStorage != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"FlatStorage\", qFlatStorage); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.LastSynchronizationDate != nil {\n\n\t\t// query param LastSynchronizationDate\n\t\tvar qrLastSynchronizationDate int32\n\n\t\tif o.LastSynchronizationDate != nil {\n\t\t\tqrLastSynchronizationDate = *o.LastSynchronizationDate\n\t\t}\n\t\tqLastSynchronizationDate := swag.FormatInt32(qrLastSynchronizationDate)\n\t\tif qLastSynchronizationDate != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"LastSynchronizationDate\", qLastSynchronizationDate); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// path param Name\n\tif err := r.SetPathParam(\"Name\", o.Name); err != nil {\n\t\treturn err\n\t}\n\n\tif o.ObjectsBaseFolder != nil {\n\n\t\t// query param ObjectsBaseFolder\n\t\tvar qrObjectsBaseFolder string\n\n\t\tif o.ObjectsBaseFolder != nil {\n\t\t\tqrObjectsBaseFolder = 
*o.ObjectsBaseFolder\n\t\t}\n\t\tqObjectsBaseFolder := qrObjectsBaseFolder\n\t\tif qObjectsBaseFolder != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ObjectsBaseFolder\", qObjectsBaseFolder); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.ObjectsBucket != nil {\n\n\t\t// query param ObjectsBucket\n\t\tvar qrObjectsBucket string\n\n\t\tif o.ObjectsBucket != nil {\n\t\t\tqrObjectsBucket = *o.ObjectsBucket\n\t\t}\n\t\tqObjectsBucket := qrObjectsBucket\n\t\tif qObjectsBucket != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ObjectsBucket\", qObjectsBucket); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.ObjectsHost != nil {\n\n\t\t// query param ObjectsHost\n\t\tvar qrObjectsHost string\n\n\t\tif o.ObjectsHost != nil {\n\t\t\tqrObjectsHost = *o.ObjectsHost\n\t\t}\n\t\tqObjectsHost := qrObjectsHost\n\t\tif qObjectsHost != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ObjectsHost\", qObjectsHost); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.ObjectsPort != nil {\n\n\t\t// query param ObjectsPort\n\t\tvar qrObjectsPort int32\n\n\t\tif o.ObjectsPort != nil {\n\t\t\tqrObjectsPort = *o.ObjectsPort\n\t\t}\n\t\tqObjectsPort := swag.FormatInt32(qrObjectsPort)\n\t\tif qObjectsPort != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ObjectsPort\", qObjectsPort); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.ObjectsSecure != nil {\n\n\t\t// query param ObjectsSecure\n\t\tvar qrObjectsSecure bool\n\n\t\tif o.ObjectsSecure != nil {\n\t\t\tqrObjectsSecure = *o.ObjectsSecure\n\t\t}\n\t\tqObjectsSecure := swag.FormatBool(qrObjectsSecure)\n\t\tif qObjectsSecure != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ObjectsSecure\", qObjectsSecure); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.ObjectsServiceName != nil {\n\n\t\t// query param ObjectsServiceName\n\t\tvar qrObjectsServiceName string\n\n\t\tif o.ObjectsServiceName != nil {\n\t\t\tqrObjectsServiceName = *o.ObjectsServiceName\n\t\t}\n\t\tqObjectsServiceName := qrObjectsServiceName\n\t\tif qObjectsServiceName != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"ObjectsServiceName\", qObjectsServiceName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.PeerAddress != nil {\n\n\t\t// query param PeerAddress\n\t\tvar qrPeerAddress string\n\n\t\tif o.PeerAddress != nil {\n\t\t\tqrPeerAddress = *o.PeerAddress\n\t\t}\n\t\tqPeerAddress := qrPeerAddress\n\t\tif qPeerAddress != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"PeerAddress\", qPeerAddress); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.SkipSyncOnRestart != nil {\n\n\t\t// query param SkipSyncOnRestart\n\t\tvar qrSkipSyncOnRestart bool\n\n\t\tif o.SkipSyncOnRestart != nil {\n\t\t\tqrSkipSyncOnRestart = *o.SkipSyncOnRestart\n\t\t}\n\t\tqSkipSyncOnRestart := swag.FormatBool(qrSkipSyncOnRestart)\n\t\tif qSkipSyncOnRestart != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"SkipSyncOnRestart\", qSkipSyncOnRestart); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.StorageType != nil {\n\n\t\t// query param StorageType\n\t\tvar qrStorageType string\n\n\t\tif o.StorageType != nil {\n\t\t\tqrStorageType = *o.StorageType\n\t\t}\n\t\tqStorageType := qrStorageType\n\t\tif qStorageType != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"StorageType\", qStorageType); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.VersioningPolicyName != nil {\n\n\t\t// query param VersioningPolicyName\n\t\tvar qrVersioningPolicyName string\n\n\t\tif o.VersioningPolicyName != nil {\n\t\t\tqrVersioningPolicyName = 
*o.VersioningPolicyName\n\t\t}\n\t\tqVersioningPolicyName := qrVersioningPolicyName\n\t\tif qVersioningPolicyName != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"VersioningPolicyName\", qVersioningPolicyName); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif o.Watch != nil {\n\n\t\t// query param Watch\n\t\tvar qrWatch bool\n\n\t\tif o.Watch != nil {\n\t\t\tqrWatch = *o.Watch\n\t\t}\n\t\tqWatch := swag.FormatBool(qrWatch)\n\t\tif qWatch != \"\" {\n\n\t\t\tif err := r.SetQueryParam(\"Watch\", qWatch); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tif len(res) > 0 {\n\t\treturn errors.CompositeValidationError(res...)\n\t}\n\treturn nil\n}"
] | [
"0.7197684",
"0.7143436",
"0.70474225",
"0.70220006",
"0.69955575",
"0.69946444",
"0.697959",
"0.6978197",
"0.69692016",
"0.69657797",
"0.6923328",
"0.6907898",
"0.6903574",
"0.68703294",
"0.6857265",
"0.68562853",
"0.685186",
"0.68445694",
"0.6843865",
"0.68413115",
"0.68303764",
"0.6827786",
"0.68212545",
"0.6820409",
"0.68184274",
"0.680905",
"0.6793689",
"0.67928916",
"0.67853534",
"0.67797935",
"0.67771256",
"0.6773061",
"0.67608666",
"0.6760539",
"0.675987",
"0.67562747",
"0.67498136",
"0.6743112",
"0.6742901",
"0.67413497",
"0.6740198",
"0.67341185",
"0.6732634",
"0.6732555",
"0.6731272",
"0.6725358",
"0.6724313",
"0.67145455",
"0.6711667",
"0.67090523",
"0.6708381",
"0.67050636",
"0.6702181",
"0.66984284",
"0.6693375",
"0.66914546",
"0.66848385",
"0.6676924",
"0.6671247",
"0.6670448",
"0.66692716",
"0.6665742",
"0.66653264",
"0.6664705",
"0.6662263",
"0.66560906",
"0.665296",
"0.66462785",
"0.66459423",
"0.66420376",
"0.6640142",
"0.66379446",
"0.6634022",
"0.66336113",
"0.66254157",
"0.6621184",
"0.66182345",
"0.6618022",
"0.6613641",
"0.6606297",
"0.660465",
"0.6602635",
"0.65988183",
"0.6598189",
"0.6595584",
"0.6594363",
"0.6591284",
"0.6586186",
"0.65816593",
"0.6581576",
"0.6574345",
"0.6571894",
"0.6569136",
"0.6567681",
"0.6566635",
"0.65652066",
"0.6564413",
"0.65621674",
"0.6561576",
"0.6560705",
"0.65563494"
] | 0.0 | -1 |
NewParkingLotUsecase will create a new parkingLotUsecase object that represents the domain.ParkingLotUsecase interface | func NewParkingLotUsecase(a domain.ParkingLotRepository, timeout time.Duration) domain.ParkingLotUsecase {
return &parkingLotUsecase{
parkingLotRepo: a,
contextTimeout: timeout,
}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func NewBusinessScenarioPlanner()(*BusinessScenarioPlanner) {\n m := &BusinessScenarioPlanner{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewTrainCar() TrainCar {\n c := TrainCar{name: \"TrainCar\", vehicle: \"TrainCar\", speed: 30, capacity: 30, railway: \"CNR\"}\n return c\n}",
"func NewPlanner()(*Planner) {\n m := &Planner{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewPickUp() Pickup {\n p := Pickup{name: \"Pickup\", vehicle: \"Pickup\", speed: 60, capacity: 2, isPrivate: true}\n return p\n}",
"func newTicket(\n\tbeaconOutput []byte, // V_i\n\tstakerValue []byte, // Q_j\n\tvirtualStakerIndex *big.Int, // vs\n) (*ticket, error) {\n\tvalue, err := calculateTicketValue(beaconOutput, stakerValue, virtualStakerIndex)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"ticket value calculation failed [%v]\", err)\n\t}\n\n\treturn &ticket{\n\t\tvalue: value,\n\t\tproof: &proof{\n\t\t\tstakerValue: stakerValue,\n\t\t\tvirtualStakerIndex: virtualStakerIndex,\n\t\t},\n\t}, nil\n}",
"func newTask(jobID *peloton.JobID, id uint32, jobFactory *jobFactory, jobType pbjob.JobType) *task {\n\ttask := &task{\n\t\tjobID: jobID,\n\t\tid: id,\n\t\tjobType: jobType,\n\t\tjobFactory: jobFactory,\n\t}\n\n\treturn task\n}",
"func newTruck(mk, mdl string) *truck {\n\treturn &truck{vehicle: vehicle{mk, mdl}}\n}",
"func NewUseCase(repo domain.Repository) *UseCase {\n\tuc := new(UseCase)\n\trepository = repo\n\treturn uc\n}",
"func (t *UseCase_UseCase) NewUseCase(Id string) (*UseCase_UseCase_UseCase, error){\n\n\t// Initialise the list within the receiver struct if it has not already been\n\t// created.\n\tif t.UseCase == nil {\n\t\tt.UseCase = make(map[string]*UseCase_UseCase_UseCase)\n\t}\n\n\tkey := Id\n\n\t// Ensure that this key has not already been used in the\n\t// list. Keyed YANG lists do not allow duplicate keys to\n\t// be created.\n\tif _, ok := t.UseCase[key]; ok {\n\t\treturn nil, fmt.Errorf(\"duplicate key %v for list UseCase\", key)\n\t}\n\n\tt.UseCase[key] = &UseCase_UseCase_UseCase{\n\t\tId: &Id,\n\t}\n\n\treturn t.UseCase[key], nil\n}",
"func NewUseCase(filepath string) (uc *UseCase, err error) {\n\tvar data []byte\n\n\tif data, err = ioutil.ReadFile(filepath); err != nil {\n\t\treturn\n\t}\n\tvar m map[string]interface{}\n\tvar p sen.Parser\n\tvar v interface{}\n\tif v, err = p.Parse(data); err != nil {\n\t\treturn\n\t}\n\tif m, _ = v.(map[string]interface{}); m == nil {\n\t\treturn nil, fmt.Errorf(\"expected a map, not a %T\", v)\n\t}\n\tuc = &UseCase{Filepath: filepath}\n\tif uc.Comment, err = asString(m[\"comment\"]); err != nil {\n\t\treturn\n\t}\n\tif err = uc.addSteps(m[\"steps\"]); err != nil {\n\t\treturn\n\t}\n\treturn\n}",
"func newPlane(mk, mdl string) *plane {\n\tp := &plane{}\n\tp.make = mk\n\tp.model = mdl\n\treturn p\n}",
"func New(r IRepository) *Usecase {\n\treturn &Usecase{\n\t\tr,\n\t}\n}",
"func NewBookingBusiness()(*BookingBusiness) {\n m := &BookingBusiness{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewPmUseCase(repo repository.PmRepository) PmUseCase {\n\treturn &pmUseCase{\n\t\tRepo: repo,\n\t}\n}",
"func New(db PlanData) Planner {\n\treturn Planner{\n\t\tdata: db,\n\t}\n}",
"func NewUseCase(repo Repository) UseCase {\n\treturn &useCase{\n\t\tRepo: repo,\n\t}\n}",
"func New(cl domain.ColumnRepository, t domain.TaskRepository, c domain.CommentRepository) domain.TaskUsecase {\n\treturn &taskUsecase{columnRepo: cl, taskRepo: t, commentRepo: c}\n}",
"func New(api API) *LotusChain {\n\treturn &LotusChain{\n\t\tapi: api,\n\t}\n}",
"func newWorkoutPlanFromFile(infile string) (workoutplan, error) {\n\tmyplan := workoutplan{}\n\tdat, err := ioutil.ReadFile(infile)\n\tif err != nil {\n\t\treturn myplan, err\n\t}\n\terr = yaml.Unmarshal([]byte(dat), &myplan)\n\tif err != nil {\n\t\treturn myplan, err\n\t}\n\treturn myplan, err\n}",
"func NewPoint(latitude float64, longitude float64) *Point {\n return &Point{latitude: latitude, longitude: longitude}\n}",
"func newJob(job Runnable, priority int) JobEntry {\n\treturn &pt{\n\t\tpriority: priority,\n\t\tjob: job,\n\t\tlock: &sync.Mutex{},\n\t}\n}",
"func newPerson(name string,class string, nationality string ) *Person {\n\treturn &Person{name: name,job: class, nationality: nationality}\n\n}",
"func newProvisioner(baseCtx context.Context, cl *restClient, callTimeout time.Duration) mode.Provisioner {\n\treturn provisioner{cl: cl, baseCtx: baseCtx, callTimeout: callTimeout}\n}",
"func NewPlan(obj objects.ObjectConfig) (objects.Object, error) {\n\treturn (&Plan{}).Parse(obj)\n}",
"func newLocation(lat, long coordinate) *location {\n\treturn &location{lat.decimal(), long.decimal()}\n}",
"func NewUseCase(repository user.Repository) *UserUseCase {\n\treturn &UserUseCase{\n\t\trepository,\n\t}\n}",
"func newScenario(name string) *Instruction {\n\treturn &Instruction{\n\t\tType: ScenarioInst,\n\t\tName: name,\n\t\tVersion: &Version{},\n\t}\n}",
"func (*ticketR) NewStruct() *ticketR {\n\treturn &ticketR{}\n}",
"func NewKnapsack(t mockConstructorTestingTNewKnapsack) *Knapsack {\n\tmock := &Knapsack{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func New(btcNode btc.BitcoinNode) btc.BitcoinUseCase {\n\treturn &BitcoinUsecase{\n\t\tnode: btcNode,\n\t}\n}",
"func (*trainingCostR) NewStruct() *trainingCostR {\n\treturn &trainingCostR{}\n}",
"func NewUsecase(repo Repository) Usecase {\n\treturn &usecase{\n\t\trepository: repo,\n\t}\n}",
"func newPiName(n string) Name {\n\treturn &piName{name: n, t: NewUnTyped()}\n}",
"func newCall(name string) *Instruction {\n\treturn &Instruction{\n\t\tType: CallInst,\n\t\tName: name,\n\t}\n}",
"func New(pg pgReservationClient) *ReservationUseCase {\n\treturn &ReservationUseCase{\n\t\tpg: pg,\n\t}\n}",
"func newPlayer(seat int, shoe *Shoe, cfg *Config, strategy Strategy, betAmount int) *Player {\n\tvar p Player\n\t// fmt.Println(\"in newPlayer\")\n\tp.seat = seat\n\tp.shoe = shoe\n\tp.cfg = cfg\n\tp.strategy = strategy\n\tp.betAmount = betAmount\n\treturn &p\n}",
"func NewParkingMap() *ParkingMap {\n\treturn &ParkingMap{\n\t\tEntry: []ParkingMapEntry{},\n\t}\n}",
"func (h *HUOBIHADAX) SpotNewOrder(arg SpotNewOrderRequestParams) (int64, error) {\n\tvals := make(map[string]string)\n\tvals[\"account-id\"] = fmt.Sprintf(\"%d\", arg.AccountID)\n\tvals[\"amount\"] = strconv.FormatFloat(arg.Amount, 'f', -1, 64)\n\n\t// Only set price if order type is not equal to buy-market or sell-market\n\tif arg.Type != SpotNewOrderRequestTypeBuyMarket && arg.Type != SpotNewOrderRequestTypeSellMarket {\n\t\tvals[\"price\"] = strconv.FormatFloat(arg.Price, 'f', -1, 64)\n\t}\n\n\tif arg.Source != \"\" {\n\t\tvals[\"source\"] = arg.Source\n\t}\n\n\tvals[\"symbol\"] = arg.Symbol\n\tvals[\"type\"] = string(arg.Type)\n\n\ttype response struct {\n\t\tResponse\n\t\tOrderID int64 `json:\"data,string\"`\n\t}\n\n\t// The API indicates that for the POST request, the parameters of each method are not signed and authenticated. That is, only the AccessKeyId, SignatureMethod, SignatureVersion, and Timestamp parameters are required for the POST request. The other parameters are placed in the body.\n\t// So re-encode the Post parameter\n\tbytesParams, _ := json.Marshal(vals)\n\tpostBodyParams := string(bytesParams)\n\tif h.Verbose {\n\t\tfmt.Println(\"Post params:\", postBodyParams)\n\t}\n\n\tvar result response\n\tendpoint := fmt.Sprintf(\"%s/%s\", huobihadaxAPIName, huobihadaxOrderPlace)\n\terr := h.SendAuthenticatedHTTPPostRequest(http.MethodPost, endpoint, postBodyParams, &result)\n\n\tif result.ErrorMessage != \"\" {\n\t\treturn 0, errors.New(result.ErrorMessage)\n\t}\n\treturn result.OrderID, err\n}",
"func New() {\n\ttypeOfProject()\n}",
"func NewPlan(t, s string, b []interface{}) Plan {\n\treturn Plan{\n\t\tTitle: t,\n\t\tButtons: b,\n\t\tSubTitle: s,\n\t}\n}",
"func newTask() task {\n\treturn task{}\n}",
"func Park(car Car) error {\n\tthis := GetInstance()\n\tif _, err := this.isparkingLotCreated(); err != nil {\n\t\treturn err\n\t}\n\n\t// Validate if the parking lot is already full or not\n\tif _, err := this.isParkingLotFull(); err != nil {\n\t\treturn err\n\t} else {\n\n\t\t// Validate if the car is already parked somewhere to check mistyped input\n\t\tif slot, _ := GetSlotNoForRegNo(car.GetRegNo(), false); slot == 0 {\n\t\t\t// Complexity : O(log n)\n\t\t\temptySlot := heap.Pop(&this.emptySlots)\n\t\t\tthis.mapRegNoToSlot(car.GetRegNo(), emptySlot.(int))\n\t\t\tthis.mapSlotToCar(emptySlot.(int), car)\n\t\t\tthis.mapColorToRegNo(car.GetColor(), car.GetRegNo())\n\t\t\tfmt.Println(\"Allocated slot number: \" + strconv.Itoa(emptySlot.(int)))\n\t\t\treturn nil\n\t\t} else {\n\t\t\terr := errors.New(\"Car with this registration number already parked at slot: \" + strconv.Itoa(slot))\n\t\t\treturn err\n\t\t}\n\n\t}\n}",
"func NewFoo() *Foo {\n return &Foo{}\n}",
"func NewPayment(a payment.Repository, timeout time.Duration) payment.Usecase {\n\treturn &paymentUsecase{\n\t\trepo: a,\n\t\tcontextTimeout: timeout,\n\t}\n}",
"func NewProtocol(bc blockchain.Blockchain) *Protocol { return &Protocol{bc} }",
"func (h *HUOBI) SpotNewOrder(ctx context.Context, arg *SpotNewOrderRequestParams) (int64, error) {\n\tsymbolValue, err := h.FormatSymbol(arg.Symbol, asset.Spot)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tdata := struct {\n\t\tAccountID int `json:\"account-id,string\"`\n\t\tAmount string `json:\"amount\"`\n\t\tPrice string `json:\"price\"`\n\t\tSource string `json:\"source\"`\n\t\tSymbol string `json:\"symbol\"`\n\t\tType string `json:\"type\"`\n\t}{\n\t\tAccountID: arg.AccountID,\n\t\tAmount: strconv.FormatFloat(arg.Amount, 'f', -1, 64),\n\t\tSymbol: symbolValue,\n\t\tType: string(arg.Type),\n\t}\n\n\t// Only set price if order type is not equal to buy-market or sell-market\n\tif arg.Type != SpotNewOrderRequestTypeBuyMarket && arg.Type != SpotNewOrderRequestTypeSellMarket {\n\t\tdata.Price = strconv.FormatFloat(arg.Price, 'f', -1, 64)\n\t}\n\n\tif arg.Source != \"\" {\n\t\tdata.Source = arg.Source\n\t}\n\n\tresult := struct {\n\t\tOrderID int64 `json:\"data,string\"`\n\t}{}\n\terr = h.SendAuthenticatedHTTPRequest(ctx,\n\t\texchange.RestSpot,\n\t\thttp.MethodPost,\n\t\thuobiOrderPlace,\n\t\tnil,\n\t\tdata,\n\t\t&result,\n\t\tfalse,\n\t)\n\treturn result.OrderID, err\n}",
"func (s *MarketplaceListingService) New(data MarketplaceListingEdit) (*MarketplaceListingNew, *Response, error) {\n\tGetAccessToken()\n\n\tu := fmt.Sprintf(\"http://api.discogs.com/marketplace/listings\")\n\n\tbody, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tbodyString := string(body)\n\tresponse, err := oauthConsumer.Post(u, nil, accessToken, bodyString)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer response.Body.Close()\n\n\tvar f *MarketplaceListingNew\n\tbits, _ := ioutil.ReadAll(response.Body)\n\t_ = json.Unmarshal(bits, &f)\n\n\treturn f, nil, err\n}",
"func NewOffer(sup Supplier) *Transaction {\n\treturn &Transaction{\n\t\ttp: Offer,\n\t\tSup: sup,\n\t}\n}",
"func newStep(id string) *Step {\n\treturn &Step{\n\t\tProperties: &td.StepProperties{\n\t\t\tId: id,\n\t\t},\n\t}\n}",
"func NewprovinceUsecase(userAdminUsecase user_admin.Usecase,provinceRepo province.Repository, timeout time.Duration) province.Usecase {\n\treturn &provinceUsecase{\n\t\tuserAdminUsecase:userAdminUsecase,\n\t\tprovinceRepo: provinceRepo,\n\t\tcontextTimeout: timeout,\n\t}\n}",
"func NewBookingWorkTimeSlot()(*BookingWorkTimeSlot) {\n m := &BookingWorkTimeSlot{\n }\n m.backingStore = ie8677ce2c7e1b4c22e9c3827ecd078d41185424dd9eeb92b7d971ed2d49a392e.BackingStoreFactoryInstance();\n m.SetAdditionalData(make(map[string]any))\n return m\n}",
"func expectedNewInstance(jobID, datasetID string) *dataset.NewInstance {\n\tnewInstance := &dataset.NewInstance{\n\t\tLinks: &dataset.Links{\n\t\t\tDataset: dataset.Link{\n\t\t\t\tURL: \"http://localhost:22000/datasets/\" + datasetID,\n\t\t\t\tID: datasetID,\n\t\t\t},\n\t\t\tJob: dataset.Link{\n\t\t\t\tURL: \"http://import-api/jobs/\" + jobID,\n\t\t\t\tID: jobID,\n\t\t\t},\n\t\t},\n\t\tDimensions: []dataset.CodeList{},\n\t\tImportTasks: &dataset.InstanceImportTasks{\n\t\t\tImportObservations: &dataset.ImportObservationsTask{\n\t\t\t\tState: dataset.StateCreated.String(),\n\t\t\t},\n\t\t\tBuildHierarchyTasks: []*dataset.BuildHierarchyTask{},\n\t\t\tBuildSearchIndexTasks: []*dataset.BuildSearchIndexTask{},\n\t\t},\n\t\tType: \"cantabular_blob\",\n\t}\n\tif datasetID == \"dataset1\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist11\"}, {ID: \"codelist12\"}}\n\t\tnewInstance.LowestGeography = \"lowest_geo\"\n\t} else if datasetID == \"dataset2\" {\n\t\tnewInstance.Dimensions = []dataset.CodeList{{ID: \"codelist21\"}, {ID: \"codelist22\"}, {ID: \"codelist23\"}}\n\t}\n\treturn newInstance\n}",
"func newLeaf(p *Point) *leaf {\n\treturn &leaf{\n\t\tpoint: p,\n\t}\n}",
"func makeNewGame(name string, playerNames []string) *Game {\n\tvar g = new(Game)\n\tid, err := uuid.GenUUID()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tg.ID = id\n\tg.Name = name\n\tg.Messages.Capacity = 500\n\tg.Phase = Development\n\tGames[g.ID] = g\n\tg.addMessage(fmt.Sprintf(\"Created game %s...\", g.Name))\n\tg.loadLocos()\n\tg.prepareLocos()\n\tg.initPlayers(playerNames)\n\tg.determineTurnOrder()\n\n\treturn g\n}",
"func New(w http.ResponseWriter, r *http.Request) {\r\n\ttmpl.ExecuteTemplate(w, \"New\", nil)\r\n}",
"func New(w http.ResponseWriter, r *http.Request) {\r\n\ttmpl.ExecuteTemplate(w, \"New\", nil)\r\n}",
"func (p PassportElementRentalAgreement) construct() PassportElementClass { return &p }",
"func New(input io.Reader) Solution {\n\treturn Solution{Passports: parse(input)}\n}",
"func newWallet(cfg *BTCCloneCFG, node rpcClient) *ExchangeWallet {\n\treturn &ExchangeWallet{\n\t\tnode: node,\n\t\twallet: newWalletClient(node, cfg.ChainParams),\n\t\tsymbol: cfg.Symbol,\n\t\tchainParams: cfg.ChainParams,\n\t\tlog: cfg.Logger,\n\t\ttradeChange: make(map[string]time.Time),\n\t\ttipChange: cfg.WalletCFG.TipChange,\n\t\tfundingCoins: make(map[string]*compositeUTXO),\n\t\tminNetworkVersion: cfg.MinNetworkVersion,\n\t\tfallbackFeeRate: cfg.WalletCFG.FallbackFeeRate,\n\t\twalletInfo: cfg.WalletInfo,\n\t}\n}",
"func newVertex(x, y, theta, v, w float64, parent *Vertex) *Vertex {\n\treturn &Vertex{Point{X: x, Y: y, Theta: theta, V: v, W: w}, parent, nil}\n}",
"func (t *MCTS) New(move int32, score float32) (retVal Naughty) {\n\tn := t.alloc()\n\tN := t.nodeFromNaughty(n)\n\tN.lock.Lock()\n\tdefer N.lock.Unlock()\n\tN.move = move\n\tN.visits = 1\n\tN.status = uint32(Active)\n\tN.qsa = 0\n\tN.psa = score\n\n\treturn n\n}",
"func NewPlan() *Plan {\n\treturn &Plan{\n\t\tNodeMap: make(map[string]*GraphNode),\n\t}\n}",
"func NewPi(label string, t Term, body Term) Pi {\n\treturn Pi{\n\t\tLabel: label,\n\t\tType: t,\n\t\tBody: body,\n\t}\n}",
"func (e Department) EntNew() ent.Ent { return &Department{} }",
"func newPerson(name string, age int) *Person {\n\tnewperson := Person{name: name, age: age}\n\treturn &newperson\n}",
"func New() prov.Provisioner {\n\tx\n}",
"func New(repo itemRepo.Repository, tc time.Duration) uItem.Usecase {\n\treturn &itemUsecase{\n\t\trepo: repo,\n\t\ttimeoutContext: tc,\n\t}\n}",
"func (b *BallotBox) newFilledBallot(voter types.Partition, vote uint64) ballot {\n\treturn ballot{\n\t\tfrom: voter,\n\t\ttimestamp: vote,\n\t}\n}",
"func NewAllocationChange(t mockConstructorTestingTNewAllocationChange) *AllocationChange {\n\tmock := &AllocationChange{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}",
"func NewSynchronizationJob()(*SynchronizationJob) {\n m := &SynchronizationJob{\n Entity: *NewEntity(),\n }\n return m\n}",
"func (s *AbuseService) NewTicket(ticket *AbuseTicketCreate) (*AbuseTicketID, error) {\n\tvar id = new(AbuseTicketID)\n\n\tenc, err := json.Marshal(ticket)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdata, err := s.client.Post(\"/v1/abuse/tickets\", enc)\n\tif err != nil {\n\t\treturn id, err\n\t}\n\n\terr = json.Unmarshal(data, &id)\n\n\treturn id, err\n}",
"func (p PassportElementUtilityBill) construct() PassportElementClass { return &p }",
"func NewPoke() *Poke {\n\n\treturn &Poke{\n\t\tModelVersion: 1,\n\t}\n}",
"func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point {\n\tswitch v.Aggregation.Type {\n\tcase view.AggTypeLastValue:\n\t\treturn newGaugePoint(v, row, end)\n\tdefault:\n\t\treturn newCumulativePoint(v, row, start, end)\n\t}\n}",
"func New(opts ...Option) staking.Contract {\n\tbs := &stakingContractMock{}\n\n\tfor _, o := range opts {\n\t\to(bs)\n\t}\n\n\treturn bs\n}",
"func newTestingWallet(testdir string, cs modules.ConsensusSet, tp modules.TransactionPool) (modules.Wallet, error) {\n\tw, err := modWallet.New(cs, tp, filepath.Join(testdir, modules.WalletDir), modules.DefaultAddressGapLimit, false)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey := crypto.GenerateSiaKey(crypto.TypeDefaultWallet)\n\tencrypted, err := w.Encrypted()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !encrypted {\n\t\t_, err = w.Encrypt(key)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\terr = w.Unlock(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\t// give it some money\n\tm, err := miner.New(cs, tp, w, filepath.Join(testdir, modules.MinerDir))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfor i := types.BlockHeight(0); i <= types.MaturityDelay; i++ {\n\t\t_, err := m.AddBlock()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn w, nil\n}",
"func newPerson(name string) *person {\n\treturn &person{name: name}\n}",
"func NewTrain(id int, maxSpeed int, capacity int, startPoint int, n int) *Train {\n\ttrain := new(Train)\n\n\ttrain.id = id\n\ttrain.maxSpeed = maxSpeed\n\ttrain.capacity = capacity\n\ttrain.startPoint = startPoint\n\ttrain.posType = POS_NONE\n\ttrain.posID = 0\n\n\ttrain.route = make([]int, n)\n\tfor i := 0; i < len(train.route); i++ {\n\t\ttrain.route[i] = 0\n\t}\n\n\ttrain.curRoute = 0\n\n\treturn train\n}",
"func (t *BPTree) newLeaf() *Node {\n\tleaf := t.newNode()\n\tleaf.isLeaf = true\n\treturn leaf\n}",
"func New() *Beeper { return &Beeper{} }",
"func newTubePool(address string, connConfigData connConfig) *tubePool {\n\treturn newTubePoolWithOptions(address, defaultTubePoolOptions, connConfigData)\n}",
"func newWrapper(db *DB) *Wrapper {\n\treturn &Wrapper{executable: true, db: db, Timestamp: &Timestamp{}}\n}",
"func (tree *Tree23) newLeaf(elem TreeElement, prev, next TreeNodeIndex) TreeNodeIndex {\n\n\tn := tree.newNode()\n\n\ttree.treeNodes[n].cCount = 0\n\ttree.treeNodes[n].elem = elem\n\ttree.treeNodes[n].prev = prev\n\ttree.treeNodes[n].next = next\n\n\treturn n\n}",
"func NewAddress(street string) *Address {\n // Just return a dummy for STUB\n return &Address{}\n}",
"func (t *task) newOperation(bq *InMemoryBuildQueue, priority int32, i *invocation, mayExistWithoutWaiters bool) *operation {\n\to := &operation{\n\t\tname: uuid.Must(bq.uuidGenerator()).String(),\n\t\ttask: t,\n\t\tpriority: priority,\n\t\tinvocation: i,\n\t\tmayExistWithoutWaiters: mayExistWithoutWaiters,\n\t\tqueueIndex: -1,\n\t}\n\tif _, ok := t.operations[i]; ok {\n\t\tpanic(\"Task is already associated with this invocation\")\n\t}\n\tt.operations[i] = o\n\tbq.operationsNameMap[o.name] = o\n\treturn o\n}",
"func NewPlannerAssignedToTaskBoardTaskFormat()(*PlannerAssignedToTaskBoardTaskFormat) {\n m := &PlannerAssignedToTaskBoardTaskFormat{\n Entity: *NewEntity(),\n }\n return m\n}",
"func (p PassportElementPersonalDetails) construct() PassportElementClass { return &p }",
"func (c *InputService22ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {\n\treq := c.NewRequest(op, params, data)\n\n\treturn req\n}",
"func newPod(name string) *corev1.Pod {\n\treturn &corev1.Pod{\n\t\tTypeMeta: metav1.TypeMeta{},\n\t\tObjectMeta: metav1.ObjectMeta{Name: name},\n\t\tSpec: corev1.PodSpec{},\n\t\tStatus: corev1.PodStatus{},\n\t}\n}",
"func (*recipeLipidR) NewStruct() *recipeLipidR {\n\treturn &recipeLipidR{}\n}",
"func newSpawner(spawnerType SpawnerType) spawner {\n\tswitch spawnerType {\n\tcase NsEnter:\n\t\treturn &nsenter{}\n\tdefault:\n\t\treturn nil\n\t}\n}",
"func NewUsecase(logger *logrus.Logger) *Usecase {\n\treturn &Usecase{Logger: logger}\n}",
"func New(w http.ResponseWriter, r *http.Request) {\n\tgetTemplates().ExecuteTemplate(w, \"New\", nil)\n}",
"func PracticeMakeAndNew() {\n\tpracticeMake()\n\tpracticeNew()\n}",
"func newIn(driver *Driver, number int, name string) drivers.In {\n\treturn &in{driver: driver, number: number, name: name}\n}",
"func newTaskBuilder(b *jobBuilder, name string) *taskBuilder {\n\tparts, err := b.jobNameSchema.ParseJobName(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn &taskBuilder{\n\t\tjobBuilder: b,\n\t\tparts: parts,\n\t\tName: name,\n\t\tSpec: &specs.TaskSpec{},\n\t\trecipeProperties: map[string]string{},\n\t}\n}",
"func NewPar(P, Q Process) *Par { return &Par{Procs: []Process{P, Q}} }",
"func NewAccessPackage()(*AccessPackage) {\n m := &AccessPackage{\n Entity: *NewEntity(),\n }\n return m\n}",
"func NewMock(t *testing.T) *MockT { return &MockT{t: t} }",
"func New() Go { return Go{} }"
] | [
"0.5842782",
"0.57481986",
"0.56187475",
"0.5570555",
"0.543314",
"0.53436655",
"0.52992886",
"0.52857757",
"0.52671325",
"0.5232486",
"0.51309997",
"0.5117802",
"0.51166123",
"0.5093931",
"0.50716466",
"0.5067203",
"0.5049016",
"0.5048438",
"0.5003215",
"0.5000063",
"0.4989681",
"0.49773738",
"0.4975412",
"0.4973304",
"0.49672017",
"0.49665466",
"0.49523875",
"0.49338153",
"0.49306884",
"0.49220547",
"0.49209192",
"0.49162772",
"0.49018797",
"0.489493",
"0.489105",
"0.48722494",
"0.48640838",
"0.48611897",
"0.48565778",
"0.48507187",
"0.4845826",
"0.48457867",
"0.48344612",
"0.4821662",
"0.48164824",
"0.480392",
"0.47927547",
"0.47895637",
"0.47871915",
"0.47711673",
"0.47669446",
"0.47596276",
"0.47468936",
"0.4746516",
"0.47459874",
"0.47459874",
"0.47430933",
"0.47384992",
"0.4737668",
"0.47172144",
"0.4714323",
"0.4705315",
"0.46989512",
"0.46986723",
"0.46951488",
"0.469438",
"0.4693681",
"0.46902686",
"0.4687018",
"0.46828848",
"0.46762216",
"0.46719015",
"0.46718687",
"0.46685153",
"0.46627316",
"0.46594372",
"0.46437606",
"0.46408832",
"0.4640322",
"0.46378782",
"0.46372104",
"0.46347675",
"0.4629627",
"0.46260744",
"0.4622197",
"0.46119624",
"0.46092185",
"0.46088853",
"0.46064234",
"0.46059123",
"0.46007273",
"0.46005836",
"0.45957962",
"0.45947435",
"0.45942134",
"0.45941406",
"0.45934534",
"0.45918348",
"0.45894432",
"0.45887566"
] | 0.7265965 | 0 |
Err returns the last error encountered while paging. | func (p *DeploymentScriptsListByResourceGroupPager) Err() error {
return p.err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *PaginatedResult) Err() error {\n\treturn p.err\n}",
"func (p *SmartGroupsClientGetAllPager) Err() error {\n\treturn p.err\n}",
"func (p *AlertsClientGetAllPager) Err() error {\n\treturn p.err\n}",
"func (p *AzureFirewallsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkResourcesListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkResourcesListByResourcePager) Err() error {\n\treturn p.err\n}",
"func (p *LinkedServerListPager) Err() error {\n\treturn p.err\n}",
"func (p *DomainTopicsListByDomainPager) Err() error {\n\treturn p.err\n}",
"func (p *FirewallRulesListPager) Err() error {\n\treturn p.err\n}",
"func (p *AzureFirewallsClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *DomainsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *UsagesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerNamespacesListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerOutboundRulesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointConnectionsListByResourcePager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewaysClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *FirewallPoliciesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *NamespacesListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *NamespacesListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualAppliancesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *NatGatewaysClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *ProfilesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewayPrivateLinkResourcesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancersClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *NatRulesClientListByVPNGatewayPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerLoadBalancingRulesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerTopicsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *OperationsListPager) Err() error {\n\treturn p.err\n}",
"func (p *OperationsListPager) Err() error {\n\treturn p.err\n}",
"func (p *OperationsListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkServicesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *ReservationClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkPeeringsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *HybridConnectionsListByNamespacePager) Err() error {\n\treturn p.err\n}",
"func (p *SubnetsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkScopesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerNamespacesListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *DomainsListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *FirewallPoliciesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualHubsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *WebApplicationFirewallPoliciesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *WebApplicationFirewallPoliciesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *SecurityGroupsClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkResourcesClientListByPrivateLinkScopePager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationSecurityGroupsClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *InterfacesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualAppliancesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworksClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *RouteTablesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *CustomDomainsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewaysClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancersClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PoolsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *AccountsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *AccountsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *NatGatewaysClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkScopesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerBackendAddressPoolsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *CertificatesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *BindingsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerTopicsListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *AppsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayPager) Err() error {\n\treturn p.err\n}",
"func (p *VPNSitesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *AccountsClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *StoragesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VPNSiteLinksClientListByVPNSitePager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkLinksClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerRegistrationsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *TopicsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualRouterPeeringsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualWansClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *InboundNatRulesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *DeletedAccountsClientListPager) Err() error {\n\treturn p.err\n}",
"func (iter *DeleteListIterator) Err() error {\n\treturn iter.Paginator.Err()\n}",
"func (p *CachesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *MachinesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *EventSubscriptionsListByDomainTopicPager) Err() error {\n\treturn p.err\n}",
"func (p *GatewayCustomDomainsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *InterfaceLoadBalancersClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VPNSitesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *GatewaysClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *SystemTopicsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *ServicesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *NamespacesListAuthorizationRulesPager) Err() error {\n\treturn p.err\n}",
"func (p *UsageModelsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *WCFRelaysListByNamespacePager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualApplianceSitesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *TableClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *ConfigurationServicesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointConnectionsClientListByPrivateLinkScopePager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewayPrivateEndpointConnectionsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *EventSubscriptionsListByResourcePager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateZonesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerNetworkInterfacesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerProbesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointsClientListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *ProfilesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkServicesClientListPrivateEndpointConnectionsPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkTapsClientListAllPager) Err() error {\n\treturn p.err\n}"
] | [
"0.71781874",
"0.7132234",
"0.71245384",
"0.7120786",
"0.7088907",
"0.7079575",
"0.70702946",
"0.70490265",
"0.7018319",
"0.6989553",
"0.695567",
"0.69453233",
"0.6935331",
"0.6925309",
"0.6916923",
"0.68996024",
"0.6896403",
"0.68959993",
"0.6891861",
"0.6891023",
"0.6887537",
"0.6865112",
"0.6841569",
"0.6836669",
"0.68278176",
"0.68222636",
"0.6818471",
"0.6814663",
"0.6814663",
"0.6814663",
"0.68140006",
"0.6807026",
"0.6806053",
"0.68033075",
"0.6802025",
"0.6792935",
"0.67892754",
"0.6783128",
"0.6779005",
"0.6775144",
"0.6774527",
"0.6771359",
"0.67698693",
"0.6769835",
"0.6767724",
"0.6767428",
"0.67664814",
"0.67605424",
"0.67561746",
"0.67556524",
"0.67552966",
"0.67502564",
"0.67481655",
"0.67463756",
"0.67454296",
"0.67454296",
"0.6745222",
"0.67416626",
"0.67398983",
"0.67298377",
"0.67288965",
"0.6728063",
"0.67270994",
"0.6721826",
"0.6719065",
"0.67159045",
"0.6713199",
"0.6705555",
"0.6705058",
"0.66991127",
"0.66960156",
"0.66900355",
"0.66853434",
"0.668534",
"0.66848063",
"0.6676543",
"0.66740406",
"0.66691744",
"0.6667403",
"0.66654116",
"0.666485",
"0.6663483",
"0.6663009",
"0.6662855",
"0.6661328",
"0.66585964",
"0.66560453",
"0.6651633",
"0.6651128",
"0.6648916",
"0.66463673",
"0.664027",
"0.6637946",
"0.6636623",
"0.6636231",
"0.6636164",
"0.66336966",
"0.66324973",
"0.66221744",
"0.66214824",
"0.6621386"
] | 0.0 | -1 |
NextPage returns true if the pager advanced to the next page. Returns false if there are no more pages or an error occurred. | func (p *DeploymentScriptsListByResourceGroupPager) NextPage(ctx context.Context) bool {
var req *policy.Request
var err error
if !reflect.ValueOf(p.current).IsZero() {
if p.current.DeploymentScriptListResult.NextLink == nil || len(*p.current.DeploymentScriptListResult.NextLink) == 0 {
return false
}
req, err = p.advancer(ctx, p.current)
} else {
req, err = p.requester(ctx)
}
if err != nil {
p.err = err
return false
}
resp, err := p.client.pl.Do(req)
if err != nil {
p.err = err
return false
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
p.err = p.client.listByResourceGroupHandleError(resp)
return false
}
result, err := p.client.listByResourceGroupHandleResponse(resp)
if err != nil {
p.err = err
return false
}
p.current = result
return true
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *Pagination) HasNext() bool {\n\treturn p.Page+1 <= p.NumPages()\n}",
"func (p *SmartGroupsClientGetAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.SmartGroupsList.NextLink == nil || len(*p.current.SmartGroupsList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ManagementClientGetActiveSessionsPager) NextPage(ctx context.Context) bool {\n\tif !p.second {\n\t\tp.second = true\n\t\treturn true\n\t} else if !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionActiveSessionListResult.NextLink == nil || len(*p.current.BastionActiveSessionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, *p.current.BastionActiveSessionListResult.NextLink)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getActiveSessionsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *StoragesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.StorageResourceCollection.NextLink == nil || len(*p.current.StorageResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p Pagination) HasNext() bool {\n\ttotalPage := p.TotalPage()\n\tif totalPage == 0 {\n\t\treturn false\n\t}\n\n\tpage := p.CurrentPage()\n\tif page == 0 {\n\t\treturn false\n\t}\n\n\treturn page < totalPage\n}",
"func (p *Pagination) HasNext() bool {\n\treturn p.Total > p.PageNumber*p.PageSize\n}",
"func (p *Paginator) HasNext() bool {\n\treturn p.Page() < p.PageNums()\n}",
"func (o *PaginationProperties) HasNextPage() bool {\n\tif o != nil && o.NextPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *PrivateLinkResourcesListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateLinkResources.NextLink == nil || len(*p.current.PrivateLinkResources.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationListResult.NextLink == nil || len(*p.current.OperationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationListResult.NextLink == nil || len(*p.current.OperationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (*offsetPageInfoImpl) HasNextPage(p graphql.ResolveParams) (bool, error) {\n\tpage := p.Source.(offsetPageInfo)\n\treturn (page.offset + page.limit) < page.totalCount, nil\n}",
"func (p *PartnerTopicsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PartnerTopicsListResult.NextLink == nil || len(*p.current.PartnerTopicsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *AccountsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *ProfilesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ProfileListResult.NextLink == nil || len(*p.current.ProfileListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *Paginator) HasNext() bool {\n\treturn p.HasNextPage\n}",
"func (p *PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PublicIPAddressListResult.NextLink == nil || len(*p.current.PublicIPAddressListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listVirtualMachineScaleSetPublicIPAddressesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *LinkedServerListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisLinkedServerWithPropertiesList.NextLink == nil || len(*p.current.RedisLinkedServerWithPropertiesList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualAppliancesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualApplianceListResult.NextLink == nil || len(*p.current.VirtualApplianceListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *NatRulesClientListByVPNGatewayPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListVPNGatewayNatRulesResult.NextLink == nil || len(*p.current.ListVPNGatewayNatRulesResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByVPNGatewayHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *ProjectsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (resp *PharosResponse) HasNextPage() bool {\n\treturn resp.Next != nil && *resp.Next != \"\"\n}",
"func (p *OperationsListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ClientDiscoveryResponse.NextLink == nil || len(*p.current.ClientDiscoveryResponse.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RouteTablesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteTableListResult.NextLink == nil || len(*p.current.RouteTableListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *BlobContainersClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListContainerItems.NextLink == nil || len(*p.current.ListContainerItems.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualNetworkTapsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualNetworkTapListResult.NextLink == nil || len(*p.current.VirtualNetworkTapListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *QueueClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListQueueResource.NextLink == nil || len(*p.current.ListQueueResource.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AlertsClientGetAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AlertsList.NextLink == nil || len(*p.current.AlertsList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRoutePortsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRoutePortListResult.NextLink == nil || len(*p.current.ExpressRoutePortListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRouteLinksClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRouteLinkListResult.NextLink == nil || len(*p.current.ExpressRouteLinkListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RedisListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisListResult.NextLink == nil || len(*p.current.RedisListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *KeyVaultClientGetKeysPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.KeyListResult.NextLink == nil || len(*p.current.KeyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getKeysHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ApplicationGatewaysClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ApplicationGatewayListResult.NextLink == nil || len(*p.current.ApplicationGatewayListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RouteFilterRulesClientListByRouteFilterPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteFilterRuleListResult.NextLink == nil || len(*p.current.RouteFilterRuleListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByRouteFilterHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AvailableResourceGroupDelegationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AvailableDelegationsResult.NextLink == nil || len(*p.current.AvailableDelegationsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *IPGroupsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.IPGroupListResult.NextLink == nil || len(*p.current.IPGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRoutePortsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRoutePortListResult.NextLink == nil || len(*p.current.ExpressRoutePortListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualAppliancesClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualApplianceListResult.NextLink == nil || len(*p.current.VirtualApplianceListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateLinkServicesClientListPrivateEndpointConnectionsPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateEndpointConnectionListResult.NextLink == nil || len(*p.current.PrivateEndpointConnectionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listPrivateEndpointConnectionsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *FirewallRulesListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisFirewallRuleListResult.NextLink == nil || len(*p.current.RedisFirewallRuleListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *DomainsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DomainsListResult.NextLink == nil || len(*p.current.DomainsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PoolsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.CapacityPoolList.NextLink == nil || len(*p.current.CapacityPoolList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *EnterprisesPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *ReservationClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListResult.NextLink == nil || len(*p.current.ListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *UsagesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.UsagesListResult.NextLink == nil || len(*p.current.UsagesListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AppsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AppResourceCollection.NextLink == nil || len(*p.current.AppResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *TopicsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.TopicsListResult.NextLink == nil || len(*p.current.TopicsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationList.NextLink == nil || len(*p.current.OperationList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ProfilesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ProfileListResult.NextLink == nil || len(*p.current.ProfileListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *FirewallPoliciesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.FirewallPolicyListResult.NextLink == nil || len(*p.current.FirewallPolicyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateLinkResourcesListByResourcePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateLinkResourcesListResult.NextLink == nil || len(*p.current.PrivateLinkResourcesListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PartnerRegistrationsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PartnerRegistrationsListResult.NextLink == nil || len(*p.current.PartnerRegistrationsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AvailableDelegationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AvailableDelegationsResult.NextLink == nil || len(*p.current.AvailableDelegationsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualHubsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListVirtualHubsResult.NextLink == nil || len(*p.current.ListVirtualHubsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *InterfacesClientListVirtualMachineScaleSetIPConfigurationsPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.InterfaceIPConfigurationListResult.NextLink == nil || len(*p.current.InterfaceIPConfigurationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listVirtualMachineScaleSetIPConfigurationsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ServiceEndpointPoliciesClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ServiceEndpointPolicyListResult.NextLink == nil || len(*p.current.ServiceEndpointPolicyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RedisListBySubscriptionPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisListResult.NextLink == nil || len(*p.current.RedisListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listBySubscriptionHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBySubscriptionHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualNetworkTapsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualNetworkTapListResult.NextLink == nil || len(*p.current.VirtualNetworkTapListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RecordSetsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RecordSetListResult.NextLink == nil || len(*p.current.RecordSetListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AutoApprovedPrivateLinkServicesResult.NextLink == nil || len(*p.current.AutoApprovedPrivateLinkServicesResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAutoApprovedPrivateLinkServicesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationsList.NextLink == nil || len(*p.current.OperationsList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *WCFRelaysListAuthorizationRulesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AuthorizationRuleListResult.NextLink == nil || len(*p.current.AuthorizationRuleListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listAuthorizationRulesHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAuthorizationRulesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PublicIPAddressListResult.NextLink == nil || len(*p.current.PublicIPAddressListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listVirtualMachineScaleSetVMPublicIPAddressesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RoutesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteListResult.NextLink == nil || len(*p.current.RouteListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AccountsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AccountList.NextLink == nil || len(*p.current.AccountList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *QuotaClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.QuotaLimits.NextLink == nil || len(*p.current.QuotaLimits.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *NatGatewaysClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.NatGatewayListResult.NextLink == nil || len(*p.current.NatGatewayListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *GatewaysClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.GatewayResourceCollection.NextLink == nil || len(*p.current.GatewayResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateEndpointConnectionsListByResourcePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateEndpointConnectionListResult.NextLink == nil || len(*p.current.PrivateEndpointConnectionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualNetworksClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualNetworkListResult.NextLink == nil || len(*p.current.VirtualNetworkListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VaultsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VaultList.NextLink == nil || len(*p.current.VaultList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *BindingsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BindingResourceCollection.NextLink == nil || len(*p.current.BindingResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *DscpConfigurationClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DscpConfigurationListResult.NextLink == nil || len(*p.current.DscpConfigurationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *AccountGroupsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *PrivateEndpointsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateEndpointListResult.NextLink == nil || len(*p.current.PrivateEndpointListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AvailableOperations.NextLink == nil || len(*p.current.AvailableOperations.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *EventSubscriptionsListByResourcePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.EventSubscriptionsListResult.NextLink == nil || len(*p.current.EventSubscriptionsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ReservationClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ReservationList.NextLink == nil || len(*p.current.ReservationList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRouteServiceProvidersClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRouteServiceProviderListResult.NextLink == nil || len(*p.current.ExpressRouteServiceProviderListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AzureFirewallsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AzureFirewallListResult.NextLink == nil || len(*p.current.AzureFirewallListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *TableClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListTableResource.NextLink == nil || len(*p.current.ListTableResource.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *LoadBalancersClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.LoadBalancerListResult.NextLink == nil || len(*p.current.LoadBalancerListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualHubsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListVirtualHubsResult.NextLink == nil || len(*p.current.ListVirtualHubsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AccountsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AccountListResult.NextLink == nil || len(*p.current.AccountListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VaultsListBySubscriptionIDPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VaultList.NextLink == nil || len(*p.current.VaultList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listBySubscriptionIDHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBySubscriptionIDHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *BuildServiceClientListBuildResultsPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BuildResultCollection.NextLink == nil || len(*p.current.BuildResultCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBuildResultsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ApplicationSecurityGroupsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ApplicationSecurityGroupListResult.NextLink == nil || len(*p.current.ApplicationSecurityGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *WebApplicationFirewallPoliciesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.WebApplicationFirewallPolicyListResult.NextLink == nil || len(*p.current.WebApplicationFirewallPolicyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PartnerTopicsListBySubscriptionPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PartnerTopicsListResult.NextLink == nil || len(*p.current.PartnerTopicsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listBySubscriptionHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBySubscriptionHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualRouterPeeringsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualRouterPeeringListResult.NextLink == nil || len(*p.current.VirtualRouterPeeringListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *KeyVaultClientGetDeletedKeysPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DeletedKeyListResult.NextLink == nil || len(*p.current.DeletedKeyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getDeletedKeysHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (m *Model) NextPage() {\n\tif !m.OnLastPage() {\n\t\tm.Page++\n\t}\n}",
"func (p *RouteTablesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteTableListResult.NextLink == nil || len(*p.current.RouteTableListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AvailableEndpointServicesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.EndpointServicesListResult.NextLink == nil || len(*p.current.EndpointServicesListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *IntegrationsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (pager *DestinationsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *IPGroupsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.IPGroupListResult.NextLink == nil || len(*p.current.IPGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *SecurityGroupsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.SecurityGroupListResult.NextLink == nil || len(*p.current.SecurityGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *FlowLogsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.FlowLogListResult.NextLink == nil || len(*p.current.FlowLogListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ManagementClientGetBastionShareableLinkPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionShareableLinkListResult.NextLink == nil || len(*p.current.BastionShareableLinkListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getBastionShareableLinkHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}"
] | [
"0.6933959",
"0.69104683",
"0.6902021",
"0.6887859",
"0.6865384",
"0.6842242",
"0.68127084",
"0.6801136",
"0.6781187",
"0.67578614",
"0.67578614",
"0.6748404",
"0.67408264",
"0.66957474",
"0.66940117",
"0.66798955",
"0.6679727",
"0.6656323",
"0.66435057",
"0.6641304",
"0.6637062",
"0.662848",
"0.6625333",
"0.66156054",
"0.66006356",
"0.65961105",
"0.6595444",
"0.6593125",
"0.6585551",
"0.6581216",
"0.65770745",
"0.6568006",
"0.6561606",
"0.6560656",
"0.6558457",
"0.65550876",
"0.65520155",
"0.6549017",
"0.65488",
"0.65461254",
"0.65450716",
"0.6543936",
"0.65415657",
"0.6541475",
"0.65363663",
"0.65331686",
"0.6532954",
"0.65317404",
"0.6528815",
"0.652786",
"0.6526904",
"0.6522195",
"0.65178335",
"0.6512422",
"0.6510342",
"0.6509255",
"0.6497999",
"0.6493339",
"0.64898586",
"0.6484394",
"0.64809734",
"0.6480288",
"0.6478172",
"0.6476303",
"0.6475875",
"0.64728856",
"0.64663655",
"0.64659977",
"0.64594126",
"0.64574176",
"0.645657",
"0.64557695",
"0.6455439",
"0.6454111",
"0.64517266",
"0.64502066",
"0.64487606",
"0.6446886",
"0.64444304",
"0.6443364",
"0.64417464",
"0.64402187",
"0.6438602",
"0.6437627",
"0.6434928",
"0.6430516",
"0.6423444",
"0.64230037",
"0.6421616",
"0.6417063",
"0.64159894",
"0.6408074",
"0.6402804",
"0.6402482",
"0.6401523",
"0.6401196",
"0.6400674",
"0.64002293",
"0.63980424",
"0.63968116"
] | 0.6522806 | 51 |
PageResponse returns the current DeploymentScriptsListByResourceGroupResponse page. | func (p *DeploymentScriptsListByResourceGroupPager) PageResponse() DeploymentScriptsListByResourceGroupResponse {
return p.current
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *VirtualHubsClientListByResourceGroupPager) PageResponse() VirtualHubsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *DdosProtectionPlansClientListByResourceGroupPager) PageResponse() DdosProtectionPlansClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *AccountsClientListByResourceGroupPager) PageResponse() AccountsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VirtualAppliancesClientListByResourceGroupPager) PageResponse() VirtualAppliancesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *BastionHostsClientListByResourceGroupPager) PageResponse() BastionHostsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *DomainsListByResourceGroupPager) PageResponse() DomainsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *CachesClientListByResourceGroupPager) PageResponse() CachesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VPNSitesClientListByResourceGroupPager) PageResponse() VPNSitesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *RedisListByResourceGroupPager) PageResponse() RedisListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *DeploymentScriptsListBySubscriptionPager) PageResponse() DeploymentScriptsListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *ServiceEndpointPolicyDefinitionsClientListByResourceGroupPager) PageResponse() ServiceEndpointPolicyDefinitionsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *DeploymentScriptsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DeploymentScriptListResult.NextLink == nil || len(*p.current.DeploymentScriptListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AlertProcessingRulesClientListByResourceGroupPager) PageResponse() AlertProcessingRulesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *NamespacesListByResourceGroupPager) PageResponse() NamespacesListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *P2SVPNGatewaysClientListByResourceGroupPager) PageResponse() P2SVPNGatewaysClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *ServiceEndpointPoliciesClientListByResourceGroupPager) PageResponse() ServiceEndpointPoliciesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *IPAllocationsClientListByResourceGroupPager) PageResponse() IPAllocationsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *IPGroupsClientListByResourceGroupPager) PageResponse() IPGroupsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VirtualWansClientListByResourceGroupPager) PageResponse() VirtualWansClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VPNGatewaysClientListByResourceGroupPager) PageResponse() VPNGatewaysClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *RouteFiltersClientListByResourceGroupPager) PageResponse() RouteFiltersClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *AvailableServiceAliasesClientListByResourceGroupPager) PageResponse() AvailableServiceAliasesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VirtualNetworkTapsClientListByResourceGroupPager) PageResponse() VirtualNetworkTapsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *PartnerNamespacesListByResourceGroupPager) PageResponse() PartnerNamespacesListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *TopicsListByResourceGroupPager) PageResponse() TopicsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VirtualRoutersClientListByResourceGroupPager) PageResponse() VirtualRoutersClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *MachinesClientListByResourceGroupPager) PageResponse() MachinesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VPNServerConfigurationsClientListByResourceGroupPager) PageResponse() VPNServerConfigurationsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *ExpressRouteCrossConnectionsClientListByResourceGroupPager) PageResponse() ExpressRouteCrossConnectionsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *PrivateLinkScopesClientListByResourceGroupPager) PageResponse() PrivateLinkScopesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *SecurityPartnerProvidersClientListByResourceGroupPager) PageResponse() SecurityPartnerProvidersClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *ExpressRoutePortsClientListByResourceGroupPager) PageResponse() ExpressRoutePortsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *SystemTopicsListByResourceGroupPager) PageResponse() SystemTopicsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VaultsListByResourceGroupPager) PageResponse() VaultsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *PartnerRegistrationsListByResourceGroupPager) PageResponse() PartnerRegistrationsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *PrivateZonesClientListByResourceGroupPager) PageResponse() PrivateZonesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListRegionalByResourceGroupPager) PageResponse() EventSubscriptionsListRegionalByResourceGroupResponse {\n\treturn p.current\n}",
"func (schematics *SchematicsV1) ListResourceGroup(listResourceGroupOptions *ListResourceGroupOptions) (result []ResourceGroupResponse, response *core.DetailedResponse, err error) {\n\treturn schematics.ListResourceGroupWithContext(context.Background(), listResourceGroupOptions)\n}",
"func (p *AvailablePrivateEndpointTypesClientListByResourceGroupPager) PageResponse() AvailablePrivateEndpointTypesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *PartnerTopicsListByResourceGroupPager) PageResponse() PartnerTopicsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *FirewallPolicyRuleCollectionGroupsClientListPager) PageResponse() FirewallPolicyRuleCollectionGroupsClientListResponse {\n\treturn p.current\n}",
"func (client DeploymentsClient) List(ctx context.Context, resourceGroupName string, serviceName string, appName string, version []string) (result DeploymentResourceCollectionPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DeploymentsClient.List\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.drc.Response.Response != nil {\n\t\t\t\tsc = result.drc.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listNextResults\n\treq, err := client.ListPreparer(ctx, resourceGroupName, serviceName, appName, version)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"List\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListSender(req)\n\tif err != nil {\n\t\tresult.drc.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"List\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.drc, err = client.ListResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.DeploymentsClient\", \"List\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.drc.hasNextLink() && result.drc.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}",
"func (p *EventSubscriptionsListGlobalByResourceGroupPager) PageResponse() EventSubscriptionsListGlobalByResourceGroupResponse {\n\treturn p.current\n}",
"func (client *ServersClient) NewListByResourceGroupPager(resourceGroup string, options *ServersClientListByResourceGroupOptions) *runtime.Pager[ServersClientListByResourceGroupResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[ServersClientListByResourceGroupResponse]{\n\t\tMore: func(page ServersClientListByResourceGroupResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *ServersClientListByResourceGroupResponse) (ServersClientListByResourceGroupResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByResourceGroupCreateRequest(ctx, resourceGroup, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn ServersClientListByResourceGroupResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByResourceGroupHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *DeploymentsClientListForClusterPager) PageResponse() DeploymentsClientListForClusterResponse {\n\treturn p.current\n}",
"func (p *SecurityGroupsClientListAllPager) PageResponse() SecurityGroupsClientListAllResponse {\n\treturn p.current\n}",
"func (p *BuildServiceAgentPoolClientListPager) PageResponse() BuildServiceAgentPoolClientListResponse {\n\treturn p.current\n}",
"func (p *ApplicationSecurityGroupsClientListAllPager) PageResponse() ApplicationSecurityGroupsClientListAllResponse {\n\treturn p.current\n}",
"func (p *NamespacesListAuthorizationRulesPager) PageResponse() NamespacesListAuthorizationRulesResponse {\n\treturn p.current\n}",
"func (p *LoadBalancerBackendAddressPoolsClientListPager) PageResponse() LoadBalancerBackendAddressPoolsClientListResponse {\n\treturn p.current\n}",
"func (p *VirtualHubsClientListPager) PageResponse() VirtualHubsClientListResponse {\n\treturn p.current\n}",
"func ExampleServersClient_NewListByResourceGroupPager() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armmariadb.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpager := clientFactory.NewServersClient().NewListByResourceGroupPager(\"testrg\", nil)\n\tfor pager.More() {\n\t\tpage, err := pager.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to advance page: %v\", err)\n\t\t}\n\t\tfor _, v := range page.Value {\n\t\t\t// You could use page here. We use blank identifier for just demo purposes.\n\t\t\t_ = v\n\t\t}\n\t\t// If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t\t// page.ServerListResult = armmariadb.ServerListResult{\n\t\t// \tValue: []*armmariadb.Server{\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"mariadbtestsvc1\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.DBforMariaDB/servers\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/testrg/providers/Microsoft.DBforMariaDB/servers/mariadbtestsvc1\"),\n\t\t// \t\t\tLocation: to.Ptr(\"westus\"),\n\t\t// \t\t\tProperties: &armmariadb.ServerProperties{\n\t\t// \t\t\t\tAdministratorLogin: to.Ptr(\"testuser\"),\n\t\t// \t\t\t\tEarliestRestoreDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-03-07T18:17:35.729321+00:00\"); return t}()),\n\t\t// \t\t\t\tFullyQualifiedDomainName: to.Ptr(\"mariadbtestsvc1.mariadb.database.azure.com\"),\n\t\t// \t\t\t\tPrivateEndpointConnections: []*armmariadb.ServerPrivateEndpointConnection{\n\t\t// \t\t\t\t},\n\t\t// \t\t\t\tPublicNetworkAccess: to.Ptr(armmariadb.PublicNetworkAccessEnumEnabled),\n\t\t// \t\t\t\tSSLEnforcement: to.Ptr(armmariadb.SSLEnforcementEnumEnabled),\n\t\t// \t\t\t\tStorageProfile: &armmariadb.StorageProfile{\n\t\t// \t\t\t\t\tBackupRetentionDays: to.Ptr[int32](7),\n\t\t// \t\t\t\t\tGeoRedundantBackup: to.Ptr(armmariadb.GeoRedundantBackupDisabled),\n\t\t// \t\t\t\t\tStorageMB: to.Ptr[int32](5120),\n\t\t// \t\t\t\t},\n\t\t// \t\t\t\tUserVisibleState: to.Ptr(armmariadb.ServerStateReady),\n\t\t// \t\t\t\tVersion: to.Ptr(armmariadb.ServerVersionTen3),\n\t\t// \t\t\t},\n\t\t// \t\t\tSKU: &armmariadb.SKU{\n\t\t// \t\t\t\tName: to.Ptr(\"B_Gen4_1\"),\n\t\t// \t\t\t\tCapacity: to.Ptr[int32](1),\n\t\t// \t\t\t\tFamily: to.Ptr(\"Gen4\"),\n\t\t// \t\t\t\tTier: to.Ptr(armmariadb.SKUTierBasic),\n\t\t// \t\t\t},\n\t\t// \t\t},\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"mariadbtstsvc2\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.DBforMariaDB/servers\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/testrg/providers/Microsoft.DBforMariaDB/servers/mariadbtstsvc2\"),\n\t\t// \t\t\tLocation: to.Ptr(\"westus\"),\n\t\t// \t\t\tProperties: &armmariadb.ServerProperties{\n\t\t// \t\t\t\tAdministratorLogin: to.Ptr(\"testuser\"),\n\t\t// \t\t\t\tEarliestRestoreDate: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2018-03-07T18:17:35.729321+00:00\"); return t}()),\n\t\t// \t\t\t\tFullyQualifiedDomainName: to.Ptr(\"mariadbtstsvc2.mariadb.database.azure.com\"),\n\t\t// \t\t\t\tPrivateEndpointConnections: []*armmariadb.ServerPrivateEndpointConnection{\n\t\t// \t\t\t\t\t{\n\t\t// 
\t\t\t\t\t\tID: to.Ptr(\"/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/testrg/providers/Microsoft.DBforMariaDB/servers/mariadbtstsvc2/privateEndpointConnections/private-endpoint-name-00000000-1111-2222-3333-444444444444\"),\n\t\t// \t\t\t\t\t\tProperties: &armmariadb.ServerPrivateEndpointConnectionProperties{\n\t\t// \t\t\t\t\t\t\tPrivateEndpoint: &armmariadb.PrivateEndpointProperty{\n\t\t// \t\t\t\t\t\t\t\tID: to.Ptr(\"/subscriptions/ffffffff-ffff-ffff-ffff-ffffffffffff/resourceGroups/Default-Network/providers/Microsoft.Network/privateEndpoints/private-endpoint-name\"),\n\t\t// \t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t\tPrivateLinkServiceConnectionState: &armmariadb.ServerPrivateLinkServiceConnectionStateProperty{\n\t\t// \t\t\t\t\t\t\t\tDescription: to.Ptr(\"Auto-approved\"),\n\t\t// \t\t\t\t\t\t\t\tActionsRequired: to.Ptr(armmariadb.PrivateLinkServiceConnectionStateActionsRequireNone),\n\t\t// \t\t\t\t\t\t\t\tStatus: to.Ptr(armmariadb.PrivateLinkServiceConnectionStateStatusApproved),\n\t\t// \t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t\tProvisioningState: to.Ptr(armmariadb.PrivateEndpointProvisioningState(\"Succeeded\")),\n\t\t// \t\t\t\t\t\t},\n\t\t// \t\t\t\t}},\n\t\t// \t\t\t\tPublicNetworkAccess: to.Ptr(armmariadb.PublicNetworkAccessEnumEnabled),\n\t\t// \t\t\t\tSSLEnforcement: to.Ptr(armmariadb.SSLEnforcementEnumEnabled),\n\t\t// \t\t\t\tStorageProfile: &armmariadb.StorageProfile{\n\t\t// \t\t\t\t\tBackupRetentionDays: to.Ptr[int32](7),\n\t\t// \t\t\t\t\tGeoRedundantBackup: to.Ptr(armmariadb.GeoRedundantBackupDisabled),\n\t\t// \t\t\t\t\tStorageMB: to.Ptr[int32](5120),\n\t\t// \t\t\t\t},\n\t\t// \t\t\t\tUserVisibleState: to.Ptr(armmariadb.ServerStateReady),\n\t\t// \t\t\t\tVersion: to.Ptr(armmariadb.ServerVersionTen3),\n\t\t// \t\t\t},\n\t\t// \t\t\tSKU: &armmariadb.SKU{\n\t\t// \t\t\t\tName: to.Ptr(\"GP_Gen4_2\"),\n\t\t// \t\t\t\tCapacity: to.Ptr[int32](2),\n\t\t// \t\t\t\tFamily: to.Ptr(\"Gen4\"),\n\t\t// \t\t\t\tTier: to.Ptr(armmariadb.SKUTierGeneralPurpose),\n\t\t// \t\t\t},\n\t\t// \t}},\n\t\t// }\n\t}\n}",
"func (client *WebAppsClient) listByResourceGroupHandleResponse(resp *http.Response) (WebAppsListByResourceGroupResponse, error) {\n\tresult := WebAppsListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.WebAppCollection); err != nil {\n\t\treturn WebAppsListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (client KqlScriptsClient) GetAll(ctx context.Context) (result KqlScriptsResourceCollectionResponsePage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/KqlScriptsClient.GetAll\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.ksrcr.Response.Response != nil {\n\t\t\t\tsc = result.ksrcr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.getAllNextResults\n\treq, err := client.GetAllPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"artifacts.KqlScriptsClient\", \"GetAll\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.GetAllSender(req)\n\tif err != nil {\n\t\tresult.ksrcr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"artifacts.KqlScriptsClient\", \"GetAll\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.ksrcr, err = client.GetAllResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"artifacts.KqlScriptsClient\", \"GetAll\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.ksrcr.hasNextLink() && result.ksrcr.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}",
"func (p *LoadBalancerLoadBalancingRulesClientListPager) PageResponse() LoadBalancerLoadBalancingRulesClientListResponse {\n\treturn p.current\n}",
"func (p *DeploymentsClientListPager) PageResponse() DeploymentsClientListResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListByResourcePager) PageResponse() EventSubscriptionsListByResourceResponse {\n\treturn p.current\n}",
"func (p *SecurityGroupsClientListPager) PageResponse() SecurityGroupsClientListResponse {\n\treturn p.current\n}",
"func (page VirtualMachineScaleSetListResultPageClient) Response() azcompute.VirtualMachineScaleSetListResult {\n\tr := azcompute.VirtualMachineScaleSetListResult{}\n\terr := DeepCopy(&r, page.vmsslrp.Response())\n\tif err != nil {\n\t\tpage.err = fmt.Errorf(\"fail to get virtual machine scale set list result, %s\", err) //nolint:staticcheck\n\t}\n\treturn r\n}",
"func ExampleEnvironmentsClient_ListByResourceGroup() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armtimeseriesinsights.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tres, err := clientFactory.NewEnvironmentsClient().ListByResourceGroup(ctx, \"rg1\", nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to finish the request: %v\", err)\n\t}\n\t// You could use response here. We use blank identifier for just demo purposes.\n\t_ = res\n\t// If the HTTP response code is 200 as defined in example definition, your response structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t// res.EnvironmentListResponse = armtimeseriesinsights.EnvironmentListResponse{\n\t// \tValue: []armtimeseriesinsights.EnvironmentResourceClassification{\n\t// \t\t&armtimeseriesinsights.Gen1EnvironmentResource{\n\t// \t\t\tName: to.Ptr(\"env1\"),\n\t// \t\t\tType: to.Ptr(\"Microsoft.TimeSeriesInsights/Environments\"),\n\t// \t\t\tID: to.Ptr(\"/subscriptions/subid/resourceGroups/rg1/providers/Microsoft.TimeSeriesInsights/Environments/env1\"),\n\t// \t\t\tLocation: to.Ptr(\"West US\"),\n\t// \t\t\tTags: map[string]*string{\n\t// \t\t\t},\n\t// \t\t\tKind: to.Ptr(armtimeseriesinsights.EnvironmentResourceKindGen1),\n\t// \t\t\tSKU: &armtimeseriesinsights.SKU{\n\t// \t\t\t\tName: to.Ptr(armtimeseriesinsights.SKUNameS1),\n\t// \t\t\t\tCapacity: to.Ptr[int32](1),\n\t// \t\t\t},\n\t// \t\t\tProperties: &armtimeseriesinsights.Gen1EnvironmentResourceProperties{\n\t// \t\t\t\tCreationTime: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2017-04-18T19:20:33.2288820Z\"); return t}()),\n\t// \t\t\t\tProvisioningState: to.Ptr(armtimeseriesinsights.ProvisioningStateSucceeded),\n\t// \t\t\t\tDataRetentionTime: to.Ptr(\"P31D\"),\n\t// \t\t\t},\n\t// \t}},\n\t// }\n}",
"func (p *ApplicationSecurityGroupsClientListPager) PageResponse() ApplicationSecurityGroupsClientListResponse {\n\treturn p.current\n}",
"func (client *ServersClient) NewListByResourceGroupPager(resourceGroupName string, options *ServersClientListByResourceGroupOptions) *runtime.Pager[ServersClientListByResourceGroupResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[ServersClientListByResourceGroupResponse]{\n\t\tMore: func(page ServersClientListByResourceGroupResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *ServersClientListByResourceGroupResponse) (ServersClientListByResourceGroupResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn ServersClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn ServersClientListByResourceGroupResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByResourceGroupHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *BastionHostsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionHostListResult.NextLink == nil || len(*p.current.BastionHostListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func ExampleVaultsClient_NewListByResourceGroupPager() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armkeyvault.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpager := clientFactory.NewVaultsClient().NewListByResourceGroupPager(\"sample-group\", &armkeyvault.VaultsClientListByResourceGroupOptions{Top: to.Ptr[int32](1)})\n\tfor pager.More() {\n\t\tpage, err := pager.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to advance page: %v\", err)\n\t\t}\n\t\tfor _, v := range page.Value {\n\t\t\t// You could use page here. We use blank identifier for just demo purposes.\n\t\t\t_ = v\n\t\t}\n\t\t// If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t\t// page.VaultListResult = armkeyvault.VaultListResult{\n\t\t// \tValue: []*armkeyvault.Vault{\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"sample-vault\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.KeyVault/vaults\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/sample-group/providers/Microsoft.KeyVault/vaults/sample-vault\"),\n\t\t// \t\t\tLocation: to.Ptr(\"westus\"),\n\t\t// \t\t\tProperties: &armkeyvault.VaultProperties{\n\t\t// \t\t\t\tAccessPolicies: []*armkeyvault.AccessPolicyEntry{\n\t\t// \t\t\t\t\t{\n\t\t// \t\t\t\t\t\tObjectID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t\t// \t\t\t\t\t\tPermissions: &armkeyvault.Permissions{\n\t\t// \t\t\t\t\t\t\tCertificates: []*armkeyvault.CertificatePermissions{\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsGet),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsList),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsDelete),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsCreate),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsImport),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsUpdate),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsManagecontacts),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsGetissuers),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsListissuers),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsSetissuers),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsDeleteissuers),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsManageissuers),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsRecover),\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armkeyvault.CertificatePermissionsPurge)},\n\t\t// \t\t\t\t\t\t\t\tKeys: []*armkeyvault.KeyPermissions{\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsEncrypt),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsDecrypt),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsWrapKey),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsUnwrapKey),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsSign),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsVerify),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsGet),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsList),\n\t\t// 
\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsCreate),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsUpdate),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsImport),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsDelete),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsBackup),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsRestore),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsRecover),\n\t\t// \t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.KeyPermissionsPurge)},\n\t\t// \t\t\t\t\t\t\t\t\tSecrets: []*armkeyvault.SecretPermissions{\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsGet),\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsList),\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsSet),\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsDelete),\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsBackup),\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsRestore),\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsRecover),\n\t\t// \t\t\t\t\t\t\t\t\t\tto.Ptr(armkeyvault.SecretPermissionsPurge)},\n\t\t// \t\t\t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t\t\t\tTenantID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t\t// \t\t\t\t\t\t\t}},\n\t\t// \t\t\t\t\t\t\tEnableSoftDelete: to.Ptr(true),\n\t\t// \t\t\t\t\t\t\tEnabledForDeployment: to.Ptr(true),\n\t\t// \t\t\t\t\t\t\tEnabledForDiskEncryption: to.Ptr(true),\n\t\t// \t\t\t\t\t\t\tEnabledForTemplateDeployment: to.Ptr(true),\n\t\t// \t\t\t\t\t\t\tHsmPoolResourceID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t\t// \t\t\t\t\t\t\tProvisioningState: to.Ptr(armkeyvault.VaultProvisioningStateSucceeded),\n\t\t// \t\t\t\t\t\t\tSKU: &armkeyvault.SKU{\n\t\t// \t\t\t\t\t\t\t\tName: to.Ptr(armkeyvault.SKUNamePremium),\n\t\t// \t\t\t\t\t\t\t\tFamily: to.Ptr(armkeyvault.SKUFamilyA),\n\t\t// \t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t\tTenantID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t\t// \t\t\t\t\t\t\tVaultURI: to.Ptr(\"https://sample-vault.vault.azure.net/\"),\n\t\t// \t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\tSystemData: &armkeyvault.SystemData{\n\t\t// \t\t\t\t\t\t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-01-01T12:00:00.0000000Z\"); return t}()),\n\t\t// \t\t\t\t\t\t\tCreatedBy: to.Ptr(\"keyVaultUser1\"),\n\t\t// \t\t\t\t\t\t\tCreatedByType: to.Ptr(armkeyvault.IdentityTypeUser),\n\t\t// \t\t\t\t\t\t\tLastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2020-01-01T12:00:00.0000000Z\"); return t}()),\n\t\t// \t\t\t\t\t\t\tLastModifiedBy: to.Ptr(\"keyVaultUser2\"),\n\t\t// \t\t\t\t\t\t\tLastModifiedByType: to.Ptr(armkeyvault.IdentityTypeUser),\n\t\t// \t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\tTags: map[string]*string{\n\t\t// \t\t\t\t\t\t},\n\t\t// \t\t\t\t}},\n\t\t// \t\t\t}\n\t}\n}",
"func (p *APIPortalCustomDomainsClientListPager) PageResponse() APIPortalCustomDomainsClientListResponse {\n\treturn p.current\n}",
"func ExampleAppliancesClient_NewListByResourceGroupPager() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclient, err := armresourceconnector.NewAppliancesClient(\"11111111-2222-3333-4444-555555555555\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpager := client.NewListByResourceGroupPager(\"testresourcegroup\",\n\t\tnil)\n\tfor pager.More() {\n\t\tnextResult, err := pager.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to advance page: %v\", err)\n\t\t}\n\t\tfor _, v := range nextResult.Value {\n\t\t\t// TODO: use page item\n\t\t\t_ = v\n\t\t}\n\t}\n}",
"func (p *KeyVaultClientGetKeyVersionsPager) PageResponse() KeyVaultClientGetKeyVersionsResponse {\n\treturn p.current\n}",
"func (p *InterfacesClientListVirtualMachineScaleSetIPConfigurationsPager) PageResponse() InterfacesClientListVirtualMachineScaleSetIPConfigurationsResponse {\n\treturn p.current\n}",
"func (p *PrivateDNSZoneGroupsClientListPager) PageResponse() PrivateDNSZoneGroupsClientListResponse {\n\treturn p.current\n}",
"func (p *AccountsClientListBySubscriptionPager) PageResponse() AccountsClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesPager) PageResponse() ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse {\n\treturn p.current\n}",
"func (client *IotSecuritySolutionClient) NewListByResourceGroupPager(resourceGroupName string, options *IotSecuritySolutionClientListByResourceGroupOptions) *runtime.Pager[IotSecuritySolutionClientListByResourceGroupResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[IotSecuritySolutionClientListByResourceGroupResponse]{\n\t\tMore: func(page IotSecuritySolutionClientListByResourceGroupResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *IotSecuritySolutionClientListByResourceGroupResponse) (IotSecuritySolutionClientListByResourceGroupResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn IotSecuritySolutionClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn IotSecuritySolutionClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn IotSecuritySolutionClientListByResourceGroupResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByResourceGroupHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *DeploymentScriptsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *HybridConnectionsListAuthorizationRulesPager) PageResponse() HybridConnectionsListAuthorizationRulesResponse {\n\treturn p.current\n}",
"func (p *VirtualHubsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListVirtualHubsResult.NextLink == nil || len(*p.current.ListVirtualHubsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (s *jsiiProxy_ServerDeploymentGroup) ToString() *string {\n\tvar returns *string\n\n\t_jsii_.Invoke(\n\t\ts,\n\t\t\"toString\",\n\t\tnil, // no parameters\n\t\t&returns,\n\t)\n\n\treturn returns\n}",
"func ExampleClient_NewListByResourceGroupPager() {\n\tcred, err := azidentity.NewDefaultAzureCredential(nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to obtain a credential: %v\", err)\n\t}\n\tctx := context.Background()\n\tclientFactory, err := armwebpubsub.NewClientFactory(\"<subscription-id>\", cred, nil)\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to create client: %v\", err)\n\t}\n\tpager := clientFactory.NewClient().NewListByResourceGroupPager(\"myResourceGroup\", nil)\n\tfor pager.More() {\n\t\tpage, err := pager.NextPage(ctx)\n\t\tif err != nil {\n\t\t\tlog.Fatalf(\"failed to advance page: %v\", err)\n\t\t}\n\t\tfor _, v := range page.Value {\n\t\t\t// You could use page here. We use blank identifier for just demo purposes.\n\t\t\t_ = v\n\t\t}\n\t\t// If the HTTP response code is 200 as defined in example definition, your page structure would look as follows. Please pay attention that all the values in the output are fake values for just demo purposes.\n\t\t// page.ResourceInfoList = armwebpubsub.ResourceInfoList{\n\t\t// \tValue: []*armwebpubsub.ResourceInfo{\n\t\t// \t\t{\n\t\t// \t\t\tName: to.Ptr(\"myWebPubSubService\"),\n\t\t// \t\t\tType: to.Ptr(\"Microsoft.SignalRService/WebPubSub\"),\n\t\t// \t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myResourceGroup/providers/Microsoft.SignalRService/WebPubSub/myWebPubSubService\"),\n\t\t// \t\t\tSystemData: &armwebpubsub.SystemData{\n\t\t// \t\t\t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2015-02-03T04:05:06Z\"); return t}()),\n\t\t// \t\t\t\tCreatedBy: to.Ptr(\"string\"),\n\t\t// \t\t\t\tCreatedByType: to.Ptr(armwebpubsub.CreatedByTypeUser),\n\t\t// \t\t\t\tLastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2015-02-03T04:05:06Z\"); return t}()),\n\t\t// \t\t\t\tLastModifiedBy: to.Ptr(\"string\"),\n\t\t// \t\t\t\tLastModifiedByType: to.Ptr(armwebpubsub.CreatedByTypeUser),\n\t\t// \t\t\t},\n\t\t// \t\t\tLocation: to.Ptr(\"eastus\"),\n\t\t// \t\t\tTags: map[string]*string{\n\t\t// \t\t\t\t\"key1\": to.Ptr(\"value1\"),\n\t\t// \t\t\t},\n\t\t// \t\t\tIdentity: &armwebpubsub.ManagedIdentity{\n\t\t// \t\t\t\tType: to.Ptr(armwebpubsub.ManagedIdentityTypeSystemAssigned),\n\t\t// \t\t\t\tPrincipalID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t\t// \t\t\t\tTenantID: to.Ptr(\"00000000-0000-0000-0000-000000000000\"),\n\t\t// \t\t\t},\n\t\t// \t\t\tKind: to.Ptr(armwebpubsub.ServiceKindWebPubSub),\n\t\t// \t\t\tProperties: &armwebpubsub.Properties{\n\t\t// \t\t\t\tDisableAADAuth: to.Ptr(false),\n\t\t// \t\t\t\tDisableLocalAuth: to.Ptr(false),\n\t\t// \t\t\t\tExternalIP: to.Ptr(\"10.0.0.1\"),\n\t\t// \t\t\t\tHostName: to.Ptr(\"mywebpubsubservice.webpubsub.azure.com\"),\n\t\t// \t\t\t\tLiveTraceConfiguration: &armwebpubsub.LiveTraceConfiguration{\n\t\t// \t\t\t\t\tCategories: []*armwebpubsub.LiveTraceCategory{\n\t\t// \t\t\t\t\t\t{\n\t\t// \t\t\t\t\t\t\tName: to.Ptr(\"ConnectivityLogs\"),\n\t\t// \t\t\t\t\t\t\tEnabled: to.Ptr(\"true\"),\n\t\t// \t\t\t\t\t}},\n\t\t// \t\t\t\t\tEnabled: to.Ptr(\"false\"),\n\t\t// \t\t\t\t},\n\t\t// \t\t\t\tNetworkACLs: &armwebpubsub.NetworkACLs{\n\t\t// \t\t\t\t\tDefaultAction: to.Ptr(armwebpubsub.ACLActionDeny),\n\t\t// \t\t\t\t\tPrivateEndpoints: []*armwebpubsub.PrivateEndpointACL{\n\t\t// \t\t\t\t\t\t{\n\t\t// \t\t\t\t\t\t\tAllow: []*armwebpubsub.WebPubSubRequestType{\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armwebpubsub.WebPubSubRequestTypeServerConnection)},\n\t\t// \t\t\t\t\t\t\t\tName: 
to.Ptr(\"mywebpubsubservice.1fa229cd-bf3f-47f0-8c49-afb36723997e\"),\n\t\t// \t\t\t\t\t\t}},\n\t\t// \t\t\t\t\t\tPublicNetwork: &armwebpubsub.NetworkACL{\n\t\t// \t\t\t\t\t\t\tAllow: []*armwebpubsub.WebPubSubRequestType{\n\t\t// \t\t\t\t\t\t\t\tto.Ptr(armwebpubsub.WebPubSubRequestTypeClientConnection)},\n\t\t// \t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\tPrivateEndpointConnections: []*armwebpubsub.PrivateEndpointConnection{\n\t\t// \t\t\t\t\t\t\t{\n\t\t// \t\t\t\t\t\t\t\tName: to.Ptr(\"mywebpubsubservice.1fa229cd-bf3f-47f0-8c49-afb36723997e\"),\n\t\t// \t\t\t\t\t\t\t\tType: to.Ptr(\"Microsoft.SignalRService/WebPubSub/privateEndpointConnections\"),\n\t\t// \t\t\t\t\t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myResourceGroup/providers/Microsoft.SignalRService/WebPubSub/myWebPubSubService/privateEndpointConnections/mywebpubsubservice.1fa229cd-bf3f-47f0-8c49-afb36723997e\"),\n\t\t// \t\t\t\t\t\t\t\tSystemData: &armwebpubsub.SystemData{\n\t\t// \t\t\t\t\t\t\t\t\tCreatedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2015-02-03T04:05:06Z\"); return t}()),\n\t\t// \t\t\t\t\t\t\t\t\tCreatedBy: to.Ptr(\"string\"),\n\t\t// \t\t\t\t\t\t\t\t\tCreatedByType: to.Ptr(armwebpubsub.CreatedByTypeUser),\n\t\t// \t\t\t\t\t\t\t\t\tLastModifiedAt: to.Ptr(func() time.Time { t, _ := time.Parse(time.RFC3339Nano, \"2015-02-03T04:05:06Z\"); return t}()),\n\t\t// \t\t\t\t\t\t\t\t\tLastModifiedBy: to.Ptr(\"string\"),\n\t\t// \t\t\t\t\t\t\t\t\tLastModifiedByType: to.Ptr(armwebpubsub.CreatedByTypeUser),\n\t\t// \t\t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t\t\tProperties: &armwebpubsub.PrivateEndpointConnectionProperties{\n\t\t// \t\t\t\t\t\t\t\t\tPrivateEndpoint: &armwebpubsub.PrivateEndpoint{\n\t\t// \t\t\t\t\t\t\t\t\t\tID: to.Ptr(\"/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/myResourceGroup/providers/Microsoft.Network/privateEndpoints/myPrivateEndpoint\"),\n\t\t// \t\t\t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t\t\t\tPrivateLinkServiceConnectionState: &armwebpubsub.PrivateLinkServiceConnectionState{\n\t\t// \t\t\t\t\t\t\t\t\t\tActionsRequired: to.Ptr(\"None\"),\n\t\t// \t\t\t\t\t\t\t\t\t\tStatus: to.Ptr(armwebpubsub.PrivateLinkServiceConnectionStatusApproved),\n\t\t// \t\t\t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t\t\t\tProvisioningState: to.Ptr(armwebpubsub.ProvisioningStateSucceeded),\n\t\t// \t\t\t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\t}},\n\t\t// \t\t\t\t\t\tProvisioningState: to.Ptr(armwebpubsub.ProvisioningStateSucceeded),\n\t\t// \t\t\t\t\t\tPublicNetworkAccess: to.Ptr(\"Enabled\"),\n\t\t// \t\t\t\t\t\tPublicPort: to.Ptr[int32](443),\n\t\t// \t\t\t\t\t\tResourceLogConfiguration: &armwebpubsub.ResourceLogConfiguration{\n\t\t// \t\t\t\t\t\t\tCategories: []*armwebpubsub.ResourceLogCategory{\n\t\t// \t\t\t\t\t\t\t\t{\n\t\t// \t\t\t\t\t\t\t\t\tName: to.Ptr(\"ConnectivityLogs\"),\n\t\t// \t\t\t\t\t\t\t\t\tEnabled: to.Ptr(\"true\"),\n\t\t// \t\t\t\t\t\t\t}},\n\t\t// \t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\tServerPort: to.Ptr[int32](443),\n\t\t// \t\t\t\t\t\tTLS: &armwebpubsub.TLSSettings{\n\t\t// \t\t\t\t\t\t\tClientCertEnabled: to.Ptr(true),\n\t\t// \t\t\t\t\t\t},\n\t\t// \t\t\t\t\t\tVersion: to.Ptr(\"1.0\"),\n\t\t// \t\t\t\t\t},\n\t\t// \t\t\t\t\tSKU: &armwebpubsub.ResourceSKU{\n\t\t// \t\t\t\t\t\tName: to.Ptr(\"Premium_P1\"),\n\t\t// \t\t\t\t\t\tCapacity: to.Ptr[int32](1),\n\t\t// \t\t\t\t\t\tSize: to.Ptr(\"P1\"),\n\t\t// \t\t\t\t\t\tTier: to.Ptr(armwebpubsub.WebPubSubSKUTierPremium),\n\t\t// \t\t\t\t\t},\n\t\t// \t\t\t}},\n\t\t// 
\t\t}\n\t}\n}",
"func (client *RedisClient) listByResourceGroupHandleResponse(resp *http.Response) (RedisListByResourceGroupResponse, error) {\n\tresult := RedisListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.RedisListResult); err != nil {\n\t\treturn RedisListByResourceGroupResponse{}, runtime.NewResponseError(err, resp)\n\t}\n\treturn result, nil\n}",
"func (p *AlertProcessingRulesClientListBySubscriptionPager) PageResponse() AlertProcessingRulesClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *HybridConnectionsListByNamespacePager) PageResponse() HybridConnectionsListByNamespaceResponse {\n\treturn p.current\n}",
"func (o LookupSnapshotPolicyResultOutput) ResourceGroupName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupSnapshotPolicyResult) string { return v.ResourceGroupName }).(pulumi.StringOutput)\n}",
"func (p *NamespacesListPager) PageResponse() NamespacesListResponse {\n\treturn p.current\n}",
"func (p *NamespacesListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RelayNamespaceListResult.NextLink == nil || len(*p.current.RelayNamespaceListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (client *SQLVirtualMachinesClient) listByResourceGroupHandleResponse(resp *http.Response) (SQLVirtualMachinesClientListByResourceGroupResponse, error) {\n\tresult := SQLVirtualMachinesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ListResult); err != nil {\n\t\treturn SQLVirtualMachinesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (client Client) ListByResourceGroupNextResults(lastResults ListResult) (result ListResult, err error) {\n\treq, err := lastResults.ListResultPreparer()\n\tif err != nil {\n\t\treturn result, autorest.NewErrorWithError(err, \"redis.Client\", \"ListByResourceGroup\", nil, \"Failure preparing next results request request\")\n\t}\n\tif req == nil {\n\t\treturn\n\t}\n\n\tresp, err := client.ListByResourceGroupSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\treturn result, autorest.NewErrorWithError(err, \"redis.Client\", \"ListByResourceGroup\", resp, \"Failure sending next results request request\")\n\t}\n\n\tresult, err = client.ListByResourceGroupResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"redis.Client\", \"ListByResourceGroup\", resp, \"Failure responding to next results request request\")\n\t}\n\n\treturn\n}",
"func (client *ConnectedEnvironmentsClient) NewListByResourceGroupPager(resourceGroupName string, options *ConnectedEnvironmentsClientListByResourceGroupOptions) *runtime.Pager[ConnectedEnvironmentsClientListByResourceGroupResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[ConnectedEnvironmentsClientListByResourceGroupResponse]{\n\t\tMore: func(page ConnectedEnvironmentsClientListByResourceGroupResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *ConnectedEnvironmentsClientListByResourceGroupResponse) (ConnectedEnvironmentsClientListByResourceGroupResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn ConnectedEnvironmentsClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.pl.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn ConnectedEnvironmentsClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn ConnectedEnvironmentsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByResourceGroupHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupPager) PageResponse() PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VirtualApplianceSitesClientListPager) PageResponse() VirtualApplianceSitesClientListResponse {\n\treturn p.current\n}",
"func (client *VirtualMachineImageTemplatesClient) listByResourceGroupHandleResponse(resp *http.Response) (VirtualMachineImageTemplatesClientListByResourceGroupResponse, error) {\n\tresult := VirtualMachineImageTemplatesClientListByResourceGroupResponse{RawResponse: resp}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.ImageTemplateListResult); err != nil {\n\t\treturn VirtualMachineImageTemplatesClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (p *InterfaceLoadBalancersClientListPager) PageResponse() InterfaceLoadBalancersClientListResponse {\n\treturn p.current\n}",
"func (p *BuildpackBindingClientListPager) PageResponse() BuildpackBindingClientListResponse {\n\treturn p.current\n}",
"func (p *LoadBalancerOutboundRulesClientListPager) PageResponse() LoadBalancerOutboundRulesClientListResponse {\n\treturn p.current\n}",
"func (client *SpatialAnchorsAccountsClient) listByResourceGroupHandleResponse(resp *http.Response) (SpatialAnchorsAccountsClientListByResourceGroupResponse, error) {\n\tresult := SpatialAnchorsAccountsClientListByResourceGroupResponse{}\n\tif err := runtime.UnmarshalAsJSON(resp, &result.SpatialAnchorsAccountPage); err != nil {\n\t\treturn SpatialAnchorsAccountsClientListByResourceGroupResponse{}, err\n\t}\n\treturn result, nil\n}",
"func (o GetLoadBalancersBalancerOutput) ResourceGroupId() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetLoadBalancersBalancer) string { return v.ResourceGroupId }).(pulumi.StringOutput)\n}",
"func (p *GatewayCustomDomainsClientListPager) PageResponse() GatewayCustomDomainsClientListResponse {\n\treturn p.current\n}",
"func (client *SpatialAnchorsAccountsClient) NewListByResourceGroupPager(resourceGroupName string, options *SpatialAnchorsAccountsClientListByResourceGroupOptions) *runtime.Pager[SpatialAnchorsAccountsClientListByResourceGroupResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[SpatialAnchorsAccountsClientListByResourceGroupResponse]{\n\t\tMore: func(page SpatialAnchorsAccountsClientListByResourceGroupResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *SpatialAnchorsAccountsClientListByResourceGroupResponse) (SpatialAnchorsAccountsClientListByResourceGroupResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listByResourceGroupCreateRequest(ctx, resourceGroupName, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn SpatialAnchorsAccountsClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn SpatialAnchorsAccountsClientListByResourceGroupResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn SpatialAnchorsAccountsClientListByResourceGroupResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listByResourceGroupHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *PartnerNamespacesListBySubscriptionPager) PageResponse() PartnerNamespacesListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *DomainsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DomainsListResult.NextLink == nil || len(*p.current.DomainsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *HubVirtualNetworkConnectionsClientListPager) PageResponse() HubVirtualNetworkConnectionsClientListResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListRegionalBySubscriptionPager) PageResponse() EventSubscriptionsListRegionalBySubscriptionResponse {\n\treturn p.current\n}"
] | [
"0.61588115",
"0.6053747",
"0.60201657",
"0.6003196",
"0.59671813",
"0.5928203",
"0.5914979",
"0.587309",
"0.5829479",
"0.58222055",
"0.5811157",
"0.57919514",
"0.5764543",
"0.5756858",
"0.5732701",
"0.57273406",
"0.56606054",
"0.56596446",
"0.56431043",
"0.5635589",
"0.56145126",
"0.5600253",
"0.55868804",
"0.5481398",
"0.54745126",
"0.5458186",
"0.54503685",
"0.54251426",
"0.54133135",
"0.5400754",
"0.5374622",
"0.53710204",
"0.53347725",
"0.5305822",
"0.5285865",
"0.524573",
"0.519394",
"0.5100229",
"0.506129",
"0.49260974",
"0.49068946",
"0.48614898",
"0.47992727",
"0.461687",
"0.46166137",
"0.46128303",
"0.4590732",
"0.45734522",
"0.4568736",
"0.45672327",
"0.45640466",
"0.45571822",
"0.45468497",
"0.4539237",
"0.4530293",
"0.45273274",
"0.44934398",
"0.44908383",
"0.44890875",
"0.44709614",
"0.44706517",
"0.44072768",
"0.4401066",
"0.43988708",
"0.43970525",
"0.4386914",
"0.43808377",
"0.4373715",
"0.43623284",
"0.4359373",
"0.43560642",
"0.43531337",
"0.43477786",
"0.43383592",
"0.43347514",
"0.4333939",
"0.43337592",
"0.43291152",
"0.43155193",
"0.43144733",
"0.431072",
"0.4307813",
"0.4301968",
"0.42977595",
"0.4287512",
"0.428346",
"0.428028",
"0.42794183",
"0.42769384",
"0.4270125",
"0.4270124",
"0.42632133",
"0.4261976",
"0.42615974",
"0.4254479",
"0.42522007",
"0.42466235",
"0.42451188",
"0.4244094",
"0.42416933"
] | 0.7542869 | 0 |
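The negatives in the record above mix two generations of Azure SDK for Go paging: legacy pagers exposing `NextPage(ctx context.Context) bool` and `PageResponse()`, and the newer generic `runtime.Pager[T]` returned by the `New...Pager` constructors. As a point of reference, here is a minimal consumption sketch of the generic form, modelled on the `armmariadb` example among those negatives; it is a fragment, not a complete program, and `ctx` and `clientFactory` are assumed to be initialized exactly as in that example.

```go
// Generic runtime.Pager[T] consumption, as produced by the New...Pager constructors above.
pager := clientFactory.NewServersClient().NewListByResourceGroupPager("testrg", nil)
for pager.More() {
	page, err := pager.NextPage(ctx)
	if err != nil {
		log.Fatalf("failed to advance page: %v", err) // stop on the first paging error
	}
	for _, server := range page.Value {
		_ = server // each item is an *armmariadb.Server; replace with real handling
	}
}
```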
Err returns the last error encountered while paging. | func (p *DeploymentScriptsListBySubscriptionPager) Err() error {
return p.err
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *PaginatedResult) Err() error {\n\treturn p.err\n}",
"func (p *SmartGroupsClientGetAllPager) Err() error {\n\treturn p.err\n}",
"func (p *AlertsClientGetAllPager) Err() error {\n\treturn p.err\n}",
"func (p *AzureFirewallsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkResourcesListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkResourcesListByResourcePager) Err() error {\n\treturn p.err\n}",
"func (p *LinkedServerListPager) Err() error {\n\treturn p.err\n}",
"func (p *DomainTopicsListByDomainPager) Err() error {\n\treturn p.err\n}",
"func (p *FirewallRulesListPager) Err() error {\n\treturn p.err\n}",
"func (p *AzureFirewallsClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *DomainsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *UsagesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerNamespacesListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerOutboundRulesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointConnectionsListByResourcePager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewaysClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *FirewallPoliciesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *NamespacesListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *NamespacesListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualAppliancesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *NatGatewaysClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *ProfilesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewayPrivateLinkResourcesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancersClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *NatRulesClientListByVPNGatewayPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerLoadBalancingRulesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerTopicsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *OperationsListPager) Err() error {\n\treturn p.err\n}",
"func (p *OperationsListPager) Err() error {\n\treturn p.err\n}",
"func (p *OperationsListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkServicesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *ReservationClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkPeeringsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *HybridConnectionsListByNamespacePager) Err() error {\n\treturn p.err\n}",
"func (p *SubnetsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkScopesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerNamespacesListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *DomainsListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *FirewallPoliciesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualHubsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *WebApplicationFirewallPoliciesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *WebApplicationFirewallPoliciesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *SecurityGroupsClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkResourcesClientListByPrivateLinkScopePager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationSecurityGroupsClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *InterfacesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualAppliancesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworksClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *RouteTablesClientListAllPager) Err() error {\n\treturn p.err\n}",
"func (p *CustomDomainsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewaysClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancersClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PoolsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *AccountsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *AccountsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *NatGatewaysClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkScopesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerBackendAddressPoolsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *CertificatesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *BindingsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerTopicsListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *AppsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkGatewayNatRulesClientListByVirtualNetworkGatewayPager) Err() error {\n\treturn p.err\n}",
"func (p *VPNSitesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *AccountsClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *StoragesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VPNSiteLinksClientListByVPNSitePager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkLinksClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PartnerRegistrationsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *TopicsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualRouterPeeringsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualWansClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *InboundNatRulesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *DeletedAccountsClientListPager) Err() error {\n\treturn p.err\n}",
"func (iter *DeleteListIterator) Err() error {\n\treturn iter.Paginator.Err()\n}",
"func (p *CachesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *MachinesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *EventSubscriptionsListByDomainTopicPager) Err() error {\n\treturn p.err\n}",
"func (p *GatewayCustomDomainsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *InterfaceLoadBalancersClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *VPNSitesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *GatewaysClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *SystemTopicsListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *ServicesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *NamespacesListAuthorizationRulesPager) Err() error {\n\treturn p.err\n}",
"func (p *UsageModelsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *WCFRelaysListByNamespacePager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualApplianceSitesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *TableClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *ConfigurationServicesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointConnectionsClientListByPrivateLinkScopePager) Err() error {\n\treturn p.err\n}",
"func (p *ApplicationGatewayPrivateEndpointConnectionsClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *EventSubscriptionsListByResourcePager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateZonesClientListByResourceGroupPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerNetworkInterfacesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *LoadBalancerProbesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateEndpointsClientListBySubscriptionPager) Err() error {\n\treturn p.err\n}",
"func (p *ProfilesClientListPager) Err() error {\n\treturn p.err\n}",
"func (p *PrivateLinkServicesClientListPrivateEndpointConnectionsPager) Err() error {\n\treturn p.err\n}",
"func (p *VirtualNetworkTapsClientListAllPager) Err() error {\n\treturn p.err\n}"
] | [
"0.71781874",
"0.7132234",
"0.71245384",
"0.7120786",
"0.7088907",
"0.7079575",
"0.70702946",
"0.70490265",
"0.7018319",
"0.6989553",
"0.695567",
"0.69453233",
"0.6935331",
"0.6925309",
"0.6916923",
"0.68996024",
"0.6896403",
"0.68959993",
"0.6891861",
"0.6891023",
"0.6887537",
"0.6865112",
"0.6841569",
"0.6836669",
"0.68278176",
"0.68222636",
"0.6818471",
"0.6814663",
"0.6814663",
"0.6814663",
"0.68140006",
"0.6807026",
"0.6806053",
"0.68033075",
"0.6802025",
"0.6792935",
"0.67892754",
"0.6783128",
"0.6779005",
"0.6775144",
"0.6774527",
"0.6771359",
"0.67698693",
"0.6769835",
"0.6767724",
"0.6767428",
"0.67664814",
"0.67605424",
"0.67561746",
"0.67556524",
"0.67552966",
"0.67502564",
"0.67481655",
"0.67463756",
"0.67454296",
"0.67454296",
"0.6745222",
"0.67416626",
"0.67398983",
"0.67298377",
"0.67288965",
"0.6728063",
"0.67270994",
"0.6721826",
"0.6719065",
"0.67159045",
"0.6713199",
"0.6705555",
"0.6705058",
"0.66991127",
"0.66960156",
"0.66900355",
"0.66853434",
"0.668534",
"0.66848063",
"0.6676543",
"0.66740406",
"0.66691744",
"0.6667403",
"0.66654116",
"0.666485",
"0.6663483",
"0.6663009",
"0.6662855",
"0.6661328",
"0.66585964",
"0.66560453",
"0.6651633",
"0.6651128",
"0.6648916",
"0.66463673",
"0.664027",
"0.6637946",
"0.6636623",
"0.6636231",
"0.6636164",
"0.66336966",
"0.66324973",
"0.66221744",
"0.66214824",
"0.6621386"
] | 0.0 | -1 |
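The `Err` method documented in the record above and the `NextPage` implementation documented in the record below (together with the many `PageResponse()` one-liners among the negatives, which simply return `p.current`) make up the legacy paging contract used throughout these samples: `NextPage` advances and records any failure in `p.err`, `PageResponse` exposes the current page, and `Err` surfaces the recorded failure once iteration stops. The sketch below shows the usual consumption loop under that contract; it is a fragment, `pager` is assumed to be a `*DeploymentScriptsListBySubscriptionPager` (or any pager with these three methods), and the `Value` field is assumed from the list-result convention visible in the surrounding code.

```go
// Legacy pager consumption: iterate, read each page, then check the recorded error.
for pager.NextPage(ctx) {
	page := pager.PageResponse() // returns p.current, the most recently fetched page
	for _, script := range page.DeploymentScriptListResult.Value {
		_ = script // placeholder for real per-item handling
	}
}
if err := pager.Err(); err != nil { // last error encountered while paging
	log.Fatalf("paging failed: %v", err)
}
```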
NextPage returns true if the pager advanced to the next page. Returns false if there are no more pages or an error occurred. | func (p *DeploymentScriptsListBySubscriptionPager) NextPage(ctx context.Context) bool {
var req *policy.Request
var err error
if !reflect.ValueOf(p.current).IsZero() {
if p.current.DeploymentScriptListResult.NextLink == nil || len(*p.current.DeploymentScriptListResult.NextLink) == 0 {
return false
}
req, err = p.advancer(ctx, p.current)
} else {
req, err = p.requester(ctx)
}
if err != nil {
p.err = err
return false
}
resp, err := p.client.pl.Do(req)
if err != nil {
p.err = err
return false
}
if !runtime.HasStatusCode(resp, http.StatusOK) {
p.err = p.client.listBySubscriptionHandleError(resp)
return false
}
result, err := p.client.listBySubscriptionHandleResponse(resp)
if err != nil {
p.err = err
return false
}
p.current = result
return true
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *Pagination) HasNext() bool {\n\treturn p.Page+1 <= p.NumPages()\n}",
"func (p *SmartGroupsClientGetAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.SmartGroupsList.NextLink == nil || len(*p.current.SmartGroupsList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ManagementClientGetActiveSessionsPager) NextPage(ctx context.Context) bool {\n\tif !p.second {\n\t\tp.second = true\n\t\treturn true\n\t} else if !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BastionActiveSessionListResult.NextLink == nil || len(*p.current.BastionActiveSessionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t}\n\treq, err := runtime.NewRequest(ctx, http.MethodGet, *p.current.BastionActiveSessionListResult.NextLink)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getActiveSessionsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *StoragesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.StorageResourceCollection.NextLink == nil || len(*p.current.StorageResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p Pagination) HasNext() bool {\n\ttotalPage := p.TotalPage()\n\tif totalPage == 0 {\n\t\treturn false\n\t}\n\n\tpage := p.CurrentPage()\n\tif page == 0 {\n\t\treturn false\n\t}\n\n\treturn page < totalPage\n}",
"func (p *Pagination) HasNext() bool {\n\treturn p.Total > p.PageNumber*p.PageSize\n}",
"func (p *Paginator) HasNext() bool {\n\treturn p.Page() < p.PageNums()\n}",
"func (o *PaginationProperties) HasNextPage() bool {\n\tif o != nil && o.NextPage != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (p *PrivateLinkResourcesListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateLinkResources.NextLink == nil || len(*p.current.PrivateLinkResources.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationListResult.NextLink == nil || len(*p.current.OperationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationListResult.NextLink == nil || len(*p.current.OperationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (*offsetPageInfoImpl) HasNextPage(p graphql.ResolveParams) (bool, error) {\n\tpage := p.Source.(offsetPageInfo)\n\treturn (page.offset + page.limit) < page.totalCount, nil\n}",
"func (p *PartnerTopicsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PartnerTopicsListResult.NextLink == nil || len(*p.current.PartnerTopicsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *AccountsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *ProfilesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ProfileListResult.NextLink == nil || len(*p.current.ProfileListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PublicIPAddressesClientListVirtualMachineScaleSetPublicIPAddressesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PublicIPAddressListResult.NextLink == nil || len(*p.current.PublicIPAddressListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listVirtualMachineScaleSetPublicIPAddressesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *Paginator) HasNext() bool {\n\treturn p.HasNextPage\n}",
"func (p *LinkedServerListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisLinkedServerWithPropertiesList.NextLink == nil || len(*p.current.RedisLinkedServerWithPropertiesList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualAppliancesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualApplianceListResult.NextLink == nil || len(*p.current.VirtualApplianceListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *NatRulesClientListByVPNGatewayPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListVPNGatewayNatRulesResult.NextLink == nil || len(*p.current.ListVPNGatewayNatRulesResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByVPNGatewayHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *ProjectsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (resp *PharosResponse) HasNextPage() bool {\n\treturn resp.Next != nil && *resp.Next != \"\"\n}",
"func (p *OperationsListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ClientDiscoveryResponse.NextLink == nil || len(*p.current.ClientDiscoveryResponse.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RouteTablesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteTableListResult.NextLink == nil || len(*p.current.RouteTableListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *BlobContainersClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListContainerItems.NextLink == nil || len(*p.current.ListContainerItems.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualNetworkTapsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualNetworkTapListResult.NextLink == nil || len(*p.current.VirtualNetworkTapListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *QueueClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListQueueResource.NextLink == nil || len(*p.current.ListQueueResource.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AlertsClientGetAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AlertsList.NextLink == nil || len(*p.current.AlertsList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRoutePortsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRoutePortListResult.NextLink == nil || len(*p.current.ExpressRoutePortListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRouteLinksClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRouteLinkListResult.NextLink == nil || len(*p.current.ExpressRouteLinkListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RedisListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisListResult.NextLink == nil || len(*p.current.RedisListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *KeyVaultClientGetKeysPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.KeyListResult.NextLink == nil || len(*p.current.KeyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getKeysHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ApplicationGatewaysClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ApplicationGatewayListResult.NextLink == nil || len(*p.current.ApplicationGatewayListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RouteFilterRulesClientListByRouteFilterPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteFilterRuleListResult.NextLink == nil || len(*p.current.RouteFilterRuleListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByRouteFilterHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AvailableResourceGroupDelegationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AvailableDelegationsResult.NextLink == nil || len(*p.current.AvailableDelegationsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *IPGroupsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.IPGroupListResult.NextLink == nil || len(*p.current.IPGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRoutePortsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRoutePortListResult.NextLink == nil || len(*p.current.ExpressRoutePortListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateLinkServicesClientListPrivateEndpointConnectionsPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateEndpointConnectionListResult.NextLink == nil || len(*p.current.PrivateEndpointConnectionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listPrivateEndpointConnectionsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualAppliancesClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualApplianceListResult.NextLink == nil || len(*p.current.VirtualApplianceListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *FirewallRulesListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisFirewallRuleListResult.NextLink == nil || len(*p.current.RedisFirewallRuleListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *DomainsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DomainsListResult.NextLink == nil || len(*p.current.DomainsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PoolsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.CapacityPoolList.NextLink == nil || len(*p.current.CapacityPoolList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ReservationClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListResult.NextLink == nil || len(*p.current.ListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *EnterprisesPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *UsagesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.UsagesListResult.NextLink == nil || len(*p.current.UsagesListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *TopicsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.TopicsListResult.NextLink == nil || len(*p.current.TopicsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AppsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AppResourceCollection.NextLink == nil || len(*p.current.AppResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationList.NextLink == nil || len(*p.current.OperationList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ProfilesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ProfileListResult.NextLink == nil || len(*p.current.ProfileListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *FirewallPoliciesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.FirewallPolicyListResult.NextLink == nil || len(*p.current.FirewallPolicyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateLinkResourcesListByResourcePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateLinkResourcesListResult.NextLink == nil || len(*p.current.PrivateLinkResourcesListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *DeploymentScriptsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DeploymentScriptListResult.NextLink == nil || len(*p.current.DeploymentScriptListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PartnerRegistrationsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PartnerRegistrationsListResult.NextLink == nil || len(*p.current.PartnerRegistrationsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AvailableDelegationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AvailableDelegationsResult.NextLink == nil || len(*p.current.AvailableDelegationsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualHubsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListVirtualHubsResult.NextLink == nil || len(*p.current.ListVirtualHubsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *InterfacesClientListVirtualMachineScaleSetIPConfigurationsPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.InterfaceIPConfigurationListResult.NextLink == nil || len(*p.current.InterfaceIPConfigurationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listVirtualMachineScaleSetIPConfigurationsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ServiceEndpointPoliciesClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ServiceEndpointPolicyListResult.NextLink == nil || len(*p.current.ServiceEndpointPolicyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RedisListBySubscriptionPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RedisListResult.NextLink == nil || len(*p.current.RedisListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listBySubscriptionHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBySubscriptionHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualNetworkTapsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualNetworkTapListResult.NextLink == nil || len(*p.current.VirtualNetworkTapListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RecordSetsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RecordSetListResult.NextLink == nil || len(*p.current.RecordSetListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateLinkServicesClientListAutoApprovedPrivateLinkServicesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AutoApprovedPrivateLinkServicesResult.NextLink == nil || len(*p.current.AutoApprovedPrivateLinkServicesResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAutoApprovedPrivateLinkServicesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.OperationsList.NextLink == nil || len(*p.current.OperationsList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *WCFRelaysListAuthorizationRulesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AuthorizationRuleListResult.NextLink == nil || len(*p.current.AuthorizationRuleListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listAuthorizationRulesHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAuthorizationRulesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PublicIPAddressesClientListVirtualMachineScaleSetVMPublicIPAddressesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PublicIPAddressListResult.NextLink == nil || len(*p.current.PublicIPAddressListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listVirtualMachineScaleSetVMPublicIPAddressesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RoutesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteListResult.NextLink == nil || len(*p.current.RouteListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AccountsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AccountList.NextLink == nil || len(*p.current.AccountList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *QuotaClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.QuotaLimits.NextLink == nil || len(*p.current.QuotaLimits.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *NatGatewaysClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.NatGatewayListResult.NextLink == nil || len(*p.current.NatGatewayListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *GatewaysClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.GatewayResourceCollection.NextLink == nil || len(*p.current.GatewayResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PrivateEndpointConnectionsListByResourcePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateEndpointConnectionListResult.NextLink == nil || len(*p.current.PrivateEndpointConnectionListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualNetworksClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualNetworkListResult.NextLink == nil || len(*p.current.VirtualNetworkListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VaultsListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VaultList.NextLink == nil || len(*p.current.VaultList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceGroupHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *DscpConfigurationClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DscpConfigurationListResult.NextLink == nil || len(*p.current.DscpConfigurationListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *BindingsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BindingResourceCollection.NextLink == nil || len(*p.current.BindingResourceCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *AccountGroupsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *PrivateEndpointsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PrivateEndpointListResult.NextLink == nil || len(*p.current.PrivateEndpointListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *OperationsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AvailableOperations.NextLink == nil || len(*p.current.AvailableOperations.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *EventSubscriptionsListByResourcePager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.EventSubscriptionsListResult.NextLink == nil || len(*p.current.EventSubscriptionsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listByResourceHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ReservationClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ReservationList.NextLink == nil || len(*p.current.ReservationList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ExpressRouteServiceProvidersClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ExpressRouteServiceProviderListResult.NextLink == nil || len(*p.current.ExpressRouteServiceProviderListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AzureFirewallsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AzureFirewallListResult.NextLink == nil || len(*p.current.AzureFirewallListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *TableClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListTableResource.NextLink == nil || len(*p.current.ListTableResource.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *LoadBalancersClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.LoadBalancerListResult.NextLink == nil || len(*p.current.LoadBalancerListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualHubsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ListVirtualHubsResult.NextLink == nil || len(*p.current.ListVirtualHubsResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *AccountsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.AccountListResult.NextLink == nil || len(*p.current.AccountListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VaultsListBySubscriptionIDPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VaultList.NextLink == nil || len(*p.current.VaultList.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listBySubscriptionIDHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBySubscriptionIDHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *BuildServiceClientListBuildResultsPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.BuildResultCollection.NextLink == nil || len(*p.current.BuildResultCollection.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBuildResultsHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *ApplicationSecurityGroupsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.ApplicationSecurityGroupListResult.NextLink == nil || len(*p.current.ApplicationSecurityGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *WebApplicationFirewallPoliciesClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.WebApplicationFirewallPolicyListResult.NextLink == nil || len(*p.current.WebApplicationFirewallPolicyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PartnerTopicsListBySubscriptionPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PartnerTopicsListResult.NextLink == nil || len(*p.current.PartnerTopicsListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listBySubscriptionHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBySubscriptionHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *VirtualRouterPeeringsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.VirtualRouterPeeringListResult.NextLink == nil || len(*p.current.VirtualRouterPeeringListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *KeyVaultClientGetDeletedKeysPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DeletedKeyListResult.NextLink == nil || len(*p.current.DeletedKeyListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.getDeletedKeysHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (m *Model) NextPage() {\n\tif !m.OnLastPage() {\n\t\tm.Page++\n\t}\n}",
"func (p *AvailableEndpointServicesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.EndpointServicesListResult.NextLink == nil || len(*p.current.EndpointServicesListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RouteTablesClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.RouteTableListResult.NextLink == nil || len(*p.current.RouteTableListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (pager *IntegrationsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (pager *DestinationsPager) HasNext() bool {\n\treturn pager.hasNext\n}",
"func (p *IPGroupsClientListByResourceGroupPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.IPGroupListResult.NextLink == nil || len(*p.current.IPGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listByResourceGroupHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *SecurityGroupsClientListAllPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.SecurityGroupListResult.NextLink == nil || len(*p.current.SecurityGroupListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listAllHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *FlowLogsClientListPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.FlowLogListResult.NextLink == nil || len(*p.current.FlowLogListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *PublicIPAddressesClientListCloudServicePublicIPAddressesPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.PublicIPAddressListResult.NextLink == nil || len(*p.current.PublicIPAddressListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = runtime.NewResponseError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listCloudServicePublicIPAddressesHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}"
] | [
"0.69336736",
"0.6910068",
"0.6901863",
"0.688831",
"0.686525",
"0.68420583",
"0.68124425",
"0.680148",
"0.67817",
"0.6758141",
"0.6758141",
"0.67477936",
"0.6741167",
"0.6695066",
"0.66943264",
"0.6681217",
"0.66799814",
"0.6657003",
"0.6643805",
"0.6641323",
"0.6636811",
"0.66281503",
"0.66252583",
"0.6615692",
"0.6600454",
"0.6596153",
"0.6595461",
"0.6593395",
"0.6585765",
"0.6581242",
"0.657672",
"0.6568174",
"0.6561736",
"0.6560255",
"0.6558222",
"0.65548646",
"0.65523195",
"0.655011",
"0.65490514",
"0.6546229",
"0.6545831",
"0.6544535",
"0.65413666",
"0.6541023",
"0.653668",
"0.6533361",
"0.65328306",
"0.65314394",
"0.65287817",
"0.6528372",
"0.65275043",
"0.6523497",
"0.6522478",
"0.65178096",
"0.65131176",
"0.65116125",
"0.6509695",
"0.64978486",
"0.64928585",
"0.64894825",
"0.6484845",
"0.6480659",
"0.64800584",
"0.647945",
"0.6476123",
"0.647563",
"0.6472928",
"0.64665395",
"0.64659095",
"0.64606553",
"0.6457655",
"0.6456376",
"0.64563733",
"0.6455878",
"0.64534146",
"0.64520276",
"0.64498836",
"0.64493746",
"0.64463586",
"0.6444577",
"0.6444114",
"0.6441317",
"0.64407986",
"0.6438916",
"0.6437468",
"0.64347345",
"0.64303803",
"0.6423627",
"0.6423557",
"0.6422078",
"0.64172155",
"0.64165",
"0.64073527",
"0.6403009",
"0.64024436",
"0.6401157",
"0.6400828",
"0.6400441",
"0.64004153",
"0.63983184",
"0.63973725"
] | 0.0 | -1 |
PageResponse returns the current DeploymentScriptsListBySubscriptionResponse page. | func (p *DeploymentScriptsListBySubscriptionPager) PageResponse() DeploymentScriptsListBySubscriptionResponse {
return p.current
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (p *DeploymentScriptsListByResourceGroupPager) PageResponse() DeploymentScriptsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *AccountsClientListBySubscriptionPager) PageResponse() AccountsClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *DomainsListBySubscriptionPager) PageResponse() DomainsListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListRegionalBySubscriptionPager) PageResponse() EventSubscriptionsListRegionalBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *VaultsListBySubscriptionIDPager) PageResponse() VaultsListBySubscriptionIDResponse {\n\treturn p.current\n}",
"func (p *ServicesClientListBySubscriptionPager) PageResponse() ServicesClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListGlobalBySubscriptionPager) PageResponse() EventSubscriptionsListGlobalBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *WebCategoriesClientListBySubscriptionPager) PageResponse() WebCategoriesClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *DeploymentScriptsListBySubscriptionPager) NextPage(ctx context.Context) bool {\n\tvar req *policy.Request\n\tvar err error\n\tif !reflect.ValueOf(p.current).IsZero() {\n\t\tif p.current.DeploymentScriptListResult.NextLink == nil || len(*p.current.DeploymentScriptListResult.NextLink) == 0 {\n\t\t\treturn false\n\t\t}\n\t\treq, err = p.advancer(ctx, p.current)\n\t} else {\n\t\treq, err = p.requester(ctx)\n\t}\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tresp, err := p.client.pl.Do(req)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\tp.err = p.client.listBySubscriptionHandleError(resp)\n\t\treturn false\n\t}\n\tresult, err := p.client.listBySubscriptionHandleResponse(resp)\n\tif err != nil {\n\t\tp.err = err\n\t\treturn false\n\t}\n\tp.current = result\n\treturn true\n}",
"func (p *RedisListBySubscriptionPager) PageResponse() RedisListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *PartnerNamespacesListBySubscriptionPager) PageResponse() PartnerNamespacesListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *TopicsListBySubscriptionPager) PageResponse() TopicsListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *SystemTopicsListBySubscriptionPager) PageResponse() SystemTopicsListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *AlertProcessingRulesClientListBySubscriptionPager) PageResponse() AlertProcessingRulesClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListByDomainTopicPager) PageResponse() EventSubscriptionsListByDomainTopicResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListByResourcePager) PageResponse() EventSubscriptionsListByResourceResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListRegionalBySubscriptionForTopicTypePager) PageResponse() EventSubscriptionsListRegionalBySubscriptionForTopicTypeResponse {\n\treturn p.current\n}",
"func (p *PrivateLinkServicesClientListBySubscriptionPager) PageResponse() PrivateLinkServicesClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *PartnerRegistrationsListBySubscriptionPager) PageResponse() PartnerRegistrationsListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *PrivateEndpointsClientListBySubscriptionPager) PageResponse() PrivateEndpointsClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *HybridConnectionsListByNamespacePager) PageResponse() HybridConnectionsListByNamespaceResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListGlobalBySubscriptionForTopicTypePager) PageResponse() EventSubscriptionsListGlobalBySubscriptionForTopicTypeResponse {\n\treturn p.current\n}",
"func (p *VirtualHubsClientListPager) PageResponse() VirtualHubsClientListResponse {\n\treturn p.current\n}",
"func (p *MachinesClientListBySubscriptionPager) PageResponse() MachinesClientListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *SystemTopicEventSubscriptionsListBySystemTopicPager) PageResponse() SystemTopicEventSubscriptionsListBySystemTopicResponse {\n\treturn p.current\n}",
"func (p *PartnerTopicsListBySubscriptionPager) PageResponse() PartnerTopicsListBySubscriptionResponse {\n\treturn p.current\n}",
"func (p *HybridConnectionsListAuthorizationRulesPager) PageResponse() HybridConnectionsListAuthorizationRulesResponse {\n\treturn p.current\n}",
"func (page SQLServerRegistrationListResultPage) Response() SQLServerRegistrationListResult {\n\treturn page.ssrlr\n}",
"func (o *GetAllSubscriptionsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif payload == nil {\n\t\t// return empty array\n\t\tpayload = models.SubscriptionList{}\n\t}\n\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}",
"func (client ServicesClient) ListBySubscription(ctx context.Context) (result ServiceResourceListPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/ServicesClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.srl.Response.Response != nil {\n\t\t\t\tsc = result.srl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.srl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.srl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"appplatform.ServicesClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.srl.hasNextLink() && result.srl.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}",
"func (page SQLServerListResultPage) Response() SQLServerListResult {\n\treturn page.sslr\n}",
"func (p *EventSubscriptionsListRegionalByResourceGroupPager) PageResponse() EventSubscriptionsListRegionalByResourceGroupResponse {\n\treturn p.current\n}",
"func (page ServerListResultPage) Response() ServerListResult {\n return page.slr\n }",
"func (p *NamespacesListAuthorizationRulesPager) PageResponse() NamespacesListAuthorizationRulesResponse {\n\treturn p.current\n}",
"func (p *ApplicationSecurityGroupsClientListAllPager) PageResponse() ApplicationSecurityGroupsClientListAllResponse {\n\treturn p.current\n}",
"func (client DataControllersClient) ListInSubscription(ctx context.Context) (result PageOfDataControllerResourcePage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DataControllersClient.ListInSubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.podcr.Response.Response != nil {\n\t\t\t\tsc = result.podcr.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listInSubscriptionNextResults\n\treq, err := client.ListInSubscriptionPreparer(ctx)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInSubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListInSubscriptionSender(req)\n\tif err != nil {\n\t\tresult.podcr.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInSubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.podcr, err = client.ListInSubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"ListInSubscription\", resp, \"Failure responding to request\")\n\t\treturn\n\t}\n\tif result.podcr.hasNextLink() && result.podcr.IsEmpty() {\n\t\terr = result.NextWithContext(ctx)\n\t\treturn\n\t}\n\n\treturn\n}",
"func (p *PartnerTopicEventSubscriptionsListByPartnerTopicPager) PageResponse() PartnerTopicEventSubscriptionsListByPartnerTopicResponse {\n\treturn p.current\n}",
"func (p *APIPortalCustomDomainsClientListPager) PageResponse() APIPortalCustomDomainsClientListResponse {\n\treturn p.current\n}",
"func (p *WebApplicationFirewallPoliciesClientListAllPager) PageResponse() WebApplicationFirewallPoliciesClientListAllResponse {\n\treturn p.current\n}",
"func (p *EventSubscriptionsListRegionalByResourceGroupForTopicTypePager) PageResponse() EventSubscriptionsListRegionalByResourceGroupForTopicTypeResponse {\n\treturn p.current\n}",
"func (c *DefaultApiService) ListSubscription(params *ListSubscriptionParams) (*ListSubscriptionResponse, error) {\n\tpath := \"/v1/Subscriptions\"\n\n\tdata := url.Values{}\n\theaders := make(map[string]interface{})\n\n\tif params != nil && params.SinkSid != nil {\n\t\tdata.Set(\"SinkSid\", *params.SinkSid)\n\t}\n\tif params != nil && params.PageSize != nil {\n\t\tdata.Set(\"PageSize\", fmt.Sprint(*params.PageSize))\n\t}\n\n\tresp, err := c.requestHandler.Get(c.baseURL+path, data, headers)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tps := &ListSubscriptionResponse{}\n\tif err := json.NewDecoder(resp.Body).Decode(ps); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn ps, err\n}",
"func (p *VirtualHubsClientListByResourceGroupPager) PageResponse() VirtualHubsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *ServiceEndpointPoliciesClientListByResourceGroupPager) PageResponse() ServiceEndpointPoliciesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (client AccountClient) ListBySubscription(ctx context.Context) (result AccountResourceDescriptionListPage, err error) {\n if tracing.IsEnabled() {\n ctx = tracing.StartSpan(ctx, fqdn + \"/AccountClient.ListBySubscription\")\n defer func() {\n sc := -1\n if result.ardl.Response.Response != nil {\n sc = result.ardl.Response.Response.StatusCode\n }\n tracing.EndSpan(ctx, sc, err)\n }()\n }\n result.fn = client.listBySubscriptionNextResults\n req, err := client.ListBySubscriptionPreparer(ctx)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", nil , \"Failure preparing request\")\n return\n }\n\n resp, err := client.ListBySubscriptionSender(req)\n if err != nil {\n result.ardl.Response = autorest.Response{Response: resp}\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n return\n }\n\n result.ardl, err = client.ListBySubscriptionResponder(resp)\n if err != nil {\n err = autorest.NewErrorWithError(err, \"microsoftazuremanagementaisupercomputer.AccountClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n }\n if result.ardl.hasNextLink() && result.ardl.IsEmpty() {\n err = result.NextWithContext(ctx)\n }\n\n return\n}",
"func (client LabClient) ListBySubscription(ctx context.Context, filter string, top *int32, orderBy string) (result ResponseWithContinuationLabPage, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/LabClient.ListBySubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.rwcl.Response.Response != nil {\n\t\t\t\tsc = result.rwcl.Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.fn = client.listBySubscriptionNextResults\n\treq, err := client.ListBySubscriptionPreparer(ctx, filter, top, orderBy)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", nil, \"Failure preparing request\")\n\t\treturn\n\t}\n\n\tresp, err := client.ListBySubscriptionSender(req)\n\tif err != nil {\n\t\tresult.rwcl.Response = autorest.Response{Response: resp}\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", resp, \"Failure sending request\")\n\t\treturn\n\t}\n\n\tresult.rwcl, err = client.ListBySubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"dtl.LabClient\", \"ListBySubscription\", resp, \"Failure responding to request\")\n\t}\n\n\treturn\n}",
"func (p *ServiceEndpointPoliciesClientListPager) PageResponse() ServiceEndpointPoliciesClientListResponse {\n\treturn p.current\n}",
"func (p *VirtualHubRouteTableV2SClientListPager) PageResponse() VirtualHubRouteTableV2SClientListResponse {\n\treturn p.current\n}",
"func (p *DeploymentsClientListPager) PageResponse() DeploymentsClientListResponse {\n\treturn p.current\n}",
"func (p *StoragesClientListPager) PageResponse() StoragesClientListResponse {\n\treturn p.current\n}",
"func (p *AccountsClientListByResourceGroupPager) PageResponse() AccountsClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (page ServiceSkuListPage) Response() ServiceSkuList {\n\treturn page.ssl\n}",
"func (p *GatewayCustomDomainsClientListPager) PageResponse() GatewayCustomDomainsClientListResponse {\n\treturn p.current\n}",
"func (p *NamespacesListPager) PageResponse() NamespacesListResponse {\n\treturn p.current\n}",
"func (page VirtualMachineScaleSetListResultPageClient) Response() azcompute.VirtualMachineScaleSetListResult {\n\tr := azcompute.VirtualMachineScaleSetListResult{}\n\terr := DeepCopy(&r, page.vmsslrp.Response())\n\tif err != nil {\n\t\tpage.err = fmt.Errorf(\"fail to get virtual machine scale set list result, %s\", err) //nolint:staticcheck\n\t}\n\treturn r\n}",
"func (p *WCFRelaysListByNamespacePager) PageResponse() WCFRelaysListByNamespaceResponse {\n\treturn p.current\n}",
"func (p *BuildServiceAgentPoolClientListPager) PageResponse() BuildServiceAgentPoolClientListResponse {\n\treturn p.current\n}",
"func (p *WCFRelaysListAuthorizationRulesPager) PageResponse() WCFRelaysListAuthorizationRulesResponse {\n\treturn p.current\n}",
"func (client *AvailabilitySetsClient) ListBySubscription(options *AvailabilitySetsListBySubscriptionOptions) *AvailabilitySetsListBySubscriptionPager {\n\treturn &AvailabilitySetsListBySubscriptionPager{\n\t\tclient: client,\n\t\trequester: func(ctx context.Context) (*policy.Request, error) {\n\t\t\treturn client.listBySubscriptionCreateRequest(ctx, options)\n\t\t},\n\t\tadvancer: func(ctx context.Context, resp AvailabilitySetsListBySubscriptionResponse) (*policy.Request, error) {\n\t\t\treturn runtime.NewRequest(ctx, http.MethodGet, *resp.AvailabilitySetListResult.NextLink)\n\t\t},\n\t}\n}",
"func (p *SecurityGroupsClientListAllPager) PageResponse() SecurityGroupsClientListAllResponse {\n\treturn p.current\n}",
"func (client DataControllersClient) ListInSubscriptionComplete(ctx context.Context) (result PageOfDataControllerResourceIterator, err error) {\n\tif tracing.IsEnabled() {\n\t\tctx = tracing.StartSpan(ctx, fqdn+\"/DataControllersClient.ListInSubscription\")\n\t\tdefer func() {\n\t\t\tsc := -1\n\t\t\tif result.Response().Response.Response != nil {\n\t\t\t\tsc = result.page.Response().Response.Response.StatusCode\n\t\t\t}\n\t\t\ttracing.EndSpan(ctx, sc, err)\n\t\t}()\n\t}\n\tresult.page, err = client.ListInSubscription(ctx)\n\treturn\n}",
"func (p *DdosProtectionPlansClientListByResourceGroupPager) PageResponse() DdosProtectionPlansClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *HubVirtualNetworkConnectionsClientListPager) PageResponse() HubVirtualNetworkConnectionsClientListResponse {\n\treturn p.current\n}",
"func (p *VirtualApplianceSKUsClientListPager) PageResponse() VirtualApplianceSKUsClientListResponse {\n\treturn p.current\n}",
"func (client *ConnectedEnvironmentsClient) NewListBySubscriptionPager(options *ConnectedEnvironmentsClientListBySubscriptionOptions) *runtime.Pager[ConnectedEnvironmentsClientListBySubscriptionResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[ConnectedEnvironmentsClientListBySubscriptionResponse]{\n\t\tMore: func(page ConnectedEnvironmentsClientListBySubscriptionResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *ConnectedEnvironmentsClientListBySubscriptionResponse) (ConnectedEnvironmentsClientListBySubscriptionResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listBySubscriptionCreateRequest(ctx, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn ConnectedEnvironmentsClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.pl.Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn ConnectedEnvironmentsClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn ConnectedEnvironmentsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listBySubscriptionHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *FirewallPoliciesClientListAllPager) PageResponse() FirewallPoliciesClientListAllResponse {\n\treturn p.current\n}",
"func CreateDescribeSubscriptionInstancesResponse() (response *DescribeSubscriptionInstancesResponse) {\n\tresponse = &DescribeSubscriptionInstancesResponse{\n\t\tBaseResponse: &responses.BaseResponse{},\n\t}\n\treturn\n}",
"func (p *ServiceRegistriesClientListPager) PageResponse() ServiceRegistriesClientListResponse {\n\treturn p.current\n}",
"func (p *VirtualNetworkGatewaysClientListConnectionsPager) PageResponse() VirtualNetworkGatewaysClientListConnectionsResponse {\n\treturn p.current\n}",
"func (p *BuildServiceBuilderClientListPager) PageResponse() BuildServiceBuilderClientListResponse {\n\treturn p.current\n}",
"func (p *BuildServiceClientListBuildServicesPager) PageResponse() BuildServiceClientListBuildServicesResponse {\n\treturn p.current\n}",
"func (page ServiceListResultPage) Response() ServiceListResult {\n\treturn page.slr\n}",
"func (client *SpatialAnchorsAccountsClient) NewListBySubscriptionPager(options *SpatialAnchorsAccountsClientListBySubscriptionOptions) *runtime.Pager[SpatialAnchorsAccountsClientListBySubscriptionResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[SpatialAnchorsAccountsClientListBySubscriptionResponse]{\n\t\tMore: func(page SpatialAnchorsAccountsClientListBySubscriptionResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *SpatialAnchorsAccountsClientListBySubscriptionResponse) (SpatialAnchorsAccountsClientListBySubscriptionResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listBySubscriptionCreateRequest(ctx, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn SpatialAnchorsAccountsClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn SpatialAnchorsAccountsClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn SpatialAnchorsAccountsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listBySubscriptionHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (client *ServersClient) NewListBySubscriptionPager(options *ServersClientListBySubscriptionOptions) *runtime.Pager[ServersClientListBySubscriptionResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[ServersClientListBySubscriptionResponse]{\n\t\tMore: func(page ServersClientListBySubscriptionResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *ServersClientListBySubscriptionResponse) (ServersClientListBySubscriptionResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listBySubscriptionCreateRequest(ctx, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn ServersClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn ServersClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn ServersClientListBySubscriptionResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listBySubscriptionHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *LoadBalancerOutboundRulesClientListPager) PageResponse() LoadBalancerOutboundRulesClientListResponse {\n\treturn p.current\n}",
"func (p *NamespacesListByResourceGroupPager) PageResponse() NamespacesListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *VirtualApplianceSitesClientListPager) PageResponse() VirtualApplianceSitesClientListResponse {\n\treturn p.current\n}",
"func (client DataControllersClient) ListInSubscriptionResponder(resp *http.Response) (result PageOfDataControllerResource, err error) {\n\terr = autorest.Respond(\n\t\tresp,\n\t\tazure.WithErrorUnlessStatusCode(http.StatusOK),\n\t\tautorest.ByUnmarshallingJSON(&result),\n\t\tautorest.ByClosing())\n\tresult.Response = autorest.Response{Response: resp}\n\treturn\n}",
"func (p *TopicsListByResourceGroupPager) PageResponse() TopicsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *CachesClientListByResourceGroupPager) PageResponse() CachesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (p *BuildpackBindingClientListPager) PageResponse() BuildpackBindingClientListResponse {\n\treturn p.current\n}",
"func (p *AvailableServiceAliasesClientListByResourceGroupPager) PageResponse() AvailableServiceAliasesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (client DataControllersClient) listInSubscriptionNextResults(ctx context.Context, lastResults PageOfDataControllerResource) (result PageOfDataControllerResource, err error) {\n\treq, err := lastResults.pageOfDataControllerResourcePreparer(ctx)\n\tif err != nil {\n\t\treturn result, autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"listInSubscriptionNextResults\", nil, \"Failure preparing next results request\")\n\t}\n\tif req == nil {\n\t\treturn\n\t}\n\tresp, err := client.ListInSubscriptionSender(req)\n\tif err != nil {\n\t\tresult.Response = autorest.Response{Response: resp}\n\t\treturn result, autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"listInSubscriptionNextResults\", resp, \"Failure sending next results request\")\n\t}\n\tresult, err = client.ListInSubscriptionResponder(resp)\n\tif err != nil {\n\t\terr = autorest.NewErrorWithError(err, \"azuredata.DataControllersClient\", \"listInSubscriptionNextResults\", resp, \"Failure responding to next results request\")\n\t}\n\treturn\n}",
"func (p *EventChannelsListByPartnerNamespacePager) PageResponse() EventChannelsListByPartnerNamespaceResponse {\n\treturn p.current\n}",
"func (p *ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesPager) PageResponse() ApplicationGatewaysClientListAvailableSSLPredefinedPoliciesResponse {\n\treturn p.current\n}",
"func (p *ApplicationSecurityGroupsClientListPager) PageResponse() ApplicationSecurityGroupsClientListResponse {\n\treturn p.current\n}",
"func (p *AccountsClientListPager) PageResponse() AccountsClientListResponse {\n\treturn p.current\n}",
"func (p *AccountsClientListPager) PageResponse() AccountsClientListResponse {\n\treturn p.current\n}",
"func (page StorageContainerListResultPage) Response() StorageContainerListResult {\n\treturn page.sclr\n}",
"func (p *VPNSitesClientListByResourceGroupPager) PageResponse() VPNSitesClientListByResourceGroupResponse {\n\treturn p.current\n}",
"func (client *IotSecuritySolutionClient) NewListBySubscriptionPager(options *IotSecuritySolutionClientListBySubscriptionOptions) *runtime.Pager[IotSecuritySolutionClientListBySubscriptionResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[IotSecuritySolutionClientListBySubscriptionResponse]{\n\t\tMore: func(page IotSecuritySolutionClientListBySubscriptionResponse) bool {\n\t\t\treturn page.NextLink != nil && len(*page.NextLink) > 0\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *IotSecuritySolutionClientListBySubscriptionResponse) (IotSecuritySolutionClientListBySubscriptionResponse, error) {\n\t\t\tvar req *policy.Request\n\t\t\tvar err error\n\t\t\tif page == nil {\n\t\t\t\treq, err = client.listBySubscriptionCreateRequest(ctx, options)\n\t\t\t} else {\n\t\t\t\treq, err = runtime.NewRequest(ctx, http.MethodGet, *page.NextLink)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn IotSecuritySolutionClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn IotSecuritySolutionClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn IotSecuritySolutionClientListBySubscriptionResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listBySubscriptionHandleResponse(resp)\n\t\t},\n\t})\n}",
"func (p *DdosProtectionPlansClientListPager) PageResponse() DdosProtectionPlansClientListResponse {\n\treturn p.current\n}",
"func (p *WebApplicationFirewallPoliciesClientListPager) PageResponse() WebApplicationFirewallPoliciesClientListResponse {\n\treturn p.current\n}",
"func (p *KeyVaultClientGetKeyVersionsPager) PageResponse() KeyVaultClientGetKeyVersionsResponse {\n\treturn p.current\n}",
"func (p *PartnerNamespacesListByResourceGroupPager) PageResponse() PartnerNamespacesListByResourceGroupResponse {\n\treturn p.current\n}",
"func (page SingleSignOnResourceListResponsePage) Response() SingleSignOnResourceListResponse {\n\treturn page.ssorlr\n}",
"func (page SharedAccessSignatureAuthorizationRuleListResultPage) Response() SharedAccessSignatureAuthorizationRuleListResult {\n\treturn page.sasarlr\n}",
"func (page ServiceListPage) Response() ServiceList {\n\treturn page.sl\n}",
"func (p *SubnetsClientListPager) PageResponse() SubnetsClientListResponse {\n\treturn p.current\n}",
"func (p *PartnerRegistrationsListByResourceGroupPager) PageResponse() PartnerRegistrationsListByResourceGroupResponse {\n\treturn p.current\n}",
"func (client *MetricAlertsClient) NewListBySubscriptionPager(options *MetricAlertsClientListBySubscriptionOptions) *runtime.Pager[MetricAlertsClientListBySubscriptionResponse] {\n\treturn runtime.NewPager(runtime.PagingHandler[MetricAlertsClientListBySubscriptionResponse]{\n\t\tMore: func(page MetricAlertsClientListBySubscriptionResponse) bool {\n\t\t\treturn false\n\t\t},\n\t\tFetcher: func(ctx context.Context, page *MetricAlertsClientListBySubscriptionResponse) (MetricAlertsClientListBySubscriptionResponse, error) {\n\t\t\tctx = context.WithValue(ctx, runtime.CtxAPINameKey{}, \"MetricAlertsClient.NewListBySubscriptionPager\")\n\t\t\treq, err := client.listBySubscriptionCreateRequest(ctx, options)\n\t\t\tif err != nil {\n\t\t\t\treturn MetricAlertsClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tresp, err := client.internal.Pipeline().Do(req)\n\t\t\tif err != nil {\n\t\t\t\treturn MetricAlertsClientListBySubscriptionResponse{}, err\n\t\t\t}\n\t\t\tif !runtime.HasStatusCode(resp, http.StatusOK) {\n\t\t\t\treturn MetricAlertsClientListBySubscriptionResponse{}, runtime.NewResponseError(resp)\n\t\t\t}\n\t\t\treturn client.listBySubscriptionHandleResponse(resp)\n\t\t},\n\t\tTracer: client.internal.Tracer(),\n\t})\n}"
] | [
"0.6547863",
"0.6249684",
"0.6231479",
"0.6074528",
"0.5956464",
"0.58353925",
"0.5799472",
"0.57506186",
"0.57301974",
"0.5714881",
"0.56876177",
"0.56841713",
"0.56513435",
"0.5639212",
"0.56340235",
"0.55549806",
"0.5545648",
"0.5505137",
"0.5401996",
"0.5378753",
"0.5339897",
"0.53112006",
"0.52610344",
"0.5257146",
"0.5215877",
"0.5192882",
"0.5147164",
"0.5080606",
"0.50791806",
"0.50764525",
"0.5065391",
"0.50452346",
"0.5041547",
"0.5030693",
"0.5015188",
"0.4986837",
"0.49644417",
"0.49521995",
"0.49246",
"0.49230918",
"0.49206382",
"0.49124205",
"0.49033284",
"0.48879665",
"0.48814297",
"0.48787847",
"0.48704",
"0.4859716",
"0.48564306",
"0.4856392",
"0.4855049",
"0.4852904",
"0.48518348",
"0.48514438",
"0.48490447",
"0.48486945",
"0.48437846",
"0.48342976",
"0.48294097",
"0.48257038",
"0.48198336",
"0.48167259",
"0.48021638",
"0.47916898",
"0.47905648",
"0.47820655",
"0.4777396",
"0.47747517",
"0.47657043",
"0.47605443",
"0.47588524",
"0.475589",
"0.47456497",
"0.4742034",
"0.47329974",
"0.4731939",
"0.4728107",
"0.47275025",
"0.4721526",
"0.471526",
"0.4714666",
"0.47141176",
"0.47128356",
"0.4711454",
"0.4707762",
"0.47063035",
"0.47063035",
"0.4705241",
"0.47014883",
"0.46989942",
"0.46943334",
"0.46940494",
"0.4689117",
"0.4679315",
"0.46788678",
"0.4673191",
"0.46718502",
"0.46702635",
"0.46536732",
"0.46473855"
] | 0.761867 | 0 |
AllocSector finds the best path for this sector to use | func (f *FS) AllocSector(typ DataType, miner address.Address, ssize abi.SectorSize, cache bool, num abi.SectorNumber) (SectorPath, error) {
{
spath, err := f.FindSector(typ, miner, num)
if err == nil {
			return spath, xerrors.Errorf("allocating sector %s: %w", spath, ErrExists)
}
if err != ErrNotFound {
return "", err
}
}
need := overheadMul[typ] * uint64(ssize)
p, err := f.findBestPath(need, cache, false)
if err != nil {
return "", err
}
sp := p.Sector(typ, miner, num)
return sp, f.reserve(typ, sp.storage(), need)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (r *ComDoc) allocSectorTables() {\n\t// work out how many sectors are needed for both\n\tsatPerSector := r.SectorSize / 4\n\tmsatPerSector := satPerSector - 1\n\tfor {\n\t\tif len(r.SAT)%satPerSector != 0 {\n\t\t\tpanic(\"irregularly sized sector table\")\n\t\t}\n\t\tsatSectors := len(r.SAT) / satPerSector\n\t\tif satSectors > len(r.MSAT) {\n\t\t\t// allocate a new SAT sector\n\t\t\tsector := r.makeFreeSectors(1, false)[0]\n\t\t\tr.MSAT = append(r.MSAT, sector)\n\t\t\tr.SAT[sector] = SecIDSAT\n\t\t\t// a new SAT might be needed so check again\n\t\t\tcontinue\n\t\t}\n\t\t// 109 MSAT entries fit into the file header, the rest need more sectors\n\t\tmsatSectors := (len(r.MSAT) - msatInHeader + msatPerSector - 1) / msatPerSector\n\t\tif msatSectors > len(r.msatList) {\n\t\t\t// allocate a new MSAT sector\n\t\t\tsector := r.makeFreeSectors(1, false)[0]\n\t\t\tr.msatList = append(r.msatList, sector)\n\t\t\tr.SAT[sector] = SecIDMSAT\n\t\t\t// a new SAT might be needed so check again\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n}",
"func (g Galaxy) GetSector(x, y int) *Sector {\n\tif !burl.CheckBounds(x, y, coord_SECTOR_MAX, coord_SECTOR_MAX) {\n\t\treturn nil\n\t}\n\treturn g.sectors[y*g.width+x]\n}",
"func (handler *FetchHandler) remoteGetSector(w http.ResponseWriter, r *http.Request) {\n\tvars := mux.Vars(r)\n\n\tid, err := storiface.ParseSectorID(vars[\"id\"])\n\tif err != nil {\n\t\tlog.Errorf(\"%+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tft, err := FileTypeFromString(vars[\"type\"])\n\tif err != nil {\n\t\tlog.Errorf(\"%+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\t// The caller has a lock on this sector already, no need to get one here\n\t// passing 0 spt because we don't allocate anything\n\tsi := storiface.SectorRef{\n\t\tID: id,\n\t\tProofType: 0,\n\t}\n\n\tpaths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)\n\tif err != nil {\n\t\tlog.Errorf(\"AcquireSector: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\t// TODO: reserve local storage here\n\n\tpath := storiface.PathByType(paths, ft)\n\tif path == \"\" {\n\t\tlog.Error(\"acquired path was empty\")\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tstat, err := os.Stat(path)\n\tif err != nil {\n\t\tlog.Errorf(\"os.Stat: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tif stat.IsDir() {\n\t\tif _, has := r.Header[\"Range\"]; has {\n\t\t\tlog.Error(\"Range not supported on directories\")\n\t\t\tw.WriteHeader(500)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/x-tar\")\n\t\tw.WriteHeader(200)\n\n\t\terr := tarutil.TarDirectory(path, w, make([]byte, CopyBuf))\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"send tar: %+v\", err)\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tw.Header().Set(\"Content-Type\", \"application/octet-stream\")\n\t\t// will do a ranged read over the file at the given path if the caller has asked for a ranged read in the request headers.\n\t\thttp.ServeFile(w, r, path)\n\t}\n\n\tlog.Debugf(\"served sector file/dir, sectorID=%+v, fileType=%s, path=%s\", id, ft, path)\n}",
"func (hd *hostDownloader) Sector(root crypto.Hash) ([]byte, error) {\n\t// allot 10 minutes for this exchange; sufficient to transfer 4 MB over 50 kbps\n\thd.conn.SetDeadline(time.Now().Add(600 * time.Second))\n\tdefer hd.conn.SetDeadline(time.Now().Add(time.Hour))\n\n\t// calculate price\n\thd.contractor.mu.RLock()\n\theight := hd.contractor.blockHeight\n\thd.contractor.mu.RUnlock()\n\tif height >= hd.contract.FileContract.WindowStart {\n\t\treturn nil, errors.New(\"contract has already ended\")\n\t}\n\tsectorPrice := hd.host.DownloadBandwidthPrice.Mul64(modules.SectorSize)\n\tif sectorPrice.Cmp(hd.contract.LastRevision.NewValidProofOutputs[0].Value) >= 0 {\n\t\treturn nil, errors.New(\"contract has insufficient funds to support download\")\n\t}\n\n\t// initiate download by confirming host settings\n\tif err := startDownload(hd.conn, hd.host, hd.contractor.hdb); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// send download action\n\terr := encoding.WriteObject(hd.conn, []modules.DownloadAction{{\n\t\tMerkleRoot: root,\n\t\tOffset: 0,\n\t\tLength: modules.SectorSize,\n\t}})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// create and send revision to host for approval\n\trev := newDownloadRevision(hd.contract.LastRevision, sectorPrice)\n\tsignedTxn, err := negotiateRevision(hd.conn, rev, hd.contract.SecretKey, height)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// read sector data, completing one iteration of the download loop\n\tvar sectors [][]byte\n\tif err := encoding.ReadObject(hd.conn, §ors, modules.SectorSize+16); err != nil {\n\t\treturn nil, err\n\t} else if len(sectors) != 1 {\n\t\treturn nil, errors.New(\"host did not send enough sectors\")\n\t}\n\tsector := sectors[0]\n\tif uint64(len(sector)) != modules.SectorSize {\n\t\treturn nil, errors.New(\"host did not send enough sector data\")\n\t} else if crypto.MerkleRoot(sector) != root {\n\t\treturn nil, errors.New(\"host sent bad sector data\")\n\t}\n\n\t// update host contract\n\thd.contract.LastRevision = rev\n\thd.contract.LastRevisionTxn = signedTxn\n\n\thd.contractor.mu.Lock()\n\thd.contractor.contracts[hd.contract.ID] = hd.contract\n\thd.contractor.downloadSpending = hd.contractor.downloadSpending.Add(sectorPrice)\n\thd.contractor.saveSync()\n\thd.contractor.mu.Unlock()\n\n\treturn sector, nil\n}",
"func (uc *uploadDownloadContractor) Sector(root crypto.Hash) ([]byte, error) {\n\tuc.mu.Lock()\n\tdefer uc.mu.Unlock()\n\treturn uc.sectors[root], nil\n}",
"func (handler *FetchHandler) remoteGetAllocated(w http.ResponseWriter, r *http.Request) {\n\tlog.Infof(\"SERVE Alloc check %s\", r.URL)\n\tvars := mux.Vars(r)\n\n\tid, err := storiface.ParseSectorID(vars[\"id\"])\n\tif err != nil {\n\t\tlog.Errorf(\"parsing sectorID: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tft, err := FileTypeFromString(vars[\"type\"])\n\tif err != nil {\n\t\tlog.Errorf(\"FileTypeFromString: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\tif ft != storiface.FTUnsealed {\n\t\tlog.Errorf(\"/allocated only supports unsealed sector files\")\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tspti, err := strconv.ParseInt(vars[\"spt\"], 10, 64)\n\tif err != nil {\n\t\tlog.Errorf(\"parsing spt: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\tspt := abi.RegisteredSealProof(spti)\n\tssize, err := spt.SectorSize()\n\tif err != nil {\n\t\tlog.Errorf(\"spt.SectorSize(): %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\toffi, err := strconv.ParseInt(vars[\"offset\"], 10, 64)\n\tif err != nil {\n\t\tlog.Errorf(\"parsing offset: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\tszi, err := strconv.ParseInt(vars[\"size\"], 10, 64)\n\tif err != nil {\n\t\tlog.Errorf(\"parsing size: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\t// The caller has a lock on this sector already, no need to get one here\n\n\t// passing 0 spt because we don't allocate anything\n\tsi := storiface.SectorRef{\n\t\tID: id,\n\t\tProofType: 0,\n\t}\n\n\t// get the path of the local Unsealed file for the given sector.\n\t// return error if we do NOT have it.\n\tpaths, _, err := handler.Local.AcquireSector(r.Context(), si, ft, storiface.FTNone, storiface.PathStorage, storiface.AcquireMove)\n\tif err != nil {\n\t\tlog.Errorf(\"AcquireSector: %+v\", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tpath := storiface.PathByType(paths, ft)\n\tif path == \"\" {\n\t\tlog.Error(\"acquired path was empty\")\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\t// open the Unsealed file and check if it has the Unsealed sector for the piece at the given offset and size.\n\tpf, err := handler.PfHandler.OpenPartialFile(abi.PaddedPieceSize(ssize), path)\n\tif err != nil {\n\t\tlog.Error(\"opening partial file: \", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\tdefer func() {\n\t\tif err := pf.Close(); err != nil {\n\t\t\tlog.Error(\"closing partial file: \", err)\n\t\t}\n\t}()\n\n\thas, err := handler.PfHandler.HasAllocated(pf, storiface.UnpaddedByteIndex(offi), abi.UnpaddedPieceSize(szi))\n\tif err != nil {\n\t\tlog.Error(\"has allocated: \", err)\n\t\tw.WriteHeader(500)\n\t\treturn\n\t}\n\n\tif has {\n\t\tlog.Debugf(\"returning ok: worker has unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d\", id, offi, szi)\n\t\tw.WriteHeader(http.StatusOK)\n\t\treturn\n\t}\n\n\tlog.Debugf(\"returning StatusRequestedRangeNotSatisfiable: worker does NOT have unsealed file with unsealed piece, sector:%+v, offset:%d, size:%d\", id, offi, szi)\n\tw.WriteHeader(http.StatusRequestedRangeNotSatisfiable)\n}",
"func NewDiskSector(radius float64, segments int, thetaStart, thetaLength float64) *Geometry {\n\n\td := NewGeometry()\n\n\t// Validate arguments\n\tif segments < 3 {\n\t\tpanic(\"Invalid argument: segments. The number of segments needs to be greater or equal to 3.\")\n\t}\n\n\t// Create buffers\n\tpositions := math32.NewArrayF32(0, 16)\n\tnormals := math32.NewArrayF32(0, 16)\n\tuvs := math32.NewArrayF32(0, 16)\n\tindices := math32.NewArrayU32(0, 16)\n\n\t// Append circle center position\n\tcenter := math32.NewVector3(0, 0, 0)\n\tpositions.AppendVector3(center)\n\n\t// Append circle center normal\n\tvar normal math32.Vector3\n\tnormal.Z = 1\n\tnormals.AppendVector3(&normal)\n\n\t// Append circle center uv coordinate\n\tcenterUV := math32.NewVector2(0.5, 0.5)\n\tuvs.AppendVector2(centerUV)\n\n\t// Generate the segments\n\tfor i := 0; i <= segments; i++ {\n\t\tsegment := thetaStart + float64(i)/float64(segments)*thetaLength\n\n\t\tvx := float32(radius * math.Cos(segment))\n\t\tvy := float32(radius * math.Sin(segment))\n\n\t\t// Appends vertex position, normal and uv coordinates\n\t\tpositions.Append(vx, vy, 0)\n\t\tnormals.AppendVector3(&normal)\n\t\tuvs.Append((vx/float32(radius)+1)/2, (vy/float32(radius)+1)/2)\n\t}\n\n\tfor i := 1; i <= segments; i++ {\n\t\tindices.Append(uint32(i), uint32(i)+1, 0)\n\t}\n\n\td.SetIndices(indices)\n\td.AddVBO(gls.NewVBO(positions).AddAttrib(gls.VertexPosition))\n\td.AddVBO(gls.NewVBO(normals).AddAttrib(gls.VertexNormal))\n\td.AddVBO(gls.NewVBO(uvs).AddAttrib(gls.VertexTexcoord))\n\n\t// Update volume\n\td.volume = 0\n\td.volumeValid = true\n\n\treturn d\n}",
"func (f *DynamicDiskBlockFactory) GetSector(block *Block, sectorIndex uint32) (*Sector, error) {\n\tblockIndex := block.BlockIndex\n\tif block.IsEmpty {\n\t\treturn f.sectorFactory.CreateEmptySector(blockIndex, sectorIndex), nil\n\t}\n\n\treturn f.sectorFactory.Create(block, sectorIndex)\n}",
"func (m *cidsMap) Allocate(vmi *virtv1.VirtualMachineInstance) error {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tkey := controller.VirtualMachineInstanceKey(vmi)\n\tif cid, exist := m.cids[key]; exist {\n\t\tvmi.Status.VSOCKCID = &cid\n\t\treturn nil\n\t}\n\tstart := m.randCID()\n\tassigned := start\n\tfor {\n\t\tif _, exist := m.reverse[assigned]; !exist {\n\t\t\tbreak\n\t\t}\n\t\tassigned = m.nextCID(assigned)\n\t\tif assigned == start {\n\t\t\t// Run out of CIDs. Practically this shouldn't happen.\n\t\t\treturn fmt.Errorf(\"CIDs exhausted\")\n\t\t}\n\t}\n\tm.cids[key] = assigned\n\tm.reverse[assigned] = key\n\tvmi.Status.VSOCKCID = &assigned\n\treturn nil\n}",
"func (p *PCIDs) allocate() uint16 {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif len(p.available) > 0 {\n\t\tfor id := range p.available {\n\t\t\tdelete(p.available, id)\n\t\t\treturn id\n\t\t}\n\t}\n\tif id := p.last + 1; id <= maxPCID {\n\t\tp.last = id\n\t\treturn id\n\t}\n\t// Nothing available.\n\treturn 0\n}",
"func allocFzPathMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfFzPathValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func FindAlloc(nomad *NomadServer, job *Job, host *Host) (*Alloc, error) {\n\tallocs := Allocs(nomad)\n\tfor _, alloc := range allocs {\n\t\tif alloc.NodeID == host.ID && strings.Contains(alloc.Name, job.Name) {\n\t\t\t// We may be looking at a stale allocation and a newer one exists\n\t\t\tif alloc.DesiredStatus == \"stop\" && len(allocs) > 1 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn &alloc, nil\n\t\t}\n\t}\n\treturn &Alloc{}, &AllocNotFound{Hostname: host.Name, Jobname: job.Name}\n}",
"func allocFzPathWalkerMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfFzPathWalkerValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func (s *Module) DiskLookup(id string) (disk pkg.VDisk, err error) {\n\tpath, err := s.findDisk(id)\n\n\tif err != nil {\n\t\treturn disk, err\n\t}\n\n\tdisk.Path = path\n\tstat, err := os.Stat(path)\n\tif err != nil {\n\t\treturn disk, err\n\t}\n\n\tdisk.Size = stat.Size()\n\treturn\n}",
"func (bbw *Writer) Allocate(ln int, extend bool) ([]byte, error) {\n\tif bbw.clsdPos >= 0 {\n\t\treturn nil, errors.New(\"the writer already closed\")\n\t}\n\trest := len(bbw.buf) - bbw.offs - ln - 4\n\tif rest < 0 && (!extend || !bbw.extend(ln+4)) {\n\t\treturn nil, errors.New(fmt.Sprintf(\"not enough space - available %d, but needed %d\", len(bbw.buf)-bbw.offs, ln+4))\n\t}\n\tbinary.BigEndian.PutUint32(bbw.buf[bbw.offs:], uint32(ln))\n\tbbw.offs += ln + 4\n\treturn bbw.buf[bbw.offs-ln : bbw.offs], nil\n}",
"func (table ObjectMasterTable) Allocate() ObjectID {\n\tif len(table) < 2 {\n\t\treturn 0\n\t}\n\tstart := &table[0]\n\tif start.Next == 0 {\n\t\treturn 0\n\t}\n\tid := start.Next\n\tentry := &table[id]\n\tstart.Next = entry.Next\n\n\tentry.Reset()\n\tentry.Next = ObjectID(start.CrossReferenceTableIndex)\n\ttable[start.CrossReferenceTableIndex].Prev = id\n\tentry.Prev = 0\n\tstart.CrossReferenceTableIndex = int16(id)\n\n\tentry.InUse = 1\n\n\treturn id\n}",
"func (hd *Downloader) Sector(root crypto.Hash) (_ modules.RenterContract, _ []byte, err error) {\n\tdefer extendDeadline(hd.conn, time.Hour) // reset deadline when finished\n\n\t// calculate price\n\tsectorPrice := hd.host.DownloadBandwidthPrice.Mul64(modules.SectorSize)\n\tif hd.contract.RenterFunds().Cmp(sectorPrice) < 0 {\n\t\treturn modules.RenterContract{}, nil, errors.New(\"contract has insufficient funds to support download\")\n\t}\n\t// to mitigate small errors (e.g. differing block heights), fudge the\n\t// price and collateral by 0.2%. This is only applied to hosts above\n\t// v1.0.1; older hosts use stricter math.\n\tif build.VersionCmp(hd.host.Version, \"1.0.1\") > 0 {\n\t\tsectorPrice = sectorPrice.MulFloat(1 + hostPriceLeeway)\n\t}\n\n\t// create the download revision\n\trev := newDownloadRevision(hd.contract.LastRevision, sectorPrice)\n\n\t// initiate download by confirming host settings\n\textendDeadline(hd.conn, modules.NegotiateSettingsTime)\n\tif err := startDownload(hd.conn, hd.host); err != nil {\n\t\treturn modules.RenterContract{}, nil, err\n\t}\n\n\t// Before we continue, save the revision. Unexpected termination (e.g.\n\t// power failure) during the signature transfer leaves in an ambiguous\n\t// state: the host may or may not have received the signature, and thus\n\t// may report either revision as being the most recent. To mitigate this,\n\t// we save the old revision as a fallback.\n\tif hd.SaveFn != nil {\n\t\tif err := hd.SaveFn(rev, hd.contract.MerkleRoots); err != nil {\n\t\t\treturn modules.RenterContract{}, nil, err\n\t\t}\n\t}\n\n\t// send download action\n\textendDeadline(hd.conn, 2*time.Minute)\n\terr = encoding.WriteObject(hd.conn, []modules.DownloadAction{{\n\t\tMerkleRoot: root,\n\t\tOffset: 0,\n\t\tLength: modules.SectorSize,\n\t}})\n\tif err != nil {\n\t\treturn modules.RenterContract{}, nil, err\n\t}\n\n\t// Increase Successful/Failed interactions accordingly\n\tdefer func() {\n\t\tif err != nil {\n\t\t\thd.hdb.IncrementFailedInteractions(hd.contract.HostPublicKey)\n\t\t} else if err == nil {\n\t\t\thd.hdb.IncrementSuccessfulInteractions(hd.contract.HostPublicKey)\n\t\t}\n\t}()\n\n\t// send the revision to the host for approval\n\textendDeadline(hd.conn, 2*time.Minute)\n\tsignedTxn, err := negotiateRevision(hd.conn, rev, hd.contract.SecretKey)\n\tif err == modules.ErrStopResponse {\n\t\t// if host gracefully closed, close our connection as well; this will\n\t\t// cause the next download to fail. 
However, we must delay closing\n\t\t// until we've finished downloading the sector.\n\t\tdefer hd.conn.Close()\n\t} else if err != nil {\n\t\treturn modules.RenterContract{}, nil, err\n\t}\n\n\t// read sector data, completing one iteration of the download loop\n\textendDeadline(hd.conn, modules.NegotiateDownloadTime)\n\tvar sectors [][]byte\n\tif err := encoding.ReadObject(hd.conn, §ors, modules.SectorSize+16); err != nil {\n\t\treturn modules.RenterContract{}, nil, err\n\t} else if len(sectors) != 1 {\n\t\treturn modules.RenterContract{}, nil, errors.New(\"host did not send enough sectors\")\n\t}\n\tsector := sectors[0]\n\tif uint64(len(sector)) != modules.SectorSize {\n\t\treturn modules.RenterContract{}, nil, errors.New(\"host did not send enough sector data\")\n\t} else if crypto.MerkleRoot(sector) != root {\n\t\treturn modules.RenterContract{}, nil, errors.New(\"host sent bad sector data\")\n\t}\n\n\t// update contract and metrics\n\thd.contract.LastRevision = rev\n\thd.contract.LastRevisionTxn = signedTxn\n\thd.contract.DownloadSpending = hd.contract.DownloadSpending.Add(sectorPrice)\n\n\treturn hd.contract, sector, nil\n}",
"func (bbw *Writer) Allocate(ln int, extend bool) ([]byte, error) {\n\tif bbw.clsdPos > 0 {\n\t\treturn nil, fmt.Errorf(\"the writer already closed\")\n\t}\n\n\trest := len(bbw.buf) - bbw.offs - ln - 4\n\tif rest < 0 && (!extend || !bbw.extend(ln+4)) {\n\t\treturn nil, fmt.Errorf(\"not enough space - available %d, but needed %d\", len(bbw.buf)-bbw.offs, ln+4)\n\t}\n\n\tbinary.BigEndian.PutUint32(bbw.buf[bbw.offs:], uint32(ln))\n\tbbw.offs += ln + 4\n\treturn bbw.buf[bbw.offs-ln : bbw.offs], nil\n}",
"func SectorInfo(t *testing.T, v *VM, minerIDAddress address.Address, sectorNumber abi.SectorNumber) *miner.SectorOnChainInfo {\n\tvar minerState miner.State\n\terr := v.GetState(minerIDAddress, &minerState)\n\trequire.NoError(t, err)\n\n\tinfo, found, err := minerState.GetSector(v.Store(), sectorNumber)\n\trequire.NoError(t, err)\n\trequire.True(t, found)\n\treturn info\n}",
"func (s *Flaky) Alloc() (kv.Entity, error) {\n\tif s.fail() {\n\t\treturn 0, s.err\n\t}\n\treturn s.Txn.Alloc()\n}",
"func (s *PageStore) Allocate() (PageID, error) {\n\tif s.header.freeList != 0 {\n\t\treturn s.allocateFromFreeList()\n\t}\n\treturn s.allocateFromEndOfFile()\n}",
"func (a *Allocator) Alloc(size int64) (int64, error) {\n\tif size <= 0 {\n\t\treturn -1, fmt.Errorf(\"invalid argument: %T.Alloc(%v)\", a, size)\n\t}\n\n\ta.allocs++\n\tif size > maxSlot {\n\t\treturn a.allocBig(size)\n\t}\n\n\trank := slotRank(int(size))\n\tif off := a.pages[rank]; off != 0 {\n\t\treturn a.sbrk(off, rank)\n\t}\n\n\tif off := a.pages[firstPageRank]; off != 0 {\n\t\treturn a.sbrk2(off, rank)\n\t}\n\n\tif off := a.slots[rank]; off != 0 {\n\t\treturn a.allocSlot(off, rank)\n\t}\n\n\tp, err := a.newSharedPage(rank)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tif err := a.insertPage(p); err != nil {\n\t\treturn -1, err\n\t}\n\n\tp.setUsed(1)\n\tp.setBrk(1)\n\tif err := p.flush(); err != nil {\n\t\treturn -1, err\n\t}\n\n\treturn p.slot(0), a.flush()\n}",
"func allocateSpace(ctx *downloaderContext, status *types.DownloaderStatus,\n\tsize uint64) {\n\tif status.Size != 0 {\n\t\tlog.Errorf(\"%s, request for duplicate storage allocation\\n\", status.Name)\n\t\treturn\n\t}\n\tkb := types.RoundupToKB(size)\n\tctx.globalStatusLock.Lock()\n\tctx.globalStatus.ReservedSpace -= status.ReservedSpace\n\tctx.globalStatus.UsedSpace += kb\n\tupdateRemainingSpace(ctx)\n\tctx.globalStatusLock.Unlock()\n\tstatus.ReservedSpace = 0\n\tstatus.Size = size\n\tpublishGlobalStatus(ctx)\n}",
"func (api *API) PledgeSector(ctx context.Context) error {\n\treturn api.piecemanager.PledgeSector(ctx)\n}",
"func (t *MCTS) alloc() naughty {\n\tt.Lock()\n\tl := len(t.freelist)\n\tif l == 0 {\n\t\tN := Node{\n\t\t\ttree: ptrFromTree(t),\n\t\t\tid: naughty(len(t.nodes)),\n\n\t\t\tminPSARatioChildren: defaultMinPsaRatio,\n\t\t}\n\t\tt.nodes = append(t.nodes, N)\n\t\tt.children = append(t.children, make([]naughty, 0, t.M*t.N+1))\n\t\tt.childLock = append(t.childLock, sync.Mutex{})\n\t\tn := naughty(len(t.nodes) - 1)\n\t\tt.Unlock()\n\t\treturn n\n\t}\n\n\ti := t.freelist[l-1]\n\tt.freelist = t.freelist[:l-1]\n\tt.Unlock()\n\treturn naughty(i)\n}",
"func (s *EntityStorage) Allocate() (*Entity, int) {\n\ts.count++\n\ts.outdated = true\n\n\tl := len(s.freeIDs)\n\tif l != 0 {\n\t\tid := s.freeIDs[l-1]\n\t\ts.freeIDs = s.freeIDs[:l-1]\n\t\ts.vec[id].occupied = true\n\t\treturn &s.vec[id].value, id\n\t}\n\n\tid := len(s.vec)\n\ts.vec = append(s.vec, EntityCapsule{})\n\n\ts.vec[id].occupied = true\n\treturn &s.vec[id].value, id\n}",
"func (db *DB) allocate(txid txid, count int) (*page, error) {\n\t// Allocate a temporary buffer for the page.\n\tvar buf []byte\n\tif count == 1 {\n\t\tbuf = db.pagePool.Get().([]byte)\n\t} else {\n\t\tbuf = make([]byte, count*db.pageSize)\n\t}\n\tp := (*page)(unsafe.Pointer(&buf[0]))\n\tp.overflow = uint32(count - 1)\n\n\t// Use pages from the freelist if they are available.\n\tif p.id = db.freelist.allocate(txid, count); p.id != 0 {\n\t\treturn p, nil\n\t}\n\n\t// Resize mmap() if we're at the end.\n\tp.id = db.rwtx.meta.pgid\n\tvar minsz = int((p.id+pgid(count))+1) * db.pageSize\n\tif minsz >= db.datasz {\n\t\tif err := db.mmap(minsz); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"mmap allocate error: %s\", err)\n\t\t}\n\t}\n\n\t// Move the page id high water mark.\n\tdb.rwtx.meta.pgid += pgid(count)\n\n\treturn p, nil\n}",
"func (p *projectQuota) findOrCreateBackingDev(targetPath string) (*backingDev, error) {\n\tif dev, ok := p.pathMapBackingDev[targetPath]; ok {\n\t\treturn dev, nil\n\t}\n\n\tvar stat unix.Stat_t\n\tif err := unix.Stat(targetPath, &stat); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// if the target path is soft link, the function can also find the right point\n\tmountInfo, err := mountpoint.FindMount(targetPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tmountPoint := mountInfo.Path\n\tklog.V(3).Infof(\"mount point for path(%s) is: %s\", targetPath, mountPoint)\n\tif dev, ok := p.pathMapBackingDev[mountPoint]; ok {\n\t\tp.pathMapBackingDev[targetPath] = dev\n\t\treturn dev, nil\n\t}\n\n\t// create block device\n\tbackingFsBlockDev := path.Join(mountPoint, \"backingFsBlockDev\")\n\tbackingDevice := &backingDev{\n\t\tdevice: backingFsBlockDev,\n\t}\n\tp.pathMapBackingDev[mountPoint] = backingDevice\n\tp.pathMapBackingDev[targetPath] = backingDevice\n\n\t// if mount options has no quota option, no quota support\n\tif !strings.Contains(mountInfo.Options, quotaMountOption) {\n\t\tbackingDevice.supported = false\n\t\tklog.V(2).Infof(\"generating backing device for path(%s) with mount point(%s): %+v\",\n\t\t\ttargetPath, mountPoint, backingDevice)\n\t\treturn backingDevice, nil\n\t}\n\n\t// check if the mount point supporting disk quota\n\t_, err = getProjectID(mountPoint)\n\tif err != nil && strings.Contains(err.Error(), \"inappropriate ioctl for device\") {\n\t\tbackingDevice.supported = false\n\t} else {\n\t\tklog.V(2).Infof(\"creating backing device for target path(%s): %s\",\n\t\t\ttargetPath, backingFsBlockDev)\n\t\t// Re-create just in case someone copied the home directory over to a new device\n\t\tunix.Unlink(backingFsBlockDev)\n\t\terr = unix.Mknod(backingFsBlockDev, unix.S_IFBLK|0600, int(stat.Dev))\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tbackingDevice.supported = true\n\t\tcase unix.ENOSYS, unix.EPERM:\n\t\t\tbackingDevice.supported = false\n\t\tdefault:\n\t\t\treturn nil, fmt.Errorf(\"failed to mknod %s: %v\", backingFsBlockDev, err)\n\t\t}\n\t}\n\n\tklog.V(2).Infof(\"generating backing device for path(%s) with mount point(%s): %+v\",\n\t\ttargetPath, mountPoint, backingDevice)\n\treturn backingDevice, nil\n}",
"func TlsAlloc() DWORD {\n\tret1 := syscall3(tlsAlloc, 0,\n\t\t0,\n\t\t0,\n\t\t0)\n\treturn DWORD(ret1)\n}",
"func AllocateSeq(prgrm *CXProgram, size int) (offset int) {\n\tresult := prgrm.Heap.HeapPointer\n\tnewFree := result + size\n\n\tif newFree > INIT_HEAP_SIZE {\n\t\t// call GC\n\t\tMarkAndCompact(prgrm)\n\t\tresult = prgrm.Heap.HeapPointer\n\t\tnewFree = prgrm.Heap.HeapPointer + size\n\n\t\tif newFree > INIT_HEAP_SIZE {\n\t\t\t// heap exhausted\n\t\t\tpanic(\"heap exhausted\")\n\t\t}\n\t}\n\n\tprgrm.Heap.HeapPointer = newFree\n\n\treturn result\n}",
"func (sm SectorMap) FreeSectors() int {\n\tcount := 0\n\tfor _, file := range sm {\n\t\tif file == FileFree {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}",
"func NewSector(rows, cols int, excludeTags []string, fullTags bool, poiChance, otherWorldChance int, density Density) *Stars {\n\ts := &Stars{\n\t\tRows: rows,\n\t\tCols: cols,\n\t}\n\n\tdVal := 0\n\tswitch density {\n\tcase SPARSE:\n\t\tdVal = 8\n\tcase AVERAGE:\n\t\tdVal = 4\n\tcase DENSE:\n\t\tdVal = 2\n\t}\n\n\tcells := s.Rows * s.Cols\n\tstars := (rand.Intn(cells/4) / 2) + (cells / dVal)\n\n\tfor row, col := rand.Intn(s.Rows), rand.Intn(s.Cols); len(s.Systems) <= stars; row, col = rand.Intn(s.Rows), rand.Intn(s.Cols) {\n\t\tif !s.active(row, col) {\n\t\t\ts.Systems = append(s.Systems, NewStar(row, col, s.systemName(), excludeTags, fullTags, poiChance, otherWorldChance))\n\t\t}\n\t}\n\n\treturn s\n}",
"func (s *Scratch) allocDtable() {\n\ttableSize := 1 << s.actualTableLog\n\tif cap(s.decTable) < tableSize {\n\t\ts.decTable = make([]decSymbol, tableSize)\n\t}\n\ts.decTable = s.decTable[:tableSize]\n\n\tif cap(s.ct.tableSymbol) < 256 {\n\t\ts.ct.tableSymbol = make([]byte, 256)\n\t}\n\ts.ct.tableSymbol = s.ct.tableSymbol[:256]\n\n\tif cap(s.ct.stateTable) < 256 {\n\t\ts.ct.stateTable = make([]uint16, 256)\n\t}\n\ts.ct.stateTable = s.ct.stateTable[:256]\n}",
"func alloc(size uintptr, layout unsafe.Pointer) unsafe.Pointer",
"func (s *System) allocate() {\n\ts.Vertexes.Resize(s.vertex)\n\ts.Indices.Resize(s.indice)\n}",
"func (sm SectorMap) FirstFreeFile() byte {\n\tfor file := byte(0x01); file < 0xfe; file++ {\n\t\tsectors := sm.SectorsForFile(file)\n\t\tif len(sectors) == 0 {\n\t\t\treturn file\n\t\t}\n\t}\n\treturn 0\n}",
"func AllocateSeq(size int) (offset int) {\n\t// Current object trying to be allocated would use this address.\n\taddr := PROGRAM.HeapPointer\n\t// Next object to be allocated will use this address.\n\tnewFree := addr + size\n\n\t// Checking if we can allocate the entirety of the object in the current heap.\n\tif newFree > PROGRAM.HeapSize {\n\t\t// It does not fit, so calling garbage collector.\n\t\tMarkAndCompact(PROGRAM)\n\t\t// Heap pointer got moved by GC and recalculate these variables based on the new pointer.\n\t\taddr = PROGRAM.HeapPointer\n\t\tnewFree = addr + size\n\n\t\t// If the new heap pointer exceeds `MAX_HEAP_SIZE`, there's nothing left to do.\n\t\tif newFree > constants.MAX_HEAP_SIZE {\n\t\t\tpanic(constants.HEAP_EXHAUSTED_ERROR)\n\t\t}\n\n\t\t// According to MIN_HEAP_FREE_RATIO and MAX_HEAP_FREE_RATION we can either shrink\n\t\t// or expand the heap to maintain \"healthy\" heap sizes. The idea is that we don't want\n\t\t// to have an absurdly amount of free heap memory, as we would be wasting resources, and we\n\t\t// don't want to have a small amount of heap memory left as we'd be calling the garbage collector\n\t\t// too frequently.\n\n\t\t// Calculating free heap memory percentage.\n\t\tusedPerc := float32(newFree) / float32(PROGRAM.HeapSize)\n\t\tfreeMemPerc := 1.0 - usedPerc\n\n\t\t// Then we have less than MIN_HEAP_FREE_RATIO memory left. Expand!\n\t\tif freeMemPerc < constants.MIN_HEAP_FREE_RATIO {\n\t\t\t// Calculating new heap size in order to reach MIN_HEAP_FREE_RATIO.\n\t\t\tnewMemSize := int(float32(newFree) / (1.0 - constants.MIN_HEAP_FREE_RATIO))\n\t\t\tResizeMemory(PROGRAM, newMemSize, true)\n\t\t}\n\n\t\t// Then we have more than MAX_HEAP_FREE_RATIO memory left. Shrink!\n\t\tif freeMemPerc > constants.MAX_HEAP_FREE_RATIO {\n\t\t\t// Calculating new heap size in order to reach MAX_HEAP_FREE_RATIO.\n\t\t\tnewMemSize := int(float32(newFree) / (1.0 - constants.MAX_HEAP_FREE_RATIO))\n\n\t\t\t// This check guarantees that the CX program has always at least INIT_HEAP_SIZE bytes to work with.\n\t\t\t// A flag could be added later to remove this, as in some cases this mechanism could not be desired.\n\t\t\tif newMemSize > constants.INIT_HEAP_SIZE {\n\t\t\t\tResizeMemory(PROGRAM, newMemSize, false)\n\t\t\t}\n\t\t}\n\t}\n\n\tPROGRAM.HeapPointer = newFree\n\n\t// Returning absolute memory address (not relative to where heap starts at).\n\t// Above this point we were performing all operations taking into\n\t// consideration only heap offsets.\n\treturn addr + PROGRAM.HeapStartsAt\n}",
"func (t *BTree) AllocateNode() *BTreeNode {\n\tx := BTreeNode{}\n\tfor i := 0; i < 2*t.t; i++ {\n\t\tx.children = append(x.children, t.nullNode)\n\t}\n\tfor i := 0; i < 2*t.t-1; i++ {\n\t\tx.keys = append(x.keys, -1)\n\t}\n\treturn &x\n}",
"func Diskuse(path string) (cap uint64, used uint64) {\n\n\tfs := syscall.Statfs_t{}\n\terr := syscall.Statfs(path, &fs)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\n\tcap = fs.Blocks * uint64(fs.Bsize)\n\tfree := fs.Bfree * uint64(fs.Bsize) // yup, I just did that\n\tused = cap - free\n\treturn cap, used\n}",
"func allocPDFCmapMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFCmapValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func SectorDeadline(t *testing.T, v *VM, minerIDAddress address.Address, sectorNumber abi.SectorNumber) (uint64, uint64) {\n\tvar minerState miner.State\n\terr := v.GetState(minerIDAddress, &minerState)\n\trequire.NoError(t, err)\n\n\tdlIdx, pIdx, err := minerState.FindSector(v.Store(), sectorNumber)\n\trequire.NoError(t, err)\n\treturn dlIdx, pIdx\n}",
"func SpaceAvailForPath(path string) (uint64, error) {\n\tstat := &syscall.Statfs_t{}\n\terr := syscall.Statfs(path, stat)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn (uint64(stat.Frsize) * stat.Bavail) / 1024, nil\n}",
"func allocPDFHmtxMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFHmtxValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func AllocateFile(path string, size int64) {\n\n\tfd, err := os.Create(path)\n\tif err != nil {\n\t\tlog.Panic(\"Failed to create output\")\n\t}\n\t_, err = fd.Seek(size-1, 0)\n\tif err != nil {\n\t\tlog.Panic(\"Failed to seek\")\n\t}\n\t_, err = fd.Write([]byte{0})\n\tif err != nil {\n\t\tlog.Panic(\"Write failed\")\n\t}\n\terr = fd.Close()\n\tif err != nil {\n\t\tlog.Panic(\"Failed to close file\")\n\t}\n}",
"func (q *queue) alloc(dataLen int) (dataPageIndex int64, dataPage page.MappedPage, offset int, err error) {\n\tq.rwMutex.Lock()\n\tdefer q.rwMutex.Unlock()\n\n\t// prepare the data pointer\n\tif q.messageOffset+dataLen > dataPageSize {\n\t\t// check size limit before data page acquire\n\t\tif err := q.checkDataSize(); err != nil {\n\t\t\treturn 0, nil, 0, err\n\t\t}\n\t\t// sync previous data page\n\t\tif err := q.dataPage.Sync(); err != nil {\n\t\t\tqueueLogger.Error(\"sync data page err when alloc\",\n\t\t\t\tlogger.String(\"queue\", q.dirPath), logger.Error(err))\n\t\t}\n\t\tnextDataPageIndex := q.dataPageIndex + 1\n\t\t// not enough space in current data page, need create new page\n\t\tdataPage, err := q.dataPageFct.AcquirePage(nextDataPageIndex)\n\t\tif err != nil {\n\t\t\treturn 0, nil, 0, err\n\t\t}\n\n\t\tq.dataPage = dataPage\n\t\tq.dataPageIndex = nextDataPageIndex\n\t\tq.messageOffset = 0 // need reset message offset for new data page\n\t}\n\t// advance dataOffset\n\tmessageOffset := q.messageOffset\n\tq.messageOffset += dataLen // set next message offset\n\treturn q.dataPageIndex, q.dataPage, messageOffset, nil\n}",
"func SpaceMapAllocate() (* SpaceMap) {\n return &SpaceMap{}\n}",
"func (t *MCTS) alloc() Naughty {\n\tt.Lock()\n\tdefer t.Unlock()\n\tl := len(t.freelist)\n\tif l == 0 {\n\t\tN := Node{\n\t\t\tlock: sync.Mutex{},\n\t\t\ttree: ptrFromTree(t),\n\t\t\tid: Naughty(len(t.nodes)),\n\t\t\thasChildren: false,\n\t\t}\n\t\tt.nodes = append(t.nodes, N)\n\t\tt.children = append(t.children, make([]Naughty, 0, t.current.ActionSpace()))\n\t\tn := Naughty(len(t.nodes) - 1)\n\t\treturn n\n\t}\n\n\ti := t.freelist[l-1]\n\tt.freelist = t.freelist[:l-1]\n\treturn i\n}",
"func (s *fseEncoder) allocCtable() {\n\ttableSize := 1 << s.actualTableLog\n\t// get tableSymbol that is big enough.\n\tif cap(s.ct.tableSymbol) < tableSize {\n\t\ts.ct.tableSymbol = make([]byte, tableSize)\n\t}\n\ts.ct.tableSymbol = s.ct.tableSymbol[:tableSize]\n\n\tctSize := tableSize\n\tif cap(s.ct.stateTable) < ctSize {\n\t\ts.ct.stateTable = make([]uint16, ctSize)\n\t}\n\ts.ct.stateTable = s.ct.stateTable[:ctSize]\n\n\tif cap(s.ct.symbolTT) < 256 {\n\t\ts.ct.symbolTT = make([]symbolTransform, 256)\n\t}\n\ts.ct.symbolTT = s.ct.symbolTT[:256]\n}",
"func TestPageAllocScavenge(t *testing.T) {\n\tif GOOS == \"openbsd\" && testing.Short() {\n\t\tt.Skip(\"skipping because virtual memory is limited; see #36210\")\n\t}\n\ttype test struct {\n\t\trequest, expect uintptr\n\t}\n\tminPages := PhysPageSize / PageSize\n\tif minPages < 1 {\n\t\tminPages = 1\n\t}\n\ttype setup struct {\n\t\tbeforeAlloc map[ChunkIdx][]BitRange\n\t\tbeforeScav map[ChunkIdx][]BitRange\n\t\texpect []test\n\t\tafterScav map[ChunkIdx][]BitRange\n\t}\n\ttests := map[string]setup{\n\t\t\"AllFreeUnscavExhaust\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{^uintptr(0), 3 * PallocChunkPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 2: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t\t\"NoneFreeUnscavExhaust\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t\tBaseChunkIdx + 2: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{^uintptr(0), 0},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t},\n\t\t\"ScavHighestPageFirst\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{1, minPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},\n\t\t\t},\n\t\t},\n\t\t\"ScavMultiple\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t\t\"ScavMultiple2\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{2 * minPages * PageSize, 2 * minPages * PageSize},\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t\t\"ScavDiscontiguous\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 0xe: 
{},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t\tBaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{2 * minPages * PageSize, 2 * minPages * PageSize},\n\t\t\t\t{^uintptr(0), 2 * minPages * PageSize},\n\t\t\t\t{^uintptr(0), 0},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 0xe: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t}\n\t// Disable these tests on iOS since we have a small address space.\n\t// See #46860.\n\tif PageAlloc64Bit != 0 && goos.IsIos == 0 {\n\t\ttests[\"ScavAllVeryDiscontiguous\"] = setup{\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 0x1000: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 0x1000: {},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{^uintptr(0), 2 * PallocChunkPages * PageSize},\n\t\t\t\t{^uintptr(0), 0},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 0x1000: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t}\n\t}\n\tfor name, v := range tests {\n\t\tv := v\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tb := NewPageAlloc(v.beforeAlloc, v.beforeScav)\n\t\t\tdefer FreePageAlloc(b)\n\n\t\t\tfor iter, h := range v.expect {\n\t\t\t\tif got := b.Scavenge(h.request); got != h.expect {\n\t\t\t\t\tt.Fatalf(\"bad scavenge #%d: want %d, got %d\", iter+1, h.expect, got)\n\t\t\t\t}\n\t\t\t}\n\t\t\twant := NewPageAlloc(v.beforeAlloc, v.afterScav)\n\t\t\tdefer FreePageAlloc(want)\n\n\t\t\tcheckPageAlloc(t, want, b)\n\t\t})\n\t}\n}",
"func Alloci(t *jrnl.TxnHandle, mode IType) *Inode {\nretry:\n\tfor i := firstInodeAddr; i < firstInodeAddr+numInodes; i++ {\n\t\tblk := bio.Bget(uint(i))\n\t\tif blk.Data == \"\" {\n\t\t\tni := &Inode{\n\t\t\t\tSerialnum: uint16(i - firstInodeAddr),\n\t\t\t\tRefcnt: 1,\n\t\t\t\tAddrs: []uint{},\n\t\t\t\tMode: mode,\n\t\t\t}\n\t\t\tif ni.EnqWrite(t) != nil {\n\t\t\t\tgoto retry\n\t\t\t}\n\t\t\tfmt.Printf(\"Acquired inode w/ serial num %d from empty\\n\", ni.Serialnum)\n\t\t\treturn ni\n\n\t\t}\n\t\tni := IDecode(blk.Data)\n\t\tif ni.Refcnt == 0 {\n\t\t\tni = &Inode{\n\t\t\t\tSerialnum: uint16(i - firstInodeAddr),\n\t\t\t\tRefcnt: 1,\n\t\t\t\tAddrs: []uint{},\n\t\t\t\tMode: mode,\n\t\t\t}\n\t\t\tif ni.EnqWrite(t) != nil {\n\t\t\t\tgoto retry\n\t\t\t}\n\t\t\tfmt.Printf(\"Acquired inode w/ serial num %d from non-empty, refcnt %d\\n\", ni.Serialnum, ni.Refcnt)\n\t\t\treturn ni\n\t\t}\n\t\tblk.Brelse()\n\t}\n\tlog.Fatal(\"no allocatable Inodes\")\n\t// Never reached\n\treturn nil\n}",
"func (ms *MeshBase) AddCylinderSector(height, topRad, botRad float32, radialSegs, heightSegs int, angStart, angLen float32, top, bottom bool, offset mat32.Vec3) {\n\thHt := height / 2\n\tvtxs := [][]int{}\n\tuvsOrig := [][]mat32.Vec2{}\n\n\tangStRad := mat32.DegToRad(angStart)\n\tangLenRad := mat32.DegToRad(angLen)\n\n\t// Create buffer for vertex positions\n\tpos := mat32.NewArrayF32(0, 0)\n\tstidx := uint32(ms.Vtx.Len() / 3)\n\n\tbb := mat32.Box3{}\n\tbb.SetEmpty()\n\n\tvar pt mat32.Vec3\n\tfor y := 0; y <= heightSegs; y++ {\n\t\tvar vtxsRow = []int{}\n\t\tvar uvsRow = []mat32.Vec2{}\n\t\tv := float32(y) / float32(heightSegs)\n\t\tradius := v*(botRad-topRad) + topRad\n\t\tfor x := 0; x <= radialSegs; x++ {\n\t\t\tu := float32(x) / float32(radialSegs)\n\t\t\tpt.X = -radius * mat32.Cos(u*angLenRad+angStRad)\n\t\t\tpt.Y = -v*height + hHt\n\t\t\tpt.Z = radius * mat32.Sin(u*angLenRad+angStRad)\n\t\t\tpt.SetAdd(offset)\n\t\t\tpos.AppendVec3(pt)\n\t\t\tbb.ExpandByPoint(pt)\n\t\t\tvtxsRow = append(vtxsRow, pos.Size()/3-1)\n\t\t\tuvsRow = append(uvsRow, mat32.Vec2{u, 1.0 - v})\n\t\t}\n\t\tvtxs = append(vtxs, vtxsRow)\n\t\tuvsOrig = append(uvsOrig, uvsRow)\n\t}\n\n\ttanTheta := (botRad - topRad) / height\n\tvar na, nb mat32.Vec3\n\n\t// Create preallocated buffers for normals and uvs and buffer for indices\n\tnpos := pos.Size()\n\tnorms := mat32.NewArrayF32(npos, npos)\n\tuvs := mat32.NewArrayF32(2*npos/3, 2*npos/3)\n\tidxs := mat32.NewArrayU32(0, 0)\n\n\tfor x := 0; x < radialSegs; x++ {\n\t\tif topRad != 0 {\n\t\t\tpos.GetVec3(3*vtxs[0][x], &na)\n\t\t\tpos.GetVec3(3*vtxs[0][x+1], &nb)\n\t\t} else {\n\t\t\tpos.GetVec3(3*vtxs[1][x], &na)\n\t\t\tpos.GetVec3(3*vtxs[1][x+1], &nb)\n\t\t}\n\n\t\tna.Y = mat32.Sqrt(na.X*na.X+na.Z*na.Z) * tanTheta\n\t\tna.Normalize()\n\t\tnb.Y = mat32.Sqrt(nb.X*nb.X+nb.Z*nb.Z) * tanTheta\n\t\tnb.Normalize()\n\n\t\tfor y := 0; y < heightSegs; y++ {\n\t\t\tv1 := vtxs[y][x]\n\t\t\tv2 := vtxs[y+1][x]\n\t\t\tv3 := vtxs[y+1][x+1]\n\t\t\tv4 := vtxs[y][x+1]\n\n\t\t\tn1 := na\n\t\t\tn2 := na\n\t\t\tn3 := nb\n\t\t\tn4 := nb\n\n\t\t\tuv1 := uvsOrig[y][x]\n\t\t\tuv2 := uvsOrig[y+1][x]\n\t\t\tuv3 := uvsOrig[y+1][x+1]\n\t\t\tuv4 := uvsOrig[y][x+1]\n\n\t\t\tidxs.Append(stidx+uint32(v1), stidx+uint32(v2), stidx+uint32(v4))\n\t\t\tnorms.SetVec3(3*v1, n1)\n\t\t\tnorms.SetVec3(3*v2, n2)\n\t\t\tnorms.SetVec3(3*v4, n4)\n\n\t\t\tidxs.Append(stidx+uint32(v2), stidx+uint32(v3), stidx+uint32(v4))\n\t\t\tnorms.SetVec3(3*v2, n2)\n\t\t\tnorms.SetVec3(3*v3, n3)\n\t\t\tnorms.SetVec3(3*v4, n4)\n\n\t\t\tuvs.SetVec2(2*v1, uv1)\n\t\t\tuvs.SetVec2(2*v2, uv2)\n\t\t\tuvs.SetVec2(2*v3, uv3)\n\t\t\tuvs.SetVec2(2*v4, uv4)\n\t\t}\n\t}\n\n\t// Top cap\n\tif top && topRad > 0 {\n\t\t// Array of vertex indicesOrig to build used to build the faces.\n\t\tidxsOrig := []uint32{}\n\t\tnextidx := pos.Size() / 3\n\n\t\t// Appends top segments vtxs and builds array of its idxsOrig\n\t\tvar uv1, uv2, uv3 mat32.Vec2\n\t\tfor x := 0; x < radialSegs; x++ {\n\t\t\tuv1 = uvsOrig[0][x]\n\t\t\tuv2 = uvsOrig[0][x+1]\n\t\t\tuv3 = mat32.Vec2{uv2.X, 0}\n\t\t\t// Appends CENTER with its own UV.\n\t\t\tpos.Append(0, hHt, 0)\n\t\t\tnorms.Append(0, 1, 0)\n\t\t\tuvs.AppendVec2(uv3)\n\t\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\t\tnextidx++\n\t\t\t// Appends vertex\n\t\t\tv := mat32.Vec3{}\n\t\t\tvi := vtxs[0][x]\n\t\t\tpos.GetVec3(3*vi, &v)\n\t\t\tpos.AppendVec3(v)\n\t\t\tnorms.Append(0, 1, 0)\n\t\t\tuvs.AppendVec2(uv1)\n\t\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\t\tnextidx++\n\t\t}\n\t\t// Appends copy of first 
vertex (center)\n\t\tvar pt, norm mat32.Vec3\n\t\tvar uv mat32.Vec2\n\t\tpos.GetVec3(3*int(idxsOrig[0]), &pt)\n\t\tnorms.GetVec3(3*int(idxsOrig[0]), &norm)\n\t\tuvs.GetVec2(2*int(idxsOrig[0]), &uv)\n\t\tpos.AppendVec3(pt)\n\t\tnorms.AppendVec3(norm)\n\t\tuvs.AppendVec2(uv)\n\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\tnextidx++\n\n\t\t// Appends copy of second vertex (v1) USING LAST UV2\n\t\tpos.GetVec3(3*int(idxsOrig[1]), &pt)\n\t\tnorms.GetVec3(3*int(idxsOrig[1]), &norm)\n\t\tpos.AppendVec3(pt)\n\t\tnorms.AppendVec3(norm)\n\t\tuvs.AppendVec2(uv2)\n\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\tnextidx++\n\n\t\t// Append faces idxsOrig\n\t\tfor x := 0; x < radialSegs; x++ {\n\t\t\tpos := 2 * x\n\t\t\ti1 := idxsOrig[pos]\n\t\t\ti2 := idxsOrig[pos+1]\n\t\t\ti3 := idxsOrig[pos+3]\n\t\t\tidxs.Append(uint32(stidx+i1), uint32(stidx+i2), uint32(stidx+i3))\n\t\t}\n\t}\n\n\t// Bottom cap\n\tif bottom && botRad > 0 {\n\t\t// Array of vertex idxsOrig to build used to build the faces.\n\t\tidxsOrig := []uint32{}\n\t\tnextidx := pos.Size() / 3\n\n\t\t// Appends top segments vtxs and builds array of its idxsOrig\n\t\tvar uv1, uv2, uv3 mat32.Vec2\n\t\tfor x := 0; x < radialSegs; x++ {\n\t\t\tuv1 = uvsOrig[heightSegs][x]\n\t\t\tuv2 = uvsOrig[heightSegs][x+1]\n\t\t\tuv3 = mat32.Vec2{uv2.X, 1}\n\t\t\t// Appends CENTER with its own UV.\n\t\t\tpos.Append(0, -hHt, 0)\n\t\t\tnorms.Append(0, -1, 0)\n\t\t\tuvs.AppendVec2(uv3)\n\t\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\t\tnextidx++\n\t\t\t// Appends vertex\n\t\t\tv := mat32.Vec3{}\n\t\t\tvi := vtxs[heightSegs][x]\n\t\t\tpos.GetVec3(3*vi, &v)\n\t\t\tpos.AppendVec3(v)\n\t\t\tnorms.Append(0, -1, 0)\n\t\t\tuvs.AppendVec2(uv1)\n\t\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\t\tnextidx++\n\t\t}\n\n\t\t// Appends copy of first vertex (center)\n\t\tvar pt, norm mat32.Vec3\n\t\tvar uv mat32.Vec2\n\t\tpos.GetVec3(3*int(idxsOrig[0]), &pt)\n\t\tnorms.GetVec3(3*int(idxsOrig[0]), &norm)\n\t\tuvs.GetVec2(2*int(idxsOrig[0]), &uv)\n\t\tpos.AppendVec3(pt)\n\t\tnorms.AppendVec3(norm)\n\t\tuvs.AppendVec2(uv)\n\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\tnextidx++\n\n\t\t// Appends copy of second vertex (v1) USING LAST UV2\n\t\tpos.GetVec3(3*int(idxsOrig[1]), &pt)\n\t\tnorms.GetVec3(3*int(idxsOrig[1]), &norm)\n\t\tpos.AppendVec3(pt)\n\t\tnorms.AppendVec3(norm)\n\t\tuvs.AppendVec2(uv2)\n\t\tidxsOrig = append(idxsOrig, uint32(nextidx))\n\t\tnextidx++\n\n\t\t// Appends faces idxsOrig\n\t\tfor x := 0; x < radialSegs; x++ {\n\t\t\tpos := 2 * x\n\t\t\ti1 := idxsOrig[pos]\n\t\t\ti2 := idxsOrig[pos+3]\n\t\t\ti3 := idxsOrig[pos+1]\n\t\t\tidxs.Append(uint32(stidx+i1), uint32(stidx+i2), uint32(stidx+i3))\n\t\t}\n\t}\n\n\tms.Vtx = append(ms.Vtx, pos...)\n\tms.Idx = append(ms.Idx, idxs...)\n\tms.Norm = append(ms.Norm, norms...)\n\tms.Tex = append(ms.Tex, uvs...)\n\n\tms.BBox.BBox.ExpandByBox(bb)\n}",
"func (r *Region) ExistSector(x, y int) bool {\n\treturn r.offsets[x][y] != 0\n}",
"func (bfs *BlockFilesystem) allocate(ctx context.Context) (uint64, error) {\n\tstate, err := bfs.store.State(ctx)\n\tif err != nil {\n\t\treturn nilPtr, err\n\t} else if state.TrashPtr == nilPtr {\n\t\tnext := state.NextPtr\n\t\tstate.NextPtr += 1\n\t\treturn next, nil\n\t}\n\n\tb := &block{parent: bfs}\n\tif bfs.splitPtrs {\n\t\trawPtrs, err := bfs.store.Get(ctx, p(state.TrashPtr))\n\t\tif err != nil {\n\t\t\treturn nilPtr, err\n\t\t} else if err := b.UnmarshalPtrs(rawPtrs); err != nil {\n\t\t\treturn nilPtr, fmt.Errorf(\"blockfs: failed to parse block %x: %v\", state.TrashPtr, err)\n\t\t}\n\t} else {\n\t\traw, err := bfs.store.Get(ctx, state.TrashPtr)\n\t\tif err != nil {\n\t\t\treturn nilPtr, err\n\t\t} else if err := b.Unmarshal(raw); err != nil {\n\t\t\treturn nilPtr, fmt.Errorf(\"blockfs: failed to parse block %x: %v\", state.TrashPtr, err)\n\t\t}\n\t}\n\n\ttrash := state.TrashPtr\n\tstate.TrashPtr = b.ptrs[0]\n\treturn trash, nil\n}",
"func (n *Node) appendAllocCDIR(podCIDR *cidr.CIDR) {\n\tif podCIDR.IP.To4() != nil {\n\t\tif n.IPv4AllocCIDR == nil {\n\t\t\tn.IPv4AllocCIDR = podCIDR\n\t\t} else {\n\t\t\tn.IPv4SecondaryAllocCIDRs = append(n.IPv4SecondaryAllocCIDRs, podCIDR)\n\t\t}\n\t} else {\n\t\tif n.IPv6AllocCIDR == nil {\n\t\t\tn.IPv6AllocCIDR = podCIDR\n\t\t} else {\n\t\t\tn.IPv6SecondaryAllocCIDRs = append(n.IPv6SecondaryAllocCIDRs, podCIDR)\n\t\t}\n\t}\n}",
"func (p *ResourcePool) Alloc(ctx context.Context, id string) (Alloc, error) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\tif alloc, ok := p.allocs[id]; ok {\n\t\treturn alloc, nil\n\t}\n\treturn nil, errors.E(\"alloc\", id, errors.NotExist)\n}",
"func (s *EntityStorage) AllocateID(id int) *Entity {\n\tif int(id) >= len(s.vec) || s.vec[id].occupied {\n\t\treturn nil\n\t}\n\n\tidx, _ := s.freeIDs.BiSearch(id, gen.IntBiComp)\n\ts.freeIDs.Remove(idx)\n\n\treturn &s.vec[id].value\n}",
"func NewAlloc(startChunkSize int, slabSize int, growthFactor float64, malloc func(size int) []byte) *Alloc {\n\tc := new(Alloc)\n\tc.m = make(locker,1)\n\tc.arena = slab.NewArena(startChunkSize,slabSize,growthFactor,malloc)\n\tc.recycle = make(chan []byte,128)\n\treturn c\n}",
"func (bf *BlockFile) AllocateBlock() (int32, error) {\n\tblockID := bf.NumBlocks\n\tbf.NumBlocks++\n\terr := bf.File.Truncate(int64(bf.NumBlocks) * int64(bf.BlockSize))\n\tif err != nil {\n\t\treturn InvalidBlockID, err\n\t}\n\treturn blockID, nil\n}",
"func OpenDisk(filename string) (SectorDisk, error) {\n\text := strings.ToLower(path.Ext(filename))\n\tswitch ext {\n\tcase \".dsk\":\n\t\treturn LoadDSK(filename)\n\t}\n\treturn nil, fmt.Errorf(\"Unimplemented/unknown disk file extension %q\", ext)\n}",
"func allocPDFLexbufMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFLexbufValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func (c *cluster) preAlloc() (b *Block) {\n\tn := c.pool.Config.BlocksPerAlloc\n\tbuf := make([]byte, n*c.size)\n\tc.pushPreAlloc(buf)\n\t// only return the first block (index 0)\n\tb = newBlock(c, buf[:c.size])\n\n\tc.Lock()\n\tc.totalBlocks += uint32(n)\n\tif uint16(c.totalBlocks/uint32(c.pool.Config.BlocksPerGroup))+1 > c.groups {\n\t\tc.blocks = append(c.blocks, []*Block{})\n\t\tc.groups = uint16(len(c.blocks))\n\t\tc.muts = append(c.muts, new(sync.Mutex))\n\t}\n\tc.Unlock()\n\treturn\n}",
"func DiskFree() (uint64, error) {\n\tusage, err := Disk()\n\t// for i := 0; i < len(usage); i++ {\n\tif len(usage) > 0 {\n\t\tfreeDisk := usage[0].Free\n\t\treturn freeDisk, err\n\t}\n\n\treturn 0, err\n}",
"func (s *Module) diskFindCandidate(size gridtypes.Unit) (path string, err error) {\n\tcandidates, err := s.findCandidates(size)\n\tif err != nil {\n\t\treturn path, err\n\t}\n\t// does anyone have a vdisk subvol\n\tfor _, candidate := range candidates {\n\t\tvolumes, err := candidate.Pool.Volumes()\n\t\tif err != nil {\n\t\t\tlog.Error().Str(\"pool\", candidate.Pool.Path()).Err(err).Msg(\"failed to list pool volumes\")\n\t\t\tcontinue\n\t\t}\n\t\tfor _, volume := range volumes {\n\t\t\tif volume.Name() != vdiskVolumeName {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn volume.Path(), nil\n\t\t}\n\t}\n\t// none has a vdiks subvolume, we need to\n\t// create one.\n\tcandidate := candidates[0]\n\tvolume, err := candidate.Pool.AddVolume(vdiskVolumeName)\n\tif err != nil {\n\t\treturn path, errors.Wrap(err, \"failed to create vdisk pool\")\n\t}\n\n\treturn volume.Path(), nil\n}",
"func (mm *atmanMemoryManager) allocPage(page vaddr) {\n\tvar (\n\t\tl4offset = page.pageTableOffset(pageTableLevel4)\n\t\tl3offset = page.pageTableOffset(pageTableLevel3)\n\t\tl2offset = page.pageTableOffset(pageTableLevel2)\n\t\tl1offset = page.pageTableOffset(pageTableLevel1)\n\t)\n\n\tl4 := mm.l4\n\tl3pte := l4.Get(l4offset)\n\n\tif !l3pte.hasFlag(xenPageTablePresent) {\n\t\tpfn := mm.physAllocPage()\n\t\tl3pte = mm.writePte(mm.l4PFN, l4offset, pfn, PTE_PAGE_TABLE_FLAGS|xenPageTableWritable)\n\t}\n\n\tl3 := mm.getPageTable(-1, -1, l4offset)\n\tl2pte := l3.Get(l3offset)\n\n\tif !l2pte.hasFlag(xenPageTablePresent) {\n\t\tpfn := mm.physAllocPage()\n\t\tl2pte = mm.writePte(l3pte.pfn(), l3offset, pfn, PTE_PAGE_TABLE_FLAGS|xenPageTableWritable)\n\t}\n\n\tl2 := mm.getPageTable(-1, l4offset, l3offset)\n\tl1pte := l2.Get(l2offset)\n\n\tif !l1pte.hasFlag(xenPageTablePresent) {\n\t\tpfn := mm.physAllocPage()\n\t\tl1pte = mm.writePte(l2pte.pfn(), l2offset, pfn, PTE_PAGE_TABLE_FLAGS|xenPageTableWritable)\n\t}\n\n\tpagepfn := mm.physAllocPage()\n\tmm.writePte(l1pte.pfn(), l1offset, pagepfn, PTE_PAGE_FLAGS)\n\n\t// ensure page is writable\n\t*(*uintptr)(unsafe.Pointer(page)) = 0x0\n}",
"func allocPDFHotspotMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFHotspotValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func (ca *CreateAction) allocateOneCluster() error {\n\t// 计算已经分配的quota与集群总资源的比值,最后算出比值最小的集群\n\tcondM := operator.M{\n\t\t\"region\": ca.req.Region,\n\t\t\"federationclusterid\": ca.req.FederationClusterID,\n\t\t\"enginetype\": common.ClusterEngineTypeK8s,\n\t\t\"clustertype\": common.ClusterTypeSingle,\n\t}\n\tcond := operator.NewLeafCondition(operator.Eq, condM)\n\tclusterList, err := ca.model.ListCluster(ca.ctx, cond, &storeopt.ListOption{})\n\tif err != nil {\n\t\treturn err\n\t}\n\tminResRate := float32(math.MaxFloat32)\n\ttargetCluster := \"\"\n\tfor _, cluster := range clusterList {\n\t\tnodes, err := ca.listNodesFromCluster(cluster.ClusterID)\n\t\tif err != nil {\n\t\t\tblog.Warnf(\"failed to list nodes from cluster %s, continue to check next cluster, err %s\",\n\t\t\t\tcluster.ClusterID, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tquotas, err := ca.listQuotasByCluster(cluster.ClusterID)\n\t\tif err != nil {\n\t\t\tblog.Warnf(\"failed to list quotas by cluster %s, continue to check next cluster, err %s\",\n\t\t\t\tcluster.ClusterID, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\ttmpRate, err := utils.CalculateResourceAllocRate(quotas, nodes)\n\t\tif err != nil {\n\t\t\tblog.Warnf(\"failed to calculate rate of cluster %s, continue to check next cluster, err %s\",\n\t\t\t\tcluster.ClusterID, err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tif tmpRate <= minResRate {\n\t\t\ttargetCluster = cluster.ClusterID\n\t\t}\n\t}\n\tif len(targetCluster) == 0 {\n\t\treturn fmt.Errorf(\"can not find a suitable cluster\")\n\t}\n\tca.allocatedCluster = targetCluster\n\treturn nil\n}",
"func allocPDFLayerConfigMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFLayerConfigValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func TestShrinkStorageFolderWithSectors(t *testing.T) {\n\tif testing.Short() {\n\t\tt.SkipNow()\n\t}\n\tt.Parallel()\n\tcmt, err := newContractManagerTester(t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer cmt.panicClose()\n\n\t// Add a storage folder.\n\tstorageFolderOne := filepath.Join(cmt.persistDir, \"storageFolderOne\")\n\t// Create the storage folder dir.\n\terr = os.MkdirAll(storageFolderOne, 0700)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.AddStorageFolder(storageFolderOne, modules.SectorSize*storageFolderGranularity*8)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Get the index of the storage folder.\n\tsfs := cmt.cm.StorageFolders()\n\tif len(sfs) != 1 {\n\t\tt.Fatal(\"there should only be one storage folder\")\n\t}\n\tsfIndex := sfs[0].Index\n\t// Verify that the storage folder has the correct capacity.\n\tif sfs[0].Capacity != modules.SectorSize*storageFolderGranularity*8 {\n\t\tt.Error(\"new storage folder is reporting the wrong capacity\")\n\t}\n\t// Verify that the on-disk files are the right size.\n\tmfn := filepath.Join(storageFolderOne, metadataFile)\n\tsfn := filepath.Join(storageFolderOne, sectorFile)\n\tmfi, err := os.Stat(mfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfi, err := os.Stat(sfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*8 {\n\t\tt.Error(\"metadata file is the wrong size\")\n\t}\n\tif uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*8 {\n\t\tt.Error(\"sector file is the wrong size\")\n\t}\n\n\t// Create some sectors and add them to the storage folder.\n\troots := make([]crypto.Hash, storageFolderGranularity*3)\n\tdatas := make([][]byte, storageFolderGranularity*3)\n\tfor i := 0; i < storageFolderGranularity*3; i++ {\n\t\troot, data := randSector()\n\t\troots[i] = root\n\t\tdatas[i] = data\n\t}\n\t// Add all of the sectors.\n\tvar wg sync.WaitGroup\n\twg.Add(len(roots))\n\tfor i := 0; i < len(roots); i++ {\n\t\tgo func(i int) {\n\t\t\terr := cmt.cm.AddSector(roots[i], datas[i])\n\t\t\tif err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\twg.Wait()\n\n\t// Add a second storage folder so that the displaced sectors have somewhere\n\t// to go.\n\tstorageFolderTwo := filepath.Join(cmt.persistDir, \"storageFolderTwo\")\n\t// Create the storage folder dir.\n\terr = os.MkdirAll(storageFolderTwo, 0700)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\terr = cmt.cm.AddStorageFolder(storageFolderTwo, modules.SectorSize*storageFolderGranularity*3)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify that every single sector is readable and has the correct data.\n\twg.Add(len(roots))\n\tvar misses uint64\n\tfor i := 0; i < len(roots); i++ {\n\t\tgo func(i int) {\n\t\t\tdata, err := cmt.cm.ReadSector(roots[i])\n\t\t\tif err != nil || !bytes.Equal(data, datas[i]) {\n\t\t\t\tatomic.AddUint64(&misses, 1)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\twg.Wait()\n\tif misses != 0 {\n\t\tt.Errorf(\"Could not find all %v sectors: %v\\n\", len(roots), misses)\n\t}\n\n\t// Decrease the size of the storage folder.\n\terr = cmt.cm.ResizeStorageFolder(sfIndex, modules.SectorSize*storageFolderGranularity*2, false)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Verify that the capacity and file sizes are correct.\n\tsfs = cmt.cm.StorageFolders()\n\tcapacity := sfs[0].Capacity + sfs[1].Capacity\n\tcapacityRemaining := sfs[0].CapacityRemaining + sfs[1].CapacityRemaining\n\tif capacity != modules.SectorSize*storageFolderGranularity*5 
{\n\t\tt.Error(\"new storage folder is reporting the wrong capacity\")\n\t}\n\tif capacityRemaining != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"new storage folder capacity remaining is reporting the wrong remaining capacity\")\n\t}\n\tmfi, err = os.Stat(mfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfi, err = os.Stat(sfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {\n\t\tt.Error(\"metadata file is the wrong size\")\n\t}\n\tif uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"sector file is the wrong size\")\n\t}\n\n\t// Verify that every single sector is readable and has the correct data.\n\twg.Add(len(roots))\n\tmisses = 0\n\tfor i := 0; i < len(roots); i++ {\n\t\tgo func(i int) {\n\t\t\tdata, err := cmt.cm.ReadSector(roots[i])\n\t\t\tif err != nil || !bytes.Equal(data, datas[i]) {\n\t\t\t\tatomic.AddUint64(&misses, 1)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\twg.Wait()\n\tif misses != 0 {\n\t\tt.Errorf(\"Could not find all %v sectors: %v\\n\", len(roots), misses)\n\t}\n\n\t// Restart the contract manager to see that the change is persistent.\n\terr = cmt.cm.Close()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tcmt.cm, err = New(filepath.Join(cmt.persistDir, modules.ContractManagerDir))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// Verify that the capacity and file sizes are correct.\n\tsfs = cmt.cm.StorageFolders()\n\tcapacity = sfs[0].Capacity + sfs[1].Capacity\n\tcapacityRemaining = sfs[0].CapacityRemaining + sfs[1].CapacityRemaining\n\tif capacity != modules.SectorSize*storageFolderGranularity*5 {\n\t\tt.Error(\"new storage folder is reporting the wrong capacity\")\n\t}\n\tif capacityRemaining != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"new storage folder capacity remaining is reporting the wrong remaining capacity\")\n\t}\n\tmfi, err = os.Stat(mfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tsfi, err = os.Stat(sfn)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tif uint64(mfi.Size()) != sectorMetadataDiskSize*storageFolderGranularity*2 {\n\t\tt.Error(\"metadata file is the wrong size\")\n\t}\n\tif uint64(sfi.Size()) != modules.SectorSize*storageFolderGranularity*2 {\n\t\tt.Error(\"sector file is the wrong size\")\n\t}\n\n\t// Verify that every single sector is readable and has the correct data.\n\twg.Add(len(roots))\n\tmisses = 0\n\tfor i := 0; i < len(roots); i++ {\n\t\tgo func(i int) {\n\t\t\tdata, err := cmt.cm.ReadSector(roots[i])\n\t\t\tif err != nil || !bytes.Equal(data, datas[i]) {\n\t\t\t\tatomic.AddUint64(&misses, 1)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}(i)\n\t}\n\twg.Wait()\n\tif misses != 0 {\n\t\tt.Errorf(\"Could not find all %v sectors: %v\\n\", len(roots), misses)\n\t}\n}",
"func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {\n\tp := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)\n\tif uintptr(p) < 4096 {\n\t\tif uintptr(p) == _EACCES {\n\t\t\tprint(\"runtime: mmap: access denied\\n\")\n\t\t\texit(2)\n\t\t}\n\t\tif uintptr(p) == _EAGAIN {\n\t\t\tprint(\"runtime: mmap: too much locked memory (check 'ulimit -l').\\n\")\n\t\t\texit(2)\n\t\t}\n\t\treturn nil\n\t}\n\tmSysStatInc(sysStat, n)\n\treturn p\n}",
"func (oo *OnuDeviceEntry) AllocateFreeTcont(ctx context.Context, allocID uint16) (uint16, bool, error) {\n\tlogger.Debugw(ctx, \"allocate-free-tcont\", log.Fields{\"device-id\": oo.deviceID, \"allocID\": allocID,\n\t\t\"allocated-instances\": oo.SOnuPersistentData.PersTcontMap})\n\n\too.MutexPersOnuConfig.Lock()\n\tdefer oo.MutexPersOnuConfig.Unlock()\n\tif entityID, ok := oo.SOnuPersistentData.PersTcontMap[allocID]; ok {\n\t\t//tcont already allocated before, return the used instance-id\n\t\treturn entityID, true, nil\n\t}\n\t//First allocation of tcont. Find a free instance\n\tif tcontInstKeys := oo.pOnuDB.GetSortedInstKeys(ctx, me.TContClassID); len(tcontInstKeys) > 0 {\n\t\tlogger.Debugw(ctx, \"allocate-free-tcont-db-keys\", log.Fields{\"device-id\": oo.deviceID, \"keys\": tcontInstKeys})\n\t\tfor _, instID := range tcontInstKeys {\n\t\t\tinstExist := false\n\t\t\t//If this instance exist in map, it means it is not empty. It is allocated before\n\t\t\tfor _, v := range oo.SOnuPersistentData.PersTcontMap {\n\t\t\t\tif v == instID {\n\t\t\t\t\tinstExist = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tif !instExist {\n\t\t\t\too.SOnuPersistentData.PersTcontMap[allocID] = instID\n\t\t\t\treturn instID, false, nil\n\t\t\t}\n\t\t}\n\t}\n\treturn 0, false, fmt.Errorf(fmt.Sprintf(\"no-free-tcont-left-for-device-%s\", oo.deviceID))\n}",
"func (m *SHMLockManager) AllocateLock() (Locker, error) {\n\treturn nil, fmt.Errorf(\"not supported\")\n}",
"func LoadSectorMap(diskbytes []byte) (SectorMap, error) {\n\tsm := SectorMap(make([]byte, 560))\n\tsector09, err := disk.ReadSector(diskbytes, 0, 9)\n\tif err != nil {\n\t\treturn sm, err\n\t}\n\tsector0A, err := disk.ReadSector(diskbytes, 0, 0xA)\n\tif err != nil {\n\t\treturn sm, err\n\t}\n\tsector0B, err := disk.ReadSector(diskbytes, 0, 0xB)\n\tif err != nil {\n\t\treturn sm, err\n\t}\n\tcopy(sm[0:0x30], sector09[0xd0:])\n\tcopy(sm[0x30:0x130], sector0A)\n\tcopy(sm[0x130:0x230], sector0B)\n\treturn sm, nil\n}",
"func Probe(readSeeker io.ReadSeeker) (mbr *MBR, err error) {\n\tdata := make([]byte, 512)\n\tread := func() {\n\t\tif _, err = io.ReadFull(readSeeker, data); err != nil {\n\t\t\treturn\n\t\t}\n\t\t// for i := range data {\n\t\t// \tfmt.Printf(\"%v: %X\\n\", i, data[i])\n\t\t// }\n\t}\n\n\tif read(); err != nil {\n\t\treturn nil, err\n\t}\n\tpartEntries, err := probe(data)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpartitions := map[int]*parttable.Partition{}\n\tadd := func(n int, entry PartEntry, partType parttable.PartType) {\n\t\tif entry.PartitionType != 0 {\n\t\t\tpartitions[n] = &parttable.Partition{\n\t\t\t\tNumber: n,\n\t\t\t\tType: partType,\n\t\t\t}\n\t\t}\n\t}\n\n\tfor i, entry := range partEntries {\n\t\tswitch entry.PartitionType {\n\t\tcase 0x05, 0x0F, 0x85, 0xC5, 0xCF, 0xD5:\n\t\t\tadd(i+1, entry, parttable.Extended)\n\t\t\t// fmt.Println(\"DEBUG:::: \", int64(entry.FirstLBA-1)*512)\n\t\t\tif _, err = readSeeker.Seek(int64(entry.FirstLBA-1)*512, os.SEEK_CUR); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tif read(); err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tlogicalPartEntries, err := probe(data)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\tfor j := range logicalPartEntries {\n\t\t\t\tadd(j+5, logicalPartEntries[j], parttable.Logical)\n\t\t\t}\n\t\tdefault:\n\t\t\tadd(i+1, entry, parttable.Primary)\n\t\t}\n\t}\n\n\treturn &MBR{partitions: partitions}, nil\n}",
"func AddNewCylinderSector(sc *Scene, name string, height, topRad, botRad float32, radialSegs, heightSegs int, angStart, angLen float32, top, bottom bool) *Cylinder {\n\tcy := &Cylinder{}\n\tcy.Nm = name\n\tcy.Height = height\n\tcy.TopRad = topRad\n\tcy.BotRad = botRad\n\tcy.RadialSegs = radialSegs\n\tcy.HeightSegs = heightSegs\n\tcy.AngStart = angStart\n\tcy.AngLen = angLen\n\tcy.Top = top\n\tcy.Bottom = bottom\n\tsc.AddMesh(cy)\n\treturn cy\n}",
"func (idx *Tree) allocateNode(a *Allocator, count int, prefixLen int) (n uint64, data []uint64) {\n\tprefixSlots := (prefixLen + 7) >> 3\n\tif prefixLen >= 255 {\n\t\tprefixSlots++\n\t}\n\tcount += prefixSlots\n\tn = a.newNode(count)\n\tblock := int(n >> blockSlotsShift)\n\toffset := int(n & blockSlotsOffsetMask)\n\tdata = idx.blocks[block].data[offset:]\n\treturn\n}",
"func (d Device) Alloc(extern External, size int64) (tensor.Memory, error) {\n\tif d == CPU {\n\t\tcudaLogf(\"device is CPU\")\n\t\treturn nil, nil // well there should be an error because this wouldn't be called\n\t}\n\n\tmachine := extern.(CUDAMachine)\n\tctxes := machine.Contexts()\n\tif len(ctxes) == 0 {\n\t\tcudaLogf(\"allocate nothing\")\n\t\treturn nil, nil\n\t}\n\tctx := ctxes[int(d)]\n\n\tcudaLogf(\"calling ctx.MemAlloc(%d)\", size)\n\treturn ctx.MemAlloc(size)\n}",
"func (vns *VirtualNetworkService) Allocate(ctx context.Context, vnBlueprint blueprint.Interface,\n\tcluster resources.Cluster) (*resources.VirtualNetwork, error) {\n\tclusterID, err := cluster.ID()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tblueprintText, err := vnBlueprint.Render()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresArr, err := vns.call(ctx, \"one.vn.allocate\", blueprintText, clusterID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn vns.RetrieveInfo(ctx, int(resArr[resultIndex].ResultInt()))\n}",
"func allocPDFCryptMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFCryptValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func (s *store) AllocateHostDir(name string) (string, error) {\n\tpath := filepath.Join(s.StorePath(), name)\n\terr := os.MkdirAll(path, 0755)\n\treturn path, err\n}",
"func (a *arena) alloc(size, align uint32) uint32 {\n\tstart := (uint64(a.n) + (uint64(align) - 1)) & ^(uint64(align) - 1)\n\tend := start + uint64(size)\n\n\tif end > uint64(len(a.buf)) {\n\t\t// Resize buffer.\n\t\tnewSize := end * 2\n\t\tif newSize > math.MaxUint32 {\n\t\t\tpanic(\"buffer exceeded maximum size\")\n\t\t}\n\n\t\tnewBuf := make([]byte, newSize)\n\t\tif a.buf != nil {\n\t\t\tcopy(newBuf[:a.n], a.buf[:a.n])\n\t\t}\n\n\t\ta.buf = newBuf\n\t}\n\n\ta.n = uint32(end)\n\treturn uint32(start)\n}",
"func allocPDFDaInfoMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFDaInfoValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func allocPDFLexbufLargeMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFLexbufLargeValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func (c *clustermgrClient) AllocVolumeUnit(ctx context.Context, vuid proto.Vuid) (*AllocVunitInfo, error) {\n\tc.rwLock.Lock()\n\tdefer c.rwLock.Unlock()\n\n\tspan := trace.SpanFromContextSafe(ctx)\n\n\tspan.Debugf(\"alloc volume unit: args vuid[%d]\", vuid)\n\tret := &AllocVunitInfo{}\n\tinfo, err := c.client.AllocVolumeUnit(ctx, &cmapi.AllocVolumeUnitArgs{Vuid: vuid})\n\tif err != nil {\n\t\tspan.Errorf(\"alloc volume unit failed: err[%+v]\", err)\n\t\treturn nil, err\n\t}\n\tspan.Debugf(\"alloc volume unit ret: unit[%+v]\", *info)\n\n\tdiskInfo, err := c.client.DiskInfo(ctx, info.DiskID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tret.set(info, diskInfo.Host)\n\treturn ret, err\n}",
"func memAlloc(v unsafe.Pointer, n uintptr) unsafe.Pointer {\n\trequiredPages := uint64(round(n, _PAGESIZE) / _PAGESIZE)\n\n\treturn _atman_mm.allocPages(v, requiredPages)\n}",
"func getDiskInfo(path string) (uint64, uint64, error) {\n\ts := syscall.Statfs_t{}\n\terr := syscall.Statfs(path, &s)\n\tif err != nil {\n\t\treturn 0,0, err\n\t}\n\treservedBlocks := s.Bfree - s.Bavail\n\n\ttotal := uint64(s.Frsize) * (s.Blocks - reservedBlocks)\n\tfree := uint64(s.Frsize) * s.Bavail\n\t// Check for overflows.\n\t// https://github.com/minio/minio/issues/8035\n\t// XFS can show wrong values at times error out\n\t// in such scenarios.\n\tif free > total {\n\t\treturn 0, 0, fmt.Errorf(\"detected free space (%d) > total disk space (%d), fs corruption at (%s). please run 'fsck'\", free, total, path)\n\t}\n\n\treturn total, free, nil\n}",
"func reserveVulkanDevice(ctx context.Context, d adb.Device) (*flock.Mutex, error) {\n\tm := flock.Lock(d.Instance().GetSerial())\n\tif err := d.SetSystemProperty(ctx, vkImplicitLayersProp, \"GraphicsSpy\"); err != nil {\n\t\treturn nil, log.Err(ctx, err, \"Setting up vulkan layer\")\n\t}\n\treturn m, nil\n}",
"func sysAlloc(n uintptr, sysStat *sysMemStat) unsafe.Pointer {\n\tp, err := mmap(nil, n, _PROT_READ|_PROT_WRITE, _MAP_ANON|_MAP_PRIVATE, -1, 0)\n\tif err != 0 {\n\t\tif err == _EACCES {\n\t\t\tprint(\"runtime: mmap: access denied\\n\")\n\t\t\texit(2)\n\t\t}\n\t\tif err == _EAGAIN {\n\t\t\tprint(\"runtime: mmap: too much locked memory (check 'ulimit -l').\\n\")\n\t\t\texit(2)\n\t\t}\n\t\treturn nil\n\t}\n\tsysStat.add(int64(n))\n\treturn p\n}",
"func (p *projectQuota) findAvailableBackingDev(targetPath string) (*backingDev, error) {\n\tbackingFsBlockDev, err := p.findOrCreateBackingDev(targetPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif !backingFsBlockDev.supported {\n\t\treturn nil, manager.NotSupported\n\t}\n\n\treturn backingFsBlockDev, nil\n}",
"func DiskUsage(path string) (disk DiskStatus) {\n\n\tif path == \"\" {\n\t\treturn\n\t}\n\n\tfs := syscall.Statfs_t{}\n\terr := syscall.Statfs(path, &fs)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdisk.All = fs.Blocks * uint64(fs.Bsize)\n\tdisk.Free = fs.Bfree * uint64(fs.Bsize)\n\tdisk.Used = disk.All - disk.Free\n\treturn disk\n}",
"func allocPDFCsiMemory(n int) unsafe.Pointer {\n\tmem, err := C.calloc(C.size_t(n), (C.size_t)(sizeOfPDFCsiValue))\n\tif err != nil {\n\t\tpanic(\"memory alloc error: \" + err.Error())\n\t}\n\treturn mem\n}",
"func (trcn *TestRetrievalClientNode) AllocateLane(ctx context.Context, paymentChannel address.Address) (uint64, error) {\n\tif trcn.allocateLaneRecorder != nil {\n\t\ttrcn.allocateLaneRecorder(paymentChannel)\n\t}\n\treturn trcn.lane, trcn.laneError\n}",
"func DiskUsage(path string) (disk DiskStatus) {\n\th := syscall.MustLoadDLL(\"kernel32.dll\")\n\tc := h.MustFindProc(\"GetDiskFreeSpaceExW\")\n\tlpFreeBytesAvailable := int64(0)\n\tlpTotalNumberOfBytes := int64(0)\n\tlpTotalNumberOfFreeBytes := int64(0)\n\t_, _, err := c.Call(uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(path[:2]))),\n\t\tuintptr(unsafe.Pointer(&lpFreeBytesAvailable)),\n\t\tuintptr(unsafe.Pointer(&lpTotalNumberOfBytes)),\n\t\tuintptr(unsafe.Pointer(&lpTotalNumberOfFreeBytes)))\n\tif err != nil {\n\t\tif !strings.Contains(fmt.Sprint(err), \"successfully\") {\n\t\t\tlogger.Error(\"Error during retrieving memory statistic:\", err)\n\t\t\treturn\n\t\t}\n\t}\n\tdisk.All = uint64(lpTotalNumberOfBytes)\n\tdisk.Free = uint64(lpTotalNumberOfFreeBytes)\n\tdisk.Used = disk.All - disk.Free\n\treturn\n}",
"func MakeFileSystem(idp string, typef byte) {\n\tvar flgfound bool = false\n\tmpartition := Mounted{}\n\tfor _, mp := range sliceMP {\n\t\tidm := \"vd\" + string(mp.Letter) + strconv.FormatInt(mp.Number, 10)\n\t\tif idp == idm {\n\t\t\tflgfound = true\n\t\t\tmpartition = mp\n\t\t\tbreak\n\t\t}\n\t}\n\tif flgfound {\n\t\tvar bname [16]byte\n\t\tpartition := mpartition.Part\n\t\t// Se realiza el formateo de la partición\n\t\tif typef == 'u' {\n\t\t\twriteByteArray(mpartition.Path, partition.PartStart, partition.PartSize)\n\t\t}\n\t\t// Current Position Disk Partition\n\t\tvar cpd int64\n\t\t// Se obtiene el tamaño de las estructuras y la cantidad (#Estructuras)\n\t\tsStrc, cStrc := GetNumberOfStructures(partition.PartSize)\n\t\t// Se creará el Super Boot\n\t\tnewSB := SuperBoot{}\n\t\t// Nombre HD\n\t\tcopy(bname[:], mpartition.Name)\n\t\tnewSB.NombreHd = bname\n\t\tnewSB.FechaCreacion = getCurrentTime()\n\t\tnewSB.FechaUltimoMontaje = mpartition.TMount\n\t\tnewSB.ConteoMontajes = 1\n\t\t// Cantidad de estructuras creadas\n\t\tnewSB.CantArbolVirtual = 1\n\t\tnewSB.CantDetalleDirectorio = 1\n\t\tnewSB.CantidadInodos = 1\n\t\tnewSB.CantidadBloques = 2\n\t\t// Cantidad de estructuras ocupadas...\n\t\tnewSB.ArbolesVirtualesLibres = cStrc - 1\n\t\tnewSB.DetallesDirectorioLibres = cStrc - 1\n\t\tnewSB.InodosLibres = (cStrc * 5) - 1\n\t\tnewSB.BloquesLibres = (cStrc * 20) - 2 // Por los dos bloques del archivo user.txt\n\t\t// Inicio BMap AVD = Inicio_Particion + SizeSB\n\t\tcpd = partition.PartStart + sStrc.sizeSB\n\t\tnewSB.AptBmapArbolDirectorio = cpd\n\t\t// Inicio AVD = Inicio BitMap AVD + #Estructuras\n\t\tcpd = cpd + cStrc\n\t\tnewSB.AptArbolDirectorio = cpd\n\t\t// Inicio BMap DDir = Inicio AVD + (sizeAVD*#Estructuras)\n\t\tcpd = cpd + (sStrc.sizeAV * cStrc)\n\t\tnewSB.AptBmapDetalleDirectorio = cpd\n\t\t// Inicio DDir = Inicio BMap DDir + #Estructuras\n\t\tcpd = cpd + cStrc\n\t\tnewSB.AptDetalleDirectorio = cpd\n\t\t// Inicio BMap Inodo = Inicio DDir + (sizeDDir * #Estructuras)\n\t\tcpd = cpd + (sStrc.sizeDDir * cStrc)\n\t\tnewSB.AptBmapTablaInodo = cpd\n\t\t// Inicio Inodos = Inicio BMap Inodo + (5 * sizeInodo)\n\t\tcpd = cpd + (5 * cStrc)\n\t\tnewSB.AptTablaInodo = cpd\n\t\t// Inicio BMap Bloque = Inicio Inodos + (5 * sizeInodo * #Estructuras)\n\t\tcpd = cpd + (5 * sStrc.sizeInodo * cStrc)\n\t\tnewSB.AptBmapBloques = cpd\n\t\t// Inicio Bloque = Inicio Inodo + (20 * #Estructuras)\n\t\tcpd = cpd + (20 * cStrc)\n\t\tnewSB.AptBloques = cpd\n\t\t// Inicio Bitacora (Log) = Inicio Bloque + (20 * sizeBloque * #Estructuras)\n\t\tcpd = cpd + (20 * sStrc.sizeBD * cStrc)\n\t\tnewSB.AptLog = cpd\n\t\t// Inicio Copia SB = Inicio Bitacora + (sizeLog * #Estructuras)\n\t\tcpd = cpd + (sStrc.sizeLog * cStrc)\n\t\t//--- Se guarda el tamaño de las estructuras ------------------------------------\n\t\tnewSB.TamStrcArbolDirectorio = sStrc.sizeAV\n\t\tnewSB.TamStrcDetalleDirectorio = sStrc.sizeDDir\n\t\tnewSB.TamStrcInodo = sStrc.sizeInodo\n\t\tnewSB.TamStrcBloque = sStrc.sizeBD\n\t\t//--- Se guarda el primer bit vacio del bitmap de cada estructura ---------------\n\t\tnewSB.PrimerBitLibreArbolDir = 2\n\t\tnewSB.PrimerBitLibreDetalleDir = 2\n\t\tnewSB.PrimerBitLibreTablaInodo = 2\n\t\tnewSB.PrimerBitLibreBloques = 3\n\t\t//--- Numero Magico -------------------------------------------------------------\n\t\tnewSB.NumeroMagico = 201503442\n\t\t//--- Escribir SB en Disco ------------------------------------------------------\n\t\tWriteSuperBoot(mpartition.Path, newSB, partition.PartStart)\n\t\t//--- Escritura de 
la Copia de SB -----------------------------------------------\n\t\tWriteSuperBoot(mpartition.Path, newSB, cpd)\n\t\t//--- (1) Crear un AVD : root \"/\" -----------------------------------------------\n\t\tavdRoot := ArbolVirtualDir{}\n\t\tavdRoot.FechaCreacion = getCurrentTime()\n\t\tcopy(avdRoot.NombreDirectorio[:], \"/\")\n\t\tcopy(avdRoot.AvdPropietario[:], \"root\")\n\t\tcopy(avdRoot.AvdGID[:], \"root\")\n\t\tavdRoot.AvdPermisos = 777\n\t\tavdRoot.AptDetalleDirectorio = 1\n\t\tWriteAVD(mpartition.Path, avdRoot, newSB.AptArbolDirectorio)\n\t\t//--- (2) Crear un Detalle de Directorio ----------------------------------------\n\t\tdetalleDir := DetalleDirectorio{}\n\t\tarchivoInf := InfoArchivo{}\n\t\tarchivoInf.FechaCreacion = getCurrentTime()\n\t\tarchivoInf.FechaModifiacion = getCurrentTime()\n\t\tcopy(archivoInf.FileName[:], \"user.txt\")\n\t\tarchivoInf.ApInodo = 1\n\t\tdetalleDir.InfoFile[0] = archivoInf\n\t\tWriteDetalleDir(mpartition.Path, detalleDir, newSB.AptDetalleDirectorio)\n\t\t//--- (3) Crear una Tabla de Inodo ----------------------------------------------\n\t\tstrAux := \"1,G,root\\n1,U,root,201503442\\n\"\n\t\ttbInodo := TablaInodo{}\n\t\ttbInodo.NumeroInodo = 1 // Primer Inodo creado\n\t\ttbInodo.SizeArchivo = int64(len(strAux))\n\t\ttbInodo.CantBloquesAsignados = 2\n\t\ttbInodo.AptBloques[0] = int64(1)\n\t\ttbInodo.AptBloques[1] = int64(2)\n\t\tcopy(tbInodo.IDPropietario[:], \"root\")\n\t\tcopy(tbInodo.IDUGrupo[:], \"root\")\n\t\ttbInodo.IPermisos = 777\n\t\tWriteTInodo(mpartition.Path, tbInodo, newSB.AptTablaInodo)\n\t\t//--- (4) Creación de los Bloques de datos --------------------------------------\n\t\tbloque1 := BloqueDeDatos{}\n\t\tcopy(bloque1.Data[:], strAux[0:25])\n\t\tWriteBloqueD(mpartition.Path, bloque1, newSB.AptBloques)\n\t\tbloque2 := BloqueDeDatos{}\n\t\tcopy(bloque2.Data[:], strAux[25:len(strAux)])\n\t\tWriteBloqueD(mpartition.Path, bloque2, newSB.AptBloques+newSB.TamStrcBloque)\n\t\t//--- (5) Escribir en BitMap ----------------------------------------------------\n\t\tauxBytes := []byte{1}\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapArbolDirectorio)\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapDetalleDirectorio)\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapTablaInodo)\n\t\tauxBytes = append(auxBytes, 1)\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapBloques)\n\t} else {\n\t\tfmt.Println(\"[!] La particion\", idp, \" no se encuentra montada...\")\n\t}\n}",
"func initializeZeroAlloc() *Allocation {\n\talloc := &Allocation{\n\t\tValue: make(map[AllocationType]*Resources),\n\t}\n\n\talloc.Value[TotalAllocation] = ZeroResource\n\talloc.Value[NonPreemptibleAllocation] = ZeroResource\n\talloc.Value[ControllerAllocation] = ZeroResource\n\talloc.Value[PreemptibleAllocation] = ZeroResource\n\talloc.Value[SlackAllocation] = ZeroResource\n\talloc.Value[NonSlackAllocation] = ZeroResource\n\n\treturn alloc\n}",
"func sysAlloc(n uintptr, sysStat *uint64) unsafe.Pointer {\n\tp := memAlloc(nil, n)\n\tif p != nil {\n\t\tmSysStatInc(sysStat, n)\n\t}\n\treturn p\n}",
"func (r *Registry) Alloc(name string, addr ...string) (uint16, error) {\n\n\tr.Lock()\n\tdefer r.Unlock()\n\n\t_, name_taken := r.byname[name]\n\n\tif name_taken {\n\t\treturn 0, fmt.Errorf(\"Name %q is already taken\", name)\n\t}\n\n\tport, err := r.portFind()\n\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tr.createSvc(port, name, addr...)\n\n\treturn port, nil\n}",
"func alloc() uint64 {\n\tvar stats runtime.MemStats\n\truntime.GC()\n\truntime.ReadMemStats(&stats)\n\t// return stats.Alloc - uint64(unsafe.Sizeof(hs[0]))*uint64(cap(hs))\n\treturn stats.Alloc\n}",
"func AssignNewSectors(deadlines *Deadlines, newSectors []uint64, seed abi.Randomness) error {\n\tnextNewSector := uint64(0)\n\n\t// Assigns up to `count` sectors to `deadline` and advances `nextNewSector`.\n\tassignToDeadline := func(count uint64, deadline uint64) error {\n\t\tcountToAdd := min64(count, uint64(len(newSectors))-nextNewSector)\n\t\tsectorsToAdd := newSectors[nextNewSector : nextNewSector+countToAdd]\n\t\terr := deadlines.AddToDeadline(deadline, sectorsToAdd...)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to add %d sectors to deadline %d: %w\", countToAdd, deadline, err)\n\t\t}\n\t\tnextNewSector += countToAdd\n\t\treturn nil\n\t}\n\n\t// Iterate deadlines and fill any partial partitions. There's no great advantage to filling more- or less-\n\t// full ones first, so they're filled in sequence order.\n\t// Meanwhile, record the partition count at each deadline.\n\tdeadlinePartitionCounts := make([]uint64, WPoStPeriodDeadlines)\n\tfor i := uint64(0); i < WPoStPeriodDeadlines && nextNewSector < uint64(len(newSectors)); i++ {\n\t\tpartitionCount, sectorCount, err := DeadlineCount(deadlines, i)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"failed to count sectors in partition %d: %w\", i, err)\n\t\t}\n\t\tdeadlinePartitionCounts[i] = partitionCount\n\n\t\tgap := WPoStPartitionSectors - (sectorCount % WPoStPartitionSectors)\n\t\tif gap != WPoStPartitionSectors {\n\t\t\terr = assignToDeadline(gap, i)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\t// While there remain new sectors to assign, fill a new partition at each deadline in round-robin fashion.\n\t// TODO WPOST (follow-up): fill less-full deadlines first, randomize when equally full.\n\ttargetDeadline := uint64(0)\n\tfor nextNewSector < uint64(len(newSectors)) {\n\t\terr := assignToDeadline(WPoStPartitionSectors, targetDeadline)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttargetDeadline = (targetDeadline + 1) % WPoStPeriodDeadlines\n\t}\n\treturn nil\n}",
"func (db *DB) ReserveEmptyLeaf(accID uint64) (stateID uint64, err error) {\n\tparams, err := db.GetParams()\n\tif err != nil {\n\t\treturn\n\t}\n\tstate, err := db.FindEmptyAndReserve(int(params.MaxDepth), accID)\n\tif err != nil {\n\t\treturn\n\t}\n\treturn core.StringToUint(state.Path)\n}",
"func (gc *Cfg) AllocVLAN(ra core.ResourceManager) (uint, error) {\n\tvlan, err := ra.AllocateResourceVal(gc.Tenant, resources.AutoVLANResource)\n\tif err != nil {\n\t\tlog.Errorf(\"alloc vlan failed: %q\", err)\n\t\treturn 0, err\n\t}\n\n\treturn vlan.(uint), err\n}"
] | [
"0.6531526",
"0.55464417",
"0.5311277",
"0.51301605",
"0.50938255",
"0.50267166",
"0.50223935",
"0.50222445",
"0.49632046",
"0.49364078",
"0.49089208",
"0.486091",
"0.48423472",
"0.48232087",
"0.47796917",
"0.4778974",
"0.47616217",
"0.47598645",
"0.47460467",
"0.47296306",
"0.47235176",
"0.47129008",
"0.47101986",
"0.47042942",
"0.46932656",
"0.4688587",
"0.4682051",
"0.467953",
"0.46673653",
"0.4664344",
"0.46431994",
"0.4638558",
"0.46300656",
"0.46138492",
"0.46125224",
"0.4611861",
"0.45972255",
"0.4579627",
"0.4573026",
"0.45718184",
"0.45708224",
"0.45656615",
"0.4565618",
"0.45625174",
"0.45601743",
"0.45569482",
"0.45557642",
"0.4555706",
"0.45482963",
"0.45364574",
"0.45333877",
"0.45227557",
"0.45162597",
"0.44951388",
"0.44895145",
"0.44880676",
"0.44853318",
"0.4479259",
"0.44771668",
"0.4471711",
"0.44710198",
"0.44669303",
"0.44600192",
"0.44517213",
"0.44467345",
"0.44445089",
"0.444225",
"0.4441116",
"0.44381806",
"0.4436429",
"0.44265997",
"0.4419538",
"0.44174412",
"0.44142956",
"0.4414138",
"0.43912947",
"0.43818465",
"0.43817782",
"0.43771368",
"0.43664375",
"0.43596733",
"0.43499577",
"0.43454638",
"0.43420064",
"0.43389314",
"0.43312097",
"0.43304613",
"0.43298286",
"0.43230513",
"0.4318718",
"0.43184927",
"0.43174487",
"0.43169475",
"0.43162495",
"0.4311015",
"0.43002233",
"0.42972034",
"0.42930597",
"0.42869186",
"0.4280056"
] | 0.798377 | 0 |
get the list of files in the JobInfoDir | func (c *Configuration) JobInfoFiles() ([]string, error) {
	fileInfos, err := ioutil.ReadDir(c.JobInfoDir)
	if err != nil {
		return nil, err
	}
	res := []string{}
	for _, fileInfo := range fileInfos {
		name := fileInfo.Name()
		if strings.HasSuffix(name, ".json") {
			res = append(res, name)
		}
	}
	return res, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func getFileList(dir string) ([]string, error) {\n\tvar fileNames []string\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn fileNames, err\n\t}\n\n\tfor _, file := range files {\n\t\tif !file.IsDir() && filepath.Ext(file.Name()) == \".out\" {\n\t\t\tfileNames = append(fileNames, file.Name())\n\t\t}\n\t}\n\treturn fileNames, nil\n}",
"func getFileList(inputFolder string) []os.FileInfo {\r\n\tlist := make([]os.FileInfo, 0)\r\n\r\n\tfileList, err := ioutil.ReadDir(inputFolder)\r\n\tparseError(err)\r\n\r\n\tfor _, file := range fileList {\r\n\t\tif file.IsDir() == false {\r\n\t\t\tif filepath.Ext(file.Name()) == \".pu\" {\r\n\t\t\t\tlist = append(list, file)\r\n\t\t\t}\r\n\t\t}\r\n\t}\r\n\r\n\treturn list\r\n}",
"func GetInfoFiles(directory string) []string {\n\tvar err error\n\tvar output []string\n\tvar files_slice []os.FileInfo\n\n\tfiles_slice, err = ioutil.ReadDir(directory)\n\tcommon.CheckErr(err)\n\n\tfor _, file := range files_slice {\n\t\tif file.IsDir() {\n\t\t\tjson_path := path.Join(\n\t\t\t\tdirectory,\n\t\t\t\tfile.Name(),\n\t\t\t\tfile.Name()+\".json\",\n\t\t\t)\n\t\t\tif _, err := os.Stat(json_path); err == nil {\n\t\t\t\toutput = append(output, json_path)\n\t\t\t} else if os.IsNotExist(err) {\n\t\t\t\tlog.Printf(\"'%s' doesn't exist.\\n\", json_path)\n\t\t\t} else {\n\t\t\t\tcommon.CheckErr(err)\n\t\t\t}\n\t\t}\n\t}\n\treturn output\n}",
"func (c *Configuration) JobInfoList(\n\tfiles []string) ([]*job_info.JobInfo, error) {\n\n\tres := make([]*job_info.JobInfo, len(files))\n\tfor i := 0; i < len(files); i++ {\n\t\tres[i] = job_info.NewJobInfo()\n\t\terr := res[i].ReadFromFile(c.JobInfoDir + \"/\" + files[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn res, nil\n}",
"func detectResultFiles(job *models.Job) ([]string, error) {\n\n\tfiles, err := ioutil.ReadDir(filepath.Join(job.Resource.URL, \"out\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresults := []string{}\n\tfor _, f := range files {\n\t\tresults = append(results, f.Name())\n\t}\n\n\treturn results, nil\n}",
"func getFilesToProcess() []string {\n\tfileList := []string{}\n\tsingleFileMode = false\n\n\tif dirName == \"\" {\n\t\tif inFileName != \"\" {\n\t\t\t// no Dir name provided, but file name provided =>\n\t\t\t// Single file mode\n\t\t\tsingleFileMode = true\n\t\t\tfileList = append(fileList, inFileName)\n\t\t\treturn fileList\n\t\t} else {\n\t\t\t// no Dir name, no file name\n\t\t\tlog.Println(\"Input file name or working directory is not provided\")\n\t\t\tusage()\n\t\t}\n\t}\n\n\t// We have working directory - takes over single file name, if both provided\n\terr := filepath.Walk(dirName, func(path string, f os.FileInfo, _ error) error {\n\t\tif isCsvFile(path) {\n\t\t\tfileList = append(fileList, path)\n\t\t}\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Println(\"Error getting files list: \", err)\n\t\tos.Exit(-1)\n\t}\n\n\treturn fileList\n}",
"func (l *Location) List() ([]string, error) {\n\n\tvar filenames []string\n\tclient, err := l.fileSystem.Client(l.Authority)\n\tif err != nil {\n\t\treturn filenames, err\n\t}\n\t// start timer once action is completed\n\tdefer l.fileSystem.connTimerStart()\n\n\tfileinfos, err := client.ReadDir(l.Path())\n\tif err != nil {\n\t\tif err == os.ErrNotExist {\n\t\t\treturn filenames, nil\n\t\t}\n\t\treturn filenames, err\n\t}\n\tfor _, fileinfo := range fileinfos {\n\t\tif !fileinfo.IsDir() {\n\t\t\tfilenames = append(filenames, fileinfo.Name())\n\t\t}\n\t}\n\n\treturn filenames, nil\n}",
"func (storage *B2Storage) ListFiles(threadIndex int, dir string) (files []string, sizes []int64, err error) {\n for len(dir) > 0 && dir[len(dir) - 1] == '/' {\n dir = dir[:len(dir) - 1]\n }\n length := len(dir) + 1\n\n includeVersions := false\n if dir == \"chunks\" {\n includeVersions = true\n }\n\n entries, err := storage.clients[threadIndex].ListFileNames(dir, false, includeVersions)\n if err != nil {\n return nil, nil, err\n }\n\n if dir == \"snapshots\" {\n\n subDirs := make(map[string]bool)\n\n for _, entry := range entries {\n name := entry.FileName[length:]\n subDir := strings.Split(name, \"/\")[0]\n subDirs[subDir + \"/\"] = true\n }\n\n for subDir, _ := range subDirs {\n files = append(files, subDir)\n }\n } else if dir == \"chunks\" {\n lastFile := \"\"\n for _, entry := range entries {\n if entry.FileName == lastFile {\n continue\n }\n lastFile = entry.FileName\n if entry.Action == \"hide\" {\n files = append(files, entry.FileName[length:] + \".fsl\")\n } else {\n files = append(files, entry.FileName[length:])\n }\n sizes = append(sizes, entry.Size)\n }\n } else {\n for _, entry := range entries {\n files = append(files, entry.FileName[length:])\n }\n }\n\n return files, sizes, nil\n}",
"func getListOfFiles() []string {\n\tif len(os.Args) > 1 {\n\t\treturn getListOfFilesFromArguments()\n\t}\n\treturn getListOfFilesFromGit()\n}",
"func (o FioSpecPtrOutput) BuiltinJobFiles() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v *FioSpec) []string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.BuiltinJobFiles\n\t}).(pulumi.StringArrayOutput)\n}",
"func (o FioSpecOutput) BuiltinJobFiles() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FioSpec) []string { return v.BuiltinJobFiles }).(pulumi.StringArrayOutput)\n}",
"func getList(baseurl string, resolution string, subdir string) []string {\n\tvar list []string\n\n\t// fetch the list of srtm files\n\turl := baseurl + \"/\" + resolution + \"/\" + subdir\n\n\tresp, err := http.Get(url)\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\troot, err := html.Parse(resp.Body)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tfiles := scrape.FindAll(root, scrape.ByTag(atom.A))\n\n\tfor _, file := range files {\n\t\tfileurl := url + \"/\" + scrape.Attr(file, \"href\")\n\n\t\tif strings.Contains(fileurl, \".zip\") {\n\t\t\t// fmt.Printf(\"%2d %s (%s)\\n\", i, scrape.Text(file), fileurl)\n\t\t\tlist = append(list, fileurl)\n\t\t}\n\t}\n\n\treturn list\n}",
"func ListFilenames(path string) []os.FileInfo {\n Trace.Println(\"listFilenames(\" + path + \")\")\n\n files, err := ioutil.ReadDir(path)\n if err != nil {\n Error.Println(err)\n os.Exit(1)\n }\n\n return files\n}",
"func (pool *PackagePool) FilepathList(progress aptly.Progress) ([]string, error) {\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tdirs, err := os.ReadDir(pool.rootPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tif len(dirs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tif progress != nil {\n\t\tprogress.InitBar(int64(len(dirs)), false, aptly.BarGeneralBuildFileList)\n\t\tdefer progress.ShutdownBar()\n\t}\n\n\tresult := []string{}\n\n\tfor _, dir := range dirs {\n\t\terr = filepath.Walk(filepath.Join(pool.rootPath, dir.Name()), func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !info.IsDir() {\n\t\t\t\tresult = append(result, path[len(pool.rootPath)+1:])\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif progress != nil {\n\t\t\tprogress.AddBar(1)\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func fetchFileListing() ([]string, error) {\n\n\tresp, err := http.Get(rootURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\treturn parseDirectoryListing(resp.Body), nil\n}",
"func get_local_song_info(dir_name string) []string {\n\tinfo_files, err := ioutil.ReadDir(dir_name)\n\tif err != nil {\n\t\tfmt.Println(\"cant read songs\")\n\t\tos.Exit(1)\n\t}\n\n\tsong_info := make([]string, len(info_files))\n\tfor i := 0; i < len(info_files); i++ {\n\t\tif path.Ext(info_files[i].Name()) != \".info\" {\n\t\t\tcontinue\n\t\t}\n\t\tcontent, _ := ioutil.ReadFile(dir_name + \"/\" + info_files[i].Name())\n\t\tsong_info = append(song_info, string(content[:]))\n\t}\n\treturn song_info\n}",
"func getAllFiles(j *job.Job) {\n\tcomplete := make(chan *job.Document, j.DocCount())\n\tfailures := make(chan error, j.DocCount())\n\tfor i := range j.DocList {\n\t\tgo getFile(&j.DocList[i], complete, failures)\n\t}\n\n\twaitForDownloads(j, complete, failures)\n}",
"func (pool *PackagePool) FilepathList(progress aptly.Progress) ([]string, error) {\n\tpool.Lock()\n\tdefer pool.Unlock()\n\n\tdirs, err := ioutil.ReadDir(pool.rootPath)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, err\n\t}\n\n\tif len(dirs) == 0 {\n\t\treturn nil, nil\n\t}\n\n\tif progress != nil {\n\t\tprogress.InitBar(int64(len(dirs)), false)\n\t\tdefer progress.ShutdownBar()\n\t}\n\n\tresult := []string{}\n\n\tfor _, dir := range dirs {\n\t\terr = filepath.Walk(filepath.Join(pool.rootPath, dir.Name()), func(path string, info os.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !info.IsDir() {\n\t\t\t\tresult = append(result, path[len(pool.rootPath)+1:])\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif progress != nil {\n\t\t\tprogress.AddBar(1)\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func getFilesInDir(directory string) ([]os.FileInfo,error) {\n\tentries, err := ioutil.ReadDir(directory)\n\tif err != nil {\n\t\tlog.Printf(\"getFilesInDir(): Error reading directory: %s\",directory)\n\t\treturn nil,err\n\t}\n\tvar files []os.FileInfo\n\tfor _,entry := range entries {\n\t\tif entry.Mode().IsRegular() {\n\t\t\tmatch,_ := regexp.Match(\"f-[0-9]+\",[]byte(entry.Name()))\n\t\t\tif match {\n\t\t\t\tfiles = append(files,entry)\n\t\t\t}\n\t\t}\n\t}\n\treturn files,nil\n}",
"func (statics AssestStruct) GetFileNames(dir string) []string {\n\tnames := make([]string, len(statics.Files))\n\tfor name := range statics.Files {\n\t\tnames = append(names, name)\n\t}\n\treturn names\n}",
"func filesIn(dir string) []string {\n\tfh, err := os.Open(dir)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tfiles, err := fh.Readdirnames(0)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn files\n}",
"func (a *edenScraper) fileList() ([]string, error) {\n\t// Get homepage.\n\tres, err := httpGet(edenHome, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to get homepage: %v\", err)\n\t}\n\tdefer res.Body.Close()\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to read homepage: %v\", err)\n\t}\n\n\t// Parse links.\n\tresult := []string{}\n\tfiles := regexp.MustCompile(\"<a href=\\\"(.*?)\\\"\").FindAllSubmatch(body, -1)\n\tif len(files) == 0 {\n\t\treturn nil, fmt.Errorf(\"Got 0 files.\")\n\t}\n\n\tfor _, file := range files {\n\t\t// All links should end with '.zip'. A change in that condition means\n\t\t// that the homepage had changed.\n\t\tif !bytes.HasSuffix(file[1], []byte(\".zip\")) {\n\t\t\treturn nil, fmt.Errorf(\"Found a link that's not a zip file: %s\",\n\t\t\t\tfile[1])\n\t\t}\n\n\t\tresult = append(result, string(file[1]))\n\t}\n\n\treturn result, nil\n}",
"func listFiles(root string) ([]string, error) {\n var paths = make([]string, 0)\n var walker filepath.WalkFunc = func(path string, info os.FileInfo, err error) error {\n if err != nil {\n return err\n } else {\n if ! info.IsDir() {\n path, _ = filepath.Rel(root, path)\n paths = append(paths, path)\n }\n }\n return nil\n }\n err := filepath.Walk(root, walker)\n\n return paths, err\n}",
"func GetMessageList(dirPath string) ([]os.FileInfo, error){\n dirPath = strings.TrimPrefix(dirPath, \"\\\"\")\n dirPath = strings.TrimRight(dirPath, \"\\\";\")\n files, err := ioutil.ReadDir(dirPath)\n\tif err != nil {\n log.Print(err)\n return nil, err\n }\n sort.Slice(files, func(i,j int) bool{\n return files[i].ModTime().Unix() < files[j].ModTime().Unix()\n })\n return files, err\n}",
"func getFilelist(path string, e *error) []string {\n\tfileV := []string{}\n\tfilepath.Walk(path, func(path string, f os.FileInfo, err error) error {\n\t\tif f == nil {\n\t\t\t*e = err\n\t\t\treturn nil\n\t\t}\n\t\tif f.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tfileV = append(fileV, path)\n\t\treturn nil\n\t})\n\treturn fileV\n}",
"func (f *FileStore) List() ([]string, error) {\n\tkeys := make([]string, 0)\n\tfiles, err := f.filesystem.ReadDir(f.directoryPath)\n\tif err != nil {\n\t\treturn keys, err\n\t}\n\tfor _, f := range files {\n\t\tif !strings.HasPrefix(f.Name(), tmpPrefix) {\n\t\t\tkeys = append(keys, f.Name())\n\t\t}\n\t}\n\treturn keys, nil\n}",
"func List(home string) ([]Stat, error) {\n\terr := ensureDir(home)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tchildren, err := os.ReadDir(home)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar futures []Stat\n\tfor _, child := range children {\n\t\tif !child.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\tname := child.Name()\n\n\t\tf := Open(home, name)\n\t\tcomplete, err := f.isComplete()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfutures = append(futures, Stat{name, complete})\n\t}\n\treturn futures, nil\n}",
"func (m *Maildir) getDirListing(dir string) ([]string, error) {\n\tfilter := \"*\"\n\tsearchPath := path.Join(m.path, dir, filter)\n\tfilePaths, err := filepath.Glob(searchPath)\n\t// remove maildir path so that only key remains\n\tfor i := 0; i < len(filePaths); i++ {\n\t\tfilePaths[i] = strings.Replace(filePaths[i], m.path, \"\", 1)\n\t}\n\treturn filePaths, err\n}",
"func getPolicyFiles(homePath string) ([]os.FileInfo, error) {\n\tres := make([]os.FileInfo, 0, 10)\n\n\tif files, err := ioutil.ReadDir(homePath); err != nil {\n\t\treturn nil, err\n\t} else {\n\t\tfor _, fileInfo := range files {\n\t\t\tif strings.HasSuffix(fileInfo.Name(), \".policy\") && !fileInfo.IsDir() {\n\t\t\t\tres = append(res, fileInfo)\n\t\t\t}\n\t\t}\n\t\treturn res, nil\n\t}\n}",
"func (c *Client) List(path gfs.Path) ([]gfs.PathInfo, error) {\n\tvar reply gfs.ListReply\n\terr := util.Call(c.master, \"Master.RPCList\", gfs.ListArg{path}, &reply)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn reply.Files, nil\n}",
"func getFilelist(path string) ([]os.FileInfo, error) {\n\tf, err := os.Open(path)\n\tdefer f.Close()\n\tif err != nil {\n\t\tlog.Error(\"Error opening directory: \", err)\n\t\treturn nil, err\n\t}\n\n\tfilelist, err := f.Readdir(-1)\n\tif err != nil {\n\t\tlog.Error(\"Error reading directory: \", err)\n\t\treturn nil, err\n\t}\n\n\treturn filelist, nil\n}",
"func (c *Client) List(prefix string, opts ...backend.ListOption) (*backend.ListResult, error) {\n\toptions := backend.DefaultListOptions()\n\tfor _, opt := range opts {\n\t\topt(options)\n\t}\n\n\tif options.Paginated {\n\t\treturn nil, errors.New(\"pagination not supported\")\n\t}\n\n\troot := path.Join(c.pather.BasePath(), prefix)\n\n\tlistJobs := make(chan string)\n\tresults := make(chan listResult)\n\tdone := make(chan struct{})\n\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < c.config.ListConcurrency; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tc.lister(done, listJobs, results)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tdefer func() {\n\t\tclose(done)\n\t\tif c.config.testing {\n\t\t\t// Waiting might be delayed if an early error is encountered but\n\t\t\t// other goroutines are waiting on a long http timeout. Thus, we\n\t\t\t// only wait for each spawned goroutine to exit during testing to\n\t\t\t// assert that no goroutines leak.\n\t\t\twg.Wait()\n\t\t}\n\t}()\n\n\tvar files []string\n\n\t// Pending tracks the number of directories which are pending exploration.\n\t// Invariant: there will be a result received for every increment made to\n\t// pending.\n\tpending := 1\n\tlistJobs <- root\n\n\tfor pending > 0 {\n\t\tres := <-results\n\t\tpending--\n\t\tif res.err != nil {\n\t\t\tif httputil.IsNotFound(res.err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, res.err\n\t\t}\n\t\tvar dirs []string\n\t\tfor _, fs := range res.list {\n\t\t\tp := path.Join(res.dir, fs.PathSuffix)\n\n\t\t\t// TODO(codyg): This is an ugly hack to avoid walking through non-tags\n\t\t\t// during Docker catalog. Ideally, only tags are located in the repositories\n\t\t\t// directory, however in WBU2 HDFS, there are blobs here as well. At some\n\t\t\t// point, we must migrate the data into a structure which cleanly divides\n\t\t\t// blobs and tags (like we do in S3).\n\t\t\tif _ignoreRegex.MatchString(p) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// TODO(codyg): Another ugly hack to speed up catalog performance by stopping\n\t\t\t// early when we hit tags...\n\t\t\tif _stopRegex.MatchString(p) {\n\t\t\t\tp = path.Join(p, \"tags/dummy/current/link\")\n\t\t\t\tfs.Type = \"FILE\"\n\t\t\t}\n\n\t\t\tif fs.Type == \"DIRECTORY\" {\n\t\t\t\t// Flat directory structures are common, so accumulate directories and send\n\t\t\t\t// them to the listers in a single goroutine (as opposed to a goroutine per\n\t\t\t\t// directory).\n\t\t\t\tdirs = append(dirs, p)\n\t\t\t} else {\n\t\t\t\tname, err := c.pather.NameFromBlobPath(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.With(\"path\", p).Errorf(\"Error converting blob path into name: %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfiles = append(files, name)\n\t\t\t}\n\t\t}\n\t\tif len(dirs) > 0 {\n\t\t\t// We cannot send list jobs and receive results in the same thread, else\n\t\t\t// deadlock will occur.\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tc.sendAll(done, dirs, listJobs)\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tpending += len(dirs)\n\t\t}\n\t}\n\n\treturn &backend.ListResult{\n\t\tNames: files,\n\t}, nil\n}",
"func ConfigFileList() ([]string, error) {\n\tccm_path := os.Getenv(\"CCM_REPO_PATH\")\n\tconfig_file_path := ccm_path + \"/ccm_configs/\"\n\t//fmt.Println(config_file_path)\n\tfile_data, err := ioutil.ReadDir(config_file_path)\n\tERRHandler(err, \"read ccm_configs dir\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar ConfigFiles []string\n\tfor _, file_info := range file_data {\n\t\tconfig_file := file_info.Name()\n\t\tConfigFiles = append(ConfigFiles, config_file)\n\t}\n\n\treturn ConfigFiles, nil\n}",
"func listWorker(ctx context.Context) {\n\tdefer utils.Recover()\n\n\tfor j := range jc {\n\t\tlogrus.Infof(\"Start listing job %s.\", j.Path)\n\n\t\terr := listJob(ctx, j)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogrus.Infof(\"Job %s listed.\", j.Path)\n\t}\n}",
"func PluginListRuntimeFiles(fileType string) []string {\n\tfiles := ListRuntimeFiles(fileType)\n\tresult := make([]string, len(files))\n\tfor i, f := range files {\n\t\tresult[i] = f.Name()\n\t}\n\treturn result\n}",
"func dirList(path string) ([]string, error) {\n\tnames := []string{}\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\tlog.Printf(\"Template error: %v\", err)\n\t\treturn names, nil\n\t}\n\tfor _, f := range files {\n\t\tnames = append(names, f.Name())\n\t}\n\treturn names, nil\n}",
"func (c *cloner) listZipFiles() ([]zipFile, error) {\n\tdirEntries, err := os.ReadDir(c.cli.config.cacheDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar zipFiles []zipFile\n\tfor _, entry := range dirEntries {\n\t\text := filepath.Ext(entry.Name())\n\t\tif ext != \".zip\" {\n\t\t\tcontinue\n\t\t}\n\t\tif !strings.HasPrefix(entry.Name(), sampleAppsNamePrefix) {\n\t\t\tcontinue\n\t\t}\n\t\tfi, err := entry.Info()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tname := fi.Name()\n\t\tetag := \"\"\n\t\tparts := strings.Split(name, \"_\")\n\t\tif len(parts) == 2 {\n\t\t\tetag = strings.TrimSuffix(parts[1], ext)\n\t\t}\n\t\tzipFiles = append(zipFiles, zipFile{\n\t\t\tpath: filepath.Join(c.cli.config.cacheDir, name),\n\t\t\tetag: etag,\n\t\t\tmodTime: fi.ModTime(),\n\t\t})\n\t}\n\treturn zipFiles, nil\n}",
"func getFiles(dir string) (files map[string]struct{}, err error) {\n\tfiles = make(map[string]struct{})\n\n\tif dir == \"\" {\n\t\tdir = \"./\"\n\t}\n\n\terr = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {\n\t\tfiles[path] = struct{}{}\n\t\treturn nil\n\t})\n\n\treturn\n}",
"func (n *TreeNode) GetFiles() []string {\n\tn.mutex.RLock()\n\tresult := make([]string, len(n.files))\n\ti := 0\n\tfor name := range n.files {\n\t\tresult[i] = name\n\t\ti++\n\t}\n\tn.mutex.RUnlock()\n\treturn result\n}",
"func (kc *MessageBufferHandle) dirList(path string) ([]string, error) {\n\n\tfiles, err := ioutil.ReadDir(path) // sorted\n\tvar results []string\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar count = 0\n\tkc.bufferList = kc.bufferList[:0]\n\tfor _, f := range files {\n\t\tname := f.Name()\n\n\t\tif name[0:1] == readyPrefix {\n\t\t\tcount++\n\t\t\tkc.appendBuffers(f)\n\t\t\tresults = append(results, name)\n\t\t}\n\t}\n\t//fmt.Println(\"DIRLIST, found=\", count)\n\treturn results, nil\n}",
"func getFilePaths() []string {\n\tvar files []string\n\n\tfor year := startYear; year <= lastYear; year++ {\n\t\tos.MkdirAll(outputRoot+\"/\"+strconv.Itoa(year), 0777)\n\n\t\tstartMonth := 1\n\t\tif year == 2007 {\n\t\t\tstartMonth = 10\n\t\t}\n\t\tendMonth := 12\n\t\tif year == 2015 {\n\t\t\tendMonth = 5\n\t\t}\n\n\t\tfor month := startMonth; month <= endMonth; month++ {\n\t\t\tfilename := fmt.Sprintf(\"/%d/RC_%d-%02d\", year, year, month)\n\n\t\t\tfiles = append(files, filename)\n\t\t}\n\t}\n\n\treturn files\n}",
"func ListFilesIn(dir string) ([]string, error) {\n\tfiles := []string{}\n\tlist, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn files, err\n\t}\n\n\tfor _, fi := range list {\n\t\tif fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfiles = append(files, filepath.Join(dir, fi.Name()))\n\t}\n\n\treturn files, nil\n}",
"func (d *Discovery) listFiles() []string {\n\tvar paths []string\n\tfor _, p := range d.paths {\n\t\tfiles, err := filepath.Glob(p)\n\t\tif err != nil {\n\t\t\tlevel.Error(d.logger).Log(\"msg\", \"Error expanding glob\", \"glob\", p, \"err\", err)\n\t\t\tcontinue\n\t\t}\n\t\tpaths = append(paths, files...)\n\t}\n\treturn paths\n}",
"func ListFiles(rootDir string, predicate FileInfoPredicate) []string {\n\n\tvar files []string\n\n\terr := filepath.Walk(rootDir, func(filePath string, fileInfo os.FileInfo, err error) error {\n\n\t\tif predicate == nil || predicate(fileInfo) {\n\t\t\tfiles = append(files, filePath)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn files\n}",
"func (dt *Tracker) ListJobs() ([]string, error) {\n\treturn dt.processTracker.ListJobs()\n}",
"func listFiles(path string) func(string) []string {\n\treturn func(line string) []string {\n\t\tnames := make([]string, 0)\n\t\tfiles, _ := ioutil.ReadDir(path)\n\t\tfor _, f := range files {\n\t\t\tnames = append(names, f.Name())\n\t\t}\n\t\treturn names\n\t}\n}",
"func getFilesList(dirname, filename string) ([]string, int, error) {\n\tfiles, err := ioutil.ReadDir(dirname)\n\tif err != nil {\n\t\treturn nil, 0, err\n\t}\n\n\ti, cur := 0, 0\n\tvar images []string\n\tvar ext string\n\tfor _, val := range files {\n\t\text = strings.ToLower(path.Ext(val.Name()))\n\t\tif _, ok := img[ext]; ok {\n\t\t\timages = append(images, filepath.Join(dirname, val.Name()))\n\t\t\tif filename == val.Name() {\n\t\t\t\tcur = i\n\t\t\t}\n\t\t\ti++\n\t\t}\n\t}\n\n\treturn images, cur, nil\n}",
"func (fs *Fs) ListFiles(dir string) []string {\n\tvar s []string\n\tfiles, err := ioutil.ReadDir(filepath.Join(fs.Path, dir))\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot list files in %s, %s\", filepath.Join(fs.Path, dir), err.Error())\n\t}\n\tfor _, file := range files {\n\t\tif !file.IsDir() {\n\t\t\ts = append(s, file.Name())\n\t\t}\n\t}\n\treturn s\n}",
"func listFiles(folder string) []string {\n\tvar files []string\n\tfilepath.Walk(folder, func(fp string, info os.FileInfo, err error) error {\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tparts := strings.SplitAfter(fp, fmt.Sprintf(\"%s%c\", folder, os.PathSeparator))\n\t\tnfp := strings.Join(parts[1:], string(os.PathSeparator))\n\t\tfiles = append(files, nfp)\n\t\treturn nil\n\t})\n\n\treturn files\n}",
"func getHistoryFileList(dir string) ([]historyItem, error) {\n\tfileInfos, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thfileList := []historyItem{}\n\tfor _, fi := range fileInfos {\n\t\tif fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\t// another suffix\n\t\t// ex: tiup-history-0.bak\n\t\ti, err := strconv.Atoi((strings.TrimPrefix(fi.Name(), historyPrefix)))\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tfInfo, _ := fi.Info()\n\t\thfileList = append(hfileList, historyItem{\n\t\t\tpath: filepath.Join(dir, fi.Name()),\n\t\t\tindex: i,\n\t\t\tinfo: fInfo,\n\t\t})\n\t}\n\n\tsort.Slice(hfileList, func(i, j int) bool {\n\t\treturn hfileList[i].index > hfileList[j].index\n\t})\n\n\treturn hfileList, nil\n}",
"func listArchive() {\n files, err := ioutil.ReadDir(settings.PATH_ARCHIVE)\n utils.CheckError(err)\n\n fmt.Printf(\"| %s:\\n\", settings.User.Hash)\n for _, file := range files {\n fmt.Println(\"|\", file.Name())\n }\n}",
"func (fc FileCollection) GetActFiles() string {\n\tvar mensj string\n\tvar totalSize int64\n\tmensj += fmt.Sprintf(\"Last request ID: %d\\n\",fc.flid)\n\tfor _,fsize := range fc.fileSizes {\n\t\tdirectory := fmt.Sprintf(\"%s/d-%d\",fc.frandi,fsize)\n\t\tfileList,err := getFilesInDir(directory)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"GetActFiles(): Error listing directory: %s\\n%s\",directory,err.Error())\n\t\t\treturn \"Error getting files information\\n\"\n\t\t} \n\t\tmensj += fmt.Sprintf(\"Files of size: %d, Count: %d\\n\", fsize,len(fileList))\n\t\tfor _,fl := range fileList{\n\t\t\ttotalSize += fl.Size()\n\t\t}\n\t}\n\tmensj += fmt.Sprintf(\"Total size: %d bytes.\\n\",totalSize)\n\treturn mensj\n}",
"func retreiveFiles() ([]string, error) {\n\tfiles := []string{}\n\n\t// walk in targetDir recursively\n\terr := filepath.Walk(targetDir, visitDirs(&files))\n\tif err != nil {\n\t\tnewErrStr := fmt.Sprintf(\"Error while visiting directories: %s\", err.Error())\n\t\terr = errors.New(newErrStr)\n\t\treturn nil, err\n\t}\n\n\treturn files, nil\n}",
"func getFiles(ctx *context) (filesOut []os.FileInfo, errOut error) {\n\tpattern := filepath.Base(*ctx.src)\n\tfiles, err := ioutil.ReadDir(filepath.Dir(*ctx.src))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tfor _, file := range files {\n\t\tif res, err := filepath.Match(strings.ToLower(pattern), strings.ToLower(file.Name())); res {\n\t\t\tif err != nil {\n\t\t\t\terrOut = err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfilesOut = append(filesOut, file)\n\t\t\tctx.estimatesize += uint64(file.Size())\n\t\t\t// fmt.Printf(\"prise en compte de %s\", file.Name())\n\t\t}\n\t}\n\treturn filesOut, nil\n}",
"func getJSONFiles(dir string) (jsonFiles []string, err error) {\r\n\tfileMask := filepath.Join(dir, \"*.json\")\r\n\tjsonFiles, err = filepath.Glob(fileMask)\r\n\tif err != nil {\r\n\t\treturn nil, err\r\n\t}\r\n\treturn jsonFiles, nil\r\n}",
"func (service *Service) ListFiles(repositoryURL, referenceName, username, password string, dirOnly, hardRefresh bool, includedExts []string, tlsSkipVerify bool) ([]string, error) {\n\trepoKey := generateCacheKey(repositoryURL, referenceName, username, password, strconv.FormatBool(tlsSkipVerify), strconv.FormatBool(dirOnly))\n\n\tif service.cacheEnabled && hardRefresh {\n\t\t// Should remove the cache explicitly, so that the following normal list can show the correct result\n\t\tservice.repoFileCache.Remove(repoKey)\n\t}\n\n\tif service.repoFileCache != nil {\n\t\t// lookup the files cache first\n\t\tcache, ok := service.repoFileCache.Get(repoKey)\n\t\tif ok {\n\t\t\tfiles, success := cache.([]string)\n\t\t\tif success {\n\t\t\t\t// For the case while searching files in a repository without include extensions for the first time,\n\t\t\t\t// but with include extensions for the second time\n\t\t\t\tincludedFiles := filterFiles(files, includedExts)\n\t\t\t\treturn includedFiles, nil\n\t\t\t}\n\t\t}\n\t}\n\n\toptions := fetchOption{\n\t\tbaseOption: baseOption{\n\t\t\trepositoryUrl: repositoryURL,\n\t\t\tusername: username,\n\t\t\tpassword: password,\n\t\t\ttlsSkipVerify: tlsSkipVerify,\n\t\t},\n\t\treferenceName: referenceName,\n\t\tdirOnly: dirOnly,\n\t}\n\n\tvar (\n\t\tfiles []string\n\t\terr error\n\t)\n\tif isAzureUrl(options.repositoryUrl) {\n\t\tfiles, err = service.azure.listFiles(context.TODO(), options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tfiles, err = service.git.listFiles(context.TODO(), options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tincludedFiles := filterFiles(files, includedExts)\n\tif service.cacheEnabled && service.repoFileCache != nil {\n\t\tservice.repoFileCache.Add(repoKey, includedFiles)\n\t\treturn includedFiles, nil\n\t}\n\treturn includedFiles, nil\n}",
"func (c Configuration) getListOfFiles(fs afero.Fs, pathList []string) []FileInfo {\n\tlogger := c.logger()\n\tvar filesToMonitor []FileInfo\n\tcompleteListOfPaths := c.getCompleteListOfPaths(pathList)\n\n\tfor _, fullPath := range completeListOfPaths {\n\t\tfullPath := fullPath\n\t\tpkgFile := pkg.NewFile(func(file *pkg.File) {\n\t\t\tfile.Fs, file.Path, file.Logger = fs, fullPath, logger\n\t\t})\n\n\t\tPathFull := \"\"\n\t\tif baseFile, ok := pkgFile.Fs.(*afero.BasePathFs); ok {\n\t\t\tPathFull, _ = baseFile.RealPath(fullPath)\n\t\t}\n\t\tif PathFull == \"\" {\n\t\t\tPathFull = fullPath\n\t\t}\n\t\tlogger.Debug().Msgf(\"file to watch: %v\", PathFull)\n\t\tPathFull, fi := c.resolvePath(PathFull)\n\t\tif PathFull == \"\" {\n\t\t\tcontinue // could not resolve the file. skip for now.\n\t\t}\n\n\t\tswitch mode := fi.Mode(); {\n\t\tcase mode.IsDir():\n\t\t\tlogger.Debug().Msg(\"Path is a dir\")\n\t\t\terr := filepath.Walk(PathFull, func(path string, info os.FileInfo, err error) error {\n\t\t\t\twalkPath, resolvedInfo := c.resolvePath(path)\n\t\t\t\tif walkPath == \"\" {\n\t\t\t\t\treturn nil // path could not be resolved skip for now\n\t\t\t\t}\n\t\t\t\tisDir := resolvedInfo.IsDir()\n\t\t\t\tlogger.Debug().Msgf(\"Path: %v\", path)\n\t\t\t\tfilesToMonitor = append(filesToMonitor, FileInfo{File: path, IsDir: isDir})\n\t\t\t\treturn nil\n\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\tlogger.Error().Err(err).Msgf(\"error walking dir: %v\", PathFull)\n\t\t\t}\n\t\tcase mode.IsRegular():\n\t\t\tlogger.Debug().Msg(\"Path is a file\")\n\t\t\tlogger.Debug().Msgf(\"Path: %v\", PathFull)\n\t\t\tfilesToMonitor = append(filesToMonitor, FileInfo{File: PathFull, IsDir: false})\n\t\tdefault:\n\t\t\tlogger.Debug().Msg(\"Path is a dir\")\n\t\t}\n\t}\n\treturn filesToMonitor\n}",
"func (cfg *Config) ListConfigFiles() []string {\n\tobjects := cfg.Client.ListObjects(configBucket, \"\", true, make(chan struct{}, 1))\n\tresults := []string{}\n\tfor object := range objects {\n\t\tresults = append(results, object.Key)\n\t}\n\treturn results\n}",
"func GetFilesToMonitor(args []string) ([]string, error) {\n\tpos := getFirstSeparatorPos(args)\n\tif pos >= 0 {\n\t\treturn args[0:pos], nil\n\t}\n\treturn nil, fmt.Errorf(\"seperator not found %s\", separatorArg)\n}",
"func readDirectory(dir string) ([]string, error) {\n\tfileinfos, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfiles := make([]string, len(fileinfos))\n\tfor pos, fi := range fileinfos {\n\t\tfiles[pos] = fi.Name()\n\t}\n\treturn files, nil\n}",
"func gatherFiles(root string, cfg Config) ([]string, error) {\n\tfiles := []string{}\n\terr := filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\trel, err := filepath.Rel(root, path)\n\t\tif err != nil {\n\t\t\trel = path\n\t\t}\n\n\t\tswitch rel {\n\t\tcase \".git\":\n\t\t\treturn filepath.SkipDir\n\t\tcase ConfigFileName:\n\t\t\treturn nil\n\t\t}\n\n\t\tif !cfg.shouldExamine(root, path) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif !info.IsDir() {\n\t\t\tfiles = append(files, rel)\n\t\t}\n\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn files, nil\n}",
"func listXmlFiles(dir string) ([]string, error) {\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfilePaths := []string{}\n\tfor _, file := range files {\n\t\tif !file.IsDir() && filepath.Ext(file.Name()) == \".xml\" {\n\t\t\tfilePaths = append(filePaths, dir+\"/\"+file.Name())\n\t\t}\n\t}\n\n\treturn filePaths, nil\n}",
"func listDir(itemsDir string) []string {\n\tf, err := os.Open(itemsDir)\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot open dir \", itemsDir)\n\t}\n\titems, err := f.Readdirnames(0)\n\tif err != nil {\n\t\tlog.Fatal(\"Cannot get list of items from \", itemsDir)\n\t}\n\treturn items\n}",
"func (p *GetService) GetFileList(request string, reply *string) error {\n\tbuffer := make([]byte, 1024)\n\tbuffer, _ = json.Marshal(fileList)\n\t*reply = string(buffer)\n\treturn nil\n}",
"func (s *ShellFileCollector) Files() []string {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tvar files []string\n\tfor file := range s.files {\n\t\tfiles = append(files, file)\n\t}\n\treturn files\n}",
"func ListFileNames(dirPath, prefix, suffix string) ([]string, error) {\n\tf, err := os.Open(dirPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tfileInfos, err := f.Readdir(-1)\n\t_ = f.Close()\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar files []string\n\n\tfor _, info := range fileInfos {\n\t\tname := info.Name()\n\n\t\tif prefix != \"\" && !strings.HasPrefix(name, prefix) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif suffix != \"\" && !strings.HasSuffix(name, suffix) {\n\t\t\tcontinue\n\t\t}\n\n\t\tfiles = append(files, name)\n\t}\n\n\treturn files, nil\n}",
"func (w *Writer) Files() []string { return w.files }",
"func listFiles(dirname string) []string {\n\tf, _ := os.Open(dirname)\n\n\tnames, _ := f.Readdirnames(-1)\n\tf.Close()\n\n\tsort.Strings(names)\n\n\tdirs := []string{}\n\tfiles := []string{}\n\n\t// sort: directories in front of files\n\tfor _, name := range names {\n\t\tpath := filepath.Join(dirname, name)\n\t\tfio, err := os.Lstat(path)\n\n\t\tif nil != err {\n\t\t\tlogger.Warnf(\"Can't read file info [%s]\", path)\n\n\t\t\tcontinue\n\t\t}\n\n\t\tif fio.IsDir() {\n\t\t\t// exclude the .git, .svn, .hg direcitory\n\t\t\tif \".git\" == fio.Name() || \".svn\" == fio.Name() || \".hg\" == fio.Name() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tdirs = append(dirs, name)\n\t\t} else {\n\t\t\t// exclude the .DS_Store directory on Mac OS X\n\t\t\tif \".DS_Store\" == fio.Name() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfiles = append(files, name)\n\t\t}\n\t}\n\n\treturn append(dirs, files...)\n}",
"func (jdn jobDirectoryNode) Info() os.FileInfo {\n\treturn jdn.info\n}",
"func (storage *PublishedStorage) Filelist(prefix string) ([]string, error) {\n\tpaths, _, err := storage.internalFilelist(prefix, true)\n\treturn paths, err\n}",
"func (this *RanServer) listDir(w http.ResponseWriter, serveAll bool, c *context) (size int64, err error) {\n\n\tif !c.exist {\n\t\tsize = Error(w, 404)\n\t\treturn\n\t}\n\n\tif !c.isDir {\n\t\terr = fmt.Errorf(\"Cannot list contents of a non-directory\")\n\t\treturn\n\t}\n\n\tf, err := os.Open(c.absFilePath)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer f.Close()\n\n\tinfo, err := f.Readdir(0)\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Override below if listdirasjson was set\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\n\ttitle := html.EscapeString(path.Base(c.cleanPath))\n\n\tvar files []dirListFiles\n\n\tfor n, i := range info {\n\t\tname := i.Name()\n\t\tif i.IsDir() {\n\t\t\tname += \"/\"\n\t\t}\n\n\t\t// Check if the extension of this file is in the ignore list\n\t\text := filepath.Ext(name)\n\t\tif isStringInCSV(ext, c.ignorefileext) {\n\t\t\tcontinue\n\t\t}\n\n\t\tname = html.EscapeString(name)\n\n\t\t// skip hidden path\n\t\tif !serveAll && strings.HasPrefix(name, \".\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tfileUrl := url.URL{Path: name}\n\n\t\t// write parent dir\n\t\tif n == 0 && c.cleanPath != \"/\" {\n\t\t\tparent := c.parent()\n\n\t\t\t// unescape parent before get it's modification time\n\t\t\tvar parentUnescape string\n\t\t\tparentUnescape, err = url.QueryUnescape(parent)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tvar info os.FileInfo\n\t\t\tinfo, err = os.Stat(filepath.Join(this.config.Root, parentUnescape))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfiles = append(files, dirListFiles{Name: \"[..]\", Url: parent, ModTime: info.ModTime(), IsDir: true})\n\t\t}\n\n\t\tfiles = append(files, dirListFiles{Name: name, Url: fileUrl.String(), Size: i.Size(), ModTime: i.ModTime(), IsDir: i.IsDir(), Mode: i.Mode()})\n\t}\n\n\tdata := dirList{Title: title, Files: files}\n\n\tbuf := bufferPool.Get()\n\tdefer bufferPool.Put(buf)\n\n\tif c.listDirAsJSON {\n\t\t// Return entities as JSON structure\n\t\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\t\tencoded_json, _ := json.Marshal(data)\n\t\tio.WriteString(buf, string(encoded_json))\n\t} else {\n\t\t// Return HTML table\n\t\ttplDirList.Execute(buf, data)\n\t}\n\n\tsize, _ = buf.WriteTo(w)\n\treturn\n}",
"func ListFiles(dir string) []string {\n\tvar out []string\n\terr := filepath.Walk(dir, func(path string, f os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !f.IsDir() {\n\t\t\tout = append(out, path)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlogs.WithFields(logs.Fields{\n\t\t\t\"Directory\": dir,\n\t\t\t\"Error\": err,\n\t\t}).Println(\"Unable to read directory\")\n\t\treturn nil\n\t}\n\treturn out\n}",
"func getMapList() []string {\n\tfiles, err := dry.ListDirFiles(default_map_data_dir)\n\tif err != nil {\n\t\treturn []string{}\n\t} else {\n\t\tfmt.Println(files)\n\t\treturn dry.StringMap(func(s string) string {\n\t\t\treturn strings.Replace(s, path.Ext(s), \"\", 1)\n\t\t}, files)\n\t\t// return files\n\t}\n}",
"func (fs EmbedFs) ListDir(path string) ([]string, error) {\n\tresult := []string{}\n\n\tfor _, entry := range fs.files {\n\t\trootName := filepath.Join(\"/\", entry.name)\n\t\tif strings.HasPrefix(rootName, filepath.Join(path, \"/\")) {\n\t\t\tresult = append(result, entry.name)\n\t\t}\n\t}\n\n\treturn result, nil\n}",
"func (a *Artifact) Files() []string {\n\treturn []string{a.image}\n}",
"func (a Artifacts) List() ([]string, error) {\n\tswitch a.Files.(type) {\n\tcase string:\n\t\t// TODO: Apply glob-pattern here\n\t\treturn []string{a.Files.(string)}, nil\n\tcase []string:\n\t\treturn a.Files.([]string), nil\n\t}\n\n\treturn []string{}, nil\n}",
"func readDir(writer http.ResponseWriter, request *http.Request) {\n\tdirectory := request.FormValue(\"q\")\n\tfiles, err := ioutil.ReadDir(directory)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\tio.WriteString(writer, err.Error())\n\t} else {\n\t\tfilenames := make([]string, len(files))\n\t\tfor i, f := range files {\n\t\t\tfilenames[i] = f.Name()\n\t\t}\n\t\tio.WriteString(writer, \"content of \"+directory+\":\\n\"+strings.Join(filenames, \"\\n\"))\n\t}\n\n}",
"func (s *fsStore) List(typ namespace.Type) []string {\n\tout := []string{}\n\tdir := filepath.Join(s.root, typ.StringLower())\n\tfl, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn out\n\t}\n\tfor _, inf := range fl {\n\t\tout = append(out, inf.Name())\n\t}\n\treturn out\n}",
"func walkDir(dname string) []string {\n\t//list for all found files0\n\tvar fileList []string\n\t// walk files\n\tfilepath.Walk(dname, func(path string, f os.FileInfo, err error) error {\n\t\tif err == nil {\n\t\t\tif f.Mode().IsRegular() {\n\t\t\t\tfileList = append(fileList, path)\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\treturn fileList\n}",
"func (s *Service) ListAll() ([]*basefs.File, error) {\n\tret := []*basefs.File{}\n\n\trootNode := s.megaCli.FS.GetRoot()\n\n\tvar addAll func(*mega.Node, string) // Closure that basically appends entries to local ret\n\taddAll = func(n *mega.Node, pathstr string) {\n\t\tchildren, err := s.megaCli.FS.GetChildren(n)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\t// Add to ret\n\t\tfor _, childNode := range children {\n\t\t\tspath := pathstr + \"/\" + childNode.GetName()\n\t\t\tret = append(ret, File(&MegaPath{Path: spath, Node: childNode}))\n\t\t\tif childNode.GetType() == mega.FOLDER {\n\t\t\t\taddAll(childNode, pathstr+\"/\"+childNode.GetName())\n\t\t\t}\n\t\t}\n\t}\n\n\taddAll(rootNode, \"\")\n\n\treturn ret, nil\n\n}",
"func Files(internalDir string) (files []File, err error) {\n\tofficialTestSuiteDir := filepath.Join(internalDir, \"jsonschematestsuite\", \"testdata\", \"official\")\n\tif _, err := os.Stat(filepath.Join(officialTestSuiteDir, \"tests\")); os.IsNotExist(err) {\n\t\treturn nil, fmt.Errorf(\"missing git submodule, run \\\"git submodule init && git submodule update\\\": %w\", err)\n\t}\n\tfor _, root := range []string{\n\t\tfilepath.Join(internalDir, \"jsonschematestsuite\", \"testdata\"),\n\t\tfilepath.Join(officialTestSuiteDir, \"tests\", \"draft7\"),\n\t} {\n\t\terr = filepath.Walk(root, func(path string, info os.FileInfo, err error) error {\n\t\t\tif path == officialTestSuiteDir {\n\t\t\t\treturn filepath.SkipDir\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif !info.Mode().IsRegular() || filepath.Ext(info.Name()) != \".json\" {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfiles = append(files, File{\n\t\t\t\tName: strings.TrimSuffix(strings.TrimPrefix(path, root+string(os.PathSeparator)), \".json\"),\n\t\t\t\tpath: path,\n\t\t\t})\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\treturn files, err\n}",
"func grabFilesInDir(dirPath string) []string {\n\tvar files []string\n\n\terr := filepath.Walk(dirPath, func(path string, info os.FileInfo, err error) error {\n\t\tif err == nil && isMIDIFile(info.Name()) {\n\t\t\tfilePath := filepath.Clean(path)\n\t\t\tfiles = append(files, filePath)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to walk through dirPath %v with error: %v\", dirPath, err)\n\t}\n\treturn files\n}",
"func (f *FS) List(path string) (folders []string, files []string, err error) {\n\tp := filepath.Join(f.Root, path)\n\tinfo, err := ioutil.ReadDir(p)\n\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfolders = make([]string, 0, len(info))\n\tfiles = make([]string, 0, len(info))\n\n\tfor _, i := range info {\n\t\tif i.IsDir() {\n\t\t\tfolders = append(folders, i.Name())\n\t\t} else {\n\t\t\tfiles = append(files, i.Name())\n\t\t}\n\t}\n\n\treturn folders, files, err\n}",
"func fileList(folder string, fileChannel chan string) error {\n\treturn filepath.Walk(folder, func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil || info.IsDir() {\n\t\t\treturn err\n\t\t}\n\t\tfileChannel <- path\n\t\treturn nil\n\t})\n}",
"func (f *FileList) GetFiles(dir string) []*File {\n\tif files, ok := f.store[dir]; ok {\n\t\treturn files[:]\n\t}\n\treturn nil\n}",
"func (pkg *MCPMPackage) GetFileList() *MCPMFileList {\n\tcht, chte := get(util.DirPathJoin(\"http://curse.com/project\", strconv.FormatUint(pkg.id, 10)))\n\tdefer util.MustClose(cht.Body)\n\tdefer util.Must(chte)\n\tcdoc, cdoce := goquery.NewDocumentFromResponse(cht)\n\tutil.Must(cdoce)\n\tlist := cdoc.Find(\"table.project-file-listing tbody tr\")\n\tif list.Length() == 0 {\n\t\treturn nil\n\t}\n\tfl := &MCPMFileList{make([]*MCPMFile, list.Length())}\n\tfor i := range list.Nodes {\n\t\tn := list.Eq(i)\n\t\ttd := n.Find(\"td\")\n\t\tna := td.Eq(0).Find(\"a\")\n\t\tnt := td.Eq(1)\n\t\tnv := td.Eq(2)\n\t\ttid := na.AttrOr(\"href\", \"\")\n\t\tfl.a[i] = &MCPMFile{tid[strings.LastIndexByte(tid, '/')+1:], na.Text(), nt.Text(), nv.Text()}\n\t}\n\treturn fl\n}",
"func files(d interface{}) ([]namedFile, error) {\n\ts, _, err := unpackDirectory(d)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"argument could not be parsed as *Directory: %w\", err)\n\t}\n\n\tvar res []namedFile\n\tfor i := 0; i < s.NumField(); i++ {\n\t\ttf := s.Type().Field(i)\n\t\tfileTag := tf.Tag.Get(\"file\")\n\t\tif fileTag == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\t_, f, err := unpackFile(s.Field(i).Addr().Interface())\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"file %q could not be parsed as *File: %w\", tf.Name, err)\n\t\t}\n\t\tres = append(res, namedFile{fileTag, f})\n\t}\n\treturn res, nil\n}",
"func (p rProjects) Files(projectID, base string) ([]dir.FileInfo, error) {\n\trql := r.Table(\"project2datadir\").GetAllByIndex(\"project_id\", projectID).EqJoin(\"datadir_id\", r.Table(\"datadirs_denorm\")).Zip()\n\tvar entries []schema.DataDirDenorm\n\tif err := model.DirsDenorm.Qs(p.session).Rows(rql, &entries); err != nil {\n\t\treturn nil, err\n\t}\n\tif len(entries) == 0 {\n\t\t// Nothing was found, treat as invalid project.\n\t\treturn nil, mcerr.ErrNotFound\n\t}\n\tdirlist := &dirList{}\n\treturn dirlist.build(entries, base), nil\n}",
"func getFiles(path string, ext_type string) []string {\n\n\tfiles, err := ioutil.ReadDir(path)\n\tif err != nil {\n\t\t// an error has occured\n\t\tpanic(err)\n\t}\n\n\tvar ext_files []string // create a slice to hold the files\n\t//var i int = 1\t\t\t // create an index for the array\n\tvar ext_sum int = 0 // count the number of files of the ext_type\n\n\tfor _, file := range files {\n\t\tif strings.Contains(string(file.Name()), ext_type) {\n\t\t\text_files = append(ext_files, file)\n\t\t\text_sum++\n\t\t}\n\t}\n\tfmt.Println(\"Number of files added to ext_files: \", ext_sum)\n\n\t// return the slice\n\treturn ext_files\n}",
"func listDirFiles(dir string, suffix []string) (fileAbsPaths []string, err error) {\n\tfiles, err := ioutil.ReadDir(dir)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor _, f := range files {\n\t\t// if is hidden file, continue\n\t\tif checkIsHiddenFile(f.Name()) {\n\t\t\tcontinue\n\t\t}\n\t\tlog.Debug(path.Join(dir, f.Name()))\n\t\tabsPath := path.Join(dir, f.Name())\n\t\t// if is dir, recursive loop to find files\n\t\tif f.IsDir() {\n\t\t\tvar childKeys []string\n\t\t\tchildKeys, err = listDirFiles(absPath, suffix)\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tfileAbsPaths = append(fileAbsPaths, childKeys...)\n\t\t} else {\n\t\t\t// if is file, append to keys\n\t\t\tfileAbsPaths = append(fileAbsPaths, absPath)\n\t\t}\n\t}\n\n\treturn\n}",
"func ListDir(dirPth string, suffix string) (files []string, err error) {\n\tfiles = make([]string, 0, 10)\n\tdir, err := ioutil.ReadDir(dirPth)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsuffix = strings.ToUpper(suffix) //忽略后缀匹配的大小写\n\tfor _, fi := range dir {\n\t\tif fi.IsDir() { // 忽略目录\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasSuffix(strings.ToUpper(fi.Name()), suffix) { //匹配文件\n\t\t\tfiles = append(files, fi.Name())\n\t\t}\n\t}\n\treturn files, nil\n}",
"func GetListOfFiles(client interface{}, prefix, path string) ([]string, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tvar relativePaths []string\n\n\tswitch prefix {\n\tcase \"k8s\":\n\t\tpaths, err := GetListOfFilesFromK8s(client, path, \"f\", \"*\")\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trelativePaths = paths\n\tcase \"s3\":\n\t\tpaths, err := GetListOfFilesFromS3(client, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trelativePaths = paths\n\tcase \"abs\":\n\t\tpaths, err := GetListOfFilesFromAbs(ctx, client, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trelativePaths = paths\n\tcase \"gcs\":\n\t\tpaths, err := GetListOfFilesFromGcs(ctx, client, path)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\trelativePaths = paths\n\tdefault:\n\t\treturn nil, fmt.Errorf(prefix + \" not implemented\")\n\t}\n\n\treturn relativePaths, nil\n}",
"func ListFilesInDirectory(dirname string) []os.FileInfo {\n\tfiles, err := ioutil.ReadDir(dirname)\n\tcheck(\"listdir\", err)\n\treturn files\n}",
"func (utils MockJobLogUtils) ListJobRunFilesWithPrefix(job string, run int, prefix string) ([]string, error) {\n\tfilesWithPrefixes, ok := utils.MockFilesWithPrefix[run]\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"Run number %v not a 1st key in the mock files with prefix map\", run)\n\t}\n\tfilesWithPrefix, ok := filesWithPrefixes[prefix]\n\treturn filesWithPrefix, nil\n}",
"func requestedFilesToRelativePaths(filteredFiles []string) ([]string, error) {\n\tfilterProvided := len(filteredFiles) > 0\n\tfileNamesIncluded := make(map[string]bool)\n\n\tif filterProvided {\n\t\tlog.Printf(\"Limit archive to files: %v\\n\", filteredFiles)\n\n\t\tfor _, name := range filteredFiles {\n\t\t\tfileNamesIncluded[name] = true\n\t\t}\n\t}\n\n\tfilesToArchive, err := files.ListFiles(conf.StorageFolder, true)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif filterProvided {\n\t\tvar filteredFileList []files.File\n\t\tfor _, file := range filesToArchive {\n\t\t\tif fileNamesIncluded[file.Name] {\n\t\t\t\tfilteredFileList = append(filteredFileList, file)\n\t\t\t}\n\t\t}\n\t\tlog.Printf(\"Reduced number of files in archive from %d to %d based on user provided filter.\\n\", len(filesToArchive), len(filteredFileList))\n\t\tfilesToArchive = filteredFileList\n\t}\n\n\t// Convert []File to []string\n\tstrFiles := make([]string, len(filesToArchive))\n\tfor i, file := range filesToArchive {\n\t\tstrFiles[i] = fmt.Sprintf(\"%s/%s\", conf.StorageFolder, file.Name)\n\t}\n\n\treturn strFiles, nil\n}",
"func Entries() []string {\n\treturn rootFileGroup.Entries()\n}",
"func ListDir(type_ string, user string) []map[string]string {\n\tuserDir := c.FileStore + \"/\" + glb.MD5Sum(user)\n\tglb.ReplaceRept(&userDir, \"/\")\n\tuserDir = strings.TrimRight(userDir, \"/\")\n\t\n\t//fmt.Println(userDir)\n\tif glb.FileExist(userDir) {\n\t\tos.Remove(userDir)\n\t}\n\tif !glb.DirExist(userDir) {\n\t\tos.MkdirAll(userDir+\"/plain\", 0775)\n\t\tos.MkdirAll(userDir+\"/secret/cache\", 0775)\n\t\tos.MkdirAll(userDir+\"/plain_share\", 0775)\n\t\tos.MkdirAll(userDir+\"/secret_share\", 0775)\n\t}\n\t\n\tvar (\n\t\tfileList []map[string]string\n\t\tdirToRead string\n\t)\n\tswitch type_ {\n\tcase \"plain\":\n\t\tdirToRead = userDir + \"/plain\"\n\t\tlist, _ := ioutil.ReadDir(dirToRead)\n\t\tfor _, file := range list {\n\t\t\tfileList = append(fileList, map[string]string{\n\t\t\t\t\"name\": file.Name(),\n\t\t\t\t\"size\": glb.ComputeSize(file.Size()),\n\t\t\t\t\"date\": file.ModTime().Format(\"2006-01-02 15:04:05\"),\n\t\t\t\t\"link\": \"\",\n\t\t\t})\n\t\t}\n\t\treturn fileList\n\tcase \"plain_share\":\n\t\tdirToRead = userDir + \"/plain_share\"\n\t\tlist, _ := ioutil.ReadDir(dirToRead)\n\t\tfor _, file := range list {\n\t\t\tfileList = append(fileList, map[string]string{\n\t\t\t\t\"name\": file.Name(),\n\t\t\t\t\"size\": glb.ComputeSize(file.Size()),\n\t\t\t\t\"date\": file.ModTime().Format(\"2006-01-02 15:04:05\"),\n\t\t\t\t\"link\": \"\",\n\t\t\t})\n\t\t}\n\t\treturn fileList\n\tcase \"secret\":\n\t\tdirToRead = userDir + \"/secret\"\n\t\tlist, _ := ioutil.ReadDir(dirToRead)\n\t\tfor _, file := range list {\n\t\t\tfileList = append(fileList, map[string]string{\n\t\t\t\t\"name\": file.Name(),\n\t\t\t\t\"size\": glb.ComputeSize(file.Size()),\n\t\t\t\t\"date\": file.ModTime().Format(\"2006-01-02 15:04:05\"),\n\t\t\t\t\"link\": \"\",\n\t\t\t})\n\t\t}\n\t\treturn fileList\n\t\n\tcase \"secret_share\":\n\t\treturn nil\n\t}\n\treturn nil\n}",
"func (g *gcs) List(ctx context.Context, prefix string) ([]*fs.FileInfo, error) {\n\tvar files []*fs.FileInfo\n\tit := g.bucket.Objects(ctx, &storage.Query{\n\t\tPrefix: prefix,\n\t})\n\tfor {\n\t\tattrs, err := it.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfiles = append(files, fileinfo(attrs))\n\t}\n\treturn files, nil\n}",
"func GetFileList() Resources {\n\tresult := Resources{}\n\tresult.Images = readDirectory(imagesPath)\n\tresult.Sounds = readDirectory(soundsPath)\n\n\treturn result\n}",
"func (d *directory) OrderedDataFileList() []string {\n\treturn d.orderedFileList\n}"
] | [
"0.6738004",
"0.6650834",
"0.65649015",
"0.63972044",
"0.6386861",
"0.63821405",
"0.63751984",
"0.6369589",
"0.6284099",
"0.6223426",
"0.6190669",
"0.6143492",
"0.6128013",
"0.60666806",
"0.60534495",
"0.60130465",
"0.60001355",
"0.5971921",
"0.595284",
"0.5880319",
"0.5857797",
"0.58513343",
"0.5842462",
"0.58192754",
"0.5813625",
"0.5808683",
"0.5794851",
"0.57478017",
"0.5713953",
"0.5670138",
"0.56687486",
"0.56605905",
"0.56603605",
"0.56504595",
"0.5642888",
"0.56303996",
"0.5615253",
"0.5599829",
"0.55960643",
"0.55914456",
"0.5577439",
"0.5556286",
"0.5550016",
"0.5535857",
"0.55333346",
"0.55329764",
"0.5526954",
"0.55269",
"0.55229336",
"0.5501293",
"0.54994667",
"0.54963076",
"0.5491842",
"0.54826623",
"0.5479777",
"0.5476953",
"0.54734164",
"0.54709285",
"0.5468668",
"0.5464817",
"0.5440367",
"0.543896",
"0.54352075",
"0.542748",
"0.54136515",
"0.5412882",
"0.5412868",
"0.5411771",
"0.5411601",
"0.53997713",
"0.5399358",
"0.53964335",
"0.53854537",
"0.53742635",
"0.53669894",
"0.53619474",
"0.53581846",
"0.53551584",
"0.5354535",
"0.53541833",
"0.5350422",
"0.53483266",
"0.5348308",
"0.53464276",
"0.5332403",
"0.53249025",
"0.53209317",
"0.53102976",
"0.53028667",
"0.5300781",
"0.5300314",
"0.5296423",
"0.5289433",
"0.52863765",
"0.52774304",
"0.5271343",
"0.52695286",
"0.5265372",
"0.52624017",
"0.5260736"
] | 0.7517042 | 0 |
get a list of JobInfo instances by reading files | func (c *Configuration) JobInfoList(
files []string) ([]*job_info.JobInfo, error) {
res := make([]*job_info.JobInfo, len(files))
for i := 0; i < len(files); i++ {
res[i] = job_info.NewJobInfo()
err := res[i].ReadFromFile(c.JobInfoDir + "/" + files[i])
if err != nil {
return nil, err
}
}
return res, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Configuration) JobInfoFiles() ([]string, error) {\n\tfileInfos, err := ioutil.ReadDir(c.JobInfoDir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres := []string{}\n\tfor i := 0; i<len(fileInfos); i++ {\n\t\tfileInfo := fileInfos[i]\n\t\tname := fileInfo.Name()\n\t\tif strings.HasSuffix(name, \".json\") {\n\t\t\tres = append(res, name)\n\t\t}\n\t}\n\n\treturn res, nil\n}",
"func ParseTaskFile(filename string) []I_Task {\n r := []I_Task{}\n raw, err := ioutil.ReadFile(filename)\n if err != nil {\n fmt.Println(err.Error())\n return r\n }\n\n var ts []Task\n json.Unmarshal(raw, &ts)\n for i, _ := range ts {\n r = append(r, &ts[i])\n }\n return r\n}",
"func GetAllJobInfo(\n\tctx context.Context,\n\tconn sqlexec.SQLExecutor,\n\tuser string,\n) ([]*JobInfo, error) {\n\tctx = util.WithInternalSourceType(ctx, kv.InternalLoadData)\n\trs, err := conn.ExecuteInternal(ctx,\n\t\t`SELECT\n\t\texpected_status,\n\t\tupdate_time >= DATE_SUB(CURRENT_TIMESTAMP(6), INTERVAL %? SECOND) AS is_alive,\n\t\tend_time,\n\t\tresult_message,\n\t\terror_message,\n\t\tstart_time,\n\n\t\tjob_id,\n\t\tdata_source,\n\t\ttable_schema,\n\t\ttable_name,\n\t\timport_mode,\n\t\tprogress,\n\t\tcreate_user\n\t\tFROM mysql.load_data_jobs\n\t\tWHERE create_user = %?;`,\n\t\tOfflineThresholdInSec, user)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer terror.Call(rs.Close)\n\trows, err := sqlexec.DrainRecordSet(ctx, rs, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tret := make([]*JobInfo, 0, len(rows))\n\tfor _, row := range rows {\n\t\tjobInfo, err := getJobInfo(row)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tret = append(ret, jobInfo)\n\t}\n\n\treturn ret, nil\n}",
"func readJobFile(name string) []byte {\n file, err := os.Open(name)\n if err != nil {\n log.Fatal(err)\n }\n bytes, err := ioutil.ReadAll(file)\n defer file.Close()\n return bytes\n}",
"func LoadJobsFromFile(file string) ([]*JOB.Job, error) {\n\tif buf, fileErr := LoadFile(file); fileErr != nil {\n\t\treturn nil, fileErr\n\t} else if jobConfigs, configErr := LoadJobs(buf); configErr != nil {\n\t\treturn nil, configErr\n\t} else {\n\t\treturn SetDefaults(jobConfigs)\n\t}\n}",
"func readJobFile(name string) []byte {\n\tfile, err := os.Open(name)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tbytes, _ := ioutil.ReadAll(file)\n\tdefer file.Close()\n\treturn bytes\n}",
"func ParseJobFile(filename, mtr string) (jobs Jobs, cksum []byte, err error) {\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn []*Job{}, []byte{}, err\n\t}\n\tdefer f.Close()\n\n\th := sha256.New()\n\tr := io.TeeReader(f, h)\n\n\tjobs, err = ParseJobs(r, mtr)\n\n\treturn jobs, h.Sum(nil), err\n}",
"func listWorker(ctx context.Context) {\n\tdefer utils.Recover()\n\n\tfor j := range jc {\n\t\tlogrus.Infof(\"Start listing job %s.\", j.Path)\n\n\t\terr := listJob(ctx, j)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogrus.Infof(\"Job %s listed.\", j.Path)\n\t}\n}",
"func NewJobInfo(input string) (JobInfo, error) {\n\tvar ji JobInfo\n\terr := xml.Unmarshal([]byte(input), &ji)\n\n\tif err != nil {\n\t\treturn JobInfo{}, err\n\t}\n\n\tdeleteTargets := make(map[int]Job)\n\n\t//Handle extrapolation of pending tasks\n\n\tfor k, p := range ji.PendingJobs.JobList {\n\t\tif DoesJobContainTaskRange(p) {\n\t\t\t//Mark for deletion and substitution\n\t\t\tdeleteTargets[k] = p\n\t\t}\n\t}\n\n\t//If anything comes up as an extrapolatable task list, we need to extrapolate to multiple job entries and remove the original listing.\n\tif len(deleteTargets) > 0 {\n\t\t//Reiterate over collected jobs to cleanup and reconstruct\n\t\tfor k, p := range deleteTargets {\n\n\t\t\tjobs, err := ExtrapolateTasksToJobs(p)\n\n\t\t\tif err != nil {\n\t\t\t\t//We can't do anything with this entry. Just continue along\n\t\t\t\tlog.Error(\"An error occurred trying to extrapolate Task range into JobList\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t//Remove the target Job\n\t\t\tji.PendingJobs.JobList = append(ji.PendingJobs.JobList[:k], ji.PendingJobs.JobList[k+1:]...)\n\n\t\t\t//Append Extrapolated Jobs\n\t\t\tji.PendingJobs.JobList = append(ji.PendingJobs.JobList, jobs...)\n\t\t}\n\n\t\t//Sort the slice after all the shuffling By Job Number and Task IDß\n\t\tsort.Slice(ji.PendingJobs.JobList, func(i, j int) bool {\n\t\t\treturn ji.PendingJobs.JobList[i].JBJobNumber < ji.PendingJobs.JobList[j].JBJobNumber && ji.PendingJobs.JobList[i].Tasks.TaskID < ji.PendingJobs.JobList[j].Tasks.TaskID\n\t\t})\n\t}\n\n\treturn ji, nil\n}",
"func getJobs(url string) (prowJobs, error) {\n\tbody, err := mungerutil.ReadHTTP(url)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tjobs := prowJobs{}\n\terr = json.Unmarshal(body, &jobs)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn jobs, nil\n}",
"func (d *mongoDriver) JobInfo(ctx context.Context) <-chan amboy.JobInfo {\n\tinfos := make(chan amboy.JobInfo)\n\tgo func() {\n\t\tdefer close(infos)\n\t\tq := bson.M{}\n\t\td.modifyQueryForGroup(q)\n\n\t\titer, err := d.getCollection().Find(ctx,\n\t\t\tq,\n\t\t\t&options.FindOptions{\n\t\t\t\tSort: bson.M{\"status.mod_ts\": -1},\n\t\t\t\tProjection: bson.M{\n\t\t\t\t\t\"_id\": 1,\n\t\t\t\t\t\"status\": 1,\n\t\t\t\t\t\"retry_info\": 1,\n\t\t\t\t\t\"time_info\": 1,\n\t\t\t\t\t\"type\": 1,\n\t\t\t\t\t\"version\": 1,\n\t\t\t\t},\n\t\t\t})\n\t\tif err != nil {\n\t\t\tgrip.Warning(message.WrapError(err, message.Fields{\n\t\t\t\t\"message\": \"problem with query\",\n\t\t\t\t\"driver_id\": d.instanceID,\n\t\t\t\t\"service\": \"amboy.queue.mdb\",\n\t\t\t\t\"operation\": \"job info iterator\",\n\t\t\t\t\"is_group\": d.opts.UseGroups,\n\t\t\t\t\"group\": d.opts.GroupName,\n\t\t\t}))\n\t\t\treturn\n\t\t}\n\n\t\tfor iter.Next(ctx) {\n\t\t\tji := ®istry.JobInterchange{}\n\t\t\tif err := iter.Decode(ji); err != nil {\n\t\t\t\tgrip.Warning(message.WrapError(err, message.Fields{\n\t\t\t\t\t\"message\": \"problem decoding job document into interchange job\",\n\t\t\t\t\t\"driver_id\": d.instanceID,\n\t\t\t\t\t\"service\": \"amboy.queue.mdb\",\n\t\t\t\t\t\"operation\": \"job info iterator\",\n\t\t\t\t\t\"is_group\": d.opts.UseGroups,\n\t\t\t\t\t\"group\": d.opts.GroupName,\n\t\t\t\t}))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\td.removeMetadata(ji)\n\t\t\tinfo := amboy.JobInfo{\n\t\t\t\tID: ji.Name,\n\t\t\t\tStatus: ji.Status,\n\t\t\t\tTime: ji.TimeInfo,\n\t\t\t\tRetry: ji.RetryInfo,\n\t\t\t\tType: amboy.JobType{\n\t\t\t\t\tName: ji.Type,\n\t\t\t\t\tVersion: ji.Version,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase infos <- info:\n\t\t\t}\n\n\t\t}\n\t}()\n\n\treturn infos\n}",
"func List(ctx context.Context) (err error) {\n\tif t.Status == constants.TaskStatusCreated {\n\t\t_, err = model.CreateJob(ctx, \"/\")\n\t\tif err != nil {\n\t\t\tlogrus.Panic(err)\n\t\t}\n\n\t\tt.Status = constants.TaskStatusRunning\n\t\terr = t.Save(ctx)\n\t\tif err != nil {\n\t\t\tlogrus.Panic(err)\n\t\t}\n\t}\n\n\t// Traverse already running but not finished object.\n\tp := \"\"\n\tfor {\n\t\to, err := model.NextObject(ctx, p)\n\t\tif err != nil {\n\t\t\tlogrus.Panic(err)\n\t\t}\n\t\tif o == nil {\n\t\t\tbreak\n\t\t}\n\n\t\toc <- o\n\t\tp = o.Key\n\t}\n\n\t// Traverse already running but not finished job.\n\tp = \"\"\n\tfor {\n\t\tj, err := model.NextJob(ctx, p)\n\t\tif err != nil {\n\t\t\tlogrus.Panic(err)\n\t\t}\n\t\tif j == nil {\n\t\t\tbreak\n\t\t}\n\n\t\tjwg.Add(1)\n\t\tjc <- j\n\t\tp = j.Path\n\t}\n\n\treturn\n}",
"func (f *fileScorer) fileObjInfos() []ObjectInfo {\n\tres := make([]ObjectInfo, 0, f.queue.Len())\n\te := f.queue.Front()\n\tfor e != nil {\n\t\tqfile := e.Value.(queuedFile)\n\t\tres = append(res, ObjectInfo{\n\t\t\tName: qfile.name,\n\t\t\tSize: int64(qfile.size),\n\t\t\tVersionID: qfile.versionID,\n\t\t})\n\t\te = e.Next()\n\t}\n\treturn res\n}",
"func (m *Synchronization) GetJobs()([]SynchronizationJobable) {\n val, err := m.GetBackingStore().Get(\"jobs\")\n if err != nil {\n panic(err)\n }\n if val != nil {\n return val.([]SynchronizationJobable)\n }\n return nil\n}",
"func ParseMachineFile(filename string) []I_Machine {\n r := []I_Machine{}\n raw, err := ioutil.ReadFile(filename)\n if err != nil {\n fmt.Println(err.Error())\n return r\n }\n\n var ms []Machine\n json.Unmarshal(raw, &ms)\n for i, _ := range ms {\n r = append(r, &ms[i])\n }\n return r\n}",
"func Jobs(nomad *NomadServer) ([]Job, int, error) {\n\tjobs := make([]Job, 0)\n\tstatus, err := decodeJSON(url(nomad)+\"/v1/jobs\", &jobs)\n\treturn jobs, status, err\n}",
"func LoadJobs(buf []byte) ([]CFG.JobConfig, error) {\n\tconfigs := []CFG.JobConfig{}\n\tif err := yaml.Unmarshal(buf, &configs); err != nil {\n\t\treturn nil, err\n\t}\n\treturn configs, nil\n}",
"func (e *executor) parse(file string) (*jobInfo, error) {\n\tc, err := job.ReadConfig(file)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tuseTelemetry := c.Telemetry && e.statsdClient != nil\n\n\tstart := time.Now()\n\tvar (\n\t\tf flunc.Flunc\n\t\tevents *telemetry.EventStore\n\t)\n\n\tif useTelemetry {\n\t\tf, events, err = telemetry.Instrument(c)\n\t} else {\n\t\tf, err = c.ExecutionTree()\n\t}\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tstop := time.Now()\n\tlog.Println(\"job preparation took\", stop.Sub(start))\n\n\treturn &jobInfo{\n\t\tfile: file,\n\t\tc: c,\n\t\tf: f,\n\n\t\ttelemetry: useTelemetry,\n\t\tevents: events,\n\t}, nil\n}",
"func (c *Client) GetJobs() ([]*Job, error) {\n\tclient := &http.Client{}\n\turl := fmt.Sprintf(\"http://%s/api/v1/jobs\", c.options.ServerAddr)\n\treq, err := http.NewRequest(\"GET\", url, nil)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn nil, err\n\t}\n\tjobs := []*Job{}\n\tif err := json.Unmarshal(body, jobs); err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn nil, err\n\t}\n\treturn jobs, nil\n}",
"func (c CacheReader) GetJobs(ctx context.Context) ([]models.Item, error) {\n\tvar items []models.Item\n\n\terr := c.cache.Once(&cache.Item{\n\t\tKey: \"jobs\",\n\t\tValue: &items,\n\t\tTTL: c.cfg.CacheTimout,\n\t\tDo: func(*cache.Item) (interface{}, error) {\n\t\t\ti, err := c.reader.GetJobs(ctx)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"cache\")\n\t\t\t}\n\t\t\treturn i, nil\n\t\t},\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn items, nil\n}",
"func processFile(index int, jobs <-chan string, wg *sync.WaitGroup) {\n\tglog.Infof(\"started worker %d\", index)\n\tdefer wg.Done()\n\tfor filename := range jobs {\n\t\tglog.Infof(\"worker %d: processing %s\", index, filename)\n\t\tdocument, err := ofx.NewDocumentFromXML(filename)\n\t\tif err != nil {\n\t\t\tglog.Errorf(\"worker %d: error processing %s - %s\", index, filename, err)\n\t\t}\n\t\t// TODO: do something with the parsed document.\n\t\tglog.Infof(\"%v\", document)\n\t}\n\tglog.Infof(\"shutting down worker %d\", index)\n}",
"func (k *Kube) ListJobs(ctx context.Context, in *ListJobsInput) (out *ListJobsOutput, err error) {\n\tif err = k.checkInput(ctx, in); err != nil {\n\t\treturn nil, err\n\t}\n\n\t//List Jobs\n\tjobs := &jobs{}\n\terr = k.visor.ListResources(ctx, kubevisor.ResourceTypeJobs, jobs, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Get Events\n\tevents := &events{}\n\terr = k.visor.ListResources(ctx, kubevisor.ResourceTypeEvents, events, nil, []string{\"involvedObject.kind=Job,reason=FailedCreate\"})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//Get Pods\n\tpods := &pods{}\n\terr = k.visor.ListResources(ctx, kubevisor.ResourceTypePods, pods, nil, nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t//get jobs and investigate\n\tout = &ListJobsOutput{}\n\tmapping := map[types.UID]*ListJobItem{}\n\tfor _, job := range jobs.Items {\n\t\tif len(job.Spec.Template.Spec.Containers) != 1 {\n\t\t\tk.logs.Debugf(\"skipping job '%s' in namespace '%s' as it has not just 1 container\", job.Name, job.Namespace)\n\t\t\tcontinue\n\t\t}\n\n\t\tc := job.Spec.Template.Spec.Containers[0]\n\t\titem := &ListJobItem{\n\t\t\tName: job.GetName(),\n\t\t\tImage: c.Image,\n\t\t\tCreatedAt: job.CreationTimestamp.Local(),\n\t\t\tDetails: JobDetails{},\n\t\t}\n\n\t\tif parr := job.Spec.Parallelism; parr != nil {\n\t\t\titem.Details.Parallelism = *parr\n\t\t}\n\n\t\tif dt := job.GetDeletionTimestamp(); dt != nil {\n\t\t\titem.DeletedAt = dt.Local() //mark as deleting\n\t\t}\n\n\t\tif job.Status.StartTime != nil {\n\t\t\titem.ActiveAt = job.Status.StartTime.Local()\n\t\t}\n\n\t\tfor _, dataset := range job.Spec.Template.Spec.Volumes {\n\t\t\tif dataset.FlexVolume != nil {\n\t\t\t\tif dataset.FlexVolume.Options[\"input/dataset\"] != \"\" {\n\t\t\t\t\titem.Input = append(item.Input, dataset.FlexVolume.Options[\"input/dataset\"])\n\t\t\t\t}\n\t\t\t\tif dataset.FlexVolume.Options[\"output/dataset\"] != \"\" {\n\t\t\t\t\titem.Output = append(item.Output, dataset.FlexVolume.Options[\"output/dataset\"])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\tfor _, cond := range job.Status.Conditions {\n\t\t\tif cond.Status != corev1.ConditionTrue {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tswitch cond.Type {\n\t\t\tcase batchv1.JobComplete:\n\t\t\t\titem.CompletedAt = cond.LastTransitionTime.Local()\n\t\t\tcase batchv1.JobFailed:\n\t\t\t\titem.FailedAt = cond.LastTransitionTime.Local()\n\t\t\t}\n\t\t}\n\t\titem.Memory = job.Spec.Template.Spec.Containers[0].Resources.Requests.Memory().MilliValue()\n\t\titem.VCPU = job.Spec.Template.Spec.Containers[0].Resources.Requests.Cpu().MilliValue()\n\n\t\tmapping[job.UID] = item\n\t\tout.Items = append(out.Items, item)\n\t}\n\n\t//map events to jobs\n\tfor _, ev := range events.Items {\n\t\t_, ok := mapping[ev.InvolvedObject.UID]\n\t\tif ok { //event for one of our jobs\n\t\t\tmapping[ev.InvolvedObject.UID].Details.FailedCreateEvents = append(\n\t\t\t\tmapping[ev.InvolvedObject.UID].Details.FailedCreateEvents,\n\t\t\t\tJobEvent{Message: ev.Message},\n\t\t\t)\n\t\t}\n\t}\n\n\t//map pods to jobs\n\tfor _, pod := range pods.Items {\n\t\tuid, ok := pod.Labels[\"controller-uid\"]\n\t\tif !ok {\n\t\t\tcontinue //not part of a controller\n\t\t}\n\n\t\tjobItem, ok := mapping[types.UID(uid)]\n\t\tif !ok {\n\t\t\tcontinue //not part of any job\n\t\t}\n\n\t\t//technically we can have multiple pods per job (one terminating, unknown etc) so we pick the\n\t\t//one that is created most recently to base our details on\n\t\tif pod.CreationTimestamp.Local().After(jobItem.Details.SeenAt) {\n\t\t\tjobItem.Details.SeenAt 
= pod.CreationTimestamp.Local() //this pod was created after previous pod\n\t\t} else {\n\t\t\tcontinue //this pod was created before the other one in the item, ignore\n\t\t}\n\n\t\t//the pod phase allows us to distinguish between Pending and Running\n\t\tswitch pod.Status.Phase {\n\t\tcase corev1.PodPending:\n\t\t\tjobItem.Details.Phase = JobDetailsPhasePending\n\t\tcase corev1.PodRunning:\n\t\t\tjobItem.Details.Phase = JobDetailsPhaseRunning\n\t\tcase corev1.PodFailed:\n\t\t\tjobItem.Details.Phase = JobDetailsPhaseFailed\n\t\tcase corev1.PodSucceeded:\n\t\t\tjobItem.Details.Phase = JobDetailsPhaseSucceeded\n\t\tdefault:\n\t\t\tjobItem.Details.Phase = JobDetailsPhaseUnknown\n\t\t}\n\n\t\tfor _, cond := range pod.Status.Conditions {\n\t\t\t//onschedulable is a reason for being pending\n\t\t\tif cond.Type == corev1.PodScheduled {\n\t\t\t\tif cond.Status == corev1.ConditionFalse {\n\t\t\t\t\tif cond.Reason == corev1.PodReasonUnschedulable {\n\t\t\t\t\t\t// From src: \"PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler\n\t\t\t\t\t\t// can't schedule the pod right now\"\n\t\t\t\t\t\tjobItem.Details.UnschedulableReason = \"NotYetSchedulable\" //special case\n\t\t\t\t\t\tjobItem.Details.UnschedulableMessage = cond.Message\n\t\t\t\t\t} else {\n\t\t\t\t\t\tjobItem.Details.UnschedulableReason = cond.Reason\n\t\t\t\t\t\tjobItem.Details.UnschedulableMessage = cond.Message\n\t\t\t\t\t}\n\n\t\t\t\t\t//NotScheduled\n\n\t\t\t\t} else if cond.Status == corev1.ConditionTrue {\n\t\t\t\t\tjobItem.Details.Scheduled = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t//container conditions allow us to capture ErrImageNotFound\n\t\tfor _, cstatus := range pod.Status.ContainerStatuses {\n\t\t\tif cstatus.Name != \"main\" { //we only care about the main container\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t//waiting reasons give us ErrImagePull/Backoff\n\t\t\tif cstatus.State.Waiting != nil {\n\t\t\t\tjobItem.Details.WaitingReason = cstatus.State.Waiting.Reason\n\t\t\t\tjobItem.Details.WaitingMessage = cstatus.State.Waiting.Message\n\t\t\t}\n\n\t\t\tif cstatus.State.Terminated != nil {\n\t\t\t\tjobItem.Details.TerminatedReason = cstatus.State.Terminated.Reason\n\t\t\t\tjobItem.Details.TerminatedMessage = cstatus.State.Terminated.Message\n\t\t\t\tjobItem.Details.TerminatedExitCode = cstatus.State.Terminated.ExitCode\n\t\t\t}\n\t\t}\n\t}\n\n\treturn out, nil\n}",
"func NewListFromFile(path string) (List, error) {\n\tlist := make(List, 0)\n\n\tfileBytes, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsc := bufio.NewScanner(bytes.NewReader(fileBytes))\n\n\tsc.Scan() // this call to Scan() removes the header from the input file.\n\tfor sc.Scan() {\n\t\tvar id, arrival, burst, priority int\n\n\t\t_, err := fmt.Sscanf(sc.Text(), \"%d %d %d %d\",\n\t\t\t&id, &arrival, &burst, &priority)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tlist = append(list, &Process{id, arrival, burst, priority, 0, 0, 0})\n\t}\n\n\treturn list, nil\n}",
"func (c *aITrainingJobs) List(opts metav1.ListOptions) (result *v1.AITrainingJobList, err error) {\n\tvar timeout time.Duration\n\tif opts.TimeoutSeconds != nil {\n\t\ttimeout = time.Duration(*opts.TimeoutSeconds) * time.Second\n\t}\n\tresult = &v1.AITrainingJobList{}\n\terr = c.client.Get().\n\t\tNamespace(c.ns).\n\t\tResource(\"aitrainingjobs\").\n\t\tVersionedParams(&opts, scheme.ParameterCodec).\n\t\tTimeout(timeout).\n\t\tDo().\n\t\tInto(result)\n\treturn\n}",
"func (a *apiServer) ListJob(request *pps.ListJobRequest, resp pps.API_ListJobServer) (retErr error) {\n\tfilterJob, err := newMessageFilterFunc(request.GetJqFilter(), request.GetProjects())\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"error creating message filter function\")\n\t}\n\n\tctx := resp.Context()\n\tpipeline := request.GetPipeline()\n\tif pipeline != nil {\n\t\tensurePipelineProject(pipeline)\n\t\t// If 'pipeline is set, check that caller has access to the pipeline's\n\t\t// output repo; currently, that's all that's required for ListJob.\n\t\t//\n\t\t// If 'pipeline' isn't set, then we don't return an error (otherwise, a\n\t\t// caller without access to a single pipeline's output repo couldn't run\n\t\t// `pachctl list job` at all) and instead silently skip jobs where the user\n\t\t// doesn't have access to the job's output repo.\n\t\tif err := a.env.AuthServer.CheckRepoIsAuthorized(ctx, &pfs.Repo{Type: pfs.UserRepoType, Project: pipeline.Project, Name: pipeline.Name}, auth.Permission_PIPELINE_LIST_JOB); err != nil && !auth.IsErrNotActivated(err) {\n\t\t\treturn errors.EnsureStack(err)\n\t\t}\n\t}\n\n\tnumber := request.Number\n\t// If number is not set, return all jobs that match the query\n\tif number == 0 {\n\t\tnumber = math.MaxInt64\n\t}\n\t// pipelineVersions holds the versions of pipelines that we're interested in\n\tpipelineVersions := make(map[string]bool)\n\tif err := ppsutil.ListPipelineInfo(ctx, a.pipelines, pipeline, request.GetHistory(),\n\t\tfunc(ptr *pps.PipelineInfo) error {\n\t\t\tpipelineVersions[ppsdb.VersionKey(ptr.Pipeline, ptr.Version)] = true\n\t\t\treturn nil\n\t\t}); err != nil {\n\t\treturn err\n\t}\n\n\tjobs := a.jobs.ReadOnly(ctx)\n\tjobInfo := &pps.JobInfo{}\n\t_f := func(string) error {\n\t\tif number == 0 {\n\t\t\treturn errutil.ErrBreak\n\t\t}\n\t\tif request.PaginationMarker != nil {\n\t\t\tcreatedAt := time.Unix(int64(jobInfo.Created.GetSeconds()), int64(jobInfo.Created.GetNanos())).UTC()\n\t\t\tfromTime := time.Unix(int64(request.PaginationMarker.GetSeconds()), int64(request.PaginationMarker.GetNanos())).UTC()\n\t\t\tif createdAt.Equal(fromTime) || !request.Reverse && createdAt.After(fromTime) || request.Reverse && createdAt.Before(fromTime) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tif request.GetDetails() {\n\t\t\tif err := a.getJobDetails(ctx, jobInfo); err != nil {\n\t\t\t\tif auth.IsErrNotAuthorized(err) {\n\t\t\t\t\treturn nil // skip job--see note at top of function\n\t\t\t\t}\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif len(request.GetInputCommit()) > 0 {\n\t\t\t// Only include the job if it's in the set of intersected commitset IDs\n\t\t\tcommitsets, err := a.intersectCommitSets(ctx, request.GetInputCommit())\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, ok := commitsets[jobInfo.Job.Id]; !ok {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\tif !pipelineVersions[ppsdb.VersionKey(jobInfo.Job.Pipeline, jobInfo.PipelineVersion)] {\n\t\t\treturn nil\n\t\t}\n\n\t\tif ok, err := filterJob(ctx, jobInfo); err != nil {\n\t\t\treturn errors.Wrap(err, \"error filtering job\")\n\t\t} else if !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\t// Erase any AuthToken - this shouldn't be returned to anyone (the workers\n\t\t// won't use this function to get their auth token)\n\t\tjobInfo.AuthToken = \"\"\n\n\t\tif err := resp.Send(jobInfo); err != nil {\n\t\t\treturn errors.Wrap(err, \"error sending job\")\n\t\t}\n\t\tnumber--\n\t\treturn nil\n\t}\n\topts := &col.Options{Target: col.SortByCreateRevision, Order: col.SortDescend}\n\tif 
request.Reverse {\n\t\topts.Order = col.SortAscend\n\t}\n\tif pipeline != nil {\n\t\terr = jobs.GetByIndex(ppsdb.JobsPipelineIndex, ppsdb.JobsPipelineKey(pipeline), jobInfo, opts, _f)\n\t} else {\n\t\terr = jobs.List(jobInfo, opts, _f)\n\t}\n\tif err != nil && err != errutil.ErrBreak {\n\t\tif errors.Is(err, context.DeadlineExceeded) {\n\t\t\treturn status.Error(codes.DeadlineExceeded, err.Error())\n\t\t}\n\t\treturn errors.EnsureStack(err)\n\t}\n\treturn nil\n}",
"func (dt *Tracker) ListJobs() ([]string, error) {\n\treturn dt.processTracker.ListJobs()\n}",
"func (js *JobSession) GetJobs(filter drmaa2interface.JobInfo) ([]drmaa2interface.Job, error) {\n\tvar joblist []drmaa2interface.Job\n\n\tfor _, tracker := range js.tracker {\n\t\tjobs, err := tracker.ListJobs()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfor _, jobid := range jobs {\n\t\t\tif jinfo, err := tracker.JobInfo(jobid); err != nil {\n\t\t\t\tcontinue\n\t\t\t} else {\n\t\t\t\tif d2hlp.JobInfoMatches(jinfo, filter) {\n\t\t\t\t\t// TODO get template from DB\n\t\t\t\t\tjobtemplate := drmaa2interface.JobTemplate{}\n\n\t\t\t\t\tjob := newJob(jobid, js.name, jobtemplate, tracker)\n\t\t\t\t\tjoblist = append(joblist, drmaa2interface.Job(job))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn joblist, nil\n}",
"func ReadInfoFiles(\n\tfilePathPrefix string,\n\tnamespace ident.ID,\n\tshard uint32,\n\treaderBufferSize int,\n\tdecodingOpts msgpack.DecodingOptions,\n) []schema.IndexInfo {\n\tvar indexEntries []schema.IndexInfo\n\tdecoder := msgpack.NewDecoder(decodingOpts)\n\tforEachInfoFile(filePathPrefix, namespace, shard, readerBufferSize, func(_ string, data []byte) {\n\t\tdecoder.Reset(msgpack.NewDecoderStream(data))\n\t\tinfo, err := decoder.DecodeIndexInfo()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tindexEntries = append(indexEntries, info)\n\t})\n\treturn indexEntries\n}",
"func (c *Client) Jobs(ctx context.Context) *JobIterator {\n\tit := &JobIterator{\n\t\tctx: ctx,\n\t\tc: c,\n\t\tProjectID: c.projectID,\n\t}\n\tit.pageInfo, it.nextFunc = iterator.NewPageInfo(\n\t\tit.fetch,\n\t\tfunc() int { return len(it.items) },\n\t\tfunc() interface{} { b := it.items; it.items = nil; return b })\n\treturn it\n}",
"func ParseFile(filename string) []interface{} {\n content, _ := ioutil.ReadFile(filename)\n return ParseText(content)\n}",
"func (c FlinkRestClient) RetrieveJobs() ([]Job, error) {\n\treq, err := c.newRequest(\"GET\", c.constructURL(\"jobs/overview\"), nil)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tres, err := c.Client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer res.Body.Close()\n\n\tbody, err := ioutil.ReadAll(res.Body)\n\tif err != nil {\n\t\treturn []Job{}, err\n\t}\n\n\tif res.StatusCode != 200 {\n\t\treturn []Job{}, fmt.Errorf(\"Unexpected response status %v with body %v\", res.StatusCode, string(body[:]))\n\t}\n\n\tretrieveJobsResponse := retrieveJobsResponse{}\n\terr = json.Unmarshal(body, &retrieveJobsResponse)\n\tif err != nil {\n\t\treturn []Job{}, fmt.Errorf(\"Unable to parse API response as valid JSON: %v\", string(body[:]))\n\t}\n\n\treturn retrieveJobsResponse.Jobs, nil\n}",
"func add_jobs(fileNames []string, colID int, wantMedian bool, jobs chan<- job,\n result chan<- stat) {\n for _, name := range fileNames {\n jobs <- job{name, colID, wantMedian, result}\n }\n close(jobs)\n}",
"func (c *ci) Jobs() map[string]*job {\n\treturn c.jobs\n}",
"func LoadMany(db *bolt.DB, jobIds []string) (jobs []*Job, err error) {\n\terr = db.View(func(tx *bolt.Tx) error {\n\t\tjobsBucket := tx.Bucket([]byte(\"jobs\"))\n\t\tif jobsBucket == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tfor _, jobId := range jobIds {\n\t\t\tjobJson := jobsBucket.Get([]byte(jobId))\n\t\t\tif jobJson == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tjob := &Job{\n\t\t\t\tSubscriptionsMu: &sync.Mutex{},\n\t\t\t}\n\t\t\tif err := json.Unmarshal(jobJson, job); err != nil {\n\t\t\t\tlog.Warningf(\"DB contained job with id %q, but could not parse: %v\", err)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tfor _, j := range job.Messages {\n\t\t\t\tj.JobId = job.JobId\n\t\t\t}\n\n\t\t\tfor _, sess := range job.Sessions {\n\t\t\t\tsort.Slice(sess.EventNumber, func(i, j int) bool {\n\t\t\t\t\treturn sess.EventNumber[i] < sess.EventNumber[j]\n\t\t\t\t})\n\t\t\t\tsort.Ints(sess.Character)\n\t\t\t}\n\t\t\tjobs = append(jobs, job)\n\t\t}\n\t\treturn nil\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn jobs, nil\n}",
"func add_jobs(fileNames []string, colID int, jobs chan<- job,\n result chan<- column) {\n for _, name := range fileNames {\n jobs <- job{name, colID, result}\n }\n close(jobs)\n}",
"func read() []Data {\n\tvar cacheData []Data\n\n\tcachePath, err := filepath.Abs(filepath.Dir(os.Args[0]))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tcachePath = filepath.Join(cachePath, \"mtl-cache.bin\")\n\tcacheFile, err := os.Open(cachePath)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\t// handle the case where the file doesn't exist\n\t\tcacheFile, err = os.Create(cachePath)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t\tdefer cacheFile.Close()\n\n\t\tenc := gob.NewEncoder(cacheFile)\n\t\tif err := enc.Encode(cacheData); err != nil {\n\t\t\tlog.Fatalf(\"Cache creation failed: %v\", err)\n\t\t}\n\n\t\treturn cacheData\n\t} else if err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer cacheFile.Close()\n\n\tdec := gob.NewDecoder(cacheFile)\n\tif err := dec.Decode(&cacheData); err != nil {\n\t\tlog.Fatal(err)\n\t}\n\treturn cacheData\n}",
"func (feeder *FileFeed) Read(files []string) ([]entity.Input, error) {\n\tinputs := make([]entity.Input, len(files))\n\tfor i, file := range files {\n\t\tlogger.Info(fmt.Sprintf(\"reading fixture: %s\", file))\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn inputs, err\n\t\t}\n\t\text := filepath.Ext(file)\n\t\tinput := entity.Input{\n\t\t\tFilename: extractFilename(file),\n\t\t\tType: ext,\n\t\t\tData: f,\n\t\t}\n\t\tinputs[i] = input\n\t}\n\treturn inputs, nil\n}",
"func listJobs(c *config) []string {\n\turl := fmt.Sprintf(jobsURL, c.Jenkins.Url)\n\tcode, b := jenkinsGet(url, c.Jenkins.User, c.Jenkins.Password, c.Jenkins.Verify)\n\n\tif code != 200 {\n\t\tlog.Fatalf(\"List jobs: response code: %d\", code)\n\t}\n\n\tjobs := Jobs{}\n\tres := []string{}\n\n\te := json.Unmarshal(b, &jobs)\n\tlogFatal(\"List jobs: json\", e)\n\n\tfor _, j := range jobs.Jobs {\n\t\tres = append(res, j.Name)\n\t}\n\n\treturn res\n}",
"func (p *SqliteProvider) GetByMultiJobID(jid string) ([]*models.Job, error) {\n\tvar res []*jobSqlite\n\terr := p.db.Select(&res, \"SELECT * FROM jobs WHERE multi_job_id=? ORDER BY DATETIME(started_at) DESC, jid\", jid)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn convertJobs(res), nil\n}",
"func readFile(file []byte, jobs chan<- LineRecord) {\n\n\tlineNumber := 1\n\tscanner := bufio.NewScanner(bytes.NewReader(file))\n\tfor scanner.Scan() {\n\t\tjobs <- LineRecord{\n\t\t\tcontent: scanner.Text(),\n\t\t\tlineNumber: lineNumber,\n\t\t}\n\t\tlineNumber += 1\n\t}\n\n\t// We are done with the file, release the channel\n\tclose(jobs)\n}",
"func loadNodeRunJobInfo(db gorp.SqlExecutor, jobID int64) ([]sdk.SpawnInfo, error) {\n\tres := []struct {\n\t\tBytes sql.NullString `db:\"spawninfos\"`\n\t}{}\n\tquery := \"SELECT spawninfos FROM workflow_node_run_job_info WHERE workflow_node_run_job_id = $1\"\n\tif _, err := db.Select(&res, query, jobID); err != nil {\n\t\tif err == sql.ErrNoRows {\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, sdk.WrapError(err, \"cannot QueryRow\")\n\t}\n\n\tspawnInfos := []sdk.SpawnInfo{}\n\tfor _, r := range res {\n\t\tv := []sdk.SpawnInfo{}\n\t\tgorpmapping.JSONNullString(r.Bytes, &v)\n\t\tspawnInfos = append(spawnInfos, v...)\n\t}\n\n\treturn spawnInfos, nil\n}",
"func (h *Hashicorp) findJobs(logger *zap.Logger) {\n\tsugar := logger.Sugar()\n\tfoundJobs := []jobs.Job{}\n\n\tc := colly.NewCollector(\n\t\tcolly.Async(true),\n\t)\n\n\terr := jobs.IsUp(h.URL)\n\tif err != nil {\n\t\tsugar.Fatal(err)\n\t}\n\n\tc.OnHTML(\".item\", func(e *colly.HTMLElement) {\n\t\tlink := e.Request.AbsoluteURL(e.ChildAttr(\"a\", \"href\"))\n\t\tsugar.Infof(\"Looking for jobs at: %v\", link)\n\t\tjob, err := h.gatherSpecs(link, logger)\n\n\t\tif err != nil {\n\t\t\tsugar.Error(zap.Error(err))\n\t\t}\n\t\tsugar.Infof(\"Job successfully scraped with title: %v\", job.Title)\n\t\tfoundJobs = append(foundJobs, job)\n\t})\n\n\tc.Visit(h.URL)\n\tc.Wait()\n\n\th.Jobs = foundJobs\n}",
"func processfile(fileName string) (participantes []string, err error) {\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer file.Close()\n\n\t// names slice will hold participant names that were read from file from file PARTICIPANTES.md.\n\tnames := []string{}\n\n\t// Reads file line by line and process each line to extract participant's name.\n\tscanner := bufio.NewScanner(file)\n\n\tfor linecounter := 1; scanner.Scan(); linecounter++ {\n\t\tline := scanner.Text()\n\n\t\t// Ignores first 10 lines.\n\t\tif linecounter < 11 {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Splits line contents into hopefully three columns.\n\t\tcolumns := strings.Split(line, \"|\")\n\n\t\t// Checks if line contains expected number of columns.\n\t\tif len(columns) != 5 {\n\t\t\treturn []string{}, fmt.Errorf(\"Line # %d, incorrect number of rows. Want 5, got %d\", linecounter, len(columns))\n\t\t}\n\n\t\t// Appends participant name to list of names.\n\t\tnames = append(names, strings.TrimSpace(columns[1]))\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn []string{}, err\n\t}\n\n\treturn names, nil\n}",
"func (m *DataClassificationRequestBuilder) ClassifyFileJobs()(*ClassifyFileJobsRequestBuilder) {\n return NewClassifyFileJobsRequestBuilderInternal(m.BaseRequestBuilder.PathParameters, m.BaseRequestBuilder.RequestAdapter)\n}",
"func (m *Mongo) GetJobs(ctx context.Context, filters []string, offset int, limit int) (*models.JobResults, error) {\n\tstateFilter := bson.M{}\n\tif len(filters) > 0 {\n\t\tstateFilter[\"state\"] = bson.M{\"$in\": filters}\n\t}\n\n\tvar jobItems []*models.Job\n\ttotalCount, err := m.connection.Collection(m.ActualCollectionName(config.ImportsCollection)).Find(ctx, stateFilter, &jobItems,\n\t\tmongodriver.Sort(bson.M{\"_id\": 1}), mongodriver.Offset(offset), mongodriver.Limit(limit))\n\tif err != nil {\n\t\tlog.Error(ctx, \"error finding items\", err)\n\t\treturn nil, err\n\t}\n\tif totalCount < 1 {\n\t\treturn nil, apierrors.ErrJobNotFound\n\t}\n\n\treturn &models.JobResults{\n\t\tItems: jobItems,\n\t\tCount: len(jobItems),\n\t\tTotalCount: totalCount,\n\t\tOffset: offset,\n\t\tLimit: limit,\n\t}, nil\n}",
"func (h *Handler) Jobs(e *emptypb.Empty, s protobufs.HackerNews_JobsServer) error {\n\titems, err := h.reader.GetJobs(s.Context())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getJobs: %w\", err)\n\t}\n\n\tfor _, item := range items {\n\t\ts.Send(grpc.ToProto(item))\n\t}\n\n\treturn nil\n}",
"func (taskBolt *TaskBolt) ListJobs(ctx context.Context, in *ga4gh_task_exec.JobListRequest) (*ga4gh_task_exec.JobListResponse, error) {\n\tlog.Debug(\"ListJobs called\")\n\n\tjobs := make([]*ga4gh_task_exec.JobDesc, 0, 10)\n\n\ttaskBolt.db.View(func(tx *bolt.Tx) error {\n\t\ttaskopB := tx.Bucket(TaskBucket)\n\t\tc := taskopB.Cursor()\n\n\t\tfor k, v := c.First(); k != nil; k, v = c.Next() {\n\t\t\tjobID := string(k)\n\t\t\tjobState := getJobState(tx, jobID)\n\n\t\t\ttask := &ga4gh_task_exec.Task{}\n\t\t\tproto.Unmarshal(v, task)\n\n\t\t\tjob := &ga4gh_task_exec.JobDesc{\n\t\t\t\tJobID: jobID,\n\t\t\t\tState: jobState,\n\t\t\t\tTask: &ga4gh_task_exec.TaskDesc{\n\t\t\t\t\tName: task.Name,\n\t\t\t\t\tProjectID: task.ProjectID,\n\t\t\t\t\tDescription: task.Description,\n\t\t\t\t},\n\t\t\t}\n\t\t\tjobs = append(jobs, job)\n\t\t}\n\t\treturn nil\n\t})\n\n\tout := ga4gh_task_exec.JobListResponse{\n\t\tJobs: jobs,\n\t}\n\n\treturn &out, nil\n}",
"func getSeqs(seqFile string) ([]Sequence) {\n\n var out []Sequence\n\n // Open the .seq file\n fi, err := os.Open(seqFile)\n if err != nil {\n fmt.Println(\"Error - couldn't open .seq file\")\n os.Exit(1)\n }\n scanner := bufio.NewScanner(fi)\n\n // For each line in the file\n for scanner.Scan() {\n\n var temp Sequence\n\n // Get name\n line := scanner.Text()[1:]\n temp.name = line\n\n // Get value\n split_line := strings.Split(line, \"_\")\n temp.val, _ = strconv.Atoi(split_line[0])\n\n // Get isForward\n if split_line[1] == \"forward\" {\n temp.isForward = true\n } else {\n temp.isForward = false\n }\n\n // Get sequence\n scanner.Scan()\n temp.seq = scanner.Text()\n\n out = append(out, temp)\n }\n\n return out\n}",
"func (c *client) startNewJob(ctx context.Context, opts launcher.LaunchOptions, jobInterface v12.JobInterface, ns string, safeName string, safeSha string) ([]runtime.Object, error) {\n\tlog.Logger().Infof(\"about to create a new job for name %s and sha %s\", safeName, safeSha)\n\n\t// lets see if we are using a version stream to store the git operator configuration\n\tfolder := filepath.Join(opts.Dir, \"versionStream\", \"git-operator\")\n\texists, err := files.DirExists(folder)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check if folder exists %s\", folder)\n\t}\n\tif !exists {\n\t\t// lets try the original location\n\t\tfolder = filepath.Join(opts.Dir, \".jx\", \"git-operator\")\n\t}\n\n\tjobFileName := \"job.yaml\"\n\n\tfileNamePath := filepath.Join(opts.Dir, \".jx\", \"git-operator\", \"filename.txt\")\n\texists, err = files.FileExists(fileNamePath)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to check for file %s\", fileNamePath)\n\t}\n\tif exists {\n\t\tdata, err := ioutil.ReadFile(fileNamePath)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to load file %s\", fileNamePath)\n\t\t}\n\t\tjobFileName = strings.TrimSpace(string(data))\n\t\tif jobFileName == \"\" {\n\t\t\treturn nil, errors.Errorf(\"the job name file %s is empty\", fileNamePath)\n\t\t}\n\t}\n\n\tfileName := filepath.Join(folder, jobFileName)\n\texists, err = files.FileExists(fileName)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find file %s in repository %s\", fileName, safeName)\n\t}\n\tif !exists {\n\t\treturn nil, errors.Errorf(\"repository %s does not have a Job file: %s\", safeName, fileName)\n\t}\n\n\tresource := &v1.Job{}\n\terr = yamls.LoadFile(fileName, resource)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to load Job file %s in repository %s\", fileName, safeName)\n\t}\n\n\tif !opts.NoResourceApply {\n\t\t// now lets check if there is a resources dir\n\t\tresourcesDir := filepath.Join(folder, \"resources\")\n\t\texists, err = files.DirExists(resourcesDir)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to check if resources directory %s exists in repository %s\", resourcesDir, safeName)\n\t\t}\n\t\tif exists {\n\t\t\tabsDir, err := filepath.Abs(resourcesDir)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to get absolute resources dir %s\", resourcesDir)\n\t\t\t}\n\n\t\t\tcmd := &cmdrunner.Command{\n\t\t\t\tName: \"kubectl\",\n\t\t\t\tArgs: []string{\"apply\", \"-f\", absDir},\n\t\t\t}\n\t\t\tlog.Logger().Infof(\"running command: %s\", cmd.CLI())\n\t\t\t_, err = c.runner(cmd)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrapf(err, \"failed to apply resources in dir %s\", absDir)\n\t\t\t}\n\t\t}\n\t}\n\n\t// lets try use a maximum of 31 characters and a minimum of 10 for the sha\n\tnamePrefix := trimLength(safeName, 20)\n\n\tid := uuid.New().String()\n\tresourceName := namePrefix + \"-\" + id\n\n\tresource.Name = resourceName\n\n\tif resource.Labels == nil {\n\t\tresource.Labels = map[string]string{}\n\t}\n\tresource.Labels[constants.DefaultSelectorKey] = constants.DefaultSelectorValue\n\tresource.Labels[launcher.RepositoryLabelKey] = safeName\n\tresource.Labels[launcher.CommitShaLabelKey] = safeSha\n\n\tr2, err := jobInterface.Create(ctx, resource, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create Job %s in namespace %s\", resourceName, ns)\n\t}\n\tlog.Logger().Infof(\"created Job %s in namespace %s\", resourceName, 
ns)\n\treturn []runtime.Object{r2}, nil\n}",
"func (*ListJobsRequest) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{3}\n}",
"func grep(lineRx *regexp.Regexp, fnames []string) {\n // jobs channel is used for passing on jobs\n jobs := make(chan Job, cntWorkers)\n // results channel is used for collecting results\n results := make(chan Result, len(fnames))\n // done channel is used for signaling that a worker is done with its job\n done := make(chan struct{}, cntWorkers)\n\n // Each file is a job to do.\n // Add a Job struct to the jobs channel for each file, \n // and then close the channel.\n go func() {\n for _, fname := range fnames {\n jobs <- Job{fname, results}\n }\n close(jobs)\n }()\n\n // Setup the worker goroutines that process\n // the jobs channel\n for i := 0; i < cntWorkers; i++ {\n go func() {\n for job := range jobs {\n job.Do(lineRx)\n }\n // jobs channel has been closed:\n // Signal that work has been done\n done <- struct{}{}\n }()\n }\n\n // Wait for the completion of all worker goroutines, and\n // then close the results channel\n go func() {\n for i := 0; i < cntWorkers; i++ {\n <-done\n }\n close(results)\n }()\n\n // Process the results in the main goroutine, reading from\n // the results channel until it is have been closed\n for result := range results {\n fmt.Printf(\"%s:%d:%s\\n\", result.fname, result.lino, result.line)\n }\n}",
"func (m *DataHistoryManager) retrieveJobs() ([]*DataHistoryJob, error) {\n\tif m == nil {\n\t\treturn nil, ErrNilSubsystem\n\t}\n\tif atomic.LoadInt32(&m.started) == 0 {\n\t\treturn nil, ErrSubSystemNotStarted\n\t}\n\tdbJobs, err := m.jobDB.GetAllIncompleteJobsAndResults()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar response []*DataHistoryJob\n\tfor i := range dbJobs {\n\t\tdbJob, err := m.convertDBModelToJob(&dbJobs[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = m.validateJob(dbJob)\n\t\tif err != nil {\n\t\t\tlog.Error(log.DataHistory, err)\n\t\t\tcontinue\n\t\t}\n\t\tresponse = append(response, dbJob)\n\t}\n\n\treturn response, nil\n}",
"func (m *DataHistoryManager) retrieveJobs() ([]*DataHistoryJob, error) {\n\tif m == nil {\n\t\treturn nil, ErrNilSubsystem\n\t}\n\tif atomic.LoadInt32(&m.started) == 0 {\n\t\treturn nil, ErrSubSystemNotStarted\n\t}\n\tdbJobs, err := m.jobDB.GetAllIncompleteJobsAndResults()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tresponse := make([]*DataHistoryJob, 0, len(dbJobs))\n\tfor i := range dbJobs {\n\t\tdbJob, err := m.convertDBModelToJob(&dbJobs[i])\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\terr = m.validateJob(dbJob)\n\t\tif err != nil {\n\t\t\tlog.Errorln(log.DataHistory, err)\n\t\t\tcontinue\n\t\t}\n\t\tresponse = append(response, dbJob)\n\t}\n\n\treturn response, nil\n}",
"func readCacheFile(fileName string) ([]cacheEntry, error) {\n\t// Lock the mutex\n\tcacheMutex.Lock()\n\tdefer cacheMutex.Unlock()\n\n\t// Open our cache file\n\tfile, err := os.OpenFile(fileName, os.O_CREATE|os.O_RDONLY, 0644)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\tvar entries []cacheEntry\n\t// Iterate through lines in file\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\t// Split the line with |\n\t\tsplit := strings.Split(line, \"|\")\n\n\t\t// If the line if not formatted correctly, return an error\n\t\tif len(split) != 4 {\n\t\t\treturn nil, fmt.Errorf(\"line in cache file %s was not formatted correctly: %s\", fileName, line)\n\t\t}\n\n\t\t// Parse the status number\n\t\tstatusNum, err := strconv.Atoi(split[2])\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"status must be a number in cache file %s: %s\", fileName, line)\n\t\t}\n\n\t\t// Make sure we have a valid status number\n\t\tif statusNum > int(StatusUnknown) {\n\t\t\treturn nil, fmt.Errorf(\"status must be less than %d in cache file %s: %d\", int(StatusUnknown), fileName, statusNum)\n\t\t}\n\n\t\t// Parse the time\n\t\tentryTimeUnix, err := strconv.ParseInt(split[3], 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid number in cache file %s: %s\", fileName, line)\n\t\t}\n\n\t\t// Make a time object from the unix\n\t\tentryTime := time.Unix(int64(entryTimeUnix), 0)\n\n\t\t// Create our new cache entry from this line\n\t\tvar newCacheEntry cacheEntry\n\t\tnewCacheEntry.ServiceName = split[0]\n\t\tnewCacheEntry.Username = split[1]\n\t\tnewCacheEntry.Status = Status(statusNum)\n\t\tnewCacheEntry.Time = entryTime\n\n\t\t// Append our new entry\n\t\tentries = append(entries, newCacheEntry)\n\t}\n\n\treturn entries, nil\n}",
"func GetJobsByCronjob(client kubernetes.Interface, options GetJobOptions) (*v1.JobList, error) {\n\n\t//sel := strings.Join([]string{\"items.metadata.ownerReferences.name\", options.Name}, \"=\")\n\t//log.Printf(\"sel: %v\", sel)\n\t//resource, err := client.BatchV1().Jobs(options.Namespace).List(meta_v1.ListOptions{LabelSelector: sel})\n\n\tresource, err := client.BatchV1().Jobs(options.Namespace).List(meta_v1.ListOptions{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn resource, nil\n}",
"func (s service) openFile(names *[]person.Names) error {\n\tf, err := os.Open(s.args[0])\n\tif err != nil {\n\t\treturn errors.New(\"error opening the file\")\n\t}\n\tdefer f.Close()\n\n\t// Read in line by line\n\tscanner := bufio.NewScanner(f)\n\tscanner.Split(bufio.ScanLines)\n\n\tfor scanner.Scan() {\n\t\t// send to get decoded\n\t\tp := &person.Names{}\n\t\ts.serializer.Decode(scanner.Bytes(), p)\n\t\t*names = append(*names, *p)\n\t}\n\n\tif len(*names) < 1 {\n\t\treturn errors.New(\"not able to get names from file, is it empty or not formatted correctly?\")\n\t}\n\n\treturn nil\n}",
"func getAllFiles(j *job.Job) {\n\tcomplete := make(chan *job.Document, j.DocCount())\n\tfailures := make(chan error, j.DocCount())\n\tfor i := range j.DocList {\n\t\tgo getFile(&j.DocList[i], complete, failures)\n\t}\n\n\twaitForDownloads(j, complete, failures)\n}",
"func (c *client) Launch(opts launcher.LaunchOptions) ([]runtime.Object, error) {\n\tctx := context.Background()\n\tns := opts.Repository.Namespace\n\tif ns == \"\" {\n\t\tns = c.ns\n\t}\n\tsafeName := naming.ToValidValue(opts.Repository.Name)\n\tsafeSha := naming.ToValidValue(opts.GitSHA)\n\tselector := fmt.Sprintf(\"%s,%s=%s\", c.selector, launcher.RepositoryLabelKey, safeName)\n\tjobInterface := c.kubeClient.BatchV1().Jobs(ns)\n\tlist, err := jobInterface.List(ctx, metav1.ListOptions{\n\t\tLabelSelector: selector,\n\t})\n\tif err != nil && apierrors.IsNotFound(err) {\n\t\terr = nil\n\t}\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to find Jobs in namespace %s with selector %s\", ns, selector)\n\t}\n\n\tvar jobsForSha []v1.Job\n\tvar activeJobs []v1.Job\n\tfor _, r := range list.Items {\n\t\tlog.Logger().Infof(\"found Job %s\", r.Name)\n\n\t\tif r.Labels[launcher.CommitShaLabelKey] == safeSha && r.Labels[launcher.RerunLabelKey] != \"true\" {\n\t\t\tjobsForSha = append(jobsForSha, r)\n\t\t}\n\n\t\t// is the job active\n\t\tif IsJobActive(r) {\n\t\t\tactiveJobs = append(activeJobs, r)\n\t\t}\n\t}\n\n\tif len(jobsForSha) == 0 {\n\t\tif len(activeJobs) > 0 {\n\t\t\tlog.Logger().Infof(\"not creating a Job in namespace %s for repo %s sha %s yet as there is an active job %s\", ns, safeName, safeSha, activeJobs[0].Name)\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn c.startNewJob(ctx, opts, jobInterface, ns, safeName, safeSha)\n\t}\n\treturn nil, nil\n}",
"func (tm *appManager) getCurrentJobs(app *v1alpha1.FLApp, namespace string) ([]*batchv1.Job, []*batchv1.Job, []string, []*batchv1.Job, error) {\n\tallJobs, err := tm.jobLister.Jobs(namespace).List(util.GetLabelSetWithappID(app).AsSelector())\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tvar activeJobs, condemnedJobs []*batchv1.Job\n\tvar phantomJobs []string\n\tfor _, job := range allJobs {\n\t\tif _, ok := app.Status.PairStatus[v1alpha1.FLReplicaTypeWorker].Local[job.Name]; ok {\n\t\t\tactiveJobs = append(activeJobs, job)\n\t\t} else {\n\t\t\tcondemnedJobs = append(condemnedJobs, job)\n\t\t}\n\t}\n\tfor jobName := range app.Status.PairStatus[v1alpha1.FLReplicaTypeWorker].Local {\n\t\texist := false\n\t\tfor _, job := range allJobs {\n\t\t\tif jobName == job.Name {\n\t\t\t\texist = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !exist {\n\t\t\tphantomJobs = append(phantomJobs, jobName)\n\t\t}\n\t}\n\treturn activeJobs, condemnedJobs, phantomJobs, allJobs, nil\n}",
"func ReadBootstrapListFromFile(path string) ([]*krpc.NodeInfo, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\n\t// Read the file line by line and parse the multiaddress string\n\tvar bootstrapNI []*krpc.NodeInfo\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\t// Ignore lines that are commented out\n\t\tif strings.HasPrefix(line, \"//\") {\n\t\t\tcontinue\n\t\t}\n\t\t// Remove any whitespaces (esp necessary with badly formatted IPv6 addrs)\n\t\taddr := strings.Join(strings.Fields(line), \"\")\n\t\tainfo, err := ParseAddrString(addr)\n\t\tif err != nil {\n\t\t\tlog.WithField(\"err\", err).Error(\"Error parsing bootstrap peers.\")\n\t\t\treturn nil, err\n\t\t}\n\t\tbootstrapNI = append(bootstrapNI, ainfo)\n\t}\n\n\treturn bootstrapNI, nil\n}",
"func (svc *GCSclient) list(bucketName string, filePrefix string) ([]string, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 300*time.Second)\n\tdefer cancel()\n\t// List all objects in a bucket using pagination\n\tvar files []string\n\tit := svc.Bucket(bucketName).Objects(ctx, &storage.Query{Prefix: filePrefix})\n\tfor {\n\t\tobj, err := it.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfiles = append(files, obj.Name)\n\t}\n\tsort.Strings(files)\n\treturn files, nil\n}",
"func (d *Discovery) readFile(filename string) ([]*targetgroup.Group, error) {\n\tfd, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer fd.Close()\n\n\tcontent, err := io.ReadAll(fd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinfo, err := fd.Stat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar targetGroups []*targetgroup.Group\n\n\tswitch ext := filepath.Ext(filename); strings.ToLower(ext) {\n\tcase \".json\":\n\t\tif err := json.Unmarshal(content, &targetGroups); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tcase \".yml\", \".yaml\":\n\t\tif err := yaml.UnmarshalStrict(content, &targetGroups); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\tdefault:\n\t\tpanic(fmt.Errorf(\"discovery.File.readFile: unhandled file extension %q\", ext))\n\t}\n\n\tfor i, tg := range targetGroups {\n\t\tif tg == nil {\n\t\t\terr = errors.New(\"nil target group item found\")\n\t\t\treturn nil, err\n\t\t}\n\n\t\ttg.Source = fileSource(filename, i)\n\t\tif tg.Labels == nil {\n\t\t\ttg.Labels = model.LabelSet{}\n\t\t}\n\t\ttg.Labels[fileSDFilepathLabel] = model.LabelValue(filename)\n\t}\n\n\td.writeTimestamp(filename, float64(info.ModTime().Unix()))\n\n\treturn targetGroups, nil\n}",
"func (*ListJobsResponse) Descriptor() ([]byte, []int) {\n\treturn file_pkg_manager_http_grpc_job_proto_rawDescGZIP(), []int{4}\n}",
"func (s *Service) Jobs(c context.Context) (js []*jobmdl.Job) {\n\tjs = s.cache\n\treturn\n}",
"func (m *MemoryStorage) GetAll() ([]Job, error) {\n\tm.mutex.RLock()\n\tdefer m.mutex.RUnlock()\n\n\tvar jobs []Job\n\n\tfor _, job := range m.checks {\n\t\tjobs = append(jobs, job)\n\t}\n\n\treturn jobs, nil\n}",
"func getJobInfo(row chunk.Row) (*JobInfo, error) {\n\tvar err error\n\tjobInfo := JobInfo{\n\t\tJobID: row.GetInt64(6),\n\t\tDataSource: row.GetString(7),\n\t\tTableSchema: row.GetString(8),\n\t\tTableName: row.GetString(9),\n\t\tImportMode: row.GetString(10),\n\t\tProgress: row.GetString(11),\n\t\tUser: row.GetString(12),\n\t}\n\tjobInfo.Status, jobInfo.StatusMessage, err = getJobStatus(row)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &jobInfo, nil\n}",
"func (c *Client) List(prefix string, opts ...backend.ListOption) (*backend.ListResult, error) {\n\toptions := backend.DefaultListOptions()\n\tfor _, opt := range opts {\n\t\topt(options)\n\t}\n\n\tif options.Paginated {\n\t\treturn nil, errors.New(\"pagination not supported\")\n\t}\n\n\troot := path.Join(c.pather.BasePath(), prefix)\n\n\tlistJobs := make(chan string)\n\tresults := make(chan listResult)\n\tdone := make(chan struct{})\n\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < c.config.ListConcurrency; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tc.lister(done, listJobs, results)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tdefer func() {\n\t\tclose(done)\n\t\tif c.config.testing {\n\t\t\t// Waiting might be delayed if an early error is encountered but\n\t\t\t// other goroutines are waiting on a long http timeout. Thus, we\n\t\t\t// only wait for each spawned goroutine to exit during testing to\n\t\t\t// assert that no goroutines leak.\n\t\t\twg.Wait()\n\t\t}\n\t}()\n\n\tvar files []string\n\n\t// Pending tracks the number of directories which are pending exploration.\n\t// Invariant: there will be a result received for every increment made to\n\t// pending.\n\tpending := 1\n\tlistJobs <- root\n\n\tfor pending > 0 {\n\t\tres := <-results\n\t\tpending--\n\t\tif res.err != nil {\n\t\t\tif httputil.IsNotFound(res.err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn nil, res.err\n\t\t}\n\t\tvar dirs []string\n\t\tfor _, fs := range res.list {\n\t\t\tp := path.Join(res.dir, fs.PathSuffix)\n\n\t\t\t// TODO(codyg): This is an ugly hack to avoid walking through non-tags\n\t\t\t// during Docker catalog. Ideally, only tags are located in the repositories\n\t\t\t// directory, however in WBU2 HDFS, there are blobs here as well. At some\n\t\t\t// point, we must migrate the data into a structure which cleanly divides\n\t\t\t// blobs and tags (like we do in S3).\n\t\t\tif _ignoreRegex.MatchString(p) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// TODO(codyg): Another ugly hack to speed up catalog performance by stopping\n\t\t\t// early when we hit tags...\n\t\t\tif _stopRegex.MatchString(p) {\n\t\t\t\tp = path.Join(p, \"tags/dummy/current/link\")\n\t\t\t\tfs.Type = \"FILE\"\n\t\t\t}\n\n\t\t\tif fs.Type == \"DIRECTORY\" {\n\t\t\t\t// Flat directory structures are common, so accumulate directories and send\n\t\t\t\t// them to the listers in a single goroutine (as opposed to a goroutine per\n\t\t\t\t// directory).\n\t\t\t\tdirs = append(dirs, p)\n\t\t\t} else {\n\t\t\t\tname, err := c.pather.NameFromBlobPath(p)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.With(\"path\", p).Errorf(\"Error converting blob path into name: %s\", err)\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tfiles = append(files, name)\n\t\t\t}\n\t\t}\n\t\tif len(dirs) > 0 {\n\t\t\t// We cannot send list jobs and receive results in the same thread, else\n\t\t\t// deadlock will occur.\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tc.sendAll(done, dirs, listJobs)\n\t\t\t\twg.Done()\n\t\t\t}()\n\t\t\tpending += len(dirs)\n\t\t}\n\t}\n\n\treturn &backend.ListResult{\n\t\tNames: files,\n\t}, nil\n}",
"func (g *gcs) List(ctx context.Context, prefix string) ([]*fs.FileInfo, error) {\n\tvar files []*fs.FileInfo\n\tit := g.bucket.Objects(ctx, &storage.Query{\n\t\tPrefix: prefix,\n\t})\n\tfor {\n\t\tattrs, err := it.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tfiles = append(files, fileinfo(attrs))\n\t}\n\treturn files, nil\n}",
"func readGeneList(ctx context.Context, geneDB *fusion.GeneDB, geneListInputPath string) {\n\tdata, err := file.ReadFile(ctx, geneListInputPath)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tvar genes []string\n\tfor _, line := range strings.Split(string(data), \"\\n\") {\n\t\tline = strings.TrimSpace(line)\n\t\tif line != \"\" {\n\t\t\tgenes = append(genes, line)\n\t\t}\n\t}\n\tgeneDB.PrepopulateGenes(genes)\n\tlog.Printf(\"Interned %d genes from %s\", len(genes), geneListInputPath)\n}",
"func (s *service) GetJobs() ([]*repository.Job, error) {\n\treturn s.repo.GetJobs()\n}",
"func GetJob(process string, pid string, tempLocation string) ([]string, error) {\n\treturn []string{\"Minidump\", process, pid, tempLocation}, nil\n}",
"func parseFile(path string, data []byte) []*resource {\n\tchunks := bytes.Split(data, []byte(\"\\n---\\n\"))\n\tresources := make([]*resource, 0, len(chunks))\n\tfor i, chunk := range chunks {\n\t\tchunk = bytes.TrimSpace(chunk)\n\t\tif len(chunk) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tr, err := ParseChunk(chunk)\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Error processing %s[%d]: %v\", path, i, err)\n\t\t\tcontinue\n\t\t}\n\t\tif r == nil {\n\t\t\tcontinue\n\t\t}\n\t\tresources = append(resources, &resource{BackEndResource: r, sha: sha1.Sum(chunk)})\n\t}\n\treturn resources\n}",
"func Job() {\n\tlogger.Log.Debug(\"cron job started...\")\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\tlogger.Log.Debug(\"Panic recovered from cron job\", zap.Any(\"recover\", r))\n\t\t}\n\t}()\n\tcronCtx, err := createSharedContext(AuthAPI)\n\tif err != nil {\n\t\tlogger.Log.Debug(\"couldnt fetch token, will try next time when cron will execute\", zap.Any(\"error\", err))\n\t\treturn\n\t}\n\tif cronCtx != nil {\n\t\t*cronCtx, err = grpc.AddClaimsInContext(*cronCtx, VerifyKey)\n\t\tfileScopeMapping := make(map[string][]string)\n\t\t//Read Dir , if found create the job\n\t\tfiles, er := ioutil.ReadDir(SourceDir)\n\t\tif er != nil {\n\t\t\tlogger.Log.Debug(\"Failed to read the dirctory/files\", zap.Any(\"directory\", SourceDir), zap.Error(er))\n\t\t\treturn\n\t\t}\n\t\tfor _, fileInfo := range files {\n\t\t\ttemp := strings.Split(fileInfo.Name(), constants.SCOPE_DELIMETER)\n\t\t\tif len(temp) == 0 {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//data[\"TST\"]= []{\"f1.csv\",\"f2.csv\",\"f3.csv\"}, map is because if multiple files come\n\t\t\tfileScopeMapping[temp[0]] = append(fileScopeMapping[temp[0]], fileInfo.Name())\n\t\t}\n\n\t\tfor scope, files := range fileScopeMapping {\n\t\t\tresp, err := Obj.NotifyUpload(*cronCtx, &v1.NotifyUploadRequest{\n\t\t\t\tScope: scope,\n\t\t\t\tType: \"data\",\n\t\t\t\tUploadedBy: \"Nifi\",\n\t\t\t\tFiles: files})\n\t\t\tif err != nil || (resp != nil && !resp.Success) {\n\t\t\t\tlogger.Log.Debug(\"failed to upload the transformed files\", zap.Error(err))\n\t\t\t}\n\t\t}\n\t}\n}",
"func (s *Slurm) JobInfo(ctx context.Context, req *api.JobInfoRequest) (*api.JobInfoResponse, error) {\n\tinfo, err := s.client.SJobInfo(req.JobId)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"could not get job %d info\", req.JobId)\n\t}\n\n\tpInfo, err := mapSInfoToProtoInfo(info)\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"could not convert slurm info into proto info\")\n\t}\n\n\tif len(pInfo) == 0 {\n\t\treturn nil, errors.New(\"job info slice is empty, probably invalid scontrol output\")\n\t}\n\n\treturn &api.JobInfoResponse{Info: pInfo}, nil\n}",
"func (t *DRMAATracker) ListJobs() ([]string, error) {\n\t// need to get the job list from the internal DB\n\tt.Lock()\n\tdefer t.Unlock()\n\treturn t.store.GetJobIDs(), nil\n}",
"func readPools(fileName string) []Node {\r\n\tvar pools []Node\r\n\r\n\tfile, e := os.Open(fileName)\r\n\tif e != nil {\r\n\t\tfmt.Println(e)\r\n\t}\r\n\tdefer file.Close()\r\n\tfileScanner := bufio.NewScanner(file)\r\n\r\n\tfor fileScanner.Scan() {\r\n\t\t// line is split and parsed\r\n\t\ts := strings.Split(fileScanner.Text(), \",\")\r\n\t\tlt, _ := strconv.ParseFloat(s[1], 64)\r\n\t\tln, _ := strconv.ParseFloat(s[2], 64)\r\n\r\n\t\t// new node is created and appended to pools\r\n\t\tn := Node{data: Pool{s[0], lt, ln, (math.Pi * lt) / 180.0, (math.Pi * ln) / 180.0}}\r\n\t\tpools = append(pools, n)\r\n\t}\r\n\r\n\t// the slice is sorted from west to east and returned\r\n\tsort.Slice(pools, func(i, j int) bool { return pools[i].data.lnD < pools[j].data.lnD })\r\n\r\n\treturn pools\r\n}",
"func (c *Client) GetJobsWithDomain(domain string) ([]*Job, error) {\n\tclient := &http.Client{}\n\turl := fmt.Sprintf(\"http://%s/api/v1/jobs\", c.options.ServerAddr)\n\treq, err := http.NewRequest(\"GET\", url, strings.NewReader(fmt.Sprintf(\"domain=%s\", domain)))\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn nil, err\n\t}\n\treq.Header.Set(\"Content-Type\", \"application/json\")\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer resp.Body.Close()\n\n\tbody, err := ioutil.ReadAll(resp.Body)\n\tif err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn nil, err\n\t}\n\tjobs := []*Job{}\n\tif err := json.Unmarshal(body, jobs); err != nil {\n\t\tlogrus.WithError(err)\n\t\treturn nil, err\n\t}\n\treturn jobs, nil\n}",
"func parseFile(path string) ([]Hday, error) {\n\t// load file content\n\tc, err := ioutil.ReadFile(path)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar data []Hday\n\ts := bufio.NewScanner(bytes.NewReader(c))\n\tfor s.Scan() {\n\t\th, err := NewHday(s.Text())\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdata = append(data, h)\n\t}\n\treturn data, s.Err()\n}",
"func (r MasterJob) MapJobs() []WorkerJob {\n\trequests := make([]WorkerJob, 0)\n\n\tfor ix := range r.FilePaths {\n\t\tpath := r.FilePaths[ix]\n\n\t\tfor jx := 0; jx < int(r.MapOpCount); jx++ {\n\t\t\trequest := WorkerJob{\n\t\t\t\tFile: path,\n\t\t\t\tMapJobNumber: uint(jx),\n\t\t\t\tMapOpCount: r.MapOpCount,\n\t\t\t\tMapFuncName: r.MapFuncName,\n\t\t\t\tReduceFuncName: r.ReduceFuncName,\n\t\t\t\tReduceOpCount: r.ReduceOpCount,\n\t\t\t\tRemoteFileAddr: mrutil.UnassignedWorker,\n\t\t\t\tType: r.Type,\n\t\t\t\tWorker: mrutil.UnassignedWorker,\n\t\t\t}\n\n\t\t\trequests = append(requests, request)\n\t\t}\n\t}\n\n\treturn requests\n}",
"func (s *JobService) List(ctx context.Context, clientTimeOffset int, collectAllChildJobs bool) (*Groups, *http.Response, error) {\n\trequest := Request{\n\t\tAction: JobAction,\n\t\tMethod: \"getGroupInfo\",\n\t\tData: []interface{}{[]interface{}{nil}, clientTimeOffset, collectAllChildJobs},\n\t\tType: \"rpc\",\n\t\tTid: 1,\n\t}\n\n\treq, err := s.client.NewRequest(&request)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\tvar groups Groups\n\tr := Response{Data: &groups}\n\tresp, err := s.client.Do(ctx, req, &r)\n\tif err != nil {\n\t\treturn nil, resp, err\n\t}\n\treturn &groups, resp, nil\n}",
"func parseAllFiles() {\n\tbasePath := \"/home/andrea/infos/\" // TODO change this\n\tfiles, _ := ioutil.ReadDir(basePath)\n\tfor _, f := range files {\n\t\terr, f := model.FromJSON(basePath + f.Name())\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tfilms = append(films, f)\n\t}\n}",
"func GetJobInfo(\n\tctx context.Context,\n\tconn sqlexec.SQLExecutor,\n\tjobID int64,\n) (*JobInfo, error) {\n\tctx = util.WithInternalSourceType(ctx, kv.InternalLoadData)\n\trs, err := conn.ExecuteInternal(ctx,\n\t\t`SELECT\n\t\texpected_status,\n\t\tupdate_time >= DATE_SUB(CURRENT_TIMESTAMP(6), INTERVAL %? SECOND) AS is_alive,\n\t\tend_time,\n\t\tresult_message,\n\t\terror_message,\n\t\tstart_time,\n\n\t\tjob_id,\n\t\tdata_source,\n\t\ttable_schema,\n\t\ttable_name,\n\t\timport_mode,\n\t\tprogress,\n\t\tcreate_user\n\t\tFROM mysql.load_data_jobs\n\t\tWHERE job_id = %?;`,\n\t\tOfflineThresholdInSec, jobID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer terror.Call(rs.Close)\n\trows, err := sqlexec.DrainRecordSet(ctx, rs, 1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif len(rows) != 1 {\n\t\treturn nil, fmt.Errorf(\"job %d not found\", jobID)\n\t}\n\n\treturn getJobInfo(rows[0])\n}",
"func detectResultFiles(job *models.Job) ([]string, error) {\n\n\tfiles, err := ioutil.ReadDir(filepath.Join(job.Resource.URL, \"out\"))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresults := []string{}\n\tfor _, f := range files {\n\t\tresults = append(results, f.Name())\n\t}\n\n\treturn results, nil\n}",
"func (m *Master) constructJobResources(c *Config, restStorage map[string]rest.Storage) {\n\t// Note that job's storage settings are changed by changing the batch\n\t// group. Clearly we want all jobs to be stored in the same place no\n\t// matter where they're accessed from.\n\trestOptions := func(resource string) generic.RESTOptions {\n\t\treturn generic.RESTOptions{\n\t\t\tStorage: c.StorageDestinations.Search([]string{batch.GroupName, extensions.GroupName}, resource),\n\t\t\tDecorator: m.StorageDecorator(),\n\t\t\tDeleteCollectionWorkers: m.deleteCollectionWorkers,\n\t\t}\n\t}\n\tjobStorage, jobStatusStorage := jobetcd.NewREST(restOptions(\"jobs\"))\n\trestStorage[\"jobs\"] = jobStorage\n\trestStorage[\"jobs/status\"] = jobStatusStorage\n}",
"func readFiles(files []string) *Collection {\n\tc := Collection{Stats: make(map[BenchKey]*Benchstat)}\n\tfor _, file := range files {\n\t\treadFile(file, &c)\n\t}\n\treturn &c\n}",
"func fileProcessorWorker(input chan *FileJob, output chan *FileJob) {\n\tvar startTime int64\n\tvar fileCount int64\n\tvar gcEnabled int64\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < FileProcessJobWorkers; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\treader := NewFileReader()\n\n\t\t\tfor job := range input {\n\t\t\t\tatomic.CompareAndSwapInt64(&startTime, 0, makeTimestampMilli())\n\n\t\t\t\tloc := job.Location\n\t\t\t\tif job.Symlocation != \"\" {\n\t\t\t\t\tloc = job.Symlocation\n\t\t\t\t}\n\n\t\t\t\tfileStartTime := makeTimestampNano()\n\t\t\t\tcontent, err := reader.ReadFile(loc, int(job.Bytes))\n\t\t\t\tatomic.AddInt64(&fileCount, 1)\n\n\t\t\t\tif atomic.LoadInt64(&gcEnabled) == 0 && atomic.LoadInt64(&fileCount) >= int64(GcFileCount) {\n\t\t\t\t\tdebug.SetGCPercent(gcPercent)\n\t\t\t\t\tatomic.AddInt64(&gcEnabled, 1)\n\t\t\t\t\tif Verbose {\n\t\t\t\t\t\tprintWarn(\"read file limit exceeded GC re-enabled\")\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tif Trace {\n\t\t\t\t\tprintTrace(fmt.Sprintf(\"nanoseconds read into memory: %s: %d\", job.Location, makeTimestampNano()-fileStartTime))\n\t\t\t\t}\n\n\t\t\t\tif err == nil {\n\t\t\t\t\tjob.Content = content\n\t\t\t\t\tif processFile(job) {\n\t\t\t\t\t\toutput <- job\n\t\t\t\t\t}\n\t\t\t\t} else {\n\t\t\t\t\tif Verbose {\n\t\t\t\t\t\tprintWarn(fmt.Sprintf(\"error reading: %s %s\", job.Location, err))\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(output)\n\n\t\tif Debug {\n\t\t\tprintDebug(fmt.Sprintf(\"milliseconds reading files into memory: %d\", makeTimestampMilli()-startTime))\n\t\t}\n\t}()\n\n}",
"func (f *Input) loadLastPollFiles(ctx context.Context) error {\n\tencoded, err := f.persister.Get(ctx, knownFilesKey)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif encoded == nil {\n\t\tf.knownFiles = make([]*Reader, 0, 10)\n\t\treturn nil\n\t}\n\n\tdec := json.NewDecoder(bytes.NewReader(encoded))\n\n\t// Decode the number of entries\n\tvar knownFileCount int\n\tif err := dec.Decode(&knownFileCount); err != nil {\n\t\treturn fmt.Errorf(\"decoding file count: %w\", err)\n\t}\n\n\t// Decode each of the known files\n\tf.knownFiles = make([]*Reader, 0, knownFileCount)\n\tfor i := 0; i < knownFileCount; i++ {\n\t\t// Only the offset, fingerprint, and splitter\n\t\t// will be used before this reader is discarded\n\t\tunsafeReader, err := f.readerFactory.unsafeReader()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err = dec.Decode(unsafeReader); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tf.knownFiles = append(f.knownFiles, unsafeReader)\n\t}\n\n\treturn nil\n}",
"func listJobs(w io.Writer, projectID string) error {\n\t// projectID := \"my-project-id\"\n\t// jobID := \"my-job-id\"\n\tctx := context.Background()\n\n\tclient, err := bigquery.NewClient(ctx, projectID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bigquery.NewClient: %w\", err)\n\t}\n\tdefer client.Close()\n\n\tit := client.Jobs(ctx)\n\t// List up to 10 jobs to demonstrate iteration.\n\tfor i := 0; i < 10; i++ {\n\t\tj, err := it.Next()\n\t\tif err == iterator.Done {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tstate := \"Unknown\"\n\t\tswitch j.LastStatus().State {\n\t\tcase bigquery.Pending:\n\t\t\tstate = \"Pending\"\n\t\tcase bigquery.Running:\n\t\t\tstate = \"Running\"\n\t\tcase bigquery.Done:\n\t\t\tstate = \"Done\"\n\t\t}\n\t\tfmt.Fprintf(w, \"Job %s in state %s\\n\", j.ID(), state)\n\t}\n\treturn nil\n}",
"func (s *ContinuousRestoreJobsServiceOp) List(ctx context.Context, groupID, clusterID string, opts *ListOptions) (*atlas.ContinuousJobs, *Response, error) {\n\tif clusterID == \"\" {\n\t\treturn nil, nil, atlas.NewArgError(\"clusterID\", \"must be set\")\n\t}\n\tif groupID == \"\" {\n\t\treturn nil, nil, atlas.NewArgError(\"groupID\", \"must be set\")\n\t}\n\n\tpath := fmt.Sprintf(continuousRestoreJobsPath, groupID, clusterID)\n\n\tpath, err := setQueryParams(path, opts)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\treq, err := s.Client.NewRequest(ctx, http.MethodGet, path, nil)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\troot := new(atlas.ContinuousJobs)\n\tresp, err := s.Client.Do(ctx, req, root)\n\n\treturn root, resp, err\n}",
"func (s *JobDB) GetJobsByChatID(chatID int64) []BusInfoJob {\n\tuserKey := []byte(strconv.FormatInt(chatID, 10))\n\tstoredJobs := []BusInfoJob{}\n\n\tdb, err := bolt.Open(s.dbFile, 0600, nil)\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\tdefer db.Close()\n\n\terr = db.View(func(tx *bolt.Tx) error {\n\n\t\tb := tx.Bucket([]byte(s.userBucket))\n\t\tif b == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tv := b.Get(userKey)\n\t\tjson.Unmarshal(v, &storedJobs)\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tlog.Fatalln(err)\n\t}\n\n\treturn storedJobs\n}",
"func parseObjects(f *os.File, cfg *rest.Config) (*yamlutil.YAMLOrJSONDecoder, meta.RESTMapper, error) {\n\tdata, err := os.ReadFile(f.Name())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tdataReader := bytes.NewReader(data)\n\tdecoder := yamlutil.NewYAMLOrJSONDecoder(dataReader, 100)\n\tmapper, err := apiutil.NewDiscoveryRESTMapper(cfg)\n\n\treturn decoder, mapper, err\n}",
"func (tasklist *TaskList) LoadFromFile(file *os.File) error {\n\t*tasklist = []Task{} // Empty task list\n\n\ttaskID := 1\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\ttext := strings.Trim(scanner.Text(), whitespaces) // Read line\n\n\t\t// Ignore blank or comment lines\n\t\tif isEmpty(text) || (IgnoreComments && strings.HasPrefix(text, \"#\")) {\n\t\t\tcontinue\n\t\t}\n\n\t\ttask, err := ParseTask(text)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttask.ID = taskID\n\n\t\t*tasklist = append(*tasklist, *task)\n\t\ttaskID++\n\t}\n\n\treturn scanner.Err()\n}",
"func (b *BQService) GetJobs(pageToken string) *Jobs {\n\tcall := b.jobsService.List(b.projectID).AllUsers(true).Projection(\"full\")\n\tif pageToken != \"\" {\n\t\tcall = call.PageToken(pageToken)\n\t}\n\tjobsList, err := call.Do()\n\tif err != nil {\n\t\tlog.Fatalf(\"unable to list running jobs %v\", err)\n\t}\n\n\tjobs := &Jobs{}\n\n\tjobs.NextPage = jobsList.NextPageToken\n\tfor _, j := range jobsList.Jobs {\n\t\tjob := b.parseJobListJobs(j)\n\t\tif job.Status == \"RUNNING\" {\n\t\t\tjobs.Running = append(jobs.Running, job)\n\t\t} else {\n\t\t\tjobs.Done = append(jobs.Done, job)\n\t\t}\n\t}\n\n\treturn jobs\n}",
"func (js *JobService) Jobs(category string) ([]entity.Job, []error) {\n\tjobs, errs := js.jobRepo.Jobs(category)\n\tif len(errs) > 0 {\n\t\treturn nil, errs\n\t}\n\n\treturn jobs, nil\n}",
"func Load(path string) ([]*Entry, error) {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn nil,err\n\t}\n\tdefer file.Close()\n\n\tlines, err := csv.NewReader(file).ReadAll()\n\tif err != nil {\n\t\treturn nil,err\n\t}\n\tvar output []*Entry\n\t// Loop through lines & turn into object\n\tfor idx, line := range lines {\n\t\tif idx == 0 {\n\t\t\tcontinue\n\t\t}\n\t\tif len (line) < 3 {\n\t\t\treturn nil, fmt.Errorf(\"malformed input error: %s\", line)\n\t\t}\n\t\tdone, err := strconv.ParseBool(line[0])\n\t\tdeadline, err := time.Parse(\"2006-01-02\",line[2])\n\t\tif err != nil {\n\t\t\treturn nil,err\n\t\t}\n\t\tvar data = Entry{\n\t\t\tDone: done,\n\t\t\tText: line[1],\n\t\t\tDeadline: deadline,\n\t\t}\n\t\toutput = append(output, &data)\n\t}\n\n\treturn output, nil\n}",
"func (m *LocalManager) List(ctx context.Context) (map[string]linker.Storage, error) {\n\tinstances := make(map[string]linker.Storage)\n\n\tfilepath.Walk(m.path, func(path string, info os.FileInfo, err error) error {\n\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tlog.From(ctx).Debug(\"loading\", zap.String(\"path\", path))\n\n\t\tfile := filepath.Base(path)\n\n\t\tif strings.HasPrefix(file, \"db-\") {\n\t\t\tinstance, err := NewLocalStorage(ctx, path)\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"recreating storage\")\n\t\t\t}\n\n\t\t\tinstances[strings.TrimPrefix(file, \"db-\")] = instance\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn instances, nil\n}",
"func LoadResourceChunkInfoAll(fileName string) []ResourceChunkInfo {\n\t// Convert the fileName into a CString and releases the memory afterwards\n\tcfileName := C.CString(fileName)\n\tdefer C.free(unsafe.Pointer(cfileName))\n\n\t// The length of the resulted array is saved in the chunkCount variable\n\tvar chunkCount C.uint\n\tcinfos := C.rresLoadResourceChunkInfoAll(cfileName, &chunkCount)\n\n\t// The C array can be released afterwards, because the values are stored in a golang slice\n\tdefer C.free(unsafe.Pointer(cinfos))\n\n\t// Iterate over the C array and store the values in a golang slice\n\tinfos := make([]ResourceChunkInfo, chunkCount)\n\tfor i := 0; i < int(chunkCount); i++ {\n\t\t// Get the C value from the C array\n\t\tret := C.GetResourceChunkInfoFromArray(cinfos, C.int(i))\n\t\t// Convert the C value into a golang value\n\t\tv := *(*ResourceChunkInfo)(unsafe.Pointer(&ret))\n\t\t// Save the golang value in the golang slice\n\t\tinfos[i] = v\n\t}\n\n\treturn infos\n}",
"func (o FioSpecOutput) BuiltinJobFiles() pulumi.StringArrayOutput {\n\treturn o.ApplyT(func(v FioSpec) []string { return v.BuiltinJobFiles }).(pulumi.StringArrayOutput)\n}",
"func (r *SynchronizationJobsCollectionRequest) Get(ctx context.Context) ([]SynchronizationJob, error) {\n\treturn r.GetN(ctx, 0)\n}",
"func LoadServices(filepath string) ([]Service, error) {\n\n\tinfo, err := os.Stat(filepath)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsize = info.Size()\n\tmodTime = info.ModTime()\n\n\tfile, e := os.Open(filepath)\n\n\tif e != nil {\n\t\treturn nil, fmt.Errorf(\"file error: %v\", e)\n\t}\n\n\tdefer file.Close()\n\n\tlog.Println(\"Just received \", filepath)\n\n\tservices := []Service{}\n\n\tscanner := bufio.NewScanner(file)\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\n\t\tdeclaration := regexp.MustCompile(`(\\S+)`)\n\t\tconfig := declaration.FindAllString(line, -1)\n\t\tif config == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif len(config) != 2 {\n\t\t\treturn nil, fmt.Errorf(\"file error: invalid format `%v` expected `{NAME} {URL}`\", line)\n\t\t}\n\t\tname, url := config[0], config[1]\n\t\tservices = append(services, Service{Name: name, URL: url})\n\t}\n\n\treturn services, nil\n}"
] | [
"0.6094294",
"0.6013153",
"0.5963445",
"0.57480615",
"0.57223684",
"0.568927",
"0.5666574",
"0.55211794",
"0.54390544",
"0.5378759",
"0.53388613",
"0.5317355",
"0.53161746",
"0.53106767",
"0.5286924",
"0.5278277",
"0.5275526",
"0.5274127",
"0.5247469",
"0.5245982",
"0.5228312",
"0.52238154",
"0.521854",
"0.5211498",
"0.52110773",
"0.52080333",
"0.5207208",
"0.5198935",
"0.51827735",
"0.51618046",
"0.51455027",
"0.5136415",
"0.5108947",
"0.51076794",
"0.5097138",
"0.50910854",
"0.508119",
"0.5078193",
"0.50648284",
"0.5039614",
"0.50377077",
"0.5033652",
"0.502847",
"0.50259477",
"0.501086",
"0.4989677",
"0.49871543",
"0.49861628",
"0.49731612",
"0.4972458",
"0.49659288",
"0.49495637",
"0.4942465",
"0.4941859",
"0.49317443",
"0.4923087",
"0.49222583",
"0.49114886",
"0.4909006",
"0.49089858",
"0.49052355",
"0.4898958",
"0.48988166",
"0.48984808",
"0.4894074",
"0.4886484",
"0.48854664",
"0.4871803",
"0.4864685",
"0.48579022",
"0.48380145",
"0.48362088",
"0.48339483",
"0.48282027",
"0.48280928",
"0.4814943",
"0.48062888",
"0.47955912",
"0.4769405",
"0.47684142",
"0.47643682",
"0.4762807",
"0.4761945",
"0.4761144",
"0.4758129",
"0.4756187",
"0.47507283",
"0.4744546",
"0.4743609",
"0.47425961",
"0.47410655",
"0.47347",
"0.47310716",
"0.47276905",
"0.47260177",
"0.47209287",
"0.47178394",
"0.47159848",
"0.47145134",
"0.47135472"
] | 0.6586617 | 0 |
ProcFS creates a proc.FileSystem representing the default procfs mountpoint /proc. When running inside a container, this will contain information from the container's pid namespace. | func ProcFS() proc.FileSystem {
fs, err := procfs.NewFileSystem("")
if err != nil {
glog.Fatal(err)
}
return fs
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ProcFS() *proc.FileSystem {\n\treturn proc.FS()\n}",
"func HostProcFS() proc.FileSystem {\n\thostProcFSOnce.Do(func() {\n\t\thostProcFS = findHostProcFS()\n\t})\n\n\treturn hostProcFS\n}",
"func HostProcFS() *proc.FileSystem {\n\thostProcFSOnce.Do(func() {\n\t\thostProcFS = findHostProcFS()\n\t})\n\n\treturn hostProcFS\n}",
"func NewFS(mountPoint string) (*FS, error) {\n\tfs, err := procfs.NewFS(mountPoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstat, err := fs.NewStat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FS{fs, stat.BootTime}, nil\n}",
"func MountProc(newroot string) error {\n\tsource := \"proc\"\n\ttarget := filepath.Join(newroot, \"/proc\")\n\tfstype := \"proc\"\n\tflags := 0\n\tdata := \"\"\n\n\tos.MkdirAll(target, 0755)\n\tif err := syscall.Mount(source, target, fstype, uintptr(flags), data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func mountFS(printWarning bool) error {\n\tif printWarning {\n\t\tlog.Warning(\"================================= WARNING ==========================================\")\n\t\tlog.Warning(\"BPF filesystem is not mounted. This will lead to network disruption when Cilium pods\")\n\t\tlog.Warning(\"are restarted. Ensure that the BPF filesystem is mounted in the host.\")\n\t\tlog.Warning(\"https://docs.cilium.io/en/stable/operations/system_requirements/#mounted-ebpf-filesystem\")\n\t\tlog.Warning(\"====================================================================================\")\n\t}\n\n\tlog.Infof(\"Mounting BPF filesystem at %s\", bpffsRoot)\n\n\tmapRootStat, err := os.Stat(bpffsRoot)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif err := os.MkdirAll(bpffsRoot, 0755); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to create bpf mount directory: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to stat the mount path %s: %s\", bpffsRoot, err)\n\n\t\t}\n\t} else if !mapRootStat.IsDir() {\n\t\treturn fmt.Errorf(\"%s is a file which is not a directory\", bpffsRoot)\n\t}\n\n\tif err := unix.Mount(bpffsRoot, bpffsRoot, \"bpf\", 0, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"failed to mount %s: %s\", bpffsRoot, err)\n\t}\n\treturn nil\n}",
"func (*CPIO9PFID) StatFS() (p9.FSStat, error) {\n\treturn p9.FSStat{}, syscall.ENOSYS\n}",
"func FileSystemCreate(f types.Filesystem) error {\n\tvar cmd *exec.Cmd\n\tvar debugCMD string\n\n\tswitch f.Mount.Format {\n\tcase \"swap\":\n\t\tcmd = exec.Command(\"/sbin/mkswap\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkswap\", f.Mount.Device)\n\tcase \"ext4\", \"ext3\", \"ext2\":\n\t\t// Add filesystem flags\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-t\")\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Format)\n\n\t\t// Add force\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-F\")\n\n\t\t// Add Device to formate\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Device)\n\n\t\t// Format disk\n\t\tcmd = exec.Command(\"/sbin/mke2fs\", f.Mount.Create.Options...)\n\t\tfor i := range f.Mount.Create.Options {\n\t\t\tdebugCMD = fmt.Sprintf(\"%s %s\", debugCMD, f.Mount.Create.Options[i])\n\t\t}\n\tcase \"vfat\":\n\t\tcmd = exec.Command(\"/sbin/mkfs.fat\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkfs.fat\", f.Mount.Device)\n\tdefault:\n\t\tlog.Warnf(\"Unknown filesystem type [%s]\", f.Mount.Format)\n\t}\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\n\treturn nil\n}",
"func New(c *tlc.Container, basePath string) *FsPool {\n\treturn &FsPool{\n\t\tcontainer: c,\n\t\tbasePath: basePath,\n\n\t\tfileIndex: int64(-1),\n\t\treader: nil,\n\t}\n}",
"func Statfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n\taddr := args[0].Pointer()\n\tstatfsAddr := args[1].Pointer()\n\n\tpath, _, err := copyInPath(t, addr, false /* allowEmpty */)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\n\treturn 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {\n\t\treturn statfsImpl(t, d, statfsAddr)\n\t})\n}",
"func readProcMounts(mountFilePath string) (mountInfos, error) {\n\tfile, err := os.Open(mountFilePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer file.Close()\n\treturn parseMountFrom(file)\n}",
"func (p *Resolver) ResolveFromProcfs(pid uint32) *model.ProcessCacheEntry {\n\tp.Lock()\n\tdefer p.Unlock()\n\treturn p.resolveFromProcfs(pid, procResolveMaxDepth)\n}",
"func Mount(mountpoint string) (err error) {\n\tlog.Println(\"Mounting filesystem\")\n\tc, err := fuse.Mount(\n\t\tmountpoint,\n\t\tfuse.FSName(\"pgfs\"),\n\t\tfuse.Subtype(\"pgfs\"),\n\t\t//fuse.ReadOnly(),\n\t\t//fuse.AllowOther(), // option allow_other only allowed if 'user_allow_other' is set in /etc/fuse.conf\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer close(c)\n\n\tif p := c.Protocol(); !p.HasInvalidate() {\n\t\treturn fmt.Errorf(\"kernel FUSE support is too old to have invalidations: version %v\", p)\n\t}\n\n\ttables, err := postgres.ListTables()\n\tif err != nil {\n\t\treturn\n\t}\n\tnodes := make(map[string]*Node)\n\tsrv := fs.New(c, nil)\n\n\tvar inode uint64 = 2\n\tfor _, t := range tables {\n\t\tnode := Node{\n\t\t\tName: t.Name,\n\t\t\tfuse: srv,\n\t\t\tInode: inode,\n\t\t\tType: fuse.DT_Dir,\n\t\t\tfs: &FS{\n\t\t\t\tNodes: map[string]*Node{\n\t\t\t\t\tt.Name + \".json\": &Node{\n\t\t\t\t\t\tName: t.Name + \".json\",\n\t\t\t\t\t\tfuse: srv,\n\t\t\t\t\t\tInode: inode + 1,\n\t\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t\t\tContent: []byte(\"\"),\n\t\t\t\t\t},\n\t\t\t\t\tt.Name + \".csv\": &Node{\n\t\t\t\t\t\tName: t.Name + \".csv\",\n\t\t\t\t\t\tfuse: srv,\n\t\t\t\t\t\tInode: inode + 2,\n\t\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t\t\tContent: []byte(\"\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tnodes[t.Name] = &node\n\t\tinode += 3\n\t}\n\n\tfilesys := &FS{\n\t\tNodes: nodes,\n\t}\n\n\terr = srv.Serve(filesys)\n\treturn\n}",
"func Cgroupfs(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &fs.Manager{\n\t\t\tCgroups: config,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}",
"func PhysFS(root, prefix string, indexes bool, alreadyinitialized bool) *localFileSystem {\n\tif !alreadyinitialized {\n\t\troot, err := filepath.Abs(root)\n\t\tfmt.Println(root)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = physfs.Init()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer physfs.Deinit()\n\t\terr = physfs.Mount(root, \"/\", true)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tfs := physfs.FileSystem()\n\treturn &localFileSystem{\n\t\tFileSystem: fs,\n\t\torigfs: fs,\n\t\troot: root,\n\t\tprefix: prefix,\n\t\tindexes: indexes,\n\t\tphysfs: true,\n\t}\n}",
"func statfsImpl(t *kernel.Task, d *fs.Dirent, addr usermem.Addr) error {\n\tinfo, err := d.Inode.StatFS(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\t// Construct the statfs structure and copy it out.\n\tstatfs := linux.Statfs{\n\t\tType: info.Type,\n\t\t// Treat block size and fragment size as the same, as\n\t\t// most consumers of this structure will expect one\n\t\t// or the other to be filled in.\n\t\tBlockSize: d.Inode.StableAttr.BlockSize,\n\t\tBlocks: info.TotalBlocks,\n\t\t// We don't have the concept of reserved blocks, so\n\t\t// report blocks free the same as available blocks.\n\t\t// This is a normal thing for filesystems, to do, see\n\t\t// udf, hugetlbfs, tmpfs, among others.\n\t\tBlocksFree: info.FreeBlocks,\n\t\tBlocksAvailable: info.FreeBlocks,\n\t\tFiles: info.TotalFiles,\n\t\tFilesFree: info.FreeFiles,\n\t\t// Same as Linux for simple_statfs, see fs/libfs.c.\n\t\tNameLength: linux.NAME_MAX,\n\t\tFragmentSize: d.Inode.StableAttr.BlockSize,\n\t\t// Leave other fields 0 like simple_statfs does.\n\t}\n\t_, err = t.CopyOut(addr, &statfs)\n\treturn err\n}",
"func NewFS(basedir string) (kvs *FS, err error) {\n\treturn newFileSystem(basedir, os.MkdirAll)\n}",
"func NewMemMapFS() FS {\n\treturn afero.NewMemMapFs()\n}",
"func (fs *FileSystem) Mounts() []proc.Mount {\n\tdata, err := fs.ReadFile(\"self/mountinfo\")\n\tif err != nil {\n\t\tglog.Fatalf(\"Couldn't read self/mountinfo from proc\")\n\t}\n\n\tvar mounts []proc.Mount\n\tscanner := bufio.NewScanner(strings.NewReader(string(data)))\n\tfor scanner.Scan() {\n\t\tline := scanner.Text()\n\t\tm, err := parseMount(line)\n\t\tif err != nil {\n\t\t\tglog.Fatal(err)\n\t\t}\n\t\tmounts = append(mounts, m)\n\t}\n\n\treturn mounts\n}",
"func NewFileSystem() FileSystem {\r\n\treturn &osFileSystem{}\r\n}",
"func (fs *Fs) ParseProcMounts(\n\tctx context.Context,\n\tcontent io.Reader) ([]gofsutil.Info, error) {\n\tr, _, err := gofsutil.ReadProcMountsFrom(ctx, content, false,\n\t\tgofsutil.ProcMountsFields, gofsutil.DefaultEntryScanFunc())\n\treturn r, err\n}",
"func (img *Image) FileSystem() (string, error) {\n\treturn devFileSystem(img)\n}",
"func readProcMounts(mountFilePath string, out *[]MountPoint) (uint32, error) {\n\tfile, err := os.Open(mountFilePath)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer file.Close()\n\treturn readProcMountsFrom(file, out)\n}",
"func (*FileSystemBase) Statfs(path string, stat *Statfs_t) int {\n\treturn -ENOSYS\n}",
"func (o ClusterBuildStrategySpecBuildStepsSecurityContextOutput) ProcMount() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsSecurityContext) *string { return v.ProcMount }).(pulumi.StringPtrOutput)\n}",
"func (o ClusterBuildStrategySpecBuildStepsSecurityContextPtrOutput) ProcMount() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsSecurityContext) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ProcMount\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o BuildStrategySpecBuildStepsSecurityContextOutput) ProcMount() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsSecurityContext) *string { return v.ProcMount }).(pulumi.StringPtrOutput)\n}",
"func NewFilesystem(_ context.Context, cfgMap map[string]interface{}) (qfs.Filesystem, error) {\n\treturn NewFS(cfgMap)\n}",
"func (o BuildStrategySpecBuildStepsSecurityContextPtrOutput) ProcMount() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildStrategySpecBuildStepsSecurityContext) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ProcMount\n\t}).(pulumi.StringPtrOutput)\n}",
"func (p *dfCollector) setProcPath(cfg interface{}) error {\n\tprocPath, err := config.GetConfigItem(cfg, \"proc_path\")\n\tif err == nil && len(procPath.(string)) > 0 {\n\t\tprocPathStats, err := os.Stat(procPath.(string))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !procPathStats.IsDir() {\n\t\t\treturn errors.New(fmt.Sprintf(\"%s is not a directory\", procPath.(string)))\n\t\t}\n\t\tp.proc_path = procPath.(string)\n\t}\n\treturn nil\n}",
"func newProcInode(iops fs.InodeOperations, msrc *fs.MountSource, typ fs.InodeType, t *kernel.Task) *fs.Inode {\n\tsattr := fs.StableAttr{\n\t\tDeviceID: device.ProcDevice.DeviceID(),\n\t\tInodeID: device.ProcDevice.NextIno(),\n\t\tBlockSize: usermem.PageSize,\n\t\tType: typ,\n\t}\n\tif t != nil {\n\t\tiops = &taskOwnedInodeOps{iops, t}\n\t}\n\treturn fs.NewInode(iops, msrc, sattr)\n}",
"func NewFileSystem() FileSystem {\n\treturn &fs{\n\t\trunner: NewCommandRunner(),\n\t}\n}",
"func (p *Resolver) SetProcessFilesystem(entry *model.ProcessCacheEntry) (string, error) {\n\tif entry.FileEvent.MountID != 0 {\n\t\tfs, err := p.mountResolver.ResolveFilesystem(entry.FileEvent.MountID, entry.Pid, entry.ContainerID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tentry.FileEvent.Filesystem = fs\n\t}\n\n\treturn entry.FileEvent.Filesystem, nil\n}",
"func NewOsFS() FS {\n\treturn afero.NewOsFs()\n}",
"func (daemon *Daemon) openContainerFS(container *container.Container) (_ *containerFSView, err error) {\n\tif err := daemon.Mount(container); err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\t_ = daemon.Unmount(container)\n\t\t}\n\t}()\n\n\tmounts, err := daemon.setupMounts(container)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\t_ = container.UnmountVolumes(daemon.LogVolumeEvent)\n\t\t}\n\t}()\n\n\t// Setup in initial mount namespace complete. We're ready to unshare the\n\t// mount namespace and bind the volume mounts into that private view of\n\t// the container FS.\n\ttodo := make(chan future)\n\tdone := make(chan error)\n\terr = unshare.Go(unix.CLONE_NEWNS,\n\t\tfunc() error {\n\t\t\tif err := mount.MakeRSlave(\"/\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, m := range mounts {\n\t\t\t\tdest, err := container.GetResourcePath(m.Destination)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tvar stat os.FileInfo\n\t\t\t\tstat, err = os.Stat(m.Source)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tbindMode := \"rbind\"\n\t\t\t\tif m.NonRecursive {\n\t\t\t\t\tbindMode = \"bind\"\n\t\t\t\t}\n\t\t\t\twriteMode := \"ro\"\n\t\t\t\tif m.Writable {\n\t\t\t\t\twriteMode = \"rw\"\n\t\t\t\t\tif m.ReadOnlyNonRecursive {\n\t\t\t\t\t\treturn errors.New(\"options conflict: Writable && ReadOnlyNonRecursive\")\n\t\t\t\t\t}\n\t\t\t\t\tif m.ReadOnlyForceRecursive {\n\t\t\t\t\t\treturn errors.New(\"options conflict: Writable && ReadOnlyForceRecursive\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif m.ReadOnlyNonRecursive && m.ReadOnlyForceRecursive {\n\t\t\t\t\treturn errors.New(\"options conflict: ReadOnlyNonRecursive && ReadOnlyForceRecursive\")\n\t\t\t\t}\n\n\t\t\t\t// openContainerFS() is called for temporary mounts\n\t\t\t\t// outside the container. Soon these will be unmounted\n\t\t\t\t// with lazy unmount option and given we have mounted\n\t\t\t\t// them rbind, all the submounts will propagate if these\n\t\t\t\t// are shared. If daemon is running in host namespace\n\t\t\t\t// and has / as shared then these unmounts will\n\t\t\t\t// propagate and unmount original mount as well. So make\n\t\t\t\t// all these mounts rprivate. 
Do not use propagation\n\t\t\t\t// property of volume as that should apply only when\n\t\t\t\t// mounting happens inside the container.\n\t\t\t\topts := strings.Join([]string{bindMode, writeMode, \"rprivate\"}, \",\")\n\t\t\t\tif err := mount.Mount(m.Source, dest, \"\", opts); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif !m.Writable && !m.ReadOnlyNonRecursive {\n\t\t\t\t\tif err := makeMountRRO(dest); err != nil {\n\t\t\t\t\t\tif m.ReadOnlyForceRecursive {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.G(context.TODO()).WithError(err).Debugf(\"Failed to make %q recursively read-only\", dest)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn mounttree.SwitchRoot(container.BaseFS)\n\t\t},\n\t\tfunc() {\n\t\t\tdefer close(done)\n\n\t\t\tfor it := range todo {\n\t\t\t\terr := it.fn()\n\t\t\t\tif it.res != nil {\n\t\t\t\t\tit.res <- err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// The thread will terminate when this goroutine returns, taking the\n\t\t\t// mount namespace and all the volume bind-mounts with it.\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvw := &containerFSView{\n\t\td: daemon,\n\t\tctr: container,\n\t\ttodo: todo,\n\t\tdone: done,\n\t}\n\truntime.SetFinalizer(vw, (*containerFSView).Close)\n\treturn vw, nil\n}",
"func getProcPath(pid string, file string) ([]byte, error) {\n\tprocessPath := filepath.Join(\"/proc\", pid, file)\n\tdat, err := ioutil.ReadFile(processPath)\n\tif err != nil {\n\t\t//fmt.Println(\"error occured opening file:\", err)\n\t\treturn nil, err\n\t}\n\treturn dat, nil\n}",
"func ListProcFds() ([]string, error) {\n\t// returns the names of all files matching pattern\n\t// or nil if there is no matching file\n\tfs, err := filepath.Glob(\"/proc/[0-9]*/fd/[0-9]*\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs, nil\n}",
"func Filesystem(t *testing.T, fs string) {\n\thas, err := proc.HasFilesystem(fs)\n\tif err != nil {\n\t\tt.Fatalf(\"error while checking filesystem presence: %s\", err)\n\t}\n\tif !has {\n\t\tt.Skipf(\"%s filesystem seems not supported\", fs)\n\t}\n}",
"func (j *juicefs) MountFs(ctx context.Context, appInfo *config.AppInfo, jfsSetting *config.JfsSetting) (string, error) {\n\tvar mnt podmount.MntInterface\n\tif jfsSetting.UsePod {\n\t\tjfsSetting.MountPath = filepath.Join(config.PodMountBase, jfsSetting.UniqueId)\n\t\tmnt = j.podMount\n\t} else {\n\t\tjfsSetting.MountPath = filepath.Join(config.MountBase, jfsSetting.UniqueId)\n\t\tmnt = j.processMount\n\t}\n\n\terr := mnt.JMount(ctx, appInfo, jfsSetting)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tklog.V(5).Infof(\"Mount: mounting %q at %q with options %v\", util.StripPasswd(jfsSetting.Source), jfsSetting.MountPath, jfsSetting.Options)\n\treturn jfsSetting.MountPath, nil\n}",
"func NewProc(fname string) (*Proc, error) {\n\tf, err := os.Open(fname)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Proc{File: f, Buf: bufio.NewReader(f)}, nil\n}",
"func NewFS() FS {\n\treturn make(FS)\n}",
"func NewFS(dataDir string) Interface {\n\treturn &fsLoader{dataDir: dataDir}\n}",
"func (s *Store) NewProc(app *App, name string) *Proc {\n\treturn &Proc{\n\t\tName: name,\n\t\tApp: app,\n\t\tdir: cp.NewDir(app.dir.Prefix(procsPath, string(name)), s.GetSnapshot()),\n\t}\n}",
"func NewDaosFileSystem(group, pool, container string) (*DaosFileSystem, error) {\n\tdebug.Printf(\"Connecting to %s (group: %q)\", pool, group)\n\tuh, err := ufd.Connect(group, pool)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"Connection to %q failed\", pool)\n\t}\n\tdebug.Printf(\"Connected to %s\", pool)\n\n\tdfs := &DaosFileSystem{\n\t\tName: container,\n\t\tuh: uh,\n\t\troot: &DaosNode{\n\t\t\toid: RootOID,\n\t\t\tparent: RootOID,\n\t\t\tmodeType: os.ModeDir,\n\t\t\tName: \"/\",\n\t\t},\n\t}\n\tdfs.root.fs = dfs\n\tdfs.og = newOidGenerator(dfs)\n\n\tif err := dfs.openOrCreateContainer(); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := dfs.getRootObject(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn dfs, nil\n}",
"func main() {\n\ttype SysProcIDMap struct {\n\t\tContainerID int\n\t\tHostID int\n\t\tSize int\n\t}\n\tvar rootfsPath string\n\n\tcmd := reexec.Command(\"nsInitialisation\", rootfsPath)\n\tcmd = exec.Command(\"/bin/bash\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stdin = os.Stdin\n\tcmd.Stderr = os.Stderr\n\n\tcmd.Env = []string{\"PS1=-[ns-process]- # \"}\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCloneflags: syscall.CLONE_NEWUTS |\n\t\t\tsyscall.CLONE_NEWNS |\n\t\t\tsyscall.CLONE_NEWIPC |\n\t\t\tsyscall.CLONE_NEWNET |\n\t\t\tsyscall.CLONE_NEWPID |\n\t\t\tsyscall.CLONE_NEWUSER,\n\t\tUidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getuid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t\tGidMappings: []syscall.SysProcIDMap{\n\t\t\t{\n\t\t\t\tContainerID: 0,\n\t\t\t\tHostID: os.Getgid(),\n\t\t\t\tSize: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tif err := cmd.Run(); err != nil {\n\t\tfmt.Printf(\"Error running the /bin/bash command %s\\n\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func umountProc(syncFD int) {\n\tsyncFile := os.NewFile(uintptr(syncFD), \"procfs umount sync FD\")\n\tbuf := make([]byte, 1)\n\tif w, err := syncFile.Write(buf); err != nil || w != 1 {\n\t\tutil.Fatalf(\"unable to write into the proc umounter descriptor: %v\", err)\n\t}\n\tsyncFile.Close()\n\n\tvar waitStatus unix.WaitStatus\n\tif _, err := unix.Wait4(0, &waitStatus, 0, nil); err != nil {\n\t\tutil.Fatalf(\"error waiting for the proc umounter process: %v\", err)\n\t}\n\tif !waitStatus.Exited() || waitStatus.ExitStatus() != 0 {\n\t\tutil.Fatalf(\"the proc umounter process failed: %v\", waitStatus)\n\t}\n\tif err := unix.Access(\"/proc/self\", unix.F_OK); err != unix.ENOENT {\n\t\tutil.Fatalf(\"/proc is still accessible\")\n\t}\n}",
"func CreateProcPath(args ...string) string {\n\tvar procPath string\n\tfor _, str := range args {\n\t\tprocPath = procPath + \"/\" + str\n\t}\n\treturn procPath\n}",
"func MakeFileSystem(idp string, typef byte) {\n\tvar flgfound bool = false\n\tmpartition := Mounted{}\n\tfor _, mp := range sliceMP {\n\t\tidm := \"vd\" + string(mp.Letter) + strconv.FormatInt(mp.Number, 10)\n\t\tif idp == idm {\n\t\t\tflgfound = true\n\t\t\tmpartition = mp\n\t\t\tbreak\n\t\t}\n\t}\n\tif flgfound {\n\t\tvar bname [16]byte\n\t\tpartition := mpartition.Part\n\t\t// Se realiza el formateo de la partición\n\t\tif typef == 'u' {\n\t\t\twriteByteArray(mpartition.Path, partition.PartStart, partition.PartSize)\n\t\t}\n\t\t// Current Position Disk Partition\n\t\tvar cpd int64\n\t\t// Se obtiene el tamaño de las estructuras y la cantidad (#Estructuras)\n\t\tsStrc, cStrc := GetNumberOfStructures(partition.PartSize)\n\t\t// Se creará el Super Boot\n\t\tnewSB := SuperBoot{}\n\t\t// Nombre HD\n\t\tcopy(bname[:], mpartition.Name)\n\t\tnewSB.NombreHd = bname\n\t\tnewSB.FechaCreacion = getCurrentTime()\n\t\tnewSB.FechaUltimoMontaje = mpartition.TMount\n\t\tnewSB.ConteoMontajes = 1\n\t\t// Cantidad de estructuras creadas\n\t\tnewSB.CantArbolVirtual = 1\n\t\tnewSB.CantDetalleDirectorio = 1\n\t\tnewSB.CantidadInodos = 1\n\t\tnewSB.CantidadBloques = 2\n\t\t// Cantidad de estructuras ocupadas...\n\t\tnewSB.ArbolesVirtualesLibres = cStrc - 1\n\t\tnewSB.DetallesDirectorioLibres = cStrc - 1\n\t\tnewSB.InodosLibres = (cStrc * 5) - 1\n\t\tnewSB.BloquesLibres = (cStrc * 20) - 2 // Por los dos bloques del archivo user.txt\n\t\t// Inicio BMap AVD = Inicio_Particion + SizeSB\n\t\tcpd = partition.PartStart + sStrc.sizeSB\n\t\tnewSB.AptBmapArbolDirectorio = cpd\n\t\t// Inicio AVD = Inicio BitMap AVD + #Estructuras\n\t\tcpd = cpd + cStrc\n\t\tnewSB.AptArbolDirectorio = cpd\n\t\t// Inicio BMap DDir = Inicio AVD + (sizeAVD*#Estructuras)\n\t\tcpd = cpd + (sStrc.sizeAV * cStrc)\n\t\tnewSB.AptBmapDetalleDirectorio = cpd\n\t\t// Inicio DDir = Inicio BMap DDir + #Estructuras\n\t\tcpd = cpd + cStrc\n\t\tnewSB.AptDetalleDirectorio = cpd\n\t\t// Inicio BMap Inodo = Inicio DDir + (sizeDDir * #Estructuras)\n\t\tcpd = cpd + (sStrc.sizeDDir * cStrc)\n\t\tnewSB.AptBmapTablaInodo = cpd\n\t\t// Inicio Inodos = Inicio BMap Inodo + (5 * sizeInodo)\n\t\tcpd = cpd + (5 * cStrc)\n\t\tnewSB.AptTablaInodo = cpd\n\t\t// Inicio BMap Bloque = Inicio Inodos + (5 * sizeInodo * #Estructuras)\n\t\tcpd = cpd + (5 * sStrc.sizeInodo * cStrc)\n\t\tnewSB.AptBmapBloques = cpd\n\t\t// Inicio Bloque = Inicio Inodo + (20 * #Estructuras)\n\t\tcpd = cpd + (20 * cStrc)\n\t\tnewSB.AptBloques = cpd\n\t\t// Inicio Bitacora (Log) = Inicio Bloque + (20 * sizeBloque * #Estructuras)\n\t\tcpd = cpd + (20 * sStrc.sizeBD * cStrc)\n\t\tnewSB.AptLog = cpd\n\t\t// Inicio Copia SB = Inicio Bitacora + (sizeLog * #Estructuras)\n\t\tcpd = cpd + (sStrc.sizeLog * cStrc)\n\t\t//--- Se guarda el tamaño de las estructuras ------------------------------------\n\t\tnewSB.TamStrcArbolDirectorio = sStrc.sizeAV\n\t\tnewSB.TamStrcDetalleDirectorio = sStrc.sizeDDir\n\t\tnewSB.TamStrcInodo = sStrc.sizeInodo\n\t\tnewSB.TamStrcBloque = sStrc.sizeBD\n\t\t//--- Se guarda el primer bit vacio del bitmap de cada estructura ---------------\n\t\tnewSB.PrimerBitLibreArbolDir = 2\n\t\tnewSB.PrimerBitLibreDetalleDir = 2\n\t\tnewSB.PrimerBitLibreTablaInodo = 2\n\t\tnewSB.PrimerBitLibreBloques = 3\n\t\t//--- Numero Magico -------------------------------------------------------------\n\t\tnewSB.NumeroMagico = 201503442\n\t\t//--- Escribir SB en Disco ------------------------------------------------------\n\t\tWriteSuperBoot(mpartition.Path, newSB, partition.PartStart)\n\t\t//--- Escritura de 
la Copia de SB -----------------------------------------------\n\t\tWriteSuperBoot(mpartition.Path, newSB, cpd)\n\t\t//--- (1) Crear un AVD : root \"/\" -----------------------------------------------\n\t\tavdRoot := ArbolVirtualDir{}\n\t\tavdRoot.FechaCreacion = getCurrentTime()\n\t\tcopy(avdRoot.NombreDirectorio[:], \"/\")\n\t\tcopy(avdRoot.AvdPropietario[:], \"root\")\n\t\tcopy(avdRoot.AvdGID[:], \"root\")\n\t\tavdRoot.AvdPermisos = 777\n\t\tavdRoot.AptDetalleDirectorio = 1\n\t\tWriteAVD(mpartition.Path, avdRoot, newSB.AptArbolDirectorio)\n\t\t//--- (2) Crear un Detalle de Directorio ----------------------------------------\n\t\tdetalleDir := DetalleDirectorio{}\n\t\tarchivoInf := InfoArchivo{}\n\t\tarchivoInf.FechaCreacion = getCurrentTime()\n\t\tarchivoInf.FechaModifiacion = getCurrentTime()\n\t\tcopy(archivoInf.FileName[:], \"user.txt\")\n\t\tarchivoInf.ApInodo = 1\n\t\tdetalleDir.InfoFile[0] = archivoInf\n\t\tWriteDetalleDir(mpartition.Path, detalleDir, newSB.AptDetalleDirectorio)\n\t\t//--- (3) Crear una Tabla de Inodo ----------------------------------------------\n\t\tstrAux := \"1,G,root\\n1,U,root,201503442\\n\"\n\t\ttbInodo := TablaInodo{}\n\t\ttbInodo.NumeroInodo = 1 // Primer Inodo creado\n\t\ttbInodo.SizeArchivo = int64(len(strAux))\n\t\ttbInodo.CantBloquesAsignados = 2\n\t\ttbInodo.AptBloques[0] = int64(1)\n\t\ttbInodo.AptBloques[1] = int64(2)\n\t\tcopy(tbInodo.IDPropietario[:], \"root\")\n\t\tcopy(tbInodo.IDUGrupo[:], \"root\")\n\t\ttbInodo.IPermisos = 777\n\t\tWriteTInodo(mpartition.Path, tbInodo, newSB.AptTablaInodo)\n\t\t//--- (4) Creación de los Bloques de datos --------------------------------------\n\t\tbloque1 := BloqueDeDatos{}\n\t\tcopy(bloque1.Data[:], strAux[0:25])\n\t\tWriteBloqueD(mpartition.Path, bloque1, newSB.AptBloques)\n\t\tbloque2 := BloqueDeDatos{}\n\t\tcopy(bloque2.Data[:], strAux[25:len(strAux)])\n\t\tWriteBloqueD(mpartition.Path, bloque2, newSB.AptBloques+newSB.TamStrcBloque)\n\t\t//--- (5) Escribir en BitMap ----------------------------------------------------\n\t\tauxBytes := []byte{1}\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapArbolDirectorio)\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapDetalleDirectorio)\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapTablaInodo)\n\t\tauxBytes = append(auxBytes, 1)\n\t\tWriteBitMap(mpartition.Path, auxBytes, newSB.AptBmapBloques)\n\t} else {\n\t\tfmt.Println(\"[!] La particion\", idp, \" no se encuentra montada...\")\n\t}\n}",
"func (fs *Memory) FileSystem() *afero.Afero {\n\treturn fs.fs\n}",
"func MakeFsOnDisk() FileSystem { return filesys.MakeFsOnDisk() }",
"func setupMounts(ctx context.Context, c *Container, s *specs.Spec) error {\n\tvar mounts []specs.Mount\n\t// Override the default mounts which are duplicate with user defined ones.\n\tfor _, sm := range s.Mounts {\n\t\tdup := false\n\t\tfor _, cm := range c.Mounts {\n\t\t\tif sm.Destination == cm.Destination {\n\t\t\t\tdup = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif dup {\n\t\t\tcontinue\n\t\t}\n\t\tif sm.Destination == \"/dev/shm\" && c.HostConfig.ShmSize != nil {\n\t\t\tsm.Options = append(sm.Options, fmt.Sprintf(\"size=%s\", strconv.FormatInt(*c.HostConfig.ShmSize, 10)))\n\t\t}\n\t\tmounts = append(mounts, sm)\n\t}\n\t// TODO: we can suggest containerd to add the cgroup into the default spec.\n\tmounts = append(mounts, specs.Mount{\n\t\tDestination: \"/sys/fs/cgroup\",\n\t\tType: \"cgroup\",\n\t\tSource: \"cgroup\",\n\t\tOptions: []string{\"ro\", \"nosuid\", \"noexec\", \"nodev\"},\n\t})\n\n\tif c.HostConfig == nil {\n\t\treturn nil\n\t}\n\t// user defined mount\n\tfor _, mp := range c.Mounts {\n\t\tif trySetupNetworkMount(mp, c) {\n\t\t\t// ignore the network mount, we will handle it later.\n\t\t\tcontinue\n\t\t}\n\n\t\t// check duplicate mountpoint\n\t\tfor _, sm := range mounts {\n\t\t\tif sm.Destination == mp.Destination {\n\t\t\t\treturn fmt.Errorf(\"duplicate mount point: %s\", mp.Destination)\n\t\t\t}\n\t\t}\n\n\t\tpg := mp.Propagation\n\t\trootfspg := s.Linux.RootfsPropagation\n\t\t// Set rootfs propagation, default setting is private.\n\t\tswitch pg {\n\t\tcase \"shared\", \"rshared\":\n\t\t\tif rootfspg != \"shared\" && rootfspg != \"rshared\" {\n\t\t\t\ts.Linux.RootfsPropagation = \"shared\"\n\t\t\t}\n\t\tcase \"slave\", \"rslave\":\n\t\t\tif rootfspg != \"shared\" && rootfspg != \"rshared\" && rootfspg != \"slave\" && rootfspg != \"rslave\" {\n\t\t\t\ts.Linux.RootfsPropagation = \"rslave\"\n\t\t\t}\n\t\t}\n\n\t\topts := []string{\"rbind\"}\n\t\tif !mp.RW {\n\t\t\topts = append(opts, \"ro\")\n\t\t}\n\t\tif pg != \"\" {\n\t\t\topts = append(opts, pg)\n\t\t}\n\n\t\t// TODO: support copy data.\n\n\t\tif mp.Destination == \"/dev/shm\" && c.HostConfig.ShmSize != nil {\n\t\t\topts = []string{fmt.Sprintf(\"size=%s\", strconv.FormatInt(*c.HostConfig.ShmSize, 10))}\n\t\t}\n\n\t\tmounts = append(mounts, specs.Mount{\n\t\t\tSource: mp.Source,\n\t\t\tDestination: mp.Destination,\n\t\t\tType: \"bind\",\n\t\t\tOptions: opts,\n\t\t})\n\t}\n\n\t// if disable hostfiles, we will not mount the hosts files into container.\n\tif !c.Config.DisableNetworkFiles {\n\t\tmounts = append(mounts, generateNetworkMounts(c)...)\n\t}\n\n\ts.Mounts = mounts\n\n\tif c.HostConfig.Privileged {\n\t\tif !s.Root.Readonly {\n\t\t\t// Clear readonly for /sys.\n\t\t\tfor i := range s.Mounts {\n\t\t\t\tif s.Mounts[i].Destination == \"/sys\" {\n\t\t\t\t\tclearReadonly(&s.Mounts[i])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\treturn nil\n}",
"func (fsys *FS) Statfs(path string, stat *fuse.Statfs_t) (errc int) {\n\tdefer fs.Trace(path, \"\")(\"stat=%+v, errc=%d\", stat, &errc)\n\tconst blockSize = 4096\n\tfsBlocks := uint64(1 << 50)\n\tif runtime.GOOS == \"windows\" {\n\t\tfsBlocks = (1 << 43) - 1\n\t}\n\tstat.Blocks = fsBlocks // Total data blocks in file system.\n\tstat.Bfree = fsBlocks // Free blocks in file system.\n\tstat.Bavail = fsBlocks // Free blocks in file system if you're not root.\n\tstat.Files = 1E9 // Total files in file system.\n\tstat.Ffree = 1E9 // Free files in file system.\n\tstat.Bsize = blockSize // Block size\n\tstat.Namemax = 255 // Maximum file name length?\n\tstat.Frsize = blockSize // Fragment size, smallest addressable data size in the file system.\n\treturn 0\n}",
"func (c *LiveCollector) FS() fslib.FS {\n\treturn c.SourceFS\n}",
"func (sp SourceSpec) NewFilesystem(base string) *Filesystem {\n\treturn &Filesystem{SourceSpec: sp, Base: base}\n}",
"func OwnFS(fs http.FileSystem, root, prefix string, indexes bool) *localFileSystem {\n\treturn &localFileSystem{\n\t\tFileSystem: fs,\n\t\torigfs: fs,\n\t\troot: \"/root/\" + root,\n\t\tprefix: prefix,\n\t\tindexes: indexes,\n\t\tphysfs: false,\n\t}\n}",
"func NewFilesystemCollector(logger log.Logger) (Collector, error) {\n\tif *oldMountPointsExcluded != \"\" {\n\t\tif !mountPointsExcludeSet {\n\t\t\tlevel.Warn(logger).Log(\"msg\", \"--collector.filesystem.ignored-mount-points is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.mount-points-exclude\")\n\t\t\t*mountPointsExclude = *oldMountPointsExcluded\n\t\t} else {\n\t\t\treturn nil, errors.New(\"--collector.filesystem.ignored-mount-points and --collector.filesystem.mount-points-exclude are mutually exclusive\")\n\t\t}\n\t}\n\n\tif *oldFSTypesExcluded != \"\" {\n\t\tif !fsTypesExcludeSet {\n\t\t\tlevel.Warn(logger).Log(\"msg\", \"--collector.filesystem.ignored-fs-types is DEPRECATED and will be removed in 2.0.0, use --collector.filesystem.fs-types-exclude\")\n\t\t\t*fsTypesExclude = *oldFSTypesExcluded\n\t\t} else {\n\t\t\treturn nil, errors.New(\"--collector.filesystem.ignored-fs-types and --collector.filesystem.fs-types-exclude are mutually exclusive\")\n\t\t}\n\t}\n\n\tsubsystem := \"filesystem\"\n\tlevel.Info(logger).Log(\"msg\", \"Parsed flag --collector.filesystem.mount-points-exclude\", \"flag\", *mountPointsExclude)\n\tmountPointPattern := regexp.MustCompile(*mountPointsExclude)\n\tlevel.Info(logger).Log(\"msg\", \"Parsed flag --collector.filesystem.fs-types-exclude\", \"flag\", *fsTypesExclude)\n\tfilesystemsTypesPattern := regexp.MustCompile(*fsTypesExclude)\n\n\tsizeDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, subsystem, \"size_bytes\"),\n\t\t\"Filesystem size in bytes.\",\n\t\tfilesystemLabelNames, nil,\n\t)\n\n\tfreeDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, subsystem, \"free_bytes\"),\n\t\t\"Filesystem free space in bytes.\",\n\t\tfilesystemLabelNames, nil,\n\t)\n\n\tavailDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, subsystem, \"avail_bytes\"),\n\t\t\"Filesystem space available to non-root users in bytes.\",\n\t\tfilesystemLabelNames, nil,\n\t)\n\n\tfilesDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, subsystem, \"files\"),\n\t\t\"Filesystem total file nodes.\",\n\t\tfilesystemLabelNames, nil,\n\t)\n\n\tfilesFreeDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, subsystem, \"files_free\"),\n\t\t\"Filesystem total free file nodes.\",\n\t\tfilesystemLabelNames, nil,\n\t)\n\n\troDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, subsystem, \"readonly\"),\n\t\t\"Filesystem read-only status.\",\n\t\tfilesystemLabelNames, nil,\n\t)\n\n\tdeviceErrorDesc := prometheus.NewDesc(\n\t\tprometheus.BuildFQName(namespace, subsystem, \"device_error\"),\n\t\t\"Whether an error occurred while getting statistics for the given device.\",\n\t\tfilesystemLabelNames, nil,\n\t)\n\n\treturn &filesystemCollector{\n\t\texcludedMountPointsPattern: mountPointPattern,\n\t\texcludedFSTypesPattern: filesystemsTypesPattern,\n\t\tsizeDesc: sizeDesc,\n\t\tfreeDesc: freeDesc,\n\t\tavailDesc: availDesc,\n\t\tfilesDesc: filesDesc,\n\t\tfilesFreeDesc: filesFreeDesc,\n\t\troDesc: roDesc,\n\t\tdeviceErrorDesc: deviceErrorDesc,\n\t\tlogger: logger,\n\t}, nil\n}",
"func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n\tfd := args[0].Int()\n\tstatfsAddr := args[1].Pointer()\n\n\tfile := t.GetFile(fd)\n\tif file == nil {\n\t\treturn 0, nil, syserror.EBADF\n\t}\n\tdefer file.DecRef()\n\n\treturn 0, nil, statfsImpl(t, file.Dirent, statfsAddr)\n}",
"func isRealProc(mountPoint string) (bool, error) {\n\tstat := syscall.Statfs_t{}\n\terr := syscall.Statfs(mountPoint, &stat)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87\n\treturn stat.Type == 0x9fa0, nil\n}",
"func NewProcessInfo() (*ProcInfo, error) {\n\tp := ProcInfo{make(map[int]ProcessCtx), sync.RWMutex{}}\n\tprocDir, err := os.Open(\"/proc\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer procDir.Close()\n\tentries, err := procDir.Readdirnames(-1)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// iterate over each pid\n\tfor _, procEntry := range entries {\n\t\tpid, err := strconv.ParseUint(procEntry, 10, 32)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\ttaskDir, err := os.Open(fmt.Sprintf(\"/proc/%d/task\", pid))\n\t\tif err != nil {\n\t\t\tlogger.Debug(\"could not read /proc/pid/task file:\", \"pid\", pid, \"error\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tdefer taskDir.Close()\n\t\tprocessTasks, err := taskDir.Readdirnames(-1)\n\t\tif err != nil {\n\t\t\tlogger.Debug(\"could not read task dir\", \"error\", err.Error())\n\t\t\tcontinue\n\t\t}\n\t\tfor _, task := range processTasks {\n\t\t\ttaskDir := fmt.Sprintf(\"/proc/%d/task/%v\", pid, task)\n\t\t\ttaskStatus := fmt.Sprintf(\"/proc/%d/task/%v/status\", pid, task)\n\t\t\tdata, err := ioutil.ReadFile(taskStatus)\n\t\t\tif err != nil {\n\t\t\t\t// process might have exited - ignore this task\n\t\t\t\tlogger.Debug(\"could not read /proc/pid/task/pid/status file\", \"error\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprocessStatus, err := parseProcStatus(data, taskStatus)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debug(\"could not parse task status\", \"error\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tcontainerId, err := containers.GetContainerIdFromTaskDir(taskDir)\n\t\t\tif err != nil {\n\t\t\t\tlogger.Debug(\"could not get containerid from task dir\", \"error\", err.Error())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tprocessStatus.ContainerID = containerId\n\t\t\tp.UpdateElement(int(processStatus.HostTid), processStatus)\n\t\t}\n\t}\n\treturn &p, nil\n}",
"func NewFS(f fs.Fs) *FS {\n\tfsys := &FS{\n\t\tFS: mountlib.NewFS(f),\n\t\tf: f,\n\t\topenDirs: newOpenFiles(0x01),\n\t\topenFilesWr: newOpenFiles(0x02),\n\t\topenFilesRd: newOpenFiles(0x03),\n\t\tready: make(chan (struct{})),\n\t}\n\treturn fsys\n}",
"func newFileSystem(basedir string, mkdir osMkdirAll) (*FS, error) {\n\tif err := mkdir(basedir, 0700); err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FS{basedir: basedir}, nil\n}",
"func NewMemFs() FileSystem {\n\treturn &memFS{\n\t\tfiles: &hashmap.HashMap{},\n\t}\n}",
"func (f *FileSystem) Statfs(ctx context.Context, req *fuse.StatfsRequest, resp *fuse.StatfsResponse) error {\n\t// each block size is 4096 bytes by default.\n\tconst unit = uint64(4096)\n\n\tresp.Bsize = uint32(unit)\n\tresp.Blocks = uint64(f.account.Disk.Size) / unit\n\tresp.Bavail = uint64(f.account.Disk.Avail) / unit\n\tresp.Bfree = uint64(f.account.Disk.Avail) / unit\n\n\treturn nil\n}",
"func baseProcDataDir() string {\n\treturn path.Join(dataDir, \"proc\")\n}",
"func New(homepath string) (fs.FileProvider, error) {\n\treturn &physicalFS{\n\t\thomeRealDirectory: homepath,\n\t\tcurrentRealDirectory: homepath,\n\t\tidentity: nil,\n\t}, nil\n}",
"func NewFS(db *bolt.DB, bucketpath string) (*FileSystem, error) {\n\n\t// create buckets\n\terr := db.Update(func(tx *bolt.Tx) error {\n\t\treturn bucketInit(tx, bucketpath)\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// load or initialize\n\trootIno := uint64(1)\n\tfs := &FileSystem{\n\t\tdb: db,\n\t\tbucket: bucketpath,\n\t\trootIno: rootIno,\n\t\tcwd: \"/\",\n\t}\n\terr = db.Update(func(tx *bolt.Tx) error {\n\t\tbb, err := openBucket(tx, bucketpath)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb := newFsBucket(bb)\n\n\t\t// create the `nil` node if it doesn't exist\n\t\terr = b.InodeInit()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// load the\n\t\tdata := make([]byte, 4)\n\t\tbinary.BigEndian.PutUint32(data, uint32(0755))\n\t\t_, err = b.LoadOrSet(\"umask\", data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// load the root Ino if one is available\n\t\tdata, err = b.LoadOrSet(\"rootIno\", i2b(rootIno))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\trootIno = b2i(data)\n\n\t\t_, err = b.GetInode(rootIno)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif err == os.ErrNotExist {\n\t\t\tnode := newInode(os.ModeDir | 0755)\n\t\t\tnode.countUp()\n\t\t\terr = b.PutInode(rootIno, node)\n\t\t\tif err != nil {\n\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tfs.rootIno = rootIno\n\treturn fs, nil\n\n}",
"func FSInfo(file string) (FSInfoData, error) {\n\tvar cinfo C.struct_ploop_info\n\tvar info FSInfoData\n\tcfile := C.CString(file)\n\tdefer cfree(cfile)\n\n\tret := C.ploop_get_info_by_descr(cfile, &cinfo)\n\tif ret == 0 {\n\t\tinfo.blocksize = uint64(cinfo.fs_bsize)\n\t\tinfo.blocks = uint64(cinfo.fs_blocks)\n\t\tinfo.blocks_free = uint64(cinfo.fs_bfree)\n\t\tinfo.inodes = uint64(cinfo.fs_inodes)\n\t\tinfo.inodes_free = uint64(cinfo.fs_ifree)\n\t}\n\n\treturn info, mkerr(ret)\n}",
"func GetOpenListOfPid(pid int) []*os.File {\n\tvar err error\n\tvar file *os.File\n\tvar filelist []string\n\n\tfds := make([]*os.File, 0, 0)\n\n\tfile, err = os.Open(\"/proc/\" + strconv.Itoa(pid) + \"/fd/\")\n\tif err != nil {\n\t\tLogger.Errlogf(\"ERROR: %s\\n\", err)\n\t\treturn fds\n\t}\n\tdefer file.Close()\n\n\tfilelist, err = file.Readdirnames(1024)\n\tif err != nil {\n\t\tif err == io.EOF {\n\t\t\tLogger.Errlogf(\"read dir end: %s, %v\\n\", err, filelist)\n\t\t} else {\n\t\t\tLogger.Errlogf(\"ERROR: %s\\n\", err)\n\t\t\treturn fds\n\t\t}\n\t}\n\t/*\n\t\trhinofly@rhinofly-Y570:~/data/liteide/build$ ls -l /proc/self/fd/\n\t\ttotal 0\n\t\tlrwx------ 1 rhinofly rhinofly 64 Nov 4 09:47 0 -> /dev/pts/16\n\t\tlrwx------ 1 rhinofly rhinofly 64 Nov 4 09:47 1 -> /dev/pts/16\n\t\tlrwx------ 1 rhinofly rhinofly 64 Nov 4 09:47 2 -> /dev/pts/16\n\t\tlr-x------ 1 rhinofly rhinofly 64 Nov 4 09:47 3 -> /proc/29484/fd\n\t*/\n\ttmpid := strconv.Itoa(int(file.Fd()))\n\tif len(filelist) > 0 {\n\t\tfor idx := range filelist {\n\t\t\t//func NewFile(fd uintptr, name string) *File\n\t\t\tlink, _ := os.Readlink(\"/proc/\" + strconv.Itoa(pid) + \"/fd/\" + filelist[idx])\n\t\t\tif filelist[idx] == tmpid {\n\t\t\t\t//Logger.Errlogf(\"file in %d dir: %d, %v, link %s, is me %v\\n\", pid, idx, filelist[idx], link, file.Name())\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\t//Logger.Errlogf(\"file in %d dir: %d, %v -> %s\\n\", pid, idx, filelist[idx], link)\n\t\t\tfd, err := strconv.Atoi(filelist[idx])\n\t\t\tif err != nil {\n\t\t\t\tLogger.Errlogf(\"strconv.Atoi(%v): %s\\n\", filelist[idx], err)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfds = append(fds, os.NewFile(uintptr(fd), link))\n\t\t}\n\t}\n\treturn fds\n}",
"func listProc() ([]int, error) {\n\td, err := os.Open(\"/proc\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer d.Close()\n\n\tresults := make([]int, 0, 50)\n\tfor {\n\t\tfis, err := d.Readdir(10)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfor _, fi := range fis {\n\t\t\t// Pid must be a directory with a numeric name\n\t\t\tif !fi.IsDir() {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// Using Atoi here will also filter out . and ..\n\t\t\tpid, err := strconv.Atoi(fi.Name())\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tresults = append(results, pid)\n\t\t}\n\t}\n\treturn results, nil\n}",
"func NewStat() (Stat, error) {\n\tfs, err := NewFS(fs.DefaultProcMountPoint)\n\tif err != nil {\n\t\treturn Stat{}, err\n\t}\n\treturn fs.Stat()\n}",
"func (b *Bootstrap) Bootstrap(hostname string) error {\n\tlog.Infof(\"Mounting proc\")\n\tif err := syscall.Mount(\"none\", \"/proc\", \"proc\", 0, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err := syscall.Mount(\"none\", \"/dev\", \"devtmpfs\", 0, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err := syscall.Mount(\"none\", \"/dev/pts\", \"devpts\", 0, \"\"); err != nil {\n\t\treturn err\n\t}\n\n\tif err := updateHostname(hostname); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func MountFilesystem(mountpoint string, proxy string) {\n\n\t// The proxy URL should end with a slash, add it if the user forgot about this\n\tif proxy[len(proxy)-1] != '/' {\n\t\tproxy += \"/\"\n\t}\n\n\tc, err := fuse.Mount(\n\t\tmountpoint,\n\t\tfuse.FSName(fmt.Sprintf(\"hgmsfs(%s)\", proxy)),\n\t\tfuse.Subtype(\"hgmfs\"),\n\t\tfuse.LocalVolume(),\n\t\tfuse.VolumeName(\"hgms-volume\"),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tif lruEnabled == true {\n\t\tlruCache, err = ssc.New(\"./ssc.db\", lruBlockSize, lruMaxItems)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tfmt.Printf(\"Serving FS at '%s' (lru_cache=%.2fMB)\\n\", mountpoint, float64(lruBlockSize*lruMaxItems/1024/1024))\n\n\terr = fs.Serve(c, HgmFs{mountPoint: mountpoint, proxyUrl: proxy})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// check if the mount process has an error to report\n\t<-c.Ready\n\tif err := c.MountError; err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func RunInsideImageFS(podmanEngine cli.PodmanEngine, image string, containerFn ContainerFn) (bool, error) {\n\treport, err := podmanEngine.MountImage(image)\n\tif err != nil {\n\t\tlog.Error(\"stdout: \", report.Stdout)\n\t\tlog.Error(\"stderr: \", report.Stderr)\n\t\tlog.Error(\"could not mount filesystem\", err)\n\t\treturn false, err\n\t}\n\n\tdefer func() {\n\t\treport, err := podmanEngine.UnmountImage(image)\n\t\tif err != nil {\n\t\t\tlog.Warn(\"stdout: \", report.Stdout)\n\t\t\tlog.Warn(\"stderr: \", report.Stderr)\n\t\t}\n\t}()\n\n\treturn containerFn(migration.ImageToImageReference(strings.TrimSpace(report.MountDir)))\n}",
"func (w *wrapper) Statfs(path string, stat *fuse.Statfs_t) int {\n\treturn -fuse.ENOSYS\n}",
"func GetFSInfo(ch chan metrics.Metric) {\n\tmountedFS, err := disk.Partitions(false)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"collector\": \"filesystem\",\n\t\t\t\"os\": \"linux\",\n\t\t\t\"action\": \"GetFileSystems\",\n\t\t}).Errorf(\"Unable to find mounted filesystems: %+v\", err)\n\t}\n\tfor _, FSs := range mountedFS {\n\t\tfsStats, err := disk.Usage(FSs.Mountpoint)\n\t\tif err != nil {\n\t\t\tlog.WithFields(log.Fields{\n\t\t\t\t\"collector\": \"filesystem\",\n\t\t\t\t\"os\": \"linux\",\n\t\t\t\t\"action\": \"GetFSStats\",\n\t\t\t}).Errorf(\"Unable to get stats from mounted filesystem: %+v\", err)\n\t\t}\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"collector\": \"filesystem\",\n\t\t\t\"os\": \"linux\",\n\t\t}).Debug(fsStats)\n\t\tfsStat := metrics.FileSystem{\n\t\t\tFileSystem: *fsStats,\n\t\t}\n\t\tch <- fsStat\n\t}\n}",
"func NewFs() *Fs { return &Fs{make(map[string]*file)} }",
"func newStaticProcInode(ctx context.Context, msrc *fs.MountSource, contents []byte) *fs.Inode {\n\tiops := &staticFileInodeOps{\n\t\tInodeSimpleAttributes: fsutil.NewInodeSimpleAttributes(ctx, fs.RootOwner, fs.FilePermsFromMode(0444), linux.PROC_SUPER_MAGIC),\n\t\tInodeStaticFileGetter: fsutil.InodeStaticFileGetter{\n\t\t\tContents: contents,\n\t\t},\n\t}\n\treturn newProcInode(iops, msrc, fs.SpecialFile, nil)\n}",
"func OpenFileSystem(filename string) FileSystem {\n\treturn nil\n}",
"func getFileSystem(frame *rtda.Frame) {\n\tthread := frame.Thread\n\tunixFsClass := frame.GetClassLoader().LoadClass(\"java/io/UnixFileSystem\")\n\tif unixFsClass.InitializationNotStarted() {\n\t\tframe.NextPC = thread.PC // undo getFileSystem\n\t\tthread.InitClass(unixFsClass)\n\t\treturn\n\t}\n\n\tunixFsObj := unixFsClass.NewObj()\n\tframe.PushRef(unixFsObj)\n\n\t// call <init>\n\tframe.PushRef(unixFsObj) // this\n\tconstructor := unixFsClass.GetDefaultConstructor()\n\tthread.InvokeMethod(constructor)\n}",
"func (s *spiff) FileSystem() vfs.FileSystem {\n\treturn s.fs\n}",
"func (c *Container) Attach(pid string, processIO garden.ProcessIO) (process garden.Process, err error) {\n\tctx := context.Background()\n\n\tif pid == \"\" {\n\t\treturn nil, ErrInvalidInput(\"empty pid\")\n\t}\n\n\ttask, err := c.container.Task(ctx, cio.Load)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"task: %w\", err)\n\t}\n\n\tcioOpts := containerdCIO(processIO, false)\n\n\tproc, err := task.LoadProcess(ctx, pid, cio.NewAttach(cioOpts...))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"load proc: %w\", err)\n\t}\n\n\tstatus, err := proc.Status(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"proc status: %w\", err)\n\t}\n\n\tif status.Status != containerd.Running {\n\t\treturn nil, fmt.Errorf(\"proc not running: status = %s\", status.Status)\n\t}\n\n\texitStatusC, err := proc.Wait(ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"proc wait: %w\", err)\n\t}\n\n\treturn NewProcess(proc, exitStatusC), nil\n}",
"func MakeRealFS() FileSystem {\n\treturn realFS{}\n}",
"func RuntimeFsOnto(ctx context.Context, runtimeFs afero.Fs) context.Context {\n\treturn context.WithValue(ctx, runtimeFsKey, runtimeFs)\n}",
"func NewOsFs() FS {\n\treturn &osFs{}\n}",
"func ProcInfo(pid int) (*os.Process, error) {\n\treturn os.FindProcess(pid)\n}",
"func TestTmpfsDevShmNoDupMount(t *testing.T) {\n\tskip.If(t, os.Getuid() != 0, \"skipping test that requires root\")\n\tc := &container.Container{\n\t\tShmPath: \"foobar\", // non-empty, for c.IpcMounts() to work\n\t\tHostConfig: &containertypes.HostConfig{\n\t\t\tIpcMode: containertypes.IPCModeShareable, // default mode\n\t\t\t// --tmpfs /dev/shm:rw,exec,size=NNN\n\t\t\tTmpfs: map[string]string{\n\t\t\t\t\"/dev/shm\": \"rw,exec,size=1g\",\n\t\t\t},\n\t\t},\n\t}\n\td := setupFakeDaemon(t, c)\n\n\t_, err := d.createSpec(context.TODO(), &configStore{}, c)\n\tassert.Check(t, err)\n}",
"func MakeFsInMemory() FileSystem { return filesys.MakeFsInMemory() }",
"func createFDMap(ctx context.Context, k *kernel.Kernel, l *limits.LimitSet, console bool) (*kernel.FDMap, error) {\n\tfdm := k.NewFDMap()\n\tdefer fdm.DecRef()\n\n\t// Maps sandbox fd to host fd.\n\tfdMap := map[int]int{\n\t\t0: syscall.Stdin,\n\t\t1: syscall.Stdout,\n\t\t2: syscall.Stderr,\n\t}\n\tmounter := fs.FileOwnerFromContext(ctx)\n\n\tfor sfd, hfd := range fdMap {\n\t\tfile, err := host.ImportFile(ctx, hfd, mounter, console /* allow ioctls */)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to import fd %d: %v\", hfd, err)\n\t\t}\n\t\tdefer file.DecRef()\n\t\tif err := fdm.NewFDAt(kdefs.FD(sfd), file, kernel.FDFlags{}, l); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to add imported fd %d to FDMap: %v\", hfd, err)\n\t\t}\n\t}\n\n\tfdm.IncRef()\n\treturn fdm, nil\n}",
"func (fs FS) Stat() (Stat, error) {\n\tfileName := fs.proc.Path(\"stat\")\n\tdata, err := util.ReadFileNoStat(fileName)\n\tif err != nil {\n\t\treturn Stat{}, err\n\t}\n\tprocStat, err := parseStat(bytes.NewReader(data), fileName)\n\tif err != nil {\n\t\treturn Stat{}, err\n\t}\n\treturn procStat, nil\n}",
"func (d *gcpVolDriver) createMountpoint(mountpoint string) error {\n\tif err := os.MkdirAll(mountpoint, 0755); err != nil {\n\t\treturn err\n\t}\n\tlog.Printf(\"Mountpoint %s created on host\\n\", mountpoint)\n\treturn nil\n}",
"func NewFileSystem(token string, debug bool) *FileSystem {\n\toauthClient := oauth2.NewClient(\n\t\toauth2.NoContext,\n\t\toauth2.StaticTokenSource(&oauth2.Token{AccessToken: token}),\n\t)\n\tclient := putio.NewClient(oauthClient)\n\tclient.UserAgent = defaultUserAgent\n\n\treturn &FileSystem{\n\t\tputio: client,\n\t\tlogger: NewLogger(\"putiofs: \", debug),\n\t}\n}",
"func NewFilesystem(ctx context.Context, cfg *Config) (KeyManager, error) {\n\troot := cfg.FilesystemRoot\n\tif root != \"\" {\n\t\tif err := os.MkdirAll(root, 0o700); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &Filesystem{\n\t\troot: root,\n\t}, nil\n}",
"func (c *CryptohomeMountInfo) findMountsForPID(ctx context.Context, pid int32) ([]disk.PartitionStat, error) {\n\tpath := fmt.Sprintf(\"/proc/%d/mounts\", pid)\n\tres, err := c.readMountsInfo(ctx, path)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to get list of mounts for pid %d\", pid)\n\t}\n\treturn res, nil\n}",
"func New(ctx context.Context, path string) (Interface, error) {\n\tscheme := getScheme(path)\n\tmkfs, ok := registry[scheme]\n\tif !ok {\n\t\treturn nil, errors.Errorf(\"file system scheme %v not registered for %v\", scheme, path)\n\t}\n\treturn mkfs(ctx), nil\n}",
"func FS(useLocal bool) http.FileSystem {\n\tif useLocal {\n\t\treturn _escLocal\n\t}\n\treturn _escStatic\n}",
"func FS(useLocal bool) http.FileSystem {\n\tif useLocal {\n\t\treturn _escLocal\n\t}\n\treturn _escStatic\n}",
"func FS(useLocal bool) http.FileSystem {\n\tif useLocal {\n\t\treturn _escLocal\n\t}\n\treturn _escStatic\n}",
"func FS(useLocal bool) http.FileSystem {\n\tif useLocal {\n\t\treturn _escLocal\n\t}\n\treturn _escStatic\n}",
"func FS(useLocal bool) http.FileSystem {\n\tif useLocal {\n\t\treturn _escLocal\n\t}\n\treturn _escStatic\n}",
"func FS(useLocal bool) http.FileSystem {\n\tif useLocal {\n\t\treturn _escLocal\n\t}\n\treturn _escStatic\n}"
] | [
"0.760197",
"0.6918058",
"0.6888519",
"0.6845408",
"0.5938905",
"0.5629438",
"0.5607776",
"0.5424195",
"0.53839487",
"0.53667194",
"0.5349028",
"0.5292019",
"0.5251152",
"0.5177979",
"0.5174695",
"0.51732606",
"0.51594526",
"0.514626",
"0.5138058",
"0.51178694",
"0.5115364",
"0.510538",
"0.51035106",
"0.51025975",
"0.5097752",
"0.5095052",
"0.5065563",
"0.5065279",
"0.50584614",
"0.5045965",
"0.5045743",
"0.5045054",
"0.5022967",
"0.5018883",
"0.5015717",
"0.5009952",
"0.4989755",
"0.49771607",
"0.49685413",
"0.49628398",
"0.4958121",
"0.4953957",
"0.49527112",
"0.49384308",
"0.49290964",
"0.49290928",
"0.49272662",
"0.4926512",
"0.49186155",
"0.49111703",
"0.4893872",
"0.4890039",
"0.48845807",
"0.48796988",
"0.48771462",
"0.4875854",
"0.48757544",
"0.48748207",
"0.48605883",
"0.4846963",
"0.48423636",
"0.4838146",
"0.4834113",
"0.48275572",
"0.48204514",
"0.4810341",
"0.48040092",
"0.47809574",
"0.47778153",
"0.47695085",
"0.47664663",
"0.4761967",
"0.4755698",
"0.47434068",
"0.47387996",
"0.472917",
"0.47232947",
"0.4718128",
"0.47152784",
"0.47148305",
"0.46888947",
"0.4669033",
"0.46625212",
"0.4661828",
"0.46614486",
"0.46586528",
"0.46533093",
"0.465192",
"0.46496406",
"0.46489605",
"0.4645235",
"0.46359777",
"0.46336693",
"0.46251798",
"0.46168476",
"0.46168476",
"0.46168476",
"0.46168476",
"0.46168476",
"0.46168476"
] | 0.77894247 | 0 |
HostProcFS creates a proc.FileSystem representing the underlying host's procfs. If we are running in the host pid namespace, it uses /proc. Otherwise, it identifies a mounted-in host procfs by its being mounted on a directory that isn't /proc and by /proc/self linking to a PID different from the one returned by os.Getpid(). If we are running in a container and no mounted-in host procfs was identified, then it returns nil. (A minimal sketch of this detection logic follows this record.) | func HostProcFS() proc.FileSystem {
hostProcFSOnce.Do(func() {
hostProcFS = findHostProcFS()
})
return hostProcFS
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func HostProcFS() *proc.FileSystem {\n\thostProcFSOnce.Do(func() {\n\t\thostProcFS = findHostProcFS()\n\t})\n\n\treturn hostProcFS\n}",
"func ProcFS() proc.FileSystem {\n\tfs, err := procfs.NewFileSystem(\"\")\n\tif err != nil {\n\t\tglog.Fatal(err)\n\t}\n\treturn fs\n}",
"func ProcFS() *proc.FileSystem {\n\treturn proc.FS()\n}",
"func HostProc(combineWith ...string) string {\n\treturn GetEnv(\"HOST_PROC\", \"/proc\", combineWith...)\n}",
"func (o FioSpecVolumeVolumeSourcePtrOutput) HostPath() FioSpecVolumeVolumeSourceHostPathPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSource) *FioSpecVolumeVolumeSourceHostPath {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.HostPath\n\t}).(FioSpecVolumeVolumeSourceHostPathPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceOutput) HostPath() FioSpecVolumeVolumeSourceHostPathPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSource) *FioSpecVolumeVolumeSourceHostPath { return v.HostPath }).(FioSpecVolumeVolumeSourceHostPathPtrOutput)\n}",
"func NewFS(mountPoint string) (*FS, error) {\n\tfs, err := procfs.NewFS(mountPoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tstat, err := fs.NewStat()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &FS{fs, stat.BootTime}, nil\n}",
"func (o IopingSpecVolumeVolumeSourceOutput) HostPath() IopingSpecVolumeVolumeSourceHostPathPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSource) *IopingSpecVolumeVolumeSourceHostPath { return v.HostPath }).(IopingSpecVolumeVolumeSourceHostPathPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourcePtrOutput) HostPath() IopingSpecVolumeVolumeSourceHostPathPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSource) *IopingSpecVolumeVolumeSourceHostPath {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.HostPath\n\t}).(IopingSpecVolumeVolumeSourceHostPathPtrOutput)\n}",
"func NFSActiveOnHost(ctx context.Context) (bool, error) {\n\tLogc(ctx).Debug(\">>>> osutils_windows.NFSActiveOnHost\")\n\tdefer Logc(ctx).Debug(\"<<<< osutils_windows.NFSActiveOnHost\")\n\treturn false, errors.UnsupportedError(\"NFSActiveOnHost is not supported for windows\")\n}",
"func (o FioSpecVolumeVolumeSourceHostPathPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceHostPath) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Path\n\t}).(pulumi.StringPtrOutput)\n}",
"func (img *Image) FileSystem() (string, error) {\n\treturn devFileSystem(img)\n}",
"func hfs() http.FileSystem {\n\tif cachedHFS != nil {\n\t\treturn cachedHFS\n\t}\n\tf, err := fs.New()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcachedHFS = f\n\treturn f\n}",
"func isRealProc(mountPoint string) (bool, error) {\n\tstat := syscall.Statfs_t{}\n\terr := syscall.Statfs(mountPoint, &stat)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// 0x9fa0 is PROC_SUPER_MAGIC: https://elixir.bootlin.com/linux/v6.1/source/include/uapi/linux/magic.h#L87\n\treturn stat.Type == 0x9fa0, nil\n}",
"func MountProc(newroot string) error {\n\tsource := \"proc\"\n\ttarget := filepath.Join(newroot, \"/proc\")\n\tfstype := \"proc\"\n\tflags := 0\n\tdata := \"\"\n\n\tos.MkdirAll(target, 0755)\n\tif err := syscall.Mount(source, target, fstype, uintptr(flags), data); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func New(homepath string) (fs.FileProvider, error) {\n\treturn &physicalFS{\n\t\thomeRealDirectory: homepath,\n\t\tcurrentRealDirectory: homepath,\n\t\tidentity: nil,\n\t}, nil\n}",
"func (o AppTemplateContainerStartupProbeOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerStartupProbe) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func PhysFS(root, prefix string, indexes bool, alreadyinitialized bool) *localFileSystem {\n\tif !alreadyinitialized {\n\t\troot, err := filepath.Abs(root)\n\t\tfmt.Println(root)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = physfs.Init()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer physfs.Deinit()\n\t\terr = physfs.Mount(root, \"/\", true)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tfs := physfs.FileSystem()\n\treturn &localFileSystem{\n\t\tFileSystem: fs,\n\t\torigfs: fs,\n\t\troot: root,\n\t\tprefix: prefix,\n\t\tindexes: indexes,\n\t\tphysfs: true,\n\t}\n}",
"func (p *Resolver) ResolveFromProcfs(pid uint32) *model.ProcessCacheEntry {\n\tp.Lock()\n\tdefer p.Unlock()\n\treturn p.resolveFromProcfs(pid, procResolveMaxDepth)\n}",
"func (p *Provisioner) ProvisionHostPath(opts pvController.ProvisionOptions, volumeConfig *VolumeConfig) (*v1.PersistentVolume, error) {\n\tpvc := opts.PVC\n\ttaints := GetTaints(opts.SelectedNode)\n\tname := opts.PVName\n\tstgType := volumeConfig.GetStorageType()\n\tsaName := getOpenEBSServiceAccountName()\n\n\tnodeAffinityKey := volumeConfig.GetNodeAffinityLabelKey()\n\tif len(nodeAffinityKey) == 0 {\n\t\tnodeAffinityKey = k8sNodeLabelKeyHostname\n\t}\n\tnodeAffinityValue := GetNodeLabelValue(opts.SelectedNode, nodeAffinityKey)\n\n\tpath, err := volumeConfig.GetPath()\n\tif err != nil {\n\t\talertlog.Logger.Errorw(\"\",\n\t\t\t\"eventcode\", \"local.pv.provision.failure\",\n\t\t\t\"msg\", \"Failed to provision Local PV\",\n\t\t\t\"rname\", opts.PVName,\n\t\t\t\"reason\", \"Unable to get volume config\",\n\t\t\t\"storagetype\", stgType,\n\t\t)\n\t\treturn nil, err\n\t}\n\n\timagePullSecrets := GetImagePullSecrets(getOpenEBSImagePullSecrets())\n\n\tklog.Infof(\"Creating volume %v at node with label %v=%v, path:%v,ImagePullSecrets:%v\", name, nodeAffinityKey, nodeAffinityValue, path, imagePullSecrets)\n\n\t//Before using the path for local PV, make sure it is created.\n\tinitCmdsForPath := []string{\"mkdir\", \"-m\", \"0777\", \"-p\"}\n\tpodOpts := &HelperPodOptions{\n\t\tcmdsForPath: initCmdsForPath,\n\t\tname: name,\n\t\tpath: path,\n\t\tnodeAffinityLabelKey: nodeAffinityKey,\n\t\tnodeAffinityLabelValue: nodeAffinityValue,\n\t\tserviceAccountName: saName,\n\t\tselectedNodeTaints: taints,\n\t\timagePullSecrets: imagePullSecrets,\n\t}\n\tiErr := p.createInitPod(podOpts)\n\tif iErr != nil {\n\t\tklog.Infof(\"Initialize volume %v failed: %v\", name, iErr)\n\t\talertlog.Logger.Errorw(\"\",\n\t\t\t\"eventcode\", \"local.pv.provision.failure\",\n\t\t\t\"msg\", \"Failed to provision Local PV\",\n\t\t\t\"rname\", opts.PVName,\n\t\t\t\"reason\", \"Volume initialization failed\",\n\t\t\t\"storagetype\", stgType,\n\t\t)\n\t\treturn nil, iErr\n\t}\n\n\t// VolumeMode will always be specified as Filesystem for host path volume,\n\t// and the value passed in from the PVC spec will be ignored.\n\tfs := v1.PersistentVolumeFilesystem\n\n\t// It is possible that the HostPath doesn't already exist on the node.\n\t// Set the Local PV to create it.\n\t//hostPathType := v1.HostPathDirectoryOrCreate\n\n\t// TODO initialize the Labels and annotations\n\t// Use annotations to specify the context using which the PV was created.\n\t//volAnnotations := make(map[string]string)\n\t//volAnnotations[string(v1alpha1.CASTypeKey)] = casVolume.Spec.CasType\n\t//fstype := casVolume.Spec.FSType\n\n\tlabels := make(map[string]string)\n\tlabels[string(mconfig.CASTypeKey)] = \"local-\" + stgType\n\t//labels[string(v1alpha1.StorageClassKey)] = *className\n\n\t//TODO Change the following to a builder pattern\n\tpvObj, err := persistentvolume.NewBuilder().\n\t\tWithName(name).\n\t\tWithLabels(labels).\n\t\tWithReclaimPolicy(*opts.StorageClass.ReclaimPolicy).\n\t\tWithAccessModes(pvc.Spec.AccessModes).\n\t\tWithVolumeMode(fs).\n\t\tWithCapacityQty(pvc.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]).\n\t\tWithLocalHostDirectory(path).\n\t\tWithNodeAffinity(nodeAffinityKey, nodeAffinityValue).\n\t\tBuild()\n\n\tif err != nil {\n\t\talertlog.Logger.Errorw(\"\",\n\t\t\t\"eventcode\", \"local.pv.provision.failure\",\n\t\t\t\"msg\", \"Failed to provision Local PV\",\n\t\t\t\"rname\", opts.PVName,\n\t\t\t\"reason\", \"failed to build persistent volume\",\n\t\t\t\"storagetype\", stgType,\n\t\t)\n\t\treturn nil, 
err\n\t}\n\talertlog.Logger.Infow(\"\",\n\t\t\"eventcode\", \"local.pv.provision.success\",\n\t\t\"msg\", \"Successfully provisioned Local PV\",\n\t\t\"rname\", opts.PVName,\n\t\t\"storagetype\", stgType,\n\t)\n\treturn pvObj, nil\n}",
"func CurrentContextAsHost() (host *Host, err error) {\n\tcpus := runtime.NumCPU()\n\tmemory, err := getMemorySize()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thost = new(Host)\n\thostname, err := os.Hostname()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thost.Name = hostname\n\thostid_str, err := hostId()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thost.Id = hostid_str\n\thost.PrivateNetwork = Network{}\n\thost.Cores = cpus\n\thost.Memory = memory\n\thost.LastUpdated = time.Now()\n\treturn host, err\n}",
"func getHostFile() (string, error) {\n\tpaltform := runtime.GOOS\n\tif hostFile, ok := pathMap[paltform]; ok {\n\t\treturn hostFile, nil\n\t} else {\n\t\treturn \"\", errors.New(\"unsupported PLATFORM!\")\n\t}\n}",
"func (fs *Memory) FileSystem() *afero.Afero {\n\treturn fs.fs\n}",
"func (o FioSpecVolumeVolumeSourceOutput) Cephfs() FioSpecVolumeVolumeSourceCephfsPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSource) *FioSpecVolumeVolumeSourceCephfs { return v.Cephfs }).(FioSpecVolumeVolumeSourceCephfsPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceHostPathOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceHostPath) string { return v.Path }).(pulumi.StringOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceHostPathPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSourceHostPath) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Path\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourcePtrOutput) Cephfs() FioSpecVolumeVolumeSourceCephfsPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSource) *FioSpecVolumeVolumeSourceCephfs {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Cephfs\n\t}).(FioSpecVolumeVolumeSourceCephfsPtrOutput)\n}",
"func getOSMounts() []specs.Mount {\n\t// Prior to hosts_dir env-var, this value was set to\n\t// os.Getwd()\n\thostsDir := \"/var/lib/faasd\"\n\tif v, ok := os.LookupEnv(\"hosts_dir\"); ok && len(v) > 0 {\n\t\thostsDir = v\n\t}\n\n\tmounts := []specs.Mount{}\n\tmounts = append(mounts, specs.Mount{\n\t\tDestination: \"/etc/resolv.conf\",\n\t\tType: \"bind\",\n\t\tSource: path.Join(hostsDir, \"resolv.conf\"),\n\t\tOptions: []string{\"rbind\", \"ro\"},\n\t})\n\n\tmounts = append(mounts, specs.Mount{\n\t\tDestination: \"/etc/hosts\",\n\t\tType: \"bind\",\n\t\tSource: path.Join(hostsDir, \"hosts\"),\n\t\tOptions: []string{\"rbind\", \"ro\"},\n\t})\n\treturn mounts\n}",
"func GetHostFacts(f Facter) error {\n\thostInfo, err := h.Info()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tf.Add(\"fqdn\", hostInfo.Hostname)\n\tsplitted := strings.SplitN(hostInfo.Hostname, \".\", 2)\n\tvar hostname *string\n\tif len(splitted) > 1 {\n\t\thostname = &splitted[0]\n\t\tf.Add(\"domain\", splitted[1])\n\t} else {\n\t\thostname = &hostInfo.Hostname\n\t}\n\tf.Add(\"hostname\", *hostname)\n\n\tvar isVirtual bool\n\tif hostInfo.VirtualizationRole == \"host\" {\n\t\tisVirtual = false\n\t} else {\n\t\tisVirtual = true\n\t}\n\tf.Add(\"is_virtual\", isVirtual)\n\n\tf.Add(\"kernel\", capitalize(hostInfo.OS))\n\tf.Add(\"operatingsystemrelease\", hostInfo.PlatformVersion)\n\tf.Add(\"operatingsystem\", capitalize(hostInfo.Platform))\n\tf.Add(\"osfamily\", capitalize(hostInfo.PlatformFamily))\n\tf.Add(\"uptime_seconds\", hostInfo.Uptime)\n\tf.Add(\"uptime_minutes\", hostInfo.Uptime/60)\n\tf.Add(\"uptime_hours\", hostInfo.Uptime/60/60)\n\tf.Add(\"uptime_days\", hostInfo.Uptime/60/60/24)\n\tf.Add(\"uptime\", fmt.Sprintf(\"%d days\", hostInfo.Uptime/60/60/24))\n\tf.Add(\"virtual\", hostInfo.VirtualizationSystem)\n\n\tenvPath := os.Getenv(\"PATH\")\n\tif envPath != \"\" {\n\t\tf.Add(\"path\", envPath)\n\t}\n\n\tuser, err := user.Current()\n\tif err == nil {\n\t\tf.Add(\"id\", user.Username)\n\t} else {\n\t\tpanic(err)\n\t}\n\n\tvar uname syscall.Utsname\n\terr = syscall.Uname(&uname)\n\tif err == nil {\n\t\tkernelRelease := int8ToString(uname.Release)\n\t\tkernelVersion := strings.Split(kernelRelease, \"-\")[0]\n\t\tkvSplitted := strings.Split(kernelVersion, \".\")\n\t\tf.Add(\"kernelrelease\", kernelRelease)\n\t\tf.Add(\"kernelversion\", kernelVersion)\n\t\tf.Add(\"kernelmajversion\", strings.Join(kvSplitted[0:2], \".\"))\n\n\t\thardwareModel := int8ToString(uname.Machine)\n\t\tf.Add(\"hardwaremodel\", hardwareModel)\n\t\tf.Add(\"architecture\", guessArch(hardwareModel))\n\t}\n\n\tz, _ := time.Now().Zone()\n\tf.Add(\"timezone\", z)\n\n\treturn nil\n}",
"func Cgroupfs(l *LinuxFactory) error {\n\tl.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {\n\t\treturn &fs.Manager{\n\t\t\tCgroups: config,\n\t\t\tPaths: paths,\n\t\t}\n\t}\n\treturn nil\n}",
"func (o IopingSpecVolumeVolumeSourceOutput) Cephfs() IopingSpecVolumeVolumeSourceCephfsPtrOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSource) *IopingSpecVolumeVolumeSourceCephfs { return v.Cephfs }).(IopingSpecVolumeVolumeSourceCephfsPtrOutput)\n}",
"func Filesystem(t *testing.T, fs string) {\n\thas, err := proc.HasFilesystem(fs)\n\tif err != nil {\n\t\tt.Fatalf(\"error while checking filesystem presence: %s\", err)\n\t}\n\tif !has {\n\t\tt.Skipf(\"%s filesystem seems not supported\", fs)\n\t}\n}",
"func (o ClusterBuildStrategySpecBuildStepsStartupProbeTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsStartupProbeTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func fileOnHost(path string) (*os.File, error) {\n\tif err := os.MkdirAll(filepath.Dir(path), os.ModePerm); err != nil {\n\t\treturn nil, err\n\t}\n\treturn os.Create(path) //nolint:gosec // No security issue: path is safe.\n}",
"func determineFilesystemType(devicePath string) (string, error) {\n\tif devicePath == \"\" {\n\t\treturn \"\", fmt.Errorf(\"null device path\")\n\t}\n\t// Use `file -bsL` to determine whether any filesystem type is detected.\n\t// If a filesystem is detected (ie., the output is not \"data\", we use\n\t// `blkid` to determine what the filesystem is. We use `blkid` as `file`\n\t// has inconvenient output.\n\t// We do *not* use `lsblk` as that requires udev to be up-to-date which\n\t// is often not the case when a device is erased using `dd`.\n\toutput, err := pmemexec.RunCommand(\"file\", \"-bsL\", devicePath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif strings.TrimSpace(output) == \"data\" {\n\t\t// No filesystem detected.\n\t\treturn \"\", nil\n\t}\n\t// Some filesystem was detected, use blkid to figure out what it is.\n\toutput, err = pmemexec.RunCommand(\"blkid\", \"-c\", \"/dev/null\", \"-o\", \"full\", devicePath)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif len(output) == 0 {\n\t\treturn \"\", fmt.Errorf(\"no device information for %s\", devicePath)\n\t}\n\n\t// expected output format from blkid:\n\t// devicepath: UUID=\"<uuid>\" TYPE=\"<filesystem type>\"\n\tattrs := strings.Split(string(output), \":\")\n\tif len(attrs) != 2 {\n\t\treturn \"\", fmt.Errorf(\"Can not parse blkid output: %s\", output)\n\t}\n\tfor _, field := range strings.Fields(attrs[1]) {\n\t\tattr := strings.Split(field, \"=\")\n\t\tif len(attr) == 2 && attr[0] == \"TYPE\" {\n\t\t\treturn strings.Trim(attr[1], \"\\\"\"), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"no filesystem type detected for %s\", devicePath)\n}",
"func (api *PublicStorageHostManagerAPI) StorageHost(id string) storage.HostInfo {\n\tvar enodeid enode.ID\n\n\t// convert the hex string back to the enode.ID type\n\tidSlice, err := hex.DecodeString(id)\n\tif err != nil {\n\t\treturn storage.HostInfo{}\n\t}\n\tcopy(enodeid[:], idSlice)\n\n\t// get the storage host information based on the enode id\n\tinfo, exist := api.shm.storageHostTree.RetrieveHostInfo(enodeid)\n\n\tif !exist {\n\t\treturn storage.HostInfo{}\n\t}\n\treturn info\n}",
"func (o ClusterBuildStrategySpecBuildStepsLifecyclePreStopTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsLifecyclePreStopTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func mountFS(printWarning bool) error {\n\tif printWarning {\n\t\tlog.Warning(\"================================= WARNING ==========================================\")\n\t\tlog.Warning(\"BPF filesystem is not mounted. This will lead to network disruption when Cilium pods\")\n\t\tlog.Warning(\"are restarted. Ensure that the BPF filesystem is mounted in the host.\")\n\t\tlog.Warning(\"https://docs.cilium.io/en/stable/operations/system_requirements/#mounted-ebpf-filesystem\")\n\t\tlog.Warning(\"====================================================================================\")\n\t}\n\n\tlog.Infof(\"Mounting BPF filesystem at %s\", bpffsRoot)\n\n\tmapRootStat, err := os.Stat(bpffsRoot)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif err := os.MkdirAll(bpffsRoot, 0755); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to create bpf mount directory: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to stat the mount path %s: %s\", bpffsRoot, err)\n\n\t\t}\n\t} else if !mapRootStat.IsDir() {\n\t\treturn fmt.Errorf(\"%s is a file which is not a directory\", bpffsRoot)\n\t}\n\n\tif err := unix.Mount(bpffsRoot, bpffsRoot, \"bpf\", 0, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"failed to mount %s: %s\", bpffsRoot, err)\n\t}\n\treturn nil\n}",
"func (o AppTemplateContainerLivenessProbeOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerLivenessProbe) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (c *Config) Localhost() model.HostInfo {\n\treturn model.HostInfo{RpcAddr: cluster.HostAddr(c.PrivateApiRpc.ListenAddr)}\n}",
"func (o ArgoCDSpecServerPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServer) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func CheckHostProcess() Check {\n\treturn Check{\n\t\tID: \"hostProcess\",\n\t\tLevel: api.LevelBaseline,\n\t\tVersions: []VersionedCheck{\n\t\t\t{\n\t\t\t\tMinimumVersion: api.MajorMinorVersion(1, 0),\n\t\t\t\tCheckPod: hostProcess_1_0,\n\t\t\t},\n\t\t},\n\t}\n}",
"func (o BuildStrategySpecBuildStepsStartupProbeTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildStrategySpecBuildStepsStartupProbeTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func FileSystemCreate(f types.Filesystem) error {\n\tvar cmd *exec.Cmd\n\tvar debugCMD string\n\n\tswitch f.Mount.Format {\n\tcase \"swap\":\n\t\tcmd = exec.Command(\"/sbin/mkswap\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkswap\", f.Mount.Device)\n\tcase \"ext4\", \"ext3\", \"ext2\":\n\t\t// Add filesystem flags\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-t\")\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Format)\n\n\t\t// Add force\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-F\")\n\n\t\t// Add Device to formate\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Device)\n\n\t\t// Format disk\n\t\tcmd = exec.Command(\"/sbin/mke2fs\", f.Mount.Create.Options...)\n\t\tfor i := range f.Mount.Create.Options {\n\t\t\tdebugCMD = fmt.Sprintf(\"%s %s\", debugCMD, f.Mount.Create.Options[i])\n\t\t}\n\tcase \"vfat\":\n\t\tcmd = exec.Command(\"/sbin/mkfs.fat\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkfs.fat\", f.Mount.Device)\n\tdefault:\n\t\tlog.Warnf(\"Unknown filesystem type [%s]\", f.Mount.Format)\n\t}\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\n\treturn nil\n}",
"func (*CPIO9PFID) StatFS() (p9.FSStat, error) {\n\treturn p9.FSStat{}, syscall.ENOSYS\n}",
"func (e *dockerExec) hostPath(elems ...string) string {\n\treturn e.Executor.execHostPath(e.id, elems...)\n}",
"func (o ClusterBuildStrategySpecBuildStepsLifecyclePostStartTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsLifecyclePostStartTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o ClusterBuildStrategySpecBuildStepsLifecyclePreStopTcpSocketOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsLifecyclePreStopTcpSocket) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (o ClusterBuildStrategySpecBuildStepsStartupProbeTcpSocketOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsStartupProbeTcpSocket) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func PrepareHostPath(id string) error {\n\n\tif err := os.MkdirAll(masterPath, 0600); err != nil {\n\t\treturn fmt.Errorf(\"create host shared path failed, err: %s\", err)\n\t}\n\tif m, _ := mount.Mounted(masterPath); m != true {\n\t\tif err := mount.Mount(\"none\", masterPath, \"tmpfs\", \"size=16m\"); err != nil {\n\t\t\treturn fmt.Errorf(\"mount host shared path failed:, %s\", err)\n\t\t}\n\t\tif err := syscall.Mount(\"none\", masterPath, \"none\", syscall.MS_SHARED|syscall.MS_REC, \"\"); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to make mountpoint shared, err: %s\", err)\n\t\t}\n\t}\n\n\tif err := os.MkdirAll(filepath.Join(masterPath, id), 0600); err != nil {\n\t\treturn fmt.Errorf(\"create host shared path failed, err: %s\", err)\n\t}\n\treturn nil\n}",
"func NewOsFS() FS {\n\treturn afero.NewOsFs()\n}",
"func (o PgbenchSpecPostgresPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *PgbenchSpecPostgres) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn &v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (s *store) AllocateHostDir(name string) (string, error) {\n\tpath := filepath.Join(s.StorePath(), name)\n\terr := os.MkdirAll(path, 0755)\n\treturn path, err\n}",
"func (o IopingSpecVolumeVolumeSourcePtrOutput) Cephfs() IopingSpecVolumeVolumeSourceCephfsPtrOutput {\n\treturn o.ApplyT(func(v *IopingSpecVolumeVolumeSource) *IopingSpecVolumeVolumeSourceCephfs {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Cephfs\n\t}).(IopingSpecVolumeVolumeSourceCephfsPtrOutput)\n}",
"func (o AppTemplateContainerReadinessProbeOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v AppTemplateContainerReadinessProbe) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (o BuildStrategySpecBuildStepsLifecyclePreStopTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildStrategySpecBuildStepsLifecyclePreStopTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func NewFileSystem() FileSystem {\r\n\treturn &osFileSystem{}\r\n}",
"func New(c *tlc.Container, basePath string) *FsPool {\n\treturn &FsPool{\n\t\tcontainer: c,\n\t\tbasePath: basePath,\n\n\t\tfileIndex: int64(-1),\n\t\treader: nil,\n\t}\n}",
"func (o BuildStrategySpecBuildStepsStartupProbeTcpSocketOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsStartupProbeTcpSocket) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (o BuildStrategySpecBuildStepsLifecyclePreStopTcpSocketOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsLifecyclePreStopTcpSocket) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (o ArgoCDSpecServerGrpcPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecServerGrpc) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func PodFitsHost(pod *v1.Pod, meta Metadata, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {\n\tif len(pod.Spec.NodeName) == 0 {\n\t\treturn true, nil, nil\n\t}\n\tnode := nodeInfo.Node()\n\tif node == nil {\n\t\treturn false, nil, fmt.Errorf(\"node not found\")\n\t}\n\tif pod.Spec.NodeName == node.Name {\n\t\treturn true, nil, nil\n\t}\n\treturn false, []PredicateFailureReason{ErrPodNotMatchHostName}, nil\n}",
"func OS(root string) FileSystem {\n\treturn osFS{root, vfs.OS(root)}\n}",
"func getProcPath(pid string, file string) ([]byte, error) {\n\tprocessPath := filepath.Join(\"/proc\", pid, file)\n\tdat, err := ioutil.ReadFile(processPath)\n\tif err != nil {\n\t\t//fmt.Println(\"error occured opening file:\", err)\n\t\treturn nil, err\n\t}\n\treturn dat, nil\n}",
"func GetHostFile() (*hostess.Hostfile, []error) {\n\n\t// prep for refactor\n\t// capture duplicate localhost here\n\t// TODO need a better solution, this is a hack\n\thf, errs := hostess.LoadHostfile()\n\n\tfor _, err := range errs {\n\n\t\t// auto-fixing hostfile problems.\n\t\tif err.Error() == \"Duplicate hostname entry for localhost -> ::1\" {\n\t\t\t_, err = BackupHostFile(hf)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{errors.New(\"Could not back up hostfile.\")}\n\t\t\t}\n\n\t\t\t// fix the duplicate\n\t\t\tinput, err := ioutil.ReadFile(hf.Path)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{err}\n\t\t\t}\n\n\t\t\tlines := strings.Split(string(input), \"\\n\")\n\t\t\tfor i, line := range lines {\n\t\t\t\t// if the line looks something like this then it's\n\t\t\t\t// probably the fault of hostess on a previous run and\n\t\t\t\t// safe to fix.\n\t\t\t\tif strings.Contains(line, \"::1 localhost localhost\") {\n\t\t\t\t\tlines[i] = \"::1 localhost\"\n\t\t\t\t}\n\t\t\t}\n\n\t\t\toutput := strings.Join(lines, \"\\n\")\n\t\t\terr = ioutil.WriteFile(hf.Path, []byte(output), 0644)\n\t\t\tif err != nil {\n\t\t\t\treturn hf, []error{err}\n\t\t\t}\n\n\t\t\treturn hostess.LoadHostfile()\n\t\t}\n\n\t}\n\n\treturn hf, errs\n}",
"func OwnFS(fs http.FileSystem, root, prefix string, indexes bool) *localFileSystem {\n\treturn &localFileSystem{\n\t\tFileSystem: fs,\n\t\torigfs: fs,\n\t\troot: \"/root/\" + root,\n\t\tprefix: prefix,\n\t\tindexes: indexes,\n\t\tphysfs: false,\n\t}\n}",
"func PrebuiltUserShareHostFactory() android.Module {\n\tmodule := &PrebuiltEtc{}\n\tInitPrebuiltEtcModule(module, \"usr/share\")\n\t// This module is host-only\n\tandroid.InitAndroidArchModule(module, android.HostSupported, android.MultilibCommon)\n\treturn module\n}",
"func (o ClusterBuildStrategySpecBuildStepsLifecyclePostStartTcpSocketOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsLifecyclePostStartTcpSocket) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (o BuildStrategySpecBuildStepsLifecyclePostStartTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildStrategySpecBuildStepsLifecyclePostStartTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (fs *Fs) ParseProcMounts(\n\tctx context.Context,\n\tcontent io.Reader) ([]gofsutil.Info, error) {\n\tr, _, err := gofsutil.ReadProcMountsFrom(ctx, content, false,\n\t\tgofsutil.ProcMountsFields, gofsutil.DefaultEntryScanFunc())\n\treturn r, err\n}",
"func (_class PIFClass) GetHost(sessionID SessionRef, self PIFRef) (_retval HostRef, _err error) {\n\t_method := \"PIF.get_host\"\n\t_sessionIDArg, _err := convertSessionRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"session_id\"), sessionID)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_selfArg, _err := convertPIFRefToXen(fmt.Sprintf(\"%s(%s)\", _method, \"self\"), self)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_result, _err := _class.client.APICall(_method, _sessionIDArg, _selfArg)\n\tif _err != nil {\n\t\treturn\n\t}\n\t_retval, _err = convertHostRefToGo(_method + \" -> \", _result.Value)\n\treturn\n}",
"func (r *StorageClusterReconciler) newCephFilesystemInstances(initData *ocsv1.StorageCluster) ([]*cephv1.CephFilesystem, error) {\n\tret := &cephv1.CephFilesystem{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: generateNameForCephFilesystem(initData),\n\t\t\tNamespace: initData.Namespace,\n\t\t},\n\t\tSpec: cephv1.FilesystemSpec{\n\t\t\tMetadataPool: cephv1.PoolSpec{\n\t\t\t\tReplicated: generateCephReplicatedSpec(initData, \"metadata\"),\n\t\t\t\tFailureDomain: initData.Status.FailureDomain,\n\t\t\t},\n\t\t\tMetadataServer: cephv1.MetadataServerSpec{\n\t\t\t\tActiveCount: 1,\n\t\t\t\tActiveStandby: true,\n\t\t\t\tPlacement: getPlacement(initData, \"mds\"),\n\t\t\t\tResources: defaults.GetDaemonResources(\"mds\", initData.Spec.Resources),\n\t\t\t\t// set PriorityClassName for the MDS pods\n\t\t\t\tPriorityClassName: openshiftUserCritical,\n\t\t\t},\n\t\t},\n\t}\n\n\tif initData.Spec.StorageProfiles == nil {\n\t\t// standalone deployment will not have storageProfile, we need to\n\t\t// define default dataPool, if storageProfile is set this will be\n\t\t// overridden.\n\t\tret.Spec.DataPools = []cephv1.NamedPoolSpec{\n\t\t\t{\n\t\t\t\tPoolSpec: cephv1.PoolSpec{\n\t\t\t\t\tDeviceClass: generateDeviceClass(initData),\n\t\t\t\t\tReplicated: generateCephReplicatedSpec(initData, \"data\"),\n\t\t\t\t\tFailureDomain: initData.Status.FailureDomain,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t} else {\n\t\t// set deviceClass and parameters from storageProfile\n\t\tfor i := range initData.Spec.StorageProfiles {\n\t\t\tdeviceClass := initData.Spec.StorageProfiles[i].DeviceClass\n\t\t\tparameters := initData.Spec.StorageProfiles[i].SharedFilesystemConfiguration.Parameters\n\t\t\tret.Spec.DataPools = append(ret.Spec.DataPools, cephv1.NamedPoolSpec{\n\t\t\t\tName: deviceClass,\n\t\t\t\tPoolSpec: cephv1.PoolSpec{\n\t\t\t\t\tReplicated: generateCephReplicatedSpec(initData, \"data\"),\n\t\t\t\t\tDeviceClass: deviceClass,\n\t\t\t\t\tParameters: parameters,\n\t\t\t\t\tFailureDomain: initData.Status.FailureDomain,\n\t\t\t\t},\n\t\t\t})\n\t\t}\n\t}\n\n\terr := controllerutil.SetControllerReference(initData, ret, r.Scheme)\n\tif err != nil {\n\t\tr.Log.Error(err, \"Unable to set Controller Reference for CephFileSystem.\", \"CephFileSystem\", klog.KRef(ret.Namespace, ret.Name))\n\t\treturn nil, err\n\t}\n\n\treturn []*cephv1.CephFilesystem{ret}, nil\n}",
"func NewSafeFormatAndMountFromHost(pluginName string, host volume.VolumeHost) *mount.SafeFormatAndMount {\n\tmounter := host.GetMounter(pluginName)\n\texec := host.GetExec(pluginName)\n\treturn &mount.SafeFormatAndMount{Interface: mounter, Exec: exec}\n}",
"func (p *Resolver) SetProcessFilesystem(entry *model.ProcessCacheEntry) (string, error) {\n\tif entry.FileEvent.MountID != 0 {\n\t\tfs, err := p.mountResolver.ResolveFilesystem(entry.FileEvent.MountID, entry.Pid, entry.ContainerID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tentry.FileEvent.Filesystem = fs\n\t}\n\n\treturn entry.FileEvent.Filesystem, nil\n}",
"func (o ArgoCDSpecServerGrpcOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServerGrpc) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func OpenedBy(l shell.Logger, debug bool, path string) (string, error) {\n\tpidEntries, err := os.ReadDir(\"/proc\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to read /proc: %w\", err)\n\t}\n\n\tabsPath, err := filepath.Abs(path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get absolute path: %w\", err)\n\t}\n\n\tfor _, p := range pidEntries {\n\t\tpid := p.Name()\n\n\t\tif !numeric.MatchString(pid) || !openedByPid(l, debug, absPath, pid) {\n\t\t\tcontinue\n\t\t}\n\n\t\t// /proc/<pid>/exe is a symlink to the executable\n\t\texe, err := os.Readlink(fmt.Sprintf(\"/proc/%s/exe\", pid))\n\t\tif err != nil {\n\t\t\tif debug {\n\t\t\t\tl.Warningf(\"Failed to read executable for pid %s: %v\", pid, err)\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\n\t\treturn exe, nil\n\t}\n\n\treturn \"\", ErrFileNotOpen\n}",
"func (o ArgoCDSpecServerOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ArgoCDSpecServer) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (o ClusterBuildStrategySpecBuildStepsLivenessProbeTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsLivenessProbeTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o IopingSpecVolumeVolumeSourceHostPathOutput) Path() pulumi.StringOutput {\n\treturn o.ApplyT(func(v IopingSpecVolumeVolumeSourceHostPath) string { return v.Path }).(pulumi.StringOutput)\n}",
"func diskCephfsOptions(clusterName string, userName string, fsName string, fsPath string) (string, []string, error) {\n\t// Get the credentials and host\n\tmonAddresses, secret, err := cephFsConfig(clusterName, userName)\n\tif err != nil {\n\t\treturn \"\", nil, err\n\t}\n\n\tfsOptions := []string{\n\t\tfmt.Sprintf(\"name=%v\", userName),\n\t\tfmt.Sprintf(\"secret=%v\", secret),\n\t\tfmt.Sprintf(\"mds_namespace=%v\", fsName),\n\t}\n\n\tsrcpath := \"\"\n\tfor _, monAddress := range monAddresses {\n\t\t// Add the default port to the mon hosts if not already provided\n\t\tif strings.Contains(monAddress, \":6789\") {\n\t\t\tsrcpath += fmt.Sprintf(\"%s,\", monAddress)\n\t\t} else {\n\t\t\tsrcpath += fmt.Sprintf(\"%s:6789,\", monAddress)\n\t\t}\n\t}\n\tsrcpath = srcpath[:len(srcpath)-1]\n\tsrcpath += fmt.Sprintf(\":/%s\", fsPath)\n\n\treturn srcpath, fsOptions, nil\n}",
"func (o ArgoCDSpecGrafanaPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ArgoCDSpecGrafana) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (s *spiff) FileSystem() vfs.FileSystem {\n\treturn s.fs\n}",
"func PodFitsHostPortsPredicate(pod *v1.Pod, meta []*v1.ContainerPort, nodeInfo *schedulernodeinfo.NodeInfo) (bool, []PredicateFailureReason, error) {\n\twantPorts := meta\n\tif wantPorts == nil {\n\t\t// Fallback to computing it.\n\t\twantPorts = schedutil.GetContainerPorts(pod)\n\t}\n\tif len(wantPorts) == 0 {\n\t\treturn true, nil, nil\n\t}\n\n\texistingPorts := nodeInfo.UsedPorts()\n\n\t// try to see whether existingPorts and wantPorts will conflict or not\n\tif portsConflict(existingPorts, wantPorts) {\n\t\treturn false, []PredicateFailureReason{ErrPodNotFitsHostPorts}, nil\n\t}\n\n\treturn true, nil, nil\n}",
"func (o BuildStrategySpecBuildStepsLifecyclePostStartTcpSocketOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildStrategySpecBuildStepsLifecyclePostStartTcpSocket) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func podVFIndexInHost(hostNetPod *corev1.Pod, targetPod *corev1.Pod, interfaceName string) (int, error) {\n\tvar stdout, stderr string\n\tvar err error\n\tEventually(func() error {\n\t\tstdout, stderr, err = pod.ExecCommand(clients, targetPod, \"readlink\", \"-f\", fmt.Sprintf(\"/sys/class/net/%s\", interfaceName))\n\t\tif stdout == \"\" {\n\t\t\treturn fmt.Errorf(\"empty response from pod exec\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to find %s interface address %v - %s\", interfaceName, err, stderr)\n\t\t}\n\n\t\treturn nil\n\t}, 1*time.Minute, 5*time.Second).ShouldNot(HaveOccurred())\n\n\t// sysfs address looks like: /sys/devices/pci0000:17/0000:17:02.0/0000:19:00.5/net/net1\n\tpathSegments := strings.Split(stdout, \"/\")\n\tsegNum := len(pathSegments)\n\n\tif !strings.HasPrefix(pathSegments[segNum-1], \"net1\") { // not checking equality because of rubbish like new line\n\t\treturn 0, fmt.Errorf(\"Expecting net1 as last segment of %s\", stdout)\n\t}\n\n\tpodVFAddr := pathSegments[segNum-3] // 0000:19:00.5\n\n\tdevicePath := strings.Join(pathSegments[0:segNum-2], \"/\") // /sys/devices/pci0000:17/0000:17:02.0/0000:19:00.5/\n\tfindAllSiblingVfs := strings.Split(fmt.Sprintf(\"ls -gG %s/physfn/\", devicePath), \" \")\n\n\tres := 0\n\tEventually(func() error {\n\t\tstdout, stderr, err = pod.ExecCommand(clients, hostNetPod, findAllSiblingVfs...)\n\t\tif stdout == \"\" {\n\t\t\treturn fmt.Errorf(\"empty response from pod exec\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to find %s siblings %v - %s\", devicePath, err, stderr)\n\t\t}\n\n\t\t// lines of the format of\n\t\t// lrwxrwxrwx. 1 0 Mar 6 15:15 virtfn3 -> ../0000:19:00.5\n\t\tscanner := bufio.NewScanner(strings.NewReader(stdout))\n\t\tfor scanner.Scan() {\n\t\t\tline := scanner.Text()\n\t\t\tif !strings.Contains(line, \"virtfn\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tcolumns := strings.Fields(line)\n\n\t\t\tif len(columns) != 9 {\n\t\t\t\treturn fmt.Errorf(\"Expecting 9 columns in %s, found %d\", line, len(columns))\n\t\t\t}\n\n\t\t\tvfAddr := strings.TrimPrefix(columns[8], \"../\") // ../0000:19:00.2\n\n\t\t\tif vfAddr == podVFAddr { // Found!\n\t\t\t\tvfName := columns[6] // virtfn0\n\t\t\t\tvfNumber := strings.TrimPrefix(vfName, \"virtfn\")\n\t\t\t\tres, err = strconv.Atoi(vfNumber)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn fmt.Errorf(\"Could not get vf number from vfname %s\", vfName)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}\n\t\t}\n\t\treturn fmt.Errorf(\"Could not find %s index in %s\", podVFAddr, stdout)\n\t}, 1*time.Minute, 5*time.Second).ShouldNot(HaveOccurred())\n\n\treturn res, nil\n}",
"func (o GetAppTemplateContainerStartupProbeOutput) Host() pulumi.StringOutput {\n\treturn o.ApplyT(func(v GetAppTemplateContainerStartupProbe) string { return v.Host }).(pulumi.StringOutput)\n}",
"func getFileSystemType(path string) (fsType string, hr error) {\n\tdrive := filepath.VolumeName(path)\n\tif len(drive) != 2 {\n\t\treturn \"\", errors.New(\"getFileSystemType path must start with a drive letter\")\n\t}\n\n\tvar (\n\t\tmodkernel32 = windows.NewLazySystemDLL(\"kernel32.dll\")\n\t\tprocGetVolumeInformation = modkernel32.NewProc(\"GetVolumeInformationW\")\n\t\tbuf = make([]uint16, 255)\n\t\tsize = windows.MAX_PATH + 1\n\t)\n\tdrive += `\\`\n\tn := uintptr(unsafe.Pointer(nil))\n\tr0, _, _ := syscall.Syscall9(procGetVolumeInformation.Addr(), 8, uintptr(unsafe.Pointer(windows.StringToUTF16Ptr(drive))), n, n, n, n, n, uintptr(unsafe.Pointer(&buf[0])), uintptr(size), 0)\n\tif int32(r0) < 0 {\n\t\thr = syscall.Errno(win32FromHresult(r0))\n\t}\n\tfsType = windows.UTF16ToString(buf)\n\treturn\n}",
"func (o ClusterBuildStrategySpecBuildStepsReadinessProbeTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsReadinessProbeTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o ClusterBuildStrategySpecBuildStepsStartupProbeHttpGetPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *ClusterBuildStrategySpecBuildStepsStartupProbeHttpGet) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func MakeFsOnDisk() FileSystem { return filesys.MakeFsOnDisk() }",
"func TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mount.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := syscall.Mount(\"tmpfs\", l.Root, \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (p *process) HostPort(containerPort int) (int, error) {\n\treturn containerPort, nil\n}",
"func (u *UserUploader) FileSystem() *afero.Afero {\n\treturn u.uploader.Storer.FileSystem()\n}",
"func (p artifactPath) hostPath(path string) string {\n\treturn rebasePath(path, p.mountPath, p.mountBind)\n}",
"func (o BuildStrategySpecBuildStepsLivenessProbeTcpSocketPtrOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildStrategySpecBuildStepsLivenessProbeTcpSocket) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Host\n\t}).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceCephfsOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v FioSpecVolumeVolumeSourceCephfs) *string { return v.Path }).(pulumi.StringPtrOutput)\n}",
"func (o ClusterBuildStrategySpecBuildStepsLivenessProbeTcpSocketOutput) Host() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v ClusterBuildStrategySpecBuildStepsLivenessProbeTcpSocket) *string { return v.Host }).(pulumi.StringPtrOutput)\n}",
"func (o FioSpecVolumeVolumeSourceCephfsPtrOutput) Path() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *FioSpecVolumeVolumeSourceCephfs) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Path\n\t}).(pulumi.StringPtrOutput)\n}",
"func ListProcFds() ([]string, error) {\n\t// returns the names of all files matching pattern\n\t// or nil if there is no matching file\n\tfs, err := filepath.Glob(\"/proc/[0-9]*/fd/[0-9]*\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn fs, nil\n}",
"func (d *cephfs) fsExists(clusterName string, userName string, fsName string) bool {\n\t_, err := shared.RunCommand(\"ceph\", \"--name\", fmt.Sprintf(\"client.%s\", userName), \"--cluster\", clusterName, \"fs\", \"get\", fsName)\n\treturn err == nil\n}"
] | [
"0.8426301",
"0.679312",
"0.66974336",
"0.60143",
"0.5733004",
"0.57286805",
"0.57118076",
"0.53681195",
"0.5331099",
"0.527021",
"0.51754916",
"0.51229256",
"0.5111104",
"0.5068996",
"0.50395757",
"0.50207096",
"0.49939087",
"0.490678",
"0.489303",
"0.484484",
"0.4815",
"0.480984",
"0.4798665",
"0.47958627",
"0.4780428",
"0.47708312",
"0.47518104",
"0.47147077",
"0.47094038",
"0.4703729",
"0.47007725",
"0.4699277",
"0.46951833",
"0.4691013",
"0.46877632",
"0.4675633",
"0.46641836",
"0.46638066",
"0.46624923",
"0.4656144",
"0.46538678",
"0.46395144",
"0.4638735",
"0.46362245",
"0.46260363",
"0.46227488",
"0.46217775",
"0.461537",
"0.46048465",
"0.46030584",
"0.46023265",
"0.4594973",
"0.4590767",
"0.45858088",
"0.45847514",
"0.45847097",
"0.4567478",
"0.4565161",
"0.4562207",
"0.45563596",
"0.45521897",
"0.45508847",
"0.45471913",
"0.45469934",
"0.4542504",
"0.4542053",
"0.45417836",
"0.4535548",
"0.4533795",
"0.45309222",
"0.45177945",
"0.45128694",
"0.4510822",
"0.45100912",
"0.45095667",
"0.45087093",
"0.4508691",
"0.45073354",
"0.44941315",
"0.44886154",
"0.4487595",
"0.44837326",
"0.4480697",
"0.44755578",
"0.44584525",
"0.44536394",
"0.44469944",
"0.444239",
"0.44403848",
"0.44342682",
"0.4432414",
"0.44307032",
"0.4429766",
"0.44254348",
"0.4424372",
"0.44234842",
"0.44159392",
"0.44139233",
"0.44127548",
"0.44087836"
] | 0.8387475 | 1 |
TracingDir returns the directory on either the debugfs or tracefs used to control the Linux kernel trace event subsystem. | func TracingDir() string {
mounts := HostProcFS().Mounts()
// Look for an existing tracefs
for _, m := range mounts {
if m.FilesystemType == "tracefs" {
glog.V(1).Infof("Found tracefs at %s", m.MountPoint)
return m.MountPoint
}
}
// If no mounted tracefs has been found, look for it as a
// subdirectory of the older debugfs
for _, m := range mounts {
if m.FilesystemType == "debugfs" {
d := filepath.Join(m.MountPoint, "tracing")
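			// A usable tracing directory contains an "events" subdirectory; check for it.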
s, err := os.Stat(filepath.Join(d, "events"))
if err == nil && s.IsDir() {
glog.V(1).Infof("Found debugfs w/ tracing at %s", d)
return d
}
return m.MountPoint
}
}
return ""
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func TracingDir() string {\n\tmounts := Mounts()\n\n\t// Look for an existing tracefs\n\tfor _, m := range mounts {\n\t\tif m.FilesystemType == \"tracefs\" {\n\t\t\tglog.V(1).Infof(\"Found tracefs at %s\", m.MountPoint)\n\t\t\treturn m.MountPoint\n\t\t}\n\t}\n\n\t// If no mounted tracefs has been found, look for it as a\n\t// subdirectory of the older debugfs\n\tfor _, m := range mounts {\n\t\tif m.FilesystemType == \"debugfs\" {\n\t\t\td := filepath.Join(m.MountPoint, \"tracing\")\n\t\t\ts, err := os.Stat(filepath.Join(d, \"events\"))\n\t\t\tif err == nil && s.IsDir() {\n\t\t\t\tglog.V(1).Infof(\"Found debugfs w/ tracing at %s\", d)\n\t\t\t\treturn d\n\t\t\t}\n\n\t\t\treturn m.MountPoint\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func (c *RPCClient) TraceDirectory() (string, error) {\n\tvar out RecordedOut\n\terr := c.call(\"Recorded\", RecordedIn{}, &out)\n\treturn out.TraceDirectory, err\n}",
"func PerfEventDir() string {\n\tfor _, mi := range HostProcFS().Mounts() {\n\t\tif mi.FilesystemType == \"cgroup\" {\n\t\t\tfor option := range mi.SuperOptions {\n\t\t\t\tif option == \"perf_event\" {\n\t\t\t\t\treturn mi.MountPoint\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func PerfEventDir() string {\n\tfor _, mi := range Mounts() {\n\t\tif mi.FilesystemType == \"cgroup\" {\n\t\t\tfor option := range mi.SuperOptions {\n\t\t\t\tif option == \"perf_event\" {\n\t\t\t\t\treturn mi.MountPoint\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func getTelemetryServiceDirectory() (path string, dir string) {\n\tpath = filepath.Join(CniInstallDir, TelemetryServiceProcessName)\n\n\tif _, exists := os.Stat(path); exists != nil {\n\t\tex, _ := os.Executable()\n\t\texDir := filepath.Dir(ex)\n\t\tpath = filepath.Join(exDir, TelemetryServiceProcessName)\n\t\tdir = exDir\n\t} else {\n\t\tdir = CniInstallDir\n\t}\n\treturn\n}",
"func (env *LocalTestEnv) Directory() string {\n\treturn env.TmpPath\n}",
"func (s *Scope) logDir() (string, error) {\n\tp, err := s.dataDir()\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\treturn filepath.Join(p, \"Logs\"), nil\n}",
"func LogsDir() string {\n\treturn filepath.Join(userLogsDir, \"kopia\")\n}",
"func GetDir() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn path.Dir(filename)\n}",
"func (env *LocalTestEnv) LogDirectory() string {\n\treturn path.Join(env.TmpPath, \"logs\")\n}",
"func getTSNetDir(logf logger.Logf, confDir, prog string) (string, error) {\n\toldPath := filepath.Join(confDir, \"tslib-\"+prog)\n\tnewPath := filepath.Join(confDir, \"tsnet-\"+prog)\n\n\tfi, err := os.Lstat(oldPath)\n\tif os.IsNotExist(err) {\n\t\t// Common path.\n\t\treturn newPath, nil\n\t}\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif !fi.IsDir() {\n\t\treturn \"\", fmt.Errorf(\"expected old tslib path %q to be a directory; got %v\", oldPath, fi.Mode())\n\t}\n\n\t// At this point, oldPath exists and is a directory. But does\n\t// the new path exist?\n\n\tfi, err = os.Lstat(newPath)\n\tif err == nil && fi.IsDir() {\n\t\t// New path already exists somehow. Ignore the old one and\n\t\t// don't try to migrate it.\n\t\treturn newPath, nil\n\t}\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn \"\", err\n\t}\n\tif err := os.Rename(oldPath, newPath); err != nil {\n\t\treturn \"\", err\n\t}\n\tlogf(\"renamed old tsnet state storage directory %q to %q\", oldPath, newPath)\n\treturn newPath, nil\n}",
"func (c *configImpl) LogsDir() string {\n\tif c.Dist() {\n\t\t// Always write logs to the real dist dir, even if Bazel is using a rigged dist dir for other files\n\t\treturn filepath.Join(c.RealDistDir(), \"logs\")\n\t}\n\treturn c.OutDir()\n}",
"func (s *Scope) logDir() (string, error) {\n\tswitch s.Type {\n\tcase System:\n\t\treturn defaultLogDir, nil\n\n\tcase User:\n\t\tfallthrough\n\n\tcase CustomHome:\n\t\treturn s.dataDir()\n\t}\n\n\treturn \"\", ErrInvalidScope\n}",
"func CallerDir() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn path.Dir(filename)\n}",
"func GetRuntimeDir() (string, error) {\n\treturn \"\", errors.New(\"this function is not implemented for windows\")\n}",
"func (o BuildSpecSourceOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecSource) *string { return v.ContextDir }).(pulumi.StringPtrOutput)\n}",
"func GetSnapshotDir(snapshotID string) string {\n\treturn filepath.Join(SnapshotsDirname, snapshotID)\n}",
"func (o BuildSpecSourcePtrOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ContextDir\n\t}).(pulumi.StringPtrOutput)\n}",
"func (at AssetType) DirPath() string {\n\tinvPath := env.InvestigationsPath()\n\tswitch at {\n\tcase AssetTypeGCPAnalysis:\n\t\treturn filepath.Join(invPath, \"gcp-analyses\")\n\tcase AssetTypeIBMAnalysis:\n\t\treturn filepath.Join(invPath, \"ibm-analyses\")\n\tcase AssetTypeAudio:\n\t\treturn filepath.Join(invPath, \"audio\")\n\tcase AssetTypeRecognition:\n\t\treturn filepath.Join(invPath, \"recognitions\")\n\tcase AssetTypeTranscript:\n\t\treturn filepath.Join(invPath, \"transcripts\")\n\tcase AssetTypeVideo:\n\t\treturn filepath.Join(invPath, \"videos\")\n\tdefault:\n\t\treturn \"\"\n\t}\n}",
"func (o BuildRunStatusBuildSpecSourcePtrOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ContextDir\n\t}).(pulumi.StringPtrOutput)\n}",
"func Dir() string {\n\tsrcdir := os.Getenv(\"TEST_SRCDIR\")\n\treturn filepath.Join(\n\t\tsrcdir, os.Getenv(\"TEST_WORKSPACE\"),\n\t\t\"go\", \"tools\", \"gazelle\", \"testdata\",\n\t)\n}",
"func GetWorkDirPath(dir string, t *testing.T) string {\n\tpath, err := os.Getwd()\n\tif err != nil {\n\t\tt.Fatalf(\"failed to get working directory: %s\", err)\n\t}\n\treturn fmt.Sprintf(\"%s%c%s\", path, os.PathSeparator, dir)\n}",
"func (o BuildRunStatusBuildSpecSourceOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSource) *string { return v.ContextDir }).(pulumi.StringPtrOutput)\n}",
"func GetCurrentDirPath() string {\n\tdirPath := filepath.Dir(os.Args[0])\n\treturn dirPath\n}",
"func getDir(location string) string {\n\tdir := viper.GetString(DirFlag)\n\tif dir[len(dir)-1] != '/' {\n\t\tdir = filepath.Join(dir, \"/\")\n\t}\n\treturn os.ExpandEnv(filepath.Join(dir, location))\n}",
"func configDirPath() string {\n\tusr, err := user.Current()\n\tif err != nil {\n\t\tlogging.LogFatal(\"config/configDirPath() - Can't find current user: \", err)\n\t}\n\n\t// println(\"usr.HomeDir: \", usr.HomeDir)\n\tconfigDirPath := paths.GetFilePath(usr.HomeDir, configDirName)\n\n\treturn configDirPath\n}",
"func Dir() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\n\treturn filepath.Dir(filename)\n}",
"func GetCurrentDirectory() string {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Panicln(err)\n\t}\n\treturn dir\n}",
"func (fs *FileSystem) Getwd() (dir string, err error) {\n\treturn fs.cwd, nil\n}",
"func (g *GlobalContext) Dir() string {\n\tpwd, err := os.Getwd()\n\n\tif err != nil {\n\t\tg.LogError(err)\n\t}\n\n\treturn pwd + \"/.secrets\"\n}",
"func sourceFileDirectory() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn filepath.Dir(filename)\n}",
"func getWorkDirPath(dir string) (string, error) {\n\tpath, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn fmt.Sprintf(\"%s%c%s\", path, os.PathSeparator, dir), nil\n}",
"func LogDir() string {\n\tlogDir := \"\"\n\tflag.Visit(func(f *flag.Flag) {\n\t\tif f.Name == \"log_dir\" {\n\t\t\tlogDir = f.Value.String()\n\t\t}\n\t})\n\treturn logDir\n}",
"func (s *Scope) configDir() (string, error) {\n\tp, err := s.dataDir()\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\treturn filepath.Join(p, \"Config\"), nil\n}",
"func (td *OsmTestData) GetTestDirPath() string {\n\tabsPath, err := filepath.Abs(strings.Join([]string{td.TestDirBase, td.TestDirName}, \"/\"))\n\tif err != nil {\n\t\ttd.T.Errorf(\"Error getting TestDirAbsPath: %v\", err)\n\t}\n\treturn absPath\n}",
"func LogDir() string {\n\treturn environ.GetValueStrOrPanic(\"LOG_DIR\")\n}",
"func (c *Config) RewardDir() string {\n\tif len(c.rewardDir) == 0 {\n\t\tc.rewardDir = path.Join(\"rewards\", c.Name)\n\t}\n\n\treturn c.rewardDir\n}",
"func CallerDirectoryPath(t *testing.T) string {\n\tt.Log(\"Exec CallerFilePath\")\n\t_, filename, _, ok := runtime.Caller(1)\n\tif !ok {\n\t\tt.Error(\"Can not read current file when runtime\")\n\t}\n\tt.Log(\"filename is \" + filename)\n\tdirectoryPath := filepath.Dir(filename)\n\tt.Log(\"Current test directoryPath: \" + directoryPath)\n\treturn directoryPath\n}",
"func (t *Application) stateDir() (dir string, closer func() error, err error) {\n\tif *t.StateDir == \"\" {\n\t\treturn tempStateDir()\n\t}\n\treturn *t.StateDir, nil, nil\n}",
"func TstAppDataDir(goos, appName string, roaming bool) string {\n\treturn appDir(goos, appName, roaming)\n}",
"func (logger *FileLogger) dir() string {\n\treturn filepath.Dir(logger.Filename)\n}",
"func InertiaDir() string {\n\tif os.Getenv(\"INERTIA_PATH\") != \"\" {\n\t\treturn os.Getenv(\"INERTIA_PATH\")\n\t}\n\tconfDir, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"/inertia\"\n\t}\n\treturn filepath.Join(confDir, \"inertia\")\n}",
"func RelativeDir(relativePath string) string {\n\t_, f, _, _ := runtime.Caller(1)\n\treturn filepath.Join(filepath.Dir(f), relativePath)\n}",
"func fileDir() string {\n\n\t_, filename, _, _ := runtime.Caller(0)\n\treturn path.Dir(filename)\n}",
"func GetGoPathDir() string {\n\troseDir := GetRosieDir()\n\treturn path.Join(roseDir, goPathDirName)\n}",
"func SelfDir() string {\n\treturn filepath.Dir(SelfPath())\n}",
"func SelfDir() string {\n\treturn filepath.Dir(SelfPath())\n}",
"func SelfDir() string {\n\treturn filepath.Dir(SelfPath())\n}",
"func (gw *Gateway) GetWalletDir() (string, error) {\n\tif !gw.Config.EnableWalletAPI {\n\t\treturn \"\", wallet.ErrWalletAPIDisabled\n\t}\n\treturn gw.v.Config.WalletDirectory, nil\n}",
"func TestFileDir() string {\n\t_, file, _, ok := runtime.Caller(1)\n\tif ok {\n\t\treturn filepath.Dir(file)\n\t}\n\tpanic(\"cannot determine test file directory\")\n}",
"func GetProfileDir(lc logger.LoggingClient, profileDir string) string {\n\tenvValue := os.Getenv(envProfile)\n\tif len(envValue) > 0 {\n\t\tprofileDir = envValue\n\t\tlogEnvironmentOverride(lc, \"-p/-profile\", envProfile, envValue)\n\t}\n\n\tif len(profileDir) > 0 {\n\t\tprofileDir += \"/\"\n\t}\n\n\treturn profileDir\n}",
"func LogDir() string {\n\treturn logDir\n}",
"func (l *FileWriter) dir() string {\n\treturn filepath.Dir(l.filename())\n}",
"func GetConfDir(lc logger.LoggingClient, configDir string) string {\n\tenvValue := os.Getenv(envConfDir)\n\tif len(envValue) > 0 {\n\t\tconfigDir = envValue\n\t\tlogEnvironmentOverride(lc, \"-c/-confdir\", envConfDir, envValue)\n\t}\n\n\tif len(configDir) == 0 {\n\t\tconfigDir = defaultConfDirValue\n\t}\n\n\treturn configDir\n}",
"func (c *Config) LogDir() string {\n\trel := filepath.Join(c.Base, c.Get(\"log_dir\", logDir))\n\tabs, err := filepath.Abs(rel)\n\tif nil != err {\n\t\tpanic(err)\n\t}\n\treturn abs\n}",
"func GetLogDir() string {\n\treturn vboxmanage.GetVMInfoByRegexp(boxName, \"LogFldr=\\\"(.*)\\\"\")\n}",
"func (k *Kluster) StateDir() string {\n\treturn filepath.Join(k.Dir(), StateDirname)\n}",
"func (c Config) GetConfigDirPath() (string, error) {\n\t// Get home directory.\n\thome := os.Getenv(homeKey)\n\tif home != \"\" {\n\t\treturn filepath.Join(home, \".mstreamb0t\"), nil\n\t}\n\tu, err := user.Current()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(u.HomeDir, \".mstreamb0t\"), nil\n}",
"func (v *Module) GetDirectory() (o string) {\n\tif v != nil {\n\t\to = v.Directory\n\t}\n\treturn\n}",
"func TstAppDataDir(goos, appName string, roaming bool) string {\n\treturn appDataDir(goos, appName, roaming)\n}",
"func getResourceDirectoryPath() (directory string, err error) {\n\t_, filename, _, ok := runtime.Caller(0)\n\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"could not derive directory path\")\n\t}\n\n\treturn fmt.Sprintf(\"%s/%s\", path.Dir(filename), \"../../resource\"), nil\n}",
"func getPathToLog(dir string, logPath string) string {\n\treturn path.Join(dir, logPath)\n}",
"func GetLocalPath(in string) (dirpath string, err error) {\n\t// Assume cwd\n\tif len(in) == 0 {\n\t\treturn os.Getwd()\n\t}\n\n\t// Assume cwd + supplied path if not an absolute path\n\tif !filepath.IsAbs(in) {\n\t\tvar wd string\n\t\tif wd, err = os.Getwd(); err == nil {\n\t\t\tdirpath = filepath.Join(wd, in)\n\t\t}\n\t}\n\n\treturn\n}",
"func GetConfDir() string {\n\treturn fileutil.GetConfDir()\n}",
"func GetCgroupDir(pid, subsystem string) (string, error) {\n\tpath := filepath.Join(\"/proc\", pid, \"cgroup\")\n\tcgroupmap, err := cgroups.ParseCgroupFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif path, ok := cgroupmap[subsystem]; ok {\n\t\treturn path, nil\n\t}\n\tif path, ok := cgroupmap[cgroupNamePrefix+subsystem]; ok {\n\t\treturn path, nil\n\t}\n\treturn \"\", fmt.Errorf(\"Error: ControllerPath of %s is not found\", subsystem)\n}",
"func GameDir() (string, error) {\n\texePath, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", errors.WithStack(err)\n\t}\n\tgameDir := filepath.Dir(exePath)\n\treturn gameDir, nil\n}",
"func enclosingDir(path string) string {\n\tfor {\n\t\tif stat, err := os.Lstat(path); err == nil {\n\t\t\tif stat.IsDir() {\n\t\t\t\treturn path\n\t\t\t}\n\t\t}\n\t\tif path == \"\" {\n\t\t\treturn \"\"\n\t\t}\n\t\tpath = filepath.Dir(path)\n\t}\n}",
"func (s *Scope) dataDir() (string, error) {\n\tvar rfid syscall.GUID\n\n\tswitch s.Type {\n\tcase System:\n\t\trfid = rfidProgramData\n\n\tcase User:\n\t\trfid = rfidLocalAppData\n\n\tcase CustomHome:\n\t\treturn s.CustomHome, nil\n\n\tdefault:\n\t\treturn \"\", ErrInvalidScope\n\t}\n\n\tpath, err := getFolderPath(rfid)\n\tif err != nil {\n\t\treturn \"\", ErrRetrievingPath\n\t}\n\n\tif path, err = filepath.Abs(path); err != nil {\n\t\treturn \"\", ErrRetrievingPath\n\t}\n\n\treturn filepath.Join(path, s.Vendor, s.App), nil\n}",
"func CurrentDir() string {\n\t_, f, _, _ := runtime.Caller(1)\n\treturn filepath.Dir(f)\n}",
"func (d *Device) DeviceDir(repository string) string {\n\treturn deviceDirectory(repository, d.ID)\n}",
"func (c *client) GetTKGCompatibilityDirectory() (string, error) {\n\ttkgDir, err := c.GetTKGDirectory()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(tkgDir, constants.LocalCompatibilityFolderName), nil\n}",
"func (d *DockerPath) GetLogPath(network string) (path string) {\n\tif mainnet == network {\n\t\tpath = filepath.Join(d.BaseDir, d.MainnetLogDir)\n\t} else {\n\t\tpath = filepath.Join(d.BaseDir, d.TestnetLogDir)\n\t}\n\treturn path\n}",
"func Dir() string {\n\treturn configDir\n}",
"func ConfigDir() string {\n\treturn configDir\n}",
"func (c *client) GetTKGDirectory() (string, error) {\n\tif c.configDir == \"\" {\n\t\treturn \"\", errors.New(\"tkg config directory is empty\")\n\t}\n\treturn c.configDir, nil\n}",
"func ConfigDirectory() (dir string, e error) {\r\n\tdir, e = SystemDirectory()\r\n\tif s, ok := os.LookupEnv(\"PROGRAMDATA\"); ok {\r\n\t\tdir, e = s, nil\r\n\t}\r\n\treturn\r\n}",
"func (fs FilesystemStorage) TempDir() string {\n\treturn filepath.Join(fs.String(), \"tmp\")\n}",
"func Dir() string {\n\tdir, _ := os.Getwd()\n\treturn dir\n}",
"func (j *joinData) KubeletDir() string {\n\tif j.dryRun {\n\t\treturn j.dryRunDir\n\t}\n\treturn kubeadmconstants.KubeletRunDirectory\n}",
"func (k *Kluster) Dir() string {\n\treturn filepath.Dir(k.path)\n}",
"func LogDirName() string {\n\treturn logDirName\n}",
"func GetCurrFileDir() string {\n\t// get this file path\n\t_, file, _, ok := runtime.Caller(1)\n\tif !ok {\n\t\tpanic(fmt.Errorf(\"Unable to access the current file path\"))\n\t}\n\treturn filepath.Dir(file)\n}",
"func (fs FileStorageSettings) Dir() string {\n\tpath := \"\"\n\tif fs.Type == FileStorageTypeLocal {\n\t\tpath = fs.Local.Path\n\t} else if fs.Type == FileStorageTypeS3 {\n\t\tpath = fs.S3.Key\n\t}\n\treturn filepath.Dir(path)\n}",
"func (c *Config) RoundDir() string {\n\tif len(c.roundDir) == 0 {\n\t\tc.roundDir = path.Join(c.RewardDir(), c.Round)\n\t}\n\n\treturn c.roundDir\n}",
"func (inst *Installer) NightlyInstallDir() string {\n\treturn filepath.Join(\n\t\tinst.opts.InstallBaseDir,\n\t\tinst.NightlyID(),\n\t\tinst.opts.Timestamp,\n\t)\n}",
"func (j *joinData) ManifestDir() string {\n\tif j.dryRun {\n\t\treturn j.dryRunDir\n\t}\n\treturn kubeadmconstants.GetStaticPodDirectory()\n}",
"func (a SecondLifeClient) GetDirectory() (string, error) {\n\t// Directory detection took from indra/llvfs/lldir_win32.cpp\n\tenvParam := os.Getenv(\"APPDATA\")\n\tif envParam != \"\" {\n\t\treturn fmt.Sprintf(\"%s\\\\%s\", envParam, a), nil\n\t}\n\n\tknownPath, err := windows.KnownFolderPath(windows.FOLDERID_RoamingAppData, 0)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"unable to retrieve application data path: %w\", err)\n\t}\n\n\treturn fmt.Sprintf(\"%s\\\\%s\", knownPath, a), nil\n}",
"func ModsDir() (string, error) {\n\treturn modsDir, dirErr\n}",
"func TempDir() string",
"func TempDir() (string, error) {\n\t// Attempt to make temp dir for runner in /dev/shm. If that fails (eg\n\t// no permission), then attempt at OS default temp dir.\n\t// Check if /dev/shm exists first. Don't want to accidentially create a\n\t// directory in /dev (if someones runs this as root).\n\tif fi, err := os.Stat(\"/dev/shm\"); err == nil && fi.IsDir() {\n\t\tdir, err := ioutil.TempDir(\"/dev/shm\", \"edge-impulse-cli\")\n\t\tif err == nil {\n\t\t\treturn dir, nil\n\t\t}\n\t}\n\treturn ioutil.TempDir(\"\", \"edge-impulse-cli\")\n}",
"func (u *UserPath) GetLogPath(network string) (path string) {\n\tif mainnet == network {\n\t\tpath = filepath.Join(u.BaseDir, u.MainnetLogDir)\n\t} else {\n\t\tpath = filepath.Join(u.BaseDir, u.TestnetLogDir)\n\t}\n\treturn path\n}",
"func DataDirPath(envContainer EnvContainer) (string, error) {\n\treturn xdgDirPath(envContainer, \"XDG_DATA_HOME\", filepath.Join(\".local\", \"share\"))\n}",
"func (e *Engine) SourceDir() string {\n\treturn e.dirs.src\n}",
"func (l *Locator) SourceDir() string {\n\treturn l.src\n}",
"func CurrentDir(L *lua.LState) string {\n\t// same: debug.getinfo(2,'S').source\n\tvar dbg *lua.Debug\n\tvar err error\n\tvar ok bool\n\n\tdbg, ok = L.GetStack(1)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\t_, err = L.GetInfo(\"S\", dbg, lua.LNil)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn filepath.Dir(dbg.Source)\n}",
"func getCurrentDirectory() string {\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tdir = \"\"\n\t}\n\treturn strings.Replace(dir, \"\\\\\", \"/\", -1)\n}",
"func (s *Signatures) Dir() (dir string) {\n\tsignatures.Lock()\n\tdefer signatures.Unlock()\n\n\treturn signatures.dir\n}",
"func GetDefaultDataDir(env string) string {\n\tif env == \"windows\" {\n\t\treturn filepath.Join(os.Getenv(\"APPDATA\"), \"GoCryptoTrader\")\n\t}\n\n\tusr, err := user.Current()\n\tif err == nil {\n\t\treturn filepath.Join(usr.HomeDir, \".gocryptotrader\")\n\t}\n\n\tdir, err := os.UserHomeDir()\n\tif err != nil {\n\t\tlog.Warnln(log.Global, \"Environment variable unset, defaulting to current directory\")\n\t\tdir = \".\"\n\t}\n\treturn filepath.Join(dir, \".gocryptotrader\")\n}",
"func LintWorkflowDir(wfClientset wfclientset.Interface, namespace, dirPath string, strict bool) error {\n\twalkFunc := func(path string, info os.FileInfo, err error) error {\n\t\tif info == nil || info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\t\tfileExt := filepath.Ext(info.Name())\n\t\tswitch fileExt {\n\t\tcase \".yaml\", \".yml\", \".json\":\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t\treturn LintWorkflowFile(wfClientset, namespace, path, strict)\n\t}\n\treturn filepath.Walk(dirPath, walkFunc)\n}",
"func (config *Config) WorkingDir() string {\n\tconfigPath, err := filepath.Abs(config.configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to find config file (%s):\\n%v\\n\", config.configPath, err)\n\t}\n\n\tconfigPath, err = filepath.EvalSymlinks(configPath)\n\tif err != nil {\n\t\tlog.Fatalf(\"config error: unable to remove symbolic links for filepath (%s):\\n%v\\n\", configPath, err)\n\t}\n\n\treturn filepath.Join(filepath.Dir(configPath), config.Path)\n}"
] | [
"0.8564206",
"0.64106494",
"0.59586483",
"0.5947505",
"0.5719515",
"0.5587467",
"0.54746264",
"0.542685",
"0.5421473",
"0.5372698",
"0.5356713",
"0.53307366",
"0.53164977",
"0.53043145",
"0.5259979",
"0.52455527",
"0.5234985",
"0.5233929",
"0.51598644",
"0.51591307",
"0.51205194",
"0.5118738",
"0.51149863",
"0.5106971",
"0.51002747",
"0.5090167",
"0.5087013",
"0.5071868",
"0.5065494",
"0.50618035",
"0.5053515",
"0.5048668",
"0.5043288",
"0.50401455",
"0.5033981",
"0.5032486",
"0.502444",
"0.50236547",
"0.50106204",
"0.50020957",
"0.4995587",
"0.49951163",
"0.49921507",
"0.49876344",
"0.49804556",
"0.4976321",
"0.4976321",
"0.4976321",
"0.49482048",
"0.49471676",
"0.49249727",
"0.4924469",
"0.49231505",
"0.4922744",
"0.49206036",
"0.48945162",
"0.48701176",
"0.48690805",
"0.48633036",
"0.48550624",
"0.4850691",
"0.48476303",
"0.48451778",
"0.48364094",
"0.4829813",
"0.48171166",
"0.48113525",
"0.48070562",
"0.4806634",
"0.4800931",
"0.47993502",
"0.47879946",
"0.47827497",
"0.47821072",
"0.47789794",
"0.47742438",
"0.47608814",
"0.47589815",
"0.47575587",
"0.47555602",
"0.4752781",
"0.47432545",
"0.4740113",
"0.47397774",
"0.47250274",
"0.47173676",
"0.4713799",
"0.47104716",
"0.4707393",
"0.47061056",
"0.4705917",
"0.4704246",
"0.47015506",
"0.46971402",
"0.46952426",
"0.46870524",
"0.4679612",
"0.46788016",
"0.46709883",
"0.46658343"
] | 0.8467846 | 1 |
PerfEventDir returns the mountpoint of the perf_event cgroup pseudofilesystem or an empty string if it wasn't found. | func PerfEventDir() string {
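	// Scan the mount table for a cgroup mount whose options include the perf_event controller.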
for _, mi := range HostProcFS().Mounts() {
if mi.FilesystemType == "cgroup" {
for option := range mi.SuperOptions {
if option == "perf_event" {
return mi.MountPoint
}
}
}
}
return ""
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func PerfEventDir() string {\n\tfor _, mi := range Mounts() {\n\t\tif mi.FilesystemType == \"cgroup\" {\n\t\t\tfor option := range mi.SuperOptions {\n\t\t\t\tif option == \"perf_event\" {\n\t\t\t\t\treturn mi.MountPoint\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func TracingDir() string {\n\tmounts := HostProcFS().Mounts()\n\n\t// Look for an existing tracefs\n\tfor _, m := range mounts {\n\t\tif m.FilesystemType == \"tracefs\" {\n\t\t\tglog.V(1).Infof(\"Found tracefs at %s\", m.MountPoint)\n\t\t\treturn m.MountPoint\n\t\t}\n\t}\n\n\t// If no mounted tracefs has been found, look for it as a\n\t// subdirectory of the older debugfs\n\tfor _, m := range mounts {\n\t\tif m.FilesystemType == \"debugfs\" {\n\t\t\td := filepath.Join(m.MountPoint, \"tracing\")\n\t\t\ts, err := os.Stat(filepath.Join(d, \"events\"))\n\t\t\tif err == nil && s.IsDir() {\n\t\t\t\tglog.V(1).Infof(\"Found debugfs w/ tracing at %s\", d)\n\t\t\t\treturn d\n\t\t\t}\n\n\t\t\treturn m.MountPoint\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func TracingDir() string {\n\tmounts := Mounts()\n\n\t// Look for an existing tracefs\n\tfor _, m := range mounts {\n\t\tif m.FilesystemType == \"tracefs\" {\n\t\t\tglog.V(1).Infof(\"Found tracefs at %s\", m.MountPoint)\n\t\t\treturn m.MountPoint\n\t\t}\n\t}\n\n\t// If no mounted tracefs has been found, look for it as a\n\t// subdirectory of the older debugfs\n\tfor _, m := range mounts {\n\t\tif m.FilesystemType == \"debugfs\" {\n\t\t\td := filepath.Join(m.MountPoint, \"tracing\")\n\t\t\ts, err := os.Stat(filepath.Join(d, \"events\"))\n\t\t\tif err == nil && s.IsDir() {\n\t\t\t\tglog.V(1).Infof(\"Found debugfs w/ tracing at %s\", d)\n\t\t\t\treturn d\n\t\t\t}\n\n\t\t\treturn m.MountPoint\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func (dp Datapoint) Directory() (string, error) {\n\tind := strings.LastIndexByte(dp.Name, '.')\n\tif ind < 0 {\n\t\treturn \"\", fmt.Errorf(\"Metric without directory %s\", dp.Name)\n\t}\n\treturn dp.Name[:ind], nil\n}",
"func Dirname(fd uintptr) (string, error) {\n\tproc := fmt.Sprintf(\"/proc/self/fd/%d\", fd)\n\ts, err := os.Readlink(proc)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn path.Dir(s), nil\n}",
"func (o BuildSpecSourcePtrOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildSpecSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ContextDir\n\t}).(pulumi.StringPtrOutput)\n}",
"func GetCgroupDir(pid, subsystem string) (string, error) {\n\tpath := filepath.Join(\"/proc\", pid, \"cgroup\")\n\tcgroupmap, err := cgroups.ParseCgroupFile(path)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif path, ok := cgroupmap[subsystem]; ok {\n\t\treturn path, nil\n\t}\n\tif path, ok := cgroupmap[cgroupNamePrefix+subsystem]; ok {\n\t\treturn path, nil\n\t}\n\treturn \"\", fmt.Errorf(\"Error: ControllerPath of %s is not found\", subsystem)\n}",
"func (m NoopMounter) Dir() string {\n\treturn \"\"\n}",
"func (metadata EventMetadata) GetPath() (string, error) {\n\tpath, err := os.Readlink(\n\t\tfilepath.Join(\n\t\t\tProcFsFdInfo,\n\t\t\tstrconv.FormatUint(\n\t\t\t\tuint64(metadata.Fd),\n\t\t\t\t10,\n\t\t\t),\n\t\t),\n\t)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fanotify: %w\", err)\n\t}\n\n\treturn path, nil\n}",
"func rootlessSocketPathFromRunDir() (string, error) {\n\tuid := os.Getuid()\n\tf := filepath.Join(baseRunDir, \"user\", fmt.Sprintf(\"%d\", uid), \"docker.sock\")\n\tif fileExists(f) {\n\t\treturn f, nil\n\t}\n\treturn \"\", ErrRootlessDockerNotFoundRunDir\n}",
"func (o BuildRunStatusBuildSpecSourcePtrOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v *BuildRunStatusBuildSpecSource) *string {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.ContextDir\n\t}).(pulumi.StringPtrOutput)\n}",
"func PuppetLabsDir() (string, error) {\n\treturn \"/etc/puppetlabs\", nil\n}",
"func (s *Scope) dataDir() (string, error) {\n\tvar rfid syscall.GUID\n\n\tswitch s.Type {\n\tcase System:\n\t\trfid = rfidProgramData\n\n\tcase User:\n\t\trfid = rfidLocalAppData\n\n\tcase CustomHome:\n\t\treturn s.CustomHome, nil\n\n\tdefault:\n\t\treturn \"\", ErrInvalidScope\n\t}\n\n\tpath, err := getFolderPath(rfid)\n\tif err != nil {\n\t\treturn \"\", ErrRetrievingPath\n\t}\n\n\tif path, err = filepath.Abs(path); err != nil {\n\t\treturn \"\", ErrRetrievingPath\n\t}\n\n\treturn filepath.Join(path, s.Vendor, s.App), nil\n}",
"func GetDir() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn path.Dir(filename)\n}",
"func (o BuildSpecSourceOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildSpecSource) *string { return v.ContextDir }).(pulumi.StringPtrOutput)\n}",
"func TestFileDir() string {\n\t_, file, _, ok := runtime.Caller(1)\n\tif ok {\n\t\treturn filepath.Dir(file)\n\t}\n\tpanic(\"cannot determine test file directory\")\n}",
"func dockerCertDir(sys *types.SystemContext, hostPort string) (string, error) {\n\tif sys != nil && sys.DockerCertPath != \"\" {\n\t\treturn sys.DockerCertPath, nil\n\t}\n\tif sys != nil && sys.DockerPerHostCertDirPath != \"\" {\n\t\treturn filepath.Join(sys.DockerPerHostCertDirPath, hostPort), nil\n\t}\n\n\tvar (\n\t\thostCertDir string\n\t\tfullCertDirPath string\n\t)\n\tfor _, systemPerHostCertDirPath := range systemPerHostCertDirPaths {\n\t\tif sys != nil && sys.RootForImplicitAbsolutePaths != \"\" {\n\t\t\thostCertDir = filepath.Join(sys.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)\n\t\t} else {\n\t\t\thostCertDir = systemPerHostCertDirPath\n\t\t}\n\n\t\tfullCertDirPath = filepath.Join(hostCertDir, hostPort)\n\t\t_, err := os.Stat(fullCertDirPath)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\tif os.IsNotExist(err) {\n\t\t\tcontinue\n\t\t}\n\t\tif os.IsPermission(err) {\n\t\t\tlogrus.Debugf(\"error accessing certs directory due to permissions: %v\", err)\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\treturn fullCertDirPath, nil\n}",
"func GameDir() (string, error) {\n\texePath, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", errors.WithStack(err)\n\t}\n\tgameDir := filepath.Dir(exePath)\n\treturn gameDir, nil\n}",
"func (s *Scope) configDir() (string, error) {\n\tswitch s.Type {\n\tcase System:\n\t\treturn defaultConfigDirs[0], nil\n\n\tcase User:\n\t\tpath := os.Getenv(\"XDG_CONFIG_HOME\")\n\t\tif path == \"\" {\n\t\t\treturn expandUser(\"~/.config\"), nil\n\t\t}\n\t\treturn path, nil\n\n\tcase CustomHome:\n\t\treturn filepath.Join(s.CustomHome, \".config\"), nil\n\t}\n\n\treturn \"\", ErrInvalidScope\n}",
"func fileDir() string {\n\n\t_, filename, _, _ := runtime.Caller(0)\n\treturn path.Dir(filename)\n}",
"func (s *Scope) configDir() (string, error) {\n\tp, err := s.dataDir()\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\treturn filepath.Join(p, \"Config\"), nil\n}",
"func getMountPoint(volName string) string {\n\treturn filepath.Join(mountRoot, volName)\n}",
"func (c *Container) Mountpoint() (string, error) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tif err := c.syncContainer(); err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"error updating container %s state\", c.ID())\n\t}\n\treturn c.state.Mountpoint, nil\n}",
"func (g *GlobalContext) Dir() string {\n\tpwd, err := os.Getwd()\n\n\tif err != nil {\n\t\tg.LogError(err)\n\t}\n\n\treturn pwd + \"/.secrets\"\n}",
"func CacheDir() (string, error) {\n\tc, err := os.UserCacheDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(c, appName), nil\n}",
"func (s *Settings) DataDir() string {\n\t// List is in reverse order of priority - last one will be preferred.\n\tcandidates := []string{\n\t\t`/opt/factorio`,\n\t\t`~/.factorio`,\n\t\t`~/factorio`,\n\t\t`~/Library/Application Support/factorio`,\n\t}\n\tif e := os.Getenv(\"APPDATA\"); e != \"\" {\n\t\tcandidates = append(candidates, filepath.Join(e, \"Factorio\"))\n\t}\n\n\tif s.datadir != \"\" {\n\t\tcandidates = []string{s.datadir}\n\t}\n\n\tmatch := \"\"\n\tfor _, c := range candidates {\n\t\ts, err := homedir.Expand(c)\n\t\tif err != nil {\n\t\t\tglog.Infof(\"Unable to expand %s: %v\", c, err)\n\t\t\tcontinue\n\t\t}\n\t\tinfo, err := os.Stat(s)\n\t\tif os.IsNotExist(err) {\n\t\t\tglog.Infof(\"Path %s does not exists, skipped\", s)\n\t\t\tcontinue\n\t\t}\n\t\tif !info.IsDir() {\n\t\t\tglog.Infof(\"Path %s is a file, skipped\", s)\n\t\t\tcontinue\n\t\t}\n\t\tglog.Infof(\"Found factorio data dir: %s\", s)\n\t\tmatch = s\n\t}\n\tif match == \"\" {\n\t\tglog.Infof(\"No Factorio data dir found\")\n\t\treturn \"\"\n\t}\n\tglog.Infof(\"Using Factorio data dir: %s\", match)\n\treturn match\n}",
"func CallerDir() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn path.Dir(filename)\n}",
"func (mounter *Mounter) GetDeviceNameFromMount(mountPath, pluginDir string) (string, error) {\n\treturn getDeviceNameFromMount(mounter.logger, mounter, mountPath, pluginDir)\n}",
"func Dir() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\n\treturn filepath.Dir(filename)\n}",
"func GetSnapshotDir(snapshotID string) string {\n\treturn filepath.Join(SnapshotsDirname, snapshotID)\n}",
"func (s *Scope) dataDir() (string, error) {\n\tswitch s.Type {\n\tcase System:\n\t\treturn defaultDataDirs[0], nil\n\n\tcase User:\n\t\tpath := os.Getenv(\"XDG_DATA_HOME\")\n\t\tif path == \"\" {\n\t\t\treturn expandUser(\"~/.local/share\"), nil\n\t\t}\n\t\treturn path, nil\n\n\tcase CustomHome:\n\t\treturn filepath.Join(s.CustomHome, \".local/share\"), nil\n\t}\n\n\treturn \"\", ErrInvalidScope\n}",
"func (d Dir) Path() string {\n\tif d.env == \"\" {\n\t\tpanic(\"xdgdir.Dir.Path() on zero Dir\")\n\t}\n\tp := d.path()\n\tif p != \"\" && d.userOwned {\n\t\tinfo, err := os.Stat(p)\n\t\tif err != nil {\n\t\t\treturn \"\"\n\t\t}\n\t\tif !info.IsDir() || info.Mode().Perm() != 0700 {\n\t\t\treturn \"\"\n\t\t}\n\t\tst, ok := info.Sys().(*syscall.Stat_t)\n\t\tif !ok || int(st.Uid) != geteuid() {\n\t\t\treturn \"\"\n\t\t}\n\t}\n\treturn p\n}",
"func rootlessSocketPathFromEnv() (string, error) {\n\txdgRuntimeDir, exists := os.LookupEnv(\"XDG_RUNTIME_DIR\")\n\tif exists {\n\t\tf := filepath.Join(xdgRuntimeDir, \"docker.sock\")\n\t\tif fileExists(f) {\n\t\t\treturn f, nil\n\t\t}\n\n\t\treturn \"\", ErrRootlessDockerNotFoundXDGRuntimeDir\n\t}\n\n\treturn \"\", ErrXDGRuntimeDirNotSet\n}",
"func (d *Device) DeviceDir(repository string) string {\n\treturn deviceDirectory(repository, d.ID)\n}",
"func rootlessSocketPathFromHomeRunDir() (string, error) {\n\thome, err := os.UserHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tf := filepath.Join(home, \".docker\", \"run\", \"docker.sock\")\n\tif fileExists(f) {\n\t\treturn f, nil\n\t}\n\treturn \"\", ErrRootlessDockerNotFoundHomeRunDir\n}",
"func (fs FilesystemStorage) TempDir() string {\n\treturn filepath.Join(fs.String(), \"tmp\")\n}",
"func Dir() string {\n\tif ForceDir != \"\" {\n\t\treturn ForceDir\n\t}\n\tdir := os.Getenv(\"XDG_DATA_HOME\")\n\tif dir != \"\" {\n\t\treturn dir\n\t}\n\tusr, _ := user.Current()\n\tif usr != nil {\n\t\treturn filepath.Join(usr.HomeDir, \".local\", \"share\")\n\t}\n\treturn \"\"\n}",
"func GetMountedFilePath(inputVal string, flags uintptr) (string, error) {\n\ts := strings.Split(inputVal, \":\")\n\tif len(s) != 2 {\n\t\treturn \"\", fmt.Errorf(\"%s: Usage: <block device identifier>:<path>\", inputVal)\n\t}\n\n\t// s[0] can be sda or UUID.\n\tdevice, err := GetStorageDevice(s[0])\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"fn GetStorageDevice: err = %v\", err)\n\t}\n\n\tdevName := device.Name\n\tmountPath, err := MountDevice(device, flags)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to mount %s , flags=%v, err=%v\", devName, flags, err)\n\t}\n\n\tfPath := filepath.Join(mountPath, s[1])\n\treturn fPath, nil\n}",
"func DirName() string {\n\t_, filename, _, _ := runtime.Caller(1)\n\treturn path.Dir(filename)\n}",
"func GetRuntimeDir() (string, error) {\n\treturn \"\", errors.New(\"this function is not implemented for windows\")\n}",
"func (d *gcpVolDriver) getMountpoint(name string) string {\n\treturn filepath.Join(d.driverRootDir, name, \"_data\")\n}",
"func (j *joinData) ManifestDir() string {\n\tif j.dryRun {\n\t\treturn j.dryRunDir\n\t}\n\treturn kubeadmconstants.GetStaticPodDirectory()\n}",
"func (o BuildRunStatusBuildSpecSourceOutput) ContextDir() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v BuildRunStatusBuildSpecSource) *string { return v.ContextDir }).(pulumi.StringPtrOutput)\n}",
"func (mr *MountResolver) getOverlayPath(mount *model.MountEvent) string {\n\tif entry, found := mr.overlayPathCache.Get(mount.MountID); found {\n\t\treturn entry.(string)\n\t}\n\n\tif ancestor := mr.getAncestor(mount); ancestor != nil {\n\t\tmount = ancestor\n\t}\n\n\tfor _, deviceMount := range mr.devices[mount.Device] {\n\t\tif mount.MountID != deviceMount.MountID && deviceMount.IsOverlayFS() {\n\t\t\tif p := mr.getParentPath(deviceMount.MountID); p != \"\" {\n\t\t\t\tmr.overlayPathCache.Add(mount.MountID, p)\n\t\t\t\treturn p\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\"\n}",
"func (m *DefaultMounter) Dir() string {\n\treturn m.dir\n}",
"func GetPath(mounter volume.Mounter) (string, error) {\n\tpath := mounter.GetPath()\n\tif path == \"\" {\n\t\treturn \"\", fmt.Errorf(\"path is empty %s\", reflect.TypeOf(mounter).String())\n\t}\n\treturn path, nil\n}",
"func (fs FilesystemStorage) AttachmentDir() string {\n\treturn filepath.Join(fs.String(), \"att\")\n}",
"func rootlessSocketPathFromHomeDesktopDir() (string, error) {\n\thome, err := os.UserHomeDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tf := filepath.Join(home, \".docker\", \"desktop\", \"docker.sock\")\n\tif fileExists(f) {\n\t\treturn f, nil\n\t}\n\treturn \"\", ErrRootlessDockerNotFoundHomeDesktopDir\n}",
"func PluginSpecDir() string {\n\treturn filepath.Join(os.Getenv(\"programdata\"), \"docker\", \"plugins\")\n}",
"func configDir() (string, error) {\n\tif dc := os.Getenv(\"DOCKER_CONFIG\"); dc != \"\" {\n\t\treturn dc, nil\n\t}\n\tif h := dockerUserHomeDir(); h != \"\" {\n\t\treturn filepath.Join(dockerUserHomeDir(), \".docker\"), nil\n\t}\n\treturn \"\", errNoHomeDir\n}",
"func (s *Scope) cacheDir() (string, error) {\n\tswitch s.Type {\n\tcase System:\n\t\treturn defaultCacheDir, nil\n\n\tcase User:\n\t\tpath := os.Getenv(\"XDG_CACHE_HOME\")\n\t\tif path == \"\" {\n\t\t\treturn expandUser(\"~/.cache\"), nil\n\t\t}\n\t\treturn path, nil\n\n\tcase CustomHome:\n\t\treturn filepath.Join(s.CustomHome, \".cache\"), nil\n\t}\n\n\treturn \"\", ErrInvalidScope\n}",
"func (d *QemuDriver) getMonitorPath(dir string) (string, error) {\n\tvar longPathSupport bool\n\tcurrentQemuVer := d.DriverContext.node.Attributes[qemuDriverVersionAttr]\n\tcurrentQemuSemver := semver.New(currentQemuVer)\n\tif currentQemuSemver.LessThan(*qemuVersionLongSocketPathFix) {\n\t\tlongPathSupport = false\n\t\td.logger.Printf(\"[DEBUG] driver.qemu: long socket paths are not available in this version of QEMU (%s)\", currentQemuVer)\n\t} else {\n\t\tlongPathSupport = true\n\t\td.logger.Printf(\"[DEBUG] driver.qemu: long socket paths available in this version of QEMU (%s)\", currentQemuVer)\n\t}\n\tfullSocketPath := fmt.Sprintf(\"%s/%s\", dir, qemuMonitorSocketName)\n\tif len(fullSocketPath) > qemuLegacyMaxMonitorPathLen && longPathSupport == false {\n\t\treturn \"\", fmt.Errorf(\"monitor path is too long for this version of qemu\")\n\t}\n\treturn fullSocketPath, nil\n}",
"func SelfDir() string {\n\treturn filepath.Dir(SelfPath())\n}",
"func SelfDir() string {\n\treturn filepath.Dir(SelfPath())\n}",
"func SelfDir() string {\n\treturn filepath.Dir(SelfPath())\n}",
"func GetContainerSpecDir(id string) string {\n\treturn filepath.Join(masterPath, id)\n}",
"func PathCfgDir() string {\n\tdir := os.Getenv(ENV_CFG_DIR)\n\tif dir != \"\" {\n\t\treturn dir\n\t}\n\thomeDir, err := Home()\n\tif err != nil {\n\t\tlog.Fatal(\"can not fetch home directory\")\n\t}\n\treturn filepath.Join(homeDir, DEFAULT_CFG_DIR)\n}",
"func (c *Container) MountPoint() (string, error) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\tif err := c.syncContainer(); err != nil {\n\t\treturn \"\", errors.Wrapf(err, \"error updating container %s state\", c.ID())\n\t}\n\treturn c.state.Mountpoint, nil\n}",
"func (s *Scope) cacheDir() (string, error) {\n\tp, err := s.dataDir()\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\treturn filepath.Join(p, \"Cache\"), nil\n}",
"func (s *Scope) logDir() (string, error) {\n\tp, err := s.dataDir()\n\tif err != nil {\n\t\treturn p, err\n\t}\n\n\treturn filepath.Join(p, \"Logs\"), nil\n}",
"func getPureAppDataDir(namespace string) (string, int) {\n\tif strings.HasPrefix(namespace, \"nginx-without-enc\") {\n\t\treturn \"/usr/share/nginx/html\", units.GiB / 2\n\t}\n\tif strings.HasPrefix(namespace, \"wordpress\") {\n\t\treturn \"/var/www/html\", units.GiB / 2\n\t}\n\tif strings.HasPrefix(namespace, \"elasticsearch\") {\n\t\treturn \"/usr/share/elasticsearch/data\", units.GiB * 2\n\t}\n\tif strings.HasPrefix(namespace, \"mysql-without-enc\") {\n\t\treturn \"/var/lib/mysql\", units.GiB\n\t}\n\treturn \"\", 0\n}",
"func ExeDir() (string, error) {\n\texePath, err := os.Executable()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Dir(exePath), nil\n}",
"func (v Ver) GetChartsDir() string {\n\tif len(common.Config.Rendering.ChartsDir) == 0 {\n\t\treturn path.Join(common.Config.Rendering.ResourceDir, \"helm\", v.String())\n\t}\n\treturn path.Join(common.Config.Rendering.ChartsDir, v.String())\n}",
"func (e *Engine) SourceDir() string {\n\treturn e.dirs.src\n}",
"func getSecureRunDir() (string, error) {\n\tuid := syscall.Getuid()\n\n\trunDir := path.Join(os.TempDir(), fmt.Sprintf(\"elvish-%d\", uid))\n\terr := os.MkdirAll(runDir, 0700)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"mkdir: %v\", err)\n\t}\n\n\tvar stat syscall.Stat_t\n\terr = syscall.Stat(runDir, &stat)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"stat: %v\", err)\n\t}\n\n\tif int(stat.Uid) != uid {\n\t\treturn \"\", ErrBadOwner\n\t}\n\tif stat.Mode&077 != 0 {\n\t\treturn \"\", ErrBadPermission\n\t}\n\treturn runDir, err\n}",
"func dirLocator() func() (string, bool) {\n\tvar cur int\n\treturn func() (string, bool) {\n\t\tcur++\n\t\tswitch cur {\n\t\tcase 1:\n\t\t\treturn \"\", true\n\t\tcase 2:\n\t\t\tif dir, err := os.Getwd(); err == nil {\n\t\t\t\treturn dir + \"/\", true\n\t\t\t}\n\t\t\tfallthrough\n\t\tcase 3:\n\t\t\tif dir, err := filepath.Abs(filepath.Dir(os.Args[0])); err == nil {\n\t\t\t\treturn dir + \"/\", true\n\t\t\t}\n\t\t\tfallthrough\n\t\tcase 4:\n\t\t\t// linux-only implementation from\n\t\t\t// https://github.com/kardianos/osext/blob/master/osext_procfs.go\n\t\t\tconst deletedTag = \" (deleted)\"\n\t\t\texecpath, err := os.Readlink(\"/proc/self/exe\")\n\t\t\tif err == nil {\n\t\t\t\texecpath = strings.TrimSuffix(execpath, deletedTag)\n\t\t\t\texecpath = strings.TrimPrefix(execpath, deletedTag)\n\t\t\t\treturn filepath.Dir(execpath) + \"/\", true\n\t\t\t}\n\t\t\tfallthrough\n\t\tdefault:\n\t\t\treturn \"\", false\n\t\t}\n\t}\n}",
"func GetPassDir() (d string, err error) {\n\td, ok := os.LookupEnv(PASSGODIR)\n\tif !ok {\n\t\thome, err := GetHomeDir()\n\t\tif err == nil {\n\t\t\td = filepath.Join(home, \".passgo\")\n\t\t}\n\t}\n\treturn\n}",
"func GetRootlessDir() string {\n\treturn rootlessDir\n}",
"func Dir() (string, error) {\n\tc, err := os.UserConfigDir()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn filepath.Join(c, appName), nil\n}",
"func (r *Ring) Eventfd() uintptr {\n\treturn r.eventfd\n}",
"func (t *Application) stateDir() (dir string, closer func() error, err error) {\n\tif *t.StateDir == \"\" {\n\t\treturn tempStateDir()\n\t}\n\treturn *t.StateDir, nil, nil\n}",
"func socketPath(bundleDir string) (string, error) {\n\tnum := rand.Intn(10000)\n\tpath := filepath.Join(bundleDir, fmt.Sprintf(\"socket-%4d\", num))\n\tconst maxPathLen = 108\n\tif len(path) <= maxPathLen {\n\t\treturn path, nil\n\t}\n\n\t// Path is too large, try to make it smaller.\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting cwd: %v\", err)\n\t}\n\tpath, err = filepath.Rel(cwd, path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error getting relative path for %q from cwd %q: %v\", path, cwd, err)\n\t}\n\tif len(path) > maxPathLen {\n\t\treturn \"\", fmt.Errorf(\"could not get socket path under length limit %d: %s\", maxPathLen, path)\n\t}\n\treturn path, nil\n}",
"func GetPasswdsPath(workDirectory string) (string, error) {\n\treturn fmt.Sprintf(\"%s/.PASSWDS\", workDirectory), nil\n}",
"func pidCgroupPath(pid string) (string, error) {\n\tf, err := os.Open(fmt.Sprintf(\"/proc/%s/cgroup\", pid))\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer f.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\tfields := strings.Split(scanner.Text(), \":\")\n\t\tif len(fields) != 3 {\n\t\t\tcontinue\n\t\t}\n\t\tif fields[1] == \"pids\" {\n\t\t\treturn fmt.Sprintf(\"/sys/fs/cgroup/pids/%s/cgroup.procs\", fields[2]), nil\n\t\t}\n\t}\n\treturn \"\", fmt.Errorf(\"couldn't find pids group for PID %s\", pid)\n}",
"func (cc *CollectdConfig) InstanceConfigDir() string {\n\treturn filepath.Join(cc.ConfigDir, cc.InstanceName)\n}",
"func (e *Engine) WorkDir() string {\n\treturn e.dirs.work\n}",
"func (c *Collection) AbsDir() string {\n\treturn filepath.Join(c.cfg.SourceDir(), c.PathPrefix())\n}",
"func k8sDataDirName() string {\n\treturn time.Now().Format(\"..2006_01_02_15_04_05.000000000\")\n}",
"func (attacher *azureDiskAttacher) GetDeviceMountPath(spec *volume.Spec) (string, error) {\n\tvolumeSource, err := getVolumeSource(spec)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn makeGlobalPDPath(attacher.host, volumeSource.DiskName), nil\n}",
"func (mgr *Manager) DataDir() string {\n\treturn mgr.dataDir\n}",
"func getEventPathFromVar(eventPathVar string) string {\n\t// Assume eventPathVar matches the eventPathVarRegex\n\tif eventPathVar == \"$(event)\" {\n\t\treturn \"\"\n\t}\n\treturn strings.TrimSuffix(strings.TrimPrefix(eventPathVar, \"$(event.\"), \")\")\n}",
"func TracepointEventAttr(subsystem, event string) (*unix.PerfEventAttr, error) {\n\tconfig, err := GetTracepointConfig(subsystem, event)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &unix.PerfEventAttr{\n\t\tType: PERF_TYPE_TRACEPOINT,\n\t\tConfig: config,\n\t\tSize: uint32(unsafe.Sizeof(unix.PerfEventAttr{})),\n\t\tBits: unix.PerfBitDisabled | unix.PerfBitExcludeHv,\n\t\tRead_format: unix.PERF_FORMAT_TOTAL_TIME_RUNNING | unix.PERF_FORMAT_TOTAL_TIME_ENABLED,\n\t\tSample_type: PERF_SAMPLE_IDENTIFIER,\n\t}, nil\n}",
"func (ep *epInfoCache) StateDir() string { return ep.epdir }",
"func getInternalMountPath(workingMountDir string, vol *smbVolume) string {\n\tif vol == nil {\n\t\treturn \"\"\n\t}\n\tmountDir := vol.uuid\n\tif vol.uuid == \"\" {\n\t\tmountDir = vol.subDir\n\t}\n\treturn filepath.Join(workingMountDir, mountDir)\n}",
"func (c *CmdReal) GetDir() string {\n\treturn c.cmd.Dir\n}",
"func GetProfileDir(lc logger.LoggingClient, profileDir string) string {\n\tenvValue := os.Getenv(envProfile)\n\tif len(envValue) > 0 {\n\t\tprofileDir = envValue\n\t\tlogEnvironmentOverride(lc, \"-p/-profile\", envProfile, envValue)\n\t}\n\n\tif len(profileDir) > 0 {\n\t\tprofileDir += \"/\"\n\t}\n\n\treturn profileDir\n}",
"func GetDataDir() string {\n\tvar dataDirectory string\n\tdefaultDataHome := os.Getenv(\"XDG_DATA_HOME\")\n\n\tif len(defaultDataHome) == 0 {\n\t\tdataDirectory = path.Join(os.Getenv(\"HOME\"), \".local\", \"share\")\n\t} else {\n\t\tdataDirectory = path.Join(defaultDataHome)\n\t}\n\n\tdataDirectory = path.Join(dataDirectory, APP_NAME)\n\treturn dataDirectory\n}",
"func FindHierarchyMountRootPath(subsystemName string) string {\n\tf, err := os.Open(\"/proc/self/mountinfo\")\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\n\tdefer f.Close()\n\n\tscanner := bufio.NewScanner(f)\n\tfor scanner.Scan() {\n\t\ttxt := scanner.Text()\n\t\tfields := strings.Split(txt, \" \")\n\t\t// find whether \"subsystemName\" appear in the last field\n\t\t// if so, then the fifth field is the path\n\t\tfor _, opt := range strings.Split(fields[len(fields)-1], \",\") {\n\t\t\tif opt == subsystemName {\n\t\t\t\treturn fields[4]\n\t\t\t}\n\t\t}\n\t}\n\treturn \"\"\n}",
"func getTelemetryServiceDirectory() (path string, dir string) {\n\tpath = filepath.Join(CniInstallDir, TelemetryServiceProcessName)\n\n\tif _, exists := os.Stat(path); exists != nil {\n\t\tex, _ := os.Executable()\n\t\texDir := filepath.Dir(ex)\n\t\tpath = filepath.Join(exDir, TelemetryServiceProcessName)\n\t\tdir = exDir\n\t} else {\n\t\tdir = CniInstallDir\n\t}\n\treturn\n}",
"func RootDir() string {\n\t_, b, _, _ := runtime.Caller(0)\n\td := path.Join(path.Dir(b))\n\treturn filepath.Dir(d)\n}",
"func UserCacheDir(tc Context) (string, error) {\n\treturn os.UserCacheDir()\n}",
"func (env *LocalTestEnv) Directory() string {\n\treturn env.TmpPath\n}",
"func GetTracepointConfig(subsystem, event string) (uint64, error) {\n\tres, err := fileToStrings(\n\t\tTracingDir + fmt.Sprintf(\"/events/%s/%s/id\", subsystem, event))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn strconv.ParseUint(res[0], 10, 64)\n}",
"func getSourceMount(source string) (string, string, error) {\n\t// Ensure any symlinks are resolved.\n\tsourcePath, err := filepath.EvalSymlinks(source)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\tmi, err := mount.GetMounts(mount.ParentsFilter(sourcePath))\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\tif len(mi) < 1 {\n\t\treturn \"\", \"\", fmt.Errorf(\"Can't find mount point of %s\", source)\n\t}\n\n\t// find the longest mount point\n\tvar idx, maxlen int\n\tfor i := range mi {\n\t\tif len(mi[i].Mountpoint) > maxlen {\n\t\t\tmaxlen = len(mi[i].Mountpoint)\n\t\t\tidx = i\n\t\t}\n\t}\n\treturn mi[idx].Mountpoint, mi[idx].Optional, nil\n}",
"func (k *Kluster) StateDir() string {\n\treturn filepath.Join(k.Dir(), StateDirname)\n}",
"func GetDevicePath(str []byte) (string, error) {\n\tvar volDev VolumeDevSpec\n\terr := json.Unmarshal(str, &volDev)\n\tif err != nil && len(err.Error()) != 0 {\n\t\treturn \"\", err\n\t}\n\n\t// Get the device node for the unit returned from the attach.\n\t// Lookup each device that has a label and if that label matches\n\t// the one for the given bus number.\n\t// The device we need is then constructed from the dir name with\n\t// the matching label.\n\tpciSlotAddr := fmt.Sprintf(\"%s/%s/address\", sysPciSlots, volDev.ControllerPciSlotNumber)\n\n\tfh, err := os.Open(pciSlotAddr)\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\"Error\": err}).Warn(\"Get device path failed for unit# %s @ PCI slot %s: \",\n\t\t\tvolDev.Unit, volDev.ControllerPciSlotNumber)\n\t\treturn \"\", fmt.Errorf(\"Device not found\")\n\t}\n\n\tbuf := make([]byte, pciAddrLen)\n\t_, err = fh.Read(buf)\n\n\tfh.Close()\n\tif err != nil && err != io.EOF {\n\t\tlog.WithFields(log.Fields{\"Error\": err}).Warn(\"Get device path failed for unit# %s @ PCI slot %s: \",\n\t\t\tvolDev.Unit, volDev.ControllerPciSlotNumber)\n\t\treturn \"\", fmt.Errorf(\"Device not found\")\n\t}\n\treturn fmt.Sprintf(\"/dev/disk/by-path/pci-%s.0-scsi-0:0:%s:0\", string(buf), volDev.Unit), nil\n\n}",
"func (o *os) GetUserDataDir() gdnative.String {\n\to.ensureSingleton()\n\t//log.Println(\"Calling _OS.GetUserDataDir()\")\n\n\t// Build out the method's arguments\n\tptrArguments := make([]gdnative.Pointer, 0, 0)\n\n\t// Get the method bind\n\tmethodBind := gdnative.NewMethodBind(\"_OS\", \"get_user_data_dir\")\n\n\t// Call the parent method.\n\t// String\n\tretPtr := gdnative.NewEmptyString()\n\tgdnative.MethodBindPtrCall(methodBind, o.GetBaseObject(), ptrArguments, retPtr)\n\n\t// If we have a return type, convert it from a pointer into its actual object.\n\tret := gdnative.NewStringFromPointer(retPtr)\n\treturn ret\n}",
"func GetExecDirectory() string {\n\tex, err := os.Executable()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\texPath := filepath.Dir(ex)\n\treturn exPath\n}",
"func (f *File) dirName() string {\n\ti := strings.LastIndexByte(f.Key, '/')\n\tif i == -1 {\n\t\treturn \"\"\n\t}\n\n\treturn f.Key[:i+1]\n}",
"func GetCacheDir() (string, error) {\n\tgoctlH, err := GetGoctlHome()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn filepath.Join(goctlH, cacheDir), nil\n}"
] | [
"0.8565974",
"0.6476519",
"0.61357117",
"0.50944376",
"0.5090655",
"0.5085397",
"0.5061623",
"0.50163496",
"0.4961857",
"0.49506846",
"0.4856047",
"0.48389697",
"0.47908404",
"0.47382855",
"0.47168255",
"0.47013557",
"0.469574",
"0.46919125",
"0.4681249",
"0.46691275",
"0.46665832",
"0.46664837",
"0.46621773",
"0.4660223",
"0.4654553",
"0.46389553",
"0.46295467",
"0.4626669",
"0.46240526",
"0.4613761",
"0.4613556",
"0.46018398",
"0.45968458",
"0.45764685",
"0.45759434",
"0.45736665",
"0.45562515",
"0.4552088",
"0.4544989",
"0.4542391",
"0.45358813",
"0.453271",
"0.4528442",
"0.45250285",
"0.45176488",
"0.45082507",
"0.45035988",
"0.45017517",
"0.44984645",
"0.44941294",
"0.4486402",
"0.44847038",
"0.4475841",
"0.4475841",
"0.4475841",
"0.44667047",
"0.4455725",
"0.4454537",
"0.44311273",
"0.44307795",
"0.44219524",
"0.44065553",
"0.43897527",
"0.438503",
"0.4381791",
"0.43733093",
"0.43690065",
"0.43566805",
"0.4340792",
"0.433937",
"0.4338792",
"0.43358043",
"0.4334451",
"0.4330367",
"0.43281558",
"0.43255737",
"0.43162894",
"0.43142775",
"0.43112937",
"0.42967436",
"0.42887792",
"0.42881268",
"0.42874607",
"0.4286002",
"0.42822066",
"0.42821148",
"0.42774805",
"0.42745188",
"0.4270261",
"0.42686692",
"0.4266953",
"0.42484868",
"0.42435738",
"0.422983",
"0.4224763",
"0.42225647",
"0.42186224",
"0.42174688",
"0.421666",
"0.42136717"
] | 0.8672294 | 0 |
MountTempFS mounts a filesystem. It is primarily a wrapper around unix.Mount, but it also ensures that the mountpoint exists before attempting to mount to it. | func MountTempFS(source string, target string, fstype string, flags uintptr, data string) error {
// Make sure that `target` exists.
err := os.MkdirAll(target, 0500)
if err != nil {
glog.V(2).Infof("Couldn't create temp %s mountpoint: %s", fstype, err)
return err
}
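	// The mountpoint now exists; attempt the mount itself.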
err = unix.Mount(source, target, fstype, flags, data)
if err != nil {
glog.V(2).Infof("Couldn't mount %s on %s: %s", fstype, target, err)
return err
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func MountFilesystem(mountpoint string, proxy string) {\n\n\t// The proxy URL should end with a slash, add it if the user forgot about this\n\tif proxy[len(proxy)-1] != '/' {\n\t\tproxy += \"/\"\n\t}\n\n\tc, err := fuse.Mount(\n\t\tmountpoint,\n\t\tfuse.FSName(fmt.Sprintf(\"hgmsfs(%s)\", proxy)),\n\t\tfuse.Subtype(\"hgmfs\"),\n\t\tfuse.LocalVolume(),\n\t\tfuse.VolumeName(\"hgms-volume\"),\n\t)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\n\tif lruEnabled == true {\n\t\tlruCache, err = ssc.New(\"./ssc.db\", lruBlockSize, lruMaxItems)\n\t\tif err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n\tfmt.Printf(\"Serving FS at '%s' (lru_cache=%.2fMB)\\n\", mountpoint, float64(lruBlockSize*lruMaxItems/1024/1024))\n\n\terr = fs.Serve(c, HgmFs{mountPoint: mountpoint, proxyUrl: proxy})\n\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\t// check if the mount process has an error to report\n\t<-c.Ready\n\tif err := c.MountError; err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func UnmountTempFS(dir string, fstype string) error {\n\terr := unix.Unmount(dir, 0)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't unmount %s at %s: %s\", fstype, dir, err)\n\t\treturn err\n\t}\n\n\terr = os.Remove(dir)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't remove %s: %s\", dir, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func UnmountTempFS(dir string, fstype string) error {\n\terr := unix.Unmount(dir, 0)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't unmount %s at %s: %s\", fstype, dir, err)\n\t\treturn err\n\t}\n\n\terr = os.Remove(dir)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't remove %s: %s\", dir, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func MountFilesystem(vol *apis.LVMVolume, mount *MountInfo, podinfo *PodLVInfo) error {\n\tif err := os.MkdirAll(mount.MountPath, 0755); err != nil {\n\t\treturn status.Errorf(codes.Internal, \"Could not create dir {%q}, err: %v\", mount.MountPath, err)\n\t}\n\n\treturn MountVolume(vol, mount, podinfo)\n}",
"func (z *zfsctl) MountFileSystem(ctx context.Context, name, options, opts string, all bool) *execute {\n\targs := []string{\"mount\"}\n\tif len(options) > 0 {\n\t\targs = append(args, options)\n\t}\n\tif len(opts) > 0 {\n\t\targs = append(args, opts)\n\t}\n\tif all {\n\t\targs = append(args, \"-a\")\n\t} else {\n\t\targs = append(args, name)\n\t}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}",
"func Mount(f fs.Fs, mountpoint string) error {\n\tif debugFUSE {\n\t\tfuse.Debug = func(msg interface{}) {\n\t\t\tfs.Debug(\"fuse\", \"%v\", msg)\n\t\t}\n\t}\n\n\t// Mount it\n\terrChan, err := mount(f, mountpoint)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to mount FUSE fs\")\n\t}\n\n\t// Wait for umount\n\terr = <-errChan\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to umount FUSE fs\")\n\t}\n\n\treturn nil\n}",
"func (g *Generator) AddTmpfsMount(dest string, options []string) {\n\tmnt := rspec.Mount{\n\t\tDestination: dest,\n\t\tType: \"tmpfs\",\n\t\tSource: \"tmpfs\",\n\t\tOptions: options,\n\t}\n\n\tg.initSpec()\n\tg.spec.Mounts = append(g.spec.Mounts, mnt)\n}",
"func (j *juicefs) MountFs(ctx context.Context, appInfo *config.AppInfo, jfsSetting *config.JfsSetting) (string, error) {\n\tvar mnt podmount.MntInterface\n\tif jfsSetting.UsePod {\n\t\tjfsSetting.MountPath = filepath.Join(config.PodMountBase, jfsSetting.UniqueId)\n\t\tmnt = j.podMount\n\t} else {\n\t\tjfsSetting.MountPath = filepath.Join(config.MountBase, jfsSetting.UniqueId)\n\t\tmnt = j.processMount\n\t}\n\n\terr := mnt.JMount(ctx, appInfo, jfsSetting)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tklog.V(5).Infof(\"Mount: mounting %q at %q with options %v\", util.StripPasswd(jfsSetting.Source), jfsSetting.MountPath, jfsSetting.Options)\n\treturn jfsSetting.MountPath, nil\n}",
"func TestTmpfsDevShmNoDupMount(t *testing.T) {\n\tskip.If(t, os.Getuid() != 0, \"skipping test that requires root\")\n\tc := &container.Container{\n\t\tShmPath: \"foobar\", // non-empty, for c.IpcMounts() to work\n\t\tHostConfig: &containertypes.HostConfig{\n\t\t\tIpcMode: containertypes.IPCModeShareable, // default mode\n\t\t\t// --tmpfs /dev/shm:rw,exec,size=NNN\n\t\t\tTmpfs: map[string]string{\n\t\t\t\t\"/dev/shm\": \"rw,exec,size=1g\",\n\t\t\t},\n\t\t},\n\t}\n\td := setupFakeDaemon(t, c)\n\n\t_, err := d.createSpec(context.TODO(), &configStore{}, c)\n\tassert.Check(t, err)\n}",
"func mount(\n\tdir string,\n\tcfg *MountConfig,\n\tready chan<- error) (dev *os.File, err error) {\n\t// On linux, mounting is never delayed.\n\tready <- nil\n\n\t// Create a socket pair.\n\tfds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Socketpair: %v\", err)\n\t\treturn\n\t}\n\n\t// Wrap the sockets into os.File objects that we will pass off to fusermount.\n\twriteFile := os.NewFile(uintptr(fds[0]), \"fusermount-child-writes\")\n\tdefer writeFile.Close()\n\n\treadFile := os.NewFile(uintptr(fds[1]), \"fusermount-parent-reads\")\n\tdefer readFile.Close()\n\n\t// Start fusermount, passing it pipes for stdout and stderr.\n\tcmd := exec.Command(\n\t\t\"fusermount\",\n\t\t\"-o\", cfg.toOptionsString(),\n\t\t\"--\",\n\t\tdir,\n\t)\n\n\tcmd.Env = append(os.Environ(), \"_FUSE_COMMFD=3\")\n\tcmd.ExtraFiles = []*os.File{writeFile}\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"StdoutPipe: %v\", err)\n\t\treturn\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"StderrPipe: %v\", err)\n\t\treturn\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Starting fusermount: %v\", err)\n\t\treturn\n\t}\n\n\t// Log fusermount output until it closes stdout and stderr.\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\tgo lineLogger(&wg, \"mount helper output\", stdout)\n\tgo lineLogger(&wg, \"mount helper error\", stderr)\n\twg.Wait()\n\n\t// Wait for the command.\n\terr = cmd.Wait()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"fusermount: %v\", err)\n\t\treturn\n\t}\n\n\t// Wrap the socket file in a connection.\n\tc, err := net.FileConn(readFile)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"FileConn: %v\", err)\n\t\treturn\n\t}\n\tdefer c.Close()\n\n\t// We expect to have a Unix domain socket.\n\tuc, ok := c.(*net.UnixConn)\n\tif !ok {\n\t\terr = fmt.Errorf(\"Expected UnixConn, got %T\", c)\n\t\treturn\n\t}\n\n\t// Read a message.\n\tbuf := make([]byte, 32) // expect 1 byte\n\toob := make([]byte, 32) // expect 24 bytes\n\t_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"ReadMsgUnix: %v\", err)\n\t\treturn\n\t}\n\n\t// Parse the message.\n\tscms, err := syscall.ParseSocketControlMessage(oob[:oobn])\n\tif err != nil {\n\t\terr = fmt.Errorf(\"ParseSocketControlMessage: %v\", err)\n\t\treturn\n\t}\n\n\t// We expect one message.\n\tif len(scms) != 1 {\n\t\terr = fmt.Errorf(\"expected 1 SocketControlMessage; got scms = %#v\", scms)\n\t\treturn\n\t}\n\n\tscm := scms[0]\n\n\t// Pull out the FD returned by fusermount\n\tgotFds, err := syscall.ParseUnixRights(&scm)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"syscall.ParseUnixRights: %v\", err)\n\t\treturn\n\t}\n\n\tif len(gotFds) != 1 {\n\t\terr = fmt.Errorf(\"wanted 1 fd; got %#v\", gotFds)\n\t\treturn\n\t}\n\n\t// Turn the FD into an os.File.\n\tdev = os.NewFile(uintptr(gotFds[0]), \"/dev/fuse\")\n\n\treturn\n}",
"func Mount(mountpoint string, fs string, device string, isReadOnly bool) error {\n\tlog.WithFields(log.Fields{\n\t\t\"device\": device,\n\t\t\"mountpoint\": mountpoint,\n\t}).Debug(\"Calling syscall.Mount() \")\n\n\tflags := 0\n\tif isReadOnly {\n\t\tflags = syscall.MS_RDONLY\n\t}\n\terr := syscall.Mount(device, mountpoint, fs, uintptr(flags), \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to mount device %s at %s: %s\", device, mountpoint, err)\n\t}\n\treturn nil\n}",
"func TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mount.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := syscall.Mount(\"tmpfs\", l.Root, \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func mountFS(printWarning bool) error {\n\tif printWarning {\n\t\tlog.Warning(\"================================= WARNING ==========================================\")\n\t\tlog.Warning(\"BPF filesystem is not mounted. This will lead to network disruption when Cilium pods\")\n\t\tlog.Warning(\"are restarted. Ensure that the BPF filesystem is mounted in the host.\")\n\t\tlog.Warning(\"https://docs.cilium.io/en/stable/operations/system_requirements/#mounted-ebpf-filesystem\")\n\t\tlog.Warning(\"====================================================================================\")\n\t}\n\n\tlog.Infof(\"Mounting BPF filesystem at %s\", bpffsRoot)\n\n\tmapRootStat, err := os.Stat(bpffsRoot)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tif err := os.MkdirAll(bpffsRoot, 0755); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unable to create bpf mount directory: %s\", err)\n\t\t\t}\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to stat the mount path %s: %s\", bpffsRoot, err)\n\n\t\t}\n\t} else if !mapRootStat.IsDir() {\n\t\treturn fmt.Errorf(\"%s is a file which is not a directory\", bpffsRoot)\n\t}\n\n\tif err := unix.Mount(bpffsRoot, bpffsRoot, \"bpf\", 0, \"\"); err != nil {\n\t\treturn fmt.Errorf(\"failed to mount %s: %s\", bpffsRoot, err)\n\t}\n\treturn nil\n}",
"func mount(mountPoint string, opts *MountOptions, ready chan<- error) (fd int, err error) {\n\tuser, err := user.Current()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tf, err := os.OpenFile(\"/dev/fuse\", os.O_RDWR, 0666)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tfd = int(f.Fd())\n\n\tm := cmount.Mount{\n\t\tType: fmt.Sprintf(\"fuse.%s\", opts.Name),\n\t\tSource: opts.FsName,\n\t\tOptions: []string{\n\t\t\t\"nosuid\",\n\t\t\t\"nodev\",\n\t\t\tfmt.Sprintf(\"fd=%d\", fd),\n\t\t\tfmt.Sprintf(\"rootmode=%#o\", syscall.S_IFDIR),\n\t\t\tfmt.Sprintf(\"user_id=%s\", user.Uid),\n\t\t\tfmt.Sprintf(\"group_id=%s\", user.Gid),\n\t\t},\n\t}\n\n\tif opts.AllowOther {\n\t\tm.Options = append(m.Options, \"allow_other\")\n\t}\n\n\tm.Options = append(m.Options, opts.Options...)\n\n\terr = m.Mount(mountPoint)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tclose(ready)\n\treturn fd, err\n}",
"func forceMount(device, target, mType, options string) error {\n\tflag, data := parseOptions(options)\n\terr := sysMount(device, target, mType, uintptr(flag), data)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func mount(mountpoint string) error {\n\tc, err := fuse.Mount(mountpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer c.Close()\n\n\tclient := getClient()\n\n\t// fmt.Println(\"doing getData for superblock\")\n\tsuperKey := S3_SUPERBLOCK_NAME + \"0\"\n\tsuper, err := getDataByKey(client, superKey)\n\tif err != nil {\n\t\tsuper = makeNewSuperblock()\n\t}\n\tfilesys := makeFs(super)\n\t// fmt.Println(\"finished makeFs\")\n\n\t// from http://stackoverflow.com/questions/11268943/golang-is-it-possible-to-capture-a-ctrlc-signal-and-run-a-cleanup-function-in\n\tc2 := make(chan os.Signal, 1)\n\tsignal.Notify(c2, os.Interrupt)\n\tsignal.Notify(c2, syscall.SIGTERM)\n\tgo func() {\n\t\t<-c2\n\t\tfilesys.Destroy()\n\t\tos.Exit(1)\n\t}()\n\n\t_, err = getInode(filesys.rootInode)\n\tif err != nil {\n\t\tmakeNewRootInode()\n\t}\n\n\tif runTests {\n\t\tfmt.Println(\"Test flag was set, so running all tests.\")\n\t\tgo runAllTests()\n\t}\n\n\tfmt.Println(\"File system mounted.\")\n\tif err := fs.Serve(c, filesys); err != nil {\n\t\treturn err\n\t}\n\n\t// check if the mount process has an error to report\n\t<-c.Ready\n\tif err := c.MountError; err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (m *mounter) Mount() (err error) {\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"KBFS failed to FUSE mount at %s: %s\", m.options.MountPoint, err)\n\t\t\tfmt.Println(msg)\n\t\t\tm.log.Warning(msg)\n\t\t}\n\t}()\n\n\tm.c, err = fuseMountDir(m.options.MountPoint, m.options.PlatformParams)\n\t// Exit if we were successful or we are not a force mounting on error.\n\t// Otherwise, try unmounting and mounting again.\n\tif err == nil || !m.options.ForceMount {\n\t\treturn err\n\t}\n\n\t// Mount failed, let's try to unmount and then try mounting again, even\n\t// if unmounting errors here.\n\t_ = m.Unmount()\n\n\t// In case we are on darwin, ask the installer to reinstall the mount dir\n\t// and try again as the last resort. This specifically fixes a situation\n\t// where /keybase gets created and owned by root after Keybase app is\n\t// started, and `kbfs` later fails to mount because of a permission error.\n\tm.reinstallMountDirIfPossible()\n\tm.c, err = fuseMountDir(m.options.MountPoint, m.options.PlatformParams)\n\n\treturn err\n}",
"func (j *juicefs) MountFs(name string, options []string) (string, error) {\n\th := md5.New()\n\tif _, err := h.Write([]byte(strings.Join(options, \",\"))); err != nil {\n\t\treturn \"\", status.Errorf(codes.Internal, \"Could not write options to hash: %v\", options)\n\t}\n\tmountPath := path.Join(mountBase, fmt.Sprintf(\"%s-%s\", name, hex.EncodeToString(h.Sum(nil))))\n\n\texists, err := j.ExistsPath(mountPath)\n\n\tif err != nil {\n\t\treturn mountPath, status.Errorf(codes.Internal, \"Could not check mount point %q exists: %v\", mountPath, err)\n\t}\n\n\tif !exists {\n\t\tklog.V(5).Infof(\"Mount: mounting %q at %q with options %v\", name, mountPath, options)\n\t\tif err := j.Mount(name, mountPath, fsType, []string{}); err != nil {\n\t\t\tos.Remove(mountPath)\n\t\t\treturn \"\", status.Errorf(codes.Internal, \"Could not mount %q at %q: %v\", name, mountPath, err)\n\t\t}\n\t\treturn mountPath, nil\n\t}\n\n\t// path exists\n\tnotMnt, err := j.IsLikelyNotMountPoint(mountPath)\n\tif err != nil {\n\t\treturn mountPath, status.Errorf(codes.Internal, \"Could not check %q IsLikelyNotMountPoint: %v\", mountPath, err)\n\t}\n\n\tif notMnt {\n\t\tklog.V(5).Infof(\"Mount: mounting %q at %q with options %v\", name, mountPath, options)\n\t\tif err := j.Mount(name, mountPath, fsType, []string{}); err != nil {\n\t\t\treturn \"\", status.Errorf(codes.Internal, \"Could not mount %q at %q: %v\", name, mountPath, err)\n\t\t}\n\t\treturn mountPath, nil\n\t}\n\n\tklog.V(5).Infof(\"Mount: skip mounting for existing mount point %q\", mountPath)\n\treturn mountPath, nil\n}",
"func mount(device, mountpoint string) error {\n\tif out, err := exec.Command(\"mount\", device, mountpoint).CombinedOutput(); err != nil {\n\t\treturn fmt.Errorf(\"Error mounting %s to %s: %v\\n%s\", device, mountpoint, err, string(out))\n\n\t}\n\treturn nil\n}",
"func (i *ImageService) Mount(ctx context.Context, container *container.Container) error {\n\tsnapshotter := i.client.SnapshotService(i.snapshotter)\n\tmounts, err := snapshotter.Mounts(ctx, container.ID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// The temporary location will be under /var/lib/docker/... because\n\t// we set the `TMPDIR`\n\troot, err := os.MkdirTemp(\"\", fmt.Sprintf(\"%s_rootfs-mount\", container.ID))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create temp dir: %w\", err)\n\t}\n\n\tif err := mount.All(mounts, root); err != nil {\n\t\treturn fmt.Errorf(\"failed to mount %s: %w\", root, err)\n\t}\n\n\tcontainer.BaseFS = root\n\treturn nil\n}",
"func bindmountFs(src, target string) error {\n\n\tif _, err := os.Stat(target); err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tos.MkdirAll(target, 0755)\n\t\t} else {\n\t\t\tlog.Errorf(\"stat %s, %v\", target, err)\n\t\t\treturn err\n\t\t}\n\t}\n\t_, err := os.Stat(target)\n\tif err != nil {\n\t\tlog.Errorf(\"stat %s, %v\", target, err)\n\t\treturn err\n\t}\n\targs := []string{\"--bind\", src, target}\n\t_, err = execCommand(\"mount\", args...)\n\treturn err\n}",
"func (m *Mounter) Mount(\n\tminor int,\n\tdevPath, path, fs string,\n\tflags uintptr,\n\tdata string,\n\ttimeout int,\n\topts map[string]string,\n) error {\n\t// device gets overwritten if opts specifies fuse mount with\n\t// options.OptionsDeviceFuseMount.\n\tdevice := devPath\n\tif value, ok := opts[options.OptionsDeviceFuseMount]; ok {\n\t\t// fuse mounts show-up with this key as device.\n\t\tdevice = value\n\t}\n\n\tpath = normalizeMountPath(path)\n\tif len(m.allowedDirs) > 0 {\n\t\tfoundPrefix := false\n\t\tfor _, allowedDir := range m.allowedDirs {\n\t\t\tif strings.Contains(path, allowedDir) {\n\t\t\t\tfoundPrefix = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !foundPrefix {\n\t\t\treturn ErrMountpathNotAllowed\n\t\t}\n\t}\n\tdev, ok := m.HasTarget(path)\n\tif ok && dev != device {\n\t\tlogrus.Warnf(\"cannot mount %q, device %q is mounted at %q\", device, dev, path)\n\t\treturn ErrExist\n\t}\n\tm.Lock()\n\tinfo, ok := m.mounts[device]\n\tif !ok {\n\t\tinfo = &Info{\n\t\t\tDevice: device,\n\t\t\tMountpoint: make([]*PathInfo, 0),\n\t\t\tMinor: minor,\n\t\t\tFs: fs,\n\t\t}\n\t}\n\tm.mounts[device] = info\n\tm.Unlock()\n\tinfo.Lock()\n\tdefer info.Unlock()\n\n\t// Validate input params\n\t// FS check is not needed if it is a bind mount\n\tif !strings.HasPrefix(info.Fs, fs) && (flags&syscall.MS_BIND) != syscall.MS_BIND {\n\t\tlogrus.Warnf(\"%s Existing mountpoint has fs %q cannot change to %q\",\n\t\t\tdevice, info.Fs, fs)\n\t\treturn ErrEinval\n\t}\n\n\t// Try to find the mountpoint. If it already exists, do nothing\n\tfor _, p := range info.Mountpoint {\n\t\tif p.Path == path {\n\t\t\tlogrus.Infof(\"%q mountpoint for device %q already exists\",\n\t\t\t\tdevice, path)\n\t\t\treturn nil\n\t\t}\n\t}\n\n\th := m.kl.Acquire(path)\n\tdefer m.kl.Release(&h)\n\n\t// Record previous state of the path\n\tpathWasReadOnly := m.isPathSetImmutable(path)\n\tvar (\n\t\tisBindMounted bool = false\n\t\tbindMountPath string\n\t)\n\n\tif err := m.makeMountpathReadOnly(path); err != nil {\n\t\tif strings.Contains(err.Error(), \"Inappropriate ioctl for device\") {\n\t\t\tlogrus.Warnf(\"failed to make %s readonly. Err: %v\", path, err)\n\t\t\t// If we cannot chattr the original mount path, we bind mount it to\n\t\t\t// a path in osd mount path and then chattr it\n\t\t\tif bindMountPath, err = m.bindMountOriginalPath(path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tisBindMounted = true\n\t\t} else {\n\t\t\treturn fmt.Errorf(\"failed to make %s readonly. Err: %v\", path, err)\n\t\t}\n\t}\n\n\t// The device is not mounted at path, mount it and add to its mountpoints.\n\tif err := m.mountImpl.Mount(devPath, path, fs, flags, data, timeout); err != nil {\n\t\t// Rollback only if was writeable\n\t\tif !pathWasReadOnly {\n\t\t\tif e := m.makeMountpathWriteable(path); e != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to make %v writeable during rollback. Err: %v Mount err: %v\",\n\t\t\t\t\tpath, e, err)\n\t\t\t}\n\t\t\tif isBindMounted {\n\t\t\t\tif cleanupErr := m.cleanupBindMount(path, bindMountPath, err); cleanupErr != nil {\n\t\t\t\t\treturn cleanupErr\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn err\n\t}\n\n\tinfo.Mountpoint = append(info.Mountpoint, &PathInfo{Path: path})\n\n\treturn nil\n}",
"func fMountat(dirfd uintptr, source, target, fstype string, flags uintptr, data string) error {\n\tvar (\n\t\tsourceP, targetP, fstypeP, dataP *byte\n\t\tpid uintptr\n\t\terr error\n\t\terrno, status syscall.Errno\n\t)\n\n\tsourceP, err = syscall.BytePtrFromString(source)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttargetP, err = syscall.BytePtrFromString(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfstypeP, err = syscall.BytePtrFromString(fstype)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif data != \"\" {\n\t\tdataP, err = syscall.BytePtrFromString(data)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tvar pipefds [2]int\n\tif err := syscall.Pipe2(pipefds[:], syscall.O_CLOEXEC); err != nil {\n\t\treturn fmt.Errorf(\"failed to open pipe: %w\", err)\n\t}\n\n\tdefer func() {\n\t\t// close both ends of the pipe in a deferred function, since open file\n\t\t// descriptor table is shared with child\n\t\tsyscall.Close(pipefds[0])\n\t\tsyscall.Close(pipefds[1])\n\t}()\n\n\tpid, errno = forkAndMountat(dirfd,\n\t\tuintptr(unsafe.Pointer(sourceP)),\n\t\tuintptr(unsafe.Pointer(targetP)),\n\t\tuintptr(unsafe.Pointer(fstypeP)),\n\t\tflags,\n\t\tuintptr(unsafe.Pointer(dataP)),\n\t\tpipefds[1],\n\t)\n\n\tif errno != 0 {\n\t\treturn fmt.Errorf(\"failed to fork thread: %w\", errno)\n\t}\n\n\tdefer func() {\n\t\t_, err := unix.Wait4(int(pid), nil, 0, nil)\n\t\tfor err == syscall.EINTR {\n\t\t\t_, err = unix.Wait4(int(pid), nil, 0, nil)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.L.WithError(err).Debugf(\"failed to find pid=%d process\", pid)\n\t\t}\n\t}()\n\n\t_, _, errno = syscall.RawSyscall(syscall.SYS_READ,\n\t\tuintptr(pipefds[0]),\n\t\tuintptr(unsafe.Pointer(&status)),\n\t\tunsafe.Sizeof(status))\n\tif errno != 0 {\n\t\treturn fmt.Errorf(\"failed to read pipe: %w\", errno)\n\t}\n\n\tif status != 0 {\n\t\treturn fmt.Errorf(\"failed to mount: %w\", status)\n\t}\n\n\treturn nil\n}",
"func ensureTempDir(rootfsPath string) {\n\tpth := filepath.Join(rootfsPath, \"/tmp\")\n\tstickyMode := os.FileMode(0777) | os.ModeSticky\n\t// try to chmod first\n\terr := os.Chmod(pth, stickyMode)\n\tif err == nil {\n\t\treturn\n\t}\n\t// for unexpected complaints, bail\n\tif !os.IsNotExist(err) {\n\t\tpanic(executor.SetupError.New(\"cradle: could not ensure reasonable /tmp: %s\", err))\n\t}\n\t// mkdir if not exist\n\tif err := os.Mkdir(pth, stickyMode); err != nil {\n\t\tpanic(executor.SetupError.New(\"cradle: could not ensure reasonable /tmp: %s\", err))\n\t}\n\t// chmod it *again* because unit tests reveal that `os.Mkdir` is subject to umask\n\tif err := os.Chmod(pth, stickyMode); err != nil {\n\t\tpanic(executor.SetupError.New(\"cradle: could not ensure reasonable /tmp: %s\", err))\n\t}\n}",
"func CheckOrMountFS(bpfRoot string) {\n\tmountOnce.Do(func() {\n\t\tif err := checkOrMountFS(bpfRoot); err != nil {\n\t\t\tlog.WithError(err).Fatal(\"Unable to mount BPF filesystem\")\n\t\t}\n\t})\n}",
"func (v *VaultFS) Mount() error {\n\tvar err error\n\tv.conn, err = fuse.Mount(\n\t\tv.mountpoint,\n\t\tfuse.FSName(\"vault\"),\n\t\tfuse.VolumeName(\"vault\"),\n\t)\n\n\tlogrus.Debug(\"created conn\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Debug(\"starting to serve\")\n\treturn fs.Serve(v.conn, v)\n}",
"func sysMount(device, target, mType string, flag uintptr, data string) error {\n\tif err := syscall.Mount(device, target, mType, flag, data); err != nil {\n\t\treturn err\n\t}\n\n\t// If we have a bind mount or remount, remount...\n\tif flag&syscall.MS_BIND == syscall.MS_BIND &&\n\t\tflag&syscall.MS_RDONLY == syscall.MS_RDONLY {\n\t\treturn syscall.Mount(\n\t\t\tdevice, target, mType, flag|syscall.MS_REMOUNT, data)\n\t}\n\treturn nil\n}",
"func (b *Builder) WithTmpfs(target, data string) *Builder {\n\tb.Mounts = append(b.Mounts, Mount{\n\t\tSource: \"tmpfs\",\n\t\tTarget: target,\n\t\tFsType: \"tmpfs\",\n\t\tFlags: mFlag,\n\t\tData: data,\n\t})\n\treturn b\n}",
"func (m *DefaultMounter) Mount(cfg *dokan.Config, log logger.Logger) error {\n\tvar err error\n\tvar h *dokan.MountHandle\n\t// See if the path was set after creation of this mounter\n\tif m.dir == \"\" && cfg.Path != \"\" {\n\t\tm.dir = cfg.Path\n\t}\n\t// Retry loop\n\tfor i := 8; true; i *= 2 {\n\t\th, err = m.mountHelper(cfg)\n\t\t// break if success, no force or too many tries.\n\t\tif err == nil || i > 128 {\n\t\t\tbreak\n\t\t}\n\t\tlog.Errorf(\"Failed to mount dokan filesystem (i=%d): %v\", i, err)\n\t\t// Sleep two times 800ms, 1.6s, 3.2s, ...\n\t\ttime.Sleep(time.Duration(i) * 100 * time.Millisecond)\n\t\tif m.force {\n\t\t\tdokan.Unmount(m.dir)\n\t\t\ttime.Sleep(time.Duration(i) * 100 * time.Millisecond)\n\t\t}\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Info(\"Mounting the filesystem was a success!\")\n\treturn h.BlockTillDone()\n}",
"func MountDevice(device *block.BlockDev, flags uintptr) (string, error) {\n\tdevName := device.Name\n\n\tDebug(\"MountDevice: Checking cache first for %s\", devName)\n\tcachedMountPath, err := getMountCacheData(devName, flags)\n\tif err == nil {\n\t\tlog.Printf(\"getMountCacheData succeeded for %s\", devName)\n\t\treturn cachedMountPath, nil\n\t}\n\tDebug(\"MountDevice: cache lookup failed for %s\", devName)\n\n\tDebug(\"MountDevice: Attempting to mount %s with flags %d\", devName, flags)\n\tmountPath, err := os.MkdirTemp(\"/tmp\", \"slaunch-\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to create tmp mount directory: %v\", err)\n\t}\n\n\tif _, err := device.Mount(mountPath, flags); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to mount %s, flags %d, err=%v\", devName, flags, err)\n\t}\n\n\tDebug(\"MountDevice: Mounted %s with flags %d\", devName, flags)\n\tsetMountCache(devName, mountCacheData{flags: flags, mountPath: mountPath}) // update cache\n\treturn mountPath, nil\n}",
"func doMount(destinationDir string, ninputs *pb.WorkloadInfo) error {\n\tnewDir := filepath.Join(configuration.NodeAgentWorkloadHomeDir, ninputs.Workloadpath)\n\terr := os.MkdirAll(newDir, 0777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Not really needed but attempt to workaround:\n\t// https://github.com/kubernetes/kubernetes/blob/61ac9d46382884a8bd9e228da22bca5817f6d226/pkg/util/mount/mount_linux.go\n\tcmdMount := getExecCmd(\"/bin/mount\", \"-t\", \"tmpfs\", \"-o\", \"size=8K\", \"tmpfs\", destinationDir)\n\terr = cmdMount.Run()\n\tif err != nil {\n\t\tos.RemoveAll(newDir) //nolint: errcheck\n\t\treturn err\n\t}\n\n\tnewDestinationDir := filepath.Join(destinationDir, \"nodeagent\")\n\terr = os.MkdirAll(newDestinationDir, 0777)\n\tif err != nil {\n\t\tgetExecCmd(\"/bin/unmount\", destinationDir).Run() //nolint: errcheck\n\t\tos.RemoveAll(newDir) //nolint: errcheck\n\t\treturn err\n\t}\n\n\t// Do a bind mount\n\tcmd := getExecCmd(\"/bin/mount\", \"--bind\", newDir, newDestinationDir)\n\terr = cmd.Run()\n\tif err != nil {\n\t\tgetExecCmd(\"/bin/umount\", destinationDir).Run() //nolint: errcheck\n\t\tos.RemoveAll(newDir) //nolint: errcheck\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (fs *Memory) TempFileSystem() *afero.Afero {\n\treturn fs.tempFs\n}",
"func (o *Filesystem) Mount(ctx context.Context, options map[string]dbus.Variant) (mountPath string, err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Mount\", 0, options).Store(&mountPath)\n\treturn\n}",
"func createFilesToMount(tempDir string, step batcheslib.Step, stepContext *template.StepContext) (map[string]*os.File, func(), error) {\n\t// Parse and render the step.Files.\n\tfiles, err := template.RenderStepMap(step.Files, stepContext)\n\tif err != nil {\n\t\treturn nil, nil, errors.Wrap(err, \"parsing step files\")\n\t}\n\n\tvar toCleanup []string\n\tcleanup := func() {\n\t\tfor _, fname := range toCleanup {\n\t\t\tos.Remove(fname)\n\t\t}\n\t}\n\n\t// Create temp files with the rendered content of step.Files so that we\n\t// can mount them into the container.\n\tfilesToMount := make(map[string]*os.File, len(files))\n\tfor name, content := range files {\n\t\tfp, err := os.CreateTemp(tempDir, \"\")\n\t\tif err != nil {\n\t\t\treturn nil, cleanup, errors.Wrap(err, \"creating temporary file\")\n\t\t}\n\t\ttoCleanup = append(toCleanup, fp.Name())\n\n\t\tif _, err := fp.WriteString(content); err != nil {\n\t\t\treturn nil, cleanup, errors.Wrap(err, \"writing to temporary file\")\n\t\t}\n\n\t\tif err := fp.Close(); err != nil {\n\t\t\treturn nil, cleanup, errors.Wrap(err, \"closing temporary file\")\n\t\t}\n\n\t\tfilesToMount[name] = fp\n\t}\n\n\treturn filesToMount, cleanup, nil\n}",
"func (c *TestContext) UseFilesystem() (testDir string, homeDir string) {\n\thomeDir, err := os.MkdirTemp(\"\", \"porter-test\")\n\trequire.NoError(c.T, err)\n\tc.cleanupDirs = append(c.cleanupDirs, homeDir)\n\n\ttestDir = c.GetTestDefinitionDirectory()\n\tc.FileSystem = aferox.NewAferox(testDir, afero.NewOsFs())\n\tc.defaultNewCommand()\n\tc.DisableUmask()\n\n\treturn testDir, homeDir\n}",
"func (osh *SystemHandler) Mount(source string, target string, fsType string, flags uintptr, data string) error {\n\treturn syscall.Mount(source, target, fsType, flags, data)\n}",
"func DiskMount(srcPath string, dstPath string, readonly bool, recursive bool, propagation string, mountOptions []string, fsName string) error {\n\tvar err error\n\n\t// Prepare the mount flags\n\tflags := 0\n\tif readonly {\n\t\tflags |= unix.MS_RDONLY\n\t}\n\n\t// Detect the filesystem\n\tif fsName == \"none\" {\n\t\tflags |= unix.MS_BIND\n\t}\n\n\tif propagation != \"\" {\n\t\tswitch propagation {\n\t\tcase \"private\":\n\t\t\tflags |= unix.MS_PRIVATE\n\t\tcase \"shared\":\n\t\t\tflags |= unix.MS_SHARED\n\t\tcase \"slave\":\n\t\t\tflags |= unix.MS_SLAVE\n\t\tcase \"unbindable\":\n\t\t\tflags |= unix.MS_UNBINDABLE\n\t\tcase \"rprivate\":\n\t\t\tflags |= unix.MS_PRIVATE | unix.MS_REC\n\t\tcase \"rshared\":\n\t\t\tflags |= unix.MS_SHARED | unix.MS_REC\n\t\tcase \"rslave\":\n\t\t\tflags |= unix.MS_SLAVE | unix.MS_REC\n\t\tcase \"runbindable\":\n\t\t\tflags |= unix.MS_UNBINDABLE | unix.MS_REC\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"Invalid propagation mode %q\", propagation)\n\t\t}\n\t}\n\n\tif recursive {\n\t\tflags |= unix.MS_REC\n\t}\n\n\t// Mount the filesystem\n\terr = unix.Mount(srcPath, dstPath, fsName, uintptr(flags), strings.Join(mountOptions, \",\"))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to mount %q at %q with filesystem %q: %w\", srcPath, dstPath, fsName, err)\n\t}\n\n\t// Remount bind mounts in readonly mode if requested\n\tif readonly == true && flags&unix.MS_BIND == unix.MS_BIND {\n\t\tflags = unix.MS_RDONLY | unix.MS_BIND | unix.MS_REMOUNT\n\t\terr = unix.Mount(\"\", dstPath, fsName, uintptr(flags), \"\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Unable to mount %q in readonly mode: %w\", dstPath, err)\n\t\t}\n\t}\n\n\tflags = unix.MS_REC | unix.MS_SLAVE\n\terr = unix.Mount(\"\", dstPath, \"\", uintptr(flags), \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Unable to make mount %q private: %w\", dstPath, err)\n\t}\n\n\treturn nil\n}",
"func TestMountDevFd(t *testing.T) {\n\trealMountPoint, err := ioutil.TempDir(\"\", t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer syscall.Rmdir(realMountPoint)\n\n\t// Call the fusermount suid helper to obtain the file descriptor in place\n\t// of a privileged parent.\n\tvar fuOpts MountOptions\n\tfd, err := callFusermount(realMountPoint, &fuOpts)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfdMountPoint := fmt.Sprintf(\"/dev/fd/%d\", fd)\n\n\t// Real test starts here:\n\t// See if we can feed fdMountPoint to NewServer\n\tfs := NewDefaultRawFileSystem()\n\topts := MountOptions{\n\t\tDebug: true,\n\t}\n\tsrv, err := NewServer(fs, fdMountPoint, &opts)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo srv.Serve()\n\tif err := srv.WaitMount(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// If we are actually mounted, we should get ENOSYS.\n\t//\n\t// This won't deadlock despite pollHack not working for `/dev/fd/N` mounts\n\t// because functions in the syscall package don't use the poller.\n\tvar st syscall.Stat_t\n\terr = syscall.Stat(realMountPoint, &st)\n\tif err != syscall.ENOSYS {\n\t\tt.Errorf(\"expected ENOSYS, got %v\", err)\n\t}\n\n\t// Cleanup is somewhat tricky because `srv` does not know about\n\t// `realMountPoint`, so `srv.Unmount()` cannot work.\n\t//\n\t// A normal user has to call `fusermount -u` for themselves to unmount.\n\t// But in this test we can monkey-patch `srv.mountPoint`.\n\tsrv.mountPoint = realMountPoint\n\tif err := srv.Unmount(); err != nil {\n\t\tt.Error(err)\n\t}\n}",
"func testCmdMountFilesystem(t *testing.T) {\n\tt.Log(\"TODO\")\n}",
"func Mount(mountpoint string) (err error) {\n\tlog.Println(\"Mounting filesystem\")\n\tc, err := fuse.Mount(\n\t\tmountpoint,\n\t\tfuse.FSName(\"pgfs\"),\n\t\tfuse.Subtype(\"pgfs\"),\n\t\t//fuse.ReadOnly(),\n\t\t//fuse.AllowOther(), // option allow_other only allowed if 'user_allow_other' is set in /etc/fuse.conf\n\t)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer close(c)\n\n\tif p := c.Protocol(); !p.HasInvalidate() {\n\t\treturn fmt.Errorf(\"kernel FUSE support is too old to have invalidations: version %v\", p)\n\t}\n\n\ttables, err := postgres.ListTables()\n\tif err != nil {\n\t\treturn\n\t}\n\tnodes := make(map[string]*Node)\n\tsrv := fs.New(c, nil)\n\n\tvar inode uint64 = 2\n\tfor _, t := range tables {\n\t\tnode := Node{\n\t\t\tName: t.Name,\n\t\t\tfuse: srv,\n\t\t\tInode: inode,\n\t\t\tType: fuse.DT_Dir,\n\t\t\tfs: &FS{\n\t\t\t\tNodes: map[string]*Node{\n\t\t\t\t\tt.Name + \".json\": &Node{\n\t\t\t\t\t\tName: t.Name + \".json\",\n\t\t\t\t\t\tfuse: srv,\n\t\t\t\t\t\tInode: inode + 1,\n\t\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t\t\tContent: []byte(\"\"),\n\t\t\t\t\t},\n\t\t\t\t\tt.Name + \".csv\": &Node{\n\t\t\t\t\t\tName: t.Name + \".csv\",\n\t\t\t\t\t\tfuse: srv,\n\t\t\t\t\t\tInode: inode + 2,\n\t\t\t\t\t\tType: fuse.DT_File,\n\t\t\t\t\t\tContent: []byte(\"\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t\tnodes[t.Name] = &node\n\t\tinode += 3\n\t}\n\n\tfilesys := &FS{\n\t\tNodes: nodes,\n\t}\n\n\terr = srv.Serve(filesys)\n\treturn\n}",
"func (j *juicefs) JfsMount(secrets map[string]string, options []string) (Jfs, error) {\n\tstdoutStderr, err := j.AuthFs(secrets)\n\tklog.V(5).Infof(\"MountFs: authentication output is '%s'\\n\", stdoutStderr)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Could not auth juicefs: %v\", err)\n\t}\n\n\tmountPath, err := j.MountFs(secrets[\"name\"], options)\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Internal, \"Could not mount juicefs: %v\", err)\n\t}\n\n\treturn &jfs{\n\t\tProvider: j,\n\t\tName: secrets[\"name\"],\n\t\tMountPath: mountPath,\n\t\tOptions: options,\n\t}, nil\n}",
"func Mount(dev, path, fsType, data string, flags uintptr) error {\n\tif err := unix.Mount(dev, path, fsType, flags, data); err != nil {\n\t\treturn fmt.Errorf(\"Mount %q on %q type %q flags %x: %v\",\n\t\t\tdev, path, fsType, flags, err)\n\t}\n\treturn nil\n}",
"func NewMount(p ctxgroup.ContextGroup, fsys fs.FS, mountpoint string) (Mount, error) {\n\tconn, err := fuse.Mount(mountpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm := &mount{\n\t\tmpoint: mountpoint,\n\t\tfuseConn: conn,\n\t\tfilesys: fsys,\n\t\tcg: ctxgroup.WithParent(p), // link it to parent.\n\t}\n\tm.cg.SetTeardown(m.unmount)\n\n\t// launch the mounting process.\n\tif err := m.mount(); err != nil {\n\t\tm.Unmount() // just in case.\n\t\treturn nil, err\n\t}\n\n\treturn m, nil\n}",
"func TestMountCreat(t *testing.T) {\n\tconst concurrency = 2\n\tconst repeat = 2\n\n\tdir := test_helpers.InitFS(t)\n\tmnt := dir + \".mnt\"\n\n\tfor j := 0; j < repeat; j++ {\n\t\ttest_helpers.MountOrFatal(t, dir, mnt, \"-extpass=echo test\")\n\t\tvar wg sync.WaitGroup\n\t\twg.Add(concurrency)\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tgo func(i int) {\n\t\t\t\tpath := fmt.Sprintf(\"%s/%d\", mnt, i)\n\t\t\t\tfd, err := syscall.Open(path, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600)\n\t\t\t\tsyscall.Close(fd)\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Creat %q: %v\", path, err)\n\t\t\t\t}\n\t\t\t\twg.Done()\n\t\t\t}(i)\n\t\t}\n\t\twg.Wait()\n\t\ttest_helpers.UnmountPanic(mnt)\n\t}\n}",
"func (img *Image) Mount(mountPoint, fs string, flags uintptr, data string) error {\n\treturn devMount(img, mountPoint, fs, flags, data)\n}",
"func (m *DefaultMounter) mountHelper(cfg *dokan.Config) (*dokan.MountHandle, error) {\n\t// m.dir is constant and safe to access outside the lock.\n\thandle, err := dokan.Mount(cfg)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tm.mnt = handle\n\treturn handle, nil\n}",
"func (z *zfsctl) Mount(ctx context.Context) *execute {\n\targs := []string{\"mount\"}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}",
"func mountVirtualFileSystem(archivePath string, password string, scrambleKey string) error {\n\terr := isArchiveFormatZip(archivePath)\n\tif err == nil {\n\t\tvirtualFileSystemArchiveType = constants.VirtualFileSystemZip\n\t\tvirtualFileSystemArchive = archivePath\n\t\tvirtualFileSystemPassword = password\n\t\tvirtualFileSystemEncryptionKey = scrambleKey\n\t\treturn err\n\t}\n\terr = isArchiveFormatRar(archivePath, password)\n\tif err == nil {\n\t\tvirtualFileSystemArchiveType = constants.VirtualFileSystemRar\n\t\tvirtualFileSystemArchive = archivePath\n\t\tvirtualFileSystemPassword = password\n\t\tvirtualFileSystemEncryptionKey = scrambleKey\n\t\treturn err\n\t}\n\terr = errors.New(fmt.Sprintf(\"Failed to open or decode '%s'.\", archivePath))\n\treturn err\n}",
"func maybeWrapWithSSHFSMountAndUnmount(ctx context.Context, user string, useSSHFS bool, f func() error) error {\n\tif useSSHFS {\n\t\tif err := arc.MountSDCardPartitionOnHostWithSSHFS(ctx, user); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to mount Android's SDCard partition on host\")\n\t\t}\n\t}\n\terr := f()\n\tif useSSHFS {\n\t\tif err := arc.UnmountSDCardPartitionFromHost(ctx, user); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to unmount Android's SDCard partition from host\")\n\t\t}\n\t}\n\treturn err\n}",
"func (p *Tmpfs) Attach(ctx driver.Context, v *types.Volume) error {\n\tctx.Log.Debugf(\"Tmpfs attach volume: %s\", v.Name)\n\tmountPath := v.Path()\n\tsize := v.Size()\n\treqID := v.Option(\"reqID\")\n\tids := v.Option(\"ids\")\n\n\tif ids != \"\" {\n\t\tif !strings.Contains(ids, reqID) {\n\t\t\tids = ids + \",\" + reqID\n\t\t}\n\t} else {\n\t\tids = reqID\n\t}\n\n\tif err := os.MkdirAll(mountPath, 0755); err != nil && !os.IsExist(err) {\n\t\treturn fmt.Errorf(\"error creating %q directory: %v\", mountPath, err)\n\t}\n\n\tif !utils.IsMountpoint(mountPath) {\n\t\terr := syscall.Mount(\"shm\", mountPath, \"tmpfs\",\n\t\t\tuintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV),\n\t\t\tfmt.Sprintf(\"mode=1777,size=%s\", size))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"mounting shm tmpfs: %s %v\", mountPath, err)\n\t\t}\n\t}\n\n\tv.SetOption(\"ids\", ids)\n\tv.SetOption(\"freeTime\", \"\")\n\n\treturn nil\n}",
"func (m *DefaultMounter) Mount(\n\tsource string,\n\ttarget string,\n\tfstype string,\n\tflags uintptr,\n\tdata string,\n\ttimeout int,\n) error {\n\treturn syscall.Mount(source, target, fstype, flags, data)\n}",
"func ServeFuseFS(\n\tfilesys *plugin.Registry,\n\tmountpoint string,\n\tanalyticsClient analytics.Client,\n) (chan<- context.Context, <-chan struct{}, error) {\n\tfuse.Debug = func(msg interface{}) {\n\t\tlog.Tracef(\"FUSE: %v\", msg)\n\t}\n\n\tlog.Infof(\"FUSE: Mounting at %v\", mountpoint)\n\tfuseConn, err := fuse.Mount(mountpoint)\n\tif err != nil {\n\t\treturn nil, nil, mountFailedErr(err)\n\t}\n\n\t// Start the FUSE server. We use the serverExitedCh to catch externally triggered unmounts.\n\t// If we're explicitly asked to shutdown the server, we want to wait until both Unmount and\n\t// Serve have exited before signaling completion.\n\tserverExitedCh := make(chan struct{})\n\tgo func() {\n\t\tserverConfig := &fs.Config{\n\t\t\tWithContext: func(ctx context.Context, req fuse.Request) context.Context {\n\t\t\t\tpid := int(req.Hdr().Pid)\n\t\t\t\tnewctx := context.WithValue(ctx, activity.JournalKey, activity.JournalForPID(pid))\n\t\t\t\tnewctx = context.WithValue(newctx, analytics.ClientKey, analyticsClient)\n\t\t\t\treturn newctx\n\t\t\t},\n\t\t}\n\t\tserver := fs.New(fuseConn, serverConfig)\n\t\troot := newRoot(filesys)\n\t\tif err := server.Serve(&root); err != nil {\n\t\t\tlog.Warnf(\"FUSE: fs.Serve errored with: %v\", err)\n\t\t}\n\n\t\t// check if the mount process has an error to report\n\t\t<-fuseConn.Ready\n\t\tif err := fuseConn.MountError; err != nil {\n\t\t\tlog.Warnf(\"FUSE: Mount process errored with: %v\", err)\n\t\t}\n\t\tlog.Infof(\"FUSE: Serve complete\")\n\n\t\t// Signal that Serve exited so the clean-up goroutine can close the stopped channel\n\t\t// if it hasn't already done so.\n\t\tdefer close(serverExitedCh)\n\t}()\n\n\t// Clean-up\n\tstopCh := make(chan context.Context)\n\tstoppedCh := make(chan struct{})\n\tgo func() {\n\t\tselect {\n\t\tcase <-stopCh:\n\t\t\t// Handle explicit shutdown\n\t\t\tlog.Infof(\"FUSE: Shutting down the server\")\n\n\t\t\tlog.Infof(\"FUSE: Unmounting %v\", mountpoint)\n\t\t\tif err = fuse.Unmount(mountpoint); err != nil {\n\t\t\t\tlog.Warnf(\"FUSE: Shutdown failed: %v\", err)\n\t\t\t\tlog.Warnf(\"FUSE: Manual cleanup required: umount %v\", mountpoint)\n\n\t\t\t\t// Retry in a loop until no longer blocked buy an open handle.\n\t\t\t\t// All errors are `*os.PathError`, so we just match a known error string.\n\t\t\t\t// Note that casing of the error message differs on macOS and Linux.\n\t\t\t\tfor ; err != nil && strings.HasSuffix(strings.ToLower(err.Error()), \"resource busy\"); err = fuse.Unmount(mountpoint) {\n\t\t\t\t\tlog.Debugf(\"FUSE: Unmount failed: %v\", err)\n\t\t\t\t\ttime.Sleep(3 * time.Second)\n\t\t\t\t}\n\t\t\t\tlog.Debugf(\"FUSE: Unmount: %v\", err)\n\t\t\t}\n\t\t\tlog.Infof(\"FUSE: Unmount complete\")\n\t\tcase <-serverExitedCh:\n\t\t\t// Server exited on its own, fallthrough.\n\t\t}\n\t\t// Check that Serve has exited successfully in case we initiated the Unmount.\n\t\t<-serverExitedCh\n\t\terr := fuseConn.Close()\n\t\tif err != nil {\n\t\t\tlog.Infof(\"FUSE: Error closing the connection: %v\", err)\n\t\t}\n\t\tlog.Infof(\"FUSE: Server shutdown complete\")\n\t\tclose(stoppedCh)\n\t}()\n\n\treturn stopCh, stoppedCh, nil\n}",
"func (f *FakeMounter) Mount(source string, target string, fstype string, options []string) error {\n\treturn f.MountSensitive(source, target, fstype, options, nil /* sensitiveOptions */)\n}",
"func SetupChroot(rootfs string, definition Definition, m []ChrootMount) (func() error, error) {\n\t// Mount the rootfs\n\terr := unix.Mount(rootfs, rootfs, \"\", unix.MS_BIND, \"\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to mount '%s': %w\", rootfs, err)\n\t}\n\n\t// Setup all other needed mounts\n\tmounts := []ChrootMount{\n\t\t{\"none\", \"/proc\", \"proc\", 0, \"\", true},\n\t\t{\"none\", \"/sys\", \"sysfs\", 0, \"\", true},\n\t\t{\"none\", \"/run\", \"tmpfs\", 0, \"\", true},\n\t\t{\"none\", \"/tmp\", \"tmpfs\", 0, \"\", true},\n\t\t{\"none\", \"/dev\", \"tmpfs\", 0, \"\", true},\n\t\t{\"none\", \"/dev/shm\", \"tmpfs\", 0, \"\", true},\n\t\t{\"/etc/resolv.conf\", \"/etc/resolv.conf\", \"\", unix.MS_BIND, \"\", false},\n\t}\n\n\t// Keep a reference to the host rootfs and cwd\n\troot, err := os.Open(\"/\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcwd, err := os.Getwd()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Setup all needed mounts in a temporary location\n\tif len(m) > 0 {\n\t\terr = setupMounts(rootfs, append(mounts, m...))\n\t} else {\n\t\terr = setupMounts(rootfs, mounts)\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to mount filesystems: %w\", err)\n\t}\n\n\t// Chroot into the container's rootfs\n\terr = unix.Chroot(rootfs)\n\tif err != nil {\n\t\troot.Close()\n\t\treturn nil, err\n\t}\n\n\terr = unix.Chdir(\"/\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Move all the mounts into place\n\terr = moveMounts(append(mounts, m...))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Populate /dev directory instead of bind mounting it from the host\n\terr = populateDev()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to populate /dev: %w\", err)\n\t}\n\n\t// Change permission for /dev/shm\n\terr = unix.Chmod(\"/dev/shm\", 01777)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"Failed to chmod /dev/shm: %w\", err)\n\t}\n\n\tvar env Environment\n\tenvs := definition.Environment\n\n\tif envs.ClearDefaults {\n\t\tenv = Environment{}\n\t} else {\n\t\tenv = Environment{\n\t\t\t\"PATH\": EnvVariable{\n\t\t\t\tValue: \"/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin\",\n\t\t\t\tSet: true,\n\t\t\t},\n\t\t\t\"SHELL\": EnvVariable{\n\t\t\t\tValue: \"/bin/sh\",\n\t\t\t\tSet: true,\n\t\t\t},\n\t\t\t\"TERM\": EnvVariable{\n\t\t\t\tValue: \"xterm\",\n\t\t\t\tSet: true,\n\t\t\t},\n\t\t\t\"DEBIAN_FRONTEND\": EnvVariable{\n\t\t\t\tValue: \"noninteractive\",\n\t\t\t\tSet: true,\n\t\t\t},\n\t\t}\n\t}\n\n\tif envs.EnvVariables != nil && len(envs.EnvVariables) > 0 {\n\t\timageTargets := ImageTargetUndefined | ImageTargetAll\n\n\t\tif definition.Targets.Type == DefinitionFilterTypeContainer {\n\t\t\timageTargets |= ImageTargetContainer\n\t\t} else if definition.Targets.Type == DefinitionFilterTypeVM {\n\t\t\timageTargets |= ImageTargetVM\n\t\t}\n\n\t\tfor _, e := range envs.EnvVariables {\n\t\t\tif !ApplyFilter(&e, definition.Image.Release, definition.Image.ArchitectureMapped, definition.Image.Variant, definition.Targets.Type, imageTargets) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tentry, ok := env[e.Key]\n\t\t\tif ok {\n\t\t\t\tentry.Value = e.Value\n\t\t\t\tentry.Set = true\n\t\t\t} else {\n\t\t\t\tenv[e.Key] = EnvVariable{\n\t\t\t\t\tValue: e.Value,\n\t\t\t\t\tSet: true,\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\t// Set environment variables\n\toldEnv := SetEnvVariables(env)\n\n\t// Setup policy-rc.d override\n\tpolicyCleanup := false\n\tif lxd.PathExists(\"/usr/sbin/\") && !lxd.PathExists(\"/usr/sbin/policy-rc.d\") {\n\t\terr = 
os.WriteFile(\"/usr/sbin/policy-rc.d\", []byte(`#!/bin/sh\nexit 101\n`), 0755)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tpolicyCleanup = true\n\t}\n\n\texitFunc := func() error {\n\t\tdefer root.Close()\n\n\t\t// Cleanup policy-rc.d\n\t\tif policyCleanup {\n\t\t\terr = os.Remove(\"/usr/sbin/policy-rc.d\")\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Failed to remove %q: %w\", \"/usr/sbin/policy-rc.d\", err)\n\t\t\t}\n\t\t}\n\n\t\t// Reset old environment variables\n\t\tSetEnvVariables(oldEnv)\n\n\t\t// Switch back to the host rootfs\n\t\terr = root.Chdir()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to chdir: %w\", err)\n\t\t}\n\n\t\terr = unix.Chroot(\".\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to chroot: %w\", err)\n\t\t}\n\n\t\terr = unix.Chdir(cwd)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to chdir: %w\", err)\n\t\t}\n\n\t\t// This will kill all processes in the chroot and allow to cleanly\n\t\t// unmount everything.\n\t\terr = killChrootProcesses(rootfs)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed killing chroot processes: %w\", err)\n\t\t}\n\n\t\t// And now unmount the entire tree\n\t\terr = unix.Unmount(rootfs, unix.MNT_DETACH)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed unmounting rootfs: %w\", err)\n\t\t}\n\n\t\tdevPath := filepath.Join(rootfs, \"dev\")\n\n\t\t// Wipe $rootfs/dev\n\t\terr := os.RemoveAll(devPath)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"Failed to remove directory %q: %w\", devPath, err)\n\t\t}\n\n\t\tActiveChroots[rootfs] = nil\n\n\t\treturn os.MkdirAll(devPath, 0755)\n\t}\n\n\tActiveChroots[rootfs] = exitFunc\n\n\treturn exitFunc, nil\n}",
"func (ns *nodeServer) mount(sourcePath, targetPath string, mountOptions []string, rawBlock bool) error {\n\tnotMnt, err := ns.mounter.IsLikelyNotMountPoint(targetPath)\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn fmt.Errorf(\"failed to determain if '%s' is a valid mount point: %s\", targetPath, err.Error())\n\t}\n\tif !notMnt {\n\t\treturn nil\n\t}\n\n\t// Create target path, using a file for raw block bind mounts\n\t// or a directory for filesystems. Might already exist from a\n\t// previous call or because Kubernetes erroneously created it\n\t// for us.\n\tif rawBlock {\n\t\tf, err := os.OpenFile(targetPath, os.O_CREATE, os.FileMode(0644))\n\t\tif err == nil {\n\t\t\tdefer f.Close()\n\t\t} else if !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"create target device file: %w\", err)\n\t\t}\n\t} else {\n\t\tif err := os.Mkdir(targetPath, os.FileMode(0755)); err != nil && !os.IsExist(err) {\n\t\t\treturn fmt.Errorf(\"create target directory: %w\", err)\n\t\t}\n\t}\n\n\t// We supposed to use \"mount\" package - ns.mounter.Mount()\n\t// but it seems not supporting -c \"canonical\" option, so do it with exec()\n\t// added -c makes canonical mount, resulting in mounted path matching what LV thinks is lvpath.\n\targs := []string{\"-c\"}\n\tif len(mountOptions) != 0 {\n\t\targs = append(args, \"-o\", strings.Join(mountOptions, \",\"))\n\t}\n\n\targs = append(args, sourcePath, targetPath)\n\tif _, err := pmemexec.RunCommand(\"mount\", args...); err != nil {\n\t\treturn fmt.Errorf(\"mount filesystem failed: %s\", err.Error())\n\t}\n\n\treturn nil\n}",
"func TestMountBackground(t *testing.T) {\n\tdir := test_helpers.InitFS(t)\n\tmnt := dir + \".mnt\"\n\terr := os.Mkdir(mnt, 0700)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\t// Manually create a pipe pair and connect the child's stdout and stderr\n\t// to it. We cannot use StdoutPipe because that will close the pipe\n\t// when the child forks away.\n\tpr, pw, err := os.Pipe()\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\targs := []string{\"-extpass\", \"echo test\", dir, mnt}\n\tcmd := exec.Command(test_helpers.GocryptfsBinary, args...)\n\tcmd.Stdout = pw\n\tcmd.Stderr = pw\n\terr = cmd.Run()\n\tif err != nil {\n\t\tt.Error(err)\n\t}\n\tpw.Close()\n\tdefer test_helpers.UnmountPanic(mnt)\n\t// Read until we get EOF.\n\tc1 := make(chan struct{}, 1)\n\tgo func() {\n\t\tbuf := make([]byte, 1000)\n\t\tfor {\n\t\t\t_, err = pr.Read(buf)\n\t\t\t// We should get io.EOF when the child closes stdout\n\t\t\t// and stderr.\n\t\t\tif err != nil {\n\t\t\t\tpr.Close()\n\t\t\t\tc1 <- struct{}{}\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\tselect {\n\tcase <-c1:\n\t\treturn\n\tcase <-time.After(time.Second * 5):\n\t\tt.Fatal(\"timeout\")\n\t}\n}",
"func KMountVFat(img string, start, length uint64) (*KMount, error) {\n\tmntPoint, err := ioutil.TempDir(\"\", \"raspberry-box\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"TempDir() failed: %v\", err)\n\t}\n\n\tl, err := losetup.Attach(img, start, false)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"loop failed: %v\", err)\n\t}\n\n\tif err := syscall.Mount(l.Path(), mntPoint, \"vfat\", syscall.MS_NOATIME, \"\"); err != nil {\n\t\tl.Detach()\n\t\treturn nil, fmt.Errorf(\"mount failed: %v\", err)\n\t}\n\treturn &KMount{\n\t\tmntPoint: mntPoint,\n\t\tloop: l,\n\t\tneedsUnmount: true,\n\t}, nil\n}",
"func mount(device, target, mType, options string) error {\n\tflag, _ := parseOptions(options)\n\tif flag&REMOUNT != REMOUNT {\n\t\tif mounted, err := mounted(target); err != nil || mounted {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn forceMount(device, target, mType, options)\n}",
"func handleMount(source, location, fstype string, flags uintptr, data string) error {\n\tif err := os.MkdirAll(location, os.FileMode(0755)); err != nil {\n\t\treturn err\n\t}\n\treturn syscall.Mount(source, location, fstype, flags, data)\n}",
"func (fs FilesystemStorage) TempDir() string {\n\treturn filepath.Join(fs.String(), \"tmp\")\n}",
"func PhysFS(root, prefix string, indexes bool, alreadyinitialized bool) *localFileSystem {\n\tif !alreadyinitialized {\n\t\troot, err := filepath.Abs(root)\n\t\tfmt.Println(root)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\terr = physfs.Init()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tdefer physfs.Deinit()\n\t\terr = physfs.Mount(root, \"/\", true)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tfs := physfs.FileSystem()\n\treturn &localFileSystem{\n\t\tFileSystem: fs,\n\t\torigfs: fs,\n\t\troot: root,\n\t\tprefix: prefix,\n\t\tindexes: indexes,\n\t\tphysfs: true,\n\t}\n}",
"func Mount(client *client.Client, mountpoint string, stop <-chan bool, stopped chan<- bool) {\n\tc, err := fuse.Mount(mountpoint)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\tdefer c.Close()\n\tlog.Printf(\"Mounting read-only filesystem on %v\\nCtrl+C to unmount.\", mountpoint)\n\n\tcs := make(chan os.Signal, 1)\n\tsignal.Notify(cs, os.Interrupt)\n\tgo func() {\n\t\tselect {\n\t\tcase <-cs:\n\t\t\tlog.Printf(\"got signal\")\n\t\t\tbreak\n\t\tcase <-stop:\n\t\t\tlog.Printf(\"got stop\")\n\t\t\tbreak\n\t\t}\n\t\tlog.Println(\"Closing client...\")\n\t\t//client.Blobs.Close()\n\t\tlog.Printf(\"Unmounting %v...\\n\", mountpoint)\n\t\terr := fuse.Unmount(mountpoint)\n\t\tif err != nil {\n\t\t\tlog.Printf(\"Error unmounting: %v\", err)\n\t\t} else {\n\t\t\tstopped <- true\n\t\t}\n\t}()\n\n\terr = fs.Serve(c, NewFS(client))\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\t// check if the mount process has an error to report\n\t<-c.Ready\n\tif err := c.MountError; err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func FileSystemCreate(f types.Filesystem) error {\n\tvar cmd *exec.Cmd\n\tvar debugCMD string\n\n\tswitch f.Mount.Format {\n\tcase \"swap\":\n\t\tcmd = exec.Command(\"/sbin/mkswap\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkswap\", f.Mount.Device)\n\tcase \"ext4\", \"ext3\", \"ext2\":\n\t\t// Add filesystem flags\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-t\")\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Format)\n\n\t\t// Add force\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, \"-F\")\n\n\t\t// Add Device to formate\n\t\tf.Mount.Create.Options = append(f.Mount.Create.Options, f.Mount.Device)\n\n\t\t// Format disk\n\t\tcmd = exec.Command(\"/sbin/mke2fs\", f.Mount.Create.Options...)\n\t\tfor i := range f.Mount.Create.Options {\n\t\t\tdebugCMD = fmt.Sprintf(\"%s %s\", debugCMD, f.Mount.Create.Options[i])\n\t\t}\n\tcase \"vfat\":\n\t\tcmd = exec.Command(\"/sbin/mkfs.fat\", f.Mount.Device)\n\t\tdebugCMD = fmt.Sprintf(\"%s %s\", \"/sbin/mkfs.fat\", f.Mount.Device)\n\tdefault:\n\t\tlog.Warnf(\"Unknown filesystem type [%s]\", f.Mount.Format)\n\t}\n\tcmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Command [%s] Filesystem [%v]\", debugCMD, err)\n\t}\n\n\treturn nil\n}",
"func Statfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n\taddr := args[0].Pointer()\n\tstatfsAddr := args[1].Pointer()\n\n\tpath, _, err := copyInPath(t, addr, false /* allowEmpty */)\n\tif err != nil {\n\t\treturn 0, nil, err\n\t}\n\n\treturn 0, nil, fileOpOn(t, linux.AT_FDCWD, path, true /* resolve */, func(root *fs.Dirent, d *fs.Dirent, _ uint) error {\n\t\treturn statfsImpl(t, d, statfsAddr)\n\t})\n}",
"func (devices *DeviceSet) MountDevice(hash, path string, moptions graphdriver.MountOpts) error {\n\tinfo, err := devices.lookupDeviceWithLock(hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif info.Deleted {\n\t\treturn fmt.Errorf(\"devmapper: Can't mount device %v as it has been marked for deferred deletion\", info.Hash)\n\t}\n\n\tinfo.lock.Lock()\n\tdefer info.lock.Unlock()\n\n\tdevices.Lock()\n\tdefer devices.Unlock()\n\n\tif err := devices.activateDeviceIfNeeded(info, false); err != nil {\n\t\treturn fmt.Errorf(\"devmapper: Error activating devmapper device for '%s': %s\", hash, err)\n\t}\n\n\tfstype, err := ProbeFsType(info.DevName())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\toptions := \"\"\n\n\tif fstype == xfs {\n\t\t// XFS needs nouuid or it can't mount filesystems with the same fs\n\t\toptions = joinMountOptions(options, \"nouuid\")\n\t}\n\n\tmountOptions := devices.mountOptions\n\tif len(moptions.Options) > 0 {\n\t\taddNouuid := strings.Contains(\"nouuid\", mountOptions)\n\t\tmountOptions = strings.Join(moptions.Options, \",\")\n\t\tif addNouuid {\n\t\t\tmountOptions = fmt.Sprintf(\"nouuid,%s\", mountOptions)\n\t\t}\n\t}\n\n\toptions = joinMountOptions(options, mountOptions)\n\toptions = joinMountOptions(options, label.FormatMountLabel(\"\", moptions.MountLabel))\n\n\tif err := mount.Mount(info.DevName(), path, fstype, options); err != nil {\n\t\treturn fmt.Errorf(\"failed to mount; dmesg: %s: %w\", string(dmesg.Dmesg(256)), err)\n\t}\n\n\tif fstype == xfs && devices.xfsNospaceRetries != \"\" {\n\t\tif err := devices.xfsSetNospaceRetries(info); err != nil {\n\t\t\tif err := mount.Unmount(path); err != nil {\n\t\t\t\tlogrus.Warnf(\"devmapper.MountDevice cleanup error: %v\", err)\n\t\t\t}\n\t\t\tdevices.deactivateDevice(info)\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}",
"func doMount(logger *zap.SugaredLogger, mounterPath string, mountCmd string, source string, target string, fstype string, options []string) error {\n\tmountArgs := makeMountArgs(source, target, fstype, options)\n\tif len(mounterPath) > 0 {\n\t\tmountArgs = append([]string{mountCmd}, mountArgs...)\n\t\tmountCmd = mounterPath\n\t}\n\tlogger.With(\"command\", mountCmd, \"args\", mountArgs).Info(\"Mounting\")\n\tcommand := exec.Command(mountCmd, mountArgs...)\n\toutput, err := command.CombinedOutput()\n\tif err != nil {\n\t\tlogger.With(\n\t\t\tzap.Error(err),\n\t\t\t\"command\", mountCmd,\n\t\t\t\"source\", source,\n\t\t\t\"target\", target,\n\t\t\t\"fsType\", fstype,\n\t\t\t\"options\", options,\n\t\t\t\"output\", string(output),\n\t\t).Error(\"Mount failed.\")\n\t\treturn fmt.Errorf(\"mount failed: %v\\nMounting command: %s\\nMounting arguments: %s %s %s %v\\nOutput: %v\\n\",\n\t\t\terr, mountCmd, source, target, fstype, options, string(output))\n\t}\n\tlogger.Debugf(\"Mount output: %v\", string(output))\n\treturn err\n}",
"func Mount(dev, path string) error {\n\tcmd := exec.Command(\"mount\", dev, path)\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"mount: %s\", utils.OneLine(out))\n\t}\n\n\treturn nil\n}",
"func (fs *FS) Mount(\n\tctx context.Context,\n\tsource, target, fsType string,\n\toptions ...string) error {\n\n\treturn fs.mount(ctx, source, target, fsType, options...)\n}",
"func (c *CrosDisks) Mount(ctx context.Context, devicePath, fsType string, options []string) error {\n\treturn c.call(ctx, \"Mount\", devicePath, fsType, options).Err\n}",
"func NewFilesystem(_ context.Context, cfgMap map[string]interface{}) (qfs.Filesystem, error) {\n\treturn NewFS(cfgMap)\n}",
"func (c *Client) Mount(ctx context.Context, svc iaas.Service, export string, mountPoint string, withCache bool) fail.Error {\n\ttimings, xerr := svc.Timings()\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdata := map[string]interface{}{\n\t\t\"Export\": export,\n\t\t\"MountPoint\": mountPoint,\n\t\t\"cacheOption\": map[bool]string{true: \"ac\", false: \"noac\"}[withCache],\n\t}\n\tstdout, xerr := executeScript(ctx, timings, c.SSHConfig, \"nfs_client_share_mount.sh\", data)\n\tif xerr != nil {\n\t\txerr.Annotate(\"stdout\", stdout)\n\t\treturn fail.Wrap(xerr, \"error executing script to mount remote NFS share\")\n\t}\n\treturn nil\n}",
"func (mounter *SafeFormatAndMount) formatAndMount(source string, target string, fstype string, options []string) error {\n\toptions = append(options, \"defaults\")\n\tmounter.Logger = mounter.Logger.With(\n\t\t\"source\", source,\n\t\t\"target\", target,\n\t\t\"fstype\", fstype,\n\t\t\"options\", options,\n\t)\n\t// Run fsck on the disk to fix repairable issues\n\tmounter.Logger.Info(\"Checking disk for issues using 'fsck'.\")\n\targs := []string{\"-a\", source}\n\tcmd := mounter.Runner.Command(\"fsck\", args...)\n\tout, err := cmd.CombinedOutput()\n\tmounter.Logger = mounter.Logger.With(\"output\", out)\n\tif err != nil {\n\t\tee, isExitError := err.(utilexec.ExitError)\n\t\tswitch {\n\t\tcase err == utilexec.ErrExecutableNotFound:\n\t\t\tmounter.Logger.Info(\"'fsck' not found on system; continuing mount without running 'fsck'.\")\n\t\tcase isExitError && ee.ExitStatus() == fsckErrorsCorrected:\n\t\t\tmounter.Logger.Info(\"Device has errors that were corrected with 'fsck'.\")\n\t\tcase isExitError && ee.ExitStatus() == fsckErrorsUncorrected:\n\t\t\tmounter.Logger.Info(\"'fsck' found errors on device but was unable to correct them.\")\n\t\t\treturn fmt.Errorf(\"'fsck' found errors on device %s but could not correct them: %s.\", source, string(out))\n\t\tcase isExitError && ee.ExitStatus() > fsckErrorsUncorrected:\n\t\t\tmounter.Logger.Error(\"'fsck' error.\")\n\t\t}\n\t}\n\n\t// Try to mount the disk\n\tmounter.Logger.Info(\"Attempting to mount disk.\")\n\tmountErr := mounter.Interface.Mount(source, target, fstype, options)\n\tif mountErr != nil {\n\t\t// Mount failed. This indicates either that the disk is unformatted or\n\t\t// it contains an unexpected filesystem.\n\t\texistingFormat, err := mounter.getDiskFormat(source)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif existingFormat == \"\" {\n\t\t\t// Disk is unformatted so format it.\n\t\t\targs = []string{source}\n\t\t\t// Use 'ext4' as the default\n\t\t\tif len(fstype) == 0 {\n\t\t\t\tfstype = \"ext4\"\n\t\t\t}\n\n\t\t\tif fstype == \"ext4\" || fstype == \"ext3\" {\n\t\t\t\targs = []string{\"-F\", source}\n\t\t\t}\n\t\t\tmounter.Logger.With(\"argruments\", args).Info(\"Disk appears to be unformatted, attempting to format.\")\n\t\t\tcmd := mounter.Runner.Command(\"mkfs.\"+fstype, args...)\n\t\t\t_, err := cmd.CombinedOutput()\n\t\t\tif err == nil {\n\t\t\t\t// the disk has been formatted successfully try to mount it again.\n\t\t\t\tmounter.Logger.Info(\"Disk successfully formatted.\")\n\t\t\t\treturn mounter.Interface.Mount(source, target, fstype, options)\n\t\t\t}\n\t\t\tmounter.Logger.With(zap.Error(err)).Error(\"Format of disk failed.\")\n\t\t\treturn err\n\t\t} else {\n\t\t\t// Disk is already formatted and failed to mount\n\t\t\tif len(fstype) == 0 || fstype == existingFormat {\n\t\t\t\t// This is mount error\n\t\t\t\treturn mountErr\n\t\t\t} else {\n\t\t\t\t// Block device is formatted with unexpected filesystem, let the user know\n\t\t\t\treturn fmt.Errorf(\"failed to mount the volume as %q, it already contains %s. Mount error: %v\", fstype, existingFormat, mountErr)\n\t\t\t}\n\t\t}\n\t}\n\treturn mountErr\n}",
"func (d *fsStorage) Mount(volume *Volume) error {\n\treturn nil\n}",
"func (u *CryptohomeClient) MountVault(ctx context.Context, label string, authConfig *AuthConfig, create bool, vaultConfig *VaultConfig) error {\n\tswitch u.mountAPI {\n\tcase OldCryptohomeMountAPI:\n\t\textraFlags := vaultConfigToExtraFlags(vaultConfig)\n\t\textraFlags = append(extraFlags, authConfigToExtraFlags(authConfig)...)\n\t\tif _, err := u.binary.mountEx(ctx, authConfig.Username, create, label, extraFlags); err != nil {\n\t\t\treturn errors.Wrap(err, \"failed to mount\")\n\t\t}\n\t\treturn nil\n\tcase AuthSessionMountAPI:\n\t\treturn u.mountVaultWithAuthSession(ctx, label, authConfig, create, vaultConfig)\n\tcase AuthFactorMountAPI:\n\t\treturn u.mountVaultWithAuthFactor(ctx, label, authConfig, create, vaultConfig)\n\tdefault:\n\t\treturn errors.New(\"unrecognized mountAPI parameter in tast test\")\n\t}\n}",
"func TestMountPasswordIncorrect(t *testing.T) {\n\tcDir := test_helpers.InitFS(t) // Create filesystem with password \"test\"\n\tctlSock := cDir + \".sock\"\n\tpDir := cDir + \".mnt\"\n\terr := test_helpers.Mount(cDir, pDir, false, \"-extpass\", \"echo WRONG\", \"-wpanic=false\", \"-ctlsock\", ctlSock)\n\texitCode := test_helpers.ExtractCmdExitCode(err)\n\tif exitCode != exitcodes.PasswordIncorrect {\n\t\tt.Errorf(\"want=%d, got=%d\", exitcodes.PasswordIncorrect, exitCode)\n\t}\n\tif _, err := os.Stat(ctlSock); err == nil {\n\t\tt.Errorf(\"socket file %q left behind\", ctlSock)\n\t}\n}",
"func TstTempDir(name string) string {\n\tf, err := os.MkdirTemp(\"\", name)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn f\n}",
"func WithTmpFS(destination string, size int64) Option {\n\treturn func(opts *Options) {\n\t\topts.TmpFS = append(opts.TmpFS, TmpVolume{\n\t\t\tDestination: destination,\n\t\t\tSize: size,\n\t\t})\n\t}\n}",
"func (mounter *Mounter) Mount(source string, target string, fstype string, options []string) error {\n\t// Path to mounter binary if containerized mounter is needed. Otherwise, it is set to empty.\n\t// All Linux distros are expected to be shipped with a mount utility that an support bind mounts.\n\tmounterPath := \"\"\n\tbind, bindRemountOpts := isBind(options)\n\tif bind {\n\t\terr := doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, []string{\"bind\"})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\treturn doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, bindRemountOpts)\n\t}\n\t// The list of filesystems that require containerized mounter on GCI image cluster\n\tfsTypesNeedMounter := []string{\"nfs\", \"glusterfs\", \"ceph\", \"cifs\"}\n\tfor _, fst := range fsTypesNeedMounter {\n\t\tif fst == fstype {\n\t\t\tmounterPath = mounter.mounterPath\n\t\t}\n\t}\n\treturn doMount(mounter.logger, mounterPath, defaultMountCommand, source, target, fstype, options)\n}",
"func (f *filesystem) Mount(ctx context.Context, device string, flags fs.MountSourceFlags, data string, cgroupsInt interface{}) (*fs.Inode, error) {\n\tpanic(\"unimplemented\")\n}",
"func Mount(target string, opts MountOptions) error {\n\t// bucket := options[\"bucket\"]\n\t// subPath := options[\"subPath\"]\n\targs := []string{\n\t\t\"-o\", \"allow_other\",\n\t\t\"--dir-mode\", opts.DirMode,\n\t\t\"--file-mode\", opts.FileMode,\n\t\t\"--region\", opts.Region,\n\t\t\"--gid\", opts.GID,\n\t\t\"--uid\", opts.UID,\n\t\t\"--acl\", opts.ACL,\n\t}\n\n\tif opts.Endpoint != \"\" {\n\t\targs = append(args, \"--endpoint\", opts.Endpoint)\n\t}\n\n\tverbose := viper.Get(\"verbose\")\n\tif verbose == true {\n\t\targs = append(args, \"--debug_s3\", \"--debug_fuse\")\n\t}\n\n\t// to avoid collisions we generate synthetic\n\tvar bucket string\n\tif opts.SubPath != \"\" {\n\t\tbucket = opts.Bucket + \":\" + opts.SubPath\n\t} else {\n\t\tbucket = opts.Bucket\n\t}\n\n\tmountPath := path.Join(cwd(), \"mnt\", opts.genVolumeName())\n\targs = append(args, bucket, mountPath)\n\n\tif !isMountPoint(mountPath) {\n\t\texec.Command(\"umount\", mountPath).Run()\n\t\texec.Command(\"rm\", \"-rf\", mountPath).Run()\n\t\tlogrus.Infof(\"Making dir %s\", mountPath)\n\t\tos.MkdirAll(mountPath, 0755)\n\n\t\tconst bin = \"goofys\"\n\t\tmountCmd := exec.Command(bin, args...)\n\t\tmountCmd.Env = os.Environ()\n\t\tif opts.AccessKeyB64 != \"\" {\n\t\t\ts := decodeBase64(opts.AccessKeyB64)\n\t\t\tmountCmd.Env = applyEnvVar(mountCmd.Env, \"AWS_ACCESS_KEY_ID\", s)\n\t\t}\n\t\tif opts.SecretKeyB64 != \"\" {\n\t\t\ts := decodeBase64(opts.SecretKeyB64)\n\t\t\tmountCmd.Env = applyEnvVar(mountCmd.Env, \"AWS_SECRET_ACCESS_KEY\", s)\n\t\t}\n\n\t\tvar stderr bytes.Buffer\n\t\tmountCmd.Stderr = &stderr\n\t\tlogrus.Infof(\"Running: %s %s\", bin, strings.Join(args, \" \"))\n\t\tlogrus.Debugf(\"Using env vars: %v\", mountCmd.Env)\n\t\terr := mountCmd.Run()\n\t\tif err != nil {\n\t\t\tlogrus.Errorf(\"Error: %s\", err)\n\t\t\tlogrus.Errorf(\"Output: %s\", stderr.String())\n\t\t\t// errMsg := err.Error() + \": \" + stderr.String()\n\t\t\t// if viper.Get(\"verbose\") != \"\" {\n\t\t\t// \terrMsg += fmt.Sprintf(\"; /var/log/syslog follows\")\n\t\t\t// \tgrepCmd := exec.Command(\"sh\", \"-c\", \"grep goofys /var/log/syslog | tail\")\n\t\t\t// \tvar stdout bytes.Buffer\n\t\t\t// \tgrepCmd.Stdout = &stdout\n\t\t\t// \tgrepCmd.Run()\n\t\t\t// \terrMsg += stdout.String()\n\t\t\t// }\n\t\t\treturn fmt.Errorf(\"Error executing %s: %s\", bin, err)\n\t\t}\n\t}\n\t// Now we rmdir the target, and then make a symlink to it!\n\terr := os.Remove(target)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlogrus.Infof(\"Making symlink %s to %s\", target, mountPath)\n\terr = os.Symlink(mountPath, target)\n\tif err == nil {\n\t\trespondSuccess()\n\t}\n\treturn err\n}",
"func TestMountPasswordEmpty(t *testing.T) {\n\tcDir := test_helpers.InitFS(t) // Create filesystem with password \"test\"\n\tctlSock := cDir + \".sock\"\n\tpDir := cDir + \".mnt\"\n\terr := test_helpers.Mount(cDir, pDir, false, \"-extpass\", \"true\", \"-wpanic=false\", \"-ctlsock\", ctlSock)\n\texitCode := test_helpers.ExtractCmdExitCode(err)\n\tif exitCode != exitcodes.ReadPassword {\n\t\tt.Errorf(\"want=%d, got=%d\", exitcodes.ReadPassword, exitCode)\n\t}\n\tif _, err := os.Stat(ctlSock); err == nil {\n\t\tt.Errorf(\"socket file %q left behind\", ctlSock)\n\t}\n}",
"func IsMountFS(mntType int64, path string) (bool, bool, error) {\n\treturn false, false, errors.New(\"not implemented\")\n}",
"func TestMount(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping integration tests in short mode\")\n\t}\n\tc, _ := minikubetestenv.AcquireCluster(t)\n\tctx := context.Background()\n\t// If the test has a deadline, cancel the context slightly before it in\n\t// order to allow time for clean subprocess teardown. Without this it\n\t// is possible to leave filesystems mounted after test failure.\n\tif deadline, ok := t.Deadline(); ok {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, time.Duration(float64(time.Until(deadline))*.99))\n\t\tdefer cancel()\n\t}\n\teg, ctx := errgroup.WithContext(ctx)\n\trepoName := tu.UniqueString(\"TestMount-repo\")\n\tconfigDir := t.TempDir()\n\tp, err := tu.NewPachctl(ctx, c, filepath.Join(configDir, \"config.json\"))\n\trequire.NoError(t, err)\n\tdefer p.Close()\n\tfor _, projectName := range []string{tu.UniqueString(\"TestMount-project1\"), tu.UniqueString(\"TestMount-project2\")} {\n\t\tprojectName := projectName\n\t\tmntDirPath := filepath.Join(t.TempDir())\n\t\tfileName := tu.UniqueString(\"filename\")\n\t\t// TODO: Refactor tu.PachctlBashCmd to handle this a bit more\n\t\t// elegantly, perhaps based on a context or something like that\n\t\t// rather than on a name. For now, though, this does work, even\n\t\t// if the indirection through subtests which always succeed but\n\t\t// spawn goroutines which may fail is a bit confusing.\n\t\teg.Go(func() error {\n\t\t\tcmd, err := p.CommandTemplate(ctx, `\n\t\t\t\t\tpachctl create project {{.projectName}}\n\t\t\t\t\tpachctl create repo {{.repoName}} --project {{.projectName}}\n\t\t\t\t\t# this needs to be execed in order for process killing to cleanly unmount\n\t\t\t\t\texec pachctl mount {{.mntDirPath}} -w --project {{.projectName}}\n\t\t\t\t\t`,\n\t\t\t\tmap[string]string{\n\t\t\t\t\t\"projectName\": projectName,\n\t\t\t\t\t\"repoName\": repoName,\n\t\t\t\t\t\"mntDirPath\": mntDirPath,\n\t\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"could not create mount command\")\n\t\t\t}\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\tt.Log(\"stdout:\", cmd.Stdout())\n\t\t\t\tt.Log(\"stderr:\", cmd.Stderr())\n\t\t\t\treturn errors.Wrap(err, \"could not mount\")\n\t\t\t}\n\t\t\tif cmd, err = p.CommandTemplate(ctx, `\n\t\t\t\t\tpachctl list files {{.repoName}}@master --project {{.projectName}} | grep {{.fileName}} > /dev/null || exit \"could not find {{.fileName}}\"\n\t\t\t\t\t# check that only one file is present\n\t\t\t\t\t[[ $(pachctl list files {{.repoName}}@master --project {{.projectName}} | wc -l) -eq 2 ]] || exit \"more than one file found in repo\"\n\t\t\t\t\t`,\n\t\t\t\tmap[string]string{\n\t\t\t\t\t\"projectName\": projectName,\n\t\t\t\t\t\"repoName\": repoName,\n\t\t\t\t\t\"fileName\": fileName,\n\t\t\t\t}); err != nil {\n\t\t\t\treturn errors.Wrap(err, \"could not create validation command\")\n\t\t\t}\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\tt.Log(\"stdout:\", cmd.Stdout())\n\t\t\t\tt.Log(\"stderr:\", cmd.Stderr())\n\t\t\t\treturn errors.Wrap(err, \"could not validate\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\teg.Go(func() error {\n\t\t\tif err := backoff.Retry(func() error {\n\t\t\t\tff, err := os.ReadDir(mntDirPath)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn errors.Wrapf(err, \"could not read %s\", mntDirPath)\n\t\t\t\t}\n\t\t\t\tif len(ff) == 0 {\n\t\t\t\t\treturn errors.Errorf(\"%s not yet mounted\", mntDirPath)\n\t\t\t\t}\n\t\t\t\treturn nil\n\t\t\t}, backoff.NewExponentialBackOff()); err != nil {\n\t\t\t\treturn errors.Wrapf(err, 
\"%q never mounted\", mntDirPath)\n\t\t\t}\n\t\t\ttestFilePath := filepath.Join(mntDirPath, repoName, fileName)\n\t\t\tcmd, err := p.CommandTemplate(ctx, `\n\t\t\t\t\techo \"this is a test\" > {{.testFilePath}}\n\t\t\t\t\tfusermount -u {{.mntDirPath}}\n\t\t\t\t\t`,\n\t\t\t\tmap[string]string{\n\t\t\t\t\t\"mntDirPath\": mntDirPath,\n\t\t\t\t\t\"testFilePath\": testFilePath,\n\t\t\t\t})\n\t\t\tif err != nil {\n\t\t\t\treturn errors.Wrap(err, \"could not create mutator\")\n\t\t\t}\n\t\t\tif err := cmd.Run(); err != nil {\n\t\t\t\tt.Log(\"stdout:\", cmd.Stdout())\n\t\t\t\tt.Log(\"stderr:\", cmd.Stderr())\n\t\t\t\treturn errors.Wrap(err, \"could not run mutator\")\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\trequire.NoError(t, eg.Wait(), \"goroutines failed\")\n}",
"func (w *wrapper) Statfs(path string, stat *fuse.Statfs_t) int {\n\treturn -fuse.ENOSYS\n}",
"func Fstatfs(t *kernel.Task, args arch.SyscallArguments) (uintptr, *kernel.SyscallControl, error) {\n\tfd := args[0].Int()\n\tstatfsAddr := args[1].Pointer()\n\n\tfile := t.GetFile(fd)\n\tif file == nil {\n\t\treturn 0, nil, syserror.EBADF\n\t}\n\tdefer file.DecRef()\n\n\treturn 0, nil, statfsImpl(t, file.Dirent, statfsAddr)\n}",
"func (f *Fs) mountSharedFolder(ctx context.Context, id string) error {\n\targ := sharing.MountFolderArg{\n\t\tSharedFolderId: id,\n\t}\n\terr := f.pacer.Call(func() (bool, error) {\n\t\t_, err := f.sharing.MountFolder(&arg)\n\t\treturn shouldRetry(ctx, err)\n\t})\n\treturn err\n}",
"func (daemon *Daemon) openContainerFS(container *container.Container) (_ *containerFSView, err error) {\n\tif err := daemon.Mount(container); err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\t_ = daemon.Unmount(container)\n\t\t}\n\t}()\n\n\tmounts, err := daemon.setupMounts(container)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\t_ = container.UnmountVolumes(daemon.LogVolumeEvent)\n\t\t}\n\t}()\n\n\t// Setup in initial mount namespace complete. We're ready to unshare the\n\t// mount namespace and bind the volume mounts into that private view of\n\t// the container FS.\n\ttodo := make(chan future)\n\tdone := make(chan error)\n\terr = unshare.Go(unix.CLONE_NEWNS,\n\t\tfunc() error {\n\t\t\tif err := mount.MakeRSlave(\"/\"); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfor _, m := range mounts {\n\t\t\t\tdest, err := container.GetResourcePath(m.Destination)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tvar stat os.FileInfo\n\t\t\t\tstat, err = os.Stat(m.Source)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t\tif err := fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tbindMode := \"rbind\"\n\t\t\t\tif m.NonRecursive {\n\t\t\t\t\tbindMode = \"bind\"\n\t\t\t\t}\n\t\t\t\twriteMode := \"ro\"\n\t\t\t\tif m.Writable {\n\t\t\t\t\twriteMode = \"rw\"\n\t\t\t\t\tif m.ReadOnlyNonRecursive {\n\t\t\t\t\t\treturn errors.New(\"options conflict: Writable && ReadOnlyNonRecursive\")\n\t\t\t\t\t}\n\t\t\t\t\tif m.ReadOnlyForceRecursive {\n\t\t\t\t\t\treturn errors.New(\"options conflict: Writable && ReadOnlyForceRecursive\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif m.ReadOnlyNonRecursive && m.ReadOnlyForceRecursive {\n\t\t\t\t\treturn errors.New(\"options conflict: ReadOnlyNonRecursive && ReadOnlyForceRecursive\")\n\t\t\t\t}\n\n\t\t\t\t// openContainerFS() is called for temporary mounts\n\t\t\t\t// outside the container. Soon these will be unmounted\n\t\t\t\t// with lazy unmount option and given we have mounted\n\t\t\t\t// them rbind, all the submounts will propagate if these\n\t\t\t\t// are shared. If daemon is running in host namespace\n\t\t\t\t// and has / as shared then these unmounts will\n\t\t\t\t// propagate and unmount original mount as well. So make\n\t\t\t\t// all these mounts rprivate. 
Do not use propagation\n\t\t\t\t// property of volume as that should apply only when\n\t\t\t\t// mounting happens inside the container.\n\t\t\t\topts := strings.Join([]string{bindMode, writeMode, \"rprivate\"}, \",\")\n\t\t\t\tif err := mount.Mount(m.Source, dest, \"\", opts); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\n\t\t\t\tif !m.Writable && !m.ReadOnlyNonRecursive {\n\t\t\t\t\tif err := makeMountRRO(dest); err != nil {\n\t\t\t\t\t\tif m.ReadOnlyForceRecursive {\n\t\t\t\t\t\t\treturn err\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tlog.G(context.TODO()).WithError(err).Debugf(\"Failed to make %q recursively read-only\", dest)\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn mounttree.SwitchRoot(container.BaseFS)\n\t\t},\n\t\tfunc() {\n\t\t\tdefer close(done)\n\n\t\t\tfor it := range todo {\n\t\t\t\terr := it.fn()\n\t\t\t\tif it.res != nil {\n\t\t\t\t\tit.res <- err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// The thread will terminate when this goroutine returns, taking the\n\t\t\t// mount namespace and all the volume bind-mounts with it.\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvw := &containerFSView{\n\t\td: daemon,\n\t\tctr: container,\n\t\ttodo: todo,\n\t\tdone: done,\n\t}\n\truntime.SetFinalizer(vw, (*containerFSView).Close)\n\treturn vw, nil\n}",
"func (p *btrfsPool) Mount() (string, error) {\n\tctx := context.TODO()\n\tmnt, err := p.Mounted()\n\tif err == nil {\n\t\treturn mnt, nil\n\t} else if !errors.Is(err, ErrDeviceNotMounted) {\n\t\treturn \"\", errors.Wrap(err, \"failed to check device mount status\")\n\t}\n\n\t// device is not mounted\n\tmnt = p.Path()\n\tif err := os.MkdirAll(mnt, 0755); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := syscall.Mount(p.device.Path, mnt, \"btrfs\", 0, \"\"); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif err := p.utils.QGroupEnable(ctx, mnt); err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to enable qgroup: %w\", err)\n\t}\n\n\treturn mnt, p.maintenance()\n}",
"func mount(\n\tctx context.Context,\n\tbucketName string,\n\tflags *FlagStorage) (fs *Goofys, mfs *fuse.MountedFileSystem, err error) {\n\n\t// XXX really silly copy here! in goofys.Mount we will copy it\n\t// back to FlagStorage. But I don't see a easier way to expose\n\t// Config in the api package\n\tvar config goofys.Config\n\tcopier.Copy(&config, *flags)\n\n\treturn goofys.Mount(ctx, bucketName, &config)\n}",
"func (d *driverInfo) Mount(volume *Volume) error {\n\t// don't mount twice\n\tif err := volume.CheckUnmounted(); err != nil {\n\t\treturn err\n\t}\n\n\tvolume.MountPath = d.getMountPath(volume.Name)\n\texists, err := fs.DirExists(volume.MountPath)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error accessing mount path '%s': %v\", volume.MountPath, err)\n\t}\n\n\tif !exists {\n\t\tif err := fs.CreateDir(volume.MountPath, true, 0700); err != nil {\n\t\t\treturn fmt.Errorf(\"error creating mount path '%s': %v\", volume.MountPath, err)\n\t\t}\n\t}\n\n\tif err := d.storage.Mount(volume); err != nil {\n\t\tvolume.MountPath = \"\"\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func bindMount(source, dest string) error {\n\treturn syscall.Mount(source, dest, \"\", syscall.MS_BIND, \"\")\n}",
"func ForceUnmount(m Mount) error {\n\tpoint := m.MountPoint()\n\tlog.Infof(\"Force-Unmounting %s...\", point)\n\n\tvar cmd *exec.Cmd\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tcmd = exec.Command(\"diskutil\", \"umount\", \"force\", point)\n\tcase \"linux\":\n\t\tcmd = exec.Command(\"fusermount\", \"-u\", point)\n\tdefault:\n\t\treturn fmt.Errorf(\"unmount: unimplemented\")\n\t}\n\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(errc)\n\n\t\t// try vanilla unmount first.\n\t\tif err := exec.Command(\"umount\", point).Run(); err == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// retry to unmount with the fallback cmd\n\t\terrc <- cmd.Run()\n\t}()\n\n\tselect {\n\tcase <-time.After(2 * time.Second):\n\t\treturn fmt.Errorf(\"umount timeout\")\n\tcase err := <-errc:\n\t\treturn err\n\t}\n}",
"func (fs *Mysqlfs) TempFile(dir, prefix string) (billy.File, error) {\n\treturn util.TempFile(fs, dir, prefix)\n}",
"func Mount(dir, opts string) error {\n\tinp := strings.Join([]string{dir, opts}, \"|\")\n\n\tninputs, err := produceWorkloadInfo(opts)\n\tif err != nil {\n\t\treturn failure(\"mount\", inp, err.Error())\n\t}\n\n\tif err := doMount(dir, ninputs); err != nil {\n\t\tsErr := \"Failure to mount: \" + err.Error()\n\t\treturn failure(\"mount\", inp, sErr)\n\t}\n\n\tif configuration.UseGrpc == true {\n\t\tif err := sendWorkloadAdded(ninputs); err != nil {\n\t\t\thandleErrMount(dir, ninputs)\n\t\t\tsErr := \"Failure to notify nodeagent: \" + err.Error()\n\t\t\treturn failure(\"mount\", inp, sErr)\n\t\t}\n\t} else if err := addCredentialFile(ninputs); err != nil {\n\t\thandleErrMount(dir, ninputs)\n\t\tsErr := \"Failure to create credentials: \" + err.Error()\n\t\treturn failure(\"mount\", inp, sErr)\n\t}\n\n\treturn genericSuccess(\"mount\", inp, \"Mount ok.\")\n}",
"func setupSystemd(mounts []rspec.Mount, g generate.Generator) {\n\toptions := []string{\"rw\", \"rprivate\", \"noexec\", \"nosuid\", \"nodev\"}\n\tfor _, dest := range []string{\"/run\", \"/run/lock\"} {\n\t\tif mountExists(mounts, dest) {\n\t\t\tcontinue\n\t\t}\n\t\ttmpfsMnt := rspec.Mount{\n\t\t\tDestination: dest,\n\t\t\tType: \"tmpfs\",\n\t\t\tSource: \"tmpfs\",\n\t\t\tOptions: append(options, \"tmpcopyup\"),\n\t\t}\n\t\tg.AddMount(tmpfsMnt)\n\t}\n\tfor _, dest := range []string{\"/tmp\", \"/var/log/journal\"} {\n\t\tif mountExists(mounts, dest) {\n\t\t\tcontinue\n\t\t}\n\t\ttmpfsMnt := rspec.Mount{\n\t\t\tDestination: dest,\n\t\t\tType: \"tmpfs\",\n\t\t\tSource: \"tmpfs\",\n\t\t\tOptions: append(options, \"tmpcopyup\"),\n\t\t}\n\t\tg.AddMount(tmpfsMnt)\n\t}\n\n\tif node.CgroupIsV2() {\n\t\tg.RemoveMount(cgroupSysFsPath)\n\n\t\tsystemdMnt := rspec.Mount{\n\t\t\tDestination: cgroupSysFsPath,\n\t\t\tType: \"cgroup\",\n\t\t\tSource: \"cgroup\",\n\t\t\tOptions: []string{\"private\", \"rw\"},\n\t\t}\n\t\tg.AddMount(systemdMnt)\n\t} else {\n\t\t// If the /sys/fs/cgroup is bind mounted from the host,\n\t\t// then systemd-mode cgroup should be disabled\n\t\t// https://bugzilla.redhat.com/show_bug.cgi?id=2064741\n\t\tif !hasCgroupMount(g.Mounts()) {\n\t\t\tsystemdMnt := rspec.Mount{\n\t\t\t\tDestination: cgroupSysFsSystemdPath,\n\t\t\t\tType: \"bind\",\n\t\t\t\tSource: cgroupSysFsSystemdPath,\n\t\t\t\tOptions: []string{\"bind\", \"nodev\", \"noexec\", \"nosuid\"},\n\t\t\t}\n\t\t\tg.AddMount(systemdMnt)\n\t\t}\n\t\tg.AddLinuxMaskedPaths(filepath.Join(cgroupSysFsSystemdPath, \"release_agent\"))\n\t}\n\tg.AddProcessEnv(\"container\", \"crio\")\n}",
"func (d *Directory) Temporary() error {\n\ttmp, err := ioutil.TempDir(\"\", \"workdir\")\n\tif err != nil {\n\t\treturn err\n\t}\n\td.SetPath(tmp)\n\treturn nil\n}",
"func (fs FilesystemStorage) obtainTempFile() (f *os.File, err error) {\n\tfname := fmt.Sprintf(\"tempfile-%x-%d\", crypto.RandBytes(4), time.Now().Unix())\n\tlog.WithFields(log.Fields{\n\t\t\"pkg\": \"fs-store\",\n\t\t\"filepath\": fname,\n\t}).Debug(\"opening temp file\")\n\tf, err = os.OpenFile(filepath.Join(fs.TempDir(), fname), os.O_RDWR|os.O_CREATE, 0400)\n\treturn\n}",
"func MustTempFile(t TB, dir, prefix string) (f *os.File) {\n\tf, err := ioutil.TempFile(dir, prefix)\n\tmust(t, err)\n\treturn f\n}",
"func setupMounts(rootDir string, bindMounts []string) error {\n\terr := wsyscall.Mount(\"none\", filepath.Join(rootDir, \"proc\"), \"proc\", 0, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\terr = wsyscall.Mount(\"none\", filepath.Join(rootDir, \"sys\"), \"sysfs\", 0, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, bindMount := range bindMounts {\n\t\terr := wsyscall.Mount(bindMount,\n\t\t\tfilepath.Join(rootDir, bindMount), \"\",\n\t\t\twsyscall.MS_BIND|wsyscall.MS_RDONLY, \"\")\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error bind mounting: %s: %s\", bindMount, err)\n\t\t}\n\t}\n\treturn nil\n}"
] | [
"0.6258177",
"0.61509657",
"0.61509657",
"0.60223854",
"0.5980758",
"0.5885019",
"0.5884537",
"0.5825131",
"0.5823397",
"0.5804555",
"0.56531787",
"0.5624289",
"0.56143105",
"0.5539166",
"0.54718363",
"0.54713595",
"0.5460068",
"0.5445862",
"0.54432094",
"0.54395497",
"0.54228497",
"0.53955805",
"0.53518796",
"0.53460604",
"0.53246135",
"0.53230506",
"0.5320212",
"0.5292304",
"0.52498305",
"0.5248105",
"0.52472496",
"0.52307457",
"0.52264386",
"0.5210824",
"0.51900893",
"0.5185471",
"0.5151731",
"0.5148044",
"0.51214653",
"0.5120192",
"0.51066643",
"0.50965977",
"0.50870854",
"0.50797266",
"0.5074978",
"0.5073727",
"0.50469226",
"0.50397635",
"0.50340056",
"0.5027765",
"0.50204796",
"0.5013795",
"0.50126535",
"0.5011782",
"0.49948764",
"0.498084",
"0.49505678",
"0.4943967",
"0.49308696",
"0.49249554",
"0.49095392",
"0.4899178",
"0.48943844",
"0.48873916",
"0.48809096",
"0.4880801",
"0.48786163",
"0.4843823",
"0.48351866",
"0.48167825",
"0.47976103",
"0.4777243",
"0.47401193",
"0.47340706",
"0.47318256",
"0.47275802",
"0.47267574",
"0.47255206",
"0.4725469",
"0.47248742",
"0.4723681",
"0.47221208",
"0.47158408",
"0.47078085",
"0.4702116",
"0.46994287",
"0.46966708",
"0.4687709",
"0.46857524",
"0.46730652",
"0.46653908",
"0.46599978",
"0.46375",
"0.46301097",
"0.46272153",
"0.4626625",
"0.46248677",
"0.46151686",
"0.46121618"
] | 0.8210658 | 1 |
UnmountTempFS unmounts a filesystem. It is primarily a wrapper around unix.Unmount, but it also removes the mountpoint after the filesystem is unmounted. | func UnmountTempFS(dir string, fstype string) error {
err := unix.Unmount(dir, 0)
if err != nil {
glog.V(2).Infof("Couldn't unmount %s at %s: %s", fstype, dir, err)
return err
}
err = os.Remove(dir)
if err != nil {
glog.V(2).Infof("Couldn't remove %s: %s", dir, err)
return err
}
return nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Unmount(mountpoint string) (err error) {\n\tlog.Println(\"Unmounting filesystem\")\n\terr = fuse.Unmount(mountpoint)\n\treturn\n}",
"func MountTempFS(source string, target string, fstype string, flags uintptr, data string) error {\n\t// Make sure that `target` exists.\n\terr := os.MkdirAll(target, 0500)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't create temp %s mountpoint: %s\", fstype, err)\n\t\treturn err\n\t}\n\n\terr = unix.Mount(source, target, fstype, flags, data)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't mount %s on %s: %s\", fstype, target, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func MountTempFS(source string, target string, fstype string, flags uintptr, data string) error {\n\t// Make sure that `target` exists.\n\terr := os.MkdirAll(target, 0500)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't create temp %s mountpoint: %s\", fstype, err)\n\t\treturn err\n\t}\n\n\terr = unix.Mount(source, target, fstype, flags, data)\n\tif err != nil {\n\t\tglog.V(2).Infof(\"Couldn't mount %s on %s: %s\", fstype, target, err)\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func Unmount(ctx context.Context, mountPath string, opts ...UnmountOpt) error {\n\tuo := unmountOpts{\n\t\tfusermountPath: \"fusermount\",\n\t}\n\n\tfor _, opt := range opts {\n\t\tif err := opt(&uo); err != nil {\n\t\t\treturn fmt.Errorf(\"%w\", err)\n\t\t}\n\t}\n\n\treturn unmountSquashFS(ctx, mountPath, uo)\n}",
"func (ms *Server) Unmount() (err error) {\n\tif ms.mountPoint == \"\" {\n\t\treturn nil\n\t}\n\tif parseFuseFd(ms.mountPoint) >= 0 {\n\t\treturn fmt.Errorf(\"Cannot unmount magic mountpoint %q. Please use `fusermount -u REALMOUNTPOINT` instead.\", ms.mountPoint)\n\t}\n\tdelay := time.Duration(0)\n\tfor try := 0; try < 5; try++ {\n\t\terr = unmount(ms.mountPoint, ms.opts)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\n\t\t// Sleep for a bit. This is not pretty, but there is\n\t\t// no way we can be certain that the kernel thinks all\n\t\t// open files have already been closed.\n\t\tdelay = 2*delay + 5*time.Millisecond\n\t\ttime.Sleep(delay)\n\t}\n\tif err != nil {\n\t\treturn\n\t}\n\t// Wait for event loops to exit.\n\tms.loops.Wait()\n\tms.mountPoint = \"\"\n\treturn err\n}",
"func ForceUnmount(m Mount) error {\n\tpoint := m.MountPoint()\n\tlog.Infof(\"Force-Unmounting %s...\", point)\n\n\tvar cmd *exec.Cmd\n\tswitch runtime.GOOS {\n\tcase \"darwin\":\n\t\tcmd = exec.Command(\"diskutil\", \"umount\", \"force\", point)\n\tcase \"linux\":\n\t\tcmd = exec.Command(\"fusermount\", \"-u\", point)\n\tdefault:\n\t\treturn fmt.Errorf(\"unmount: unimplemented\")\n\t}\n\n\terrc := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(errc)\n\n\t\t// try vanilla unmount first.\n\t\tif err := exec.Command(\"umount\", point).Run(); err == nil {\n\t\t\treturn\n\t\t}\n\n\t\t// retry to unmount with the fallback cmd\n\t\terrc <- cmd.Run()\n\t}()\n\n\tselect {\n\tcase <-time.After(2 * time.Second):\n\t\treturn fmt.Errorf(\"umount timeout\")\n\tcase err := <-errc:\n\t\treturn err\n\t}\n}",
"func (m *Mounter) Unmount(\n\tdevPath string,\n\tpath string,\n\tflags int,\n\ttimeout int,\n\topts map[string]string,\n) error {\n\tm.Lock()\n\t// device gets overwritten if opts specifies fuse mount with\n\t// options.OptionsDeviceFuseMount.\n\tdevice := devPath\n\tpath = normalizeMountPath(path)\n\tif value, ok := opts[options.OptionsDeviceFuseMount]; ok {\n\t\t// fuse mounts show-up with this key as device.\n\t\tdevice = value\n\t}\n\tinfo, ok := m.mounts[device]\n\tif !ok {\n\t\tlogrus.Warnf(\"Unable to unmount device %q path %q: %v\",\n\t\t\tdevPath, path, ErrEnoent.Error())\n\t\tlogrus.Infof(\"Found %v mounts in mounter's cache: \", len(m.mounts))\n\t\tlogrus.Infof(\"Mounter has the following mountpoints: \")\n\t\tfor dev, info := range m.mounts {\n\t\t\tlogrus.Infof(\"For Device %v: Info: %v\", dev, info)\n\t\t\tif info == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, path := range info.Mountpoint {\n\t\t\t\tlogrus.Infof(\"\\t Mountpath: %v Rootpath: %v\", path.Path, path.Root)\n\t\t\t}\n\t\t}\n\t\tm.Unlock()\n\t\treturn ErrEnoent\n\t}\n\tm.Unlock()\n\tinfo.Lock()\n\tdefer info.Unlock()\n\tfor i, p := range info.Mountpoint {\n\t\tif p.Path != path {\n\t\t\tcontinue\n\t\t}\n\t\terr := m.mountImpl.Unmount(path, flags, timeout)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\t// Blow away this mountpoint.\n\t\tinfo.Mountpoint[i] = info.Mountpoint[len(info.Mountpoint)-1]\n\t\tinfo.Mountpoint = info.Mountpoint[0 : len(info.Mountpoint)-1]\n\t\tm.maybeRemoveDevice(device)\n\t\tif options.IsBoolOptionSet(opts, options.OptionsDeleteAfterUnmount) {\n\t\t\tm.RemoveMountPath(path, opts)\n\t\t}\n\n\t\treturn nil\n\t}\n\tlogrus.Warnf(\"Device %q is not mounted at path %q\", device, path)\n\treturn ErrEnoent\n}",
"func Unmount(path string, force, lazy bool) error {\n\tvar flags = unix.UMOUNT_NOFOLLOW\n\tif len(path) == 0 {\n\t\treturn errors.New(\"path cannot be empty\")\n\t}\n\tif force && lazy {\n\t\treturn errors.New(\"force and lazy unmount cannot both be set\")\n\t}\n\tif force {\n\t\tflags |= unix.MNT_FORCE\n\t}\n\tif lazy {\n\t\tflags |= unix.MNT_DETACH\n\t}\n\tif err := unix.Unmount(path, flags); err != nil {\n\t\treturn fmt.Errorf(\"umount %q flags %x: %v\", path, flags, err)\n\t}\n\treturn nil\n}",
"func (m *mount) unmount() error {\n\tlog.Infof(\"Unmounting %s\", m.MountPoint())\n\n\t// try unmounting with fuse lib\n\terr := fuse.Unmount(m.MountPoint())\n\tif err == nil {\n\t\treturn nil\n\t}\n\tlog.Error(\"fuse unmount err: %s\", err)\n\n\t// try closing the fuseConn\n\terr = m.fuseConn.Close()\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\tlog.Error(\"fuse conn error: %s\", err)\n\t}\n\n\t// try mount.ForceUnmountManyTimes\n\tif err := ForceUnmountManyTimes(m, 10); err != nil {\n\t\treturn err\n\t}\n\n\tlog.Infof(\"Seemingly unmounted %s\", m.MountPoint())\n\treturn nil\n}",
"func (o *Filesystem) Unmount(ctx context.Context, options map[string]dbus.Variant) (err error) {\n\terr = o.object.CallWithContext(ctx, InterfaceFilesystem+\".Unmount\", 0, options).Store()\n\treturn\n}",
"func Unmount(mountpoint string) error {\n\treturn syscall.Unmount(mountpoint, 0)\n}",
"func (fs *FS) Unmount(ctx context.Context, target string) error {\n\treturn fs.unmount(ctx, target)\n}",
"func (i *ImageService) Unmount(ctx context.Context, container *container.Container) error {\n\troot := container.BaseFS\n\n\tif err := mount.UnmountAll(root, 0); err != nil {\n\t\treturn fmt.Errorf(\"failed to unmount %s: %w\", root, err)\n\t}\n\n\tif err := os.Remove(root); err != nil {\n\t\tlogrus.WithError(err).WithField(\"dir\", root).Error(\"failed to remove mount temp dir\")\n\t}\n\n\tcontainer.BaseFS = \"\"\n\n\treturn nil\n}",
"func forceUnmount(target string) (err error) {\n\t// Simple retry logic for unmount\n\tfor i := 0; i < 10; i++ {\n\t\tif err = sysUnmount(target, 0); err == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}\n\treturn\n}",
"func Unmount(path string, force, lazy bool) error {\n\treturn mount.Unmount(path, force, lazy)\n}",
"func Unmount(pMountPoint string) error {\n\tvUnmountError := fuse.Unmount(pMountPoint)\n\tif vUnmountError != nil {\n\t\treturn diagnostic.NewError(\"An error occurred while unmounting onedrive filesystem mounted at %s\", vUnmountError, pMountPoint)\n\t}\n\treturn nil\n}",
"func Unmount(path string) error {\n\tcmd := exec.Command(\"fusermount\", \"-u\", path)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"exec of fusermount -u %s failed: %v\", path, err)\n\t}\n\treturn nil\n}",
"func (c *Client) Unmount(ctx context.Context, svc iaas.Service, export string) fail.Error {\n\ttimings, xerr := svc.Timings()\n\tif xerr != nil {\n\t\treturn xerr\n\t}\n\n\tdata := map[string]interface{}{\"Export\": export}\n\tstdout, xerr := executeScript(ctx, timings, c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\tif xerr != nil {\n\t\txerr.Annotate(\"stdout\", stdout)\n\t\treturn fail.Wrap(xerr, \"error executing script to unmount remote NFS share\")\n\t}\n\treturn nil\n}",
"func (d *Driver) Unmount(mountDir string) {\n\tDebug(\"findmnt: \" + mountDir)\n\t_, err := RunCommand(\"findmnt\", \"-n\", \"-o\", \"SOURCE\", \"--target\", mountDir)\n\tif err != nil {\n\t\tDebug(err.Error())\n\t}\n\n\tDebug(\"syscall.Unmount: \" + mountDir)\n\tif err := syscall.Unmount(mountDir, 0); err != nil {\n\t\tFailure(err)\n\t}\n\n\tDebug(\"Detach hetzner volume from server\")\n\tvolume := GetVolume(d.client, d.options.PVOrVolumeName)\n\t_, _, errDetach := d.client.Volume.Detach(context.Background(), volume)\n\n\tif errDetach != nil {\n\t\tFailure(errDetach)\n\t}\n\n\t// Delete json file with token in it\n\t//Debug(\"os.Remove\")\n\t//if err := os.Remove(jsonOptionsFile); err != nil {\n\t//\tfailure(err)\n\t//}\n\n\tSuccess()\n}",
"func UnmountVirtualFileSystem() {\n\tvirtualFileSystemArchiveType = 0\n\tvirtualFileSystemArchive = \"\"\n\tvirtualFileSystemPassword = \"\"\n\tvirtualFileSystemEncryptionKey = \"\"\n}",
"func unmount(target string) error {\n\tif mounted, err := mounted(target); err != nil || !mounted {\n\t\treturn err\n\t}\n\treturn forceUnmount(target)\n}",
"func (f *FakeMounter) Unmount(target string) error {\n\tf.mutex.Lock()\n\tdefer f.mutex.Unlock()\n\n\t// If target is a symlink, get its absolute path\n\tabsTarget, err := filepath.EvalSymlinks(target)\n\tif err != nil {\n\t\tabsTarget = target\n\t}\n\n\tnewMountpoints := []MountPoint{}\n\tfor _, mp := range f.MountPoints {\n\t\tif mp.Path == absTarget {\n\t\t\tif f.UnmountFunc != nil {\n\t\t\t\terr := f.UnmountFunc(absTarget)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\t\t\tklog.V(5).Infof(\"Fake mounter: unmounted %s from %s\", mp.Device, absTarget)\n\t\t\t// Don't copy it to newMountpoints\n\t\t\tcontinue\n\t\t}\n\t\tnewMountpoints = append(newMountpoints, MountPoint{Device: mp.Device, Path: mp.Path, Type: mp.Type})\n\t}\n\tf.MountPoints = newMountpoints\n\tf.log = append(f.log, FakeAction{Action: FakeActionUnmount, Target: absTarget})\n\tdelete(f.MountCheckErrors, target)\n\treturn nil\n}",
"func Unmount(mount string, flags int) error {\n\treturn ErrNotImplementOnUnix\n}",
"func unmountSquashFS(ctx context.Context, mountPath string, uo unmountOpts) error {\n\targs := []string{\n\t\t\"-u\",\n\t\tfilepath.Clean(mountPath),\n\t}\n\tcmd := exec.CommandContext(ctx, uo.fusermountPath, args...) //nolint:gosec\n\tcmd.Stdout = uo.stdout\n\tcmd.Stderr = uo.stderr\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn fmt.Errorf(\"failed to unmount: %w\", err)\n\t}\n\n\treturn nil\n}",
"func Unmount(dest string) error {\n\treturn syscall.Unmount(dest, 0)\n}",
"func (s sys) Unmount(path string) error {\n\treturn syscall.Unmount(path, 0)\n}",
"func Unmount(dir string) error {\n\tvar emsgs []string\n\t// Stop the listener.\n\tcomps := strings.Split(dir, \"/\")\n\tif len(comps) < 6 {\n\t\tsErr := fmt.Sprintf(\"Failure to notify nodeagent dir %v\", dir)\n\t\treturn failure(\"unmount\", dir, sErr)\n\t}\n\n\tuid := comps[5]\n\t// TBD: Check if uid is the correct format.\n\tnaInp := &pb.WorkloadInfo{\n\t\tAttrs: &pb.WorkloadInfo_WorkloadAttributes{Uid: uid},\n\t\tWorkloadpath: uid,\n\t}\n\tif configuration.UseGrpc == true {\n\t\tif err := sendWorkloadDeleted(naInp); err != nil {\n\t\t\tsErr := \"Failure to notify nodeagent: \" + err.Error()\n\t\t\treturn failure(\"unmount\", dir, sErr)\n\t\t}\n\t} else if err := removeCredentialFile(naInp); err != nil {\n\t\t// Go ahead and finish the unmount; no need to hold up kubelet.\n\t\temsgs = append(emsgs, \"Failure to delete credentials file: \"+err.Error())\n\t}\n\n\t// unmount the bind mount\n\tif err := getExecCmd(\"/bin/umount\", filepath.Join(dir, \"nodeagent\")).Run(); err != nil {\n\t\temsgs = append(emsgs, fmt.Sprintf(\"unmount of %s failed\", filepath.Join(dir, \"nodeagent\")))\n\t}\n\n\t// unmount the tmpfs\n\tif err := getExecCmd(\"/bin/umount\", dir).Run(); err != nil {\n\t\temsgs = append(emsgs, fmt.Sprintf(\"unmount of %s failed\", dir))\n\t}\n\n\t// delete the directory that was created.\n\tdelDir := filepath.Join(configuration.NodeAgentWorkloadHomeDir, uid)\n\terr := os.Remove(delDir)\n\tif err != nil {\n\t\temsgs = append(emsgs, fmt.Sprintf(\"unmount del failure %s: %s\", delDir, err.Error()))\n\t\t// go head and return ok.\n\t}\n\n\tif len(emsgs) == 0 {\n\t\temsgs = append(emsgs, \"Unmount Ok\")\n\t}\n\n\treturn genericSuccess(\"unmount\", dir, strings.Join(emsgs, \",\"))\n}",
"func Unmount(h hostRunner, target string) error {\n\tout, err := h.RunSSHCommand(fmt.Sprintf(\"findmnt -T %s && sudo umount %s || true\", target, target))\n\tif err != nil {\n\t\treturn errors.Wrap(err, out)\n\t}\n\treturn nil\n}",
"func (img *Image) Unmount(mountPoint string) error {\n\treturn devUnmount(img, mountPoint)\n}",
"func cleanupFilesystem(helper *clients.TestClient, k8sh *utils.K8sHelper, s *suite.Suite, namespace string, filesystemName string) {\n\tlogger.Infof(\"Deleting file system\")\n\terr := helper.FSClient.Delete(filesystemName, namespace)\n\tassert.Nil(s.T(), err)\n\tlogger.Infof(\"File system %s deleted\", filesystemName)\n}",
"func removeTempdir(d string) {\n\tif CLEARTEMP == false {\n\t\treturn\n\t}\n\tos.RemoveAll(d)\n}",
"func Unmount(target string) error {\n\tlogrus.Infof(\"Unmount %s\", target)\n\terr := os.Remove(target)\n\tif err == nil {\n\t\trespondSuccess()\n\t}\n\treturn err\n}",
"func testCmdUnmountFilesystem(t *testing.T) {\n\tt.Log(\"TODO\")\n}",
"func (m *DefaultMounter) Unmount(target string, flags int, timeout int) error {\n\treturn syscall.Unmount(target, flags)\n}",
"func (c *Controller) Unmount(unmountRequest k8sresources.FlexVolumeUnmountRequest) k8sresources.FlexVolumeResponse {\n\tc.logger.Println(\"Controller: unmount start\")\n\tdefer c.logger.Println(\"Controller: unmount end\")\n\tc.logger.Printf(\"unmountRequest %#v\", unmountRequest)\n\tvar detachRequest resources.DetachRequest\n\tvar pvName string\n\n\t// Validate that the mountpoint is a symlink as ubiquity expect it to be\n\trealMountPoint, err := c.exec.EvalSymlinks(unmountRequest.MountPath)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Cannot execute umount because the mountPath [%s] is not a symlink as expected. Error: %#v\", unmountRequest.MountPath, err)\n\t\tc.logger.Println(msg)\n\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t}\n\tubiquityMountPrefix := fmt.Sprintf(resources.PathToMountUbiquityBlockDevices, \"\")\n\tif strings.HasPrefix(realMountPoint, ubiquityMountPrefix) {\n\t\t// SCBE backend flow\n\t\tpvName = path.Base(unmountRequest.MountPath)\n\n\t\tdetachRequest = resources.DetachRequest{Name: pvName, Host: getHost()}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. Error: %#v\",\n\t\t\t\tpvName,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t\tc.logger.Println(fmt.Sprintf(\"Removing the slink [%s] to the real mountpoint [%s]\", unmountRequest.MountPath, realMountPoint))\n\t\terr := c.exec.Remove(unmountRequest.MountPath)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"fail to remove slink %s. Error %#v\", unmountRequest.MountPath, err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{Status: \"Failure\", Message: msg, Device: \"\"}\n\t\t}\n\n\t} else {\n\n\t\tlistVolumeRequest := resources.ListVolumesRequest{}\n\t\tvolumes, err := c.Client.ListVolumes(listVolumeRequest)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\"Error getting the volume list from ubiquity server %#v\", err)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tvolume, err := getVolumeForMountpoint(unmountRequest.MountPath, volumes)\n\t\tif err != nil {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Error finding the volume with mountpoint [%s] from the list of ubiquity volumes %#v. Error is : %#v\",\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\tvolumes,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tdetachRequest = resources.DetachRequest{Name: volume.Name}\n\t\terr = c.Client.Detach(detachRequest)\n\t\tif err != nil && err.Error() != \"fileset not linked\" {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"Failed to unmount volume [%s] on mountpoint [%s]. Error: %#v\",\n\t\t\t\tvolume.Name,\n\t\t\t\tunmountRequest.MountPath,\n\t\t\t\terr)\n\t\t\tc.logger.Println(msg)\n\n\t\t\treturn k8sresources.FlexVolumeResponse{\n\t\t\t\tStatus: \"Failure\",\n\t\t\t\tMessage: msg,\n\t\t\t}\n\t\t}\n\n\t\tpvName = volume.Name\n\t}\n\n\tmsg := fmt.Sprintf(\n\t\t\"Succeeded to umount volume [%s] on mountpoint [%s]\",\n\t\tpvName,\n\t\tunmountRequest.MountPath,\n\t)\n\tc.logger.Println(msg)\n\n\treturn k8sresources.FlexVolumeResponse{\n\t\tStatus: \"Success\",\n\t\tMessage: \"Volume unmounted successfully\",\n\t}\n}",
"func Unmount(out io.Writer, logger log.FieldLogger) (err error) {\n\tdisk, err := queryPhysicalVolume(logger)\n\tif err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif disk == \"\" {\n\t\tlogger.Info(\"No physical volumes found.\")\n\t\treturn nil\n\t}\n\tlogger.Infof(\"Found physical volume on disk %v.\", disk)\n\tconfig := &config{\n\t\tFieldLogger: logger,\n\t\tdisk: disk,\n\t\tout: out,\n\t}\n\tif err = config.removeLingeringDevices(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removeLogicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removeVolumeGroup(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\tif err = config.removePhysicalVolume(); err != nil {\n\t\treturn trace.Wrap(err)\n\t}\n\treturn nil\n}",
"func (cs *ControllerServer) internalUnmount(ctx context.Context, vol *nfsVolume) error {\n\ttargetPath := cs.getInternalMountPath(vol)\n\n\t// Unmount nfs server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := cs.Driver.ns.NodeUnpublishVolume(ctx, &csi.NodeUnpublishVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tTargetPath: cs.getInternalMountPath(vol),\n\t})\n\treturn err\n}",
"func UnexportFilesystem(conn *dbus.Conn, path dbus.ObjectPath) error {\n\treturn conn.Export(nil, path, InterfaceFilesystem)\n}",
"func (f fs) EnsureMountRemoved(path string) error {\n\tklog.V(2).Infof(\"removing %v\", path)\n\n\t_, err := f.sys.Stat(path)\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = f.sys.GetMount(path)\n\tif err != nil && strings.Contains(err.Error(), \"is not a mountpoint\") {\n\t\treturn f.sys.Remove(path)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\t// if we are here, a mount has been found- try to unmount\n\tif err := f.sys.Unmount(path); err != nil {\n\t\treturn err\n\t}\n\treturn f.sys.Remove(path)\n}",
"func UnmountSharedFolders() {\n\tshares := FindSharedFolders()\n\tfor _, s := range shares {\n\t\tshare := strings.TrimSpace(s)\n\t\tRunCommandWithDefaults(DockerHost(), fmt.Sprintf(`sudo umount \"%s\"`, share))\n\t}\n}",
"func Unmount(dest string) error {\n\treturn nil\n}",
"func (m *DefaultMounter) Unmount() error {\n\tif m.mnt == nil {\n\t\treturn nil\n\t}\n\tm.lock.Lock()\n\th := m.mnt\n\tm.lock.Unlock()\n\treturn h.Close()\n}",
"func (d *fsStorage) Unmount(volume *Volume) error {\n\treturn nil\n}",
"func UnmountAllSmbMounts(ctx context.Context, cr *chrome.Chrome) error {\n\t// Open the test API.\n\ttconn, err := cr.TestAPIConn(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to create the test API conn\")\n\t}\n\t// Open the Files App.\n\tfiles, err := filesapp.Launch(ctx, tconn)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to launch Files app\")\n\t}\n\tdefer files.Close(ctx)\n\t// Get connection to foreground Files app to verify changes.\n\tfilesSWA := \"chrome://file-manager/\"\n\tmatchFilesApp := func(t *chrome.Target) bool {\n\t\treturn t.URL == filesSWA\n\t}\n\tconn, err := cr.NewConnForTarget(ctx, matchFilesApp)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to connect to Files app foreground window\")\n\t}\n\tdefer conn.Close()\n\n\tinfo, err := sysutil.MountInfoForPID(sysutil.SelfPID)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to mount info\")\n\t}\n\tfor i := range info {\n\t\tif info[i].Fstype == \"fuse.smbfs\" {\n\t\t\tsmbfsUniqueID := filepath.Base(info[i].MountPath)\n\t\t\tif err := conn.Call(ctx, nil,\n\t\t\t\t`(mount) => new Promise((resolve, reject) =>\n\t\t\t\t\tchrome.fileManagerPrivate.removeMount(mount, () => {\n\t\t\t\t\t\tif (chrome.runtime.lastError) {\n\t\t\t\t\t\t\treject(chrome.runtime.lastError.message);\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tresolve();\n\t\t\t\t\t\t}\n\t\t\t\t\t}))`, \"smb:\"+smbfsUniqueID,\n\t\t\t); err != nil {\n\t\t\t\treturn errors.Wrapf(err, \"failed to unmount SMB mountpoint %q\", smbfsUniqueID)\n\t\t\t}\n\t\t\ttesting.ContextLog(ctx, \"Unmounted SMB mountpoint \", smbfsUniqueID)\n\t\t}\n\t}\n\treturn nil\n}",
"func Unmount(path string) error {\n\tcmd := exec.Command(\"umount\", path)\n\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"umount: %s\", utils.OneLine(out))\n\t}\n\n\treturn nil\n}",
"func (z *ZfsH) Unmount(d *Dataset, force bool) (*Dataset, error) {\n\tif d.Type == DatasetSnapshot {\n\t\treturn nil, errors.New(\"cannot unmount snapshots\")\n\t}\n\targs := make([]string, 1, 3)\n\targs[0] = \"umount\"\n\tif force {\n\t\targs = append(args, \"-f\")\n\t}\n\targs = append(args, d.Name)\n\t_, err := z.zfs(args...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn z.GetDataset(d.Name)\n}",
"func (d *lvm) UnmountVolume(vol Volume, keepBlockDev bool, op *operations.Operation) (bool, error) {\n\tunlock := vol.MountLock()\n\tdefer unlock()\n\n\tvar err error\n\tourUnmount := false\n\tmountPath := vol.MountPath()\n\n\trefCount := vol.MountRefCountDecrement()\n\n\t// Check if already mounted.\n\tif vol.contentType == ContentTypeFS && filesystem.IsMountPoint(mountPath) {\n\t\tif refCount > 0 {\n\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\treturn false, ErrInUse\n\t\t}\n\n\t\terr = TryUnmount(mountPath, 0)\n\t\tif err != nil {\n\t\t\treturn false, fmt.Errorf(\"Failed to unmount LVM logical volume: %w\", err)\n\t\t}\n\n\t\td.logger.Debug(\"Unmounted logical volume\", logger.Ctx{\"volName\": vol.name, \"path\": mountPath, \"keepBlockDev\": keepBlockDev})\n\n\t\t// We only deactivate filesystem volumes if an unmount was needed to better align with our\n\t\t// unmount return value indicator.\n\t\tif !keepBlockDev {\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tourUnmount = true\n\t} else if vol.contentType == ContentTypeBlock {\n\t\t// For VMs, unmount the filesystem volume.\n\t\tif vol.IsVMBlock() {\n\t\t\tfsVol := vol.NewVMBlockFilesystemVolume()\n\t\t\tourUnmount, err = d.UnmountVolume(fsVol, false, op)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\t\t}\n\n\t\tvolDevPath := d.lvmDevPath(d.config[\"lvm.vg_name\"], vol.volType, vol.contentType, vol.name)\n\t\tif !keepBlockDev && shared.PathExists(volDevPath) {\n\t\t\tif refCount > 0 {\n\t\t\t\td.logger.Debug(\"Skipping unmount as in use\", logger.Ctx{\"volName\": vol.name, \"refCount\": refCount})\n\t\t\t\treturn false, ErrInUse\n\t\t\t}\n\n\t\t\t_, err = d.deactivateVolume(vol)\n\t\t\tif err != nil {\n\t\t\t\treturn false, err\n\t\t\t}\n\n\t\t\tourUnmount = true\n\t\t}\n\t}\n\n\treturn ourUnmount, nil\n}",
"func (c *Client) Unmount(export string) error {\n\tdata := map[string]interface{}{\n\t\t\"Export\": export,\n\t}\n\tretcode, stdout, stderr, err := executeScript(*c.SSHConfig, \"nfs_client_share_unmount.sh\", data)\n\treturn handleExecuteScriptReturn(retcode, stdout, stderr, err, \"Error executing script to unmount remote NFS share\")\n}",
"func (proxy *remoteDriverProxy) Unmount(name, id string) error {\n\tvar req = remoteVolumeUnmountReq{\n\t\tName: name,\n\t\tID: id,\n\t}\n\n\tvar resp remoteVolumeUnmountResp\n\n\tif err := proxy.client.CallService(remoteVolumeUnmountService, &req, &resp, true); err != nil {\n\t\treturn err\n\t}\n\n\tif resp.Err != \"\" {\n\t\treturn errors.New(resp.Err)\n\t}\n\n\treturn nil\n}",
"func execUnmount(mountDir string) error {\n\t// CONTAINER=`docker ps --filter \"label=mountpath=${mount_dir}\" --format \"{{.ID}}\"`\n\toutput, err := exec.Command(\"docker\",\n\t\t\"ps\",\n\t\t\"--filter\",\n\t\t\"label=mountpath=\"+mountDir,\n\t\t\"--format\",\n\t\t\"{{.ID}}\").CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"docker ps failed: %v\", err)\n\t}\n\n\t// docker rm ${CONTAINER} -f\n\tstr := strings.Replace(string(output), \"\\n\", \"\", -1)\n\t_, err = exec.Command(\"docker\",\n\t\t\"rm\",\n\t\tstr,\n\t\t\"-f\").CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"docker rm failed: %v\", err)\n\t}\n\n\t// umount -l ${mount_dir}\n\t_, err = exec.Command(\"umount\",\n\t\t\"-l\",\n\t\tmountDir).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"umount failed: %v\", err)\n\t}\n\n\t// rmdir ${mount_dir}\n\t_, err = exec.Command(\"rm\",\n\t\t\"-rf\",\n\t\tmountDir).CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"rmdir failed: %v\", err)\n\t}\n\n\treturn nil\n}",
"func RemoveTempFolders() {\n\tclose(folders)\n\tfor f := range folders {\n\t\tos.RemoveAll(f)\n\t}\n}",
"func (mounter *csiProxyMounterV1Beta) Unmount(target string) error {\n\tklog.V(4).Infof(\"Unmount: %s\", target)\n\treturn mounter.Rmdir(target)\n}",
"func (v *Volume) unmount(force bool) error {\n\tif !v.needsMount() {\n\t\treturn nil\n\t}\n\n\t// Update the volume from the DB to get an accurate mount counter.\n\tif err := v.update(); err != nil {\n\t\treturn err\n\t}\n\n\tif v.state.MountCount == 0 {\n\t\tlogrus.Debugf(\"Volume %s already unmounted\", v.Name())\n\t\treturn nil\n\t}\n\n\tif !force {\n\t\tv.state.MountCount--\n\t} else {\n\t\tv.state.MountCount = 0\n\t}\n\n\tlogrus.Debugf(\"Volume %s mount count now at %d\", v.Name(), v.state.MountCount)\n\n\tif v.state.MountCount == 0 {\n\t\tif v.UsesVolumeDriver() {\n\t\t\tif v.plugin == nil {\n\t\t\t\treturn fmt.Errorf(\"volume plugin %s (needed by volume %s) missing: %w\", v.Driver(), v.Name(), define.ErrMissingPlugin)\n\t\t\t}\n\n\t\t\treq := new(pluginapi.UnmountRequest)\n\t\t\treq.Name = v.Name()\n\t\t\treq.ID = pseudoCtrID\n\t\t\tif err := v.plugin.UnmountVolume(req); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn v.save()\n\t\t} else if v.config.Driver == define.VolumeDriverImage {\n\t\t\tif _, err := v.runtime.storageService.UnmountContainerImage(v.config.StorageID, force); err != nil {\n\t\t\t\treturn fmt.Errorf(\"unmounting volume %s image: %w\", v.Name(), err)\n\t\t\t}\n\n\t\t\tv.state.MountPoint = \"\"\n\t\t\treturn v.save()\n\t\t}\n\n\t\t// Unmount the volume\n\t\tif err := detachUnmount(v.config.MountPoint); err != nil {\n\t\t\tif err == unix.EINVAL {\n\t\t\t\t// Ignore EINVAL - the mount no longer exists.\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"unmounting volume %s: %w\", v.Name(), err)\n\t\t}\n\t\tlogrus.Debugf(\"Unmounted volume %s\", v.Name())\n\t}\n\n\treturn v.save()\n}",
"func TestVolumeUnmountsFromForceDeletedPod(ctx context.Context, c clientset.Interface, f *framework.Framework, clientPod *v1.Pod, volumePath string) {\n\tTestVolumeUnmountsFromDeletedPodWithForceOption(ctx, c, f, clientPod, true, false, nil, volumePath)\n}",
"func (p *Tmpfs) Detach(ctx driver.Context, v *types.Volume) error {\n\tctx.Log.Debugf(\"Tmpfs detach volume: %s\", v.Name)\n\tmountPath := v.Path()\n\treqID := v.Option(\"reqID\")\n\tids := v.Option(\"ids\")\n\n\tarr := strings.Split(ids, \",\")\n\tnewIDs := []string{}\n\tfor _, id := range arr {\n\t\tif id != reqID {\n\t\t\tnewIDs = append(newIDs, reqID)\n\t\t}\n\t}\n\n\tif len(newIDs) == 0 && utils.IsMountpoint(mountPath) {\n\t\tif err := syscall.Unmount(mountPath, 0); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to umount %q, err: %v\", mountPath, err)\n\t\t}\n\n\t\tif err := os.Remove(mountPath); err != nil && !os.IsNotExist(err) {\n\t\t\treturn fmt.Errorf(\"remove %q directory failed, err: %v\", mountPath, err)\n\t\t}\n\n\t\tv.SetOption(\"freeTime\", strconv.FormatInt(time.Now().Unix(), 10))\n\t}\n\n\tv.SetOption(\"ids\", strings.Join(newIDs, \",\"))\n\n\treturn nil\n}",
"func (d *driverInfo) Unmount(volume *Volume) error {\n\tif err := volume.CheckMounted(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := d.storage.Unmount(volume); err != nil {\n\t\treturn err\n\t}\n\n\tif err := fs.RemoveDir(volume.MountPath, true); err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"name\": volume.Name,\n\t\t\t\"mountPath\": volume.MountPath,\n\t\t\t\"error\": err,\n\t\t}).Warning(\"error removing mount path\")\n\t}\n\n\tvolume.MountPath = \"\"\n\treturn nil\n}",
"func (s *schg) dropTmpDirs() (err error) {\n\tfor _, p := range s.tmp {\n\t\te := os.RemoveAll(p)\n\t\tif err == nil {\n\t\t\terr = e\n\t\t}\n\t}\n\ts.tmp = make([]string, 0)\n\treturn\n}",
"func (gc *GlobalOpts) CleanupTmp() {\n\terr := os.RemoveAll(gc.TmpDir)\n\tif err != nil {\n\t\tlog.Printf(\"[ERROR] Failed to remove temporary directory %v\", err)\n\t}\n}",
"func TmpfsRoot(l *LinuxFactory) error {\n\tmounted, err := mount.Mounted(l.Root)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !mounted {\n\t\tif err := syscall.Mount(\"tmpfs\", l.Root, \"tmpfs\", 0, \"\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func OptUnmountFusermountPath(path string) UnmountOpt {\n\treturn func(mo *unmountOpts) error {\n\t\tif filepath.Base(path) == path {\n\t\t\treturn errFusermountPathInvalid\n\t\t}\n\t\tmo.fusermountPath = path\n\t\treturn nil\n\t}\n}",
"func (c *client) Unmount(\n\tctx types.Context,\n\tmountPoint string,\n\topts types.Store) error {\n\n\tif c.isController() {\n\t\treturn utils.NewUnsupportedForClientTypeError(\n\t\t\tc.clientType, \"Unmount\")\n\t}\n\n\tif lsxSO, _ := c.Supported(ctx, opts); !lsxSO.Umount() {\n\t\treturn errExecutorNotSupported\n\t}\n\n\tctx = context.RequireTX(ctx.Join(c.ctx))\n\n\tserviceName, ok := context.ServiceName(ctx)\n\tif !ok {\n\t\treturn goof.New(\"missing service name\")\n\t}\n\n\tsi, err := c.getServiceInfo(serviceName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdriverName := si.Driver.Name\n\n\tif _, err = c.runExecutor(\n\t\tctx,\n\t\tdriverName,\n\t\ttypes.LSXCmdUmount,\n\t\tmountPoint); err != nil {\n\t\treturn err\n\t}\n\n\tctx.Debug(\"xli umount success\")\n\treturn nil\n}",
"func TestMountDevFd(t *testing.T) {\n\trealMountPoint, err := ioutil.TempDir(\"\", t.Name())\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tdefer syscall.Rmdir(realMountPoint)\n\n\t// Call the fusermount suid helper to obtain the file descriptor in place\n\t// of a privileged parent.\n\tvar fuOpts MountOptions\n\tfd, err := callFusermount(realMountPoint, &fuOpts)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tfdMountPoint := fmt.Sprintf(\"/dev/fd/%d\", fd)\n\n\t// Real test starts here:\n\t// See if we can feed fdMountPoint to NewServer\n\tfs := NewDefaultRawFileSystem()\n\topts := MountOptions{\n\t\tDebug: true,\n\t}\n\tsrv, err := NewServer(fs, fdMountPoint, &opts)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\tgo srv.Serve()\n\tif err := srv.WaitMount(); err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// If we are actually mounted, we should get ENOSYS.\n\t//\n\t// This won't deadlock despite pollHack not working for `/dev/fd/N` mounts\n\t// because functions in the syscall package don't use the poller.\n\tvar st syscall.Stat_t\n\terr = syscall.Stat(realMountPoint, &st)\n\tif err != syscall.ENOSYS {\n\t\tt.Errorf(\"expected ENOSYS, got %v\", err)\n\t}\n\n\t// Cleanup is somewhat tricky because `srv` does not know about\n\t// `realMountPoint`, so `srv.Unmount()` cannot work.\n\t//\n\t// A normal user has to call `fusermount -u` for themselves to unmount.\n\t// But in this test we can monkey-patch `srv.mountPoint`.\n\tsrv.mountPoint = realMountPoint\n\tif err := srv.Unmount(); err != nil {\n\t\tt.Error(err)\n\t}\n}",
"func (c *CryptohomeBinary) Unmount(ctx context.Context, username string) ([]byte, error) {\n\treturn c.call(ctx, \"--action=unmount\", \"--user=\"+username)\n}",
"func (d *Driver) internalUnmount(ctx context.Context, vol *smbVolume) error {\n\ttargetPath := getInternalMountPath(d.workingMountDir, vol)\n\n\t// Unmount smb server at base-dir\n\tklog.V(4).Infof(\"internally unmounting %v\", targetPath)\n\t_, err := d.NodeUnstageVolume(ctx, &csi.NodeUnstageVolumeRequest{\n\t\tVolumeId: vol.id,\n\t\tStagingTargetPath: targetPath,\n\t})\n\treturn err\n}",
"func (client GlusterClient) Unmount(mountPath string) error {\n\tcommand := exec.Command(\"umount\", mountPath)\n\terr := command.Run()\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error {\n\tlogrus.Debugf(\"devmapper: UnmountDevice START(hash=%s)\", hash)\n\tdefer logrus.Debugf(\"devmapper: UnmountDevice END(hash=%s)\", hash)\n\n\tinfo, err := devices.lookupDeviceWithLock(hash)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinfo.lock.Lock()\n\tdefer info.lock.Unlock()\n\n\tdevices.Lock()\n\tdefer devices.Unlock()\n\n\tlogrus.Debugf(\"devmapper: Unmount(%s)\", mountPath)\n\tif err := mount.Unmount(mountPath); err != nil {\n\t\tif ok, _ := Mounted(mountPath); ok {\n\t\t\treturn err\n\t\t}\n\t}\n\tlogrus.Debug(\"devmapper: Unmount done\")\n\n\t// Remove the mountpoint here. Removing the mountpoint (in newer kernels)\n\t// will cause all other instances of this mount in other mount namespaces\n\t// to be killed (this is an anti-DoS measure that is necessary for things\n\t// like devicemapper). This is necessary to avoid cases where a libdm mount\n\t// that is present in another namespace will cause subsequent RemoveDevice\n\t// operations to fail. We ignore any errors here because this may fail on\n\t// older kernels which don't have\n\t// torvalds/linux@8ed936b5671bfb33d89bc60bdcc7cf0470ba52fe applied.\n\tif err := os.Remove(mountPath); err != nil {\n\t\tlogrus.Debugf(\"devmapper: error doing a remove on unmounted device %s: %v\", mountPath, err)\n\t}\n\n\treturn devices.deactivateDevice(info)\n}",
"func (f *fixture) TearDown(ctx context.Context, s *testing.FixtState) {\n\tif f.startChrome && f.cr != nil {\n\t\tif err := UnmountAllSmbMounts(ctx, f.cr); err != nil {\n\t\t\ts.Error(\"Failed to unmount all SMB mounts: \", err)\n\t\t}\n\t}\n\tf.cr = nil\n\tif err := f.server.Stop(ctx); err != nil {\n\t\ts.Error(\"Failed to stop smbd: \", err)\n\t}\n\tf.server = nil\n\tif err := os.RemoveAll(f.tempDir); err != nil {\n\t\ts.Error(\"Failed to remove temporary guest share: \", err)\n\t}\n\tf.tempDir = \"\"\n}",
"func unpackRootfs(b *sytypes.Bundle, tmpfsRef types.ImageReference, sysCtx *types.SystemContext) (err error) {\n\tvar mapOptions umocilayer.MapOptions\n\n\t// Allow unpacking as non-root\n\tif os.Geteuid() != 0 {\n\t\tmapOptions.Rootless = true\n\n\t\tuidMap, err := idtools.ParseMapping(fmt.Sprintf(\"0:%d:1\", os.Geteuid()))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing uidmap: %s\", err)\n\t\t}\n\t\tmapOptions.UIDMappings = append(mapOptions.UIDMappings, uidMap)\n\n\t\tgidMap, err := idtools.ParseMapping(fmt.Sprintf(\"0:%d:1\", os.Getegid()))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error parsing gidmap: %s\", err)\n\t\t}\n\t\tmapOptions.GIDMappings = append(mapOptions.GIDMappings, gidMap)\n\t}\n\n\tengineExt, err := umoci.OpenLayout(b.Path)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error opening layout: %s\", err)\n\t}\n\n\t// Obtain the manifest\n\timageSource, err := tmpfsRef.NewImageSource(context.Background(), sysCtx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error creating image source: %s\", err)\n\t}\n\tmanifestData, mediaType, err := imageSource.GetManifest(context.Background(), nil)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error obtaining manifest source: %s\", err)\n\t}\n\tif mediaType != imgspecv1.MediaTypeImageManifest {\n\t\treturn fmt.Errorf(\"error verifying manifest media type: %s\", mediaType)\n\t}\n\tvar manifest imgspecv1.Manifest\n\tjson.Unmarshal(manifestData, &manifest)\n\n\t// UnpackRootfs from umoci v0.4.2 expects a path to a non-existing directory\n\tos.RemoveAll(b.Rootfs())\n\n\t// Unpack root filesystem\n\treturn umocilayer.UnpackRootfs(context.Background(), engineExt, b.Rootfs(), manifest, &mapOptions)\n}",
"func (detacher *azureDiskDetacher) UnmountDevice(deviceMountPath string) error {\n\tvolume := path.Base(deviceMountPath)\n\tif err := util.UnmountPath(deviceMountPath, detacher.mounter); err != nil {\n\t\tglog.Errorf(\"Error unmounting %q: %v\", volume, err)\n\t\treturn err\n\t} else {\n\t\treturn nil\n\t}\n}",
"func Unmount(d Driver, vName string) error {\n\tlog.Debugf(\"Entering Unmount: name: %s\", vName)\n\td.GetLock().Lock()\n\tdefer d.GetLock().Unlock()\n\tv, m, err := getVolumeMount(d, vName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif m.GetConnections() <= 1 {\n\t\tcmd := fmt.Sprintf(\"/usr/bin/umount %s\", m.GetPath())\n\t\tif err := d.RunCmd(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tSetN(0, m, v)\n\t} else {\n\t\tAddN(-1, m, v)\n\t}\n\n\treturn d.SaveConfig()\n}",
"func (m *KMount) Close() error {\n\tvar umntErr error\n\tfor i := 0; i < 4; i++ {\n\t\tif umntErr = syscall.Unmount(m.mntPoint, syscall.MNT_DETACH); umntErr != nil {\n\t\t\ttime.Sleep(250 * time.Millisecond)\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\tif umntErr != nil {\n\t\treturn fmt.Errorf(\"unmount failed: %v\", umntErr)\n\t}\n\n\tif err := m.loop.Detach(); err != nil {\n\t\treturn fmt.Errorf(\"loopback detach failed: %v\", err)\n\t}\n\tif m.mntPoint != \"\" {\n\t\treturn os.Remove(m.mntPoint)\n\t}\n\treturn nil\n}",
"func (d *MinioDriver) unmountVolume(volume *minioVolume) error {\n\treturn exec.Command(\"umount\", volume.mountpoint).Run()\n}",
"func (d ImagefsDriver) Unmount(r *volume.UnmountRequest) error {\n\tfmt.Printf(\"-> Unmount %+v\\n\", r)\n\tcontainerID, err := d.FindVolumeContainer(r.Name)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\n\ttimeout := time.Second * 5\n\terr = d.cli.ContainerStop(\n\t\tcontext.Background(),\n\t\tcontainerID,\n\t\t&timeout,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"unexpected error: %s\", err)\n\t}\n\tfmt.Printf(\"<- OK\\n\")\n\treturn nil\n}",
"func (driver *Driver) Unmount(volumeName, volumeID string) error {\n\tif volumeName == \"\" && volumeID == \"\" {\n\t\treturn errors.New(\"Missing volume name or ID\")\n\t}\n\n\tinstances, err := driver.sdm.GetInstance()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(instances) == 0:\n\t\treturn errors.New(\"No instances\")\n\tcase len(instances) > 1:\n\t\treturn errors.New(\"Too many instances returned, limit the storagedrivers\")\n\t}\n\n\tvolumes, err := driver.sdm.GetVolume(volumeID, volumeName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch {\n\tcase len(volumes) == 0:\n\t\treturn errors.New(\"No volumes returned by name\")\n\tcase len(volumes) > 1:\n\t\treturn errors.New(\"Multiple volumes returned by name\")\n\t}\n\n\tvolumeAttachment, err := driver.sdm.GetVolumeAttach(volumes[0].VolumeID, instances[0].InstanceID)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(volumeAttachment) == 0 {\n\t\treturn nil\n\t}\n\n\tmounts, err := driver.osdm.GetMounts(volumeAttachment[0].DeviceName, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(mounts) == 0 {\n\t\treturn nil\n\t}\n\n\terr = driver.osdm.Unmount(mounts[0].Mountpoint)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = driver.sdm.DetachVolume(false, volumes[0].VolumeID, \"\")\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}",
"func (b *BootstrapClient) unmountGeneric() error {\n\treturn b.usingVaultRootToken(func() error {\n\t\tfor _, entry := range b.config.genericData {\n\t\t\tif err := b.unmount(entry.Path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n}",
"func (img *Image) UnmountAndUnmap(mountPoint string) error {\n\treturn devUnmountAndUnmap(img, mountPoint)\n}",
"func (z *zfsctl) Umount(ctx context.Context, name string, force, all bool) *execute {\n\targs := []string{\"umount\"}\n\tif force {\n\t\targs = append(args, \"-f\")\n\t}\n\tif all {\n\t\targs = append(args, \"-a\")\n\t} else {\n\t\targs = append(args, name)\n\t}\n\treturn &execute{ctx: ctx, name: z.cmd, args: args}\n}",
"func (c *flockerVolumeUnmounter) TearDown() error {\n\treturn c.TearDownAt(c.GetPath())\n}",
"func releaseMountpoint(path string) error {\n\tif _, err := os.Stat(path); os.IsNotExist(err) {\n\t\treturn nil\n\t}\n\tif err := mymount.Unmount(path); err != nil {\n\t\tlogrus.Errorf(\"releaseMountpoint: Failed to umount: %s, error: %s, still try to remove path\", path, err)\n\t}\n\tif err := os.RemoveAll(path); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n\n}",
"func UnregisterVolatileFileSystem(fs *VolatileFileSystem) {\n\tvolatileVFSLock.Lock()\n\tdefer volatileVFSLock.Unlock()\n\n\tfor iFs := range volatileVFSs {\n\t\tif volatileVFSs[iFs] == fs.vfs {\n\t\t\tC.sqlite3VolatileUnregister(fs.vfs.pVfs)\n\t\t\tC.free(unsafe.Pointer(fs.zName))\n\t\t\tdelete(volatileVFSs, iFs)\n\t\t\treturn\n\t\t}\n\t}\n\n\tpanic(\"unknown volatile file system\")\n}",
"func (fs FilesystemStorage) TempDir() string {\n\treturn filepath.Join(fs.String(), \"tmp\")\n}",
"func (d *detacherDefaults) UnmountDevice(deviceMountPath string) error {\n\tklog.Warning(logPrefix(d.plugin.flexVolumePlugin), \"using default UnmountDevice for device mount path \", deviceMountPath)\n\treturn mount.CleanupMountPoint(deviceMountPath, d.plugin.host.GetMounter(d.plugin.GetPluginName()), false)\n}",
"func Mount(f fs.Fs, mountpoint string) error {\n\tif debugFUSE {\n\t\tfuse.Debug = func(msg interface{}) {\n\t\t\tfs.Debug(\"fuse\", \"%v\", msg)\n\t\t}\n\t}\n\n\t// Mount it\n\terrChan, err := mount(f, mountpoint)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to mount FUSE fs\")\n\t}\n\n\t// Wait for umount\n\terr = <-errChan\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to umount FUSE fs\")\n\t}\n\n\treturn nil\n}",
"func (n *nfsManager) UnExportFs(path string, host string) error {\n\treturn n.commandRetrier(unExportFSCommandLine(path, host), n.Command)\n}",
"func (env Env) unmountDevice(mnt mount) error {\n\tlog.Printf(\"Attempting to unmount device %s at %s.\", mnt.device, mnt.mountPoint)\n\t_, err := env.OsCommandRunner.Run(wrapToEnterHostMountNamespace(\"umount\", mnt.mountPoint)...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"Failed to unmount %s at %s: %v\", mnt.device, mnt.mountPoint, err)\n\t}\n\treturn nil\n}",
"func UnmountViaEmptyDir(dir string, host volume.VolumeHost, volName string, volSpec volume.Spec, podUID utypes.UID) error {\n\tklog.V(3).Infof(\"Tearing down volume %v for pod %v at %v\", volName, podUID, dir)\n\n\t// Wrap EmptyDir, let it do the teardown.\n\twrapped, err := host.NewWrapperUnmounter(volName, volSpec, podUID)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn wrapped.TearDownAt(dir)\n}",
"func (s *ServicesWidget) Unmount() error {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\ts.mounted = false\n\treturn nil\n\n}",
"func (c *flockerVolumeUnmounter) TearDownAt(dir string) error {\n\treturn util.UnmountPath(dir, c.mounter)\n}",
"func (c *CryptohomeMountInfo) CleanUpMount(ctx context.Context, user string) error {\n\tif _, err := c.cryptohome.Unmount(ctx, user); err != nil {\n\t\treturn errors.Wrap(err, \"failed to unmount\")\n\t}\n\tif _, err := c.cryptohome.RemoveVault(ctx, user); err != nil {\n\t\treturn errors.Wrap(err, \"failed to remove vault\")\n\t}\n\tmounted, err := c.IsMounted(ctx, user)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed to get mount info\")\n\t}\n\tif mounted {\n\t\treturn errors.Errorf(\"mount point of %q still exists\", user)\n\t}\n\treturn nil\n}",
"func deleteTempDir() error {\n\tif !FileExists(tempDir) {\n\t\treturn nil\n\t}\n\n\tDebugFunc(tempDir)\n\n\terr := filepath.Walk(tempDir, func(path string, info os.FileInfo, err error) error {\n\t\tif !info.IsDir() && !IsFileReadOnly(path) {\n\t\t\terr := SetFileReadOnly(path, false)\n\t\t\tif Error(err) {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\treturn nil\n\t})\n\tif Error(err) {\n\t\treturn err\n\t}\n\n\terr = os.RemoveAll(tempDir)\n\tif Error(err) {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (b *BootstrapClient) unmountTransit() error {\n\treturn b.usingVaultRootToken(func() error {\n\t\tfor _, entry := range b.config.transitData {\n\t\t\tif err := b.unmount(entry.OutputPath); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\treturn b.unmount(b.config.transitMountPath)\n\t})\n}",
"func mount(\n\tdir string,\n\tcfg *MountConfig,\n\tready chan<- error) (dev *os.File, err error) {\n\t// On linux, mounting is never delayed.\n\tready <- nil\n\n\t// Create a socket pair.\n\tfds, err := syscall.Socketpair(syscall.AF_FILE, syscall.SOCK_STREAM, 0)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Socketpair: %v\", err)\n\t\treturn\n\t}\n\n\t// Wrap the sockets into os.File objects that we will pass off to fusermount.\n\twriteFile := os.NewFile(uintptr(fds[0]), \"fusermount-child-writes\")\n\tdefer writeFile.Close()\n\n\treadFile := os.NewFile(uintptr(fds[1]), \"fusermount-parent-reads\")\n\tdefer readFile.Close()\n\n\t// Start fusermount, passing it pipes for stdout and stderr.\n\tcmd := exec.Command(\n\t\t\"fusermount\",\n\t\t\"-o\", cfg.toOptionsString(),\n\t\t\"--\",\n\t\tdir,\n\t)\n\n\tcmd.Env = append(os.Environ(), \"_FUSE_COMMFD=3\")\n\tcmd.ExtraFiles = []*os.File{writeFile}\n\n\tstdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"StdoutPipe: %v\", err)\n\t\treturn\n\t}\n\n\tstderr, err := cmd.StderrPipe()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"StderrPipe: %v\", err)\n\t\treturn\n\t}\n\n\terr = cmd.Start()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"Starting fusermount: %v\", err)\n\t\treturn\n\t}\n\n\t// Log fusermount output until it closes stdout and stderr.\n\tvar wg sync.WaitGroup\n\twg.Add(2)\n\tgo lineLogger(&wg, \"mount helper output\", stdout)\n\tgo lineLogger(&wg, \"mount helper error\", stderr)\n\twg.Wait()\n\n\t// Wait for the command.\n\terr = cmd.Wait()\n\tif err != nil {\n\t\terr = fmt.Errorf(\"fusermount: %v\", err)\n\t\treturn\n\t}\n\n\t// Wrap the socket file in a connection.\n\tc, err := net.FileConn(readFile)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"FileConn: %v\", err)\n\t\treturn\n\t}\n\tdefer c.Close()\n\n\t// We expect to have a Unix domain socket.\n\tuc, ok := c.(*net.UnixConn)\n\tif !ok {\n\t\terr = fmt.Errorf(\"Expected UnixConn, got %T\", c)\n\t\treturn\n\t}\n\n\t// Read a message.\n\tbuf := make([]byte, 32) // expect 1 byte\n\toob := make([]byte, 32) // expect 24 bytes\n\t_, oobn, _, _, err := uc.ReadMsgUnix(buf, oob)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"ReadMsgUnix: %v\", err)\n\t\treturn\n\t}\n\n\t// Parse the message.\n\tscms, err := syscall.ParseSocketControlMessage(oob[:oobn])\n\tif err != nil {\n\t\terr = fmt.Errorf(\"ParseSocketControlMessage: %v\", err)\n\t\treturn\n\t}\n\n\t// We expect one message.\n\tif len(scms) != 1 {\n\t\terr = fmt.Errorf(\"expected 1 SocketControlMessage; got scms = %#v\", scms)\n\t\treturn\n\t}\n\n\tscm := scms[0]\n\n\t// Pull out the FD returned by fusermount\n\tgotFds, err := syscall.ParseUnixRights(&scm)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"syscall.ParseUnixRights: %v\", err)\n\t\treturn\n\t}\n\n\tif len(gotFds) != 1 {\n\t\terr = fmt.Errorf(\"wanted 1 fd; got %#v\", gotFds)\n\t\treturn\n\t}\n\n\t// Turn the FD into an os.File.\n\tdev = os.NewFile(uintptr(gotFds[0]), \"/dev/fuse\")\n\n\treturn\n}",
"func (d *btrfs) Unmount() (bool, error) {\n\t// Unmount the pool.\n\tourUnmount, err := forceUnmount(GetPoolMountPath(d.name))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\treturn ourUnmount, nil\n}",
"func (fs *Memory) TempFileSystem() *afero.Afero {\n\treturn fs.tempFs\n}",
"func TestTmpfsDevShmNoDupMount(t *testing.T) {\n\tskip.If(t, os.Getuid() != 0, \"skipping test that requires root\")\n\tc := &container.Container{\n\t\tShmPath: \"foobar\", // non-empty, for c.IpcMounts() to work\n\t\tHostConfig: &containertypes.HostConfig{\n\t\t\tIpcMode: containertypes.IPCModeShareable, // default mode\n\t\t\t// --tmpfs /dev/shm:rw,exec,size=NNN\n\t\t\tTmpfs: map[string]string{\n\t\t\t\t\"/dev/shm\": \"rw,exec,size=1g\",\n\t\t\t},\n\t\t},\n\t}\n\td := setupFakeDaemon(t, c)\n\n\t_, err := d.createSpec(context.TODO(), &configStore{}, c)\n\tassert.Check(t, err)\n}",
"func Unlinkat(dirfd int, path string) (err error) {\n\treturn unlinkat(dirfd, path, 0)\n}",
"func Unlink(mountDirectory, outDirectory string) error {\n\tif fi, err := os.Stat(mountDirectory); err != nil {\n\t\treturn err\n\t} else {\n\t\tif !fi.IsDir() {\n\t\t\treturn NotADirectory\n\t\t}\n\t}\n\tif fi, err := os.Stat(outDirectory); err != nil {\n\t\treturn err\n\t} else {\n\t\tif !fi.IsDir() {\n\t\t\treturn NotADirectory\n\t\t}\n\t}\n\tfis, err := ioutil.ReadDir(mountDirectory)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, fi := range fis {\n\t\tif !fi.IsDir() {\n\t\t\tcontinue\n\t\t}\n\t\terr := unmount(path.Join(mountDirectory, fi.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = os.RemoveAll(path.Join(mountDirectory, fi.Name()))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\toutFis, err := ioutil.ReadDir(outDirectory)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, fi := range outFis {\n\t\tif _, err := os.Lstat(path.Join(outDirectory, fi.Name())); err == nil {\n\t\t\terr := os.Remove(path.Join(outDirectory, fi.Name()))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tcontinue\n\t\t} else if os.IsNotExist(err) {\n\t\t\tcontinue\n\t\t}\n\t\tswitch {\n\t\tcase fi.IsDir():\n\t\t\terr = os.RemoveAll(path.Join(outDirectory, fi.Name()))\n\t\tcase !fi.IsDir():\n\t\t\terr = os.Remove(path.Join(outDirectory, fi.Name()))\n\t\tdefault:\n\t\t\tpanic(\"unhandled case\")\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (c *CrosDisks) Unmount(ctx context.Context, devicePath string, options []string) error {\n\tvar status MountError\n\tif err := c.call(ctx, \"Unmount\", devicePath, options).Store(&status); err != nil {\n\t\treturn err\n\t}\n\n\tif status != MountErrorNone {\n\t\treturn status\n\t}\n\n\treturn nil\n}",
"func CleanupTempFile(t *testing.T, fileName string) {\n\terr := os.Remove(fileName)\n\tif err != nil {\n\t\tt.Logf(\"Could not remove temp file: %v. Err: %v\\n\", fileName, err)\n\t}\n}"
] | [
"0.64639926",
"0.6294327",
"0.6294327",
"0.6149197",
"0.60893816",
"0.5964245",
"0.59148586",
"0.5852159",
"0.58511484",
"0.580987",
"0.576928",
"0.5747482",
"0.5734651",
"0.57193625",
"0.57184225",
"0.5698055",
"0.56522834",
"0.562887",
"0.5627233",
"0.56222004",
"0.5548657",
"0.5496173",
"0.5451199",
"0.5448476",
"0.54171",
"0.53806645",
"0.5326383",
"0.5319245",
"0.5308713",
"0.53037864",
"0.5296785",
"0.52834296",
"0.5279671",
"0.52781427",
"0.5259762",
"0.5253814",
"0.5187831",
"0.5178616",
"0.5178336",
"0.5161143",
"0.5156518",
"0.51532865",
"0.51490974",
"0.5135029",
"0.51255757",
"0.51200724",
"0.5116425",
"0.5107129",
"0.51054186",
"0.51016515",
"0.5079356",
"0.5064578",
"0.50298345",
"0.5023131",
"0.50090927",
"0.4994523",
"0.4987722",
"0.49777323",
"0.49749613",
"0.4946043",
"0.4937425",
"0.49146268",
"0.4908133",
"0.48906937",
"0.4890437",
"0.48592126",
"0.48508072",
"0.48467553",
"0.48389566",
"0.48382393",
"0.4837887",
"0.48281422",
"0.479871",
"0.4798532",
"0.479157",
"0.47845912",
"0.47751907",
"0.47640982",
"0.47532856",
"0.4747057",
"0.47441",
"0.4736056",
"0.4734299",
"0.47320217",
"0.47286943",
"0.4725557",
"0.47223768",
"0.46884277",
"0.46699882",
"0.465985",
"0.4652884",
"0.46352264",
"0.46236596",
"0.46147048",
"0.46096787",
"0.4609516",
"0.45987645",
"0.45970875",
"0.4595904"
] | 0.817883 | 1 |
variadic function: an unlimited number of parameters can be passed in, and they are all collected into a single variable | func main(){
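	// hitung is not defined in this row's document snippet. Judging from the
	// call below and the %.2f average printed afterwards, it is presumably a
	// variadic helper along these lines (an assumption, not part of the
	// original data):
	//
	//	func hitung(angka ...int) float64 {
	//		total := 0
	//		for _, n := range angka {
	//			total += n
	//		}
	//		return float64(total) / float64(len(angka))
	//	}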
var rata_rata = hitung(10,2,5,8,7,8,2,5,6,12)
var pesan = fmt.Sprintf("Rata-rata : %.2f",rata_rata)
fmt.Println(pesan)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func FuncChangeToVariadic(_ ...int) {}",
"func example2(a string, val ...int) {\n\tfmt.Println(a)\n\tfmt.Println(val)\n}",
"func FuncAddVariadic(_ ...int) {}",
"func variadic(x ...int) {\n\tfmt.Println(x)\n\tfmt.Printf(\"%T\\n\", x)\n\t// Receives the variables in a slice of the type of the parameters. ([]int)\n\n\tsum := 0\n\tfor _, v := range x {\n\t\tsum += v\n\t}\n\tfmt.Printf(\"The total: %v\\n\", sum)\n}",
"func (TypesObject) IsVariadicParam() bool { return boolResult }",
"func variadic(a ...string) string {\n\tfor _, val := range a {\n\t\tfmt.Print(val)\n\t}\n\treturn \"\"\n}",
"func myFunction1(param [10]int){\n\tfmt.Println(param)\n}",
"func variadic(x ...int){\n\tfmt.Println(x,\" \")\n\tfor k,v:=range x{\n\t\tfmt.Println(k,v)\n\t}\n}",
"func Ln(v ...interface{}) {}",
"func parseVariadic(token Token, argCount int) (bool, int, int) {\n\tif !strings.HasPrefix(token.Text, \"{args[\") {\n\t\treturn false, 0, 0\n\t}\n\tif !strings.HasSuffix(token.Text, \"]}\") {\n\t\treturn false, 0, 0\n\t}\n\n\targRange := strings.TrimSuffix(strings.TrimPrefix(token.Text, \"{args[\"), \"]}\")\n\tif argRange == \"\" {\n\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\"Placeholder \"+token.Text+\" cannot have an empty index\",\n\t\t\tzap.String(\"file\", token.File+\":\"+strconv.Itoa(token.Line)), zap.Strings(\"import_chain\", token.imports))\n\t\treturn false, 0, 0\n\t}\n\n\tstart, end, found := strings.Cut(argRange, \":\")\n\n\t// If no \":\" delimiter is found, this is not a variadic.\n\t// The replacer will pick this up.\n\tif !found {\n\t\treturn false, 0, 0\n\t}\n\n\tvar (\n\t\tstartIndex = 0\n\t\tendIndex = argCount\n\t\terr error\n\t)\n\tif start != \"\" {\n\t\tstartIndex, err = strconv.Atoi(start)\n\t\tif err != nil {\n\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\"Variadic placeholder \"+token.Text+\" has an invalid start index\",\n\t\t\t\tzap.String(\"file\", token.File+\":\"+strconv.Itoa(token.Line)), zap.Strings(\"import_chain\", token.imports))\n\t\t\treturn false, 0, 0\n\t\t}\n\t}\n\tif end != \"\" {\n\t\tendIndex, err = strconv.Atoi(end)\n\t\tif err != nil {\n\t\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\t\"Variadic placeholder \"+token.Text+\" has an invalid end index\",\n\t\t\t\tzap.String(\"file\", token.File+\":\"+strconv.Itoa(token.Line)), zap.Strings(\"import_chain\", token.imports))\n\t\t\treturn false, 0, 0\n\t\t}\n\t}\n\n\t// bound check\n\tif startIndex < 0 || startIndex > endIndex || endIndex > argCount {\n\t\tcaddy.Log().Named(\"caddyfile\").Warn(\n\t\t\t\"Variadic placeholder \"+token.Text+\" indices are out of bounds, only \"+strconv.Itoa(argCount)+\" argument(s) exist\",\n\t\t\tzap.String(\"file\", token.File+\":\"+strconv.Itoa(token.Line)), zap.Strings(\"import_chain\", token.imports))\n\t\treturn false, 0, 0\n\t}\n\treturn true, startIndex, endIndex\n}",
"func main() {\n\n\t// We will create a []int then implement the two functions we created below\n\txi := []int{2, 4, 6, 8, 10}\n\n\t//To send a slice through variadic param, we need ot \"unfurl\" it first\n\tf := foo(xi...)\n\tfmt.Println(\"foo sum:\", f)\n\n\t//Since bar was defined to take []int, we don't need to unfurl the values\n\tb := bar(xi)\n\tfmt.Println(\"bar sum:\", b)\n\n}",
"func Log(v ...interface{}) {\n fmt.Println(v...)\n}",
"func variadicFunction(parameters ...int) int {\n\ttotal := 0\n\tfor _, value := range parameters {\n\t\ttotal += value\n\t}\n\treturn total\n}",
"func D(v ...interface{}) {}",
"func returnArgs(a, b string ){\n\tfmt.Println(a + \" \" + b)\n}",
"func p(a ...interface{}) {\n\tfmt.Println(a)\n}",
"func variadic(x ...int) int {\n\tfmt.Println(x)\n\tfmt.Printf(\"%T\\n\",x)\n\tsum :=0\n\tfor _,v := range x{\n\t\tsum = sum + v\n\t}\n\treturn sum\n}",
"func saludarVarios(edad uint8, nombres ...string) {\n\t// para saber que tivo de datos es una variable\n\tfmt.Printf(\"%T\\n\", nombres)\n\tfor _, v := range nombres {\n\t\tfmt.Println(\"Hola\", v, \"edad\", edad)\n\t}\n}",
"func log(args ...Any) {\n\tfmt.Println(args...)\n}",
"func arglist(s, n int) string {\n\tvar l string\n\tfor i := 0; i < n; i++ {\n\t\tif i > 0 {\n\t\t\tl += fmt.Sprintf(\", $%d\", s+i)\n\t\t} else {\n\t\t\tl += fmt.Sprintf(\"$%d\", s+i)\n\t\t}\n\t}\n\treturn l\n}",
"func vararg(i Instruction, ls *LuaState) {\n\ta, b, _ := i.ABC()\n\ta += 1\n\n\tif b != 1 { // b==0 or b>1\n\t\tls.loadVararg(b - 1)\n\t\t_popResults(a, b, ls)\n\t}\n}",
"func P(v ...interface{}) {}",
"func variadic(val ...int) int {\n\ttotal := 0\n\tfor _, v := range val {\n\t\ttotal += v\n\t}\n\treturn total\n}",
"func variaticArgumentFizzBuzz(vals ...int) {\n\tfizzBuzz(vals)\n}",
"func variadicFunc(nums ...int) {\n\tfmt.Print(nums, \" \")\n\ttotal := 0\n\tfor _, num := range nums {\n\t\ttotal += num\n\t}\n\tfmt.Println(total)\n}",
"func varArgs(nums ...int) {\n for _, n := range nums {\n fmt.Println(n)\n }\n}",
"func twoInt(values ...int) { // expected two params\n\tfmt.Println(values)\n}",
"func Debug(v ...interface{}){\n log.Debug(v)\n}",
"func (c NotificationTarget_addOngoing) Args() NotificationTarget_addOngoing_Params {\n\treturn NotificationTarget_addOngoing_Params{Struct: c.Call.Args()}\n}",
"func Info(args ...interface{}) {\n\n}",
"func validateArgs(linkIndex int, fn reflect.Type, args []Argument) error {\n\tif !fn.IsVariadic() && (fn.NumIn() != len(args)) {\n\t\treturn argumentMismatchError(linkIndex, len(args), fn.NumIn())\n\t}\n\n\treturn nil\n}",
"func InfoArgsSubcmd(args []string) {\n\tfr := gub.CurFrame()\n\tfn := fr.Fn()\n\tif len(args) == 2 {\n\t\tif len(fn.Params) == 0 {\n\t\t\tgub.Msg(\"Function `%s()' has no parameters\", fn.Name())\n\t\t\treturn\n\t\t}\n\t\tfor i, p := range fn.Params {\n\t\t\tgub.Msg(\"%s %s\", fn.Params[i], interp.ToInspect(fr.Env()[p], nil))\n\t\t}\n\t} else {\n\t\tvarname := args[2]\n\t\tfor i, p := range fn.Params {\n\t\t\tif varname == fn.Params[i].Name() {\n\t\t\t\tgub.Msg(\"%s %s\", fn.Params[i], interp.ToInspect(fr.Env()[p], nil))\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}",
"func (fn *formulaFuncs) prepareFinvArgs(name string, argsList *list.List) formulaArg {\n\tif argsList.Len() != 3 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, fmt.Sprintf(\"%s requires 3 arguments\", name))\n\t}\n\tvar probability, d1, d2 formulaArg\n\tif probability = argsList.Front().Value.(formulaArg).ToNumber(); probability.Type != ArgNumber {\n\t\treturn probability\n\t}\n\tif d1 = argsList.Front().Next().Value.(formulaArg).ToNumber(); d1.Type != ArgNumber {\n\t\treturn d1\n\t}\n\tif d2 = argsList.Back().Value.(formulaArg).ToNumber(); d2.Type != ArgNumber {\n\t\treturn d2\n\t}\n\tif probability.Number <= 0 || probability.Number > 1 {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, formulaErrorNUM)\n\t}\n\tif d1.Number < 1 || d1.Number >= math.Pow10(10) {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, formulaErrorNUM)\n\t}\n\tif d2.Number < 1 || d2.Number >= math.Pow10(10) {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, formulaErrorNUM)\n\t}\n\treturn newListFormulaArg([]formulaArg{probability, d1, d2})\n}",
"func TestVariadicFunctions(t *testing.T) {\n\ta := []int{1, 2, 3}\n\tfmt.Println(\"a = \", a)\n\t// this is legal\n\tresult := append(a, 4, 5, 6)\n\tfmt.Println(\"result = \", result)\n\t// and so is this\n\tb := []int{4, 5, 6}\n\tresult = append(a, b...)\n\tfmt.Println(\"result = \", result)\n\t// but these are NOT\n\t// result = append(a, b) // only first param is a slice, others must be elements\n\t// result = append(a, 7, b...) // can only use triple-dot if there are no other elements\n\t// result = append(a, b..., 7 ) // can only use triple-dot if there are no other elements\n}",
"func (pc *programCode) createParams(argSlice []string) {\n\tcode := \"\"\n\tregisterSlice := []string{\"rdi\", \"rsi\", \"rdx\", \"rcx\", \"r8\", \"r9\"} // SysV ABI calling register for parameters\n\tfor i := 0; i < len(argSlice) && i < 6; i++ {\n\t\tif _, err := strconv.Atoi(argSlice[i]); err == nil {\n\t\t\tcode += \"\\tmov \" + registerSlice[i] + argSlice[i] + \"\\n\"\n\t\t} else {\n\t\t\tcode += \"\\tmov \" + registerSlice[i] + \"[\" + argSlice[i] + \"]\\n\"\n\t\t}\n\t}\n\tpc.appendCode(code)\n}",
"func Warn(v ...interface{}){\n log.Warn(v)\n}",
"func reflectArgs(fnType reflect.Type, args []Argument) []reflect.Value {\n\tin := make([]reflect.Value, len(args))\n\n\tfor k, arg := range args {\n\t\tif arg == nil {\n\t\t\t// Use the zero value of the function parameter type,\n\t\t\t// since \"reflect.Call\" doesn't accept \"nil\" parameters\n\t\t\tin[k] = reflect.New(fnType.In(k)).Elem()\n\t\t} else {\n\t\t\tin[k] = reflect.ValueOf(arg)\n\t\t}\n\t}\n\n\treturn in\n}",
"func (e *Encoder) Args(v []driver.Value) error {\n\t_, err := e.Int16(int16(len(v)))\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor i := 0; i < len(v); i++ {\n\t\t_, err = e.Marshal(v[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func (fi *funcInfo) emitVararg(line, a, n int) {\r\n\tfi.emitABC(line, OP_VARARG, a, n+1, 0)\r\n}",
"func formatSetValuesAsArgs(setValues map[string]string, flag string) []string {\n\targs := []string{}\n\n\t// To make it easier to test, go through the keys in sorted order\n\tkeys := collections.Keys(setValues)\n\tfor _, key := range keys {\n\t\tvalue := setValues[key]\n\t\targValue := fmt.Sprintf(\"%s=%s\", key, value)\n\t\targs = append(args, flag, argValue)\n\t}\n\n\treturn args\n}",
"func (Service) TooManyArguments(a, b string) {\n}",
"func (p *FuncInfo) Vargs(in ...reflect.Type) exec.FuncInfo {\n\tif in[len(in)-1].Kind() != reflect.Slice {\n\t\tlog.Panicln(\"Vargs failed: last argument must be a slice.\")\n\t}\n\tp.in = in\n\tp.setVariadic(nVariadicVariadicArgs)\n\treturn p\n}",
"func Info(v ...interface{}){\n log.Info(v)\n}",
"func printMethodArg(b *strings.Builder, t reflect.Type) {\n\tif t.Kind() == reflect.Pointer {\n\t\tt = t.Elem()\n\t}\n\tswitch t.Kind() {\n\tcase reflect.Map:\n\t\tb.WriteString(\"(...)\")\n\tcase reflect.Struct:\n\t\tBuildTypeString(b, t)\n\t}\n}",
"func callValuesI(b bindingInterface, inj Injections, args []interface{}) (ret []reflect.Value) {\n\ttargetArgCount := b.argCount()\n\tret = make([]reflect.Value, targetArgCount)\n\tic := 0 // count of found injections\n\tiai := 0\n\tfor ai := 0; ai < targetArgCount; ai++ {\n\t\tat := b.argType(ai)\n\t\tvar av reflect.Value\n\n\t\t// Check if this parameter needs to be injected\n\t\tif _, ok := b.base().injections[ai]; ok {\n\t\t\tif in, ok := inj[at]; ok { // a object of type at is provided by InvokeI call\n\t\t\t\tav = reflect.ValueOf(in).Convert(at)\n\t\t\t} else {\n\t\t\t\tpanic(fmt.Errorf(\"Injection for type \\\"%s\\\" not found.\", at))\n\t\t\t}\n\n\t\t\tic++ // skip one input param\n\t\t} else {\n\t\t\tif iai >= len(args) {\n\t\t\t\tpanic(fmt.Errorf(\"Invalid parameter count: %d/%d (%d injections applied)\", iai, len(args), ic))\n\t\t\t}\n\t\t\tav = reflect.ValueOf(args[iai]) // Value object of the current parameter\n\t\t\tiai++ //proceed to next input argument\n\t\t}\n\n\t\t// Assign final value to final call vector.\n\t\tret[ai] = b.base().container.convertParameterValue(av, at)\n\t}\n\n\tif targetArgCount != (iai + ic) {\n\t\tpanic(fmt.Errorf(\"Argument count does not match for method \\\"%s\\\". %d/%d. (%d injections applied)\", b.base().elemName, targetArgCount, (iai + ic), ic))\n\t}\n\n\treturn\n}",
"func (c DBStoreUpdateUploadRetentionFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1, c.Arg2}\n}",
"func (logger *Logger) logGenericArgs(msgTemplate string, err error, args Args, stackDepth int) {\n\tfile, function, line := GetStackInfo(stackDepth + 1)\n\tmsg := msgTemplate\n\tif args != nil {\n\t\tt, templateErr := template.New(\"\").Parse(msgTemplate)\n\t\tif templateErr != nil {\n\t\t\t// While we're sure this is the developer's fault,\n\t\t\t// and this is typically the kind of scenario where we'd panic at yell at them,\n\t\t\t// let's not panic here, because it's especially easy to have logging code\n\t\t\t// that is hard to test (certain kinds of error reporting, for example).\n\t\t\t// Instead let's make the best of the situation.\n\t\t\targs[\"_templateErr\"] = templateErr.Error()\n\t\t} else {\n\t\t\tvar buf bytes.Buffer\n\t\t\ttemplateErr := t.Execute(&buf, args)\n\t\t\tif templateErr != nil {\n\t\t\t\t// see above comment about panicking.\n\t\t\t\targs[\"_templateErr\"] = templateErr.Error()\n\t\t\t} else {\n\t\t\t\tmsg = buf.String()\n\t\t\t}\n\t\t}\n\t}\n\n\tfullArgs := Args{\n\t\t\"msgTemplate\": msgTemplate,\n\t\t\"msg\": msg,\n\t\t\"time\": time.Now().Format(time.RFC3339Nano),\n\t\t\"level\": logger.Level,\n\t\t\"file\": file,\n\t\t\"func\": function,\n\t\t\"line\": line,\n\t\t\"process\": loggerExeName,\n\t}\n\n\tfor k, v := range args {\n\t\tfullArgs[\"arg_\"+k] = v\n\t}\n\n\tif err != nil {\n\t\tfullArgs[\"error\"] = err.Error()\n\t}\n\n\tjsonWriter.Encode(fullArgs)\n\n\tif logger.IsFatal {\n\t\tpanic(msg)\n\t}\n}",
"func PrintFaint(format string, a ...interface{}) { fmt.Println(Faint(format, a...)) }",
"func variadic(typ int, op string, ods exprlist) *expr {\n\treturn &expr{\n\t\tsexp: append(exprlist{atomic(typ, op)}, ods...),\n\t}\n}",
"func (fn *formulaFuncs) prepareDurationArgs(name string, argsList *list.List) formulaArg {\n\tif argsList.Len() != 5 && argsList.Len() != 6 {\n\t\treturn newErrorFormulaArg(formulaErrorVALUE, fmt.Sprintf(\"%s requires 5 or 6 arguments\", name))\n\t}\n\targs := fn.prepareDataValueArgs(2, argsList)\n\tif args.Type != ArgList {\n\t\treturn args\n\t}\n\tsettlement, maturity := args.List[0], args.List[1]\n\tif settlement.Number >= maturity.Number {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, fmt.Sprintf(\"%s requires maturity > settlement\", name))\n\t}\n\tcoupon := argsList.Front().Next().Next().Value.(formulaArg).ToNumber()\n\tif coupon.Type != ArgNumber {\n\t\treturn coupon\n\t}\n\tif coupon.Number < 0 {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, fmt.Sprintf(\"%s requires coupon >= 0\", name))\n\t}\n\tyld := argsList.Front().Next().Next().Next().Value.(formulaArg).ToNumber()\n\tif yld.Type != ArgNumber {\n\t\treturn yld\n\t}\n\tif yld.Number < 0 {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, fmt.Sprintf(\"%s requires yld >= 0\", name))\n\t}\n\tfrequency := argsList.Front().Next().Next().Next().Next().Value.(formulaArg).ToNumber()\n\tif frequency.Type != ArgNumber {\n\t\treturn frequency\n\t}\n\tif !validateFrequency(frequency.Number) {\n\t\treturn newErrorFormulaArg(formulaErrorNUM, formulaErrorNUM)\n\t}\n\tbasis := newNumberFormulaArg(0)\n\tif argsList.Len() == 6 {\n\t\tif basis = argsList.Back().Value.(formulaArg).ToNumber(); basis.Type != ArgNumber {\n\t\t\treturn newErrorFormulaArg(formulaErrorNUM, formulaErrorNUM)\n\t\t}\n\t}\n\treturn newListFormulaArg([]formulaArg{settlement, maturity, coupon, yld, frequency, basis})\n}",
"func QuietLog(string, ...interface{}) {\n}",
"func ForeachArgs(msg *Message) (kn, vn string, hkn, hvn bool, ev *Message) {\n\tif len(msg.Args) == 3 {\n\t\tkn = msg.ArgAt(0).Name()\n\t\tvn = msg.ArgAt(1).Name()\n\t\tev = msg.ArgAt(2)\n\t\thkn, hvn = true, true\n\t} else if len(msg.Args) == 2 {\n\t\tvn = msg.ArgAt(0).Name()\n\t\tev = msg.ArgAt(1)\n\t\thvn = true\n\t} else if len(msg.Args) == 1 {\n\t\tev = msg.ArgAt(0)\n\t}\n\treturn\n}",
"func prepareArgs(n *Notification) ([]string, error) {\n\tswitch {\n\tcase n.Conversation == \"\":\n\t\treturn nil, ErrorMissingConversation\n\tcase n.Message == \"\":\n\t\treturn nil, ErrorMissingMessage\n\tcase n.ExplodingLifetime < 0:\n\t\treturn nil, ErrorBadExplodingTime\n\t}\n\n\targs := []string{\"chat\", \"send\"}\n\tif n.ChannelName != \"\" {\n\t\targs = append(args, \"--channel\", n.ChannelName)\n\t}\n\tif n.Public {\n\t\targs = append(args, \"--public\")\n\t}\n\tif n.ExplodingLifetime > 0 {\n\t\targs = append(args, \"--exploding-lifetime\", fmt.Sprint(n.ExplodingLifetime))\n\t}\n\targs = append(args, n.Conversation, n.Message)\n\treturn args, nil\n}",
"func argsFn(args ...OBJ) OBJ {\n\tl := len(os.Args[1:])\n\tresult := make([]OBJ, l)\n\tfor i, txt := range os.Args[1:] {\n\t\tresult[i] = &object.String{Value: txt}\n\t}\n\treturn &object.Array{Elements: result}\n}",
"func parameteriseValues(args []string, valueMap map[string]string) []string {\n\tfor k, v := range valueMap {\n\t\tkey := strings.Replace(k, \"_\", \"-\", -1)\n\t\targs = append(args, \"--\"+key)\n\n\t\tif fmt.Sprintf(\"%v\", v) != \"\" {\n\t\t\targs = append(args, fmt.Sprintf(\"%v\", v))\n\t\t}\n\t}\n\n\treturn args\n}",
"func testabc(a,b int){\n\n}",
"func (c ExtensionStoreGetByUUIDFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1}\n}",
"func Fatalln(v ...interface{}) {\n\tlog.Fatalln(v...)\n}",
"func (c ResolverUpdateIndexConfigurationByRepositoryIDFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1, c.Arg2}\n}",
"func calcSum(a ...int) int {\n\tfmt.Printf(\"%T type and %v values\\n\", a, a)\n\treturn 2\n}",
"func (c UploadServiceGetUploadsByIDsFuncCall) Args() []interface{} {\n\ttrailing := []interface{}{}\n\tfor _, val := range c.Arg1 {\n\t\ttrailing = append(trailing, val)\n\t}\n\n\treturn append([]interface{}{c.Arg0}, trailing...)\n}",
"func (c AutoIndexingServiceRepositoryIDsWithErrorsFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1, c.Arg2}\n}",
"func Ln(v ...interface{}) {\n\tlog.Println(v...)\n}",
"func (o *arg) reduce(position int, args *[]string) {\n\tif o.GetPositional() {\n\t\to.reducePositional(position, args)\n\t} else {\n\t\to.reduceLongName(position, args)\n\t\to.reduceShortName(position, args)\n\t}\n}",
"func goParams(prefix string, vars []*types.Var) string {\n\tif len(vars) == 0 {\n\t\treturn \"\"\n\t}\n\tvar buf strings.Builder\n\tfor i, v := range vars {\n\t\tif i != 0 {\n\t\t\tbuf.WriteString(\", \")\n\t\t}\n\t\tbuf.WriteString(prefix)\n\t\tbuf.WriteString(v.Name())\n\t}\n\tbuf.WriteString(\" C.SEXP\")\n\treturn buf.String()\n}",
"func helloSum(s string, x ...int) int {\n\tfmt.Println(x)\n\tfmt.Printf(\"%T\\n\", x)\n\t\n\tfmt.Println(s)\n\t\n\tsum := 0\n\tfor _, v := range x {\n\t\tsum = sum + v\n\t}\n\t\n\treturn sum\n}",
"func Warnf(format string, params ...interface{}){\n log.Warnf(format, params)\n}",
"func (p *Parser) buildArg(argDef Value, argType reflect.Type, index int, args *[]reflect.Value) error {\n\tswitch argType.Name() {\n\tcase \"Setter\":\n\t\tfallthrough\n\tcase \"GetSetter\":\n\t\targ, err := p.pathParser(argDef.Path)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Getter\":\n\t\targ, err := p.newGetter(argDef)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v %w\", index, err)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(arg))\n\tcase \"Enum\":\n\t\targ, err := p.enumParser(argDef.Enum)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v must be an Enum\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*arg))\n\tcase \"string\":\n\t\tif argDef.String == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an string\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.String))\n\tcase \"float64\":\n\t\tif argDef.Float == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an float\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Float))\n\tcase \"int64\":\n\t\tif argDef.Int == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be an int\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(*argDef.Int))\n\tcase \"bool\":\n\t\tif argDef.Bool == nil {\n\t\t\treturn fmt.Errorf(\"invalid argument at position %v, must be a bool\", index)\n\t\t}\n\t\t*args = append(*args, reflect.ValueOf(bool(*argDef.Bool)))\n\t}\n\treturn nil\n}",
"func FuncChangeToVariadicDiffType(_ ...uint) {}",
"func (c LSIFStoreTransactFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0}\n}",
"func severalStrings(strings ...string) {\n\tfmt.Println(strings)\n}",
"func example_test_func(a uint64, b, c interface{}, d uint64) {\n\n}",
"func message(str string, a ...interface{}) {\n\tlog.Printf(str, a...)\n}",
"func Println(args ...interface{}){\n\tfmt.Println(args...)\n}",
"func (c DBStoreDirtyRepositoriesFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0}\n}",
"func (c DBStoreDirtyRepositoriesFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0}\n}",
"func transformArgs(n ir.InitNode) {\n\tvar list []ir.Node\n\tswitch n := n.(type) {\n\tdefault:\n\t\tbase.Fatalf(\"transformArgs %+v\", n.Op())\n\tcase *ir.CallExpr:\n\t\tlist = n.Args\n\t\tif n.IsDDD {\n\t\t\treturn\n\t\t}\n\tcase *ir.ReturnStmt:\n\t\tlist = n.Results\n\t}\n\tif len(list) != 1 {\n\t\treturn\n\t}\n\n\tt := list[0].Type()\n\tif t == nil || !t.IsFuncArgStruct() {\n\t\treturn\n\t}\n\n\t// Save n as n.Orig for fmt.go.\n\tif ir.Orig(n) == n {\n\t\tn.(ir.OrigNode).SetOrig(ir.SepCopy(n))\n\t}\n\n\t// Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).\n\ttypecheck.RewriteMultiValueCall(n, list[0])\n}",
"func (c UploadServiceGetAuditLogsForUploadFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1}\n}",
"func newSqreenCallbackParams(recv *dst.FieldList, params *dst.FieldList, ignoredParamPrefix string) (callbackTypeParamList *dst.FieldList, callbackCallParams []dst.Expr) {\n\tvar callbackTypeParams []*dst.Field\n\tvar hookedParams []*dst.Field\n\tif recv != nil {\n\t\thookedParams = recv.List\n\t}\n\tif params != nil {\n\t\thookedParams = append(hookedParams, params.List...)\n\t}\n\tp := 0\n\tfor _, hookedParam := range hookedParams {\n\t\tcallbackTypeParam := &dst.Field{Type: newSqreenCallbackParamType(hookedParam.Type)}\n\t\tif len(hookedParam.Names) == 0 {\n\t\t\t// Case where the parameter has no name such as f(string): no longer\n\t\t\t// ignore it and name it.\n\t\t\t// - The hooked function parameter must be named.\n\t\t\thookedParam.Names = []*dst.Ident{newSqreenParamIdent(ignoredParamPrefix, p)}\n\t\t\t// - The callback type expects this parameter type.\n\t\t\tcallbackTypeParams = append(callbackTypeParams, callbackTypeParam)\n\t\t\t// - The callback call must pass the hooked function parameter.\n\t\t\tcallbackCallParams = append(callbackCallParams, newSqreenCallbackCallParam(newSqreenParamIdent(ignoredParamPrefix, p)))\n\t\t\tp++\n\t\t} else {\n\t\t\t// Case where the parameters are named, but still possibly ignored.\n\t\t\tfor _, name := range hookedParam.Names {\n\t\t\t\tif name.Name == \"_\" {\n\t\t\t\t\t// Case where the parameter is ignored using `_` such as\n\t\t\t\t\t// f(_ string): no longer ignore it and name it.\n\t\t\t\t\t*name = *newSqreenParamIdent(ignoredParamPrefix, p)\n\t\t\t\t}\n\t\t\t\tcallbackTypeParam = dst.Clone(callbackTypeParam).(*dst.Field)\n\n\t\t\t\t// The callback type expects this parameter type.\n\t\t\t\tcallbackTypeParams = append(callbackTypeParams, callbackTypeParam)\n\t\t\t\t// The callback call must pass the hooked function parameter.\n\t\t\t\tcallbackCallParams = append(callbackCallParams, newSqreenCallbackCallParam(dst.NewIdent(name.Name)))\n\t\t\t\tp++\n\t\t\t}\n\t\t}\n\t}\n\treturn &dst.FieldList{List: callbackTypeParams}, callbackCallParams\n}",
"func F(f string, v ...interface{}) {}",
"func Send_test() {\n\t // Args\n arg :=os.Args\n Project :=arg[1]\n Module :=arg[2]\n Operation :=arg[3]\n Status :=arg[4]\n BlockId :=arg[5]\n AccountId :=arg[6]\n Send_Info(Project, Module, Operation, Status, BlockId,AccountId)\n}",
"func (c ReleaseStoreGetLatestBatchFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}\n}",
"func Sprint(a ...interface{}) string { return fmt.Sprint(a...) }",
"func main() {\n\tfmt.Println(foo(1, 2))\n\tfmt.Println(foo(1, 2, 3))\n\taSlice := []int{1, 2, 3}\n\tfmt.Println(foo(aSlice...))\n\tfmt.Println(foo())\n}",
"func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) {\n\tnin := t.NumIn()\n\tif t.IsVariadic() {\n\t\tnin--\n\t}\n\tvar p *Parameter\n\tfor i := 0; i < nin; i++ {\n\t\tp, err = parameterFromType(t.In(i))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tin = append(in, p)\n\t}\n\tif t.IsVariadic() {\n\t\tp, err = parameterFromType(t.In(nin).Elem())\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tvariadic = p\n\t}\n\tfor i := 0; i < t.NumOut(); i++ {\n\t\tp, err = parameterFromType(t.Out(i))\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\t\tout = append(out, p)\n\t}\n\treturn\n}",
"func (c ResolverGetUploadsByIDsFuncCall) Args() []interface{} {\n\ttrailing := []interface{}{}\n\tfor _, val := range c.Arg1 {\n\t\ttrailing = append(trailing, val)\n\t}\n\n\treturn append([]interface{}{c.Arg0}, trailing...)\n}",
"func (m *Mixpanel) makeArgs(date time.Time) url.Values {\n\targs := url.Values{}\n\n\targs.Set(\"format\", \"json\")\n\targs.Set(\"api_key\", m.Key)\n\targs.Set(\"expire\", fmt.Sprintf(\"%d\", time.Now().Unix()+10000))\n\n\tday := date.Format(\"2006-01-02\")\n\n\targs.Set(\"from_date\", day)\n\targs.Set(\"to_date\", day)\n\n\treturn args\n}",
"func VoidLogger(format string, args ...interface{}) {\n\n}",
"func (c ResolverGetUploadByIDFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1}\n}",
"func main() {\n\tflag.Parse() // should be executed into \"main\" method\n\tfmt.Println(stringParam)\n\tfmt.Println(intParam)\n\tfmt.Printf(\"%#v\\n\", listParam)\n}",
"func ABIchangeType(param []interface{}, arg interface{}, t string) []interface{} {\n\t//变长则弥补0, 定长直接转换\n\tif strings.HasPrefix(t, \"bytes\") {\n\t\tif strings.Contains(t, \"[]\") {\n\t\t\tif len(t) > 7 {\n\t\t\t\tl := len(t)\n\t\t\t\tle := t[5 : l-2]\n\t\t\t\tlength, err := strconv.Atoi(le)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Error(err)\n\t\t\t\t}\n\t\t\t\tif length > 32 {\n\t\t\t\t\tglog.Error(\"[]bytes too long: \", length)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tswitch length {\n\t\t\t\tcase 1:\n\t\t\t\t\tvar bb [][1]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [1]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 2:\n\t\t\t\t\tvar bb [][2]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [2]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 3:\n\t\t\t\t\tvar bb [][3]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [3]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 4:\n\t\t\t\t\tvar bb [][4]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [4]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 5:\n\t\t\t\t\tvar bb [][5]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [5]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 6:\n\t\t\t\t\tvar bb [][6]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [6]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 7:\n\t\t\t\t\tvar bb [][7]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [7]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 8:\n\t\t\t\t\tvar bb [][8]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [8]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 9:\n\t\t\t\t\tvar bb [][9]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [9]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 10:\n\t\t\t\t\tvar bb [][10]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [10]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 11:\n\t\t\t\t\tvar bb [][11]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [11]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 12:\n\t\t\t\t\tvar bb [][12]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b 
[12]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 13:\n\t\t\t\t\tvar bb [][13]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [13]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 14:\n\t\t\t\t\tvar bb [][14]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [14]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 15:\n\t\t\t\t\tvar bb [][15]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [15]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 16:\n\t\t\t\t\tvar bb [][16]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [16]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 17:\n\t\t\t\t\tvar bb [][17]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [17]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 18:\n\t\t\t\t\tvar bb [][18]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [18]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 19:\n\t\t\t\t\tvar bb [][19]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [19]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 20:\n\t\t\t\t\tvar bb [][20]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [20]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 21:\n\t\t\t\t\tvar bb [][21]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [21]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 22:\n\t\t\t\t\tvar bb [][22]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [22]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 23:\n\t\t\t\t\tvar bb [][23]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [23]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 24:\n\t\t\t\t\tvar bb [][24]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [24]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 25:\n\t\t\t\t\tvar bb [][25]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [25]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = 
append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 26:\n\t\t\t\t\tvar bb [][26]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [26]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 27:\n\t\t\t\t\tvar bb [][27]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [27]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 28:\n\t\t\t\t\tvar bb [][28]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [28]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 29:\n\t\t\t\t\tvar bb [][29]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [29]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 30:\n\t\t\t\t\tvar bb [][30]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [30]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 31:\n\t\t\t\t\tvar bb [][31]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [31]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\tcase 32:\n\t\t\t\t\tvar bb [][32]byte\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tvar b [32]byte\n\t\t\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\t\t\tbb = append(bb, b)\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, bb)\n\t\t\t\t\treturn param\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tvar bb [][]byte\n\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\tvar b []byte\n\t\t\t\tcopy(b[:], v.(string))\n\t\t\t\tbb = append(bb, b)\n\t\t\t}\n\t\t\tparam = append(param, bb)\n\t\t\treturn param\n\t\t}\n\t\ts := arg.(string)\n\t\tif len(t) > 5 {\n\t\t\tl := len(t)\n\t\t\tle := t[5:l]\n\t\t\tlength, err := strconv.Atoi(le)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(err)\n\t\t\t}\n\t\t\tif length > 32 {\n\t\t\t\tglog.Error(\"[]bytes too long: \", length)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tswitch length {\n\t\t\tcase 1:\n\t\t\t\tvar b [1]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 2:\n\t\t\t\tvar b [2]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 3:\n\t\t\t\tvar b [3]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 4:\n\t\t\t\tvar b [4]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 5:\n\t\t\t\tvar b [4]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 6:\n\t\t\t\tvar b [6]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 7:\n\t\t\t\tvar b [7]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 8:\n\t\t\t\tvar b [8]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 9:\n\t\t\t\tvar b [9]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, 
b)\n\t\t\t\treturn param\n\t\t\tcase 10:\n\t\t\t\tvar b [10]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 11:\n\t\t\t\tvar b [11]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 12:\n\t\t\t\tvar b [12]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 13:\n\t\t\t\tvar b [13]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 14:\n\t\t\t\tvar b [14]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 15:\n\t\t\t\tvar b [15]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 16:\n\t\t\t\tvar b [16]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 17:\n\t\t\t\tvar b [17]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 18:\n\t\t\t\tvar b [18]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 19:\n\t\t\t\tvar b [19]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 20:\n\t\t\t\tvar b [20]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 21:\n\t\t\t\tvar b [21]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 22:\n\t\t\t\tvar b [22]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 23:\n\t\t\t\tvar b [23]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 24:\n\t\t\t\tvar b [24]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 25:\n\t\t\t\tvar b [25]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 26:\n\t\t\t\tvar b [26]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 27:\n\t\t\t\tvar b [27]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 28:\n\t\t\t\tvar b [28]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 29:\n\t\t\t\tvar b [29]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 30:\n\t\t\t\tvar b [30]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 31:\n\t\t\t\tvar b [31]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\tcase 32:\n\t\t\t\tvar b [32]byte\n\t\t\t\tcopy(b[:], s)\n\t\t\t\tparam = append(param, b)\n\t\t\t\treturn param\n\t\t\t}\n\t\t}\n\t\tb := []byte(s)\n\t\tparam = append(param, b)\n\t\treturn param\n\t}\n\n\t// uint部分,包括数组\n\tif strings.Contains(t, \"uint\") {\n\t\tif strings.Contains(t, \"[]\") {\n\t\t\tif len(t) > 6 {\n\t\t\t\tl := len(t)\n\t\t\t\tle := t[4 : l-2]\n\t\t\t\tlength, err := strconv.Atoi(le)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Error(err)\n\t\t\t\t}\n\t\t\t\tif length > 256 {\n\t\t\t\t\tglog.Error(\"uint too long: \", length)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tswitch length {\n\t\t\t\tcase 8:\n\t\t\t\t\tvar tmp []uint8\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]uint8 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = 
append(tmp, uint8(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tcase 16:\n\t\t\t\t\tvar tmp []uint16\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]uint16 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, uint16(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tcase 32:\n\t\t\t\t\tvar tmp []uint32\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]uint32 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, uint32(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tcase 64:\n\t\t\t\t\tvar tmp []uint64\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]uint64 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, uint64(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tdefault:\n\t\t\t\t\tvar tmp []*big.Int\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]uintBig error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, big.NewInt(int64(uintNum)))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tvar tmp []uint\n\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Error(\"[]uint error: \", err)\n\t\t\t\t\treturn param\n\t\t\t\t}\n\t\t\t\ttmp = append(tmp, uint(uintNum))\n\t\t\t}\n\t\t\tparam = append(param, tmp)\n\t\t\treturn param\n\n\t\t}\n\t\ts := arg.(string)\n\t\tuintNum, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\tglog.Error(err)\n\t\t}\n\t\tif len(t) > 4 {\n\t\t\tl := len(t)\n\t\t\tle := t[4:l]\n\t\t\tlength, err := strconv.Atoi(le)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(err)\n\t\t\t}\n\t\t\tif length > 256 {\n\t\t\t\tglog.Error(\"uint too long: \", length)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tswitch length {\n\t\t\tcase 8:\n\t\t\t\tparam = append(param, uint8(uintNum))\n\t\t\t\treturn param\n\t\t\tcase 16:\n\t\t\t\tparam = append(param, uint16(uintNum))\n\t\t\t\treturn param\n\t\t\tcase 32:\n\t\t\t\tparam = append(param, uint32(uintNum))\n\t\t\t\treturn param\n\t\t\tcase 64:\n\t\t\t\tparam = append(param, uint64(uintNum))\n\t\t\t\treturn param\n\t\t\tdefault:\n\t\t\t\tu := big.NewInt(int64(uintNum))\n\t\t\t\tparam = append(param, u)\n\t\t\t\treturn param\n\t\t\t}\n\n\t\t}\n\n\t\tparam = append(param, uint(uintNum))\n\t\treturn param\n\t}\n\n\t// int部分,包括数组\n\tif strings.Contains(t, \"int\") {\n\t\tif strings.Contains(t, \"[]\") {\n\t\t\tif len(t) > 5 {\n\t\t\t\tl := len(t)\n\t\t\t\tle := t[3 : l-2]\n\t\t\t\tlength, err := strconv.Atoi(le)\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Error(err)\n\t\t\t\t}\n\t\t\t\tif length > 256 {\n\t\t\t\t\tglog.Error(\"int too long: \", length)\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\t\tswitch length {\n\t\t\t\tcase 8:\n\t\t\t\t\tvar tmp []int8\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := 
strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]int8 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, int8(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tcase 16:\n\t\t\t\t\tvar tmp []int16\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]int16 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, int16(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tcase 32:\n\t\t\t\t\tvar tmp []int32\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]int32 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, int32(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tcase 64:\n\t\t\t\t\tvar tmp []int64\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]int64 error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, int64(uintNum))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\tdefault:\n\t\t\t\t\tvar tmp []*big.Int\n\t\t\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\t\t\tif err != nil {\n\t\t\t\t\t\t\tglog.Error(\"[]uintBig error: \", err)\n\t\t\t\t\t\t\treturn param\n\t\t\t\t\t\t}\n\t\t\t\t\t\ttmp = append(tmp, big.NewInt(int64(uintNum)))\n\t\t\t\t\t}\n\t\t\t\t\tparam = append(param, tmp)\n\t\t\t\t\treturn param\n\t\t\t\t}\n\n\t\t\t}\n\t\t\tvar tmp []int\n\t\t\tfor _, v := range arg.([]interface{}) {\n\t\t\t\tuintNum, err := strconv.Atoi(v.(string))\n\t\t\t\tif err != nil {\n\t\t\t\t\tglog.Error(\"[]int error: \", err)\n\t\t\t\t\treturn param\n\t\t\t\t}\n\t\t\t\ttmp = append(tmp, int(uintNum))\n\t\t\t}\n\t\t\tparam = append(param, tmp)\n\t\t\treturn param\n\n\t\t}\n\t\ts := arg.(string)\n\t\tuintNum, err := strconv.Atoi(s)\n\t\tif err != nil {\n\t\t\tglog.Error(err)\n\t\t}\n\t\tif len(t) > 3 {\n\t\t\tl := len(t)\n\t\t\tle := t[3:l]\n\t\t\tlength, err := strconv.Atoi(le)\n\t\t\tif err != nil {\n\t\t\t\tglog.Error(err)\n\t\t\t}\n\t\t\tif length > 256 {\n\t\t\t\tglog.Error(\"uint too long: \", length)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tswitch length {\n\t\t\tcase 8:\n\t\t\t\tparam = append(param, int8(uintNum))\n\t\t\t\treturn param\n\t\t\tcase 16:\n\t\t\t\tparam = append(param, int16(uintNum))\n\t\t\t\treturn param\n\t\t\tcase 32:\n\t\t\t\tparam = append(param, int32(uintNum))\n\t\t\t\treturn param\n\t\t\tcase 64:\n\t\t\t\tparam = append(param, int64(uintNum))\n\t\t\t\treturn param\n\t\t\tdefault:\n\t\t\t\tu := big.NewInt(int64(uintNum))\n\t\t\t\tparam = append(param, u)\n\t\t\t\treturn param\n\t\t\t}\n\n\t\t}\n\n\t\tparam = append(param, int(uintNum))\n\t\treturn param\n\t}\n\n\tparam = append(param, arg.(string))\n\treturn param\n}",
"func FixVariadicCall(call *ir.CallExpr) {\n\tfntype := call.X.Type()\n\tif !fntype.IsVariadic() || call.IsDDD {\n\t\treturn\n\t}\n\n\tvi := fntype.NumParams() - 1\n\tvt := fntype.Params().Field(vi).Type\n\n\targs := call.Args\n\textra := args[vi:]\n\tslice := MakeDotArgs(call.Pos(), vt, extra)\n\tfor i := range extra {\n\t\textra[i] = nil // allow GC\n\t}\n\n\tcall.Args = append(args[:vi], slice)\n\tcall.IsDDD = true\n}",
"func (c DBStoreTransactFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0}\n}",
"func (c DBStoreTransactFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0}\n}",
"func Debugf(format string, params ...interface{}){\n log.Debugf(format, params)\n}",
"func showInt(xx ...int){\n\tprintln(\"\\nint type:\")\n\ts:=len(xx)\n\tfor i:=0; i<s; i++ {\n\t\tfmt.Println(xx[i])\n\t}\n}",
"func PrintParams(args ...string) {\n logger.Log(fmt.Sprintln(settingsToParams(0, false)))\n}",
"func foo(x ...int) {\n\tfmt.Println(x)\n\tfmt.Printf(\"%T\\n\", x)\n}",
"func (c ExtensionStoreGetFeaturedExtensionsFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0}\n}",
"func (o *Aliyun) makeMapArgs(args []map[string]string) string {\n\tstr := \"\"\n\tif len(args) > 0 {\n\t\tfor _, v := range args {\n\t\t\tfor kk, vv := range v {\n\t\t\t\tstr += \"&\" + kk + \"=\" + vv + \"&\"\n\t\t\t}\n\t\t}\n\t}\n\treturn str[:len(str)-1]\n}",
"func (c DBStoreSelectRepositoriesForRetentionScanFuncCall) Args() []interface{} {\n\treturn []interface{}{c.Arg0, c.Arg1, c.Arg2}\n}"
] | [
"0.6316769",
"0.6277749",
"0.60786474",
"0.5912926",
"0.57932216",
"0.568458",
"0.5678213",
"0.56359667",
"0.5634861",
"0.56316566",
"0.5580856",
"0.5573374",
"0.55704665",
"0.5554061",
"0.55481213",
"0.5543412",
"0.55362815",
"0.5525843",
"0.5489114",
"0.548236",
"0.54810214",
"0.54800266",
"0.54578054",
"0.5444903",
"0.538709",
"0.5369978",
"0.5368814",
"0.53488994",
"0.5333247",
"0.5310456",
"0.5277826",
"0.5247066",
"0.5232077",
"0.5224742",
"0.52092844",
"0.52074176",
"0.51958233",
"0.5184711",
"0.5183887",
"0.5176629",
"0.516988",
"0.51603925",
"0.51501447",
"0.5130079",
"0.51290566",
"0.5123816",
"0.5122511",
"0.51121944",
"0.50869465",
"0.50626874",
"0.50590676",
"0.50533575",
"0.50523394",
"0.50465906",
"0.5044629",
"0.50415033",
"0.5039185",
"0.5038781",
"0.50364053",
"0.5035359",
"0.5030558",
"0.50269985",
"0.5021283",
"0.5019801",
"0.50172937",
"0.5011049",
"0.50002414",
"0.49941832",
"0.49906257",
"0.49835786",
"0.49826342",
"0.49786335",
"0.4970732",
"0.49678499",
"0.495084",
"0.495084",
"0.4943098",
"0.49430346",
"0.49418545",
"0.49402568",
"0.4936717",
"0.4936287",
"0.49327052",
"0.49303603",
"0.49294066",
"0.4928916",
"0.4926316",
"0.49256638",
"0.49229428",
"0.4920881",
"0.49171454",
"0.49161017",
"0.49151883",
"0.49151883",
"0.49146694",
"0.49096328",
"0.49079177",
"0.49068254",
"0.49057794",
"0.49052915",
"0.4903614"
] | 0.0 | -1 |
Equals asserts equality of two values. | func Equals(t *testing.T, actual, expected interface{}, format string, a ...interface{}) {
	if !reflect.DeepEqual(actual, expected) {
		msg := fmt.Sprintf(format, a...)
		t.Errorf("not equal: %s\nactual=%+v\nexpected=%+v", msg, actual, expected)
	}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Equal(t Testing, expected, actual interface{}, formatAndArgs ...interface{}) bool {\n\tif !AreEqualObjects(expected, actual) {\n\t\treturn Fail(t,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Expected values are NOT equal.%s\",\n\t\t\t\tdiffValues(expected, actual),\n\t\t\t),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}",
"func Equal(t *testing.T, expected, actual interface{}, message ...string) {\n\tif !compareEquality(expected, actual) {\n\t\tt.Errorf(\"%v\\nExpected \\n\\t[%#v]\\nto be\\n\\t[%#v]\\n%v \", message, actual, expected, callerInfo(2 +callStackAdjust))\n\t}\n}",
"func Equal(expected, actual interface{}) Truth {\n\tmustBeCleanStart()\n\treturn Truth{\n\t\tValue:reflect.DeepEqual(expected, actual),\n\t\tDump:fmt.Sprintf(\"%#v vs. %#v\", expected, actual),\n\t}\n}",
"func Equal(t *testing.T, expected, actual interface{}) {\n\tt.Helper()\n\n\tif expected != actual {\n\t\tt.Errorf(`%s: expected \"%v\" actual \"%v\"`, t.Name(), expected, actual)\n\t}\n}",
"func Equal(values ...interface{}) (failureMessage string) {\n\tif values[0] != values[1] {\n\t\tfailureMessage = fmt.Sprintf(\"Expected `%v` to equal `%v`\", values[0], values[1])\n\t}\n\treturn\n}",
"func TestEquals(t *testing.T) {\n\tt.Parallel()\n\tfor ti, tt := range []struct {\n\t\tm1, m2 MatrixExp\n\t\teq bool\n\t}{\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: true,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralZeros(1, 10),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(10, 1),\n\t\t\tm2: GeneralZeros(1, 1),\n\t\t\teq: false,\n\t\t},\n\t\t{\n\t\t\tm1: GeneralZeros(1, 1),\n\t\t\tm2: GeneralOnes(1, 1),\n\t\t\teq: false,\n\t\t},\n\t} {\n\t\tif v := Equals(tt.m1, tt.m2); v != tt.eq {\n\t\t\tt.Errorf(\"%d: Equals(%v,%v) equals %v, want %v\", ti, tt.m1, tt.m2, v, tt.eq)\n\t\t}\n\t}\n}",
"func Equal(t TestingT, expected, actual interface{}, extras ...interface{}) bool {\n\tif !DeepEqual(expected, actual) {\n\t\treturn Errorf(t, \"Expect to be equal\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"Diff\",\n\t\t\t\tcontent: diff(expected, actual),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}",
"func equal(t *testing.T, expected, actual interface{}) {\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tt.Errorf(\"Expected %v (type %v) - Got %v (type %v)\", expected, reflect.TypeOf(expected), actual, reflect.TypeOf(actual))\n\t}\n}",
"func Equal(t *testing.T, a, b interface{}) {\n\tif a != b && !reflect.DeepEqual(a, b) {\n\t\tt.Errorf(\"%v Not Equal: %v == %v\", line(), a, b)\n\t}\n}",
"func EqualValues(t Testing, expected, actual interface{}, formatAndArgs ...interface{}) bool {\n\tif !AreEqualValues(expected, actual) {\n\t\treturn Fail(t,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Expected values are NOT equal in value.%s\",\n\t\t\t\tdiffValues(expected, actual),\n\t\t\t),\n\t\t\tformatAndArgs...)\n\t}\n\n\treturn true\n}",
"func EqualValues(t TestingT, expected, actual interface{}, extras ...interface{}) bool {\n\tif !DeepEqualValues(expected, actual) {\n\t\treturn Errorf(t, \"Expect to be equal in values\", []labeledOutput{\n\t\t\t{\n\t\t\t\tlabel: labelMessages,\n\t\t\t\tcontent: formatExtras(extras...),\n\t\t\t},\n\t\t\t{\n\t\t\t\tlabel: \"Diff\",\n\t\t\t\tcontent: diff(expected, actual),\n\t\t\t},\n\t\t})\n\t}\n\n\treturn true\n}",
"func Equal(left Value, right Value) bool {\n\t// TODO: Stop-gap for now, this will need to be much more sophisticated.\n\treturn CoerceString(left) == CoerceString(right)\n}",
"func Equal(t *testing.T, expected, result interface{}) {\n\tif !reflect.DeepEqual(result, expected) {\n\t\tt.Errorf(\"should be %v instead of %v\", expected, result)\n\t}\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase string, int, int64, byte, float32, float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func Equals(a, b interface{}) bool {\n\treturn neogointernal.Opcode2(\"EQUAL\", a, b).(bool)\n}",
"func Equal[T any](t testing.TB, expected, actual T, msgAndArgs ...interface{}) {\n\tif objectsAreEqual(expected, actual) {\n\t\treturn\n\t}\n\tt.Helper()\n\tmsg := formatMsgAndArgs(\"Expected values to be equal:\", msgAndArgs...)\n\tt.Fatalf(\"%s\\n%s\", msg, diff(expected, actual))\n}",
"func (a *Assertions) EqualValues(expected, actual interface{}, msgAndArgs ...interface{}) bool {\n\treturn EqualValues(a.t, expected, actual, msgAndArgs...)\n}",
"func (v *Values) Equal(other *Values) bool {\n\tv.lock.RLock()\n\tdefer v.lock.RUnlock()\n\tother.lock.RLock()\n\tdefer other.lock.RUnlock()\n\n\treturn v.root.equal(other.root)\n}",
"func (a *Assertions) Equal(expected interface{}, actual interface{}, userMessageComponents ...interface{}) bool {\n\ta.assertion()\n\tif didFail, message := shouldBeEqual(expected, actual); didFail {\n\t\treturn a.fail(message, userMessageComponents...)\n\t}\n\treturn true\n}",
"func equals(t testing.TB, got, exp interface{}) {\n\tif !cmp.Equal(exp, got) {\n\t\tt.Fatalf(\"\\n\\tgot: %#v\\n\\texp: %#v\\n\", got, exp)\n\t}\n}",
"func eq(o1, o2 interface{}) bool {\n\n\tf1, ok1 := ToFloat(o1)\n\tf2, ok2 := ToFloat(o2)\n\tif ok1 && ok2 {\n\t\treturn f1 == f2\n\t}\n\n\tb1, ok1 := ToBool(o1)\n\tb2, ok1 := ToBool(o2)\n\tif ok1 && ok2 {\n\t\treturn b1 == b2\n\t}\n\n\treturn o1 == o2\n}",
"func equals(tb testing.TB, got, want interface{}) {\n\ttb.Helper()\n\tif !reflect.DeepEqual(got, want) {\n\t\ttb.Fatalf(\"\\033[31m\\n\\n\\tgot: %#v\\n\\n\\twant: %#v\\033[39m\\n\\n\", got, want)\n\t}\n}",
"func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool {\n\treturn Equal(a.t, expected, actual, msgAndArgs...)\n}",
"func AssertEqual(t *testing.T, msg string, a interface{}, b interface{}) {\n\tif a == b {\n\t\treturn\n\t}\n\t// debug.PrintStack()\n\tt.Errorf(\"%s was incorrect, received %v, expected %v.\", msg, a, b)\n}",
"func assertEQ(a, b interface{}, t *testing.T) {\n\tif a != b {\n\t\tdebug.PrintStack()\n\t\tt.Fatal(\"assertEQ failed: \", a, \"!=\", b, \"\\n\")\n\t}\n}",
"func Equal(t testing.TB, expected, actual interface{}, msgAndArgs ...interface{}) bool {\n\tif err := validateEqualArgs(expected, actual); err != nil {\n\t\treturn failTest(t, 1, fmt.Sprintf(\"Equal: invalid operation `%#v` == `%#v` (%v)\", expected, actual, err), msgAndArgs...)\n\t}\n\n\tif !IsObjectEqual(expected, actual) {\n\t\treturn failTest(t, 1, fmt.Sprintf(\"Equal: expected `%#v`, actual `%#v`\", expected, actual), msgAndArgs...)\n\t}\n\n\treturn true\n}",
"func Equals(tb testing.TB, expected, actual interface{}) {\n\ttb.Helper()\n\tif !reflect.DeepEqual(expected, actual) {\n\t\ttb.Fatalf(\"expected: %#[1]v (%[1]T) but got: %#[2]v (%[2]T)\\n\", expected, actual)\n\t}\n}",
"func equals(t types.Type, x, y value) bool {\n\tswitch x := x.(type) {\n\tcase bool:\n\t\treturn x == y.(bool)\n\tcase int:\n\t\treturn x == y.(int)\n\tcase int8:\n\t\treturn x == y.(int8)\n\tcase int16:\n\t\treturn x == y.(int16)\n\tcase int32:\n\t\treturn x == y.(int32)\n\tcase int64:\n\t\treturn x == y.(int64)\n\tcase uint:\n\t\treturn x == y.(uint)\n\tcase uint8:\n\t\treturn x == y.(uint8)\n\tcase uint16:\n\t\treturn x == y.(uint16)\n\tcase uint32:\n\t\treturn x == y.(uint32)\n\tcase uint64:\n\t\treturn x == y.(uint64)\n\tcase uintptr:\n\t\treturn x == y.(uintptr)\n\tcase float32:\n\t\treturn x == y.(float32)\n\tcase float64:\n\t\treturn x == y.(float64)\n\tcase complex64:\n\t\treturn x == y.(complex64)\n\tcase complex128:\n\t\treturn x == y.(complex128)\n\tcase string:\n\t\treturn x == y.(string)\n\tcase *value:\n\t\treturn x == y.(*value)\n\tcase chan value:\n\t\treturn x == y.(chan value)\n\tcase structure:\n\t\treturn x.eq(t, y)\n\tcase array:\n\t\treturn x.eq(t, y)\n\tcase iface:\n\t\treturn x.eq(t, y)\n\tcase rtype:\n\t\treturn x.eq(t, y)\n\t}\n\n\t// Since map, func and slice don't support comparison, this\n\t// case is only reachable if one of x or y is literally nil\n\t// (handled in eqnil) or via interface{} values.\n\tpanic(fmt.Sprintf(\"comparing uncomparable type %s\", t))\n}",
"func Equal(a, b uint64) bool {\n\treturn a == b\n}",
"func ExampleValueEquals() {\n\tassert := assert.To(nil)\n\tassert.For(\"1 Equals 1\").That(1).Equals(1)\n\tassert.For(\"2 Equals 3\").That(2).Equals(3)\n\t// Output:\n\t// Error:2 Equals 3\n\t// Got 2\n\t// Expect == 3\n}",
"func (value *Value) Equal(other *Value) bool {\n\treturn reflect.DeepEqual(value, other)\n}",
"func (a Assert) Equal(want interface{}, have interface{}) {\n\tequal(a.t, want, have)\n}",
"func Same(expected, actual interface{}) Truth {\n\tmustBeCleanStart()\n\treturn Truth{\n\t\tValue: nice(expected) == nice(actual) && reflect.DeepEqual(actual, expected),\n\t\tDump:fmt.Sprintf(\"%#v\", actual),\n\t}\n}",
"func AssertEquals(expected string, actual string, t *testing.T) {\n\tif expected != actual {\n\t\tt.Errorf(\"\\nE: %s\\nA: %s\", strconv.Quote(expected), strconv.Quote(actual))\n\t}\n}",
"func AssertEqual(t *testing.T, a interface{}, b interface{}) {\n\tif a == b {\n\t\treturn\n\t}\n\t// debug.PrintStack()\n\tt.Errorf(\"Received %v (type %v), expected %v (type %v)\", a, reflect.TypeOf(a), b, reflect.TypeOf(b))\n}",
"func EqualAssert(v1,v2 int) bool{\n\tif v1 == v2 {\n\t\treturn true\n\t} else {\n\t\tif show == true {\n\t\t\tfmt.Printf(\"Failed! %d and %d are not equal : \\n\",v1,v2)\n\t\t}\n\t\treturn false\n\t}\n}",
"func Equal(expected, actual interface{}) bool {\n\tif err := validateEqualArgs(expected, actual); err != nil {\n\t\treturn false\n\t}\n\n\tif !ObjectsAreEqual(expected, actual) {\n\t\treturn false\n\t}\n\n\treturn true\n}",
"func (t *T) Equal(have, want interface{}, desc ...string) {\n\tt.EqualWithIgnores(have, want, nil, desc...)\n}",
"func equal(lhs, rhs semantic.Expression) semantic.Expression {\n\treturn &semantic.BinaryOp{Type: semantic.BoolType, LHS: lhs, Operator: ast.OpEQ, RHS: rhs}\n}",
"func Equal(t, other Tuplelike) bool {\n\tfor idx, value := range t.Values() {\n\t\tif !inEpsilon(value, other.At(idx)) {\n\t\t\treturn false\n\t\t}\n\n\t}\n\treturn true\n}",
"func main() {\n\temp1 := Employee{}\n\temp1.Name=\"Gowtham\"\n\n\temp2 := Employee{}\n\temp2.Name=\"Gowtham\"\n\n\tprintln(\"the emp1 and emp2 are equal ?\" , emp1 == emp2)\n}",
"func assertEq(x, y interface{}) {\n if !reflect.DeepEqual(x, y) {\n panic(\"arguments not equal!\")\n }\n}",
"func Equal(t t, want interface{}, have interface{}) {\n\tequal(t, want, have)\n}",
"func TestEqual(t *testing.T) {\n\tslc := make([]float64, 10)\n\tfor i := range slc {\n\t\tslc[i] = float64(i)\n\t}\n\n\tv := NewFrom(slc)\n\tw := NewFrom(slc)\n\tif !Equal(v, w) {\n\t\tt.Error(\"Equal() != true for equal vectors.\")\n\t}\n\n\tw = New(10)\n\tif Equal(v, w) {\n\t\tt.Error(\"Equal() == true for unequal vectors.\")\n\t}\n}",
"func (r Representative) Equal(a, b uint64) bool {\n\tif r == nil {\n\t\treturn Equal(a, b)\n\t}\n\treturn r(a) == r(b)\n}",
"func Equals(v1, v2 interface{}) bool {\n\t// TODO(EItanya): Should this be `proto.Equal` since these values are usually proto messages\n\treturn reflect.DeepEqual(v1, v2)\n}",
"func (v Value) Equal(w Value) bool {\n\treturn v.v == w.v\n}",
"func (null Null) Equal(other Value) bool {\n\tswitch other.(type) {\n\tcase Null:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}",
"func AssertEqual(a interface{}, b interface{}) {\n\tif a != b {\n\t\tpanic(\"Assertion Fail!\")\n\t}\n}",
"func expectEqual(value, expected interface{}) {\n\tif value != expected {\n\t\tfmt.Printf(\"Fehler: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t} else {\n\t\tfmt.Printf(\"OK: %v bekommen, erwartet war aber %v.\\n\", value, expected)\n\t}\n}",
"func Equal(a, b interface{}) bool {\n\tif reflect.TypeOf(a) == reflect.TypeOf(b) {\n\t\treturn reflect.DeepEqual(a, b)\n\t}\n\tswitch a.(type) {\n\tcase int, int8, int16, int32, int64:\n\t\tswitch b.(type) {\n\t\tcase int, int8, int16, int32, int64:\n\t\t\treturn reflect.ValueOf(a).Int() == reflect.ValueOf(b).Int()\n\t\t}\n\tcase uint, uint8, uint16, uint32, uint64:\n\t\tswitch b.(type) {\n\t\tcase uint, uint8, uint16, uint32, uint64:\n\t\t\treturn reflect.ValueOf(a).Uint() == reflect.ValueOf(b).Uint()\n\t\t}\n\tcase float32, float64:\n\t\tswitch b.(type) {\n\t\tcase float32, float64:\n\t\t\treturn reflect.ValueOf(a).Float() == reflect.ValueOf(b).Float()\n\t\t}\n\tcase string:\n\t\tswitch b.(type) {\n\t\tcase []byte:\n\t\t\treturn a.(string) == string(b.([]byte))\n\t\t}\n\tcase []byte:\n\t\tswitch b.(type) {\n\t\tcase string:\n\t\t\treturn b.(string) == string(a.([]byte))\n\t\t}\n\t}\n\treturn false\n}",
"func TestSetEqual(t *T) {\n\t// Degenerate case\n\ts1, s2 := NewSet(), NewSet()\n\tassert.Equal(t, true, s1.Equal(s2))\n\tassert.Equal(t, true, s2.Equal(s1))\n\n\t// False with different sizes\n\ts1, _ = s1.SetVal(1)\n\tassert.Equal(t, false, s1.Equal(s2))\n\tassert.Equal(t, false, s2.Equal(s1))\n\n\t// False with same sizes\n\ts2, _ = s2.SetVal(2)\n\tassert.Equal(t, false, s1.Equal(s2))\n\tassert.Equal(t, false, s2.Equal(s1))\n\n\t// Now true\n\ts1, _ = s1.SetVal(2)\n\ts2, _ = s2.SetVal(1)\n\tassert.Equal(t, true, s1.Equal(s2))\n\tassert.Equal(t, true, s2.Equal(s1))\n\n\t// False with embedded set\n\ts1, _ = s1.SetVal(NewSet(3))\n\tassert.Equal(t, false, s1.Equal(s2))\n\tassert.Equal(t, false, s2.Equal(s1))\n\n\t// True with embedded set\n\ts2, _ = s2.SetVal(NewSet(3))\n\tassert.Equal(t, true, s1.Equal(s2))\n\tassert.Equal(t, true, s2.Equal(s1))\n}",
"func ExpectEqual(alert func(format string, args ...interface{}),\n\texpected interface{}, actual interface{}) bool {\n\texpectedValue, actualValue := reflect.ValueOf(expected), reflect.ValueOf(actual)\n\tequal := false\n\tswitch {\n\tcase expected == nil && actual == nil:\n\t\treturn true\n\tcase expected != nil && actual == nil:\n\t\tequal = expectedValue.IsNil()\n\tcase expected == nil && actual != nil:\n\t\tequal = actualValue.IsNil()\n\tdefault:\n\t\tif actualType := reflect.TypeOf(actual); actualType != nil {\n\t\t\tif expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {\n\t\t\t\tequal = reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)\n\t\t\t}\n\t\t}\n\t}\n\tif !equal {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\talert(\"%s:%d: missmatch, expect %v but %v\", file, line, expected, actual)\n\t\treturn false\n\t}\n\treturn true\n}",
"func ExpectEqual(alert func(format string, args ...interface{}),\n\texpected interface{}, actual interface{}) bool {\n\texpectedValue, actualValue := reflect.ValueOf(expected), reflect.ValueOf(actual)\n\tequal := false\n\tswitch {\n\tcase expected == nil && actual == nil:\n\t\treturn true\n\tcase expected != nil && actual == nil:\n\t\tequal = expectedValue.IsNil()\n\tcase expected == nil && actual != nil:\n\t\tequal = actualValue.IsNil()\n\tdefault:\n\t\tif actualType := reflect.TypeOf(actual); actualType != nil {\n\t\t\tif expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {\n\t\t\t\tequal = reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)\n\t\t\t}\n\t\t}\n\t}\n\tif !equal {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\talert(\"%s:%d: missmatch, expect %v but %v\", file, line, expected, actual)\n\t\treturn false\n\t}\n\treturn true\n}",
"func ExpectEqual(alert func(format string, args ...interface{}),\n\texpected interface{}, actual interface{}) bool {\n\texpectedValue, actualValue := reflect.ValueOf(expected), reflect.ValueOf(actual)\n\tequal := false\n\tswitch {\n\tcase expected == nil && actual == nil:\n\t\treturn true\n\tcase expected != nil && actual == nil:\n\t\tequal = expectedValue.IsNil()\n\tcase expected == nil && actual != nil:\n\t\tequal = actualValue.IsNil()\n\tdefault:\n\t\tif actualType := reflect.TypeOf(actual); actualType != nil {\n\t\t\tif expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) {\n\t\t\t\tequal = reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual)\n\t\t\t}\n\t\t}\n\t}\n\tif !equal {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\talert(\"%s:%d: missmatch, expect %v but %v\", file, line, expected, actual)\n\t\treturn false\n\t}\n\treturn true\n}",
"func (tc *TestConfig) Equal(source interface{}, target interface{}) bool {\n\tvar a, b float64\n\tswitch source.(type) {\n\tcase int:\n\t\ta = float64(source.(int))\n\tcase float64:\n\t\ta = float64(source.(float64))\n\tdefault:\n\t\treturn false\n\t}\n\n\tswitch target.(type) {\n\tcase int:\n\t\tb = float64(target.(int))\n\tcase float64:\n\t\tb = float64(target.(float64))\n\tdefault:\n\t\treturn false\n\t}\n\n\treturn (a == b)\n}",
"func Equals(tb testing.TB, exp, act interface{}) {\n\ttb.Helper()\n\tif !reflect.DeepEqual(exp, act) {\n\t\ttb.Fatalf(\"\\nexp:\\t%[1]v (%[1]T)\\ngot:\\t%[2]v (%[2]T)\", exp, act)\n\t}\n}",
"func (b *BooleanObject) equal(e *BooleanObject) bool {\n\treturn b.value == e.value\n}",
"func TestEquals(t *testing.T) {\n\tv := strings.Title(\"foo\")\n\tequals(t, v, \"foo\")\n}",
"func eq(args ...interface{}) bool {\n\tif len(args) == 0 {\n\t\treturn false\n\t}\n\tx := args[0]\n\tswitch x := x.(type) {\n\tcase int:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase int64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase int:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase int64:\n\t\t\t\tif int64(x) == int64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float32:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase float64:\n\t\tfor _, y := range args[1:] {\n\t\t\tswitch y := y.(type) {\n\t\t\tcase float32:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\tcase float64:\n\t\t\t\tif float64(x) == float64(y) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\treturn false\n\n\tcase string, byte:\n\t\tfor _, y := range args[1:] {\n\t\t\tif x == y {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t\treturn false\n\t}\n\n\tfor _, y := range args[1:] {\n\t\tif reflect.DeepEqual(x, y) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tlog.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func assertEqual(t *testing.T, expected, actual interface{}) bool {\n\n\tif assert.ObjectsAreEqual(expected, actual) {\n\t\treturn true\n\t}\n\n\tmessage := fmt.Sprintf(\n\t\t\"Not equal: \\nexpected: %s\\nactual : %s\",\n\t\texpected,\n\t\tactual,\n\t)\n\n\treturn assert.Fail(t, message)\n}",
"func assertEqual(t *testing.T, expected, actual interface{}) bool {\n\n\tif assert.ObjectsAreEqual(expected, actual) {\n\t\treturn true\n\t}\n\n\tmessage := fmt.Sprintf(\n\t\t\"Not equal: \\nexpected: %s\\nactual : %s\",\n\t\texpected,\n\t\tactual,\n\t)\n\n\treturn assert.Fail(t, message)\n}",
"func (l *LabelPair) Equal(o *LabelPair) bool {\n\tswitch {\n\tcase l.Name != o.Name:\n\t\treturn false\n\tcase l.Value != o.Value:\n\t\treturn false\n\tdefault:\n\t\treturn true\n\t}\n}",
"func AssertEqual(t *testing.T, a interface{}, b interface{}) {\n if a != b {\n t.Fatalf(\"%s != %s\", a, b)\n }\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%str:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.Fail()\n\t}\n}",
"func Equal(lhs, rhs Expression) Expression {\n\treturn NewCall(\"equal\", []Expression{lhs, rhs}, nil)\n}",
"func (s *Suite) Equal(exp, act interface{}, message ...string) bool {\n\ts.setup()\n\tif exp != act {\n\t\tif len(message) > 0 {\n\t\t\treturn s.Status.failWithCustomMsg(message[0], s.callerInfo)\n\t\t}\n\t\treturn s.Status.fail(exp, act, s.callerInfo)\n\t}\n\treturn s.Status.pass()\n}",
"func TestEqual(t *testing.T) {\n\ttables := []struct {\n\t\tx []string\n\t\ty []string\n\t\texpected bool\n\t}{\n\t\t{[]string{}, []string{}, true},\n\t\t{[]string{}, []string{\"\"}, false},\n\t\t{[]string{\"\"}, []string{\"\"}, true},\n\t\t{[]string{\"\"}, []string{\"a\"}, false},\n\t\t{[]string{\"a\"}, []string{\"a\", \"a\"}, false},\n\t\t{[]string{\"b\"}, []string{\"a\"}, false},\n\t\t{[]string{\"\", \"\", \"\"}, []string{\"\", \"\", \"\"}, true},\n\t\t{[]string{\"a\", \"b\", \"c\"}, []string{\"a\", \"b\", \"e\"}, false},\n\t}\n\n\tfor _, table := range tables {\n\t\tresult := Equal(table.x, table.y)\n\t\tif result != table.expected {\n\t\t\tt.Errorf(\"Match failed for (%s, %s). Expected %t, got %t\",\n\t\t\t\ttable.x, table.y, table.expected, result)\n\t\t}\n\t}\n}",
"func (seq SeqEq[S, T]) Equal(a, b S) bool {\n\tseqA := a\n\tseqB := b\n\tfor !seq.Seq.IsVoid(seqA) && !seq.Seq.IsVoid(seqB) {\n\t\theadA := seq.Seq.Head(seqA)\n\t\theadB := seq.Seq.Head(seqB)\n\t\tif headA == nil || headB == nil || !seq.Eq.Equal(*headA, *headB) {\n\t\t\treturn false\n\t\t}\n\n\t\tseqA = seq.Seq.Tail(seqA)\n\t\tseqB = seq.Seq.Tail(seqB)\n\t}\n\n\treturn seq.Seq.IsVoid(seqA) && seq.Seq.IsVoid(seqB)\n}",
"func compareEquality(expected, actual interface{}) bool {\n\n\tif expected == nil || actual == nil {\n\t\treturn expected == actual\n\t}\n\n\tif reflect.DeepEqual(expected, actual) {\n\t\treturn true\n\t}\n\n\texpectedValue := reflect.ValueOf(expected)\n\tactualValue := reflect.ValueOf(actual)\n\n\tif expectedValue == actualValue {\n\t\treturn true\n\t}\n\n\t// Attempt comparison after type conversion\n\tif actualValue.Type().ConvertibleTo(expectedValue.Type()) && expectedValue == actualValue.Convert(expectedValue.Type()) {\n\t\treturn true\n\t}\n\n\t// Last ditch effort\n\tif fmt.Sprintf(\"%#v\", expected) == fmt.Sprintf(\"%#v\", actual) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func equals(tb testing.TB, exp, act interface{}) {\n\tif !reflect.DeepEqual(exp, act) {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d:\\n\\n\\texp: %#v\\n\\n\\tgot: %#v\\033[39m\\n\\n\", filepath.Base(file), line, exp, act)\n\t\ttb.FailNow()\n\t}\n}",
"func eq(t *testing.T, s, exp string) {\n\tt.Helper()\n\tif s != exp {\n\t\tt.Errorf(\"got %q, expected %q\", s, exp)\n\t}\n}",
"func (v PlainTextValue) Equal(other ValueNode) bool {\n\totherV, ok := other.(PlainTextValue)\n\treturn ok && v == otherV\n}",
"func Equal(object, expected interface{}) error {\n\t// shortcuts\n\tif expected == nil && object == nil {\n\t\treturn nil\n\t}\n\n\tif (expected == nil && object != nil) || (expected != nil && object == nil) {\n\t\treturn xerrors.New(stringJoin(\"\\n\", \"not equal\", actualExpectedDiff(object, expected)))\n\t}\n\n\t// we might be able to convert this\n\tcompareData := misc.MakeTypeCopy(expected)\n\n\terr := converter.Convert(object, &compareData)\n\tif err == nil {\n\t\tobject = compareData\n\t}\n\tif !cmp.Equal(expected, object) {\n\t\treturn xerrors.New(stringJoin(\"\\n\", \"not equal\", actualExpectedDiff(object, expected)))\n\t}\n\treturn nil\n}",
"func Eq(one, other interface{}) bool {\n\treturn reflect.DeepEqual(one, other)\n}",
"func (t Tags) Equal(other Tags) bool {\n\tif len(t.Values()) != len(other.Values()) {\n\t\treturn false\n\t}\n\tfor i := 0; i < len(t.Values()); i++ {\n\t\tequal := t.values[i].Name.Equal(other.values[i].Name) &&\n\t\t\tt.values[i].Value.Equal(other.values[i].Value)\n\t\tif !equal {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}",
"func assertEq(expected string, actual string, t *testing.T) {\n\tt.Helper()\n\tif actual != expected {\n\t\tt.Error(\"expected: \", expected, \" but got: \", actual)\n\t}\n}",
"func equal(a, b float64) bool {\n\treturn math.Abs(a-b) <= equalityThreshold\n}",
"func isEqual(a interface{}, b interface{}) bool {\n\treturn a == b\n}",
"func (NullValue) Equal(other ValueNode) bool {\n\t_, isNull := other.(NullValue)\n\treturn isNull\n}",
"func (val Value) Equal(o Value) bool {\n\tif val.Type() == nil && o.Type() == nil && val.value == nil && o.value == nil {\n\t\treturn true\n\t}\n\tif val.Type() == nil {\n\t\treturn false\n\t}\n\tif o.Type() == nil {\n\t\treturn false\n\t}\n\tif !val.Type().Equal(o.Type()) {\n\t\treturn false\n\t}\n\tdiff, err := val.Diff(o)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn len(diff) < 1\n}",
"func ValueEqual(v1 interface{}, v2 interface{}) bool {\n\treturn corelib.ValueEqual(v1, v2)\n}",
"func TestAtomicProposition_Equals1(t *testing.T) {\n\t// Constants\n\tap1 := AtomicProposition{Name: \"A\"}\n\tap2 := AtomicProposition{Name: \"B\"}\n\tap3 := AtomicProposition{Name: \"A\"}\n\n\tif ap1.Equals(ap2) {\n\t\tt.Errorf(\"ap1 (%v) is supposed to be different from ap2 (%v).\", ap1.Name, ap2.Name)\n\t}\n\n\tif !ap1.Equals(ap3) {\n\t\tt.Errorf(\"ap1 (%v) is supposed to be the same as ap3 (%v).\", ap1.Name, ap3.Name)\n\t}\n\n}",
"func AssertEqual(t *testing.T, actual interface{}, expected interface{}) error {\n\tif !reflect.DeepEqual(expected, actual) {\n\t\tmsg := fmt.Sprintf(\"\\n(%v) Not Equal:\\n\"+\" - Expected: %#v\\n - Received: %#v\\n\", t.Name(), expected, actual)\n\t\tfmt.Println(msg)\n\t\tt.Fail()\n\t\treturn errors.New(msg)\n\t}\n\treturn nil\n}",
"func (d DummyEqualer) Equal(u Equaler) bool {\n\t_, ok := u.(DummyEqualer)\n\tif !ok {\n\t\treturn false\n\t}\n\treturn true\n}"
] | [
"0.7212346",
"0.7140438",
"0.7125134",
"0.7118771",
"0.7050982",
"0.70416135",
"0.7012862",
"0.6971075",
"0.6900521",
"0.68735236",
"0.6835189",
"0.68320405",
"0.682615",
"0.68192345",
"0.68192345",
"0.68109655",
"0.6790581",
"0.6789338",
"0.6768855",
"0.675772",
"0.67543143",
"0.6737002",
"0.67353547",
"0.67259204",
"0.6724521",
"0.6715959",
"0.6683861",
"0.6676617",
"0.6643534",
"0.6615046",
"0.6610375",
"0.66078323",
"0.6583524",
"0.6581345",
"0.65704125",
"0.6537372",
"0.6534007",
"0.65145105",
"0.6510344",
"0.649782",
"0.6493807",
"0.6493629",
"0.64763594",
"0.6449647",
"0.6445912",
"0.64373505",
"0.6403111",
"0.63803524",
"0.63730466",
"0.6364916",
"0.6362773",
"0.6357717",
"0.6348705",
"0.6344788",
"0.6344788",
"0.6344788",
"0.6339634",
"0.6339089",
"0.63157326",
"0.629963",
"0.62870735",
"0.62845373",
"0.62834585",
"0.62834585",
"0.6275061",
"0.6271157",
"0.62683517",
"0.626602",
"0.62548107",
"0.6253844",
"0.6253703",
"0.62459683",
"0.6242387",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6239447",
"0.6236766",
"0.6224426",
"0.62240785",
"0.6215256",
"0.6201027",
"0.61871076",
"0.6173586",
"0.6172015",
"0.6167114",
"0.61614263",
"0.61602074",
"0.61501706",
"0.61433935",
"0.6134816"
] | 0.67351145 | 23 |
write a json error with header to the output | func JsonError(writer http.ResponseWriter, error_message string) {
	writer.Header().Set("Content-Type", "application/json")
	writer.WriteHeader(http.StatusInternalServerError)
	writer.Write([]byte("{\"error\": \"" + error_message + "\"}"))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (a *API) jsonError(res http.ResponseWriter, err common.DetailedError, startedAt time.Time) {\n\ta.logError(&err, startedAt)\n\tjsonErr, _ := json.Marshal(err)\n\n\tres.Header().Add(\"content-type\", \"application/json\")\n\tres.WriteHeader(err.Status)\n\tres.Write(jsonErr)\n}",
"func WriteErrJSON(l *zap.SugaredLogger, w http.ResponseWriter, r *http.Request, err error, httpCode int) {\n\t// log outgoing errors\n\tl.With(\"requestId\", GetReqID(r.Context())).Error(err)\n\n\t// write error to response\n\te := HTTPError{\n\t\tHTTPStatusCode: httpCode,\n\t\tMsg: err.Error(),\n\t\tInternalErrCode: -1,\n\t}\n\n\tif err := WriteJSON(w, e, httpCode); err != nil {\n\t\tl.Errorw(\"error while sending err json\", \"err\", err)\n\t}\n}",
"func writeError(w http.ResponseWriter, err error, code int) error {\n\ttags := []string{\n\t\t\"error_\" + strings.ToLower(\n\t\t\tstrings.ReplaceAll(http.StatusText(code), \" \", \"_\"),\n\t\t),\n\t}\n\n\tdata, err := jsoniter.ConfigFastest.Marshal(map[string]interface{}{\n\t\t\"success\": false,\n\t\t\"code\": code,\n\t\t\"message\": err.Error(),\n\t\t\"tags\": tags,\n\t})\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(data)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"writeError:%w\", err)\n\t}\n\n\treturn err\n}",
"func jsonError(w http.ResponseWriter, serverMsg string, clientMsg string) {\n\tlog.Error(serverMsg)\n\tpayload := Message{\n\t\tError: clientMsg,\n\t}\n\tresJSON, err := json.Marshal(payload)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to marshal result : %v\", err)\n\t\thttpError(w, msg, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintf(w, \"%s\\n\", string(resJSON))\n\treturn\n}",
"func ATJsonError(writer http.ResponseWriter, error_message string) {\n\twriter.WriteHeader(http.StatusInternalServerError)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tobj := model.ATResultList{Error: error_message}\n\tjson_bytes, _ := json.Marshal(obj)\n\twriter.Write(json_bytes)\n}",
"func writeError(httpStatus int, e error, response *restful.Response) {\n\tvar resp *endpointError.EndpointError\n\tif endpointErr, ok := e.(*endpointError.EndpointError); ok {\n\t\tresp = endpointErr\n\t} else {\n\t\tresp = &endpointError.EndpointError{\n\t\t\tErrorMessage: e.Error(),\n\t\t\tErrorCode: \"96\",\n\t\t}\n\t}\n\n\terr := response.WriteHeaderAndJson(httpStatus, resp, restful.MIME_JSON)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\treturn\n\t}\n}",
"func writeErrorResponse(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusInternalServerError)\n\ter := errorResponse{Message: \"unable to process request\"}\n\tbs, err := json.Marshal(er)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\tif _, err := w.Write(bs); err != nil {\n\t\tlog.Error(err)\n\t}\n}",
"func writeErrorResponse(w http.ResponseWriter, status int, body string) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(status)\n\n\t_, _ = fmt.Fprintf(os.Stderr, \"error: %s\", body)\n\tif _, err := w.Write([]byte(body)); err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"cannot write to stream: %v\\n\", err)\n\t\treturn\n\t}\n}",
"func WriteErrorResponse(w http.ResponseWriter, code int, err error) {\n\ttype e struct {\n\t\tDescription string `json:\"description\"`\n\t}\n\tdata, err := json.Marshal(&e{\n\t\tDescription: err.Error(),\n\t})\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}",
"func (s Status) WriteJSON(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(s.Code)\n\t_, err := fmt.Fprintf(w, `{\"error\":%q}`, s.String())\n\treturn err\n}",
"func setError(w http.ResponseWriter, desc string, status int) {\n\te := map[string]interface{}{\"code\": status, \"msg\": desc}\n\tmsg, _ := json.Marshal(e)\n\tlog.DebugJson(e)\n\t//w.WriteHeader(status)\n\tw.Write(msg)\n}",
"func writeInsightError(w http.ResponseWriter, str string) {\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tw.WriteHeader(http.StatusBadRequest)\n\tio.WriteString(w, str)\n}",
"func writeError(w http.ResponseWriter, status int, err error) {\n\twrite(w, status, Error{Err: err.Error()})\n}",
"func WriteErrResponse(w http.ResponseWriter, code int, err error) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": code,\n\t\t\t\"error\": err.Error(),\n\t\t},\n\t}\n\n\tres, err := json.MarshalIndent(env, \"\", \" \")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.WriteHeader(code)\n\t_, err = w.Write(res)\n\treturn err\n}",
"func SendError(w http.ResponseWriter, status int, errMsg string) {\n header(w, status)\n data := ErrJson {\n Status: status,\n Error: errMsg,\n }\n json.NewEncoder(w).Encode(data)\n}",
"func writeServiceError(w http.ResponseWriter) {\n\t// TODO log error\n\tw.WriteHeader(http.StatusServiceUnavailable)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(response{\"Fail connection on DB.\"})\n}",
"func respondError(writer http.ResponseWriter, err string) {\n\twriter.WriteHeader(http.StatusInternalServerError)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tio.WriteString(writer, fmt.Sprintf(`{ \"status\": \"ERROR\", \"problem\": \"%s\"}`, err))\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n w.WriteHeader(http.StatusInternalServerError)\n /* TODO...\n\tswitch err {\n\tcase cargo.ErrUnknown:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\t*/\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func writeApiReqErr(w http.ResponseWriter, status int, message string) {\n\tw.Header().Set(\"Content-Type\", util.ContentTypeTextPlainUTF8)\n\tif status > 0 && status != 200 {\n\t\tw.WriteHeader(status)\n\t} else {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tw.Write([]byte(message))\n}",
"func errorHandler(w http.ResponseWriter, req *http.Request, status int) {\n w.WriteHeader(status)\n if status == http.StatusNotFound {\n e := Error{ErrMsg: \"MessageResponse not found\", ErrCode:http.StatusNotFound}\n json.NewEncoder(w).Encode(e)\n }\n if status == http.StatusBadRequest {\n e := Error{ErrMsg: \"Cannot process your request\", ErrCode:http.StatusBadRequest}\n json.NewEncoder(w).Encode(e)\n }\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tif err == nil {\n\t\tpanic(\"encodeError with nil error\")\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\t// Write actual error code\n\tcode := codeFrom(err)\n\tw.WriteHeader(code)\n\n\t// write out the error message\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"code\": code,\n\t\t\"message\": err.Error(),\n\t})\n}",
"func writeErr(w http.ResponseWriter, err error) {\n\twriteResponse(w, &authorization.Response{Err: err.Error()})\n}",
"func writeErrorResponse(w http.ResponseWriter, errorMsg string) {\n\tresponse := Response{false, []common.Bike{}, errorMsg}\n\twriteResponse(w, response)\n}",
"func sendJsonError(response http.ResponseWriter, status int, message string) {\n\toutput := map[string]string{\n\t\t\"status\": \"error\",\n\t\t\"message\": message,\n\t}\n\n\tjsonBytes, err := json.Marshal(output)\n\tif err != nil {\n\t\tlog.Errorf(\"Error encoding json error response: %s\", err.Error())\n\t\thttp.Error(response, \"Interval server error\", 500)\n\t\treturn\n\t}\n\n\tresponse.Header().Set(\"Content-Type\", \"application/json\")\n\tresponse.WriteHeader(status)\n\t_, err = response.Write(jsonBytes)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to write JSON error: %s\", err)\n\t}\n}",
"func Err(w http.ResponseWriter, e *ae.Error) {\n\tlog.Err(*e)\n\tlog.Err(string(debug.Stack()))\n\tjsonData, err := json.Marshal(e)\n\tif err != nil {\n\t\tfmt.Fprint(w, \"Error\")\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.WriteHeader(e.HttpStatus)\n\tfmt.Fprintln(w, string(jsonData))\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tswitch err {\n\tcase cargo.ErrUnknown:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func (w *RESPWriter) writeError(err error) {\n\tw.buf.WriteRune(respERROR)\n\tif err != nil {\n\t\tw.buf.WriteString(err.Error())\n\t}\n\tw.buf.Write(DELIMS)\n}",
"func writeJSON(w http.ResponseWriter, obj interface{}) {\n\tif json.NewEncoder(w).Encode(obj) != nil {\n\t\thttp.Error(w, \"Failed to encode response\", http.StatusInternalServerError)\n\t}\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tswitch err {\n\tcase data.ErrUnknown:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func writeErrorResponse(w http.ResponseWriter, response interface{}, acceptsType contentType) []byte {\n\tvar bytesBuffer bytes.Buffer\n\tvar encoder encoder\n\t// write common headers\n\twriteCommonHeaders(w, getContentString(acceptsType))\n\tswitch acceptsType {\n\tcase xmlType:\n\t\tencoder = xml.NewEncoder(&bytesBuffer)\n\tcase jsonType:\n\t\tencoder = json.NewEncoder(&bytesBuffer)\n\t}\n\tencoder.Encode(response)\n\treturn bytesBuffer.Bytes()\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tswitch err {\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tswitch err {\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func (c *Operation) writeErrorResponse(rw http.ResponseWriter, status int, msg string) {\n\tlogger.Errorf(msg)\n\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write([]byte(msg)); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}",
"func writeJSON(w http.ResponseWriter, data interface{}) error {\n\tif err, ok := data.(error); ok {\n\t\tdata = struct{ Error string }{err.Error()}\n\t\tw.WriteHeader(400)\n\t}\n\to, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(o)\n\treturn err\n}",
"func (h *Encoder) WriteErrResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) {\n\tcontentType := \"application/json; charset=utf-8\"\n\n\te := jsonEncoderPool.Get().(*jsonEncoder) // nolint:errcheck\n\n\te.buf.Reset()\n\tdefer jsonEncoderPool.Put(e)\n\n\terr := e.enc.Encode(response)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(e.buf.Len()))\n\tw.Header().Set(\"Content-Type\", contentType)\n\tw.WriteHeader(statusCode)\n\n\tif r.Method == http.MethodHead {\n\t\treturn\n\t}\n\n\t_, err = w.Write(e.buf.Bytes())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\treturn\n\t}\n}",
"func (app *application) errorResponse(w http.ResponseWriter, r *http.Request, status int, message interface{}) {\n\tresp := clientResponse{\"error\": message}\n\t// Write the response using the helper method.\n\terr := app.writeJSON(w, status, resp)\n\tif err != nil {\n\t\tapp.logError(r, err)\n\t\tw.WriteHeader(500)\n\t}\n}",
"func errorResponse(w http.ResponseWriter, reason string, statusCode int) error {\n\tw.WriteHeader(statusCode)\n\terrResponse := ErrorResponse{Err: reason}\n\terr := json.NewEncoder(w).Encode(errResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func writeJSON(w http.ResponseWriter, code int, v interface{}) error {\n\t// Set content type as json\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t// write the HTTP status code\n\tw.WriteHeader(code)\n\n\t// Write the Json output\n\treturn json.NewEncoder(w).Encode(v)\n}",
"func fail(res http.ResponseWriter, code int, message string) {\n\tres.WriteHeader(code)\n\tbody, _ := json.Marshal(ErrorResponse{message})\n\tres.Write(body)\n}",
"func writeJsonRespStructured(w http.ResponseWriter, err error, respBody interface{}, status int, apiErrors []*util.ApiError) {\n\tresponse := ResponseV2{}\n\tresponse.Code = status\n\tresponse.Status = http.StatusText(status)\n\tif err == nil {\n\t\tresponse.Result = respBody\n\t} else {\n\t\tresponse.Errors = apiErrors\n\t}\n\tb, err := json.Marshal(response)\n\tif err != nil {\n\t\tutil.GetLogger().Error(\"error in marshaling err object\", err)\n\t\tstatus = 500\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(b)\n}",
"func respondErr(w http.ResponseWriter, apiErr APIErrorRoot) {\n\tlog.Error(apiErr.Body.Code, \"\\t\", apiErr.Body.Message)\n\t// set the response code\n\tw.WriteHeader(apiErr.Body.Code)\n\t// Output API Erorr object to JSON\n\toutput, _ := json.MarshalIndent(apiErr, \"\", \" \")\n\tw.Write(output)\n}",
"func (h *Handler) error(w http.ResponseWriter, error string, code int) {\n\t// TODO: Return error as JSON.\n\thttp.Error(w, error, code)\n}",
"func errorEncoder(_ context.Context, err error, w http.ResponseWriter) {\n\tw.WriteHeader(err2code(err))\n\tjson.NewEncoder(w).Encode(errorWrapper{Error: err.Error()})\n}",
"func (s *Status) Write(w http.ResponseWriter) error {\n\tw.WriteHeader(s.Code)\n\tswitch ct := w.Header().Get(\"Content-Type\"); ct {\n\tcase \"application/json\":\n\t\t_, err := fmt.Fprintf(w, `{\"error\":%q}`, s.String())\n\t\treturn err\n\tdefault:\n\t\t_, err := io.WriteString(w, s.String())\n\t\treturn err\n\t}\n}",
"func invalid_request(w http.ResponseWriter, statCode int, message string){\n w.Header().Set(\"Content-Type\", \"application/json\")\n switch statCode {\n case 400: w.WriteHeader(http.StatusBadRequest)\n case 403: w.WriteHeader(http.StatusForbidden)\n case 404: w.WriteHeader(http.StatusNotFound)\n default: w.WriteHeader(http.StatusNotFound)\n }\n err := Error {\n StatusCode: statCode,\n ErrorMessage: message}\n json.NewEncoder(w).Encode(err)\n}",
"func writeJSON(w http.ResponseWriter, o interface{}) {\n\tw.WriteHeader(http.StatusOK)\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\te.Encode(o)\n}",
"func header(w http.ResponseWriter, status int) {\n w.Header().Add(\"Content-Type\", \"application/json\")\n w.WriteHeader(status)\n}",
"func writeJSON(w http.ResponseWriter, status int, data mapStringInterface) error {\n\tjs, err := json.Marshal(data)\n\t//js, err := json.MarshalIndent(data, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs = append(js, '\\n')\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(js)\n\treturn nil\n}",
"func (l *HTTPLib) JSON(w http.ResponseWriter, code int, data interface{}) {\n\tresp := newResponse(code, data)\n\tw.WriteHeader(code)\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{\"data\":\"internal server error\"}`))\n\t\t// http.Error(w, `{\"data\":\"internal server error\"}`, http.StatusInternalServerError)\n\t\treturn\n\t}\n}",
"func messageResponse(w http.ResponseWriter, s int, m string) {\n\tw.WriteHeader(s)\n\tif m == \"\" {\n\t\treturn\n\t}\n\n\tb, err := json.Marshal(errorMessage{m})\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Could not wrap message in JSON: %s\", m)\n\t\treturn\n\t}\n\tw.Write(b)\n}",
"func (app *application) writeJSON(w http.ResponseWriter, status int, data envelope, headers http.Header) error {\n\t// Encode the data to JSON, return error if any.\n\tjs, err := json.MarshalIndent(data, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Append a newline to make it easier to view in terminal applications.\n\tjs = append(js, '\\n')\n\n\t// Loop through the header map and add each header to the http.ResponseWriter header map.\n\tfor key, value := range headers {\n\t\tw.Header()[key] = value\n\t}\n\n\t// Add the \"Content-Type: application/json\" header.\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t// Write status code.\n\tw.WriteHeader(status)\n\tw.Write(js)\n\n\treturn nil\n}",
"func writeJSONResponse(w http.ResponseWriter, body interface{}) {\n\tbs, err := json.Marshal(body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif _, err := w.Write(bs); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}",
"func writeJSONResponse(w http.ResponseWriter, body interface{}) {\n\tbs, err := json.Marshal(body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif _, err := w.Write(bs); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}",
"func jsonResponse(rw http.ResponseWriter, code int, msg string) {\n\trw.Header().Set(\"Content-Type\", \"application/json\")\n\trw.WriteHeader(code)\n\trw.Write([]byte(fmt.Sprintf(`{\"message\":\"%s\"}`, msg)))\n}",
"func writeJsonResponse(w http.ResponseWriter, content *[]byte, code int) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(*content)\n}",
"func countErrResponse(w http.ResponseWriter) {\n\terrResponse := errorResponse{\n\t\t\"invalid count\",\n\t\t\"The number of dice requested is invalid.\",\n\t}\n\tw.WriteHeader(http.StatusNotAcceptable)\n\tenc := json.NewEncoder(w)\n\tjsonEncode(w, enc, errResponse)\n\n\treturn\n}",
"func writeJSON(w http.ResponseWriter, statusCode int, response interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\n\tencoder := json.NewEncoder(w)\n\tencoder.Encode(response)\n}",
"func errJson(err error) map[string]string {\n\t//return fmt.Sprintf(`{error: \"%s\"}`, err)\n\treturn map[string]string{\n\t\t\"error\": err.Error(),\n\t}\n}",
"func writeJSON(w http.ResponseWriter, v interface{}, status int) error {\n\t// set application/json header\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tw.WriteHeader(status)\n\n\t// json encoder\n\tvar encoder = json.NewEncoder(w)\n\n\t// encodes interface to json\n\treturn encoder.Encode(v)\n}",
"func Write(w http.ResponseWriter, result interface{}, status int) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tjson.NewEncoder(w).Encode(result)\n}",
"func (handler Handler) WriteJSONHeader(w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\thandler.wroteHeader = true\n}",
"func Write(w http.ResponseWriter, r *http.Request, data interface{}) *Error {\n\tjs, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn Errorf(err, http.StatusInternalServerError, http.StatusText(http.StatusInternalServerError))\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\tif data != nil {\n\t\tw.Write(js)\n\t}\n\n\treturn nil\n}",
"func handleErr(w http.ResponseWriter, statusCode int, msg string) {\n\tw.WriteHeader(statusCode)\n\tw.Write([]byte(msg + \"\\n\"))\n}",
"func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}",
"func sidesErrResponse(w http.ResponseWriter) {\n\terrResponse := errorResponse{\n\t\t\"invalid sides\",\n\t\t\"The dice requested is not available.\",\n\t}\n\tw.WriteHeader(http.StatusNotAcceptable)\n\tenc := json.NewEncoder(w)\n\tjsonEncode(w, enc, errResponse)\n\n\treturn\n}",
"func sendErrorMessage(w io.Writer, err error) {\n\tif err == nil {\n\t\tpanic(errors.Wrap(err, \"Cannot send error message if error is nil\"))\n\t}\n\tfmt.Printf(\"ERROR: %+v\\n\", err)\n\twriteJSON(w, map[string]string{\n\t\t\"status\": \"error\",\n\t\t\"message\": err.Error(),\n\t})\n}",
"func writeResponse(w http.ResponseWriter, body interface{}, e error) error {\n\tvar (\n\t\tpayload []byte\n\t\tresponse Response\n\t\terr error\n\t)\n\tresponse = Response{\n\t\tResult: body,\n\t\tError: ResponseError{},\n\t}\n\tif e != nil {\n\t\tresponse.Error.Error = fmt.Sprintf(\"%v\", e)\n\t}\n\tpayload, err = json.MarshalIndent(response, \"\", \"\\t\")\n\n\tif !util.ErrorCheck(err) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(payload)\n\t}\n\treturn err\n}",
"func respond(writer http.ResponseWriter, data interface{}, err *errors.MyError) {\n\twriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\tif err != nil {\n\t\tlog.Println(err.Err)\n\t\terrCode := err.ErrorCode\n\t\tif errCode == 0 {\n\t\t\terrCode = http.StatusInternalServerError\n\t\t}\n\t\thttp.Error(writer, err.Err, errCode)\n\t} else {\n\t\tjson.NewEncoder(writer).Encode(data)\n\t}\n}",
"func ServerErrResponse(error string, writer http.ResponseWriter) {\n\ttype servererrdata struct {\n\t\tStatusCode int\n\t\tMessage string\n\t}\n\ttemp := &servererrdata{StatusCode: 500, Message: error}\n\n\t//Send header, status code and output to writer\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.WriteHeader(http.StatusInternalServerError)\n\tjson.NewEncoder(writer).Encode(temp)\n}",
"func writeJSON(w http.ResponseWriter, code int, value interface{}) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tjson.NewEncoder(w).Encode(value)\n}",
"func sendErrorJsonGenerator(c *gin.Context, err error, code int) {\n\tc.JSON(code, gin.H{\n\t\t\"status\": false,\n\t\t\"error\": err.Error(),\n\t\t\"code\": http.StatusText(code),\n\t})\n}",
"func write(w http.ResponseWriter, status int, payload interface{}) {\n\tw.WriteHeader(status)\n\traw, err := json.Marshal(payload)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, _ = w.Write(raw)\n}",
"func jsonResponse(w http.ResponseWriter, d interface{}, c int) {\n\tdj, err := json.Marshal(d)\n\tif err != nil {\n\t\thttp.Error(w, \"Error creating JSON response\", http.StatusInternalServerError)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(c)\n\tfmt.Fprintf(w, \"%s\", dj)\n}",
"func Write(w http.ResponseWriter, result interface{}, status int) error {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\treturn json.NewEncoder(w).Encode(result)\n}",
"func errorf(w http.ResponseWriter, code int, format string, a ...interface{}) {\n\tvar out struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tout.Code = code\n\tout.Message = fmt.Sprintf(format, a...)\n\n\tb, err := json.Marshal(out)\n\tif err != nil {\n\t\thttp.Error(w, `{\"code\": 500, \"message\": \"Could not format JSON for original message.\"}`, 500)\n\t\treturn\n\t}\n\n\thttp.Error(w, string(b), code)\n}",
"func errorf(w http.ResponseWriter, code int, format string, a ...interface{}) {\n\tvar out struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tout.Code = code\n\tout.Message = fmt.Sprintf(format, a...)\n\n\tb, err := json.Marshal(out)\n\tif err != nil {\n\t\thttp.Error(w, `{\"code\": 500, \"message\": \"Could not format JSON for original message.\"}`, 500)\n\t\treturn\n\t}\n\n\thttp.Error(w, string(b), code)\n}",
"func WriteErr(w http.ResponseWriter, r *http.Request, err error, opts ...int) {\n\tif httpErr := Err2HTTPErr(err); httpErr != nil {\n\t\tstatus := http.StatusBadRequest\n\t\tif len(opts) > 0 && opts[0] > status {\n\t\t\tstatus = opts[0]\n\t\t}\n\t\thttpErr.Status = status\n\t\thttpErr.write(w, r, len(opts) > 1 /*silent*/)\n\t} else if errors.Is(err, &ErrNotFound{}) {\n\t\tif len(opts) > 0 {\n\t\t\t// Override the status code.\n\t\t\topts[0] = http.StatusNotFound\n\t\t} else {\n\t\t\t// Add status code if not set.\n\t\t\topts = append(opts, http.StatusNotFound)\n\t\t}\n\t\tWriteErrMsg(w, r, err.Error(), opts...)\n\t} else {\n\t\tWriteErrMsg(w, r, err.Error(), opts...)\n\t}\n}",
"func Err(w http.ResponseWriter, statusCode int, err error) {\n\tpresentation.JSON(w, statusCode, struct {\n\t\tErr string `json:\"erro\"`\n\t}{\n\t\tErr: err.Error(),\n\t})\n}",
"func (app *application) writeJSON(w http.ResponseWriter, status int, data interface{}) error {\n\tjs, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs = append(js, '\\n')\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(js)\n\n\treturn nil\n}",
"func (ctx *Context) WriteJSON(data interface{}) *HTTPError {\n\tctx.setDefaultHeaders()\n\tjson, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn serverError(err)\n\t}\n\tctx.ContentType(\"application/json\", true)\n\t// set the default content-type\n\tctx.WriteHeader(http.StatusOK)\n\tif _, err := ctx.ResponseWriter.Write(json); err != nil {\n\t\treturn serverError(err)\n\t}\n\treturn nil\n}",
"func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}",
"func writeJSONResponse(payload interface{}, statusCode int, w http.ResponseWriter) {\n\t// Headers must be set before call WriteHeader or Write. see https://golang.org/pkg/net/http/#ResponseWriter\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(statusCode)\n\tif payload != nil {\n\t\ttypes.PanicIfError(json.NewEncoder(w).Encode(payload))\n\t}\n\n\tlogrus.Infof(\"%d Response sent. Payload: %#v\", statusCode, payload)\n}",
"func errWriter(w http.ResponseWriter, httpSts int, err error) {\n\tlog.Print(err)\n\thttp.Error(w, http.StatusText(httpSts), httpSts)\n}",
"func WriteError(w http.ResponseWriter, err string, status int) {\n\terrCust := response.CustomError{\n\t\tMessage: err,\n\t\tHTTPCode: status,\n\t}\n\terrs := []error{errCust}\n\tres := response.BuildError(errs)\n\tresponse.Write(w, res, status)\n}",
"func ProcessResponseError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tswitch err {\n\tcase cargo.ErrUnknown:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase servicecommons.ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func write(resp *Response, w http.ResponseWriter) {\n\tjs, _ := json.Marshal(resp)\n\tfmt.Fprint(w, string(js))\n}",
"func writeJSON(w http.ResponseWriter, thing interface{}, indent string) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tencoder := json.NewEncoder(w)\n\tencoder.SetIndent(\"\", indent)\n\tif err := encoder.Encode(thing); err != nil {\n\t\tapiLog.Warnf(\"JSON encode error: %v\", err)\n\t}\n}",
"func (app *application) writeJSON(w http.ResponseWriter, status int, data interface{}, headers http.Header) error {\n\t// Encode the data to JSON, returning the error if there was one.\n\tjs, err := json.Marshal(data)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Append a newline to make it easier to view in terminal applications.\n\tjs = append(js, '\\n')\n\n\t// At this point, we know that we won't encounter any more errors before writing the\n\t// response, so it's safe to add any headers that we want to include. We loop\n\t// through the header map and add each header to the http.ResponseWriter header map.\n\t// Note that it's OK if the provided header map is nil. Go doesn't throw an error\n\t// if you try to range over (or generally, read from) a nil map.\n\tfor key, value := range headers {\n\t\tw.Header()[key] = value\n\t}\n\n\t// Add the \"Content-Type: application/json\" header, then write the status code and\n\t// JSON response.\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(js)\n\n\treturn nil\n}",
"func writeJSON(w http.ResponseWriter, data interface{}) {\n\tb, err := json.Marshal(data)\n\tif err != nil {\n\t\twriteErr(w, 500, err)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(b)\n}",
"func (m *manager) sendErr(w http.ResponseWriter, errorCode int64, errorData interface{}) {\n\tw.WriteHeader(http.StatusInternalServerError)\n\tenc := json.NewEncoder(w)\n\tenc.Encode(map[string]interface{}{\n\t\t\"ok\": false,\n\t\t\"error_code\": errorCode,\n\t\t\"error_data\": errorData,\n\t})\n}",
"func HttpErrResponse(w http.ResponseWriter, statusCode int, obj interface{}) {\n\tw.Header().Set(rest.HEADER_RESPONSE_STATUS, strconv.Itoa(statusCode))\n\tw.Header().Set(rest.HEADER_CONTENT_TYPE, rest.CONTENT_TYPE_TEXT)\n\tw.WriteHeader(statusCode)\n\tif obj == nil {\n\t\treturn\n\t}\n\n\tobjJSON, err := json.Marshal(obj)\n\tif err != nil {\n\t\tlog.Errorf(nil, \"Http error response marshaling failed.\")\n\t\treturn\n\t}\n\tw.Header().Set(rest.HEADER_CONTENT_TYPE, rest.CONTENT_TYPE_JSON)\n\t_, err = fmt.Fprintln(w, string(objJSON))\n\tif err != nil {\n\t\tlog.Errorf(nil, \"Send http response fail.\")\n\t}\n}",
"func writeError(w http.ResponseWriter, message string){\n\ttype Out struct {\n\t\tMessage string\n\t}\n\n\t/* Write HTML */\n\tdata := Out{message}\n\ttmpl := template.Must(template.ParseFiles(\"static/html/error.html\", \"static/html/top.html\", \"static/html/head.html\"))\n\ttmpl.ExecuteTemplate(w, \"error\", data)\n}",
"func output500Error(r render.Render, err error) {\n\tfmt.Println(err)\n\tr.JSON(500, map[string]interface{}{\"error\": err.Error()})\n}",
"func (r *response) write(c context.Context, w http.ResponseWriter) {\n\tbody := r.body\n\tswitch r.code {\n\tcase codes.Internal, codes.Unknown:\n\t\t// res.body is error message.\n\t\tlogging.Fields{\n\t\t\t\"code\": r.code,\n\t\t}.Errorf(c, \"%s\", body)\n\t\tbody = []byte(\"Internal Server Error\\n\")\n\t}\n\n\tfor h, vs := range r.header {\n\t\tw.Header()[h] = vs\n\t}\n\tw.Header().Set(prpc.HeaderGRPCCode, strconv.Itoa(int(r.code)))\n\n\tstatus := r.status\n\tif status == 0 {\n\t\tstatus = codeStatus(r.code)\n\t}\n\tw.WriteHeader(status)\n\n\tif _, err := w.Write(body); err != nil {\n\t\tlogging.WithError(err).Errorf(c, \"Could not respond\")\n\t\t// The header is already written. There is nothing more we can do.\n\t\treturn\n\t}\n}",
"func Test_JsonWriter__FailedJsonMarhsal(t *testing.T) {\n\tres := &Response{\n\t\tCode: http.StatusBadRequest,\n\t\tData: map[float64]int{2.5: 1},\n\t}\n\n\tw := httptest.NewRecorder()\n\tJsonWriter(w, res)\n\tassert.Equal(t, \"{error: \\\"json: unsupported type: map[float64]int\\\"}\", w.Body.String())\n\tassert.Equal(t, http.StatusBadRequest, w.Code)\n\tassert.Equal(t, \"application/json; charset=utf-8\", w.Header().Get(\"Content-Type\"))\n}",
"func renderErrors(w http.ResponseWriter, errors map[string]string, code int) {\n\tw.WriteHeader(code)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tencoder := json.NewEncoder(w)\n\tif err := encoder.Encode(&errors); err != nil {\n\t\thttp.Error(w, fmt.Sprintf(\"Cannot encode response data\"), 500)\n\t}\n}",
"func JsonMessage(writer http.ResponseWriter, info_code int, info_message string) {\n\twriter.WriteHeader(info_code)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.Write([]byte(\"{\\\"message\\\": \\\"\" + info_message + \"\\\"}\"))\n}",
"func errHandle(w http.ResponseWriter, longmsg string, shortmsg string, status int) {\n\tlog.Errorf(longmsg)\n\terrorResponse := ErrorResponse{\n\t\tStatus: status,\n\t\tErrorMessage: shortmsg,\n\t}\n\tdata, _ := json.Marshal(errorResponse)\n\tresponse := JSONResponse{}\n\tresponse.status = http.StatusUnauthorized\n\tresponse.data = data\n\tresponse.Write(w)\n}",
"func writeJSON(w http.ResponseWriter, v interface{}) error {\n\t// Indent the JSON so it's easier to read for hackers.\n\tdata, err := json.MarshalIndent(v, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tw.Header().Set(\"content-type\", \"application/json; charset=utf-8\")\n\t_, err = w.Write(data)\n\treturn err\n}",
"func Problem(w http.ResponseWriter, err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}"
] | [
"0.7448407",
"0.721666",
"0.7147713",
"0.709014",
"0.69657683",
"0.695088",
"0.6933193",
"0.6830498",
"0.6773857",
"0.6768427",
"0.66996765",
"0.6683061",
"0.6680793",
"0.65883744",
"0.65724826",
"0.6554943",
"0.6554021",
"0.6543048",
"0.6539291",
"0.65299034",
"0.6495163",
"0.64757407",
"0.64165074",
"0.64040226",
"0.6394948",
"0.6389173",
"0.6373016",
"0.63707674",
"0.63509977",
"0.6348309",
"0.6340584",
"0.6340584",
"0.63042605",
"0.62901884",
"0.62623113",
"0.62265337",
"0.6192809",
"0.6188105",
"0.6164221",
"0.6164125",
"0.612748",
"0.61145616",
"0.6105051",
"0.6093208",
"0.60871285",
"0.60538626",
"0.6048328",
"0.6047693",
"0.60338235",
"0.602968",
"0.60240763",
"0.6020815",
"0.6020815",
"0.6009467",
"0.5991774",
"0.59839",
"0.5969807",
"0.59585065",
"0.5951137",
"0.593711",
"0.59369355",
"0.59247476",
"0.5920769",
"0.5918716",
"0.5914548",
"0.5907964",
"0.5906694",
"0.5905546",
"0.5898173",
"0.5887944",
"0.5880079",
"0.5878507",
"0.5854948",
"0.58515227",
"0.58500975",
"0.58500975",
"0.5849432",
"0.5846599",
"0.58162016",
"0.581404",
"0.5811421",
"0.580906",
"0.58016926",
"0.5801016",
"0.5789283",
"0.57885045",
"0.57797",
"0.57696486",
"0.57677907",
"0.5767215",
"0.5763704",
"0.5755728",
"0.57494336",
"0.57383156",
"0.5735044",
"0.57210374",
"0.5708173",
"0.5706905",
"0.570261",
"0.5697638"
] | 0.7304042 | 1 |
write a success message with header to the output | func JsonMessage(writer http.ResponseWriter, info_code int, info_message string) {
writer.WriteHeader(info_code)
writer.Header().Set("Content-Type", "application/json")
writer.Write([]byte("{\"message\": \"" + info_message + "\"}"))
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Success(w io.Writer, format string, args ...any) {\n\tformat = strings.TrimRight(format, \"\\r\\n\") + \"\\n\"\n\tfmt.Fprintf(w, \"\\n\"+Wrap(BoldGreen(\"SUCCESS: \")+format, DefaultTextWidth)+\"\\n\", args...)\n}",
"func writeSuccess(w http.ResponseWriter, targetLanguage language.Tag, targetPhrase string) {\n\theaders := w.Header()\n\n\theaders.Set(\"Content-Type\", \"text/plain\")\n\theaders.Set(\"Content-Language\", targetLanguage.String())\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, targetPhrase)\n}",
"func (srv *Service) WriteOk(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusOK)\n}",
"func WriteSuccess(w http.ResponseWriter, data interface{}, status int) {\n\tres := response.BuildSuccess(data, response.MetaInfo{HTTPStatus: status})\n\tresponse.Write(w, res, status)\n}",
"func Success(w http.ResponseWriter, message string, code int) {\n\tif code == 0 {\n\t\tcode = http.StatusOK\n\t}\n\tw.WriteHeader(code)\n\tjson.NewEncoder(w).Encode(map[string]string{\"success\": message})\n}",
"func Success(w http.ResponseWriter) {\n\tw.Write(JSON(SuccessResponse{Message: \"success\", Code: 200}))\n}",
"func PrintSuccessMessage(w http.ResponseWriter, obj interface{}) {\n\tmodel, err := json.Marshal(obj)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(model)\n}",
"func writeSuccessResponse(w http.ResponseWriter, bikes []common.Bike) {\n\tresponse := Response{true, bikes, \"\"}\n\twriteResponse(w, response)\n}",
"func writeSuccessResponse(w http.ResponseWriter, data interface{}) {\n\tw.WriteHeader(http.StatusOK)\n\tbs, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\tif _, err := w.Write(bs); err != nil {\n\t\tlog.Error(err)\n\t}\n}",
"func (r *Router) Success(ctx context.Context, w http.ResponseWriter, code int) {\n\treqID := r.GetRequestID(ctx)\n\n\tr.setDefaultHeaders(ctx, w)\n\n\tif code != http.StatusOK {\n\t\tw.WriteHeader(code)\n\t}\n\n\tr.logger.Debugw(\"response\",\n\t\t\"request_id\", reqID,\n\t\t\"status_code\", code,\n\t)\n}",
"func SuccessResponse(msg string, writer http.ResponseWriter) {\n\ttype errdata struct {\n\t\tStatusCode int\n\t\tMessage string\n\t}\n\ttemp := &errdata{StatusCode: 200, Message: msg}\n\n\t//Send header, status code and output to writer\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(writer).Encode(temp)\n}",
"func Success(format string, a ...interface{}) {\n\tif Level >= 3 {\n\t\ta, w := extractLoggerArgs(format, a...)\n\t\ts := fmt.Sprintf(label(format, SuccessLabel), a...)\n\n\t\tif Color {\n\t\t\tw = color.Output\n\t\t\ts = color.GreenString(s)\n\t\t}\n\n\t\tfmt.Fprintf(w, s)\n\t}\n}",
"func OK(w http.ResponseWriter, data interface{}, message string) {\n\tsuccessResponse := BuildSuccess(data, message, MetaInfo{HTTPStatus: http.StatusOK})\n\tWrite(w, successResponse, http.StatusOK)\n}",
"func Success(t *testing.T, msg string, args ...interface{}) {\n\tm := fmt.Sprintf(msg, args...)\n\tt.Log(fmt.Sprintf(\"\\t %-80s\", m), Succeed)\n}",
"func PrintSuccess() {\n\telapsed := endTime.Sub(startTime)\n\tfmt.Print(\"\\n\")\n\tlog.Infof(\"Ingestion completed in %v\", elapsed)\n}",
"func WriteSuccessResponse(w http.ResponseWriter) {\n\tjson, _ := json.Marshal(NewSuccessResponse())\n\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Add(\"content-type\", \"application/json\")\n\tw.Write(json)\n}",
"func Success(a interfaces.AssumeCredentialProcess, emoji string, prefix string, message string) {\n\ts := a.GetDestination()\n\n\tformatted := format(a, textColorSuccess, emoji, prefix, message)\n\tfmt.Fprint(s, formatted)\n\n}",
"func UploadSuccessHandler(w http.ResponseWriter, r *http.Request) {\n\t_, _ = io.WriteString(w, \"upload succeed\")\n}",
"func (t *Test) PrintSuccess() {\n\tlog.Successf(\"ok: %s\", t.Name)\n}",
"func respond_with_success(w http.ResponseWriter,\n\tstatus int,\n\tmessage string) {\n\tdetail := fmt.Sprintf(\"Success (%d). Sending response: %s\", status, message)\n\tif logutil.GPS_DEBUG > 0 {\n\t\tlogutil.LogRestResponse(detail)\n\t}\n\tadd_json_header(w)\n\tset_response_status(w, status)\n\tfmt.Fprintf(w, \"%s\\n\", message)\n}",
"func (v *View) Success(w http.ResponseWriter, r *http.Request, yield interface{}, message string) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\n\tvar vd Data\n\tvd.User.ID = r.Header.Get(\"userID\")\n\tadmin, err := strconv.ParseBool(r.Header.Get(\"admin\"))\n\tif err != nil {\n\t\tvd.User.Admin = false\n\t} else {\n\t\tvd.User.Admin = admin\n\t}\n\tvd.Yield = yield\n\tvd.Messages.Success = message\n\n\tv.Template.ExecuteTemplate(w, v.Layout, vd)\n}",
"func (s *STS) sendSuccessfulResponse(w http.ResponseWriter, tokenData []byte) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tif _, err := w.Write(tokenData); err != nil {\n\t\tlog.Printf(\"failure in sending STS success response: %v\", err)\n\t\treturn\n\t}\n}",
"func healthcheckok(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(200)\n}",
"func (c *Context) Status(code int) {\n\tc.Writer.WriteHeader(code)\n}",
"func Successf(a interfaces.AssumeCredentialProcess, emoji string, prefix string, message string, args ...interface{}) {\n\ts := a.GetDestination()\n\n\tformatted := format(a, textColorSuccess, emoji, prefix, message)\n\tfmt.Fprintf(s, formatted, args...)\n\n}",
"func logSuccess(a *Attempt) {\n\t/* Print message to log */\n\tlog.Printf(\"[%v] SUCCESS %v@%v - %v\", a.Tasknum, a.Config.User, a.Host,\n\t\ta.Pass)\n\t/* Write message to file */\n\tgo appendLine(*gc.Sfile, fmt.Sprintf(\"%v@%v %v\\n\", a.Config.User,\n\t\ta.Host, a.Pass))\n}",
"func Success(w http.ResponseWriter, code int, msg string, data interface{}) error {\n\treturn sendResponse(w, Resp{SUCCESS, code, msg, data, SuccessHttpCode})\n}",
"func (response BasicJSONResponse) Success(writer http.ResponseWriter) {\n\tSuccess(writer, response)\n}",
"func PrintSuccess(message string) {\n\tfmt.Printf(green + \"✔ \" + message + noFormat + \"\\n\")\n}",
"func header(req *restful.Request, resp *restful.Response) {\n\tresp.WriteHeader(200)\n}",
"func Success(message string, scope string) error {\n\treturn printLine(\"✔\", color.FgHiGreen, message, scope)\n}",
"func Successln(a interfaces.AssumeCredentialProcess, emoji string, prefix string, message string) {\n\ts := a.GetDestination()\n\n\tformatted := format(a, textColorSuccess, emoji, prefix, message)\n\tfmt.Fprintln(s, formatted)\n\n}",
"func Success(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"Operation was fine\")\n}",
"func (c *Context) Success(name string) {\n\tc.HTML(http.StatusOK, name)\n}",
"func (o *PingOK) WriteResponse(rw http.ResponseWriter, producer httpkit.Producer) {\n\n\trw.WriteHeader(200)\n}",
"func (m *Main) PrintSuccess() {\n\tfmt.Println(\"\")\n\tlog.Successf(\"ok: successfully tested %s\", m.Name)\n\tfmt.Println(\"\")\n}",
"func (resp *Response) StatusOk(w http.ResponseWriter) {\n\tresp.Ok = true\n\twrite(resp, w)\n}",
"func send(status int, out http.ResponseWriter, format string, args ...interface{}) {\n\tout.WriteHeader(status)\n\n\tif len(args) == 0 {\n\t\tfmt.Fprintf(out, format)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(out, format, args...)\n}",
"func SuccessStatusEvent(w io.Writer, fmtstr string, a ...interface{}) {\n\tif runtime.GOOS == windowsOS {\n\t\tfmt.Fprintf(w, \"%s\\n\", fmt.Sprintf(fmtstr, a...))\n\t} else {\n\t\tfmt.Fprintf(w, \"✅ %s\\n\", fmt.Sprintf(fmtstr, a...))\n\t}\n}",
"func writeSuccessResponseXML(w http.ResponseWriter, response []byte) {\n\twriteResponse(w, http.StatusOK, response, mimeXML)\n}",
"func Success(v ...interface{}) {\n\tprint(SuccessFont)\n\tfmt.Print(v...)\n\tterminal.Reset()\n}",
"func (o *CreateProgramOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func Success(v ...interface{}) string {\n\treturn logr.Success(v...)\n}",
"func UploadSuccessHandler(w http.ResponseWriter, r *http.Request) {\n\tio.WriteString(w, \"uploaded successfully!\")\n}",
"func (o *CreateFacilityUsersOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}",
"func Success(file *os.File) *log.Logger {\n\tLogSucc := log.New(file, \"SUCCESS: \", log.Ldate|log.Ltime|log.Lshortfile)\n\treturn LogSucc\n}",
"func (w *responseWriter) WriteHeader(code int) {\n\tw.status = code\n\tw.started = true\n\tw.writer.WriteHeader(code)\n}",
"func (w *responseWriter) WriteHeader(code int) {\n\tw.status = code\n\tw.started = true\n\tw.writer.WriteHeader(code)\n}",
"func Success(ctx ...interface{}) {\n\tlogNormal(successStatus, time.Now(), ctx...)\n}",
"func psuccess(format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Printf(CGREEN+format+CEND+\"\\n\", a...)\n}",
"func respondOk(writer http.ResponseWriter) {\n\twriter.WriteHeader(http.StatusOK)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tio.WriteString(writer, `{ \"status\": \"OK\" }`)\n}",
"func (o *GetPingOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}",
"func Success(format string, a ...interface{}) {\n\tprefix := green(succ)\n\tlog.Println(prefix, fmt.Sprintf(format, a...))\n}",
"func (o *CreateUserOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func (o *GetProjectSummaryOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tpayload := o.Payload\n\tif err := producer.Produce(rw, payload); err != nil {\n\t\tpanic(err) // let the recovery middleware deal with this\n\t}\n}",
"func CreateSuccessResponse(w http.ResponseWriter, data interface{}) {\n\tif data != nil {\n\t\tbytes, err := json.Marshal(data)\n\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\t\tlog.Println(err)\n\t\t\treturn\n\t\t}\n\n\t\tw.Write(bytes)\n\t}\n}",
"func (a *App) Ok(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tlogerr(w.Write(randomByteSlice(10, \"OK\", \"0123456789abcdef\")))\n}",
"func (l *Logr) Success(v ...interface{}) string {\n\treturn log(S, false, Interfaces(v).SSV(), l.meta)\n}",
"func (o *VerifyEmailTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func (c *customResponseWriter) WriteHeader(status int) {\n\tc.status = status\n\tc.ResponseWriter.WriteHeader(status)\n}",
"func SendSuccess(w http.ResponseWriter, message string) (int, error) {\n\tdata := struct {\n\t\tMessage string `json:\"message\"`\n\t}{\n\t\tMessage: message,\n\t}\n\tpayload, err := json.Marshal(data)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Could not marshal data into payload, %v\", err)\n\t\tlog.Printf(msg)\n\t\treturn 0, errors.New(msg)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tbytesWritten, err := w.Write(payload)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"There was an error sending the response, %v\", err)\n\t\tlog.Printf(msg)\n\t\treturn 0, errors.New(msg)\n\t}\n\n\tlog.Printf(\"Sent %s\", string(payload))\n\n\treturn bytesWritten, nil\n}",
"func (o *BofaChkUpdateOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func (o *CreateTaskCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Location\n\n\tlocation := o.Location.String()\n\tif location != \"\" {\n\t\trw.Header().Set(\"Location\", location)\n\t}\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}",
"func (c *requestContext) ok() {\n\tc.Writer.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tc.Writer.WriteHeader(200)\n\tfmt.Fprintln(c.Writer, \"OK\")\n}",
"func (w *logResponseWritter) WriteHeader(statusCode int) {\n\n\tw.status = statusCode\n\tw.ResponseWriter.WriteHeader(statusCode)\n}",
"func (o *UpdateClusterOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func (o *CreateTCPCheckCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(201)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func (o *VerifyAccountCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(201)\n}",
"func ResponseSuccess(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(JSONSuccess{true})\n}",
"func (o *CreateTCPCheckDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\t// response header Configuration-Version\n\n\tconfigurationVersion := o.ConfigurationVersion\n\tif configurationVersion != \"\" {\n\t\trw.Header().Set(\"Configuration-Version\", configurationVersion)\n\t}\n\n\trw.WriteHeader(o._statusCode)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func HS200t(w http.ResponseWriter, b []byte) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.Header().Set(\"Cache-Control\", \"no-cache,no-store\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(b)\n}",
"func (w *customResponseWriter) WriteHeader(status int) {\n\tw.status = status\n\tw.ResponseWriter.WriteHeader(status)\n}",
"func (r *Response) WriteHeader(status int) {\n\tr.status = status\n\tr.rendered = true\n\tr.ResponseWriter.WriteHeader(status)\n}",
"func Success(a ...interface{}) {\n\tcolor.Set(color.FgHiGreen)\n\tdefer color.Unset()\n\tsuccessLogger.Println(a...)\n}",
"func (w *LoggingResponseWriter) WriteHeader(status int) {\n\tw.status = status\n\tw.writer.WriteHeader(status)\n}",
"func (w *ResponseWriterTee) WriteHeader(statusCode int) {\n\tw.StatusCode = statusCode\n\tw.w.WriteHeader(statusCode)\n}",
"func (c *Context) Status(code int) {\n\tc.StatusCode = code\n\tc.Writer.WriteHeader(code)\n}",
"func (o *PostKeysKeyOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n}",
"func Success(taskDescription string) {\n\tcolor.Green(\"Success - \" + taskDescription)\n}",
"func (c *CountHandler) OkResponse(resp http.ResponseWriter, req *http.Request) {\n\tc.numRequests++\n\tresp.WriteHeader(http.StatusOK)\n\tresp.Write([]byte(\"{\\\"stat\\\": \\\"ok\\\"}\"))\n}",
"func (r *LogRecord) WriteHeader(status int) {\n\tr.status = status\n\tr.ResponseWriter.WriteHeader(status)\n}",
"func writePlainText(statusCode int, text string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(statusCode)\n\tfmt.Fprintln(w, text)\n}",
"func writeSuccessNoContent(w http.ResponseWriter) {\n\twriteResponse(w, http.StatusNoContent, nil, mimeNone)\n}",
"func (o *CreateProjectOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func PrintSuccess(msg interface{}) {\n\tswitch os.Getenv(\"GHORG_COLOR\") {\n\tcase \"enabled\":\n\t\tcolor.New(color.FgGreen).Println(msg)\n\tdefault:\n\t\tfmt.Println(msg)\n\t}\n}",
"func OkFinisher(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ok\"))\n}",
"func genericSuccess(caller, inp, msg string) error {\n\tresp, err := json.Marshal(&Response{Status: \"Success\", Message: msg})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprintAndLog(caller, inp, string(resp))\n\treturn nil\n}",
"func (grh *GRPCResponseHandler) WriteHeader(code int) {\n\tgrh.statusCode = code\n\tgrh.ResponseWriter.WriteHeader(code)\n}",
"func ResponseSuccess(w http.ResponseWriter, result *Result, message string) {\n\tresponseData := httpResponse{\n\t\tSuccess: true,\n\t\tData: result.Data,\n\t\tMessage: message,\n\t\tCode: http.StatusOK,\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(w).Encode(responseData)\n\treturn\n}",
"func returnCreatedRecordResponse(w http.ResponseWriter) {\n\tformatResponseWriter(w, http.StatusCreated, \"text/plain\", []byte(OKResponseBodyMessage))\n}",
"func (h *Encoder) WriteSuccessfulResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\toutput interface{},\n\tht rest.HandlerTrait,\n) {\n\tif h.unwrapInterface {\n\t\toutput = reflect.ValueOf(output).Elem().Interface()\n\t}\n\n\tif etagged, ok := output.(rest.ETagged); ok {\n\t\tetag := etagged.ETag()\n\t\tif etag != \"\" {\n\t\t\tw.Header().Set(\"Etag\", etag)\n\t\t}\n\t}\n\n\tif h.outputHeadersEncoder != nil && !h.whiteHeader(w, r, output, ht) {\n\t\treturn\n\t}\n\n\tskipRendering := h.skipRendering\n\tif !skipRendering {\n\t\tif nc, ok := output.(noContent); ok {\n\t\t\tskipRendering = nc.NoContent()\n\t\t\tif skipRendering && ht.SuccessStatus == 0 {\n\t\t\t\tht.SuccessStatus = http.StatusNoContent\n\t\t\t}\n\t\t}\n\t}\n\n\tif ht.SuccessStatus == 0 {\n\t\tht.SuccessStatus = http.StatusOK\n\t}\n\n\tif skipRendering {\n\t\tif ht.SuccessStatus != http.StatusOK {\n\t\t\tw.WriteHeader(ht.SuccessStatus)\n\t\t}\n\n\t\treturn\n\t}\n\n\th.writeJSONResponse(w, r, output, ht)\n}",
"func (o *VerifyHealthCredentialOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func (s *session) respondOK(format string, args ...interface{}) error {\n\treturn s.writer.PrintfLine(fmt.Sprintf(\"+OK %s\", format), args...)\n}",
"func (o *DepositNewFileOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses\n\n\trw.WriteHeader(200)\n}",
"func (r *response) WriteHeader(code int) {\n\tif r.headersSend {\n\t\t//r.webapp.Logger().Warn(\"headers already send\")\n\t\treturn\n\t}\n\tr.status = code\n\tr.ResponseWriter.WriteHeader(r.status)\n\tr.headersSend = true\n}",
"func (r *Responder) OK() { r.write(http.StatusOK) }",
"func (sr *StatusRecorder) WriteHeader(status int) {\n\tsr.status = status\n\tsr.ResponseWriter.WriteHeader(status)\n}",
"func (l *Logger) Success(a ...interface{}) {\r\n\tl.logInternal(SuccessLevel, 4, a...)\r\n}",
"func (w *MonResponseWriter) WriteHeader(statusCode int) {\n\t// Store the status code\n\tw.status = statusCode\n\t// Write the status code onward.\n\tw.ResponseWriter.WriteHeader(statusCode)\n}",
"func (r *loggingWriter) WriteHeader(status int) {\n\tr.accessStats.status = status\n\tr.ResponseWriter.WriteHeader(status)\n}"
] | [
"0.7720925",
"0.75208837",
"0.6925237",
"0.67952013",
"0.67613465",
"0.66899836",
"0.6652819",
"0.6578009",
"0.6520311",
"0.64321697",
"0.64295083",
"0.63760626",
"0.63639045",
"0.63398534",
"0.62433517",
"0.624165",
"0.6211086",
"0.62089276",
"0.6171703",
"0.6163954",
"0.61552304",
"0.6148136",
"0.613542",
"0.610665",
"0.6074591",
"0.60710764",
"0.60635024",
"0.6050342",
"0.60479575",
"0.6047643",
"0.6047258",
"0.6046203",
"0.60179853",
"0.60095924",
"0.5996439",
"0.5972505",
"0.5959121",
"0.5909706",
"0.5903094",
"0.5892327",
"0.589154",
"0.58791935",
"0.5877668",
"0.586009",
"0.58599365",
"0.58426756",
"0.5840475",
"0.5825509",
"0.5825509",
"0.5825271",
"0.58145016",
"0.58002526",
"0.5798717",
"0.5789244",
"0.57830775",
"0.5776287",
"0.5765459",
"0.5758734",
"0.57561255",
"0.5752481",
"0.5751439",
"0.57497466",
"0.5744479",
"0.5741446",
"0.57319087",
"0.57244074",
"0.5724183",
"0.5722719",
"0.5721963",
"0.5719459",
"0.57161945",
"0.57131326",
"0.57124436",
"0.5705155",
"0.5696667",
"0.5692696",
"0.5690923",
"0.56715447",
"0.567112",
"0.5670362",
"0.5669959",
"0.56661284",
"0.5662814",
"0.5661038",
"0.56577635",
"0.56518847",
"0.56465626",
"0.5642628",
"0.5636827",
"0.56362873",
"0.5636078",
"0.5635353",
"0.56305474",
"0.5629421",
"0.56276166",
"0.5621195",
"0.56203616",
"0.56194174",
"0.56168157",
"0.5607433",
"0.5606254"
] | 0.0 | -1 |
write a teach/ask json error with header to the output | func ATJsonError(writer http.ResponseWriter, error_message string) {
writer.WriteHeader(http.StatusInternalServerError)
writer.Header().Set("Content-Type", "application/json")
obj := model.ATResultList{Error: error_message}
json_bytes, _ := json.Marshal(obj)
writer.Write(json_bytes)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func writeError(w http.ResponseWriter, err error, code int) error {\n\ttags := []string{\n\t\t\"error_\" + strings.ToLower(\n\t\t\tstrings.ReplaceAll(http.StatusText(code), \" \", \"_\"),\n\t\t),\n\t}\n\n\tdata, err := jsoniter.ConfigFastest.Marshal(map[string]interface{}{\n\t\t\"success\": false,\n\t\t\"code\": code,\n\t\t\"message\": err.Error(),\n\t\t\"tags\": tags,\n\t})\n\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\n\t_, err = w.Write(data)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"writeError:%w\", err)\n\t}\n\n\treturn err\n}",
"func (a *API) jsonError(res http.ResponseWriter, err common.DetailedError, startedAt time.Time) {\n\ta.logError(&err, startedAt)\n\tjsonErr, _ := json.Marshal(err)\n\n\tres.Header().Add(\"content-type\", \"application/json\")\n\tres.WriteHeader(err.Status)\n\tres.Write(jsonErr)\n}",
"func jsonError(w http.ResponseWriter, serverMsg string, clientMsg string) {\n\tlog.Error(serverMsg)\n\tpayload := Message{\n\t\tError: clientMsg,\n\t}\n\tresJSON, err := json.Marshal(payload)\n\tif err != nil {\n\t\tmsg := fmt.Sprintf(\"Failed to marshal result : %v\", err)\n\t\thttpError(w, msg, msg, http.StatusInternalServerError)\n\t\treturn\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tfmt.Fprintf(w, \"%s\\n\", string(resJSON))\n\treturn\n}",
"func writeErrorResponse(w http.ResponseWriter, status int, body string) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(status)\n\n\t_, _ = fmt.Fprintf(os.Stderr, \"error: %s\", body)\n\tif _, err := w.Write([]byte(body)); err != nil {\n\t\t_, _ = fmt.Fprintf(os.Stderr, \"cannot write to stream: %v\\n\", err)\n\t\treturn\n\t}\n}",
"func JsonError(writer http.ResponseWriter, error_message string) {\n\twriter.WriteHeader(http.StatusInternalServerError)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.Write([]byte(\"{\\\"error\\\": \\\"\" + error_message + \"\\\"}\"))\n}",
"func writeInsightError(w http.ResponseWriter, str string) {\n\tw.Header().Set(\"Content-Type\", \"text/html; charset=utf-8\")\n\tw.WriteHeader(http.StatusBadRequest)\n\tio.WriteString(w, str)\n}",
"func WriteErrJSON(l *zap.SugaredLogger, w http.ResponseWriter, r *http.Request, err error, httpCode int) {\n\t// log outgoing errors\n\tl.With(\"requestId\", GetReqID(r.Context())).Error(err)\n\n\t// write error to response\n\te := HTTPError{\n\t\tHTTPStatusCode: httpCode,\n\t\tMsg: err.Error(),\n\t\tInternalErrCode: -1,\n\t}\n\n\tif err := WriteJSON(w, e, httpCode); err != nil {\n\t\tl.Errorw(\"error while sending err json\", \"err\", err)\n\t}\n}",
"func respondError(writer http.ResponseWriter, err string) {\n\twriter.WriteHeader(http.StatusInternalServerError)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tio.WriteString(writer, fmt.Sprintf(`{ \"status\": \"ERROR\", \"problem\": \"%s\"}`, err))\n}",
"func writeError(w http.ResponseWriter, status int, err error) {\n\twrite(w, status, Error{Err: err.Error()})\n}",
"func writeErrorResponse(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusInternalServerError)\n\ter := errorResponse{Message: \"unable to process request\"}\n\tbs, err := json.Marshal(er)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\tif _, err := w.Write(bs); err != nil {\n\t\tlog.Error(err)\n\t}\n}",
"func setError(w http.ResponseWriter, desc string, status int) {\n\te := map[string]interface{}{\"code\": status, \"msg\": desc}\n\tmsg, _ := json.Marshal(e)\n\tlog.DebugJson(e)\n\t//w.WriteHeader(status)\n\tw.Write(msg)\n}",
"func writeError(httpStatus int, e error, response *restful.Response) {\n\tvar resp *endpointError.EndpointError\n\tif endpointErr, ok := e.(*endpointError.EndpointError); ok {\n\t\tresp = endpointErr\n\t} else {\n\t\tresp = &endpointError.EndpointError{\n\t\t\tErrorMessage: e.Error(),\n\t\t\tErrorCode: \"96\",\n\t\t}\n\t}\n\n\terr := response.WriteHeaderAndJson(httpStatus, resp, restful.MIME_JSON)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t\treturn\n\t}\n}",
"func writeApiReqErr(w http.ResponseWriter, status int, message string) {\n\tw.Header().Set(\"Content-Type\", util.ContentTypeTextPlainUTF8)\n\tif status > 0 && status != 200 {\n\t\tw.WriteHeader(status)\n\t} else {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tw.Write([]byte(message))\n}",
"func SendError(w http.ResponseWriter, status int, errMsg string) {\n header(w, status)\n data := ErrJson {\n Status: status,\n Error: errMsg,\n }\n json.NewEncoder(w).Encode(data)\n}",
"func writeErr(w http.ResponseWriter, err error) {\n\twriteResponse(w, &authorization.Response{Err: err.Error()})\n}",
"func writeErrorResponse(w http.ResponseWriter, errorMsg string) {\n\tresponse := Response{false, []common.Bike{}, errorMsg}\n\twriteResponse(w, response)\n}",
"func respondErr(w http.ResponseWriter, apiErr APIErrorRoot) {\n\tlog.Error(apiErr.Body.Code, \"\\t\", apiErr.Body.Message)\n\t// set the response code\n\tw.WriteHeader(apiErr.Body.Code)\n\t// Output API Erorr object to JSON\n\toutput, _ := json.MarshalIndent(apiErr, \"\", \" \")\n\tw.Write(output)\n}",
"func (w *RESPWriter) writeError(err error) {\n\tw.buf.WriteRune(respERROR)\n\tif err != nil {\n\t\tw.buf.WriteString(err.Error())\n\t}\n\tw.buf.Write(DELIMS)\n}",
"func sendJsonError(response http.ResponseWriter, status int, message string) {\n\toutput := map[string]string{\n\t\t\"status\": \"error\",\n\t\t\"message\": message,\n\t}\n\n\tjsonBytes, err := json.Marshal(output)\n\tif err != nil {\n\t\tlog.Errorf(\"Error encoding json error response: %s\", err.Error())\n\t\thttp.Error(response, \"Interval server error\", 500)\n\t\treturn\n\t}\n\n\tresponse.Header().Set(\"Content-Type\", \"application/json\")\n\tresponse.WriteHeader(status)\n\t_, err = response.Write(jsonBytes)\n\tif err != nil {\n\t\tlog.Errorf(\"Unable to write JSON error: %s\", err)\n\t}\n}",
"func WriteErrorResponse(w http.ResponseWriter, code int, err error) {\n\ttype e struct {\n\t\tDescription string `json:\"description\"`\n\t}\n\tdata, err := json.Marshal(&e{\n\t\tDescription: err.Error(),\n\t})\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(data)\n}",
"func writeServiceError(w http.ResponseWriter) {\n\t// TODO log error\n\tw.WriteHeader(http.StatusServiceUnavailable)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tjson.NewEncoder(w).Encode(response{\"Fail connection on DB.\"})\n}",
"func WriteErrResponse(w http.ResponseWriter, code int, err error) error {\n\tenv := map[string]interface{}{\n\t\t\"meta\": map[string]interface{}{\n\t\t\t\"code\": code,\n\t\t\t\"error\": err.Error(),\n\t\t},\n\t}\n\n\tres, err := json.MarshalIndent(env, \"\", \" \")\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn err\n\t}\n\n\tw.WriteHeader(code)\n\t_, err = w.Write(res)\n\treturn err\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n w.WriteHeader(http.StatusInternalServerError)\n /* TODO...\n\tswitch err {\n\tcase cargo.ErrUnknown:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\t*/\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tif err == nil {\n\t\tpanic(\"encodeError with nil error\")\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\t// Write actual error code\n\tcode := codeFrom(err)\n\tw.WriteHeader(code)\n\n\t// write out the error message\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"code\": code,\n\t\t\"message\": err.Error(),\n\t})\n}",
"func (h *Handler) error(w http.ResponseWriter, error string, code int) {\n\t// TODO: Return error as JSON.\n\thttp.Error(w, error, code)\n}",
"func errorHandler(w http.ResponseWriter, req *http.Request, status int) {\n w.WriteHeader(status)\n if status == http.StatusNotFound {\n e := Error{ErrMsg: \"MessageResponse not found\", ErrCode:http.StatusNotFound}\n json.NewEncoder(w).Encode(e)\n }\n if status == http.StatusBadRequest {\n e := Error{ErrMsg: \"Cannot process your request\", ErrCode:http.StatusBadRequest}\n json.NewEncoder(w).Encode(e)\n }\n}",
"func writeErrorResponse(w http.ResponseWriter, response interface{}, acceptsType contentType) []byte {\n\tvar bytesBuffer bytes.Buffer\n\tvar encoder encoder\n\t// write common headers\n\twriteCommonHeaders(w, getContentString(acceptsType))\n\tswitch acceptsType {\n\tcase xmlType:\n\t\tencoder = xml.NewEncoder(&bytesBuffer)\n\tcase jsonType:\n\t\tencoder = json.NewEncoder(&bytesBuffer)\n\t}\n\tencoder.Encode(response)\n\treturn bytesBuffer.Bytes()\n}",
"func responseWithErrorTxt(w http.ResponseWriter, code int, errTxt string) {\n\tresponseWithJSON(w, code, map[string]string{\"error\": errTxt})\n}",
"func sendErrorMessage(w io.Writer, err error) {\n\tif err == nil {\n\t\tpanic(errors.Wrap(err, \"Cannot send error message if error is nil\"))\n\t}\n\tfmt.Printf(\"ERROR: %+v\\n\", err)\n\twriteJSON(w, map[string]string{\n\t\t\"status\": \"error\",\n\t\t\"message\": err.Error(),\n\t})\n}",
"func Err(w http.ResponseWriter, e *ae.Error) {\n\tlog.Err(*e)\n\tlog.Err(string(debug.Stack()))\n\tjsonData, err := json.Marshal(e)\n\tif err != nil {\n\t\tfmt.Fprint(w, \"Error\")\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"X-Content-Type-Options\", \"nosniff\")\n\tw.WriteHeader(e.HttpStatus)\n\tfmt.Fprintln(w, string(jsonData))\n}",
"func writeJSON(w http.ResponseWriter, obj interface{}) {\n\tif json.NewEncoder(w).Encode(obj) != nil {\n\t\thttp.Error(w, \"Failed to encode response\", http.StatusInternalServerError)\n\t}\n}",
"func (app *application) errorResponse(w http.ResponseWriter, r *http.Request, status int, message interface{}) {\n\tresp := clientResponse{\"error\": message}\n\t// Write the response using the helper method.\n\terr := app.writeJSON(w, status, resp)\n\tif err != nil {\n\t\tapp.logError(r, err)\n\t\tw.WriteHeader(500)\n\t}\n}",
"func (s Status) WriteJSON(w http.ResponseWriter) error {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(s.Code)\n\t_, err := fmt.Fprintf(w, `{\"error\":%q}`, s.String())\n\treturn err\n}",
"func respond(writer http.ResponseWriter, data interface{}, err *errors.MyError) {\n\twriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\tif err != nil {\n\t\tlog.Println(err.Err)\n\t\terrCode := err.ErrorCode\n\t\tif errCode == 0 {\n\t\t\terrCode = http.StatusInternalServerError\n\t\t}\n\t\thttp.Error(writer, err.Err, errCode)\n\t} else {\n\t\tjson.NewEncoder(writer).Encode(data)\n\t}\n}",
"func fail(res http.ResponseWriter, code int, message string) {\n\tres.WriteHeader(code)\n\tbody, _ := json.Marshal(ErrorResponse{message})\n\tres.Write(body)\n}",
"func writeError(w http.ResponseWriter, message string){\n\ttype Out struct {\n\t\tMessage string\n\t}\n\n\t/* Write HTML */\n\tdata := Out{message}\n\ttmpl := template.Must(template.ParseFiles(\"static/html/error.html\", \"static/html/top.html\", \"static/html/head.html\"))\n\ttmpl.ExecuteTemplate(w, \"error\", data)\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tswitch err {\n\tcase data.ErrUnknown:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tswitch err {\n\tcase cargo.ErrUnknown:\n\t\tw.WriteHeader(http.StatusNotFound)\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func (l *HTTPLib) JSON(w http.ResponseWriter, code int, data interface{}) {\n\tresp := newResponse(code, data)\n\tw.WriteHeader(code)\n\tif err := json.NewEncoder(w).Encode(resp); err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\tw.Write([]byte(`{\"data\":\"internal server error\"}`))\n\t\t// http.Error(w, `{\"data\":\"internal server error\"}`, http.StatusInternalServerError)\n\t\treturn\n\t}\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tswitch err {\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func encodeError(_ context.Context, err error, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tswitch err {\n\tcase ErrInvalidArgument:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\tdefault:\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func errorResponse(w http.ResponseWriter, reason string, statusCode int) error {\n\tw.WriteHeader(statusCode)\n\terrResponse := ErrorResponse{Err: reason}\n\terr := json.NewEncoder(w).Encode(errResponse)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func invalid_request(w http.ResponseWriter, statCode int, message string){\n w.Header().Set(\"Content-Type\", \"application/json\")\n switch statCode {\n case 400: w.WriteHeader(http.StatusBadRequest)\n case 403: w.WriteHeader(http.StatusForbidden)\n case 404: w.WriteHeader(http.StatusNotFound)\n default: w.WriteHeader(http.StatusNotFound)\n }\n err := Error {\n StatusCode: statCode,\n ErrorMessage: message}\n json.NewEncoder(w).Encode(err)\n}",
"func RespondErr(w http.ResponseWriter, status int, data string) {\n\tw.WriteHeader(status)\n\tfmt.Fprintf(w, `{\n\t\t\"error\": %s\n\t}`, data)\n}",
"func sidesErrResponse(w http.ResponseWriter) {\n\terrResponse := errorResponse{\n\t\t\"invalid sides\",\n\t\t\"The dice requested is not available.\",\n\t}\n\tw.WriteHeader(http.StatusNotAcceptable)\n\tenc := json.NewEncoder(w)\n\tjsonEncode(w, enc, errResponse)\n\n\treturn\n}",
"func countErrResponse(w http.ResponseWriter) {\n\terrResponse := errorResponse{\n\t\t\"invalid count\",\n\t\t\"The number of dice requested is invalid.\",\n\t}\n\tw.WriteHeader(http.StatusNotAcceptable)\n\tenc := json.NewEncoder(w)\n\tjsonEncode(w, enc, errResponse)\n\n\treturn\n}",
"func (w *Writer) Err(m string) error {}",
"func Problem(w http.ResponseWriter, err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tjson.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\"error\": err.Error(),\n\t})\n}",
"func write_std_error(err interface{}, stack string){\n\tif StdErr==true{\n\t\tfmt.Println(\"runtime error,panic:%v,stack:%s\", err, stack)\n\t}\n}",
"func (c *Operation) writeErrorResponse(rw http.ResponseWriter, status int, msg string) {\n\tlogger.Errorf(msg)\n\n\trw.WriteHeader(status)\n\n\tif _, err := rw.Write([]byte(msg)); err != nil {\n\t\tlogger.Errorf(\"Unable to send error message, %s\", err)\n\t}\n}",
"func messageResponse(w http.ResponseWriter, s int, m string) {\n\tw.WriteHeader(s)\n\tif m == \"\" {\n\t\treturn\n\t}\n\n\tb, err := json.Marshal(errorMessage{m})\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"Could not wrap message in JSON: %s\", m)\n\t\treturn\n\t}\n\tw.Write(b)\n}",
"func writeJSON(w http.ResponseWriter, data interface{}) error {\n\tif err, ok := data.(error); ok {\n\t\tdata = struct{ Error string }{err.Error()}\n\t\tw.WriteHeader(400)\n\t}\n\to, err := json.MarshalIndent(data, \"\", \" \")\n\tif err != nil {\n\t\treturn err\n\t}\n\t_, err = w.Write(o)\n\treturn err\n}",
"func handleErr(w http.ResponseWriter, statusCode int, msg string) {\n\tw.WriteHeader(statusCode)\n\tw.Write([]byte(msg + \"\\n\"))\n}",
"func Err(w http.ResponseWriter, statusCode int, err error) {\n\tpresentation.JSON(w, statusCode, struct {\n\t\tErr string `json:\"erro\"`\n\t}{\n\t\tErr: err.Error(),\n\t})\n}",
"func sendErrorJsonGenerator(c *gin.Context, err error, code int) {\n\tc.JSON(code, gin.H{\n\t\t\"status\": false,\n\t\t\"error\": err.Error(),\n\t\t\"code\": http.StatusText(code),\n\t})\n}",
"func errWriter(w http.ResponseWriter, httpSts int, err error) {\n\tlog.Print(err)\n\thttp.Error(w, http.StatusText(httpSts), httpSts)\n}",
"func respond(writer http.ResponseWriter, data interface{}, err *exchanges.MyError) {\n\twriter.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(writer, err.Error(), err.Code)\n\t} else {\n\t\tjson.NewEncoder(writer).Encode(data)\n\t}\n}",
"func jsonResponse(rw http.ResponseWriter, code int, msg string) {\n\trw.Header().Set(\"Content-Type\", \"application/json\")\n\trw.WriteHeader(code)\n\trw.Write([]byte(fmt.Sprintf(`{\"message\":\"%s\"}`, msg)))\n}",
"func (m *manager) sendErr(w http.ResponseWriter, errorCode int64, errorData interface{}) {\n\tw.WriteHeader(http.StatusInternalServerError)\n\tenc := json.NewEncoder(w)\n\tenc.Encode(map[string]interface{}{\n\t\t\"ok\": false,\n\t\t\"error_code\": errorCode,\n\t\t\"error_data\": errorData,\n\t})\n}",
"func errorf(w http.ResponseWriter, code int, format string, a ...interface{}) {\n\tvar out struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tout.Code = code\n\tout.Message = fmt.Sprintf(format, a...)\n\n\tb, err := json.Marshal(out)\n\tif err != nil {\n\t\thttp.Error(w, `{\"code\": 500, \"message\": \"Could not format JSON for original message.\"}`, 500)\n\t\treturn\n\t}\n\n\thttp.Error(w, string(b), code)\n}",
"func errorf(w http.ResponseWriter, code int, format string, a ...interface{}) {\n\tvar out struct {\n\t\tCode int `json:\"code\"`\n\t\tMessage string `json:\"message\"`\n\t}\n\n\tout.Code = code\n\tout.Message = fmt.Sprintf(format, a...)\n\n\tb, err := json.Marshal(out)\n\tif err != nil {\n\t\thttp.Error(w, `{\"code\": 500, \"message\": \"Could not format JSON for original message.\"}`, 500)\n\t\treturn\n\t}\n\n\thttp.Error(w, string(b), code)\n}",
"func writeResponse(w http.ResponseWriter, body interface{}, e error) error {\n\tvar (\n\t\tpayload []byte\n\t\tresponse Response\n\t\terr error\n\t)\n\tresponse = Response{\n\t\tResult: body,\n\t\tError: ResponseError{},\n\t}\n\tif e != nil {\n\t\tresponse.Error.Error = fmt.Sprintf(\"%v\", e)\n\t}\n\tpayload, err = json.MarshalIndent(response, \"\", \"\\t\")\n\n\tif !util.ErrorCheck(err) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.Write(payload)\n\t}\n\treturn err\n}",
"func (s *SubmissionHandler) writeJSON(i interface{}) error {\n\tif e := util.WriteJSON(s.conn, i); e != nil {\n\t\treturn e\n\t}\n\t_, e := s.conn.Write([]byte(util.EOT))\n\treturn e\n}",
"func WriteErr(w http.ResponseWriter, r *http.Request, err error, opts ...int) {\n\tif httpErr := Err2HTTPErr(err); httpErr != nil {\n\t\tstatus := http.StatusBadRequest\n\t\tif len(opts) > 0 && opts[0] > status {\n\t\t\tstatus = opts[0]\n\t\t}\n\t\thttpErr.Status = status\n\t\thttpErr.write(w, r, len(opts) > 1 /*silent*/)\n\t} else if errors.Is(err, &ErrNotFound{}) {\n\t\tif len(opts) > 0 {\n\t\t\t// Override the status code.\n\t\t\topts[0] = http.StatusNotFound\n\t\t} else {\n\t\t\t// Add status code if not set.\n\t\t\topts = append(opts, http.StatusNotFound)\n\t\t}\n\t\tWriteErrMsg(w, r, err.Error(), opts...)\n\t} else {\n\t\tWriteErrMsg(w, r, err.Error(), opts...)\n\t}\n}",
"func errorResponse(conn net.Conn, response string) {\n\tconn.Write(append(data.PackString(\"ERROR\"), data.PackString(response)...))\n}",
"func writeResponse(body []byte, w *http.ResponseWriter) {\n\t(*w).Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\t_, err := (*w).Write(body)\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\t(*w).WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n}",
"func HttpErrResponse(w http.ResponseWriter, statusCode int, obj interface{}) {\n\tw.Header().Set(rest.HEADER_RESPONSE_STATUS, strconv.Itoa(statusCode))\n\tw.Header().Set(rest.HEADER_CONTENT_TYPE, rest.CONTENT_TYPE_TEXT)\n\tw.WriteHeader(statusCode)\n\tif obj == nil {\n\t\treturn\n\t}\n\n\tobjJSON, err := json.Marshal(obj)\n\tif err != nil {\n\t\tlog.Errorf(nil, \"Http error response marshaling failed.\")\n\t\treturn\n\t}\n\tw.Header().Set(rest.HEADER_CONTENT_TYPE, rest.CONTENT_TYPE_JSON)\n\t_, err = fmt.Fprintln(w, string(objJSON))\n\tif err != nil {\n\t\tlog.Errorf(nil, \"Send http response fail.\")\n\t}\n}",
"func write(resp *Response, w http.ResponseWriter) {\n\tjs, _ := json.Marshal(resp)\n\tfmt.Fprint(w, string(js))\n}",
"func output500Error(r render.Render, err error) {\n\tfmt.Println(err)\n\tr.JSON(500, map[string]interface{}{\"error\": err.Error()})\n}",
"func errJson(err error) map[string]string {\n\t//return fmt.Sprintf(`{error: \"%s\"}`, err)\n\treturn map[string]string{\n\t\t\"error\": err.Error(),\n\t}\n}",
"func ServerErrResponse(error string, writer http.ResponseWriter) {\n\ttype servererrdata struct {\n\t\tStatusCode int\n\t\tMessage string\n\t}\n\ttemp := &servererrdata{StatusCode: 500, Message: error}\n\n\t//Send header, status code and output to writer\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.WriteHeader(http.StatusInternalServerError)\n\tjson.NewEncoder(writer).Encode(temp)\n}",
"func writeJsonRespStructured(w http.ResponseWriter, err error, respBody interface{}, status int, apiErrors []*util.ApiError) {\n\tresponse := ResponseV2{}\n\tresponse.Code = status\n\tresponse.Status = http.StatusText(status)\n\tif err == nil {\n\t\tresponse.Result = respBody\n\t} else {\n\t\tresponse.Errors = apiErrors\n\t}\n\tb, err := json.Marshal(response)\n\tif err != nil {\n\t\tutil.GetLogger().Error(\"error in marshaling err object\", err)\n\t\tstatus = 500\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(b)\n}",
"func respondWithError(w http.ResponseWriter, code int, message string) {\n respondWithJSON(w, code, map[string]string{\"error\": message})\n}",
"func respondWithError(w http.ResponseWriter, message string) {\n\tw.WriteHeader(http.StatusBadRequest)\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.Write([]byte(message))\n}",
"func (h *Encoder) WriteErrResponse(w http.ResponseWriter, r *http.Request, statusCode int, response interface{}) {\n\tcontentType := \"application/json; charset=utf-8\"\n\n\te := jsonEncoderPool.Get().(*jsonEncoder) // nolint:errcheck\n\n\te.buf.Reset()\n\tdefer jsonEncoderPool.Put(e)\n\n\terr := e.enc.Encode(response)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Length\", strconv.Itoa(e.buf.Len()))\n\tw.Header().Set(\"Content-Type\", contentType)\n\tw.WriteHeader(statusCode)\n\n\tif r.Method == http.MethodHead {\n\t\treturn\n\t}\n\n\t_, err = w.Write(e.buf.Bytes())\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\n\t\treturn\n\t}\n}",
"func WriteErrOut(ctx context.Context, w http.ResponseWriter, err error) {\n\ts := opentracing.SpanFromContext(ctx)\n\t// Tag this span with an error. Tag error, http.status_code and log event/message\n\t!!YOUR_CODE!!\n\thttp.Error(w, err.Error(), http.StatusExpectationFailed)\n}",
"func errorEncoder(_ context.Context, err error, w http.ResponseWriter) {\n\tw.WriteHeader(err2code(err))\n\tjson.NewEncoder(w).Encode(errorWrapper{Error: err.Error()})\n}",
"func writeJSON(w http.ResponseWriter, code int, v interface{}) error {\n\t// Set content type as json\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\n\t// write the HTTP status code\n\tw.WriteHeader(code)\n\n\t// Write the Json output\n\treturn json.NewEncoder(w).Encode(v)\n}",
"func writeJSON(w http.ResponseWriter, status int, data mapStringInterface) error {\n\tjs, err := json.Marshal(data)\n\t//js, err := json.MarshalIndent(data, \"\", \"\\t\")\n\n\tif err != nil {\n\t\treturn err\n\t}\n\tjs = append(js, '\\n')\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(status)\n\tw.Write(js)\n\treturn nil\n}",
"func write(w http.ResponseWriter, info *auth.UserInfo, err error) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Header().Set(\"x-content-type-options\", \"nosniff\")\n\n\tresp := auth.TokenReview{\n\t\tTypeMeta: metav1.TypeMeta{\n\t\t\tAPIVersion: auth.SchemeGroupVersion.String(),\n\t\t\tKind: \"TokenReview\",\n\t\t},\n\t}\n\n\tif err != nil {\n\t\tcode := http.StatusUnauthorized\n\t\tif v, ok := err.(httpStatusCode); ok {\n\t\t\tcode = v.Code()\n\t\t}\n\t\tprintStackTrace(err)\n\t\tw.WriteHeader(code)\n\t\tresp.Status = auth.TokenReviewStatus{\n\t\t\tAuthenticated: false,\n\t\t\tError: err.Error(),\n\t\t}\n\t} else {\n\t\tw.WriteHeader(http.StatusOK)\n\t\tresp.Status = auth.TokenReviewStatus{\n\t\t\tAuthenticated: true,\n\t\t\tUser: *info,\n\t\t}\n\t}\n\n\tif glog.V(10) {\n\t\tdata, _ := json.MarshalIndent(resp, \"\", \" \")\n\t\tglog.V(10).Infoln(string(data))\n\t}\n\n\terr = json.NewEncoder(w).Encode(resp)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (s *Status) Write(w http.ResponseWriter) error {\n\tw.WriteHeader(s.Code)\n\tswitch ct := w.Header().Get(\"Content-Type\"); ct {\n\tcase \"application/json\":\n\t\t_, err := fmt.Fprintf(w, `{\"error\":%q}`, s.String())\n\t\treturn err\n\tdefault:\n\t\t_, err := io.WriteString(w, s.String())\n\t\treturn err\n\t}\n}",
"func writeResponse(w http.ResponseWriter, authZRes *authorization.Response) {\n\n\tdata, err := json.Marshal(authZRes)\n\tif err != nil {\n\t\tlog.Fatal(\"Failed to marshel authz response %q\", err.Error())\n\t} else {\n\t\tw.Write(data)\n\t}\n\n\tif authZRes == nil || authZRes.Err != \"\" {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t}\n}",
"func printSyntaxError(js string, off *[5000]int, err interface{}) {\r\n\tsyntax, ok := err.(*json.SyntaxError)\r\n\tif !ok {\r\n fmt.Println(\"*********** ERR trying to get syntax error location **************\\n\", err)\r\n\t\treturn\r\n\t}\r\n\t\r\n\tstart, end := strings.LastIndex(js[:syntax.Offset], \"\\n\")+1, len(js)\r\n\tif idx := strings.Index(js[start:], \"\\n\"); idx >= 0 {\r\n\t\tend = start + idx\r\n\t}\r\n\t\r\n\tline, pos := strings.Count(js[:start], \"\\n\"), int(syntax.Offset) - start -1\r\n\t\r\n\tfmt.Printf(\"Error in line %d: %s \\n\", off[line]+1, err)\r\n\tfmt.Printf(\"%s\\n%s^\\n\\n\", js[start:end], strings.Repeat(\" \", pos))\r\n}",
"func writeJSON(w http.ResponseWriter, o interface{}) {\n\tw.WriteHeader(http.StatusOK)\n\te := json.NewEncoder(w)\n\te.SetIndent(\"\", \"\\t\")\n\te.Encode(o)\n}",
"func writeResponse(w http.ResponseWriter, h int, p interface{}) {\n\t// I set the content type...\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=UTF-8\")\n\t// ... I write the specified status code...\n\tw.WriteHeader(h)\n\t// ... and I write the response\n\tb, _ := json.Marshal(p)\n\tw.Write(b)\n}",
"func respond_with_error(w http.ResponseWriter,\n\terr int,\n\tmethod string,\n\tpath string,\n\tdetails ...string) {\n\tfooter := \"\"\n\tif len(details) > 0 {\n\t\tfooter = fmt.Sprintf(\" Details: %s\", details[0])\n\t}\n\tmessage := fmt.Sprintf(\"Received: %s, %s.%s\", method, path, footer)\n\tif logutil.GPS_DEBUG > 0 {\n\t\tlogutil.LogRestResponse(fmt.Sprintf(\"%s (%d). %s\", http.StatusText(err), err, message))\n\t}\n\tadd_json_header(w)\n\tset_response_status(w, err)\n\tfmt.Fprintf(w, \"{\\\"error\\\":%q}\\n\", message)\n}",
"func (s *Service) sendError(rsp http.ResponseWriter, req *Request, err error) {\n var m string\n var r int\n var c error\n var h map[string]string\n \n switch v := err.(type) {\n case *Error:\n r = v.Status\n h = v.Headers\n c = v.Cause\n m = fmt.Sprintf(\"%s: [%v] %v\", s.name, req.Id, c)\n if d := formatDetail(c); d != \"\" {\n m += \"\\n\"+ d\n }\n default:\n r = http.StatusInternalServerError\n c = basicError{http.StatusInternalServerError, err.Error()}\n m = fmt.Sprintf(\"%s: [%v] %v\", s.name, req.Id, err)\n }\n \n // propagate non-success, non-client errors; just log others\n if r < 200 || r >= 500 {\n alt.Error(m, nil, nil)\n }else{\n alt.Debug(m)\n }\n if req.Accepts(\"text/html\") {\n s.sendEntity(rsp, req, r, h, htmlError(r, h, c))\n }else{\n s.sendEntity(rsp, req, r, h, c)\n }\n}",
"func writeHTTPResponseInWriter(httpRes http.ResponseWriter, httpReq *http.Request, nobelPrizeWinnersResponse []byte, err error) {\n\tif err != nil {\n\t\tlog.Println(err.Error())\n\t\thttp.Error(httpRes, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\tlog.Printf(\"Request %s Succesfully Completed\", httpReq.RequestURI)\n\thttpRes.Header().Set(\"Content-Type\", \"application/json\")\n\thttpRes.Write(nobelPrizeWinnersResponse)\n}",
"func writeJSONResponse(w http.ResponseWriter, body interface{}) {\n\tbs, err := json.Marshal(body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif _, err := w.Write(bs); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}",
"func writeJSONResponse(w http.ResponseWriter, body interface{}) {\n\tbs, err := json.Marshal(body)\n\tif err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tif _, err := w.Write(bs); err != nil {\n\t\thttp.Error(w, err.Error(), http.StatusInternalServerError)\n\t}\n}",
"func (app *application) writeJSON(w http.ResponseWriter, status int, data envelope, headers http.Header) error {\n\t// Encode the data to JSON, return error if any.\n\tjs, err := json.MarshalIndent(data, \"\", \"\\t\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Append a newline to make it easier to view in terminal applications.\n\tjs = append(js, '\\n')\n\n\t// Loop through the header map and add each header to the http.ResponseWriter header map.\n\tfor key, value := range headers {\n\t\tw.Header()[key] = value\n\t}\n\n\t// Add the \"Content-Type: application/json\" header.\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t// Write status code.\n\tw.WriteHeader(status)\n\tw.Write(js)\n\n\treturn nil\n}",
"func printResponse(resp interface{}, err error) {\n\tif err == nil {\n\t\tjtext, err := json.MarshalIndent(resp, \"\", \" \")\n\t\tif err == nil {\n\t\t\tfmt.Println(string(jtext))\n\t\t}\n\t}\n\tif err != nil {\n\t\tfmt.Printf(\"err: %s\\n\", err)\n\t}\n}",
"func HttpWriteJson(w http.ResponseWriter, Status string, Msg string, httpStatus int) {\n\tmsgJsonStruct := &JsonMsg{Status, Msg}\n\tmsgJson, errj := json.Marshal(msgJsonStruct)\n\tif errj != nil {\n\t\tmsg := `{\"status\":\"error\",\"message\":\"We could not generate the json error!\"}`\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\t//io.WriteString(w, msg)\n\t\tw.Write([]byte(msg))\n\t\treturn\n\t}\n\tw.WriteHeader(httpStatus)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.Write(msgJson)\n}",
"func reportError(w http.ResponseWriter, code int, err error) {\n\tres, _ := json.Marshal(&RequestStatus{\n\t\tCode: code,\n\t\tError: err.Error(),\n\t})\n\tfmt.Printf(\"Error: %v %v\\n\", code, err)\n\tfmt.Fprintln(w, string(res))\n}",
"func Test_JsonWriter__FailedJsonMarhsal(t *testing.T) {\n\tres := &Response{\n\t\tCode: http.StatusBadRequest,\n\t\tData: map[float64]int{2.5: 1},\n\t}\n\n\tw := httptest.NewRecorder()\n\tJsonWriter(w, res)\n\tassert.Equal(t, \"{error: \\\"json: unsupported type: map[float64]int\\\"}\", w.Body.String())\n\tassert.Equal(t, http.StatusBadRequest, w.Code)\n\tassert.Equal(t, \"application/json; charset=utf-8\", w.Header().Get(\"Content-Type\"))\n}",
"func ERROR(w http.ResponseWriter, statusCode int, err error) {\n\tif err != nil {\n\t\tJSON(w, statusCode, struct {\n\t\t\tError string `json:\"error\"`\n\t\t}{\n\t\t\tError: err.Error(),\n\t\t})\n\t} else {\n\t\tJSON(w, http.StatusBadRequest, nil)\n\t}\n}",
"func writeJSON(w http.ResponseWriter, thing interface{}, indent string) {\n\tw.Header().Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\tencoder := json.NewEncoder(w)\n\tencoder.SetIndent(\"\", indent)\n\tif err := encoder.Encode(thing); err != nil {\n\t\tapiLog.Warnf(\"JSON encode error: %v\", err)\n\t}\n}",
"func errHandle(w http.ResponseWriter, longmsg string, shortmsg string, status int) {\n\tlog.Errorf(longmsg)\n\terrorResponse := ErrorResponse{\n\t\tStatus: status,\n\t\tErrorMessage: shortmsg,\n\t}\n\tdata, _ := json.Marshal(errorResponse)\n\tresponse := JSONResponse{}\n\tresponse.status = http.StatusUnauthorized\n\tresponse.data = data\n\tresponse.Write(w)\n}",
"func jsonResponse(w http.ResponseWriter, d interface{}, c int) {\n\tdj, err := json.Marshal(d)\n\tif err != nil {\n\t\thttp.Error(w, \"Error creating JSON response\", http.StatusInternalServerError)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(c)\n\tfmt.Fprintf(w, \"%s\", dj)\n}",
"func writeJsonResponse(w http.ResponseWriter, content *[]byte, code int) {\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(code)\n\tw.Write(*content)\n}"
] | [
"0.6981968",
"0.6872307",
"0.67248374",
"0.6696121",
"0.669428",
"0.66882426",
"0.66854435",
"0.667467",
"0.6583421",
"0.65646803",
"0.6551456",
"0.64788187",
"0.644141",
"0.6431342",
"0.6420616",
"0.6388388",
"0.63693845",
"0.6315508",
"0.6286771",
"0.62613755",
"0.6195387",
"0.61946654",
"0.6190749",
"0.6168781",
"0.61619633",
"0.6141766",
"0.6124296",
"0.61207366",
"0.6113442",
"0.61023426",
"0.6088285",
"0.60765785",
"0.6074263",
"0.6070222",
"0.60623115",
"0.60574925",
"0.6057397",
"0.6057073",
"0.60481405",
"0.60402983",
"0.60402983",
"0.60315204",
"0.6014297",
"0.6006813",
"0.5971474",
"0.5968436",
"0.596398",
"0.59579235",
"0.5953463",
"0.5931107",
"0.59234965",
"0.5902716",
"0.58875936",
"0.58845675",
"0.58819675",
"0.5863093",
"0.586122",
"0.58450586",
"0.58062285",
"0.5794374",
"0.5794374",
"0.57834107",
"0.5781486",
"0.57654727",
"0.57509965",
"0.574776",
"0.5746109",
"0.5743687",
"0.57289165",
"0.5727253",
"0.568533",
"0.568125",
"0.5677718",
"0.56764853",
"0.5670837",
"0.56638855",
"0.56605476",
"0.5657151",
"0.5647336",
"0.5646896",
"0.56330335",
"0.5622745",
"0.5622249",
"0.5616746",
"0.56155723",
"0.5605795",
"0.55951226",
"0.5589779",
"0.5586942",
"0.5586942",
"0.5572282",
"0.5546777",
"0.55415744",
"0.5539356",
"0.5532516",
"0.55178493",
"0.5515818",
"0.55127054",
"0.5512597",
"0.5496234"
] | 0.64621174 | 12 |
write a teach/ask success message with header to the output | func ATJsonMessage(writer http.ResponseWriter, info_code int, info_message string) {
writer.WriteHeader(info_code)
writer.Header().Set("Content-Type", "application/json")
obj := model.ATResultList{ ResultList: make([]model.ATResult,0) }
obj.ResultList = append(obj.ResultList, model.ATResult{Text: info_message,
Timestamp: util.GetTimeNowSting(), Topic: "K/AI"})
json_bytes, _ := json.Marshal(obj)
writer.Write(json_bytes)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Success(w io.Writer, format string, args ...any) {\n\tformat = strings.TrimRight(format, \"\\r\\n\") + \"\\n\"\n\tfmt.Fprintf(w, \"\\n\"+Wrap(BoldGreen(\"SUCCESS: \")+format, DefaultTextWidth)+\"\\n\", args...)\n}",
"func writeSuccess(w http.ResponseWriter, targetLanguage language.Tag, targetPhrase string) {\n\theaders := w.Header()\n\n\theaders.Set(\"Content-Type\", \"text/plain\")\n\theaders.Set(\"Content-Language\", targetLanguage.String())\n\tw.WriteHeader(http.StatusOK)\n\tio.WriteString(w, targetPhrase)\n}",
"func Success(t *testing.T, msg string, args ...interface{}) {\n\tm := fmt.Sprintf(msg, args...)\n\tt.Log(fmt.Sprintf(\"\\t %-80s\", m), Succeed)\n}",
"func Successf(a interfaces.AssumeCredentialProcess, emoji string, prefix string, message string, args ...interface{}) {\n\ts := a.GetDestination()\n\n\tformatted := format(a, textColorSuccess, emoji, prefix, message)\n\tfmt.Fprintf(s, formatted, args...)\n\n}",
"func Successln(a interfaces.AssumeCredentialProcess, emoji string, prefix string, message string) {\n\ts := a.GetDestination()\n\n\tformatted := format(a, textColorSuccess, emoji, prefix, message)\n\tfmt.Fprintln(s, formatted)\n\n}",
"func (s *session) respondOK(format string, args ...interface{}) error {\n\treturn s.writer.PrintfLine(fmt.Sprintf(\"+OK %s\", format), args...)\n}",
"func Success(format string, a ...interface{}) {\n\tif Level >= 3 {\n\t\ta, w := extractLoggerArgs(format, a...)\n\t\ts := fmt.Sprintf(label(format, SuccessLabel), a...)\n\n\t\tif Color {\n\t\t\tw = color.Output\n\t\t\ts = color.GreenString(s)\n\t\t}\n\n\t\tfmt.Fprintf(w, s)\n\t}\n}",
"func Success(message string, scope string) error {\n\treturn printLine(\"✔\", color.FgHiGreen, message, scope)\n}",
"func Success(v ...interface{}) {\n\tprint(SuccessFont)\n\tfmt.Print(v...)\n\tterminal.Reset()\n}",
"func writePlainText(statusCode int, text string, w http.ResponseWriter) {\n\tw.Header().Set(\"Content-Type\", \"text/plain\")\n\tw.WriteHeader(statusCode)\n\tfmt.Fprintln(w, text)\n}",
"func Success(a interfaces.AssumeCredentialProcess, emoji string, prefix string, message string) {\n\ts := a.GetDestination()\n\n\tformatted := format(a, textColorSuccess, emoji, prefix, message)\n\tfmt.Fprint(s, formatted)\n\n}",
"func writeSuccessResponse(w http.ResponseWriter, bikes []common.Bike) {\n\tresponse := Response{true, bikes, \"\"}\n\twriteResponse(w, response)\n}",
"func logSuccess(a *Attempt) {\n\t/* Print message to log */\n\tlog.Printf(\"[%v] SUCCESS %v@%v - %v\", a.Tasknum, a.Config.User, a.Host,\n\t\ta.Pass)\n\t/* Write message to file */\n\tgo appendLine(*gc.Sfile, fmt.Sprintf(\"%v@%v %v\\n\", a.Config.User,\n\t\ta.Host, a.Pass))\n}",
"func PrintSuccess(message string) {\n\tfmt.Printf(green + \"✔ \" + message + noFormat + \"\\n\")\n}",
"func (t *Test) PrintSuccess() {\n\tlog.Successf(\"ok: %s\", t.Name)\n}",
"func PrintSuccess() {\n\telapsed := endTime.Sub(startTime)\n\tfmt.Print(\"\\n\")\n\tlog.Infof(\"Ingestion completed in %v\", elapsed)\n}",
"func PrintSuccessMessage(w http.ResponseWriter, obj interface{}) {\n\tmodel, err := json.Marshal(obj)\n\tif err != nil {\n\t\tfmt.Println(err)\n\t}\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(model)\n}",
"func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII TestLab - Basic Connectivity Tests\")\n\tfmt.Println(\"Copyright: Bret Jordan\")\n\tfmt.Println(\"Version:\", Version)\n\tif Build != \"\" {\n\t\tfmt.Println(\"Build:\", Build)\n\t}\n\tfmt.Println(\"\")\n}",
"func Success(taskDescription string) {\n\tcolor.Green(\"Success - \" + taskDescription)\n}",
"func SuccessStatusEvent(w io.Writer, fmtstr string, a ...interface{}) {\n\tif runtime.GOOS == windowsOS {\n\t\tfmt.Fprintf(w, \"%s\\n\", fmt.Sprintf(fmtstr, a...))\n\t} else {\n\t\tfmt.Fprintf(w, \"✅ %s\\n\", fmt.Sprintf(fmtstr, a...))\n\t}\n}",
"func (v *View) Success(w http.ResponseWriter, r *http.Request, yield interface{}, message string) {\n\tw.Header().Set(\"Content-Type\", \"text/html\")\n\n\tvar vd Data\n\tvd.User.ID = r.Header.Get(\"userID\")\n\tadmin, err := strconv.ParseBool(r.Header.Get(\"admin\"))\n\tif err != nil {\n\t\tvd.User.Admin = false\n\t} else {\n\t\tvd.User.Admin = admin\n\t}\n\tvd.Yield = yield\n\tvd.Messages.Success = message\n\n\tv.Template.ExecuteTemplate(w, v.Layout, vd)\n}",
"func OkFinisher(w http.ResponseWriter, r *http.Request) {\n\tw.Write([]byte(\"Ok\"))\n}",
"func (m *Main) PrintSuccess() {\n\tfmt.Println(\"\")\n\tlog.Successf(\"ok: successfully tested %s\", m.Name)\n\tfmt.Println(\"\")\n}",
"func (srv *Service) WriteOk(w http.ResponseWriter) {\n\tw.WriteHeader(http.StatusOK)\n}",
"func send(status int, out http.ResponseWriter, format string, args ...interface{}) {\n\tout.WriteHeader(status)\n\n\tif len(args) == 0 {\n\t\tfmt.Fprintf(out, format)\n\t\treturn\n\t}\n\n\tfmt.Fprintf(out, format, args...)\n}",
"func OK(w http.ResponseWriter, data interface{}, message string) {\n\tsuccessResponse := BuildSuccess(data, message, MetaInfo{HTTPStatus: http.StatusOK})\n\tWrite(w, successResponse, http.StatusOK)\n}",
"func psuccess(format string, a ...interface{}) (n int, err error) {\n\treturn fmt.Printf(CGREEN+format+CEND+\"\\n\", a...)\n}",
"func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII Server\")\n\tfmt.Println(\"Copyright, Bret Jordan\")\n\tfmt.Println(\"Version:\", sVersion)\n\tfmt.Println(\"\")\n}",
"func (w *Writer) Alert(m string) error {}",
"func Success(w http.ResponseWriter, message string, code int) {\n\tif code == 0 {\n\t\tcode = http.StatusOK\n\t}\n\tw.WriteHeader(code)\n\tjson.NewEncoder(w).Encode(map[string]string{\"success\": message})\n}",
"func Success(w http.ResponseWriter) {\n\tw.Write(JSON(SuccessResponse{Message: \"success\", Code: 200}))\n}",
"func printCoinFlipSuccess(destination string) {\n\tfmt.Println(\"FLIPPED COIN sending rumor to \" + destination)\n}",
"func feedback(t *tufCommander, payload []byte) error {\n\t// We only get here when everything goes well, since the flag \"quiet\" was\n\t// provided, we output nothing but just return.\n\tif t.quiet {\n\t\treturn nil\n\t}\n\n\t// Flag \"quiet\" was not \"true\", that's why we get here.\n\tif t.output != \"\" {\n\t\treturn ioutil.WriteFile(t.output, payload, 0644)\n\t}\n\n\tos.Stdout.Write(payload)\n\treturn nil\n}",
"func WriteHeader(text interface{}) {\n\tfmt.Printf(\"\\n[-] %s\\n\", text)\n}",
"func Ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(\"\\033[31m%s:%d: unexpected error: %s\\033[39m\\n\\n\", filepath.Base(file), line, err.Error())\n\t\ttb.FailNow()\n\t}\n}",
"func respond_with_success(w http.ResponseWriter,\n\tstatus int,\n\tmessage string) {\n\tdetail := fmt.Sprintf(\"Success (%d). Sending response: %s\", status, message)\n\tif logutil.GPS_DEBUG > 0 {\n\t\tlogutil.LogRestResponse(detail)\n\t}\n\tadd_json_header(w)\n\tset_response_status(w, status)\n\tfmt.Fprintf(w, \"%s\\n\", message)\n}",
"func WriteSuccess(w http.ResponseWriter, data interface{}, status int) {\n\tres := response.BuildSuccess(data, response.MetaInfo{HTTPStatus: status})\n\tresponse.Write(w, res, status)\n}",
"func SuccessResponse(msg string, writer http.ResponseWriter) {\n\ttype errdata struct {\n\t\tStatusCode int\n\t\tMessage string\n\t}\n\ttemp := &errdata{StatusCode: 200, Message: msg}\n\n\t//Send header, status code and output to writer\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\twriter.WriteHeader(http.StatusOK)\n\tjson.NewEncoder(writer).Encode(temp)\n}",
"func Success(format string, a ...interface{}) {\n\tprefix := green(succ)\n\tlog.Println(prefix, fmt.Sprintf(format, a...))\n}",
"func printOutputHeader() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"FreeTAXII - STIX Table Creator\")\n\tfmt.Println(\"Copyright: Bret Jordan\")\n\tfmt.Println(\"Version:\", Version)\n\tif Build != \"\" {\n\t\tfmt.Println(\"Build:\", Build)\n\t}\n\tfmt.Println(\"\")\n}",
"func(this *GView) Done(youWin games.Outcome) {\n\tif youWin == games.Win {\n\t\tthis.inOut.Write(([]byte) (this.name + \" won.\\n\"))\n\t} else if youWin == games.Draw {\n\t\tthis.inOut.Write(([]byte) (\"There was a tie.\\n\"))\n\t} else {\n\t\tthis.inOut.Write(([]byte) (this.name + \" lost.\\n\"))\n\t}\n}",
"func genericSuccess(caller, inp, msg string) error {\n\tresp, err := json.Marshal(&Response{Status: \"Success\", Message: msg})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tprintAndLog(caller, inp, string(resp))\n\treturn nil\n}",
"func (a *App) Ok(w http.ResponseWriter, r *http.Request) {\n\tw.WriteHeader(http.StatusOK)\n\tlogerr(w.Write(randomByteSlice(10, \"OK\", \"0123456789abcdef\")))\n}",
"func (i *CmdLine) PrintSuccess() {\n\tfmt.Printf(\"Richtig!\")\n}",
"func UploadSuccessHandler(w http.ResponseWriter, r *http.Request) {\n\t_, _ = io.WriteString(w, \"upload succeed\")\n}",
"func Successln(v ...interface{}) {\n\tSuccess(v...)\n\tprintln()\n}",
"func send_response() {\r\n\r\n\tfmt.Printf(\"<RESPONSE>\\n<STATUS>\\n\")\r\n\tfmt.Printf(\"<status_code>%d</status_code>\\n\",status_info.status_code)\r\n\tfmt.Printf(\"<error_message>%s</error_message>\\n\",status_info.error_message)\r\n\tfmt.Printf(\"<error_details>%s</error_details>\\n\",status_info.error_details)\r\n\tfmt.Printf(\"</STATUS>\\n\")\r\n\tif response_data != \"\" {\r\n\t\tfmt.Printf(\"%s\",response_data)\r\n\t}\r\n\tfmt.Printf(\"</RESPONSE>\")\r\n\tos.Exit(0)\r\n}",
"func respondOk(writer http.ResponseWriter) {\n\twriter.WriteHeader(http.StatusOK)\n\twriter.Header().Set(\"Content-Type\", \"application/json\")\n\tio.WriteString(writer, `{ \"status\": \"OK\" }`)\n}",
"func write_response(fd io.ReadWriteCloser,\n\tstatus int,\n\tresponse_text string) (n int, err error) {\n\tvar buf bytes.Buffer\n\tinput_message := \"%d %s\\r\\n\"\n\tsuccess_message := \"%d text/gemini\\r\\n%s\\r\\n\"\n\terror_message := \"%d %s\\r\\n\"\n\tif status < 20 {\n\t\tn, err := fmt.Fprintf(&buf, input_message, status, response_text)\n\t\tif err != nil {\n\t\t\tfmt.Printf(err.Error())\n\t\t\treturn n, err\n\t\t}\n\t} else if status < 30 {\n\t\tn, err := fmt.Fprintf(&buf, success_message, status, response_text)\n\t\tif err != nil {\n\t\t\tfmt.Printf(err.Error())\n\t\t\treturn n, err\n\t\t}\n\t} else {\n\t\tn, err := fmt.Fprintf(&buf, error_message, status, response_text)\n\t\tif err != nil {\n\t\t\tfmt.Printf(err.Error())\n\t\t\treturn n, err\n\t\t}\n\t}\n\treturn fd.Write(buf.Bytes())\n}",
"func (c *Context) Success(name string) {\n\tc.HTML(http.StatusOK, name)\n}",
"func displayAccount(w io.Writer) {\n\taccount := doAccount()\n\tif account.Status != \"active\" {\n\t\tlog.Errorf(\"DO Account issue expected status (active) got (%s) \", account.Status)\n\t\treturn\n\t}\n\tfmt.Fprintf(w, account.String())\n}",
"func displayAddResponse(apiResp JobApiResponse) {\n\n\tif apiResp.Status == apiSuccess {\n\n\t\tvar display = template.Must(template.New(\"JobAddSuccess\").Parse(addTemplSuccess))\n\n\t\tif err := display.Execute(os.Stdout, apiResp); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\n\t} else {\n\t\tvar display = template.Must(template.New(\"ApiError\").Parse(apiErrorTempl))\n\n\t\tif err := display.Execute(os.Stdout, apiResp); err != nil {\n\t\t\tlog.Fatal(err)\n\t\t}\n\t}\n\n}",
"func Successy(name string, obj interface{}) {\n\tyam, err := SPrintYAML(obj)\n\tif err != nil {\n\t\tError(err)\n\t\tSuccess(obj)\n\t\treturn\n\t}\n\tSuccessf(\"%s >> \\n\\n%s\\n\", name, yam)\n}",
"func Success(output string) *discordgo.MessageEmbed {\n\treturn &discordgo.MessageEmbed{\n\t\tTitle: \"Success\",\n\t\tTimestamp: time.Now().Format(time.RFC3339),\n\t\tColor: 0x00ff00,\n\t\tFields: []*discordgo.MessageEmbedField{\n\t\t\t{\n\t\t\t\tName: \"Output\",\n\t\t\t\tValue: \"```\" + strings.ReplaceAll(output, \"`\", \"'\") + \"```\",\n\t\t\t},\n\t\t},\n\t}\n}",
"func (c *requestContext) ok() {\n\tc.Writer.Header().Set(\"Content-Type\", \"text/plain; charset=utf-8\")\n\tc.Writer.WriteHeader(200)\n\tfmt.Fprintln(c.Writer, \"OK\")\n}",
"func (w *OutputWriter) printResult(r TestResult) {\n\tif !r.Success {\n\t\tw.fprintf(w.au.Red(w.template.testResult(r)))\n\t\treturn\n\t}\n\tw.fprintf(w.template.testResult(r))\n}",
"func (s *STS) sendSuccessfulResponse(w http.ResponseWriter, tokenData []byte) {\n\tw.Header().Add(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK)\n\tif _, err := w.Write(tokenData); err != nil {\n\t\tlog.Printf(\"failure in sending STS success response: %v\", err)\n\t\treturn\n\t}\n}",
"func (r renderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {}",
"func PrintSuccess(msg interface{}) {\n\tswitch os.Getenv(\"GHORG_COLOR\") {\n\tcase \"enabled\":\n\t\tcolor.New(color.FgGreen).Println(msg)\n\tdefault:\n\t\tfmt.Println(msg)\n\t}\n}",
"func ok(w http.ResponseWriter, r *http.Request, c *Context) {\n\tfmt.Fprintln(w, \"ok\")\n}",
"func (c *Context) Status(code int) {\n\tc.Writer.WriteHeader(code)\n}",
"func writeSuccessResponse(w http.ResponseWriter, data interface{}) {\n\tw.WriteHeader(http.StatusOK)\n\tbs, err := json.Marshal(data)\n\tif err != nil {\n\t\tlog.Error(err)\n\t\treturn\n\t}\n\tif _, err := w.Write(bs); err != nil {\n\t\tlog.Error(err)\n\t}\n}",
"func (o *GetTeacherOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) {\n\n\trw.WriteHeader(200)\n\tif o.Payload != nil {\n\t\tpayload := o.Payload\n\t\tif err := producer.Produce(rw, payload); err != nil {\n\t\t\tpanic(err) // let the recovery middleware deal with this\n\t\t}\n\t}\n}",
"func respondOK(w http.ResponseWriter, output []byte) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Write(output)\n}",
"func Status(w http.ResponseWriter, r *http.Request) {\n if game == nil {\n initGame()\n }\n fmt.Fprintln(w, game)\n\n fmt.Fprintf(w, \"Next player is: Player %v\\n\", currentPlayer)\n\n winner := game.GetWinner()\n if winner != 0 {\n fmt.Fprintf(w, \"** PLAYER %v WINS THE GAME **\\n\", winner)\n } else {\n fmt.Fprintf(w, \"No winner currently\\n\")\n }\n}",
"func healthcheckok(writer http.ResponseWriter, request *http.Request) {\n\twriter.WriteHeader(200)\n}",
"func logSuccess(str string) {\n\tcolor.Println(color.Green(\"[✔] \") + color.Yellow(str))\n}",
"func Header(out io.StringWriter) {\n\tout.WriteString(\"ID Due Date Pri Description/Status\\n\")\n\tout.WriteString(terminal.HorizontalLine())\n\tout.WriteString(\"\\n\")\n}",
"func header(req *restful.Request, resp *restful.Response) {\n\tresp.WriteHeader(200)\n}",
"func Success(w http.ResponseWriter, code int, msg string, data interface{}) error {\n\treturn sendResponse(w, Resp{SUCCESS, code, msg, data, SuccessHttpCode})\n}",
"func Success(ctx ...interface{}) {\n\tlogNormal(successStatus, time.Now(), ctx...)\n}",
"func Success(c echo.Context) error {\n\treturn c.String(http.StatusOK, \"Operation was fine\")\n}",
"func Ok(ctx *fiber.Ctx, msg string, data interface{}) error {\n\treturn Success(ctx, msg, data, http.StatusOK)\n}",
"func (l *Logger) Successf(format string, a ...interface{}) {\r\n\tl.logInternal(SuccessLevel, 4, fmt.Sprintf(format, a...))\r\n}",
"func (this *commonResult) Succeed() {\n\tthis.was_successful = true\n}",
"func taskFailureSubject(ctx AlertContext) string {\n\tsubj := &bytes.Buffer{}\n\tfailed := []string{}\n\tfor _, test := range ctx.Task.LocalTestResults {\n\t\tif test.Status == evergreen.TestFailedStatus {\n\t\t\tfailed = append(failed, cleanTestName(test.TestFile))\n\t\t}\n\t}\n\n\tswitch {\n\tcase ctx.Task.Details.TimedOut:\n\t\tsubj.WriteString(\"Task Timed Out: \")\n\tcase len(failed) == 1:\n\t\tsubj.WriteString(\"Test Failure: \")\n\tcase len(failed) > 1:\n\t\tsubj.WriteString(\"Test Failures: \")\n\tcase ctx.Task.Details.Description == task.AgentHeartbeat:\n\t\tsubj.WriteString(\"Task System Failure: \")\n\tcase ctx.Task.Details.Type == evergreen.CommandTypeSystem:\n\t\tsubj.WriteString(\"Task System Failure: \")\n\tcase ctx.Task.Details.Type == evergreen.CommandTypeSetup:\n\t\tsubj.WriteString(\"Task Setup Failure: \")\n\tdefault:\n\t\tsubj.WriteString(\"Task Failed: \")\n\t}\n\n\tfmt.Fprintf(subj, \"%s on %s \", ctx.Task.DisplayName, ctx.Build.DisplayName)\n\n\t// include test names if <= 4 failed, otherwise print two plus the number remaining\n\tif len(failed) > 0 {\n\t\tsubj.WriteString(\"(\")\n\t\tif len(failed) <= 4 {\n\t\t\tsubj.WriteString(strings.Join(failed, \", \"))\n\t\t} else {\n\t\t\tfmt.Fprintf(subj, \"%s, %s, +%v more\", failed[0], failed[1], len(failed)-2)\n\t\t}\n\t\tsubj.WriteString(\") \")\n\t}\n\n\tfmt.Fprintf(subj, \"// %s @ %s\", ctx.ProjectRef.DisplayName, ctx.Version.Revision[0:8])\n\treturn subj.String()\n}",
"func logSuccess(c, r [16]byte, p string, pot *os.File) error {\n\t/* String indicating cracking */\n\tans := fmt.Sprintf(\n\t\t\"$vnc$*%02X*%02X:%s\",\n\t\tc,\n\t\tr,\n\t\tstrings.TrimRight(p, \"\\x00\"),\n\t)\n\t/* If we have a potfile, update it */\n\tif nil != pot {\n\t\tif _, err := fmt.Fprintf(pot, \"%s\\n\", ans); nil != err {\n\t\t\treturn nil\n\t\t}\n\t}\n\t/* Tell the user */\n\tslog.Printf(\"FOUND %s\", ans)\n\treturn nil\n}",
"func responseOk(w http.ResponseWriter, res inp, vars []string) {\n\tw.WriteHeader(http.StatusOK)\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tbody := map[string]string{\n\t\tvars[0]: res.First,\n\t\tvars[1]: res.Second,\n\t}\n\tjson.NewEncoder(w).Encode(body)\n}",
"func SendSubtitleDownloadSuccess(successAPI string) {\n\treturn\n}",
"func Result(message string) error {\n\tif message == \"\" {\n\t\treturn newRequiredParam(\"message\")\n\t}\n\n\t_, err := os.Stdout.WriteString(message + \"\\n\")\n\treturn err\n}",
"func Successf(format string, a ...interface{}) {\n\tif GlobalLevel >= InfoLevel {\n\t\tsuccessf(Timestamps, Color, format, a...)\n\t}\n}",
"func authOk(sucProb byte) error {\n\t/* Get a random number */\n\tb := make([]byte, 1)\n\tif _, err := rand.Read(b); nil != err {\n\t\treturn fmt.Errorf(\"random read: %v\", err)\n\t}\n\t/* See if it's a winner */\n\tif b[0] <= sucProb {\n\t\treturn nil\n\t}\n\treturn errors.New(\"permission denied\")\n}",
"func Ok(tb testing.TB, err error) {\n\tif err != nil {\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"file\": filepath.Base(file),\n\t\t\t\"line\": line,\n\t\t\t\"error\": err.Error(),\n\t\t}).Error(\"unexpected error\")\n\t\ttb.FailNow()\n\t}\n}",
"func OK(t *testing.T, ok bool, messages ...string) {\n\t_ok(t, ok, callerLine(1), messages...)\n}",
"func Success(v ...interface{}) string {\n\treturn logr.Success(v...)\n}",
"func Success(a ...interface{}) {\n\tcolor.Set(color.FgHiGreen)\n\tdefer color.Unset()\n\tsuccessLogger.Println(a...)\n}",
"func (r *Router) Success(ctx context.Context, w http.ResponseWriter, code int) {\n\treqID := r.GetRequestID(ctx)\n\n\tr.setDefaultHeaders(ctx, w)\n\n\tif code != http.StatusOK {\n\t\tw.WriteHeader(code)\n\t}\n\n\tr.logger.Debugw(\"response\",\n\t\t\"request_id\", reqID,\n\t\t\"status_code\", code,\n\t)\n}",
"func (c *CountHandler) OkResponse(resp http.ResponseWriter, req *http.Request) {\n\tc.numRequests++\n\tresp.WriteHeader(http.StatusOK)\n\tresp.Write([]byte(\"{\\\"stat\\\": \\\"ok\\\"}\"))\n}",
"func handleLinkSuccess(lh *linkHandler) tea.Cmd {\n\treturn func() tea.Msg {\n\t\treturn linkSuccessMsg(<-lh.success)\n\t}\n}",
"func (resp *Response) StatusOk(w http.ResponseWriter) {\n\tresp.Ok = true\n\twrite(resp, w)\n}",
"func (s *Server) writeInfoResponse(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\tmessage []byte,\n\tstatus int,\n\theaders map[string]string,\n) {\n\tfor k, v := range headers {\n\t\tw.Header().Add(k, v)\n\t}\n\n\tw.WriteHeader(status)\n\tw.Write(message)\n}",
"func Ok(tb testing.TB, err error) {\r\n\tif err != nil {\r\n\t\t_, file, line, _ := runtime.Caller(1)\r\n\t\tfmt.Printf(\"%s:%d: unexpected error: %s\\n\\n\", filepath.Base(file), line, err.Error())\r\n\t\ttb.FailNow()\r\n\t}\r\n}",
"func (hangman *Hangman) displayResult(screenOutputer ScreenOutputerInterface, result bool) {\n notFoundTemplate := template.New(\"Not Found Template\")\n notFoundTemplate, _ = notFoundTemplate.Parse(\"Sorry letter not found you have {{.Guesses}} guesses left \\n\")\n\n if (result == false) {\n fmt.Println(screenOutputer.selectImage(hangman.guesses))\n hangman.guesses++\n\n notFoundTemplate.Execute(os.Stdout, struct {\n Guesses int\n }{\n 11 - hangman.guesses,\n })\n\n\n } else {\n fmt.Println(\"Well done you guessed correctly\")\n }\n}",
"func printHeader(w io.Writer, info *athena.QueryExecution) {\n\tfmt.Fprintf(w, \"Query: %s;\\n\", aws.StringValue(info.Query))\n}",
"func printMyResult(sentence string) string {\n\tnewSentence := \"Saya sedang belajar \" + sentence\n\treturn newSentence\n}",
"func Ok(err error) {\n\tif err != nil {\n\t\tfmt.Println(err)\n\t\t_, file, line, _ := runtime.Caller(1)\n\t\tfmt.Printf(okFmt, filepath.Base(file), line, err.Error())\n\t\togl.SetLevel(ogl.WARN)\n\t\tpanic(\"Ok fail\")\n\t}\n}",
"func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) {\n\tif is11 {\n\t\tbw.WriteString(\"HTTP/1.1 \")\n\t} else {\n\t\tbw.WriteString(\"HTTP/1.0 \")\n\t}\n\tif text, ok := statusText[code]; ok {\n\t\tbw.Write(strconv.AppendInt(scratch[:0], int64(code), 10))\n\t\tbw.WriteByte(' ')\n\t\tbw.WriteString(text)\n\t\tbw.WriteString(\"\\r\\n\")\n\t} else {\n\t\t// don't worry about performance\n\t\tfmt.Fprintf(bw, \"%03d status code %d\\r\\n\", code, code)\n\t}\n}",
"func writeFTPReplySingleline(writer io.WriteCloser, buf *bytes.Buffer, code int, params ...interface{}) {\n\tbuf.Reset()\n\treply, ok := ReplyCodes[code]\n\tif !ok {\n\t\tpanic(fmt.Errorf(\"writeFTPReply: %d not a valid reply code\", code))\n\t}\n\n\t// We (should) avoid small writes and form a buffer.\n\tbuf.Write(strconv.AppendInt(nil, int64(code), 10))\n\tbuf.WriteByte(' ')\n\n\tif len(params) == 0 {\n\t\tbuf.Write(reply) // This should be a much faster path.\n\t} else {\n\t\tfmt.Fprintf(buf, string(reply), params...)\n\t}\n\n\tbuf.WriteString(\"\\r\\n\")\n\n\t_, err := buf.WriteTo(writer)\n\tif err != nil {\n\t\twriter.Close()\n\t}\n}",
"func (m *Main) ReportSuccess(cmd string) {\n\tres := &Result{Command: cmd, Pass: true}\n\tm.Report.Results = append(m.Report.Results, res)\n}",
"func displayWelcomeMessage() {\n\tart :=\n\t\t` \t\n================================================================================================ \n ___ __ __ _ _ ___ ___ __ ___ __ ___ __ ___ ___ \n| | |__ | / ' / \\ | \\/ | |__ | / \\ |\\ | |__ / \\ /__ / \\ |__ | \n|/\\| |___ |___\\__, \\__/ | | |___ | \\__/ | \\| |___ \\__/ ___/ \\__/ | | \n\n================================================================================================= \n\t \n\t\n\t`\n\tfmt.Println(art)\n\tquitMessage := \"\\nPress Ctrl + C to stop the tracker..\"\n\tfmt.Println(quitMessage)\n\n}",
"func greet(w http.ResponseWriter, req *http.Request) {\n\tfmt.Fprintf(w, \"Welcome\")\n}"
] | [
"0.69103014",
"0.68044895",
"0.5927801",
"0.5798956",
"0.57739043",
"0.57026905",
"0.5700581",
"0.56719536",
"0.56467646",
"0.5633215",
"0.5616624",
"0.559593",
"0.55451703",
"0.5498684",
"0.5488845",
"0.5468093",
"0.5461246",
"0.5452703",
"0.54313964",
"0.53798497",
"0.5377338",
"0.5360924",
"0.53566027",
"0.5342361",
"0.53271294",
"0.53164184",
"0.5307126",
"0.5304567",
"0.5288724",
"0.5283885",
"0.5281048",
"0.5267253",
"0.5252106",
"0.523336",
"0.5231339",
"0.5227139",
"0.5224396",
"0.52234113",
"0.51950806",
"0.5184093",
"0.5183226",
"0.5182262",
"0.5179677",
"0.5164205",
"0.5162785",
"0.516261",
"0.5150508",
"0.5142071",
"0.51341045",
"0.5128661",
"0.5115476",
"0.511538",
"0.5105212",
"0.5098417",
"0.5085423",
"0.505844",
"0.505688",
"0.503186",
"0.49943146",
"0.49935752",
"0.49850896",
"0.49791917",
"0.49765745",
"0.4975799",
"0.4973549",
"0.49730322",
"0.4972555",
"0.49680886",
"0.49576455",
"0.49541277",
"0.49517715",
"0.49479684",
"0.49217483",
"0.49178898",
"0.4917106",
"0.49122062",
"0.4911623",
"0.48940226",
"0.48921293",
"0.4891253",
"0.4889784",
"0.48838484",
"0.4876113",
"0.48657262",
"0.4864547",
"0.48640057",
"0.48594797",
"0.4856626",
"0.48539108",
"0.48528212",
"0.48442686",
"0.48433134",
"0.4843247",
"0.4826067",
"0.48258242",
"0.48243344",
"0.48218673",
"0.48185423",
"0.48153672",
"0.48149347",
"0.48085672"
] | 0.0 | -1 |
RoleExists returns whether profile exists | func GetRole(iamc aws.IAMAPI, roleName *string) (*Role, error) {
out, err := iamc.GetRole(&iam.GetRoleInput{
RoleName: roleName,
})
if err != nil {
return nil, err
}
outRole := Role{
Arn: out.Role.Arn,
Tags: map[string]*string{},
}
if out.Role.Tags != nil {
for _, tag := range out.Role.Tags {
if tag.Key == nil {
continue
}
outRole.Tags[*tag.Key] = tag.Value
}
}
return &outRole, nil
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (_RBAC *RBACCaller) HasRole(opts *bind.CallOpts, _operator common.Address, _role string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _RBAC.contract.Call(opts, out, \"hasRole\", _operator, _role)\n\treturn *ret0, err\n}",
"func (_Superuserable *SuperuserableCaller) HasRole(opts *bind.CallOpts, _operator common.Address, _role string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Superuserable.contract.Call(opts, out, \"hasRole\", _operator, _role)\n\treturn *ret0, err\n}",
"func HasRole(c *JWTClaims, r string) bool {\n\tfor _, role := range c.Roles {\n\t\tif r == role {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (_Userable *UserableCaller) HasRole(opts *bind.CallOpts, _operator common.Address, _role string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Userable.contract.Call(opts, out, \"hasRole\", _operator, _role)\n\treturn *ret0, err\n}",
"func HasRole(ctx context.Context, role model.Role) (bool, error) {\n\tlogger := appcontext.ZLogger(ctx)\n\tprincipal := appcontext.Principal(ctx)\n\tswitch role {\n\tcase model.RoleEasiUser:\n\t\tif !principal.AllowEASi() {\n\t\t\tlogger.Info(\"does not have EASi job code\")\n\t\t\treturn false, nil\n\t\t}\n\t\tlogger.Info(\"user authorized as EASi user\", zap.Bool(\"Authorized\", true))\n\t\treturn true, nil\n\tcase model.RoleEasiGovteam:\n\t\tif !principal.AllowGRT() {\n\t\t\tlogger.Info(\"does not have Govteam job code\")\n\t\t\treturn false, nil\n\t\t}\n\t\tlogger.Info(\"user authorized as Govteam member\", zap.Bool(\"Authorized\", true))\n\t\treturn true, nil\n\tcase model.RoleEasi508Tester:\n\t\tif !principal.Allow508Tester() {\n\t\t\tlogger.Info(\"does not have 508 tester job code\")\n\t\t\treturn false, nil\n\t\t}\n\t\tlogger.Info(\"user authorized as 508 Tester\", zap.Bool(\"Authorized\", true))\n\t\treturn true, nil\n\tcase model.RoleEasi508User:\n\t\tif !principal.Allow508User() {\n\t\t\tlogger.Info(\"does not have 508 User job code\")\n\t\t\treturn false, nil\n\t\t}\n\t\tlogger.Info(\"user authorized as 508 User\", zap.Bool(\"Authorized\", true))\n\t\treturn true, nil\n\tcase model.RoleEasi508TesterOrUser:\n\t\tis508UserOrTester := principal.Allow508Tester() || principal.Allow508User()\n\t\tif !is508UserOrTester {\n\t\t\tlogger.Info(\"does not have 508 User nor Tester job code\")\n\t\t\treturn false, nil\n\t\t}\n\t\treturn true, nil\n\tcase model.RoleEasiTrbAdmin:\n\t\tif !principal.AllowTRBAdmin() {\n\t\t\tlogger.Info(\"does not have TRB Admin job code\")\n\t\t\treturn false, nil\n\t\t}\n\t\tlogger.Info(\"user authorized as TRB admin\", zap.Bool(\"Authorized\", true))\n\t\treturn true, nil\n\tdefault:\n\t\tlogger.With(zap.String(\"Role\", role.String())).Info(\"Unrecognized user role\")\n\t\treturn false, nil\n\t}\n}",
"func (store *Store) HasRole(name string) (bool, error) {\n\tstore.imutex.RLock()\n\tdefer store.imutex.RUnlock()\n\t_, ok := store.roles[name]\n\treturn ok, nil\n}",
"func (u User) HasRole(candidate string) bool {\n\tfor _, role := range u.Roles {\n\t\tif role == candidate {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (u ScimUser) HasRole(role string) bool {\n\tfor _, r := range u.Roles {\n\t\tif r.Value == role {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (_Auditable *AuditableCaller) HasRole(opts *bind.CallOpts, _operator common.Address, _role string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Auditable.contract.Call(opts, out, \"hasRole\", _operator, _role)\n\treturn *ret0, err\n}",
"func (_SweetToken *SweetTokenCaller) HasRole(opts *bind.CallOpts, roleName string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _SweetToken.contract.Call(opts, out, \"hasRole\", roleName)\n\treturn *ret0, err\n}",
"func (_TellorMesosphere *TellorMesosphereCaller) HasRole(opts *bind.CallOpts, role [32]byte, account common.Address) (bool, error) {\n\tvar out []interface{}\n\terr := _TellorMesosphere.contract.Call(opts, &out, \"hasRole\", role, account)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func (_BREMFactory *BREMFactoryCaller) HasRole(opts *bind.CallOpts, _operator common.Address, _role string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _BREMFactory.contract.Call(opts, out, \"hasRole\", _operator, _role)\n\treturn *ret0, err\n}",
"func hasRole(m *discordgo.Member, autorizedRoles map[string]struct{}) bool {\n\tfor _, r := range m.Roles {\n\t\tif _, ok := autorizedRoles[r]; ok {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (_DelegationController *DelegationControllerCaller) HasRole(opts *bind.CallOpts, role [32]byte, account common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _DelegationController.contract.Call(opts, out, \"hasRole\", role, account)\n\treturn *ret0, err\n}",
"func (z *User) HasRole(role string) bool {\n\tresult := false\n\tif len(z.Roles) > 0 {\n\t\tfor _, value := range z.Roles {\n\t\t\tif value == role {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn result\n}",
"func HasRole(role string, token *jwt.Token) bool {\n\tfor _, r := range Roles(token) {\n\t\tif r == role {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (_BREM *BREMCaller) HasRole(opts *bind.CallOpts, _operator common.Address, _role string) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _BREM.contract.Call(opts, out, \"hasRole\", _operator, _role)\n\treturn *ret0, err\n}",
"func (_Distributor *DistributorCaller) HasRole(opts *bind.CallOpts, role [32]byte, account common.Address) (bool, error) {\n\tvar (\n\t\tret0 = new(bool)\n\t)\n\tout := ret0\n\terr := _Distributor.contract.Call(opts, out, \"hasRole\", role, account)\n\treturn *ret0, err\n}",
"func (_Superuserable *SuperuserableSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _Superuserable.Contract.HasRole(&_Superuserable.CallOpts, _operator, _role)\n}",
"func (_Superuserable *SuperuserableCallerSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _Superuserable.Contract.HasRole(&_Superuserable.CallOpts, _operator, _role)\n}",
"func (_TellorMesosphere *TellorMesosphereSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _TellorMesosphere.Contract.HasRole(&_TellorMesosphere.CallOpts, role, account)\n}",
"func checkProfileExists(credFile *string, profileName *string) (bool, error) {\n\tconfig, err := configparser.Read(*credFile)\n\tif err != nil {\n\t\tfmt.Println(\"Could not find credentials file\")\n\t\tfmt.Println(err.Error())\n\t\treturn false, err\n\t}\n\tsection, err := config.Section(*profileName)\n\tif err != nil {\n\t\tfmt.Println(\"Could not find profile in credentials file\")\n\t\treturn false, nil\n\t}\n\tif !section.Exists(\"aws_access_key_id\") {\n\t\tfmt.Println(\"Could not find access key in profile\")\n\t\treturn false, nil\n\t}\n\n\treturn true, nil\n}",
"func (_AccessControl *AccessControlCaller) HasRole(opts *bind.CallOpts, role [32]byte, account common.Address) (bool, error) {\n\tvar out []interface{}\n\terr := _AccessControl.contract.Call(opts, &out, \"hasRole\", role, account)\n\n\tif err != nil {\n\t\treturn *new(bool), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(bool)).(*bool)\n\n\treturn out0, err\n\n}",
"func (_TellorMesosphere *TellorMesosphereCallerSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _TellorMesosphere.Contract.HasRole(&_TellorMesosphere.CallOpts, role, account)\n}",
"func (_RBAC *RBACCallerSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _RBAC.Contract.HasRole(&_RBAC.CallOpts, _operator, _role)\n}",
"func (_BREMFactory *BREMFactoryCallerSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _BREMFactory.Contract.HasRole(&_BREMFactory.CallOpts, _operator, _role)\n}",
"func (_AccessControl *AccessControlCallerSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _AccessControl.Contract.HasRole(&_AccessControl.CallOpts, role, account)\n}",
"func (_RBAC *RBACSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _RBAC.Contract.HasRole(&_RBAC.CallOpts, _operator, _role)\n}",
"func (_DelegationController *DelegationControllerCallerSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _DelegationController.Contract.HasRole(&_DelegationController.CallOpts, role, account)\n}",
"func (_Auditable *AuditableCallerSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _Auditable.Contract.HasRole(&_Auditable.CallOpts, _operator, _role)\n}",
"func (_BREMFactory *BREMFactorySession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _BREMFactory.Contract.HasRole(&_BREMFactory.CallOpts, _operator, _role)\n}",
"func (_Auditable *AuditableSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _Auditable.Contract.HasRole(&_Auditable.CallOpts, _operator, _role)\n}",
"func (_Userable *UserableCallerSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _Userable.Contract.HasRole(&_Userable.CallOpts, _operator, _role)\n}",
"func (o *UserDisco) HasRole() bool {\n\tif o != nil && o.Role != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (_SweetToken *SweetTokenCallerSession) HasRole(roleName string) (bool, error) {\n\treturn _SweetToken.Contract.HasRole(&_SweetToken.CallOpts, roleName)\n}",
"func (_DelegationController *DelegationControllerSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _DelegationController.Contract.HasRole(&_DelegationController.CallOpts, role, account)\n}",
"func (_SweetToken *SweetTokenSession) HasRole(roleName string) (bool, error) {\n\treturn _SweetToken.Contract.HasRole(&_SweetToken.CallOpts, roleName)\n}",
"func (backend *ESClient) ProfileExists(hash string) (bool, error) {\n\tidsQuery := elastic.NewIdsQuery(mappings.DocType)\n\tidsQuery.Ids(hash)\n\n\tsearchResult, err := backend.client.Search().\n\t\tIndex(relaxting.CompProfilesIndex).\n\t\tQuery(idsQuery).\n\t\tSize(0).\n\t\tDo(context.Background())\n\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, fmt.Sprintf(\"ProfileExists unable to complete search for %s\", hash))\n\t}\n\n\tlogrus.Debugf(\"ProfileExists got %d profiles in %d milliseconds\\n\", searchResult.TotalHits(), searchResult.TookInMillis)\n\n\treturn searchResult.TotalHits() > 0, nil\n}",
"func (_Distributor *DistributorCallerSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _Distributor.Contract.HasRole(&_Distributor.CallOpts, role, account)\n}",
"func (_Userable *UserableSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _Userable.Contract.HasRole(&_Userable.CallOpts, _operator, _role)\n}",
"func (o *InlineObject72) HasRole() bool {\n\tif o != nil && o.Role != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (_Distributor *DistributorSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _Distributor.Contract.HasRole(&_Distributor.CallOpts, role, account)\n}",
"func (g ScimGroup) HasRole(role string) bool {\n\tfor _, groupRole := range g.Roles {\n\t\tif groupRole.Value == role {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (_AccessControl *AccessControlSession) HasRole(role [32]byte, account common.Address) (bool, error) {\n\treturn _AccessControl.Contract.HasRole(&_AccessControl.CallOpts, role, account)\n}",
"func (p *ProfilesExperiences) Exists(qu Queryer, id int64) (exists bool, err error) {\n\tconst stmt = \"SELECT EXISTS(SELECT 1 FROM `profiles__experiences__` WHERE id = ? LIMIT 1) AS `exists`\"\n\tvar count int\n\trow := qu.QueryRow(stmt, id)\n\tif err = row.Scan(&count); err != nil {\n\t\treturn\n\t}\n\treturn count > 0, nil\n}",
"func (_BREM *BREMCallerSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _BREM.Contract.HasRole(&_BREM.CallOpts, _operator, _role)\n}",
"func (m *GormIdentityRoleRepository) CheckExists(ctx context.Context, id string) error {\n\tdefer goa.MeasureSince([]string{\"goa\", \"db\", \"identity_role\", \"exists\"}, time.Now())\n\treturn base.CheckExistsWithCustomIDColumn(ctx, m.db, m.TableName(), \"identity_role_id\", id)\n}",
"func (o *StorageVdMemberEpAllOf) HasRole() bool {\n\tif o != nil && o.Role != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (o *MemberResponse) HasRole() bool {\n\tif o != nil && o.Role != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (account *Account) HasRole(roleID int) bool {\n\tif _, ok := account.accountRoles[roleID]; ok {\n\t\treturn true\n\t}\n\treturn false\n}",
"func (urq *UserRoleQuery) Exist(ctx context.Context) (bool, error) {\n\tif err := urq.prepareQuery(ctx); err != nil {\n\t\treturn false, err\n\t}\n\treturn urq.sqlExist(ctx)\n}",
"func (db *MySQLDB) IsRoleRecIDExist(ctx context.Context, recID string) (bool, error) {\n\tfLog := mysqlLog.WithField(\"func\", \"IsUserRecIDExist\").WithField(\"RequestID\", ctx.Value(constants.RequestID))\n\tq := \"SELECT COUNT(*) AS CNT FROM HANSIP_ROLE WHERE REC_ID=?\"\n\trows, err := db.instance.QueryContext(ctx, q, recID)\n\tif err != nil {\n\t\tfLog.Errorf(\"db.instance.ExecContext got %s. SQL = %s\", err.Error(), q)\n\t\treturn false, &ErrDBQueryError{\n\t\t\tWrapped: err,\n\t\t\tMessage: \"Error IsRoleRecIDExist\",\n\t\t\tSQL: q,\n\t\t}\n\t}\n\tdefer rows.Close()\n\tif rows.Next() {\n\t\tcount := 0\n\t\terr := rows.Scan(&count)\n\t\tif err != nil {\n\t\t\tfLog.Errorf(\"db.instance.IsRoleRecIDExist cant scan\")\n\t\t\treturn false, &ErrDBScanError{\n\t\t\t\tWrapped: err,\n\t\t\t\tMessage: \"Error IsRoleRecIDExist\",\n\t\t\t\tSQL: q,\n\t\t\t}\n\t\t}\n\t\treturn count > 0, nil\n\t}\n\treturn false, nil\n}",
"func (_BREM *BREMSession) HasRole(_operator common.Address, _role string) (bool, error) {\n\treturn _BREM.Contract.HasRole(&_BREM.CallOpts, _operator, _role)\n}",
"func hasAuthorizedRole(sourceRole, providedRole string) bool {\n\tswitch sourceRole {\n\tcase ViewerRoleName:\n\t\tswitch providedRole {\n\t\tcase ViewerRoleName, EditorRoleName, AdminRoleName:\n\t\t\treturn true\n\t\t}\n\tcase EditorRoleName:\n\t\tswitch providedRole {\n\t\tcase EditorRoleName, AdminRoleName:\n\t\t\treturn true\n\t\t}\n\tcase AdminRoleName:\n\t\tswitch providedRole {\n\t\tcase AdminRoleName:\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}",
"func (ctx *serverRequestContextImpl) hasRole(role string) (bool, error) {\n\tif ctx.callerRoles == nil {\n\t\tctx.callerRoles = make(map[string]bool)\n\t}\n\n\troleStatus, hasRole := ctx.callerRoles[role]\n\tif hasRole {\n\t\treturn roleStatus, nil\n\t}\n\n\tcaller, err := ctx.GetCaller()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\troleAttr, err := caller.GetAttribute(role)\n\tif err != nil {\n\t\treturn false, caerrors.NewAuthorizationErr(caerrors.ErrInvokerMissAttr, \"Invoker does not have following role'%s': '%s'\", role, err)\n\t}\n\troleStatus, err = strconv.ParseBool(roleAttr.Value)\n\tif err != nil {\n\t\treturn false, caerrors.NewHTTPErr(400, caerrors.ErrInvalidBool, \"Failed to get boolean value of '%s': '%s'\", role, err)\n\t}\n\tctx.callerRoles[role] = roleStatus\n\n\treturn ctx.callerRoles[role], nil\n}",
"func (o *AccessRequestData) HasRole() bool {\n\tif o != nil && o.Role != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func appArmorProfileExists(profile string) (bool, error) {\n\tif profile == \"\" {\n\t\treturn false, errors.New(\"nil apparmor profile is not supported\")\n\t}\n\tprofiles, err := os.Open(\"/sys/kernel/security/apparmor/profiles\")\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tdefer profiles.Close()\n\n\trbuff := bufio.NewReader(profiles)\n\tfor {\n\t\tline, err := rbuff.ReadString('\\n')\n\t\tswitch err {\n\t\tcase nil:\n\t\t\tif strings.HasPrefix(line, profile+\" (\") {\n\t\t\t\treturn true, nil\n\t\t\t}\n\t\tcase io.EOF:\n\t\t\treturn false, nil\n\t\tdefault:\n\t\t\treturn false, err\n\t\t}\n\t}\n}",
"func (ctx *serverRequestContextImpl) HasRole(role string) error {\n\thasRole, err := ctx.hasRole(role)\n\tif err != nil {\n\t\treturn err\n\t}\n\tif !hasRole {\n\t\treturn caerrors.NewAuthorizationErr(caerrors.ErrMissingRole, \"Caller has a value of 'false' for attribute/role '%s'\", role)\n\t}\n\treturn nil\n}",
"func ProfileExistsByName(name string, tx *sql.Tx) (bool, error) {\n\tcount := 0\n\tif err := tx.QueryRow(`SELECT count(*) from profile where name = $1`, name).Scan(&count); err != nil {\n\t\treturn false, errors.New(\"querying profile existence from name: \" + err.Error())\n\t}\n\treturn count > 0, nil\n}",
"func (ua *UserAuth) Exists() bool {\n\treturn ua._exists\n}",
"func (lr *LoginRecord) Exists() bool {\n\treturn lr._exists\n}",
"func (o *UserGoogle) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {\n\treturn UserGoogleExists(ctx, exec, o.GoogleID)\n}",
"func (o *Content) HasRole() bool {\n\tif o != nil && o.Role != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (r *RoleCreate) HasMembers() bool {\n\treturn r.hasMembers\n}",
"func (t *teamsImpl) Exists(ctx context.Context, name string) (bool, error) {\n\t// @step: we check the user management service for teams\n\treturn t.persistenceMgr.Teams().Exists(ctx, name)\n}",
"func userHaveRole(user User, role string) (b bool){\n\tb = true\n\tfor _, e := range user.Roles {\n\t\tif e == role {\n\t\t\treturn\n\t\t}\n\t}\n\tb = false\n\treturn\n}",
"func (q cmfFamilyUserPoliciesTakeQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: failed to check if cmf_family_user_policies_take exists\")\n\t}\n\n\treturn count > 0, nil\n}",
"func (o *AuthNMappingUpdateRelationships) HasRole() bool {\n\treturn o != nil && o.Role != nil\n}",
"func (auup *AuthUserUserPermission) Exists() bool {\n\treturn auup._exists\n}",
"func (a Anonymous) HasAnyRole(roles ...string) bool { return false }",
"func (wu *WxUser) Exists() bool { //wx_users\n\treturn wu._exists\n}",
"func HasRole(roles ...string) web.Middleware {\n\n\t// This is the actual middleware function to be executed.\n\tf := func(after web.Handler) web.Handler {\n\n\t\th := func(ctx context.Context, w http.ResponseWriter, r *http.Request) error {\n\t\t\tctx, span := trace.StartSpan(ctx, \"internal.mid.HasRole\")\n\t\t\tdefer span.End()\n\n\t\t\tclaims, ok := ctx.Value(auth.Key).(auth.Claims)\n\t\t\tif !ok {\n\t\t\t\treturn errors.New(\"claims missing from context: HasRole called without/before Authenticate\")\n\t\t\t}\n\n\t\t\tif !claims.HasRole(roles...) {\n\t\t\t\treturn ErrForbidden\n\t\t\t}\n\n\t\t\treturn after(ctx, w, r)\n\t\t}\n\n\t\treturn h\n\t}\n\n\treturn f\n}",
"func (b *azureAuthBackend) pathRoleExistenceCheck(ctx context.Context, req *logical.Request, data *framework.FieldData) (bool, error) {\n\trole, err := b.role(ctx, req.Storage, data.Get(\"name\").(string))\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn role != nil, nil\n}",
"func (u Users) HasRole(role RoleInterface, userID Owner) (bool, error) {\n\tif _, ok := userID.(string); ok {\n\t\tif userID.(string) == \"\" {\n\t\t\treturn false, ErrUserRequired\n\t\t}\n\t} else if _, ok := userID.(int64); ok {\n\t\tif userID.(int64) == 0 {\n\t\t\treturn false, ErrUserRequired\n\t\t}\n\t}\n\n\troleID, err := u.rbac.Roles().GetRoleID(role)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tquery := fmt.Sprintf(`\n\tSELECT COUNT(*) FROM user_roles AS TUR\n\tJOIN roles AS TRdirect ON (TRdirect.ID=TUR.role_id)\n\tJOIN roles AS TR ON (TR.Lft BETWEEN TRdirect.Lft AND TRdirect.Rght)\n\tWHERE\n\tTUR.user_id=? AND TR.ID=?`)\n\n\tvar result int64\n\terr = u.rbac.db.QueryRow(query, userID, roleID).Scan(&result)\n\tif err != nil {\n\t\tif err != sql.ErrNoRows {\n\t\t\treturn false, err\n\t\t}\n\t}\n\n\tif result > 0 {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}",
"func hasRole(c context.Context, metas []*api.PrefixMetadata, role api.Role) (bool, error) {\n\tcaller := string(auth.CurrentIdentity(c)) // e.g. \"user:[email protected]\"\n\n\t// E.g. if 'role' is READER, 'roles' will be {READER, WRITER, OWNER}.\n\troles := impliedRolesRev[role]\n\tif roles == nil {\n\t\troles = roleSet(role)\n\t}\n\n\t// Enumerate the set of principals that have any of the requested roles in any\n\t// of the prefixes. Exit early if hitting the direct match, otherwise proceed\n\t// to more expensive group membership checks. Note that we don't use isInACL\n\t// here because we want to postpone all group checks until the very end,\n\t// checking memberships in all groups mentioned in 'metas' at once.\n\tgroups := stringset.New(10) // 10 is picked arbitrarily\n\tfor _, meta := range metas {\n\t\tfor _, acl := range meta.Acls {\n\t\t\tif _, ok := roles[acl.Role]; !ok {\n\t\t\t\tcontinue // not the role we are interested in\n\t\t\t}\n\t\t\tfor _, p := range acl.Principals {\n\t\t\t\tif p == caller {\n\t\t\t\t\treturn true, nil // the caller was specified in ACLs explicitly\n\t\t\t\t}\n\t\t\t\t// Is this a reference to a group?\n\t\t\t\tif s := strings.SplitN(p, \":\", 2); len(s) == 2 && s[0] == \"group\" {\n\t\t\t\t\tgroups.Add(s[1])\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tyes, err := auth.IsMember(c, groups.ToSlice()...)\n\tif err != nil {\n\t\treturn false, errors.Annotate(err, \"failed to check group memberships when checking ACLs for role %s\", role).Err()\n\t}\n\treturn yes, nil\n}",
"func (q cmfUserExperienceLogQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: failed to check if cmf_user_experience_log exists\")\n\t}\n\n\treturn count > 0, nil\n}",
"func (s *Signup) Exists() bool {\n\treturn s._exists\n}",
"func (u *User) Exists() bool {\n\treturn u._exists\n}",
"func (o *Tenant) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {\n\treturn TenantExists(ctx, exec, o.ID)\n}",
"func (sp *SalePermission) Exists() bool { //sale_permission\n\treturn sp._exists\n}",
"func (m *GormRoleMappingRepository) CheckExists(ctx context.Context, ID uuid.UUID) (bool, error) {\n\tdefer goa.MeasureSince([]string{\"goa\", \"db\", \"role_mapping\", \"exists\"}, time.Now())\n\n\tvar exists bool\n\tquery := fmt.Sprintf(`\n\t\tSELECT EXISTS (\n\t\t\tSELECT 1 FROM %[1]s\n\t\t\tWHERE\n\t\t\t\trole_mapping_id=$1\n\t\t\t\tAND deleted_at IS NULL\n\t\t)`, m.TableName())\n\n\terr := m.db.CommonDB().QueryRow(query, ID.String()).Scan(&exists)\n\tif err == nil && !exists {\n\t\treturn exists, errors.NewNotFoundError(m.TableName(), ID.String())\n\t}\n\tif err != nil {\n\t\treturn false, errors.NewInternalError(ctx, errs.Wrapf(err, \"unable to verify if %s exists\", m.TableName()))\n\t}\n\treturn exists, nil\n}",
"func (tu *TempUser) Exists() bool {\n\treturn tu._exists\n}",
"func (o *MicrosoftGraphEducationUser) HasPrimaryRole() bool {\n\tif o != nil && o.PrimaryRole != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (q cmfUserSuperQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: failed to check if cmf_user_super exists\")\n\t}\n\n\treturn count > 0, nil\n}",
"func (s *storager) Exists(ctx context.Context, resourceID string, options ...storage.Option) (bool, error) {\n\tresource, err := newResource(resourceID)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\tclient := s.secretManager(resource.Region)\n\t_, err = client.GetSecretValueWithContext(ctx,\n\t\t&secretsmanager.GetSecretValueInput{\n\t\t\tSecretId: &resource.Secret,\n\t\t\tVersionStage: aws.String(\"AWSCURRENT\"),\n\t\t})\n\treturn !isNotFound(err), nil\n}",
"func (dau *DdgAdminUser) Exists() bool { //ddg_admin_user\n\treturn dau._exists\n}",
"func (ll *LoginLimit) Exists() bool {\n\treturn ll._exists\n}",
"func CMFFamilyUserPoliciesTakeExists(ctx context.Context, exec boil.ContextExecutor, iD int) (bool, error) {\n\tvar exists bool\n\tsql := \"select exists(select 1 from `cmf_family_user_policies_take` where `id`=? limit 1)\"\n\n\tif boil.IsDebug(ctx) {\n\t\twriter := boil.DebugWriterFrom(ctx)\n\t\tfmt.Fprintln(writer, sql)\n\t\tfmt.Fprintln(writer, iD)\n\t}\n\trow := exec.QueryRowContext(ctx, sql, iD)\n\n\terr := row.Scan(&exists)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: unable to check if cmf_family_user_policies_take exists\")\n\t}\n\n\treturn exists, nil\n}",
"func ProfileExistsByID(id int64, tx *sql.Tx) (bool, error) {\n\tcount := 0\n\tif err := tx.QueryRow(`SELECT count(*) from profile where id = $1`, id).Scan(&count); err != nil {\n\t\treturn false, errors.New(\"querying profile existence from id: \" + err.Error())\n\t}\n\treturn count > 0, nil\n}",
"func (c *Client) CheckRoleExists(orgID string, roleName string, awsAccountID string) (bool, string, string, error) {\n\texists, _, selectedARN, err := c.CheckIfAWSAccountExists(orgID, awsAccountID)\n\tif err != nil {\n\t\treturn false, \"\", \"\", err\n\t}\n\tif !exists {\n\t\treturn false, \"\", \"\", nil\n\t}\n\texistingRole := strings.SplitN(selectedARN, \"/\", 2)\n\tif len(existingRole) > 1 && existingRole[1] == roleName {\n\t\treturn false, \"\", \"\", nil\n\t}\n\treturn true, existingRole[1], selectedARN, nil\n}",
"func TestUserExists(t *testing.T) {\n\tprof := Profile{\n\t\tUserName: \"test\",\n\t\tCompanyName: \"test company\",\n\t\tPwHash: []byte(\"1234\"),\n\t\tAddress: \"1234 lane\",\n\t}\n\tif err := db.Create(&prof).Error; err != nil {\n\t\tt.Errorf(\"Error creating profile not expected. err: %v\", err)\n\t}\n\tdefer db.Unscoped().Delete(&Profile{})\n\tif !dm.userExists(\"test\", \"test company\") {\n\t\tt.Error(\"User should exist but does not.\")\n\t}\n\tif dm.userExists(\"not test\", \"test company\") {\n\t\tt.Error(\"User should not exist but does.\")\n\t}\n}",
"func (q authUserQuery) Exists() (bool, error) {\n\tvar count int64\n\n\tqueries.SetCount(q.Query)\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Query.QueryRow().Scan(&count)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: failed to check if auth_user exists\")\n\t}\n\n\treturn count > 0, nil\n}",
"func (user *EcbUserProfile) HasAdminRole(encryptedProfile []byte) (bool, error) {\n\tplainText, err := user.cipher.Decrypt(encryptedProfile)\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\t// Remove padding and just assume it's correct\n\tplainTextLength := len(plainText)\n\tplainText = plainText[:plainTextLength-int(plainText[plainTextLength-1])]\n\n\tdict := util.DecodeQueryString(string(plainText), \"=\", \"&\")\n\treturn dict[\"role\"] == \"admin\", nil\n}",
"func (ust *UsersShopTrace) Exists() bool { //users_shop_trace\n\treturn ust._exists\n}",
"func (sp *ScyllaUserProvider) Exists(email string) (bool, derrors.Error) {\n\n\tsp.Lock()\n\tdefer sp.Unlock()\n\n\tvar returnedEmail string\n\n\t// check connection\n\tif err := sp.checkAndConnect(); err != nil {\n\t\treturn false, err\n\t}\n\n\tstmt, names := qb.Select(userTable).Columns(userTablePK).Where(qb.Eq(userTablePK)).ToCql()\n\tq := gocqlx.Query(sp.Session.Query(stmt), names).BindMap(qb.M{\n\t\tuserTablePK: email})\n\n\terr := q.GetRelease(&returnedEmail)\n\tif err != nil {\n\t\tif err.Error() == rowNotFound {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, derrors.AsError(err, \"cannot determinate if user exists\")\n\t\t}\n\t}\n\n\tstmt, names = qb.Select(userPhotoTable).Columns(userPhotoTablePK).Where(qb.Eq(userPhotoTablePK)).ToCql()\n\tq = gocqlx.Query(sp.Session.Query(stmt), names).BindMap(qb.M{\n\t\tuserPhotoTablePK: email})\n\n\terr = q.GetRelease(&returnedEmail)\n\tif err != nil {\n\t\tif err.Error() == rowNotFound {\n\t\t\treturn false, nil\n\t\t} else {\n\t\t\treturn false, derrors.AsError(err, \"there seems to be an issue with the user in the userphotos table\")\n\t\t}\n\t}\n\n\treturn true, nil\n}",
"func (q usernameListingQuery) Exists(ctx context.Context, exec boil.ContextExecutor) (bool, error) {\n\tvar count int64\n\n\tqueries.SetSelect(q.Query, nil)\n\tqueries.SetCount(q.Query)\n\tqueries.SetLimit(q.Query, 1)\n\n\terr := q.Query.QueryRowContext(ctx, exec).Scan(&count)\n\tif err != nil {\n\t\treturn false, errors.Wrap(err, \"models: failed to check if username_listings exists\")\n\t}\n\n\treturn count > 0, nil\n}",
"func (r *RoleCreate) HasName() bool {\n\treturn r.hasName\n}",
"func (r *RoleList) HasPage() bool {\n\treturn r.hasPage\n}",
"func (r *RoleUndelete) HasRoleID() bool {\n\treturn r.hasRoleID\n}",
"func HasOwnerRole() predicate.ProfileUKM {\n\treturn predicate.ProfileUKM(func(s *sql.Selector) {\n\t\tstep := sqlgraph.NewStep(\n\t\t\tsqlgraph.From(Table, FieldID),\n\t\t\tsqlgraph.To(OwnerRoleTable, FieldID),\n\t\t\tsqlgraph.Edge(sqlgraph.M2O, true, OwnerRoleTable, OwnerRoleColumn),\n\t\t)\n\t\tsqlgraph.HasNeighbors(s, step)\n\t})\n}",
"func (r *RoleTriggerScript) HasRoleID() bool {\n\treturn r.hasRoleID\n}"
] | [
"0.6787896",
"0.6785139",
"0.67097485",
"0.6696827",
"0.6692673",
"0.66723514",
"0.66641665",
"0.6617847",
"0.65938026",
"0.6579737",
"0.6568183",
"0.6541493",
"0.6508191",
"0.65061635",
"0.6498168",
"0.6493239",
"0.64108336",
"0.640935",
"0.63917893",
"0.6374794",
"0.6353769",
"0.63524324",
"0.6334391",
"0.6328811",
"0.6304593",
"0.6283307",
"0.62815",
"0.6270238",
"0.6269905",
"0.6256607",
"0.62455523",
"0.6237992",
"0.6236422",
"0.6232345",
"0.6231518",
"0.62030977",
"0.6197037",
"0.6182001",
"0.61758643",
"0.61725354",
"0.61654526",
"0.61585176",
"0.61478555",
"0.6146645",
"0.6129574",
"0.61051685",
"0.6099357",
"0.6097886",
"0.6095115",
"0.6059817",
"0.60412294",
"0.60340273",
"0.60085434",
"0.5971268",
"0.5936892",
"0.593519",
"0.59341675",
"0.5896328",
"0.58896846",
"0.5875648",
"0.5864475",
"0.5847333",
"0.5846819",
"0.5815261",
"0.58144325",
"0.5813073",
"0.5808003",
"0.5807947",
"0.58074343",
"0.57946056",
"0.57923216",
"0.5771246",
"0.57613236",
"0.5731437",
"0.5717601",
"0.5676664",
"0.56535155",
"0.56461483",
"0.5618165",
"0.560979",
"0.55915403",
"0.5577224",
"0.55642706",
"0.5553832",
"0.55369616",
"0.5514937",
"0.5513306",
"0.55064046",
"0.5468115",
"0.54643863",
"0.545809",
"0.5456279",
"0.5452871",
"0.54478663",
"0.54414743",
"0.5418264",
"0.5414294",
"0.54029155",
"0.53999674",
"0.53946763",
"0.5391824"
] | 0.0 | -1 |
New returns a new server with state equal to the return value of the given factory. | func New(factory func() interface{}) *Server {
t := topic.New()
t.AddSubscriber(1, &subscriber{state: factory()})
return &Server{topic: t}
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func New(addr string) *Server {\n if addr == \"\" {\n addr = DefaultAddr\n }\n return &Server{\n addr: DefaultAddr,\n ds: newDataStore(),\n done: make(chan struct{}),\n }\n}",
"func NewServerFactory(ctx context.Context, rcmd *channels.Remote, s string, l *logrus.Logger, wg *sync.WaitGroup) *Server {\n\treturn &Server{\n\t\tlogger: l,\n\t\tunixSocket: s,\n\t\trcmd: rcmd,\n\t\tctx: ctx,\n\t\twg: wg,\n\t}\n}",
"func (c *Config) serverFactory(name string, weight int, host string, port int) *ServerDetail {\n\treturn &ServerDetail{\n\t\tName: name,\n\t\tHost: host,\n\t\tPort: port,\n\t\tUnixSock: \"\",\n\t\tWeight: weight,\n\t\tMaxConn: 1000,\n\t\tCheck: false,\n\t\tCheckInterval: 10,\n\t}\n}",
"func New(\n\tserverID string,\n\ttracer *zipkin.Tracer,\n\tfS fetching.Service,\n\taS adding.Service,\n\tmS modifying.Service,\n\trS removing.Service,\n) Server {\n\ta := &server{\n\t\tserverID: serverID,\n\t\ttracer: tracer,\n\t\tfetching: fS,\n\t\tadding: aS,\n\t\tmodifying: mS,\n\t\tremoving: rS}\n\trouter(a)\n\n\treturn a\n}",
"func New(\n\tserverID string,\n\ttracer *zipkin.Tracer,\n\tfS fetching.Service,\n\taS adding.Service,\n\tmS modifying.Service,\n\trS removing.Service,\n) Server {\n\ta := &server{\n\t\tserverID: serverID,\n\t\ttracer: tracer,\n\t\tfetching: fS,\n\t\tadding: aS,\n\t\tmodifying: mS,\n\t\tremoving: rS}\n\trouter(a)\n\n\treturn a\n}",
"func NewServer() *Server {}",
"func (f *FactoryFake) New(address string) (client.Interface, error) {\n\tc, _ := f.Clients[address]\n\treturn c, nil\n}",
"func New(client remote.Client) (*Server, error) {\n\tctx, cancel := context.WithCancel(context.Background())\n\ts := Server{\n\t\tctx: ctx,\n\t\tctxCancel: &cancel,\n\t\tclient: client,\n\t\tinstalling: system.NewAtomicBool(false),\n\t\ttransferring: system.NewAtomicBool(false),\n\t\trestoring: system.NewAtomicBool(false),\n\t\tpowerLock: system.NewLocker(),\n\t\tsinks: map[system.SinkName]*system.SinkPool{\n\t\t\tsystem.LogSink: system.NewSinkPool(),\n\t\t\tsystem.InstallSink: system.NewSinkPool(),\n\t\t},\n\t}\n\tif err := defaults.Set(&s); err != nil {\n\t\treturn nil, errors.Wrap(err, \"server: could not set default values for struct\")\n\t}\n\tif err := defaults.Set(&s.cfg); err != nil {\n\t\treturn nil, errors.Wrap(err, \"server: could not set defaults for server configuration\")\n\t}\n\ts.resources.State = system.NewAtomicString(environment.ProcessOfflineState)\n\treturn &s, nil\n}",
"func NewServer() *Server {\n return &Server{\n Addr: DefaultAddr,\n }\n}",
"func NewServer(db Factory) *Server {\n\tif db == nil {\n\t\tpanic(\"db cannot be <nil>.\")\n\t}\n\treturn &Server{db: db}\n}",
"func Factory() (net.PacketConn, error) {\n\tfrpc := &FakeRandomPacketConn{\n\t\tFakePacketConn: FakePacketConn{\n\t\t\tclosedChan: make(chan struct{}),\n\t\t},\n\t}\n\treturn frpc, nil\n}",
"func New(config Config) *Server {\n\treturn &Server{\n\t\tconfig: config,\n\t\tregistrars: make([]Registration, 0, 1),\n\t}\n}",
"func New(token string) *Server {\n\treturn &Server{\n\t\ttoken: token,\n\t\tproviders: make(map[string]provider),\n\t}\n}",
"func New() *Server {\n\treturn &Server{\n\t\tsystems: make(map[string]*system),\n\t}\n}",
"func New(srv *cmutation.Server) *Server {\n\treturn &Server{srv}\n}",
"func New(cfg config.ServerConfig, db database.Database) *Server {\n\treturn &Server{\n\t\trouter: gin.Default(),\n\t\tport: cfg.Port,\n\t\tdb: db,\n\t}\n}",
"func New() (s *Server) {\n\tp := NewPopulation(eliteSize, genSize)\n\ts = &Server{\n\t\tPopulation: p,\n\t}\n\treturn s\n}",
"func New(addr string, port int) *Server {\n\tctx, cancel := context.WithCancel(context.Background())\n\treturn &Server{\n\t\taddr: addr,\n\t\tport: port,\n\t\tctx: ctx,\n\t\tctxCancel: cancel,\n\t}\n}",
"func New(address string) *server {\n\tlog.Println(\"Creating server with address\", address)\n\tserver := &server{\n\t\taddress: address,\n\t}\n\n\tserver.SetNewClientCB(func(c *Client) {})\n\tserver.SetNewMessageCB(func(c *Client, message string) {})\n\tserver.SetClientConnectionClosedCB(func(c *Client, err error) {})\n\n\treturn server\n}",
"func newServer(notifier *notifier, key string) *server {\n\treturn &server{\n\t\tnotifier: notifier,\n\t\tkey: key,\n\t}\n}",
"func newServer() *negroni.Negroni {\n\tn := negroni.Classic()\n\tn.UseHandler(router())\n\treturn n\n}",
"func New(server *http.Server) (*Server, error) {\n\tlistener, err := zerodown.Listen(\"tcp\", server.Addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &Server{\n\t\tserver: server,\n\t\tlistener: listener,\n\t}, nil\n}",
"func New() *Factory {\n\treturn &Factory{}\n}",
"func New() *Factory {\n\treturn &Factory{}\n}",
"func New(\n\taddr string,\n\thandler Handler,\n\tlog *log.Logger,\n\tworkersCount uint8,\n) (srv *Server) {\n\tsrv = &Server{\n\t\taddr: addr,\n\t\thandler: handler,\n\t\tlog: log,\n\t\tClients: newClients(),\n\t\tchStop: make(chan bool, 1),\n\t\tchRequest: make(chan *tRequest, workersCount),\n\t}\n\n\treturn\n}",
"func New() *Server {\n\treturn &Server{}\n}",
"func New() *Server {\n\treturn &Server{}\n}",
"func New(fetcherSvc *services.Fetcher, log *logrus.Entry) *Server {\n\treturn &Server{\n\t\tFetcherSvc: fetcherSvc,\n\t\tLog: log,\n\t}\n}",
"func newPingChannelServer(s *StorjTelehash) func() ChannelHandler {\n\treturn func() ChannelHandler {\n\t\tlogging.Println(\"factory\")\n\t\tlogging.Println(s)\n\t\treturn &pingChannelServer{st: s}\n\t}\n}",
"func New(sto store.Service) *server {\n\ts := &server{sto: sto}\n\n\trouter := mux.NewRouter()\n\n\trouter.Handle(\"/todo\", allowedMethods(\n\t\t[]string{\"OPTIONS\", \"GET\", \"POST\"},\n\t\thandlers.MethodHandler{\n\t\t\t\"GET\": http.HandlerFunc(s.getTodos),\n\t\t\t\"POST\": http.HandlerFunc(s.createTodo),\n\t\t}))\n\n\trouter.Handle(\"/todo/{id}\", idMiddleware(allowedMethods(\n\t\t[]string{\"OPTIONS\", \"GET\", \"PUT\", \"PATCH\", \"DELETE\"},\n\t\thandlers.MethodHandler{\n\t\t\t\"GET\": http.HandlerFunc(s.getTodo),\n\t\t\t\"PUT\": http.HandlerFunc(s.putTodo),\n\t\t\t\"PATCH\": http.HandlerFunc(s.patchTodo),\n\t\t\t\"DELETE\": http.HandlerFunc(s.deleteTodo),\n\t\t})))\n\n\ts.handler = limitBody(defaultHeaders(router))\n\n\treturn s\n}",
"func New(addr string) (*Server, error) {\n\ts := &Server{\n\t\taddr: addr,\n\t\tshutdownTimeout: time.Minute,\n\t\tSessions: make(chan *Session),\n\t}\n\ts.hs = &http.Server{Handler: s}\n\tln, err := net.Listen(\"tcp\", s.addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\ts.ln = ln\n\ts.ListeningAddr = fmt.Sprintf(\":%d\", s.ln.Addr().(*net.TCPAddr).Port)\n\treturn s, nil\n}",
"func New(name, group, address string) *Server {\n\ts := &Server{\n\t\tname: name,\n\t\tgroup: group,\n\t\taddress: address,\n\t}\n\n\treturn s\n}",
"func newServer(config Config) *http.Server {\n\treturn &http.Server{\n\t\tAddr: fmt.Sprintf(\":%s\", config.Port),\n\t\tHandler: newRouter(config),\n\t}\n}",
"func New(conf *Settings) (s *Server, err error) {\n\tif conf == nil {\n\t\tif conf, err = Config(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\t// Set the global level\n\tzerolog.SetGlobalLevel(zerolog.Level(conf.LogLevel))\n\n\t// Set human readable logging if specified\n\tif conf.ConsoleLog {\n\t\tlog.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr})\n\t}\n\n\ts = &Server{conf: conf, echan: make(chan error, 1)}\n\tif s.db, err = gorm.Open(sqlite.Open(conf.DatabaseDSN), &gorm.Config{}); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err = MigrateDB(s.db); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// TODO: mark the VASP local based on name or configuration rather than erroring\n\tif err = s.db.Where(\"is_local = ?\", true).First(&s.vasp).Error; err != nil {\n\t\treturn nil, fmt.Errorf(\"could not fetch local VASP info from database: %s\", err)\n\t}\n\n\tif s.conf.Name != s.vasp.Name {\n\t\treturn nil, fmt.Errorf(\"expected name %q but have database name %q\", s.conf.Name, s.vasp.Name)\n\t}\n\n\t// Create the TRISA service\n\tif s.trisa, err = NewTRISA(s); err != nil {\n\t\treturn nil, fmt.Errorf(\"could not create TRISA service: %s\", err)\n\t}\n\n\t// Create the remote peers using the same credentials as the TRISA service\n\ts.peers = peers.New(s.trisa.certs, s.trisa.chain, s.conf.DirectoryServiceURL)\n\ts.updates = NewUpdateManager()\n\treturn s, nil\n}",
"func New() (*Server, error) {\n\treturn &Server{}, nil\n}",
"func New(cfg *Config) *Server {\n\tdefaultConfig(cfg)\n\tlog.Printf(\"%+v\\n\", cfg)\n\treturn &Server{\n\t\tcfg: cfg,\n\t\thandlers: make([]connectionHandler, cfg.Count),\n\t\tevents: make(chan eventWithData, cfg.Count),\n\t}\n}",
"func NewServer(ports []uint, hosts []string, pipelineFactory *PipelineFactory) *Server {\n\ts := new(Server)\n\ts.ports = ports\n\ts.hosts = hosts\n\ts.pipelineFactory = pipelineFactory\n\ts.eventHandlers = make([]ServerEventHandler, 0)\n\tcurrentServerState = make(chan server_state)\n\n\treturn s\n}",
"func New(prefix string, gIndex *osm.Data, styles map[string]map[string]config.Style) *Server {\n\treturn &Server{\n\t\tprefix: prefix,\n\t\tgIndex: gIndex,\n\t\tstyles: styles,\n\t}\n}",
"func New(addr string) *Server {\n\tsrv := new(Server)\n\tsrv.Context = new(Context)\n\tsrv.Context.Channels = make(map[string]*channel.Channel)\n\tsrv.Address = addr\n\treturn srv\n}",
"func New(L *lua.LState) int {\n\tbind := L.CheckAny(1).String()\n\tl, err := net.Listen(`tcp`, bind)\n\tif err != nil {\n\t\tL.Push(lua.LNil)\n\t\tL.Push(lua.LString(err.Error()))\n\t\treturn 2\n\t}\n\tserver := &luaServer{\n\t\tListener: l,\n\t\tserveData: make(chan *serveData, 1),\n\t}\n\tgo server.serve()\n\tud := L.NewUserData()\n\tud.Value = server\n\tL.SetMetatable(ud, L.GetTypeMetatable(\"http_server_ud\"))\n\tL.Push(ud)\n\treturn 1\n}",
"func New(address string, branch string, secret string, logger *logrus.Logger) http.Handler {\n\tproto := \"tcp\"\n\taddr := address\n\tif strings.HasPrefix(addr, \"unix:\") {\n\t\tproto = \"unix\"\n\t\taddr = addr[5:]\n\t}\n\treturn &Server{\n\t\tproto: proto,\n\t\taddress: addr,\n\t\tbranch: branch,\n\t\tsecret: secret,\n\t\tlogger: logger,\n\t}\n}",
"func New(cfg *config.Config, store *jot.JotStore, manager *auth.PasswordManager) *Server {\n\treturn &Server{\n\t\tmanager: manager,\n\t\tstore: store,\n\t\tcfg: cfg,\n\t}\n}",
"func New(path string, host string, port int) *Server {\n\ts := &Server{\n\t\thost: host,\n\t\tport: port,\n\t\tpath: path,\n\t\t//\tfs: db.NewFs(),\n\t\trouter: mux.NewRouter(),\n\t\trecvQueryReqQ:make(chan *ServerRequestItem, 100000),\n\t\trecvUpdateReqQ:make(chan *ServerRequestItem, 100000),\n\t\tsendRespQ:make(chan *ServerResponceItem, 100000),\n\t}\n\ts.fs = db.NewFs(s.fsNotifyCb)\n\n\ts.facade = NewEventDispatcher(s)\n\ts.facade.AddEventListener(kEventLeaderChanged, s.eventListener)\n\n\tlog.Printf(\"filePath:%v\", filepath.Join(path, \"name\"))\n\t// Read existing name or generate a new one.\n\tif b, err := ioutil.ReadFile(filepath.Join(path, \"name\")); err == nil {\n\t\ts.name = string(b)\n\t} else {\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\n\t\tif err = ioutil.WriteFile(filepath.Join(path, \"name\"), []byte(s.name), 0644); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\treturn s\n}",
"func newServer(a *Air) *server {\n\treturn &server{\n\t\ta: a,\n\t\tserver: &http.Server{},\n\t\taddressMap: map[string]int{},\n\t\trequestPool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn &Request{}\n\t\t\t},\n\t\t},\n\t\tresponsePool: &sync.Pool{\n\t\t\tNew: func() interface{} {\n\t\t\t\treturn &Response{}\n\t\t\t},\n\t\t},\n\t}\n}",
"func New(version string, state cfg.Config, deployment project.Deployer) (*Server, error) {\n\t// Establish connection with dockerd\n\tcli, err := containers.NewDockerClient()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to start Docker client: %s\", err.Error())\n\t}\n\n\t// Download build tools\n\tgo downloadDeps(cli, state.DockerComposeVersion)\n\n\treturn &Server{\n\t\tversion: version,\n\n\t\tdeployment: deployment,\n\t\tstate: state,\n\n\t\tdocker: cli,\n\t\twebsocket: &websocket.Upgrader{\n\t\t\tHandshakeTimeout: 5 * time.Second,\n\t\t},\n\t}, nil\n}",
"func NewServer(ctx context.Context, factory dependency.Factory) (*Server, error) {\n\tctx1, cancel := context.WithCancel(ctx)\n\n\ts := &Server{\n\t\tctx: ctx1,\n\t\tcancel: cancel,\n\t\tquerynode: qn.NewQueryNode(ctx, factory),\n\t\tgrpcErrChan: make(chan error),\n\t}\n\treturn s, nil\n}",
"func New(address string) *Server {\n connection, err := net.Dial(\"tcp\", address)\n if err != nil {\n return nil;\n }\n reader := bufio.NewReader(connection)\n\n\treturn &Server{address: address, connection: connection, reader: reader}\n}",
"func newServer(deps dependencies) Component {\n\treturn newServerCompat(deps.Config, deps.Log, deps.Replay, deps.Debug, deps.Params.Serverless)\n}",
"func New(m int, k int) *Server {\n\treturn &Server{bf: bloom.New(uint(m), uint(k))}\n}",
"func New(id int, f string) Replicator {\n\tserver := cluster.New(id, f)\n\tnextindex := make([]int, MAX_SERVER)\n\tmatchindex := make([]int, MAX_SERVER)\n\tfor i := 0; i < MAX_SERVER; i++ {\n\t\tnextindex[i] = 1\n\t\tmatchindex[i] = 0\n\t}\n\tfirst_log_index := 0\n\tlast_Index := 0\n\tlast_Term := 0\n\tcount = 0\n\tfor i := range send {\n\t\tsend[i] = make(chan bool)\n\t\tsuccess[i] = make(chan bool)\n\n\t}\n\t//handle if we restart server\n\traft := Replicator{Id: id, server: server, currentTerm: last_Term, leader: false, commitIndex: first_log_index, lastApplied: first_log_index, nextIndex: nextindex, matchIndex: matchindex, logEntry: make(map[int]string), lastLogIndex: last_Index, lastLogTerm: last_Term}\n\n\treturn raft\n}",
"func New(port string) *Server {\n\treturn &Server{\n\t\tport: port,\n\t\tmanager: endly.New(),\n\t}\n}",
"func (c *Config) socketServerFactory(name string, weight int) *ServerDetail {\n\n\treturn &ServerDetail{\n\t\tName: name,\n\t\tHost: \"\",\n\t\tPort: 0,\n\t\tUnixSock: compileSocketName(c.WorkingDir, name),\n\t\tWeight: weight,\n\t\tMaxConn: 1000,\n\t\tCheck: false,\n\t\tCheckInterval: 10,\n\t}\n}",
"func New(router *mux.Router, db db.PGManager) Server {\n\t// This creates a new *server struct instance. Notice the pointer (&): this means when\n\t// the server is returned it will be the same place in memory when used elsewhere (i.e.\n\t// the struct isn't copied).\n\tserver := &server{\n\t\tHandler: router,\n\t\tdb: db,\n\t}\n\t// We set up our routes as part of the constructor function.\n\tserver.routes(router)\n\treturn server\n}",
"func NewServer() *server {\n\ts := &server{\n\t\tstore: make(map[string]*string),\n\t\tops: make(chan func()),\n\t}\n\tgo s.loop()\n\treturn s\n}",
"func New(path string, host string, port int) *Server {\r\n\ts := &Server{\r\n\t\thost: host,\r\n\t\tport: port,\r\n\t\tpath: path,\r\n\t\trouter: mux.NewRouter(),\r\n\t}\r\n\r\n\t// Read existing name or generate a new one.\r\n\tif b, err := ioutil.ReadFile(filepath.Join(path, \"name\")); err == nil {\r\n\t\ts.name = string(b)\r\n\t} else {\r\n\t\ts.name = fmt.Sprintf(\"%07x\", rand.Int())[0:7]\r\n\t\tif err = ioutil.WriteFile(filepath.Join(path, \"name\"), []byte(s.name), 0644); err != nil {\r\n\t\t\tpanic(err)\r\n\t\t}\r\n\t}\r\n\r\n\treturn s\r\n}",
"func New(body string, statusCode int) *FakeHTTPServer {\n\treturn &FakeHTTPServer{\n\t\tserver: httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.Write([]byte(body))\n\t\t\tw.WriteHeader(statusCode)\n\t\t})),\n\t}\n}",
"func New(addr string) *Server {\n\treturn &Server{\n\t\tServer: &http.Server{\n\t\t\tAddr: addr,\n\t\t},\n\t\tlogger: defaultLogger,\n\t}\n}",
"func New(appStateUpdater env.AppStateUpdater, config libkbfs.Config) (\n\ts *Server, err error) {\n\tlogger := config.MakeLogger(\"HTTP\")\n\ts = &Server{\n\t\tappStateUpdater: appStateUpdater,\n\t\tconfig: config,\n\t\tlogger: logger,\n\t\tvlog: config.MakeVLogger(logger),\n\t}\n\tif s.fs, err = lru.New(fsCacheSize); err != nil {\n\t\treturn nil, err\n\t}\n\tif err = s.restart(); err != nil {\n\t\treturn nil, err\n\t}\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo s.monitorAppState(ctx)\n\ts.cancel = cancel\n\tlibmime.Patch(additionalMimeTypes)\n\treturn s, nil\n}",
"func NewFactory(v *viper.Viper, cfg config.Config) (storage.Client, error) {\n\tb := initFromViper(v, cfg)\n\tpool := &redis.Pool{\n\t\tMaxIdle: 80,\n\t\tMaxActive: 12000, // max number of connections\n\t\tDial: func() (redis.Conn, error) {\n\t\t\tc, err := redis.Dial(\"tcp\", b.Endpoint)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, errors.Wrap(err, \"Failed to create redis pool\")\n\t\t\t}\n\t\t\treturn c, err\n\t\t},\n\t}\n\tlogging.GetLogger().WithField(\"Endpoint\", b.Endpoint).Info(\"Creating redis pool\")\n\treturn NewClient(pool), nil\n}",
"func newServer(sc *ServerConfig, b backends.Backend, l log.Logger) (*server, error) {\n\tserver := &server{\n\t\tclientPool: NewPool(sc.MaxClients),\n\t\tclosedListener: make(chan bool, 1),\n\t\tlistenInterface: sc.ListenInterface,\n\t\tstate: ServerStateNew,\n\t\tenvelopePool: mail.NewPool(sc.MaxClients),\n\t}\n\tserver.logStore.Store(l)\n\tserver.backendStore.Store(b)\n\tlogFile := sc.LogFile\n\tif logFile == \"\" {\n\t\t// none set, use the same log file as mainlog\n\t\tlogFile = server.mainlog().GetLogDest()\n\t}\n\t// set level to same level as mainlog level\n\tmainlog, logOpenError := log.GetLogger(logFile, server.mainlog().GetLevel())\n\tserver.mainlogStore.Store(mainlog)\n\tif logOpenError != nil {\n\t\tserver.log().WithError(logOpenError).Errorf(\"Failed creating a logger for server [%s]\", sc.ListenInterface)\n\t}\n\n\tserver.setConfig(sc)\n\tserver.setTimeout(sc.Timeout)\n\tif err := server.configureSSL(); err != nil {\n\t\treturn server, err\n\t}\n\treturn server, nil\n}",
"func New(bind string) *Server {\n\treturn &Server{bind}\n}",
"func New(basepath string, addr []string) *Server {\n\ts := server.NewServer()\n\tstore := make(map[string]fn)\n\n\tbasepath = strings.Trim(basepath, \"/\")\n\tbasepath = \"/\" + basepath\n\n\treturn &Server{store, s, basepath, addr, ModeDebug}\n}",
"func New() *Server {\n\ts := &Server{\n\t\thandlers: map[string][]HandlerFunc{},\n\t\tclosing: make(chan struct{}),\n\t\tclosed: make(chan struct{}),\n\t}\n\ts.pool.New = func() interface{} {\n\t\treturn s.allocateContext()\n\t}\n\treturn s\n}",
"func New(db datalog.DB) *Server {\n\treturn &Server{\n\t\tDB: db,\n\t}\n}",
"func New() Server {\n\trouter := chi.NewRouter()\n\n\trouter.Use(middleware.Timeout(3 * time.Second))\n\trouter.Use(middleware.Logger)\n\trouter.Use(middleware.Recoverer)\n\trouter.Use(middleware.RealIP)\n\trouter.Use(middleware.RequestID)\n\trouter.Use(middleware.Throttle(1000))\n\trouter.Use(middleware.NoCache)\n\trouter.Use(middleware.SetHeader(\"Content-Type\", \"application/json\"))\n\n\treturn &server{Router: router}\n}",
"func NewServer(addr string) (*Server, error) {\n\ts := &Server{\n\t\trequests: make(chan *protocol.NetRequest, 8),\n\t\tresponses: make(chan *protocol.NetResponse, 8),\n\t\tAddr: addr,\n\t\trunning: true,\n\t\tgames: make(map[uint64]poker.GameLike, 0),\n\t}\n\n\tlis, err := net.Listen(\"tcp\", serverAddr())\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to listen: %v\\n\", err)\n\t}\n\tgrpcServer := grpc.NewServer()\n\tpb.RegisterGameServerServer(grpcServer, s)\n\n\tlog.Printf(\"server listening at %v\\n\", lis.Addr())\n\tif err := grpcServer.Serve(lis); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to serve: `%v`\\n\", err)\n\t}\n\n\treturn s, nil\n}",
"func New(pipeName string, hnd daemon.Handler) *Server {\n\treturn nil\n}",
"func newServer(handler connHandler, logger *zap.Logger) *server {\n\ts := &server{\n\t\thandler: handler,\n\t\tlogger: logger.With(zap.String(\"sector\", \"server\")),\n\t}\n\treturn s\n}",
"func New(config *Config) *Server {\n\ts := &Server{\n\t\tconfig: config,\n\t\trouter: chi.NewRouter(),\n\t\tlogger: newLogger(config.LogDebug),\n\t}\n\n\treturn s\n}",
"func newServer(ctx common.Context, self *replica, listener net.Listener, workers int) (net.Server, error) {\n\tserver := &rpcServer{ctx: ctx, logger: ctx.Logger(), self: self}\n\treturn net.NewServer(ctx, listener, serverInitHandler(server), workers)\n}",
"func New(swaggerStore string, hugoStore string, runMode string, externalIP string, hugoDir string) (*Server, error) {\n\t// Return a new struct\n\treturn &Server{\n\t\tServiceMap: make(map[string]string),\n\t\tSwaggerStore: swaggerStore,\n\t\tHugoStore: hugoStore,\n\t\tRunMode: runMode,\n\t\tExternalIP: externalIP,\n\t\tHugoDir: hugoDir,\n\t}, nil\n}",
"func New(m map[string]interface{}, ss *grpc.Server) (rgrpc.Service, error) {\n\tc, err := parseConfig(m)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tgatewaySelector, err := pool.GatewaySelector(c.GatewayAddr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tservice := &service{\n\t\tconf: c,\n\t\tgatewaySelector: gatewaySelector,\n\t}\n\n\treturn service, nil\n}",
"func New(f func(interface{})) Balancer {\n\toutgoing := make(chan message.Message, math.MaxInt16)\n\tincoming := make(chan message.Message, math.MaxInt16)\n\tm := &master{\n\t\tid: generateID(),\n\t\tf: f,\n\t\tincoming: incoming,\n\t\toutgoing: outgoing,\n\t}\n\tm.startWorker() // to have one worker at least waiting for tasks\n\tgo m.start()\n\tgo func() { // logging purposes\n\t\tfor range time.Tick(time.Second) {\n\t\t\tlog.Printf(\"(Worker Count, WIP, WIQ) (%d, %d, %d) go: %d\", m.workerCount, m.wip, m.wiq, runtime.NumGoroutine())\n\t\t}\n\t}()\n\treturn m\n}",
"func New() *Server {\n\treturn &Server{\n\t\tIdleTimeout: 620 * time.Second,\n\t\tTCPKeepAlivePeriod: 3 * time.Minute,\n\t\tGraceTimeout: 30 * time.Second,\n\t\tWaitBeforeShutdown: 10 * time.Second,\n\t\tTrustProxy: Trusted(),\n\t\tHandler: http.NotFoundHandler(),\n\t}\n}",
"func New(cfg *warden.ServerConfig, s *service.Service) *warden.Server {\n\tw := warden.NewServer(cfg)\n\tw.Use(func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {\n\t\tif resp, err = handler(ctx, req); err == nil {\n\t\t\tlog.Infov(ctx,\n\t\t\t\tlog.KV(\"path\", info.FullMethod),\n\t\t\t\tlog.KV(\"caller\", metadata.String(ctx, metadata.Caller)),\n\t\t\t\tlog.KV(\"remote_ip\", metadata.String(ctx, metadata.RemoteIP)),\n\t\t\t\tlog.KV(\"args\", fmt.Sprintf(\"%s\", req)),\n\t\t\t\tlog.KV(\"retVal\", fmt.Sprintf(\"%s\", resp)))\n\t\t}\n\t\treturn\n\t})\n\tv1.RegisterFilterServer(w.Server(), &server{s})\n\tws, err := w.Start()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn ws\n}",
"func New() *Server {\n\treturn &Server{\n\t\tusers: make(map[string]*User),\n\t}\n}",
"func New(\n\trun func(state State, cancel <-chan struct{}) error,\n\tstateChanged func(state State),\n\tonError func(err interface{})) Service {\n\tif run == nil {\n\t\tpanic(\"run must not be nil\")\n\t}\n\tif stateChanged == nil {\n\t\tstateChanged = noOpStateChanged\n\t}\n\tif onError == nil {\n\t\tonError = noOpOnError\n\t}\n\treturn &service{\n\t\tm: sync.Mutex{},\n\t\tstate: Stopped,\n\t\trun: run,\n\t\tstateChanged: stateChanged,\n\t\tonError: onError,\n\t}\n}",
"func NewServer(port string, newClients chan<- *Client, clientInput chan<- *ClientInputMessage) *Server {\n\tid := NewID(\"server\")\n\tlog := NewLogger(id)\n\treturn &Server{id, newClients, clientInput, port, nil, log}\n}",
"func TestNew(t *testing.T) {\n\tserver, err := New(\"tcp\", \"localhost\", \"50000\", 1)\n\tif err != nil || server == nil {\n\t\tt.Errorf(\"return non initialize server or erorr: %v\", err)\n\t}\n}",
"func NewServer(addr address.Address, isMaster result.IsMaster) Server {\r\n\ti := Server{\r\n\t\tAddr: addr,\r\n\r\n\t\tCanonicalAddr: address.Address(isMaster.Me).Canonicalize(),\r\n\t\tCompression: isMaster.Compression,\r\n\t\tElectionID: isMaster.ElectionID,\r\n\t\tLastUpdateTime: time.Now().UTC(),\r\n\t\tLastWriteTime: isMaster.LastWriteTimestamp,\r\n\t\tMaxBatchCount: isMaster.MaxWriteBatchSize,\r\n\t\tMaxDocumentSize: isMaster.MaxBSONObjectSize,\r\n\t\tMaxMessageSize: isMaster.MaxMessageSizeBytes,\r\n\t\tSaslSupportedMechs: isMaster.SaslSupportedMechs,\r\n\t\tSessionTimeoutMinutes: isMaster.LogicalSessionTimeoutMinutes,\r\n\t\tSetName: isMaster.SetName,\r\n\t\tSetVersion: isMaster.SetVersion,\r\n\t\tTags: tag.NewTagSetFromMap(isMaster.Tags),\r\n\t}\r\n\r\n\tif i.CanonicalAddr == \"\" {\r\n\t\ti.CanonicalAddr = addr\r\n\t}\r\n\r\n\tif isMaster.OK != 1 {\r\n\t\ti.LastError = fmt.Errorf(\"not ok\")\r\n\t\treturn i\r\n\t}\r\n\r\n\tfor _, host := range isMaster.Hosts {\r\n\t\ti.Members = append(i.Members, address.Address(host).Canonicalize())\r\n\t}\r\n\r\n\tfor _, passive := range isMaster.Passives {\r\n\t\ti.Members = append(i.Members, address.Address(passive).Canonicalize())\r\n\t}\r\n\r\n\tfor _, arbiter := range isMaster.Arbiters {\r\n\t\ti.Members = append(i.Members, address.Address(arbiter).Canonicalize())\r\n\t}\r\n\r\n\ti.Kind = Standalone\r\n\r\n\tif isMaster.IsReplicaSet {\r\n\t\ti.Kind = RSGhost\r\n\t} else if isMaster.SetName != \"\" {\r\n\t\tif isMaster.IsMaster {\r\n\t\t\ti.Kind = RSPrimary\r\n\t\t} else if isMaster.Hidden {\r\n\t\t\ti.Kind = RSMember\r\n\t\t} else if isMaster.Secondary {\r\n\t\t\ti.Kind = RSSecondary\r\n\t\t} else if isMaster.ArbiterOnly {\r\n\t\t\ti.Kind = RSArbiter\r\n\t\t} else {\r\n\t\t\ti.Kind = RSMember\r\n\t\t}\r\n\t} else if isMaster.Msg == \"isdbgrid\" {\r\n\t\ti.Kind = Mongos\r\n\t}\r\n\r\n\ti.WireVersion = &VersionRange{\r\n\t\tMin: isMaster.MinWireVersion,\r\n\t\tMax: isMaster.MaxWireVersion,\r\n\t}\r\n\r\n\treturn i\r\n}",
"func NewServer() *Server {\n\tctx := NewContext(&State{}, nil)\n\treturn &Server{ctx: ctx}\n}",
"func New(fi Fire, rds Redis, req request.Requestor, clock tsutil.Clock, logger Logger) *Server {\n\tsigchains := keys.NewSigchains(fi)\n\tusrs := users.New(fi, sigchains, users.Requestor(req), users.Clock(clock))\n\treturn &Server{\n\t\tfi: fi,\n\t\trds: rds,\n\t\tclock: tsutil.NewClock(),\n\t\ttasks: newUnsetTasks(),\n\t\tsigchains: sigchains,\n\t\tusers: usrs,\n\t\tlogger: logger,\n\t}\n}",
"func New(c *conf.RPCServer, l *logic.Logic) *grpc.Server {\n\tkeepParams := grpc.KeepaliveParams(keepalive.ServerParameters{\n\t\tMaxConnectionIdle: time.Duration(c.IdleTimeout),\n\t\tMaxConnectionAgeGrace: time.Duration(c.ForceCloseWait),\n\t\tTime: time.Duration(c.KeepAliveInterval),\n\t\tTimeout: time.Duration(c.KeepAliveTimeout),\n\t\tMaxConnectionAge: time.Duration(c.MaxLifeTime),\n\t})\n\tsrv := grpc.NewServer(keepParams)\n\tpb.RegisterLogicServer(srv, &server{l})\n\tlis, err := net.Listen(c.Network, c.Addr)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tif err := srv.Serve(lis); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}()\n\treturn srv\n}",
"func NewFactory() receiver.Factory {\n\treturn receiver.NewFactory(\n\t\ttypeStr,\n\t\tcreateDefaultConfig,\n\t\treceiver.WithTraces(createTraces, component.StabilityLevelStable),\n\t\treceiver.WithMetrics(createMetrics, component.StabilityLevelStable),\n\t\treceiver.WithLogs(createLog, component.StabilityLevelBeta))\n}",
"func New(e *step.Endpoints, uh goagrpc.UnaryHandler) *Server {\n\treturn &Server{\n\t\tListH: NewListHandler(e.List, uh),\n\t\tAddH: NewAddHandler(e.Add, uh),\n\t\tRemoveH: NewRemoveHandler(e.Remove, uh),\n\t\tUpdateH: NewUpdateHandler(e.Update, uh),\n\t}\n}",
"func New(storage Storage) Server {\n\ts := &server{\n\t\tstorage: storage,\n\t\tr: chi.NewMux(),\n\t}\n\ts.routes()\n\treturn s\n}",
"func New(store kvstore.KVStore) KeyValueServer {\n\t// TODO: implement this!\n\tvar server keyValueServer\n\t\n\tserver.clientNum = 0\n\tserver.listener = nil\n\tserver.readChan = make(chan []byte)\n\tserver.channelMap = make(map[net.Conn]chan []byte)\n\t\n\t// 使用接口时,返回接口类型变量, 参考 book p113\n\treturn &server\n}",
"func NewServer(name string, port int) Server {\n\ts := new(serv)\n\ts.name = name\n\ts.ip = Here()\n\ts.port = port\n\n\ts.running = false\n\n\ts.pks = make(chan network.Packet)\n\n\treturn s\n}",
"func New(opts ...Option) Service {\n\to := newOptions(opts...)\n\ts := &server{\n\t\tid: o.ID,\n\t\troutes: make([]Route, 0),\n\t}\n\treturn s\n}",
"func NewServer(run func(string, *sf.DB) *sf.PipeReader, db *sf.DB) *server {\n\treturn &server{run: run, db: db}\n}",
"func New(cfg *config.Config) (*Server, error) {\n\tstorageMgr, err := storage.NewManager(cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create storage manager\")\n\t}\n\n\tsourceClient, err := source.NewSourceClient()\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create source client\")\n\t}\n\t// progress manager\n\tprogressMgr, err := progress.NewManager(cfg)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create progress manager\")\n\t}\n\n\t// cdn manager\n\tcdnMgr, err := cdn.NewManager(cfg, storageMgr, progressMgr, sourceClient)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create cdn manager\")\n\t}\n\n\t// task manager\n\ttaskMgr, err := task.NewManager(cfg, cdnMgr, progressMgr, sourceClient)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create task manager\")\n\t}\n\tstorageMgr.SetTaskMgr(taskMgr)\n\tstorageMgr.InitializeCleaners()\n\tprogressMgr.SetTaskMgr(taskMgr)\n\t// gc manager\n\tgcMgr, err := gc.NewManager(cfg, taskMgr, cdnMgr)\n\tif err != nil {\n\t\treturn nil, errors.Wrapf(err, \"failed to create gc manager\")\n\t}\n\n\treturn &Server{\n\t\tConfig: cfg,\n\t\tTaskMgr: taskMgr,\n\t\tGCMgr: gcMgr,\n\t}, nil\n}",
"func (tM *TheaterManager) New(name string, port string, db *sql.DB, redis *redis.Client) {\n\tvar err error\n\n\ttM.socket = new(gs.Socket)\n\ttM.socketUDP = new(gs.SocketUDP)\n\ttM.db = db\n\ttM.redis = redis\n\ttM.name = name\n\ttM.eventsChannel, err = tM.socket.New(tM.name, port, true)\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\ttM.eventsChannelUDP, err = tM.socketUDP.New(tM.name, port, true)\n\tif err != nil {\n\t\tlog.Errorln(err)\n\t}\n\ttM.stopTicker = make(chan bool, 1)\n\n\ttM.gameServerGlobal = new(core.RedisState)\n\ttM.gameServerGlobal.New(tM.redis, \"gameServer-config\")\n\ttM.gameServerGlobal.Set(\"Lobbies\", \"0\")\n\n\tgo tM.run()\n}",
"func New(listener net.Listener, httpServer *http.Server) goroutine.BackgroundRoutine {\n\treturn &server{\n\t\tserver: httpServer,\n\t\tmakeListener: func() (net.Listener, error) { return listener, nil },\n\t}\n}",
"func (s *Server) New() (*http.Server, error) {\n\taddr := s.Address\n\tif addr == \"\" {\n\t\taddr = defaultAddr\n\t}\n\th, err := s.Handler()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tsrv := &http.Server{\n\t\tHandler: h,\n\t\tAddr: addr,\n\t}\n\treturn srv, nil\n}",
"func New(address string) *Server {\n\treturn &Server{\n\t\taddress: address,\n\t\thandlerGet: NewGetHandler(&get.Getter{}),\n\t\thandlerList: NewListHandler(&list.Lister{}),\n\t\thandlerNotFound: notFoundHandler,\n\t\thandlerRegister: NewRegisterHandler(®ister.Registerer{}),\n\t}\n}",
"func New() backend.Backend {\n\treturn &remotestate.Backend{\n\t\tConfigureFunc: configure,\n\n\t\t// Set the schema\n\t\tBackend: &schema.Backend{\n\t\t\tSchema: map[string]*schema.Schema{\n\t\t\t\t\"lock_id\": &schema.Schema{\n\t\t\t\t\tType: schema.TypeString,\n\t\t\t\t\tOptional: true,\n\t\t\t\t\tDescription: \"initializes the state in a locked configuration\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}",
"func New(proxy autocomplete.AVSProxy, cache autocomplete.Database) *Server {\n\treturn &Server{\n\t\tproxy: proxy,\n\t\tcache: cache,\n\t\tlogger: log.New(ioutil.Discard, \"\", 0),\n\t}\n}",
"func NewFactory() receiver.Factory {\n\treturn receiver.NewFactory(\n\t\tmetadata.Type,\n\t\tcreateDefaultConfig,\n\t\treceiver.WithMetrics(createMetricsReceiver, metadata.MetricsStability),\n\t)\n}",
"func New(config *configuration.Config, vs *library.Library, auth *auth.Manager) *Server {\n\treturn &Server{\n\t\tBase: subapp.NewBase(AppName),\n\t\tconfig: config,\n\t\tlibrary: vs,\n\t\tauthManager: auth,\n\t\trender: render.New(),\n\t}\n}",
"func New() *Server {\n\treturn &Server{make([]*websocket.Conn, 0, 10)}\n}"
] | [
"0.6668877",
"0.66384757",
"0.6611179",
"0.654152",
"0.654152",
"0.64563155",
"0.6447488",
"0.6444242",
"0.6434464",
"0.63831216",
"0.6364889",
"0.63534516",
"0.6332981",
"0.63056153",
"0.6242616",
"0.6237104",
"0.6193025",
"0.6185973",
"0.613092",
"0.6101046",
"0.610016",
"0.6099682",
"0.60954905",
"0.60954905",
"0.6038227",
"0.603394",
"0.603394",
"0.6029785",
"0.6026022",
"0.60118896",
"0.6009473",
"0.60089463",
"0.6007154",
"0.60069954",
"0.6002072",
"0.59993064",
"0.5995598",
"0.5985598",
"0.59712166",
"0.59635735",
"0.5962997",
"0.5961422",
"0.59601194",
"0.59598035",
"0.5959132",
"0.59556663",
"0.5946979",
"0.593331",
"0.5929952",
"0.5926401",
"0.592375",
"0.5920837",
"0.59137374",
"0.59130013",
"0.5909386",
"0.5906611",
"0.58862936",
"0.58779705",
"0.5872309",
"0.58670217",
"0.58630246",
"0.5862156",
"0.586046",
"0.58529025",
"0.5848631",
"0.58312273",
"0.582767",
"0.58273244",
"0.5823224",
"0.58216727",
"0.5820019",
"0.5799992",
"0.57948923",
"0.57902473",
"0.5786742",
"0.57819766",
"0.5781839",
"0.577899",
"0.57789165",
"0.57708836",
"0.5770661",
"0.57705015",
"0.5764455",
"0.57587963",
"0.57472575",
"0.57276845",
"0.5726274",
"0.5724339",
"0.57240325",
"0.57227725",
"0.57214063",
"0.5715943",
"0.57095903",
"0.5706848",
"0.570342",
"0.57006896",
"0.5699947",
"0.56972903",
"0.56953317",
"0.56870216"
] | 0.675499 | 0 |
Async evaluates the given function using the state of the server as its only argument; it may return before the function has been evaluated. | func (s *Server) Async(fn func(interface{})) {
s.topic.Publish(fn)
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func ExpensiveFunction(n interface{}) interface{} {\n\tfmt.Printf(\"Executing expensive calculation for %v\\n\", n)\n\ttime.Sleep(5 * time.Second)\n\treturn n\n}",
"func (t TaskFunc) Execute() { t() }",
"func CallVal(f func() interface{}) interface{} {\n\tcheckRun()\n\trespChan := make(chan interface{})\n\tcallQueue <- func() {\n\t\trespChan <- f()\n\t}\n\treturn <-respChan\n}",
"func WithAsyncFunc(f func(r Result)) InvocationOption {\n\treturn func(op InvocationOp) InvocationOp { op.Func = f; op.Async = true; return op }\n}",
"func (s *Server) apiAsync(fn func() error) error {\n\terr := make(chan error)\n\ts.apiAsyncCh <- &Fn{fn: fn, err: err}\n\treturn <-err\n}",
"func (e *executor) call(fn func() error, format string, args ...interface{}) error {\n\treturn e.function(e.opts, fn, format, args...)\n}",
"func (session Runtime) Evaluate(code string, async bool, returnByValue bool) (interface{}, error) {\n\tresult, err := session.evaluate(code, session.currentContext(), async, returnByValue)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn result.Value, nil\n}",
"func (e *Exclusive) CallAsync(key interface{}, value func() (interface{}, error)) <-chan *ExclusiveOutcome {\n\treturn e.CallAfterAsync(key, value, 0)\n}",
"func forwardAsync(fhandler *flowHandler, currentNodeId string, result []byte) ([]byte, error) {\n\tvar hash []byte\n\tstore := make(map[string]string)\n\n\t// get pipeline\n\tpipeline := fhandler.getPipeline()\n\n\t// Get pipeline state\n\tpipelineState := pipeline.GetState()\n\n\tdefaultStore, ok := fhandler.dataStore.(*requestEmbedDataStore)\n\tif ok {\n\t\tstore = defaultStore.store\n\t}\n\n\t// Build request\n\tuprequest := buildRequest(fhandler.id, string(pipelineState), fhandler.query, result, store)\n\n\t// Make request data\n\tdata, _ := uprequest.encode()\n\n\t// Check if HMAC used\n\tif hmacEnabled() {\n\t\tkey := getHmacKey()\n\t\thash = hmac.Sign(data, []byte(key))\n\t}\n\n\t// build url for calling the flow in async\n\thttpreq, _ := http.NewRequest(http.MethodPost, fhandler.asyncUrl, bytes.NewReader(data))\n\thttpreq.Header.Add(\"Accept\", \"application/json\")\n\thttpreq.Header.Add(\"Content-Type\", \"application/json\")\n\n\t// If hmac is enabled set digest\n\tif hmacEnabled() {\n\t\thttpreq.Header.Add(\"X-Hub-Signature\", \"sha1=\"+hex.EncodeToString(hash))\n\t}\n\n\t// extend req span for async call (TODO : Get the value)\n\tfhandler.tracer.extendReqSpan(fhandler.id, currentNodeId,\n\t\tfhandler.asyncUrl, httpreq)\n\n\tclient := &http.Client{}\n\tres, resErr := client.Do(httpreq)\n\tif resErr != nil {\n\t\treturn nil, resErr\n\t}\n\n\tdefer res.Body.Close()\n\tresdata, _ := ioutil.ReadAll(res.Body)\n\n\tif res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {\n\t\treturn resdata, fmt.Errorf(res.Status)\n\t}\n\treturn resdata, nil\n}",
"func (session Runtime) evaluate(expression string, contextID int64, async, returnByValue bool) (*devtool.RemoteObject, error) {\n\tp := &devtool.EvaluatesExpression{\n\t\tExpression: expression,\n\t\tIncludeCommandLineAPI: true,\n\t\tContextID: contextID,\n\t\tAwaitPromise: !async,\n\t\tReturnByValue: returnByValue,\n\t}\n\tresult := new(devtool.EvaluatesResult)\n\tif err := session.call(\"Runtime.evaluate\", p, result); err != nil {\n\t\treturn nil, err\n\t}\n\tif result.ExceptionDetails != nil {\n\t\treturn nil, result.ExceptionDetails\n\t}\n\treturn result.Result, nil\n}",
"func async(fn func() error) <-chan error {\n\terrChan := make(chan error, 0)\n\tgo func() {\n\t\tselect {\n\t\tcase errChan <- fn():\n\t\tdefault:\n\t\t}\n\n\t\tclose(errChan)\n\t}()\n\n\treturn errChan\n}",
"func (cb *CircuitBreaker) Exec(fn RequestFunc) (interface{}, error) {\n\tif cb.blocked {\n\t\treturn nil, ErrBlocked\n\t}\n\n\tswitch cb.state.Status() {\n\tcase StatusClosed:\n\t\tres, err := fn()\n\t\tif err != nil {\n\t\t\tcb.handleError(err)\n\t\t\treturn nil, err\n\t\t}\n\t\tcb.counter.Success()\n\t\treturn res, nil\n\tcase StatusHalfOpen:\n\t\t// half open is intermediate state, where any failure will set back the circuitbreaker into open state\n\t\t// if required number of success responses are received circuitbreaker goes back to the closed state\n\t\tres, err := fn()\n\t\tif err != nil {\n\t\t\tcb.handleError(err)\n\t\t\treturn nil, err\n\t\t}\n\t\tif cb.counter.Success() > cb.successThreshold {\n\t\t\tcb.state.Set(StatusClosed)\n\t\t}\n\t\treturn res, nil\n\tcase StatusOpen:\n\t\treturn nil, ErrRequestDisabled\n\t}\n\treturn nil, nil\n}",
"func (*Execute) Frontend() {}",
"func (t TaskFunc) Run() { t() }",
"func (s *Server) Sync(fn func(interface{}) interface{}) interface{} {\n\tch := make(chan interface{}, 1)\n\ts.Async(func(message interface{}) {\n\t\tdefer close(ch)\n\t\tch <- fn(message)\n\t})\n\treturn <-ch\n}",
"func (server *Server) RunAsync(callback func(errors.Error)) errors.Error {\n\tserver.asyncServer = &http.Server{Addr: server.config.ListenAddress, Handler: server.engine}\n\tvar returnErr errors.Error\n\tgo func() {\n\t\tserver.notifyBeginServing()\n\t\terr := server.asyncServer.ListenAndServe()\n\t\tif err != nil {\n\t\t\tif err == http.ErrServerClosed {\n\t\t\t\treturnErr = ErrGraceShutdown.Make()\n\t\t\t} else {\n\t\t\t\treturnErr = ErrServeFailed.Make().Cause(err)\n\t\t\t}\n\t\t}\n\t\tserver.notifyStopServing()\n\t\tif callback != nil {\n\t\t\tcallback(returnErr)\n\t\t}\n\t}()\n\ttime.Sleep(100 * time.Millisecond)\n\treturn returnErr\n}",
"func asyncExecute(wg *sync.WaitGroup, fn func()) {\n\t(*wg).Add(1)\n\tgo func() {\n\t\tdefer (*wg).Done()\n\t\tfn()\n\t}()\n}",
"func Call(f func()) {\n\tdone := dPool.Get().(chan struct{})\n\tdefer dPool.Put(done)\n\tfq <- fun{fn: f, done: done}\n\t<-done\n}",
"func ExecutorHTTP(fn FuncHTTP) error {\n\treturn ExecutorHTTPWithPolicyType(StandardPolicy, fn)\n}",
"func (p *EventLoop) Execute(fn WorkFunc) WorkUnit {\n\n\tw := &workUnit{\n\t\tdone: make(chan struct{}),\n\t\tfn: fn,\n\t}\n\n\tgo func() {\n\t\tp.m.RLock()\n\t\tif p.closed {\n\t\t\tw.err = &ErrPoolClosed{s: errClosed}\n\t\t\tif w.cancelled.Load() == nil {\n\t\t\t\tclose(w.done)\n\t\t\t}\n\t\t\tp.m.RUnlock()\n\t\t\treturn\n\t\t}\n\n\t\tp.work <- w\n\n\t\tp.m.RUnlock()\n\t}()\n\n\treturn w\n}",
"func (contract *Contract) EvaluatePublicFunction(functionName string) (bool, error) {\r\n // TODO: Check if parameter vals haven't been set yet. Use flags.\r\n\r\n lockingScript, err := contract.GetLockingScript()\r\n if err != nil {\r\n return false, err\r\n }\r\n unlockingScript, err := contract.GetUnlockingScript(functionName)\r\n if err != nil {\r\n return false, err\r\n }\r\n\r\n if ! contract.contextSet {\r\n err = interpreter.NewEngine().Execute(interpreter.WithScripts(lockingScript, unlockingScript))\r\n if err != nil {\r\n return false, err\r\n }\r\n } else {\r\n //input := contract.executionContext.Tx.InputIdx(contract.executionContext.InputIdx)\r\n //if input == nil {\r\n // return false, errors.New(fmt.Sprintf(\"Context transaction has no input with index %d.\", contract.executionContext.InputIdx))\r\n //}\r\n contract.executionContext.Tx.Inputs[contract.executionContext.InputIdx].UnlockingScript = unlockingScript\r\n prevoutSats := contract.executionContext.Tx.InputIdx(contract.executionContext.InputIdx).PreviousTxSatoshis\r\n\r\n engine := interpreter.NewEngine()\r\n err = engine.Execute(\r\n //interpreter.WithScripts(\r\n // lockingScript,\r\n // unlockingScript,\r\n //),\r\n interpreter.WithTx(\r\n contract.executionContext.Tx,\r\n contract.executionContext.InputIdx,\r\n //contract.executionContext.PreviousTxOut,\r\n &bt.Output{LockingScript: lockingScript, Satoshis: prevoutSats},\r\n ),\r\n interpreter.WithFlags(\r\n contract.executionContext.Flags,\r\n ),\r\n )\r\n if err != nil {\r\n return false, err\r\n }\r\n }\r\n\r\n return true, nil\r\n}",
"func (f Func) Call(functionName string, payload []byte, contentType StoreContentType) (<-chan FuncResponse, error) {\n\n\tresponseChan := make(chan FuncResponse)\n\tgo f.parseRawFuncResponse(functionName, payload, contentType, responseChan)\n\treturn responseChan, nil\n\n}",
"func asyncEval(\n\tt testing.TestingT,\n\twg *sync.WaitGroup,\n\terrChan chan error,\n\toptions *EvalOptions,\n\tdownloadedPolicyPath string,\n\tjsonFilePath string,\n\tresultQuery string,\n) {\n\tdefer wg.Done()\n\tcmd := shell.Command{\n\t\tCommand: \"opa\",\n\t\tArgs: formatOPAEvalArgs(options, downloadedPolicyPath, jsonFilePath, resultQuery),\n\n\t\t// Do not log output from shell package so we can log the full json without breaking it up. This is ok, because\n\t\t// opa eval is typically very quick.\n\t\tLogger: logger.Discard,\n\t}\n\terr := runCommandWithFullLoggingE(t, options.Logger, cmd)\n\truleBasePath := filepath.Base(downloadedPolicyPath)\n\tif err == nil {\n\t\toptions.Logger.Logf(t, \"opa eval passed on file %s (policy %s; query %s)\", jsonFilePath, ruleBasePath, resultQuery)\n\t} else {\n\t\toptions.Logger.Logf(t, \"Failed opa eval on file %s (policy %s; query %s)\", jsonFilePath, ruleBasePath, resultQuery)\n\t\tif options.DebugDisableQueryDataOnError == false {\n\t\t\toptions.Logger.Logf(t, \"DEBUG: rerunning opa eval to query for full data.\")\n\t\t\tcmd.Args = formatOPAEvalArgs(options, downloadedPolicyPath, jsonFilePath, \"data\")\n\t\t\t// We deliberately ignore the error here as we want to only return the original error.\n\t\t\trunCommandWithFullLoggingE(t, options.Logger, cmd)\n\t\t}\n\t}\n\terrChan <- err\n}",
"func (am *AsyncMachine) Execute() (AsyncState, error) {\n\trecvCtx, cancelRecvCtx := context.WithCancel(am.ctx)\n\tdefer cancelRecvCtx()\n\n\trecvChan := make(chan net.Message, asyncReceiveBuffer)\n\thandler := func(msg net.Message) {\n\t\trecvChan <- msg\n\t}\n\tam.channel.Recv(recvCtx, handler)\n\n\tcurrentState := am.initialState\n\n\tonStateDone := asyncStateTransition(\n\t\tam.ctx,\n\t\tam.logger,\n\t\tcurrentState,\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-recvChan:\n\t\t\terr := currentState.Receive(msg)\n\t\t\tif err != nil {\n\t\t\t\tam.logger.Errorf(\n\t\t\t\t\t\"[member:%v,state:%T] failed to receive a message: [%v]\",\n\t\t\t\t\tcurrentState.MemberIndex(),\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\tcase err := <-onStateDone:\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"failed to initiate state [%T]: [%w]\",\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tnextState, err := currentState.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"failed to complete state [%T]: [%w]\",\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tif nextState == nil {\n\t\t\t\tam.logger.Infof(\n\t\t\t\t\t\"[member:%v,state:%T] reached final state\",\n\t\t\t\t\tcurrentState.MemberIndex(),\n\t\t\t\t\tcurrentState,\n\t\t\t\t)\n\t\t\t\treturn currentState, nil\n\t\t\t}\n\n\t\t\tcurrentState = nextState\n\t\t\tonStateDone = asyncStateTransition(\n\t\t\t\tam.ctx,\n\t\t\t\tam.logger,\n\t\t\t\tcurrentState,\n\t\t\t)\n\n\t\tcase <-am.ctx.Done():\n\t\t\treturn nil, am.ctx.Err()\n\t\t}\n\t}\n}",
"func (salt *Client) StateAsync(tgt []string, state string) (string, error) {\n\n\tsalt.Auth()\n\tsaltRes := Response{}\n\tclient := \"local_async\"\n\tfun := \"state.sls\"\n\tjob := Request{client, fun, state, tgt, \"list\"}\n\tres,err := salt.Post(\"/\", job)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tresbyte,err:= ioutil.ReadAll(res.Body)\n\n\tif err!= nil {\n\t\treturn \"\",err\n\t}\n\tif err := json.Unmarshal(resbyte, &saltRes); err != nil {\n\t\treturn \"\", nil\n\t}\n\treturn saltRes.Return[0].Jid, err\n}",
"func influunt_ExecutorRunAsync(self, args *pyObject) *C.PyObject {\n\teCapsule, inputs, outputs, callback := parse4ObjectFromArgs(args)\n\te := capsuleToPointer(eCapsule)\n\texec := pointer.Restore(e).(*executor.Executor)\n\n\tinputMap, err := convertPyDictNodeMap(inputs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\toutputArr, err := convertPyListToNodeArr(outputs)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tgo func() {\n\t\tresponses, err := exec.Run(inputMap, outputArr)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\n\t\tgstate := C.PyGILState_Ensure()\n\t\tdefer C.PyGILState_Release(gstate)\n\n\t\targs := C.PyTuple_New(C.long(len(responses)))\n\t\tfor i, val := range responses {\n\t\t\tpyVal, err := convertGoTypeToPyObject(val)\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\n\t\t\tC.PyTuple_SetItem(args, C.long(i), pyVal)\n\t\t}\n\n\t\tres := C.PyObject_CallObject(callback, args)\n\t\tif res == nil {\n\t\t\t// TODO handle error\n\t\t\tC.PyErr_Print()\n\t\t}\n\t\tpyRelease(args)\n\t}()\n\n\tpyRetain(C.Py_None)\n\treturn C.Py_None\n}",
"func _[T interface{ ~func() }](f T) {\n\tf()\n\tgo f()\n}",
"func (o *SyncStore[T]) Exec(key string, f func() (T, error)) (T, error) {\n\tval, err := o.sf.Do(key, func() (_ interface{}, err error) {\n\t\t// trap any runtime error due to synchronization issues.\n\t\tdefer func() {\n\t\t\tif rErr := recover(); rErr != nil {\n\t\t\t\terr = retrieveError(key, rErr)\n\t\t\t}\n\t\t}()\n\t\tv, err, ok := o.results.Load(key)\n\t\tif ok {\n\t\t\treturn v, err\n\t\t}\n\t\tv, err = f()\n\t\to.results.Store(key, v, err)\n\t\treturn v, err\n\t})\n\tvar defaultT T\n\tif err != nil {\n\t\treturn defaultT, err\n\t}\n\tswitch t := val.(type) {\n\tcase error:\n\t\treturn defaultT, t\n\tcase T:\n\t\treturn t, nil\n\tdefault:\n\t\treturn defaultT, err\n\t}\n}",
"func HandleFunction(L *lua.LState) int {\n\ts := checkServer(L, 1)\n\tf := L.CheckFunction(2)\n\tif len(f.Upvalues) > 0 {\n\t\tL.ArgError(2, \"cannot pass closures\")\n\t}\n\n\t// Stash any args to pass to the function beyond response and request\n\tvar args []lua.LValue\n\ttop := L.GetTop()\n\tfor i := 3; i <= top; i++ {\n\t\targs = append(args, L.Get(i))\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase data := <-s.serveData:\n\t\t\tgo func(sData *serveData) {\n\t\t\t\tstate := newHandlerState(sData)\n\t\t\t\tdefer state.Close()\n\t\t\t\tresponse := state.GetGlobal(\"response\")\n\t\t\t\trequest := state.GetGlobal(\"request\")\n\t\t\t\tf := state.NewFunctionFromProto(f.Proto)\n\t\t\t\tstate.Push(f)\n\t\t\t\tstate.Push(response)\n\t\t\t\tstate.Push(request)\n\t\t\t\t// Push any extra args\n\t\t\t\tfor _, arg := range args {\n\t\t\t\t\tstate.Push(arg)\n\t\t\t\t}\n\t\t\t\tif err := state.PCall(2+len(args), 0, nil); err != nil {\n\t\t\t\t\tlog.Printf(\"[ERROR] handle: %s\\n\", err.Error())\n\t\t\t\t\tdata.done <- true\n\t\t\t\t\tlog.Printf(\"[ERROR] closed connection\\n\")\n\t\t\t\t}\n\t\t\t\tstate.Pop(state.GetTop())\n\t\t\t}(data)\n\t\t}\n\t}\n}",
"func (c *remotingClient) InvokeAsync(ctx context.Context, addr string, request *RemotingCommand, callback func(*ResponseFuture)) error {\n\tconn, err := c.connect(ctx, addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tresp := NewResponseFuture(ctx, request.Opaque, callback)\n\tc.responseTable.Store(resp.Opaque, resp)\n\terr = c.sendRequest(conn, request)\n\tif err != nil {\n\t\treturn err\n\t}\n\tgo primitive.WithRecover(func() {\n\t\tc.receiveAsync(resp)\n\t})\n\treturn nil\n}",
"func await(fn func()) {\n\tmu.Lock()\n\tif !paused {\n\t\tmu.Unlock()\n\t\tfn()\n\t\treturn\n\t}\n\tch := make(chan struct{})\n\twaiters = append(waiters, ch)\n\tmu.Unlock()\n\tgo func() {\n\t\t<-ch\n\t\tfn()\n\t}()\n}",
"func (c *Client) ExecuteFunction(request *ExecuteFunctionRequest) (response *ExecuteFunctionResponse, err error) {\n if request == nil {\n request = NewExecuteFunctionRequest()\n }\n response = NewExecuteFunctionResponse()\n err = c.Send(request, response)\n return\n}",
"func ProxyFunction(f Executor) func(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\treturn func(request events.APIGatewayProxyRequest) (events.APIGatewayProxyResponse, error) {\n\t\tstatusResponse := http.StatusInternalServerError\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tdefer cancel()\n\n\t\tctx = context.WithValue(ctx, ContextKey(\"request\"), &request)\n\n\t\tresult, err := f(ctx)\n\t\tif err == nil {\n\t\t\tstatusResponse = http.StatusOK\n\t\t}\n\n\t\tencodedResult, encodingErr := json.Marshal(result)\n\t\tif err != nil {\n\t\t\treturn events.APIGatewayProxyResponse{\n\t\t\t\tStatusCode: statusResponse,\n\t\t\t\tBody: err.Error(),\n\t\t\t}, encodingErr\n\n\t\t}\n\n\t\tresponse := events.APIGatewayProxyResponse{\n\t\t\tStatusCode: statusResponse,\n\t\t\tBody: string(encodedResult),\n\t\t}\n\n\t\treturn response, err\n\n\t}\n}",
"func EagerPyFuncIsAsync(value bool) EagerPyFuncAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"is_async\"] = value\n\t}\n}",
"func (cl ConcurrencyLimit) Exec(fn func()) {\n\tcl <- struct{}{}\n\tfn()\n\t<-cl\n}",
"func (mgr *Worker) AsynCall0(f func([]interface{}), cb func(), args ...interface{}) {\n\tjob := &Job0{\n\t\targs: args,\n\t\tcb: cb,\n\t\tf: f,\n\t}\n\tmgr.AddJob(job)\n}",
"func runEquationAsync(equationOutput ChanBig, count int) {\n\n\t// These are our factor constants for b,c,d,e in the equation;\n\t// Reference factors.go:\n\tfactors := NewFactors()\n\n\t/////////////////////////////////////\n\t//1. Make channels for our functions:\n\tdivisorA:=makeChanBig(8)\n\tdivisorB:=make(chan int64, 4)\n\tdivisorC:=make(chan int64, 4)\n\tdivisorD:=make(chan int64, 4)\n\tdivisorE:=make(chan int64, 4)\n\n\tequationA:=makeChanBig(4)\n\tequationB:=makeChanBig(4)\n\tequationC:=makeChanBig(4)\n\tequationD:=makeChanBig(4)\n\tequationE:=makeChanBig(4)\n\n\t/////////////////////////////////////\n\t//2. Start all our goroutines.\n\n\t// The former drives the latter:\n\tgo makePowers(divisorA, 16, count)\n\tgo chanBigDivides(equationA, divisorA, factors.bigD1)\n\n\t// makeMultiples drives chanDivides:\n\tgo makeMultiples(divisorB, divisorC, divisorD, divisorE, 8, count)\n\tgo chanDivides(equationB, divisorB, 1, factors.bigD4)\n\tgo chanDivides(equationC, divisorC, 4, factors.bigD2)\n\tgo chanDivides(equationD, divisorD, 5, factors.bigD1)\n\tgo chanDivides(equationE, divisorE, 6, factors.bigD1)\n\n\t// Everything comes out of chanDivides/chanBigDivides into equation()\n\tgo equation1(equationOutput, equationA, equationB, equationC, equationD, equationE, count)\n}",
"func (runner *suiteRunner) runFunc(method *reflect.FuncValue, kind funcKind,\n dispatcher func(c *C)) *C {\n c := runner.forkCall(method, kind, dispatcher)\n <-c.done\n return c\n}",
"func (c *conn) Get(fn func([]byte)) (err error) {\n\tc.mux.Lock()\n\tif err = c.get(fn); err != nil {\n\t\tc.setIdle()\n\t}\n\tc.mux.Unlock()\n\treturn\n}",
"func executeFunction(pipeline *sdk.Pipeline, operation *sdk.Operation, data []byte) ([]byte, error) {\n\tvar err error\n\tvar result []byte\n\n\tname := operation.Function\n\tparams := operation.GetParams()\n\theaders := operation.GetHeaders()\n\n\tgateway := getGateway()\n\turl := buildURL(\"http://\"+gateway, \"function\", name)\n\n\tmethod := os.Getenv(\"default-method\")\n\tif method == \"\" {\n\t\tmethod = \"POST\"\n\t}\n\n\tif m, ok := headers[\"method\"]; ok {\n\t\tmethod = m\n\t}\n\n\thttpreq, err := buildHttpRequest(url, method, data, params, headers)\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"cannot connect to Function on URL: %s\", url)\n\t}\n\n\tif operation.Requesthandler != nil {\n\t\toperation.Requesthandler(httpreq)\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(httpreq)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tdefer resp.Body.Close()\n\tif operation.OnResphandler != nil {\n\t\tresult, err = operation.OnResphandler(resp)\n\t} else {\n\t\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\t\terr = fmt.Errorf(\"invalid return status %d while connecting %s\", resp.StatusCode, url)\n\t\t\tresult, _ = ioutil.ReadAll(resp.Body)\n\t\t} else {\n\t\t\tresult, err = ioutil.ReadAll(resp.Body)\n\t\t}\n\t}\n\n\treturn result, err\n}",
"func (c *Concurrent) Call(function interface{}, params ...interface{}) error {\n\n\tif c.HasError() {\n\t\treturn c.GetLastError()\n\t}\n\n\tif c.closeSignal {\n\t\treturn ErrVChanClosed\n\t}\n\n\tf := reflect.TypeOf(function)\n\n\t// fucntion callable validate\n\tif f.Kind() != reflect.Func {\n\t\treturn fmt.Errorf(\"Concurrent-handler must be a callable func\")\n\t}\n\n\tfin := f.NumIn()\n\tfout := f.NumOut()\n\n\t// params validate\n\tif fin > len(params) {\n\t\treturn fmt.Errorf(\"Call function <%s> with too few input arguments(%d), need %d\", function, len(params), fin)\n\t}\n\tin := make([]reflect.Value, len(params))\n\tfor i := 0; i < len(params); i++ {\n\t\tin[i] = reflect.ValueOf(params[i])\n\t}\n\n\t// allocate thread\n\tc.semaphore.P()\n\tgo func(ticket uint64) {\n\t\tdefer c.semaphore.V() // free thread\n\t\tvals := reflect.ValueOf(function).Call(in)\n\t\tif fout != len(vals) {\n\t\t\tc.AddError(fmt.Errorf(\"The number of return values does not match\"))\n\t\t\treturn\n\t\t}\n\t\trets := make([]interface{}, fout)\n\t\tfor i, v := range vals {\n\t\t\trets[i] = v.Interface()\n\t\t}\n\t\tif !c.orderSignal {\n\t\t\tc.values <- rets\n\t\t\treturn\n\t\t}\n\t\tc.cond.L.Lock()\n\t\tdefer func() {\n\t\t\tc.cond.L.Unlock()\n\t\t\tc.cond.Broadcast()\n\t\t}()\n\t\tfor ticket != c.finTicket {\n\t\t\tc.cond.Wait()\n\t\t}\n\t\tc.values <- rets\n\t\tc.finTicket++\n\t}(c.totalTicket)\n\tc.totalTicket++\n\treturn nil\n}",
"func compute(fn func(float64, float64) float64) float64 {\n\treturn fn(3, 4)\n}",
"func Exec(f func() error) error {\n\tdone := make(chan error, 1)\n\tgfxfunc <- func() {\n\t\tdone <- f()\n\t}\n\terr := <-done\n\treturn err\n}",
"func simplyRun(f func()) {\n\tgo f()\n}",
"func (s *System) Evaluate(state []float32) []float32 {\n\tif len(state) > 0 {\n\t\treturn s.function(state, s.parametersVector)\n\t} else {\n\t\treturn s.function(s.stateVector, s.parametersVector)\n\t}\n}",
"func (e *Endpoint) Call(function string, args interface{}, reply interface{}) error {\n\tcall := <-e.Go(function, args, reply, make(chan *rpc.Call, 1)).Done\n\treturn call.Error\n}",
"func SafeExecute(executeFunction Execute) (interface{}, error) {\n\tconn, err := getgrpcConnection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer conn.Close()\n\n\treturn executeFunction(conn)\n}",
"func Maybe(fn func(s State)) {\n\t// check if simulation is enabled\n\tif atomic.LoadInt32(&enabled) == 0 {\n\t\t// execute in normal state\n\t\tfn(&noop{})\n\t\treturn\n\t}\n\n\t// execute in simulated state\n\tfn(defaultState)\n}",
"func (server *Server) asyncCallService(conn *ConnDriver, seq uint64, service *service, methodType *methodType, argv, replyv reflect.Value) {\n\tserver.replyCmd(conn, seq, nil, CmdTypeAck)\n\tfunction := methodType.method.Func\n\t// Invoke the method, providing a new value for the reply.\n\tfunction.Call([]reflect.Value{service.rcvr, argv, replyv})\n\treturn\n}",
"func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash string,\n\tqueryOpts *structs.QueryOptions, fn agentLocalBlockingFunc) (interface{}, error) {\n\n\t// If we are not blocking we can skip tracking and allocating - nil WatchSet\n\t// is still valid to call Add on and will just be a no op.\n\tvar ws memdb.WatchSet\n\tvar timeout *time.Timer\n\n\tif hash != \"\" {\n\t\t// TODO(banks) at least define these defaults somewhere in a const. Would be\n\t\t// nice not to duplicate the ones in consul/rpc.go too...\n\t\twait := queryOpts.MaxQueryTime\n\t\tif wait == 0 {\n\t\t\twait = 5 * time.Minute\n\t\t}\n\t\tif wait > 10*time.Minute {\n\t\t\twait = 10 * time.Minute\n\t\t}\n\t\t// Apply a small amount of jitter to the request.\n\t\twait += lib.RandomStagger(wait / 16)\n\t\ttimeout = time.NewTimer(wait)\n\t}\n\n\tfor {\n\t\t// Must reset this every loop in case the Watch set is already closed but\n\t\t// hash remains same. In that case we'll need to re-block on ws.Watch()\n\t\t// again.\n\t\tws = memdb.NewWatchSet()\n\t\tcurHash, curResp, err := fn(ws)\n\t\tif err != nil {\n\t\t\treturn curResp, err\n\t\t}\n\t\t// Return immediately if there is no timeout, the hash is different or the\n\t\t// Watch returns true (indicating timeout fired). Note that Watch on a nil\n\t\t// WatchSet immediately returns false which would incorrectly cause this to\n\t\t// loop and repeat again, however we rely on the invariant that ws == nil\n\t\t// IFF timeout == nil in which case the Watch call is never invoked.\n\t\tif timeout == nil || hash != curHash || ws.Watch(timeout.C) {\n\t\t\tresp.Header().Set(\"X-Consul-ContentHash\", curHash)\n\t\t\treturn curResp, err\n\t\t}\n\t\t// Watch returned false indicating a change was detected, loop and repeat\n\t\t// the callback to load the new value. If agent sync is paused it means\n\t\t// local state is currently being bulk-edited e.g. config reload. In this\n\t\t// case it's likely that local state just got unloaded and may or may not be\n\t\t// reloaded yet. Wait a short amount of time for Sync to resume to ride out\n\t\t// typical config reloads.\n\t\tif syncPauseCh := s.agent.syncPausedCh(); syncPauseCh != nil {\n\t\t\tselect {\n\t\t\tcase <-syncPauseCh:\n\t\t\tcase <-timeout.C:\n\t\t\t}\n\t\t}\n\t}\n}",
"func (s *DefaultServer) Invoke(input *Request) (interface{}, error) {\n\tp, err := json.Marshal(input)\n\n\ttimeout := time.Duration(0)\n\tif deadline, ok := input.Context[\"deadline\"]; ok {\n\t\tif dl, ok := deadline.(string); ok {\n\t\t\tt, err := time.Parse(time.RFC3339, dl)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, BadRequestError(fmt.Sprintf(\"Unable to parse deadline: %s\", err))\n\t\t\t}\n\t\t\ttimeout = time.Until(t)\n\t\t}\n\t}\n\n\tif timeout < 0 {\n\t\treturn nil, TimeoutError(\"Did not invoke, already exceeded timeout\")\n\t}\n\n\ts.client.Timeout = timeout\n\n\ts.resetStreams()\n\n\turl := fmt.Sprintf(\"http://127.0.0.1:%d\", s.GetPort())\n\tresp, err := s.client.Post(url, \"application/json\", bytes.NewBuffer(p))\n\tif err == nil {\n\t\tdefer resp.Body.Close()\n\t}\n\n\tif err != nil {\n\t\tif isTimeout(err) {\n\t\t\treturn nil, TimeoutError(\"Function execution exceeded the timeout\")\n\t\t} else if isConnectionRefused(err) {\n\t\t\treturn nil, ConnectionRefusedError(url)\n\t\t} else {\n\t\t\treturn nil, UnknownSystemError(err.Error())\n\t\t}\n\t}\n\n\tif resp.StatusCode >= 400 {\n\t\tvar e Error\n\t\tjson.NewDecoder(resp.Body).Decode(&e)\n\t\treturn nil, FunctionServerError{\n\t\t\tAPIError: e,\n\t\t}\n\t}\n\n\tvar result interface{}\n\tif err := json.NewDecoder(resp.Body).Decode(&result); err != nil {\n\t\treturn nil, InvalidResponsePayloadError(err.Error())\n\t}\n\n\treturn result, nil\n}",
"func (dtw dispatchTaskWrapper) Func() func(id int64) {\n return dtw.t.Func()\n}",
"func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tswitch function {\n\n\tcase \"execute\":\n\n\t\tif len(args) < 1 {\n\t\t\treturn nil, errors.New(\"execute operation must include single argument, the base64 encoded form of a bitcoin transaction\")\n\t\t}\n\t\ttxDataBase64 := args[0]\n\t\ttxData, err := base64.StdEncoding.DecodeString(txDataBase64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error decoding TX as base64: %s\", err)\n\t\t}\n\n\t\tutxo := util.MakeUTXO(MakeChaincodeStore(stub))\n\t\texecResult, err := utxo.Execute(txData)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error executing TX: %s\", err)\n\t\t}\n\n\t\tfmt.Printf(\"\\nExecResult: Coinbase: %t, SumInputs %d, SumOutputs %d\\n\\n\", execResult.IsCoinbase, execResult.SumPriorOutputs, execResult.SumCurrentOutputs)\n\n\t\tif execResult.IsCoinbase == false {\n\t\t\tif execResult.SumCurrentOutputs > execResult.SumPriorOutputs {\n\t\t\t\treturn nil, fmt.Errorf(\"sumOfCurrentOutputs > sumOfPriorOutputs: sumOfCurrentOutputs = %d, sumOfPriorOutputs = %d\", execResult.SumCurrentOutputs, execResult.SumPriorOutputs)\n\t\t\t}\n\t\t}\n\n\t\treturn nil, nil\n\n\tdefault:\n\t\treturn nil, errors.New(\"Unsupported operation\")\n\t}\n\n}",
"func (lock *lockedBool) execute(task func() error) (err error) {\n\tdefer (func() {\n\t\tr := recover()\n\t\tif r != nil {\n\t\t\tif evalue, ok := r.(error); ok {\n\t\t\t\terr = evalue\n\t\t\t} else {\n\t\t\t\terr = errors.Fail(ErrBadHandler{}, nil, fmt.Sprintf(\"Unknown failure to execute task: %s\", r))\n\t\t\t}\n\t\t}\n\t})()\n\treturn task()\n}",
"func (s *SparkCoreAdaptor) Function(name string, args string) (val int, err error) {\n\tparams := url.Values{\n\t\t\"args\": {args},\n\t\t\"access_token\": {s.AccessToken},\n\t}\n\n\turl := fmt.Sprintf(\"%s/%s\", s.deviceURL(), name)\n\tresp, err := s.requestToSpark(\"POST\", url, params)\n\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tval = int(resp[\"return_value\"].(float64))\n\treturn\n}",
"func (this *Protocol) call(peerId PeerId, fn string, args interface{}, reply interface{}) bool {\n\tc := make(chan bool, 1)\n\n\tgo func() {\n\t\tclient, errx := rpc.Dial(\"tcp\", peerId.String())\n\t\tif errx != nil {\n\t\t\tc <- false\n\t\t\treturn\n\t\t}\n\t\tdefer client.Close()\n\n\t\terr := client.Call(\"Protocol.Handle\"+fn, args, reply)\n\t\tif err == nil {\n\t\t\tc <- true\n\t\t\treturn\n\t\t}\n\n\t\tLog.Warn.Println(err)\n\t\tc <- false\n\t}()\n\n\tselect {\n\t\tcase b := <- c:\n\t\t\treturn b\n\t\tcase <- time.After(time.Second):\n\t\t\treturn false\n\t}\n}",
"func Call(f func()) {\n\tcheckRun()\n\tdone := make(chan struct{})\n\tcallQueue <- func() {\n\t\tf()\n\t\tdone <- struct{}{}\n\t}\n\t<-done\n}",
"func (dt *StdTask) Func() func(id int64) {\n return dt.F\n}",
"func (c *conn) get(fn func([]byte)) (err error) {\n\t// Let's ensure our connection is not closed or idle\n\tswitch c.state {\n\tcase stateClosed:\n\t\treturn errors.ErrIsClosed\n\tcase stateIdle:\n\t\treturn ErrIsIdle\n\t}\n\n\t// Read message length\n\tif c.mlen, err = c.l.Read(c.nc); err != nil {\n\t\treturn\n\t}\n\n\t// Read message\n\tif err = c.rbuf.ReadN(c.nc, c.mlen); err != nil {\n\t\treturn\n\t}\n\n\tif fn != nil {\n\t\t// Please do not use the bytes outside of the called functions\\\n\t\t// I'll be a sad panda if you create a race condition\n\t\tfn(c.rbuf.Bytes())\n\t}\n\n\treturn\n}",
"func Synchronous(b bool) func(e *Endpoint) {\n\treturn func(e *Endpoint) { e.synchronous = b }\n}",
"func (f *functionQuery) Evaluate(t iterator) interface{} {\n\treturn f.Func(f.Input, t)\n}",
"func (ast *Func) Eval(env *Env, ctx *Codegen, gen *ssa.Generator) (\n\tssa.Value, bool, error) {\n\treturn ssa.Undefined, false, nil\n}",
"func (s *Server) RunInServer(fn func()) {\n\tdone := make(chan struct{})\n\ts.funcs <- func() {\n\t\tfn()\n\t\tdone <- struct{}{}\n\t}\n\t<-done\n}",
"func (asyncResult *asyncResult) get() interface{} {\n\t<-asyncResult.done\n\treturn asyncResult.result\n}",
"func (s *System) Function() func(state []float32, parameters []float32) []float32 {\n\treturn s.function\n}",
"func (c *Callee) InvokeAndStoreResult(cp *msg.CallPayload, fn Thunk) error {\n\tttl := cp.TTLAfterRead\n\tstart := time.Now()\n\n\tv, err := fn(cp)\n\tif remain := ttl - time.Now().Sub(start); remain > 0 {\n\t\t// register the result\n\t\treturn c.storeResult(cp, v, err, remain)\n\t}\n\treturn ErrCallExpired\n}",
"func (e *EvaluatedFunctionExpression) Execute(ctx *Context, args *Values) Value {\n\tctx.SetParent(e.this) // this is how closure works\n\treturn e.fn.Execute(ctx, args)\n}",
"func (sf JobFunc) Run() {\n\tsf()\n}",
"func CallNonBlock(f func()) {\n\tcheckRun()\n\tcallQueue <- f\n}",
"func (s *GRPCServer) Request(ctx context.Context, d *api.Data) (*api.Response, error) {\n\n\ts.f.RLock()\n\tdefer s.f.RUnlock()\n\n\thandler, ok := s.f.hosts[d.FunctionIdentifier]\n\n\tif !ok {\n\t\treturn nil, status.Errorf(codes.NotFound,\n\t\t\t\"No such function\")\n\t}\n\n\treq_body := d.Data\n\n\t// call function and return results\n\tresp, err := http.Post(\"http://\"+handler[rand.Intn(len(handler))]+\":8000/fn\", \"application/binary\", strings.NewReader(req_body))\n\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Unavailable,\n\t\t\t\"Invalid response from function handler\")\n\t}\n\n\tdefer resp.Body.Close()\n\tres_body, err := ioutil.ReadAll(resp.Body)\n\n\tif err != nil {\n\t\treturn nil, status.Errorf(codes.Unavailable,\n\t\t\t\"Invalid response from function handler\")\n\n\t}\n\n\treturn &api.Response{\n\t\tResponse: string(res_body),\n\t}, nil\n}",
"func handleAsync(req *netm.Request) netm.Response {\n\treturn netm.Response{Status: &netm.Status{Type: netm.Status_NO_ERROR, Message: \"Success\"}}\n}",
"func (c *Cache) Get(key string, f VEFunc) ([]byte, error) {\n\tc.Lock()\n\te := c.cache[key]\n\tif e == nil || e.time+c.time < time.Now().UnixNano() {\n\t\te = &entry{ready: make(chan struct{})}\n\t\tc.cache[key] = e\n\t\tc.Unlock()\n\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\tclose(e.ready)\n\t\t\t}\n\t\t}()\n\t\te.res.value, e.res.err = f()\n\t\te.time = time.Now().UnixNano()\n\t\tclose(e.ready)\n\t} else {\n\t\tc.Unlock()\n\t\t<-e.ready\n\t}\n\treturn e.res.value, e.res.err\n}",
"func Func() {}",
"func (ck *Clerk) sendRPC(srv *labrpc.ClientEnd, function string, goArgs interface{}, goReply interface{}) (ok_out bool){\n\n\tRPC_returned := make(chan bool)\n\tgo func() {\n\t\tok := srv.Call(function, goArgs, goReply)\n\n\t\tRPC_returned <- ok\n\t}()\n\n\t//Allows for RPC Timeout\n\tok_out = false\n\tselect {\n\tcase <-time.After(time.Millisecond * 300):\n\t \tok_out = false\n\tcase ok_out = <-RPC_returned:\n\t}\n\n\treturn ok_out\n}",
"func TryCall(f reflect.Value, args []reflect.Value) (results []reflect.Value, err error) {\n\t//defer func() {\n\t//\t// Recover from panic and set err.\n\t//\tif e := recover(); e != nil {\n\t//\t\tswitch e := e.(type) {\n\t//\t\tdefault:\n\t//\t\t\terr = errors.New(\"Invoking task caused a panic\")\n\t//\n\t//\t\tcase error:\n\t//\t\t\terr = e\n\t//\t\tcase string:\n\t//\t\t\terr = errors.New(e)\n\t//\t\t}\n\t//\t}\n\t//}()\n\n\tresults = f.Call(args)\n\n\tif len(results) < 2 {\n\t\tlog.Fatalln(fmt.Errorf(\"warning!!! wrong async func define\"))\n\t}\n\n\t// If an error was returned by the task func, propagate it\n\t// to the caller via err.\n\tif !results[len(results)-1].IsNil() {\n\t\treturn nil, results[1].Interface().(error)\n\t}\n\treturn\n}",
"func CallOnUIGoroutine(f func()) {\n\tdriver.CallOnUIGoroutine(f)\n}",
"func (f *Function) Invoke(msg *runtime.Message) (*runtime.Message, error) {\n\titem, err := f.pool.BorrowObject(context.Background())\n\tif err != nil {\n\t\treturn nil, errors.Trace(err)\n\t}\n\tfl := item.(*funclet)\n\tres, err := fl.handle(msg)\n\tif err != nil {\n\t\tf.log.WithError(err).Error(\"Failed to talk with function instance\")\n\t\terr1 := f.pool.InvalidateObject(context.Background(), item)\n\t\tif err1 != nil {\n\t\t\tfl.Close()\n\t\t\tf.log.WithError(err).Error(\"Failed to invalidate function instance\")\n\t\t}\n\t\treturn nil, errors.Trace(err)\n\t}\n\tf.pool.ReturnObject(context.Background(), item)\n\treturn res, nil\n\n}",
"func (t *TaskChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n\tfunction, args := stub.GetFunctionAndParameters()\n\tfmt.Println(\"invoke is running \" + function)\n\n\tif function == \"regist\" {\n\t\treturn t.regist(stub, args)\n\t} else if function == \"pay\" {\n\t\treturn t.pay(stub, args)\n\t} else if function == \"pendingPay\" {\n\t\treturn t.pendingPay(stub, args)\n } else if function == \"confirmPay\" {\n\t\treturn t.confirmPay(stub, args)\n } else if function == \"getBalance\" {\n\t\treturn t.getBalance(stub, args)\n\t} else if function == \"queryPayTxByTaskId\" {\n\t\treturn t.queryPayTxByTaskId(stub, args)\n\t} else if function == \"queryPayTxByPayer\" {\n\t\treturn t.queryPayTxByPayer(stub, args)\n\t} else if function == \"queryPayTxByPayee\" {\n\t\treturn t.queryPayTxByPayee(stub, args)\n\t} else if function == \"queryMembers\" {\n\t\treturn t.queryMembers(stub)\n\t} else {\n\t\treturn shim.Error(\"Function \" + function + \" doesn't exits, make sure function is right!\")\n\t}\n}",
"func FunctionValue(c Callable) Value {\n\treturn Value{iface: c}\n}",
"func (idem *Idempotent) RunSync(f func()) bool {\n\tif !idem.initialised {\n\t\tpanic(\"Idempotent task runner not initialised\")\n\t}\n\n\tselect {\n\tcase idem.queue <- struct{}{}:\n\tdefault:\n\t\treturn false\n\t}\n\t<-idem.ready\n\t<-idem.queue\n\tdefer func() {\n\t\tidem.ready <- struct{}{}\n\t}()\n\tf()\n\treturn true\n}",
"func (s *server) callFunc(r io.Reader, receiverFunc interface{}) (interface{}, error) {\n\n\t// Resolve function's type\n\tfuncType := reflect.TypeOf(receiverFunc)\n\n\t// Deserialize arguments read from procedure call body\n\tfuncArg := reflect.New(funcType.In(0)).Interface()\n\n\tif _, err := xdr.Unmarshal(r, &funcArg); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Call function\n\tfuncValue := reflect.ValueOf(receiverFunc)\n\tfuncArgValue := reflect.Indirect(reflect.ValueOf(funcArg))\n\tfuncRetValue := reflect.New(funcType.In(1).Elem())\n\n\ts.log.Debugf(\"-> %+v\", funcArgValue)\n\tfuncRetError := funcValue.Call([]reflect.Value{funcArgValue, funcRetValue})[0]\n\ts.log.Debugf(\"<- %+v\", funcRetValue)\n\n\tif !funcRetError.IsNil() {\n\t\treturn nil, funcRetError.Interface().(error)\n\t}\n\n\t// Return result computed by the actual function. This is what should be sent back to the remote\n\t// caller.\n\treturn funcRetValue.Interface(), nil\n}",
"func (s *Server) blockingQuery(queryOpts *structs.QueryOptions, queryMeta *structs.QueryMeta,\n\tfn queryFn) error {\n\tvar timeout *time.Timer\n\n\t// Fast path right to the non-blocking query.\n\tif queryOpts.MinQueryIndex == 0 {\n\t\tgoto RUN_QUERY\n\t}\n\n\t// Restrict the max query time, and ensure there is always one.\n\tif queryOpts.MaxQueryTime > maxQueryTime {\n\t\tqueryOpts.MaxQueryTime = maxQueryTime\n\t} else if queryOpts.MaxQueryTime <= 0 {\n\t\tqueryOpts.MaxQueryTime = defaultQueryTime\n\t}\n\n\t// Apply a small amount of jitter to the request.\n\tqueryOpts.MaxQueryTime += lib.RandomStagger(queryOpts.MaxQueryTime / jitterFraction)\n\n\t// Setup a query timeout.\n\ttimeout = time.NewTimer(queryOpts.MaxQueryTime)\n\tdefer timeout.Stop()\n\nRUN_QUERY:\n\t// Update the query metadata.\n\ts.setQueryMeta(queryMeta)\n\n\t// If the read must be consistent we verify that we are still the leader.\n\tif queryOpts.RequireConsistent {\n\t\tif err := s.consistentRead(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Run the query.\n\tmetrics.IncrCounter([]string{\"rpc\", \"query\"}, 1)\n\n\t// Operate on a consistent set of state. This makes sure that the\n\t// abandon channel goes with the state that the caller is using to\n\t// build watches.\n\tstate := s.fsm.State()\n\n\t// We can skip all watch tracking if this isn't a blocking query.\n\tvar ws memdb.WatchSet\n\tif queryOpts.MinQueryIndex > 0 {\n\t\tws = memdb.NewWatchSet()\n\n\t\t// This channel will be closed if a snapshot is restored and the\n\t\t// whole state store is abandoned.\n\t\tws.Add(state.AbandonCh())\n\t}\n\n\t// Block up to the timeout if we didn't see anything fresh.\n\terr := fn(ws, state)\n\t// Note we check queryOpts.MinQueryIndex is greater than zero to determine if\n\t// blocking was requested by client, NOT meta.Index since the state function\n\t// might return zero if something is not initialized and care wasn't taken to\n\t// handle that special case (in practice this happened a lot so fixing it\n\t// systematically here beats trying to remember to add zero checks in every\n\t// state method). We also need to ensure that unless there is an error, we\n\t// return an index > 0 otherwise the client will never block and burn CPU and\n\t// requests.\n\tif err == nil && queryMeta.Index < 1 {\n\t\tqueryMeta.Index = 1\n\t}\n\tif err == nil && queryOpts.MinQueryIndex > 0 && queryMeta.Index <= queryOpts.MinQueryIndex {\n\t\tif expired := ws.Watch(timeout.C); !expired {\n\t\t\t// If a restore may have woken us up then bail out from\n\t\t\t// the query immediately. This is slightly race-ey since\n\t\t\t// this might have been interrupted for other reasons,\n\t\t\t// but it's OK to kick it back to the caller in either\n\t\t\t// case.\n\t\t\tselect {\n\t\t\tcase <-state.AbandonCh():\n\t\t\tdefault:\n\t\t\t\tgoto RUN_QUERY\n\t\t\t}\n\t\t}\n\t}\n\treturn err\n}",
"func (th *Thread) Call(fn Callable, args ...Value) Value {\n\treturn th.CallThis(fn, nil, args...)\n}",
"func isPrimeAsync(number int64, channel chan PrimeResult) {\n\n\tresult:= new (PrimeResult)\n\tresult.number= number\n\tresult.prime= isPrime(number)\n\tchannel <- *result\n}",
"func (self *Worker) Get() interface{} { return `some value` }",
"func Execute(\n\tctx context.Context,\n\thandler Handler,\n\tabortHandler AbortHandler,\n\trequest interface{}) Awaiter {\n\ttask := &task{\n\t\trequest: request,\n\t\thandler: handler,\n\t\tabortHandler: abortHandler,\n\t\tresultQ: make(chan Response, 1),\n\t\trunning: true,\n\t}\n\tgo task.run(ctx) // run handler asynchronously\n\treturn task\n}",
"func NewValue(f func() interface{}) *Value {\n\tresult := &Value{nil, make(chan bool)}\n\tgo func() {\n\t\tresult.value = f()\n\t\tclose(result.ready)\n\t}()\n\treturn result\n}",
"func RunCallClient(caddress string, txnid int64) int64 {\n\t// Set up a connection to the server.\n\tconn, err := grpc.Dial(caddress, grpc.WithInsecure(), grpc.WithBlock())\n\tif err != nil {\n\t\tlog.Fatalf(\"did not connect: %v\", err)\n\t}\n\tdefer conn.Close()\n\tc := NewExecuterCallerClient(conn)\n\tctx, cancel := context.WithTimeout(context.Background(), time.Second)\n\tdefer cancel()\n\tirpccallreq := IrpcCallReq{Txnid: txnid}\n\tr, err := c.ExecuterCall(ctx, &irpccallreq)\n\tif err != nil {\n\t\tlog.Fatalf(\"could not call: %v\", err)\n\t}\n\treturn r.IsSuc\n}",
"func Atomically(fn func(*Tx)) {\nretry:\n\t// run the transaction\n\ttx := &Tx{\n\t\treads: make(map[*Var]uint64),\n\t\twrites: make(map[*Var]interface{}),\n\t}\n\tif catchRetry(fn, tx) {\n\t\t// wait for one of the variables we read to change before retrying\n\t\ttx.wait()\n\t\tgoto retry\n\t}\n\t// verify the read log\n\tglobalLock.Lock()\n\tif !tx.verify() {\n\t\tglobalLock.Unlock()\n\t\tgoto retry\n\t}\n\t// commit the write log and broadcast that variables have changed\n\tif len(tx.writes) > 0 {\n\t\ttx.commit()\n\t\tglobalCond.Broadcast()\n\t}\n\tglobalLock.Unlock()\n}",
"func runHandle(fn func() error) {\n\tif err := fn(); err != nil {\n\t\tlog.Errorf(\"An error occurred when processing a VM update: %v\\n\", err)\n\t}\n}",
"func g1() {\n\tif x := f(func() {\n\t\tif true {}\n\t}); true {\n\t\t_ = x;\n\t}\n}",
"func HTTPClientCallSync(ctx context.Context, req *http.Request, client *http.Client, command string, config *hystrix.CommandConfig, resolveAddress bool) (*http.Response, error) {\n\toutputChan, errorsChan := HTTPClientCall(ctx, req, client, command, config, resolveAddress)\n\tselect {\n\tcase resp := <-outputChan:\n\t\treturn resp, nil\n\tcase err := <-errorsChan:\n\t\treturn nil, err\n\t}\n}",
"func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface) pb.Response {\n\tfunction, args := stub.GetFunctionAndParameters()\n\tfmt.Println(\"invoke is running \" + function)\n\n\t// Handle different functions\n\tif function == \"createTaskMatching\" { //create a new taskmatching\n\t\treturn t.createTaskMatching(stub, args)\n\t} else if function == \"readTaskMatching\" { //reads a taskmatching\n\t\treturn t.readTaskMatching(stub, args)\n\t} else if function == \"Initialize\" { //initialize the network\n\t\treturn t.Initialize(stub)\n\t} else if function == \"calculateTaskMatching\" { //calculate a taskmatching\n\t\tt.calculateTaskMatching(stub, args)\n\n\t\tif t.allPeersDone(stub) {\n\t\t\treturn t.setBestSol(stub)\n\t\t} else {\n\t\t\treturn shim.Success(nil)\n\t\t}\n\t}\n\tfmt.Println(\"invoke did not find func: \" + function) //error\n\treturn shim.Error(\"Received unknown function invocation\")\n}",
"func (sv *SyncVal) Update(f func(interface{}) (interface{}, error)) error {\n\tsv.lock.Lock()\n\tdefer sv.lock.Unlock()\n\n\tval, err := f(sv.val)\n\tif err == nil {\n\t\tsv.val = val\n\t}\n\treturn err\n}",
"func (s *Service) Exec(job interface{}) {\n\n\ts.Lock.RLock()\n\t// In case this routine has to wait for the calculation routine:\n\tif s.InProgress[job] {\n\n\t\t// Make a new channel (reader)\n\t\ts.Lock.RUnlock()\n\t\tresponse := make(chan interface{})\n\t\tdefer close(response)\n\n\t\t// Append the reader to its respective queue (regarding to the job)\n\t\ts.Lock.Lock()\n\t\ts.PendingReaders[job] = append(s.PendingReaders[job], response)\n\t\ts.Lock.Unlock()\n\n\t\t// The reader now only has to wait for the result of the routine\n\t\t// that is calculating the result for the required job\n\t\tfmt.Printf(\"Waiting for Response job: %d\\n\", job)\n\t\tresp := <-response\n\t\tfmt.Printf(\"Response Done, received %d\\n\", resp)\n\t\treturn\n\t}\n\n\t// In case this routine is the one to perform the calculation:\n\n\ts.Lock.RUnlock()\n\ts.Lock.Lock()\n\n\t// Flag to tell other routines that the required job is being calculated\n\ts.InProgress[job] = true\n\ts.Lock.Unlock()\n\n\tfmt.Printf(\"Performing expensive function for job %d\\n\", job)\n\tresult := s.f(job)\n\n\t// Once finished the function call, recall the channels\n\t// to send the result to\n\ts.Lock.RLock()\n\tpendingWorkers, inProgress := s.PendingReaders[job]\n\ts.Lock.RUnlock()\n\n\t// Send the message to all routines via channels\n\tif inProgress {\n\t\tfor _, pendingWorker := range pendingWorkers {\n\t\t\tpendingWorker <- result\n\t\t}\n\t\tfmt.Printf(\"Result sent - all pending workers ready job:%d\\n\", job)\n\t}\n\n\t// Free in-progess flag and delete all readers\n\ts.Lock.Lock()\n\ts.InProgress[job] = false\n\ts.PendingReaders[job] = make([]chan interface{}, 0)\n\ts.Lock.Unlock()\n}",
"func (f *FunctionExpression) Execute(ctx *Context, args *Values) Value {\n\tf.params.BindArguments(ctx, args.values...)\n\tf.body.Execute(ctx)\n\tif ctx.hasret {\n\t\treturn ctx.retval\n\t}\n\treturn ValueFromNil()\n}",
"func DemonstrateAsyncCall(wg *sync.WaitGroup) {\r\n\tvar consumer Consumer\r\n\tconsumer.Init()\r\n\tconsumer.Subscribe(waitOnResult, waitOnError, waitOnCompletion)\r\n\tconsumer.ExecuteAsyncOp(wg)\r\n\tfmt.Println(\"Caller continuing ..\")\r\n\twg.Wait()\r\n}",
"func (vm *VirtualMachine) do(f func(vm *VirtualMachine) error) error {\n\tif vm.Identification == \"\" || vm.Status != \"active\" {\n\t\treturn L.Error(\"vm_not_ready\")\n\t} else if vm.Suspended != \"no\" {\n\t\tif vm.Suspended == \"auto\" {\n\t\t\treturn L.Error(\"vm_suspended_auto\")\n\t\t} else if vm.Suspended == \"manual\" {\n\t\t\treturn L.Error(\"vm_suspended_manual\")\n\t\t} else {\n\t\t\treturn L.Error(\"vm_suspended\")\n\t\t}\n\t} else if vm.TaskPending {\n\t\treturn L.Error(\"vm_has_pending_task\")\n\t}\n\n\treturn f(vm)\n}",
"func Executor(fn Func) error {\n\treturn ExecutorWithPolicyType(StandardPolicy, fn)\n}",
"func (e Executor) Submit(ctx context.Context, f Func) (interface{}, error) {\n\tresultChan := make(chan *result, 1)\n\tselect {\n\tcase <-ctx.Done():\n\t\treturn nil, context.Canceled\n\tcase e <- &Request{\n\t\tFunc: f,\n\t\tContext: ctx,\n\t\tresult: resultChan,\n\t}:\n\t}\n\tres, ok := <-resultChan\n\tif !ok {\n\t\tpanic(\"cgc: result channel closed without any results\")\n\t}\n\treturn res.val, res.err\n}"
] | [
"0.54884326",
"0.53872144",
"0.5377912",
"0.5072936",
"0.5055439",
"0.5038554",
"0.5010995",
"0.50099576",
"0.49985412",
"0.4961088",
"0.49546096",
"0.4948906",
"0.49107867",
"0.4908908",
"0.4825101",
"0.47524828",
"0.474948",
"0.4739776",
"0.4734573",
"0.46940377",
"0.4680192",
"0.46787554",
"0.4664012",
"0.46620664",
"0.46616304",
"0.4645797",
"0.46358377",
"0.46230006",
"0.46035388",
"0.46029854",
"0.45879382",
"0.45728076",
"0.4556773",
"0.45565963",
"0.455324",
"0.45382333",
"0.45317486",
"0.45259267",
"0.45122898",
"0.44927356",
"0.4490071",
"0.44690576",
"0.4466189",
"0.44613057",
"0.4459366",
"0.44511732",
"0.444498",
"0.44387215",
"0.4409115",
"0.4407977",
"0.44041494",
"0.4394756",
"0.4373314",
"0.43669856",
"0.43638125",
"0.4363804",
"0.4361599",
"0.43515107",
"0.4348244",
"0.4347811",
"0.43477386",
"0.4346412",
"0.43447536",
"0.43422338",
"0.43341255",
"0.4332982",
"0.4326866",
"0.43191957",
"0.43174064",
"0.43162572",
"0.43141288",
"0.43034834",
"0.43010303",
"0.42992857",
"0.4282921",
"0.42778054",
"0.42718044",
"0.42694238",
"0.42598718",
"0.42456642",
"0.4243933",
"0.423937",
"0.4236155",
"0.42346328",
"0.42337114",
"0.4230582",
"0.422922",
"0.42273465",
"0.4223186",
"0.42211264",
"0.42173982",
"0.42157394",
"0.42072108",
"0.42002043",
"0.41980723",
"0.41887832",
"0.41747332",
"0.41689968",
"0.41679892",
"0.4166137"
] | 0.4816828 | 15 |
Sync evaluates the given function using the state of the server as its only argument; it returns after the function has been evaluated. | func (s *Server) Sync(fn func(interface{}) interface{}) interface{} {
ch := make(chan interface{}, 1)
s.Async(func(message interface{}) {
defer close(ch)
ch <- fn(message)
})
return <-ch
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Sync(f func()) {\n\tsyncChan <- f\n}",
"func TestSyncFunction(t *testing.T) {\n\tmapper, err := NewChannelMapper(`function(doc) {sync(\"foo\", \"bar\"); sync(\"baz\")}`)\n\tassertNoError(t, err, \"Couldn't create mapper\")\n\tchannels, err := mapper.callMapper(`{\"channels\": []}`)\n\tassertNoError(t, err, \"callMapper failed\")\n\tassert.DeepEquals(t, channels, []string{\"foo\", \"bar\", \"baz\"})\n}",
"func (server *Server) Sync() {\n\n}",
"func (o *SyncStore[T]) Exec(key string, f func() (T, error)) (T, error) {\n\tval, err := o.sf.Do(key, func() (_ interface{}, err error) {\n\t\t// trap any runtime error due to synchronization issues.\n\t\tdefer func() {\n\t\t\tif rErr := recover(); rErr != nil {\n\t\t\t\terr = retrieveError(key, rErr)\n\t\t\t}\n\t\t}()\n\t\tv, err, ok := o.results.Load(key)\n\t\tif ok {\n\t\t\treturn v, err\n\t\t}\n\t\tv, err = f()\n\t\to.results.Store(key, v, err)\n\t\treturn v, err\n\t})\n\tvar defaultT T\n\tif err != nil {\n\t\treturn defaultT, err\n\t}\n\tswitch t := val.(type) {\n\tcase error:\n\t\treturn defaultT, t\n\tcase T:\n\t\treturn t, nil\n\tdefault:\n\t\treturn defaultT, err\n\t}\n}",
"func Synchronous(b bool) func(e *Endpoint) {\n\treturn func(e *Endpoint) { e.synchronous = b }\n}",
"func (idem *Idempotent) RunSync(f func()) bool {\n\tif !idem.initialised {\n\t\tpanic(\"Idempotent task runner not initialised\")\n\t}\n\n\tselect {\n\tcase idem.queue <- struct{}{}:\n\tdefault:\n\t\treturn false\n\t}\n\t<-idem.ready\n\t<-idem.queue\n\tdefer func() {\n\t\tidem.ready <- struct{}{}\n\t}()\n\tf()\n\treturn true\n}",
"func CallVal(f func() interface{}) interface{} {\n\tcheckRun()\n\trespChan := make(chan interface{})\n\tcallQueue <- func() {\n\t\trespChan <- f()\n\t}\n\treturn <-respChan\n}",
"func (t TaskFunc) Execute() { t() }",
"func (_m *Syncer) Execute() {\n\t_m.Called()\n}",
"func (e *Ethereum) Syncing() (*ResponseSyncing, bool, error) {\n\tvar (\n\t\tresIF interface{}\n\t\tresMap map[string]string\n\t)\n\n\terr := e.rpcClient.CallContext(e.ctx, &resIF, \"eth_syncing\")\n\tif err != nil {\n\t\treturn nil, false, errors.Wrap(err, \"fail to call client.CallContext(eth_syncing)\")\n\t}\n\n\t// try to cast to bool\n\tbRes, ok := resIF.(bool)\n\tif !ok {\n\t\t// interface can't not be casted to type map\n\t\t// resMap, ok = resIF.(map[string]string)\n\t\terr := e.rpcClient.CallContext(e.ctx, &resMap, \"eth_syncing\")\n\t\tif err != nil {\n\t\t\treturn nil, false, errors.Wrap(err, \"fail to call client.CallContext(eth_syncing)\")\n\t\t}\n\t\t//grok.Value(resMap)\n\t\t//value map[string]string = [\n\t\t//\tstartingBlock string = \"0x606c\" 6\n\t\t//\tcurrentBlock string = \"0x95ac\" 6\n\t\t//\thighestBlock string = \"0x294545\" 8\n\t\t//\tknownStates string = \"0x2084c\" 7\n\t\t//\tpulledStates string = \"0x1eb12\" 7\n\t\t//]\n\n\t\tstartingBlock, err := hexutil.DecodeBig(resMap[\"startingBlock\"])\n\t\tif err != nil {\n\t\t\treturn nil, false, errors.New(\"response is invalid\")\n\t\t}\n\t\tcurrentBlock, err := hexutil.DecodeBig(resMap[\"currentBlock\"])\n\t\tif err != nil {\n\t\t\treturn nil, false, errors.New(\"response is invalid\")\n\t\t}\n\t\thighestBlock, err := hexutil.DecodeBig(resMap[\"highestBlock\"])\n\t\tif err != nil {\n\t\t\treturn nil, false, errors.New(\"response is invalid\")\n\t\t}\n\t\tknownStates, err := hexutil.DecodeBig(resMap[\"knownStates\"])\n\t\tif err != nil {\n\t\t\treturn nil, false, errors.New(\"response is invalid\")\n\t\t}\n\t\tpulledStates, err := hexutil.DecodeBig(resMap[\"pulledStates\"])\n\t\tif err != nil {\n\t\t\treturn nil, false, errors.New(\"response is invalid\")\n\t\t}\n\n\t\tresSync := ResponseSyncing{\n\t\t\tStartingBlock: startingBlock.Int64(),\n\t\t\tCurrentBlock: currentBlock.Int64(),\n\t\t\tHighestBlock: highestBlock.Int64(),\n\t\t\tKnownStates: knownStates.Int64(),\n\t\t\tPulledStates: pulledStates.Int64(),\n\t\t}\n\n\t\treturn &resSync, true, nil\n\t}\n\treturn nil, bRes, nil\n}",
"func ExpensiveFunction(n interface{}) interface{} {\n\tfmt.Printf(\"Executing expensive calculation for %v\\n\", n)\n\ttime.Sleep(5 * time.Second)\n\treturn n\n}",
"func Mutate(ctx context.Context, store *coal.Store, value Value, fn func(bool) error) error {\n\t// trace\n\tctx, span := xo.Trace(ctx, \"glut/Mutate\")\n\tdefer span.End()\n\n\t// get value\n\texists, err := Get(ctx, store, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// run function\n\terr = fn(exists)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// set value\n\t_, err = Set(ctx, store, value)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func (*Execute) Frontend() {}",
"func Maybe(fn func(s State)) {\n\t// check if simulation is enabled\n\tif atomic.LoadInt32(&enabled) == 0 {\n\t\t// execute in normal state\n\t\tfn(&noop{})\n\t\treturn\n\t}\n\n\t// execute in simulated state\n\tfn(defaultState)\n}",
"func (contract *Contract) EvaluatePublicFunction(functionName string) (bool, error) {\r\n // TODO: Check if parameter vals haven't been set yet. Use flags.\r\n\r\n lockingScript, err := contract.GetLockingScript()\r\n if err != nil {\r\n return false, err\r\n }\r\n unlockingScript, err := contract.GetUnlockingScript(functionName)\r\n if err != nil {\r\n return false, err\r\n }\r\n\r\n if ! contract.contextSet {\r\n err = interpreter.NewEngine().Execute(interpreter.WithScripts(lockingScript, unlockingScript))\r\n if err != nil {\r\n return false, err\r\n }\r\n } else {\r\n //input := contract.executionContext.Tx.InputIdx(contract.executionContext.InputIdx)\r\n //if input == nil {\r\n // return false, errors.New(fmt.Sprintf(\"Context transaction has no input with index %d.\", contract.executionContext.InputIdx))\r\n //}\r\n contract.executionContext.Tx.Inputs[contract.executionContext.InputIdx].UnlockingScript = unlockingScript\r\n prevoutSats := contract.executionContext.Tx.InputIdx(contract.executionContext.InputIdx).PreviousTxSatoshis\r\n\r\n engine := interpreter.NewEngine()\r\n err = engine.Execute(\r\n //interpreter.WithScripts(\r\n // lockingScript,\r\n // unlockingScript,\r\n //),\r\n interpreter.WithTx(\r\n contract.executionContext.Tx,\r\n contract.executionContext.InputIdx,\r\n //contract.executionContext.PreviousTxOut,\r\n &bt.Output{LockingScript: lockingScript, Satoshis: prevoutSats},\r\n ),\r\n interpreter.WithFlags(\r\n contract.executionContext.Flags,\r\n ),\r\n )\r\n if err != nil {\r\n return false, err\r\n }\r\n }\r\n\r\n return true, nil\r\n}",
"func (cb *CircuitBreaker) Exec(fn RequestFunc) (interface{}, error) {\n\tif cb.blocked {\n\t\treturn nil, ErrBlocked\n\t}\n\n\tswitch cb.state.Status() {\n\tcase StatusClosed:\n\t\tres, err := fn()\n\t\tif err != nil {\n\t\t\tcb.handleError(err)\n\t\t\treturn nil, err\n\t\t}\n\t\tcb.counter.Success()\n\t\treturn res, nil\n\tcase StatusHalfOpen:\n\t\t// half open is intermediate state, where any failure will set back the circuitbreaker into open state\n\t\t// if required number of success responses are received circuitbreaker goes back to the closed state\n\t\tres, err := fn()\n\t\tif err != nil {\n\t\t\tcb.handleError(err)\n\t\t\treturn nil, err\n\t\t}\n\t\tif cb.counter.Success() > cb.successThreshold {\n\t\t\tcb.state.Set(StatusClosed)\n\t\t}\n\t\treturn res, nil\n\tcase StatusOpen:\n\t\treturn nil, ErrRequestDisabled\n\t}\n\treturn nil, nil\n}",
"func (e *executor) call(fn func() error, format string, args ...interface{}) error {\n\treturn e.function(e.opts, fn, format, args...)\n}",
"func (ck *Clerk) sendRPC(srv *labrpc.ClientEnd, function string, goArgs interface{}, goReply interface{}) (ok_out bool){\n\n\tRPC_returned := make(chan bool)\n\tgo func() {\n\t\tok := srv.Call(function, goArgs, goReply)\n\n\t\tRPC_returned <- ok\n\t}()\n\n\t//Allows for RPC Timeout\n\tok_out = false\n\tselect {\n\tcase <-time.After(time.Millisecond * 300):\n\t \tok_out = false\n\tcase ok_out = <-RPC_returned:\n\t}\n\n\treturn ok_out\n}",
"func (sv *SyncVal) Update(f func(interface{}) (interface{}, error)) error {\n\tsv.lock.Lock()\n\tdefer sv.lock.Unlock()\n\n\tval, err := f(sv.val)\n\tif err == nil {\n\t\tsv.val = val\n\t}\n\treturn err\n}",
"func (e *Engine) syncRPC() {\n\t// TODO(jsing): Make this default to IPv6, if configured.\n\taddr := &net.TCPAddr{\n\t\tIP: e.config.Node.IPv4Addr,\n\t\tPort: e.config.SyncPort,\n\t}\n\tln, err := net.ListenTCP(\"tcp\", addr)\n\tif err != nil {\n\t\tlog.Fatalf(\"Listen failed: %v\", err)\n\t}\n\n\tgo e.syncServer.serve(ln)\n\n\t<-e.shutdownRPC\n\tln.Close()\n\te.shutdownRPC <- true\n}",
"func Atomically(fn func(*Tx)) {\nretry:\n\t// run the transaction\n\ttx := &Tx{\n\t\treads: make(map[*Var]uint64),\n\t\twrites: make(map[*Var]interface{}),\n\t}\n\tif catchRetry(fn, tx) {\n\t\t// wait for one of the variables we read to change before retrying\n\t\ttx.wait()\n\t\tgoto retry\n\t}\n\t// verify the read log\n\tglobalLock.Lock()\n\tif !tx.verify() {\n\t\tglobalLock.Unlock()\n\t\tgoto retry\n\t}\n\t// commit the write log and broadcast that variables have changed\n\tif len(tx.writes) > 0 {\n\t\ttx.commit()\n\t\tglobalCond.Broadcast()\n\t}\n\tglobalLock.Unlock()\n}",
"func (client *Client) Sync() {\n\t/*go func() {*/\n\tstart := time.Now()\n\t// Fetch\n\tconn, err := net.Dial(\"tcp\", client.address)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\t_, _ = fmt.Fprintf(conn, \"fetch \"+strconv.Itoa(client.id))\n\tresponse := make([]byte, 1024)\n\t_, err = conn.Read(response)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tbody := string(response)\n\tif !strings.Contains(body, \"<empty>\") {\n\t\tclient.syncs = body\n\t}\n\t_ = conn.Close()\n\n\t// Then sync\n\tconn, err = net.Dial(\"tcp\", client.address)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\t// Build sync object\n\tdata := \"\"\n\tif len(client.actions) != 0 {\n\t\tfor _, action := range client.actions {\n\t\t\tswitch action.(type) {\n\t\t\tcase *CameraMovementAction:\n\t\t\t\tsync := action.(*CameraMovementAction)\n\t\t\t\tdata += fmt.Sprintf(\"skin %d %f %f %f %f %f %f\",\n\t\t\t\t\tclient.id,\n\t\t\t\t\tsync.position.X(),\n\t\t\t\t\tsync.position.Y(),\n\t\t\t\t\tsync.position.Z(),\n\t\t\t\t\tsync.rotation.X(),\n\t\t\t\t\tsync.rotation.Y(),\n\t\t\t\t\tsync.rotation.Z())\n\t\t\tdefault:\n\t\t\t\tpanic(\"unknown INetworkAction type\")\n\t\t\t}\n\t\t}\n\t} else {\n\t\tdata = \"<empty>\"\n\t}\n\n\t_, _ = fmt.Fprintf(conn, fmt.Sprintf(\"sync %d\\n\", client.id)+data)\n\tresponse = make([]byte, 256)\n\t_, err = conn.Read(response)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tif !strings.Contains(string(response), \"good\") {\n\t\tpanic(\"The server is a fucking liar!\")\n\t}\n\t_ = conn.Close()\n\n\t// All modification send\n\t// Clean modifications\n\tclient.actions = []INetworkAction{}\n\t/*}()*/\n\tfmt.Printf(\"ping :%d\\r\", time.Now().Sub(start).Milliseconds())\n}",
"func (sm *SyncMachine) Execute(startBlockHeight uint64) (SyncState, uint64, error) {\n\trecvChan := make(chan net.Message, syncReceiveBuffer)\n\thandler := func(msg net.Message) {\n\t\trecvChan <- msg\n\t}\n\n\tcurrentState := sm.initialState\n\tctx, cancelCtx := context.WithCancel(context.Background())\n\tsm.channel.Recv(ctx, handler)\n\n\tsm.logger.Infof(\n\t\t\"[member:%v] waiting for block [%v] to start execution\",\n\t\tcurrentState.MemberIndex(),\n\t\tstartBlockHeight,\n\t)\n\terr := sm.blockCounter.WaitForBlockHeight(startBlockHeight)\n\tif err != nil {\n\t\tcancelCtx()\n\t\treturn nil, 0, fmt.Errorf(\"failed to wait for the execution start block\")\n\t}\n\n\tlastStateEndBlockHeight := startBlockHeight\n\n\tblockWaiter, err := stateTransition(\n\t\tctx,\n\t\tsm.logger,\n\t\tcurrentState,\n\t\tlastStateEndBlockHeight,\n\t\tsm.blockCounter,\n\t)\n\tif err != nil {\n\t\tcancelCtx()\n\t\treturn nil, 0, err\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-recvChan:\n\t\t\terr := currentState.Receive(msg)\n\t\t\tif err != nil {\n\t\t\t\tsm.logger.Errorf(\n\t\t\t\t\t\"[member:%v,state:%T] failed to receive a message: [%v]\",\n\t\t\t\t\tcurrentState.MemberIndex(),\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\tcase lastStateEndBlockHeight := <-blockWaiter:\n\t\t\tcancelCtx()\n\n\t\t\tnextState, err := currentState.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 0, fmt.Errorf(\n\t\t\t\t\t\"failed to complete state [%T]: [%w]\",\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tif nextState == nil {\n\t\t\t\tsm.logger.Infof(\n\t\t\t\t\t\"[member:%v,state:%T] reached final state at block: [%v]\",\n\t\t\t\t\tcurrentState.MemberIndex(),\n\t\t\t\t\tcurrentState,\n\t\t\t\t\tlastStateEndBlockHeight,\n\t\t\t\t)\n\t\t\t\treturn currentState, lastStateEndBlockHeight, nil\n\t\t\t}\n\n\t\t\tcurrentState = nextState\n\t\t\tctx, cancelCtx = context.WithCancel(context.Background())\n\t\t\tsm.channel.Recv(ctx, handler)\n\n\t\t\tblockWaiter, err = stateTransition(\n\t\t\t\tctx,\n\t\t\t\tsm.logger,\n\t\t\t\tcurrentState,\n\t\t\t\tlastStateEndBlockHeight,\n\t\t\t\tsm.blockCounter,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\tcancelCtx()\n\t\t\t\treturn nil, 0, err\n\t\t\t}\n\t\t}\n\t}\n}",
"func (cl ConcurrencyLimit) Exec(fn func()) {\n\tcl <- struct{}{}\n\tfn()\n\t<-cl\n}",
"func (c *remotingClient) InvokeSync(ctx context.Context, addr string, request *RemotingCommand) (*RemotingCommand, error) {\n\tconn, err := c.connect(ctx, addr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresp := NewResponseFuture(ctx, request.Opaque, nil)\n\tc.responseTable.Store(resp.Opaque, resp)\n\tdefer c.responseTable.Delete(request.Opaque)\n\terr = c.sendRequest(conn, request)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn resp.waitResponse()\n}",
"func (s *fakeState) Sync(_ context.Context, _ interface{}, _ InfoCatalog) (SyncState, error) {\n\treturn s.syncState, nil\n}",
"func (s *Server) RunInServer(fn func()) {\n\tdone := make(chan struct{})\n\ts.funcs <- func() {\n\t\tfn()\n\t\tdone <- struct{}{}\n\t}\n\t<-done\n}",
"func (l *LogicalReplicator) LogicalSync(ctx context.Context) error { return l.doSync(ctx, false) }",
"func (self *Ring) callSuccessorRPC(key int, function string, args *data.DataStore, consistency int) (result RpcResult) {\n\tclient := self.dialSuccessor(key)\n\tdefer client.Close()\n\tvar err error\n\tif consistency == -1 {\n\t\terr = client.Call(function, args, &result)\n\t} else {\n\n\t\tfunction = function + \"Consistent\"\n\t\tconsistentStore := data.NewConsistentDataStore(args, consistency)\n\t\terr = client.Call(function, consistentStore, &result)\n\t}\n\n\tif err != nil {\n\t\tfmt.Println(\"Error sending data:\", err)\n\t\tresult.Success = -2\n\t\treturn\n\t}\n\tif result.Success != 1 {\n\t\tfmt.Println(\"Error storing data\")\n\t}\n\tfmt.Printf(\"Data Size: %d \\n\", self.KeyValTable.Len())\n\treturn result\n}",
"func SimFunc(f *Func, out io.Writer) {\n\tif len(f.blocks) == 0 {\n\t\treturn\n\t}\n\n\tb := f.blocks[0]\n\tfor {\n\t\tb = simBlock(b, out)\n\t\tif b == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn\n}",
"func (w *Wal) fSync() error {\n\t// Load sync status\n\tss := (*syncState)(atomic.LoadPointer(&w.syncStatePtr))\n\n\t// Read previous status and set to 2. If previous 0, meaning no threads, start a sync thread\n\tpreStatus := atomic.SwapUint32(&w.syncStatus, 2)\n\tif preStatus == 0 {\n\t\tw.wg.Add(1)\n\t\tgo w.threadedSync()\n\t}\n\n\tss.mu.RLock()\n\treturn ss.err\n}",
"func (e *Eval) Mutate(f *Flow, muts ...interface{}) {\n\tif e.Trace != nil {\n\t\tstrs := make([]string, len(muts))\n\t\tfor i := range muts {\n\t\t\tstrs[i] = fmt.Sprint(muts[i])\n\t\t}\n\t\te.Trace.Printf(\"mutate %s: %v\", f, strings.Join(strs, \", \"))\n\t}\n\tvar (\n\t\tprevState, thisState State\n\t\trefresh bool\n\t\tstatusOk = true\n\t)\n\tfor _, mut := range muts {\n\t\tswitch arg := mut.(type) {\n\t\tcase error:\n\t\t\tif arg != nil {\n\t\t\t\tf.Err = errors.Recover(arg)\n\t\t\t}\n\t\tcase State:\n\t\t\tprevState = f.State\n\t\t\tthisState = arg\n\t\t\tf.State = arg\n\t\tcase reflow.Fileset:\n\t\t\tf.Value = values.T(arg)\n\t\tcase Fork:\n\t\t\tf.Fork(arg)\n\t\tcase Value:\n\t\t\tf.Value = arg.Value\n\t\tcase Mutation:\n\t\t\tswitch arg {\n\t\t\tcase Cached:\n\t\t\t\tf.Cached = true\n\t\t\tcase Refresh:\n\t\t\t\trefresh = true\n\t\t\tcase MustIntern:\n\t\t\t\tf.MustIntern = true\n\t\t\tcase NoStatus:\n\t\t\t\tstatusOk = false\n\t\t\tcase Propagate:\n\t\t\t\tif err := e.propagateAssertions(f); err != nil {\n\t\t\t\t\tpanic(fmt.Errorf(\"unexpected propagation error: %v\", err))\n\t\t\t\t}\n\t\t\t}\n\t\tcase SetReserved:\n\t\t\tf.Reserved.Set(reflow.Resources(arg))\n\t\tdefault:\n\t\t\tpanic(fmt.Sprintf(\"invalid argument type %T\", arg))\n\t\t}\n\t}\n\t// When a flow is done (without errors), add all its assertions to the vector clock.\n\tif f.Op.External() && f.State == Done && f.Err == nil {\n\t\terr := e.assertions.AddFrom(f.Value.(reflow.Fileset).Assertions())\n\t\tif err != nil {\n\t\t\tf.Err = errors.Recover(errors.E(\"adding assertions\", f.Digest(), errors.Temporary, err))\n\t\t}\n\t}\n\t// Update task status, if applicable.\n\tif e.Status == nil {\n\t\treturn\n\t}\n\tswitch f.Op {\n\tcase Exec, Intern, Extern:\n\tdefault:\n\t\treturn\n\t}\n\tif (thisState == Running || thisState == Execing) && f.Status == nil && statusOk {\n\t\t// TODO(marius): digest? fmt(\"%-*s %s\", n, ident, f.Digest().Short())\n\t\tf.Status = e.Status.Start(f.Ident)\n\t}\n\tif f.Status == nil || (!refresh && prevState == thisState) {\n\t\treturn\n\t}\n\tvar status string\n\tswitch f.State {\n\tcase Done:\n\t\tif f.Err != nil {\n\t\t\tstatus = fmt.Sprintf(\"%s error %v\", f.Op, f.Err)\n\t\t} else {\n\t\t\tswitch f.Op {\n\t\t\tcase Extern:\n\t\t\t\tstatus = \"done\"\n\t\t\tcase Exec, Intern:\n\t\t\t\tstatus = fmt.Sprintf(\"done %s\", data.Size(f.Value.(reflow.Fileset).Size()))\n\t\t\t}\n\t\t}\n\tcase Running, Execing:\n\t\tswitch f.Op {\n\t\tcase Extern:\n\t\t\tvar sz string\n\t\t\tif fs, ok := f.Deps[0].Value.(reflow.Fileset); ok {\n\t\t\t\tsz = fmt.Sprintf(\" %s\", data.Size(fs.Size()))\n\t\t\t}\n\t\t\tstatus = fmt.Sprintf(\"%s%s\", f.URL, sz)\n\t\tcase Intern:\n\t\t\tstatus = fmt.Sprintf(\"%s\", f.URL)\n\t\tcase Exec:\n\t\t\tstatus = f.AbbrevCmd()\n\t\t}\n\tcase Ready:\n\t\tstatus = \"waiting\"\n\t}\n\tstatus = f.Op.String() + \" \" + status\n\tf.Status.Print(status)\n\tif f.State == Done {\n\t\tf.Status.Done()\n\t\tf.Status = nil\n\t}\n}",
"func (c *Concurrent) Call(function interface{}, params ...interface{}) error {\n\n\tif c.HasError() {\n\t\treturn c.GetLastError()\n\t}\n\n\tif c.closeSignal {\n\t\treturn ErrVChanClosed\n\t}\n\n\tf := reflect.TypeOf(function)\n\n\t// fucntion callable validate\n\tif f.Kind() != reflect.Func {\n\t\treturn fmt.Errorf(\"Concurrent-handler must be a callable func\")\n\t}\n\n\tfin := f.NumIn()\n\tfout := f.NumOut()\n\n\t// params validate\n\tif fin > len(params) {\n\t\treturn fmt.Errorf(\"Call function <%s> with too few input arguments(%d), need %d\", function, len(params), fin)\n\t}\n\tin := make([]reflect.Value, len(params))\n\tfor i := 0; i < len(params); i++ {\n\t\tin[i] = reflect.ValueOf(params[i])\n\t}\n\n\t// allocate thread\n\tc.semaphore.P()\n\tgo func(ticket uint64) {\n\t\tdefer c.semaphore.V() // free thread\n\t\tvals := reflect.ValueOf(function).Call(in)\n\t\tif fout != len(vals) {\n\t\t\tc.AddError(fmt.Errorf(\"The number of return values does not match\"))\n\t\t\treturn\n\t\t}\n\t\trets := make([]interface{}, fout)\n\t\tfor i, v := range vals {\n\t\t\trets[i] = v.Interface()\n\t\t}\n\t\tif !c.orderSignal {\n\t\t\tc.values <- rets\n\t\t\treturn\n\t\t}\n\t\tc.cond.L.Lock()\n\t\tdefer func() {\n\t\t\tc.cond.L.Unlock()\n\t\t\tc.cond.Broadcast()\n\t\t}()\n\t\tfor ticket != c.finTicket {\n\t\t\tc.cond.Wait()\n\t\t}\n\t\tc.values <- rets\n\t\tc.finTicket++\n\t}(c.totalTicket)\n\tc.totalTicket++\n\treturn nil\n}",
"func (s *ConcurrentStateMachine) Sync() error {\n\tpanic(\"Sync called on ConcurrentStateMachine\")\n}",
"func (ds *RegularStateMachineWrapper) Sync() error {\n\tpanic(\"Sync not suppose to be called on RegularStateMachineWrapper\")\n}",
"func (s *RegularStateMachine) Sync() error {\n\tpanic(\"Sync called on RegularStateMachine\")\n}",
"func (s *System) Evaluate(state []float32) []float32 {\n\tif len(state) > 0 {\n\t\treturn s.function(state, s.parametersVector)\n\t} else {\n\t\treturn s.function(s.stateVector, s.parametersVector)\n\t}\n}",
"func Maybex(s State, fn func(State)) {\n\t// check if simulation is enabled\n\tif atomic.LoadInt32(&enabled) == 0 {\n\t\t// execute in normal state\n\t\tfn(&noop{})\n\t\treturn\n\t}\n\n\t// execute in simulated state\n\tfn(s)\n}",
"func simplyRun(f func()) {\n\tgo f()\n}",
"func (s *HTTPServer) agentLocalBlockingQuery(resp http.ResponseWriter, hash string,\n\tqueryOpts *structs.QueryOptions, fn agentLocalBlockingFunc) (interface{}, error) {\n\n\t// If we are not blocking we can skip tracking and allocating - nil WatchSet\n\t// is still valid to call Add on and will just be a no op.\n\tvar ws memdb.WatchSet\n\tvar timeout *time.Timer\n\n\tif hash != \"\" {\n\t\t// TODO(banks) at least define these defaults somewhere in a const. Would be\n\t\t// nice not to duplicate the ones in consul/rpc.go too...\n\t\twait := queryOpts.MaxQueryTime\n\t\tif wait == 0 {\n\t\t\twait = 5 * time.Minute\n\t\t}\n\t\tif wait > 10*time.Minute {\n\t\t\twait = 10 * time.Minute\n\t\t}\n\t\t// Apply a small amount of jitter to the request.\n\t\twait += lib.RandomStagger(wait / 16)\n\t\ttimeout = time.NewTimer(wait)\n\t}\n\n\tfor {\n\t\t// Must reset this every loop in case the Watch set is already closed but\n\t\t// hash remains same. In that case we'll need to re-block on ws.Watch()\n\t\t// again.\n\t\tws = memdb.NewWatchSet()\n\t\tcurHash, curResp, err := fn(ws)\n\t\tif err != nil {\n\t\t\treturn curResp, err\n\t\t}\n\t\t// Return immediately if there is no timeout, the hash is different or the\n\t\t// Watch returns true (indicating timeout fired). Note that Watch on a nil\n\t\t// WatchSet immediately returns false which would incorrectly cause this to\n\t\t// loop and repeat again, however we rely on the invariant that ws == nil\n\t\t// IFF timeout == nil in which case the Watch call is never invoked.\n\t\tif timeout == nil || hash != curHash || ws.Watch(timeout.C) {\n\t\t\tresp.Header().Set(\"X-Consul-ContentHash\", curHash)\n\t\t\treturn curResp, err\n\t\t}\n\t\t// Watch returned false indicating a change was detected, loop and repeat\n\t\t// the callback to load the new value. If agent sync is paused it means\n\t\t// local state is currently being bulk-edited e.g. config reload. In this\n\t\t// case it's likely that local state just got unloaded and may or may not be\n\t\t// reloaded yet. Wait a short amount of time for Sync to resume to ride out\n\t\t// typical config reloads.\n\t\tif syncPauseCh := s.agent.syncPausedCh(); syncPauseCh != nil {\n\t\t\tselect {\n\t\t\tcase <-syncPauseCh:\n\t\t\tcase <-timeout.C:\n\t\t\t}\n\t\t}\n\t}\n}",
"func (this *Protocol) call(peerId PeerId, fn string, args interface{}, reply interface{}) bool {\n\tc := make(chan bool, 1)\n\n\tgo func() {\n\t\tclient, errx := rpc.Dial(\"tcp\", peerId.String())\n\t\tif errx != nil {\n\t\t\tc <- false\n\t\t\treturn\n\t\t}\n\t\tdefer client.Close()\n\n\t\terr := client.Call(\"Protocol.Handle\"+fn, args, reply)\n\t\tif err == nil {\n\t\t\tc <- true\n\t\t\treturn\n\t\t}\n\n\t\tLog.Warn.Println(err)\n\t\tc <- false\n\t}()\n\n\tselect {\n\t\tcase b := <- c:\n\t\t\treturn b\n\t\tcase <- time.After(time.Second):\n\t\t\treturn false\n\t}\n}",
"func (db *DB) Sync() error { return fdatasync(db) }",
"func (c *Client) ExecuteFunction(request *ExecuteFunctionRequest) (response *ExecuteFunctionResponse, err error) {\n if request == nil {\n request = NewExecuteFunctionRequest()\n }\n response = NewExecuteFunctionResponse()\n err = c.Send(request, response)\n return\n}",
"func (s *System) Function() func(state []float32, parameters []float32) []float32 {\n\treturn s.function\n}",
"func Call(f func()) {\n\tdone := dPool.Get().(chan struct{})\n\tdefer dPool.Put(done)\n\tfq <- fun{fn: f, done: done}\n\t<-done\n}",
"func compute(fn func(float64, float64) float64) float64 {\n\treturn fn(3, 4)\n}",
"func TestSetFunction(t *testing.T) {\n\tmapper, err := NewChannelMapper(`function(doc) {sync(doc.channels);}`)\n\tassertNoError(t, err, \"Couldn't create mapper\")\n\tchannels, err := mapper.MapToChannels(`{\"channels\": [\"foo\", \"bar\", \"baz\"]}`)\n\tassertNoError(t, err, \"callMapper failed\")\n\tchanged, err := mapper.SetFunction(`function(doc) {sync(\"all\");}`)\n\tassertTrue(t, changed, \"SetFunction failed\")\n\tassertNoError(t, err, \"SetFunction failed\")\n\tchannels, err = mapper.MapToChannels(`{\"channels\": [\"foo\", \"bar\", \"baz\"]}`)\n\tassertNoError(t, err, \"callMapper failed\")\n\tassert.DeepEquals(t, channels, []string{\"all\"})\n\tmapper.Stop()\n}",
"func (e *Exclusive) Call(key interface{}, value func() (interface{}, error)) (interface{}, error) {\n\treturn e.CallAfter(key, value, 0)\n}",
"func _[T interface{ ~func() }](f T) {\n\tf()\n\tgo f()\n}",
"func (runner *suiteRunner) runFunc(method *reflect.FuncValue, kind funcKind,\n dispatcher func(c *C)) *C {\n c := runner.forkCall(method, kind, dispatcher)\n <-c.done\n return c\n}",
"func (t TaskFunc) Run() { t() }",
"func HTTPClientCallSync(ctx context.Context, req *http.Request, client *http.Client, command string, config *hystrix.CommandConfig, resolveAddress bool) (*http.Response, error) {\n\toutputChan, errorsChan := HTTPClientCall(ctx, req, client, command, config, resolveAddress)\n\tselect {\n\tcase resp := <-outputChan:\n\t\treturn resp, nil\n\tcase err := <-errorsChan:\n\t\treturn nil, err\n\t}\n}",
"func (p *EventLoop) Execute(fn WorkFunc) WorkUnit {\n\n\tw := &workUnit{\n\t\tdone: make(chan struct{}),\n\t\tfn: fn,\n\t}\n\n\tgo func() {\n\t\tp.m.RLock()\n\t\tif p.closed {\n\t\t\tw.err = &ErrPoolClosed{s: errClosed}\n\t\t\tif w.cancelled.Load() == nil {\n\t\t\t\tclose(w.done)\n\t\t\t}\n\t\t\tp.m.RUnlock()\n\t\t\treturn\n\t\t}\n\n\t\tp.work <- w\n\n\t\tp.m.RUnlock()\n\t}()\n\n\treturn w\n}",
"func (o ApplicationStatusOperationStateOperationOutput) Sync() ApplicationStatusOperationStateOperationSyncPtrOutput {\n\treturn o.ApplyT(func(v ApplicationStatusOperationStateOperation) *ApplicationStatusOperationStateOperationSync {\n\t\treturn v.Sync\n\t}).(ApplicationStatusOperationStateOperationSyncPtrOutput)\n}",
"func (f *Factory) Sync(syncFn SyncFunc) *Factory {\n\tf.sync = syncFn\n\treturn f\n}",
"func TestOnlyCallOnChanges(t *testing.T) {\n\tr := New()\n\ti := r.CreateInput(1)\n\tc := r.CreateCompute1(i, func(v int) int {\n\t\tif v > 3 {\n\t\t\treturn v + 1\n\t\t}\n\t\treturn 2\n\t})\n\tvar observedCalled int\n\tc.AddCallback(func(int) {\n\t\tobservedCalled++\n\t})\n\ti.SetValue(1)\n\tif observedCalled != 0 {\n\t\tt.Fatalf(\"observe function called even though input didn't change\")\n\t}\n\ti.SetValue(2)\n\tif observedCalled != 0 {\n\t\tt.Fatalf(\"observe function called even though computed value didn't change\")\n\t}\n}",
"func (c *Callee) InvokeAndStoreResult(cp *msg.CallPayload, fn Thunk) error {\n\tttl := cp.TTLAfterRead\n\tstart := time.Now()\n\n\tv, err := fn(cp)\n\tif remain := ttl - time.Now().Sub(start); remain > 0 {\n\t\t// register the result\n\t\treturn c.storeResult(cp, v, err, remain)\n\t}\n\treturn ErrCallExpired\n}",
"func (c *client) state(me NodeID) (state *State, err error) {\n\terr = c.client.Call(\"Server.State\", me, &state)\n\tfor err == rpc.ErrShutdown {\n\t\terr = c.client.Call(\"Server.State\", me, &state)\n\t}\n\treturn\n}",
"func executeFunction(pipeline *sdk.Pipeline, operation *sdk.Operation, data []byte) ([]byte, error) {\n\tvar err error\n\tvar result []byte\n\n\tname := operation.Function\n\tparams := operation.GetParams()\n\theaders := operation.GetHeaders()\n\n\tgateway := getGateway()\n\turl := buildURL(\"http://\"+gateway, \"function\", name)\n\n\tmethod := os.Getenv(\"default-method\")\n\tif method == \"\" {\n\t\tmethod = \"POST\"\n\t}\n\n\tif m, ok := headers[\"method\"]; ok {\n\t\tmethod = m\n\t}\n\n\thttpreq, err := buildHttpRequest(url, method, data, params, headers)\n\tif err != nil {\n\t\treturn []byte{}, fmt.Errorf(\"cannot connect to Function on URL: %s\", url)\n\t}\n\n\tif operation.Requesthandler != nil {\n\t\toperation.Requesthandler(httpreq)\n\t}\n\n\tclient := &http.Client{}\n\tresp, err := client.Do(httpreq)\n\tif err != nil {\n\t\treturn []byte{}, err\n\t}\n\n\tdefer resp.Body.Close()\n\tif operation.OnResphandler != nil {\n\t\tresult, err = operation.OnResphandler(resp)\n\t} else {\n\t\tif resp.StatusCode < 200 || resp.StatusCode > 299 {\n\t\t\terr = fmt.Errorf(\"invalid return status %d while connecting %s\", resp.StatusCode, url)\n\t\t\tresult, _ = ioutil.ReadAll(resp.Body)\n\t\t} else {\n\t\t\tresult, err = ioutil.ReadAll(resp.Body)\n\t\t}\n\t}\n\n\treturn result, err\n}",
"func (impl *Server) GetAndLock(ID string) (bool, base.ModelInterface) {\n\tvar (\n\t\tc = impl.TemplateImpl.GetConnection()\n\t\tserver = new(entity.Server)\n\t)\n\t// Transaction start.\n\ttx := c.Begin()\n\tif err := tx.Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"error\": err}).\n\t\t\tWarn(\"DB get and lock server failed, start transaction failed.\")\n\t\treturn false, nil\n\t}\n\ttx.Exec(\"SET TRANSACTION ISOLATION LEVEL SERIALIZABLE;\")\n\tif tx.Where(\"\\\"ID\\\" = ?\", ID).First(server).RecordNotFound() {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID}).\n\t\t\tDebug(\"DB get and lock server failed, server does not exist.\")\n\t\treturn false, nil\n\t}\n\tif !constvalue.ServerLockable(server.State) {\n\t\t// Server not ready, rollback.\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"state\": server.State}).\n\t\t\tDebug(\"DB get and lock server failed, server not lockable.\")\n\t\treturn false, server.ToModel()\n\t}\n\t// Change the state.\n\tif err := tx.Model(server).UpdateColumn(\"State\", constvalue.ServerStateLocked).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"state\": server.State}).\n\t\t\tDebug(\"DB get and lock server failed, update state failed.\")\n\t\treturn false, nil\n\t}\n\t// Commit.\n\tif err := tx.Commit().Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"error\": err}).\n\t\t\tWarn(\"DB get and lock server failed, commit failed.\")\n\t\treturn false, nil\n\t}\n\treturn true, server.ToModel()\n}",
"func (o ApplicationStatusOperationStateOperationPtrOutput) Sync() ApplicationStatusOperationStateOperationSyncPtrOutput {\n\treturn o.ApplyT(func(v *ApplicationStatusOperationStateOperation) *ApplicationStatusOperationStateOperationSync {\n\t\tif v == nil {\n\t\t\treturn nil\n\t\t}\n\t\treturn v.Sync\n\t}).(ApplicationStatusOperationStateOperationSyncPtrOutput)\n}",
"func (t *TestCluster) RunCmdSyncf(m platform.Machine, f string, args ...interface{}) {\n\tt.RunCmdSync(m, fmt.Sprintf(f, args...))\n}",
"func (am *AsyncMachine) Execute() (AsyncState, error) {\n\trecvCtx, cancelRecvCtx := context.WithCancel(am.ctx)\n\tdefer cancelRecvCtx()\n\n\trecvChan := make(chan net.Message, asyncReceiveBuffer)\n\thandler := func(msg net.Message) {\n\t\trecvChan <- msg\n\t}\n\tam.channel.Recv(recvCtx, handler)\n\n\tcurrentState := am.initialState\n\n\tonStateDone := asyncStateTransition(\n\t\tam.ctx,\n\t\tam.logger,\n\t\tcurrentState,\n\t)\n\n\tfor {\n\t\tselect {\n\t\tcase msg := <-recvChan:\n\t\t\terr := currentState.Receive(msg)\n\t\t\tif err != nil {\n\t\t\t\tam.logger.Errorf(\n\t\t\t\t\t\"[member:%v,state:%T] failed to receive a message: [%v]\",\n\t\t\t\t\tcurrentState.MemberIndex(),\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\tcase err := <-onStateDone:\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"failed to initiate state [%T]: [%w]\",\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tnextState, err := currentState.Next()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\n\t\t\t\t\t\"failed to complete state [%T]: [%w]\",\n\t\t\t\t\tcurrentState,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tif nextState == nil {\n\t\t\t\tam.logger.Infof(\n\t\t\t\t\t\"[member:%v,state:%T] reached final state\",\n\t\t\t\t\tcurrentState.MemberIndex(),\n\t\t\t\t\tcurrentState,\n\t\t\t\t)\n\t\t\t\treturn currentState, nil\n\t\t\t}\n\n\t\t\tcurrentState = nextState\n\t\t\tonStateDone = asyncStateTransition(\n\t\t\t\tam.ctx,\n\t\t\t\tam.logger,\n\t\t\t\tcurrentState,\n\t\t\t)\n\n\t\tcase <-am.ctx.Done():\n\t\t\treturn nil, am.ctx.Err()\n\t\t}\n\t}\n}",
"func (s *StateSyncer) doSync(matcherName, matcherValue, curState, newState string) (needUpdate bool) {\n\tif curState == \"inactive\" {\n\t\treturn false\n\t}\n\n\t//only take ation when the state is not the same\n\tif newState != curState {\n\n\t\t//the alert is muted by user (curState == muted), but it already went away in alertmanager side (newState == active)\n\t\t//then we need to remove the silence rule and update the state in CRD\n\t\tif curState == \"muted\" && newState == \"active\" {\n\t\t\terr := s.alertManager.RemoveSilenceRule(matcherName, matcherValue)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Error occurred while remove silence : %v\", err)\n\t\t\t}\n\t\t\treturn true\n\t\t}\n\t\t//the alert is unmuted by user, but it is still muted in alertmanager side\n\t\t//need to remove the silence rule, but do not have to update the CRD\n\t\tif curState == \"alerting\" && newState == \"muted\" {\n\t\t\terr := s.alertManager.RemoveSilenceRule(matcherName, matcherValue)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Error occurred while remove silence : %v\", err)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\n\t\t//the alert is muted by user, but it is still alerting in alertmanager side\n\t\t//need to add silence rule to alertmanager\n\t\tif curState == \"muted\" && newState == \"alerting\" {\n\t\t\terr := s.alertManager.AddSilenceRule(matcherName, matcherValue)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.Errorf(\"Error occurred while remove silence : %v\", err)\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\treturn true\n\t}\n\n\treturn false\n\n}",
"func (c *Controller) callAndWait(p []*Packet, checkError bool, f func(*Packet) bool) error {\n\tc.packetConnLock.Lock()\n\tdefer c.packetConnLock.Unlock()\n\n\tcheckSeqs := map[uint16]bool{}\n\tfor _, packet := range p {\n\t\tif seq, err := packet.Seq(); err == nil {\n\t\t\tcheckSeqs[seq] = true\n\t\t}\n\t}\n\n\tconn, err := NewPacketConn()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tsessInfo := c.getSessionInfo()\n\tif err := conn.Auth(sessInfo.UserID, sessInfo.Authorize, c.timeout); err != nil {\n\t\treturn err\n\t}\n\n\t// Prevent the bg thread from blocking on a\n\t// channel send forever.\n\tdoneChan := make(chan struct{}, 1)\n\tdefer close(doneChan)\n\n\tpackets := make(chan *Packet, 16)\n\terrChan := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(packets)\n\t\tfor {\n\t\t\tpacket, err := conn.Read()\n\t\t\tif err != nil {\n\t\t\t\terrChan <- err\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif checkError && packet.IsResponse {\n\t\t\t\tseq, err := packet.Seq()\n\t\t\t\tif err == nil && checkSeqs[seq] && len(packet.Data) > 0 {\n\t\t\t\t\tif packet.Data[len(packet.Data)-1] != 0 {\n\t\t\t\t\t\terrChan <- RemoteCallError\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tselect {\n\t\t\tcase packets <- packet:\n\t\t\tcase <-doneChan:\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tfor _, subPacket := range p {\n\t\tif err := conn.Write(subPacket); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ttimeout := time.After(c.timeout)\n\tfor {\n\t\tselect {\n\t\tcase packet, ok := <-packets:\n\t\t\tif !ok {\n\t\t\t\t// Could be a race condition between packets and errChan.\n\t\t\t\tselect {\n\t\t\t\tcase err := <-errChan:\n\t\t\t\t\treturn err\n\t\t\t\tdefault:\n\t\t\t\t\treturn errors.New(\"connection closed\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif f(packet) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\tcase err := <-errChan:\n\t\t\treturn err\n\t\tcase <-timeout:\n\t\t\treturn errors.New(\"timeout waiting for response\")\n\t\t}\n\t}\n}",
"func (w *Wallet) SynchronizeRPC(chainClient chain.Interface) {\n\tw.quitMu.Lock()\n\tselect {\n\tcase <-w.quit:\n\t\tw.quitMu.Unlock()\n\t\treturn\n\tdefault:\n\t}\n\tw.quitMu.Unlock()\n\n\t// TODO: Ignoring the new client when one is already set breaks callers\n\t// who are replacing the client, perhaps after a disconnect.\n\tw.chainClientLock.Lock()\n\tif w.chainClient != nil {\n\t\tw.chainClientLock.Unlock()\n\t\treturn\n\t}\n\tw.chainClient = chainClient\n\n\t// If the chain client is a NeutrinoClient instance, set a birthday so\n\t// we don't download all the filters as we go.\n\tswitch cc := chainClient.(type) {\n\tcase *chain.NeutrinoClient:\n\t\tcc.SetStartTime(w.Manager.Birthday())\n\t}\n\tw.chainClientLock.Unlock()\n\n\t// TODO: It would be preferable to either run these goroutines\n\t// separately from the wallet (use wallet mutator functions to\n\t// make changes from the RPC client) and not have to stop and\n\t// restart them each time the client disconnects and reconnets.\n\tw.wg.Add(4)\n\tgo w.handleChainNotifications()\n\tgo w.rescanBatchHandler()\n\tgo w.rescanProgressHandler()\n\tgo w.rescanRPCHandler()\n}",
"func (s *Syncer) Sync() error {\n\ts.called = true\n\treturn s.err\n}",
"func (p *Promise) Force() interface{} {\n\tp.once.Do(func() { p.value = p.f() })\n\treturn p.value\n}",
"func (session Runtime) evaluate(expression string, contextID int64, async, returnByValue bool) (*devtool.RemoteObject, error) {\n\tp := &devtool.EvaluatesExpression{\n\t\tExpression: expression,\n\t\tIncludeCommandLineAPI: true,\n\t\tContextID: contextID,\n\t\tAwaitPromise: !async,\n\t\tReturnByValue: returnByValue,\n\t}\n\tresult := new(devtool.EvaluatesResult)\n\tif err := session.call(\"Runtime.evaluate\", p, result); err != nil {\n\t\treturn nil, err\n\t}\n\tif result.ExceptionDetails != nil {\n\t\treturn nil, result.ExceptionDetails\n\t}\n\treturn result.Result, nil\n}",
"func (lock *lockedBool) execute(task func() error) (err error) {\n\tdefer (func() {\n\t\tr := recover()\n\t\tif r != nil {\n\t\t\tif evalue, ok := r.(error); ok {\n\t\t\t\terr = evalue\n\t\t\t} else {\n\t\t\t\terr = errors.Fail(ErrBadHandler{}, nil, fmt.Sprintf(\"Unknown failure to execute task: %s\", r))\n\t\t\t}\n\t\t}\n\t})()\n\treturn task()\n}",
"func Func() {}",
"func (s *server) callFunc(r io.Reader, receiverFunc interface{}) (interface{}, error) {\n\n\t// Resolve function's type\n\tfuncType := reflect.TypeOf(receiverFunc)\n\n\t// Deserialize arguments read from procedure call body\n\tfuncArg := reflect.New(funcType.In(0)).Interface()\n\n\tif _, err := xdr.Unmarshal(r, &funcArg); err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Call function\n\tfuncValue := reflect.ValueOf(receiverFunc)\n\tfuncArgValue := reflect.Indirect(reflect.ValueOf(funcArg))\n\tfuncRetValue := reflect.New(funcType.In(1).Elem())\n\n\ts.log.Debugf(\"-> %+v\", funcArgValue)\n\tfuncRetError := funcValue.Call([]reflect.Value{funcArgValue, funcRetValue})[0]\n\ts.log.Debugf(\"<- %+v\", funcRetValue)\n\n\tif !funcRetError.IsNil() {\n\t\treturn nil, funcRetError.Interface().(error)\n\t}\n\n\t// Return result computed by the actual function. This is what should be sent back to the remote\n\t// caller.\n\treturn funcRetValue.Interface(), nil\n}",
"func HandleFunction(L *lua.LState) int {\n\ts := checkServer(L, 1)\n\tf := L.CheckFunction(2)\n\tif len(f.Upvalues) > 0 {\n\t\tL.ArgError(2, \"cannot pass closures\")\n\t}\n\n\t// Stash any args to pass to the function beyond response and request\n\tvar args []lua.LValue\n\ttop := L.GetTop()\n\tfor i := 3; i <= top; i++ {\n\t\targs = append(args, L.Get(i))\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase data := <-s.serveData:\n\t\t\tgo func(sData *serveData) {\n\t\t\t\tstate := newHandlerState(sData)\n\t\t\t\tdefer state.Close()\n\t\t\t\tresponse := state.GetGlobal(\"response\")\n\t\t\t\trequest := state.GetGlobal(\"request\")\n\t\t\t\tf := state.NewFunctionFromProto(f.Proto)\n\t\t\t\tstate.Push(f)\n\t\t\t\tstate.Push(response)\n\t\t\t\tstate.Push(request)\n\t\t\t\t// Push any extra args\n\t\t\t\tfor _, arg := range args {\n\t\t\t\t\tstate.Push(arg)\n\t\t\t\t}\n\t\t\t\tif err := state.PCall(2+len(args), 0, nil); err != nil {\n\t\t\t\t\tlog.Printf(\"[ERROR] handle: %s\\n\", err.Error())\n\t\t\t\t\tdata.done <- true\n\t\t\t\t\tlog.Printf(\"[ERROR] closed connection\\n\")\n\t\t\t\t}\n\t\t\t\tstate.Pop(state.GetTop())\n\t\t\t}(data)\n\t\t}\n\t}\n}",
"func (e *Endpoint) Call(function string, args interface{}, reply interface{}) error {\n\tcall := <-e.Go(function, args, reply, make(chan *rpc.Call, 1)).Done\n\treturn call.Error\n}",
"func g1() {\n\tif x := f(func() {\n\t\tif true {}\n\t}); true {\n\t\t_ = x;\n\t}\n}",
"func (ls *Libstore) handleSyncOps() {\n\tfor {\n\t\tsyncop := <-ls.syncopchan\n\t\tswitch syncop.op {\n\t\tcase GETRPCCLI:\n\t\t\tcli, err := ls.getRPCClient(syncop.key)\n\t\t\tls.clireplychan <- &rpccli{cli, err}\n\t\tcase WANTLEASE:\n\t\t\twantlease := ls.wantLease(syncop.key)\n\t\t\tls.successreplychan <- wantlease\n\t\tcase CACHEGET:\n\t\t\tvalue, found := ls.cache[syncop.key]\n\t\t\tls.cachereplychan <- &cachevalue{value, found}\n\t\tcase CACHEPUT:\n\t\t\tls.cache[syncop.key] = syncop.value\n\t\t\tls.successreplychan <- true\n\t\tcase CACHEDEL:\n\t\t\tdelete(ls.cache, syncop.key)\n\t\t\tls.successreplychan <- true\n\t\t}\n\t}\n}",
"func (t *SimpleChaincode) Invoke(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tswitch function {\n\n\tcase \"execute\":\n\n\t\tif len(args) < 1 {\n\t\t\treturn nil, errors.New(\"execute operation must include single argument, the base64 encoded form of a bitcoin transaction\")\n\t\t}\n\t\ttxDataBase64 := args[0]\n\t\ttxData, err := base64.StdEncoding.DecodeString(txDataBase64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error decoding TX as base64: %s\", err)\n\t\t}\n\n\t\tutxo := util.MakeUTXO(MakeChaincodeStore(stub))\n\t\texecResult, err := utxo.Execute(txData)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"Error executing TX: %s\", err)\n\t\t}\n\n\t\tfmt.Printf(\"\\nExecResult: Coinbase: %t, SumInputs %d, SumOutputs %d\\n\\n\", execResult.IsCoinbase, execResult.SumPriorOutputs, execResult.SumCurrentOutputs)\n\n\t\tif execResult.IsCoinbase == false {\n\t\t\tif execResult.SumCurrentOutputs > execResult.SumPriorOutputs {\n\t\t\t\treturn nil, fmt.Errorf(\"sumOfCurrentOutputs > sumOfPriorOutputs: sumOfCurrentOutputs = %d, sumOfPriorOutputs = %d\", execResult.SumCurrentOutputs, execResult.SumPriorOutputs)\n\t\t\t}\n\t\t}\n\n\t\treturn nil, nil\n\n\tdefault:\n\t\treturn nil, errors.New(\"Unsupported operation\")\n\t}\n\n}",
"func (i *ServerDBImplement) GetAndLockServer(ID string) (bool, *model.Server) {\n\tc := commonDB.GetConnection()\n\t// Transaction start.\n\ttx := c.Begin()\n\tif err := tx.Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"error\": err}).\n\t\t\tWarn(\"Get and lock server in DB failed, start transaction failed.\")\n\t\treturn false, nil\n\t}\n\tvar s = new(entity.Server)\n\tif tx.Where(\"ID = ?\", ID).First(s).RecordNotFound() {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID}).\n\t\t\tDebug(\"Get and lock server in DB failed, server does not exist.\")\n\t\treturn false, nil\n\t}\n\tif !constValue.ServerLockable(s.State) {\n\t\t// Server not ready, rollback.\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"state\": s.State}).\n\t\t\tDebug(\"Get and lock server in DB failed, server not lockable.\")\n\t\treturn false, createServerModel(s)\n\t}\n\t// Change the state.\n\tif err := tx.Model(s).UpdateColumn(\"State\", constValue.ServerStateLocked).Error; err != nil {\n\t\ttx.Rollback()\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"state\": s.State}).\n\t\t\tDebug(\"Get and lock server in DB failed, update state failed.\")\n\t\treturn false, nil\n\t}\n\t// Commit.\n\tif err := tx.Commit().Error; err != nil {\n\t\tlog.WithFields(log.Fields{\n\t\t\t\"id\": ID,\n\t\t\t\"error\": err}).\n\t\t\tWarn(\"Get and lock server in DB failed, commit failed.\")\n\t\treturn false, nil\n\t}\n\treturn true, createServerModel(s)\n}",
"func (session Runtime) Evaluate(code string, async bool, returnByValue bool) (interface{}, error) {\n\tresult, err := session.evaluate(code, session.currentContext(), async, returnByValue)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn result.Value, nil\n}",
"func TestSyncFunctionTakesArray(t *testing.T) {\n\tmapper, err := NewChannelMapper(`function(doc) {sync([\"foo\", \"bar\",\"baz\"])}`)\n\tassertNoError(t, err, \"Couldn't create mapper\")\n\tchannels, err := mapper.callMapper(`{\"channels\": []}`)\n\tassertNoError(t, err, \"callMapper failed\")\n\tassert.DeepEquals(t, channels, []string{\"foo\", \"bar\", \"baz\"})\n}",
"func CallNonBlock(f func()) {\n\tcheckRun()\n\tcallQueue <- f\n}",
"func Exec(f func() error) error {\n\tdone := make(chan error, 1)\n\tgfxfunc <- func() {\n\t\tdone <- f()\n\t}\n\terr := <-done\n\treturn err\n}",
"func (w *funcWrapper) Run() {\n\t(*w)()\n}",
"func (e *Exclusive) CallAsync(key interface{}, value func() (interface{}, error)) <-chan *ExclusiveOutcome {\n\treturn e.CallAfterAsync(key, value, 0)\n}",
"func Call(f func()) {\n\tcheckRun()\n\tdone := make(chan struct{})\n\tcallQueue <- func() {\n\t\tf()\n\t\tdone <- struct{}{}\n\t}\n\t<-done\n}",
"func (f Func) Call(functionName string, payload []byte, contentType StoreContentType) (<-chan FuncResponse, error) {\n\n\tresponseChan := make(chan FuncResponse)\n\tgo f.parseRawFuncResponse(functionName, payload, contentType, responseChan)\n\treturn responseChan, nil\n\n}",
"func (s *Server) wrap() {\n\toldCb := s.Config.ConnState\n\n\ts.Config.ConnState = func(conn of.Conn, state of.ConnState) {\n\t\ts.mu.Lock()\n\t\tif state == of.StateNew {\n\t\t\t// Persist the new connections, so they\n\t\t\t// could be closed gracefully.\n\t\t\tif s.conns == nil {\n\t\t\t\ts.conns = make(map[of.Conn]struct{})\n\t\t\t}\n\n\t\t\ts.conns[conn] = struct{}{}\n\t\t}\n\n\t\ts.mu.Unlock()\n\t\tif oldCb != nil {\n\t\t\toldCb(conn, state)\n\t\t}\n\t}\n\n}",
"func SafeExecute(executeFunction Execute) (interface{}, error) {\n\tconn, err := getgrpcConnection()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdefer conn.Close()\n\n\treturn executeFunction(conn)\n}",
"func (ex *ExecutorS) Execute() {\n\timmediate := uint32(ex.Result.TwelveBitImmediate)\n\tsrc := uint(ex.Result.FiveBitRegister2)\n\tbase := uint(ex.Result.FiveBitRegister1)\n\tfunc3 := validOperationS(ex.Result.Funct3)\n\n\tdecision := map[Parser.OpCode](map[validOperationS](executionFunctionS)){\n\t\tParser.Store: map[validOperationS](executionFunctionS){\n\t\t\tStoreWord: (RiscVExecutor).storeWord,\n\t\t\tStoreHalfWord: (RiscVExecutor).storeHalfWord,\n\t\t\tStoreByte: (RiscVExecutor).storeByte,\n\t\t},\n\t}\n\n\tif m, ok := decision[ex.Result.OpCode]; ok {\n\t\tif f, ok := m[func3]; ok {\n\t\t\tf(ex.Executor, base, src, immediate)\n\t\t} else {\n\t\t\tpanic(fmt.Sprintf(\"executionFunctionS: %d operation not found\", func3))\n\t\t}\n\t} else {\n\t\tpanic(fmt.Sprintf(\"executionFunctionS: %d opcode not found\", ex.Result.OpCode))\n\t}\n}",
"func call(endpoint string, fn string, req interface{}, rep interface{}) bool {\n\tc, e := rpc.Dial(\"unix\", endpoint)\n\tif e != nil {\n\t\tglog.Errorln(e)\n\t\treturn false\n\t}\n\n\tdefer c.Close()\n\te = c.Call(fn, req, rep)\n\tif e != nil {\n\t\tglog.Errorln(e)\n\t\treturn false\n\t}\n\treturn true\n}",
"func FuncChangeRet() bool { return false }",
"func SetRemoteFunc(f NewRemoteFunc) {\n\tmutex.Lock()\n\tdefer mutex.Unlock()\n\tnewRemoteFunc = f\n}",
"func (rf *Raft) sendRequestVote(server int, args *RequestVoteArgs, reply *RequestVoteReply) bool {\n fmt.Printf(\"----> sendRequestProc: sendRequest to %d from %d\\n\", server, args.CandidateId)\n // Why is there no lock here? We are accessing a common variable.\n ok := rf.peers[server].Call(\"Raft.RequestVote\", args, reply)\n return ok\n}",
"func (c *conn) Get(fn func([]byte)) (err error) {\n\tc.mux.Lock()\n\tif err = c.get(fn); err != nil {\n\t\tc.setIdle()\n\t}\n\tc.mux.Unlock()\n\treturn\n}",
"func (r *ReconcileFunction) Reconcile(request reconcile.Request) (reconcile.Result, error) {\n\tif r.config == nil || r.runtimeConfig == nil || time.Since(configUpdatedTime) > 30*time.Second {\n\t\tlogrus.Infof(\"Read GlobalConfig...\")\n\t\tr.configInit()\n\t\tlogrus.Infof(\"Read GlobalConfig Success\")\n\t} else {\n\t\tlogrus.Warn(\"Read GlobalConfig From Cache\")\n\t}\n\treqLogger := logrus.WithFields(logrus.Fields{\n\t\t\"Request.Namespace\": request.Namespace,\n\t\t\"Request.Name\": request.Name,\n\t})\n\tr.logger = reqLogger\n\treqLogger.Info(\"Reconciling Function\")\n\n\tinstance := &funceasyv1.Function{}\n\ttheRegexp := regexp.MustCompile(`^(\\w+)-(\\w+)$`)\n\tparams := theRegexp.FindStringSubmatch(request.Name)\n\tif params != nil {\n\t\tfunctionCRName := params[1]\n\t\terr := r.client.Get(context.TODO(), types.NamespacedName{\n\t\t\tNamespace: request.Namespace,\n\t\t\tName: functionCRName,\n\t\t}, instance)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn reconcile.Result{}, nil\n\t\t\t}\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t} else {\n\t\terr := r.client.Get(context.TODO(), request.NamespacedName, instance)\n\t\tif err != nil {\n\t\t\tif errors.IsNotFound(err) {\n\t\t\t\treturn reconcile.Result{}, nil\n\t\t\t}\n\t\t\treturn reconcile.Result{}, err\n\t\t}\n\t}\n\n\tconfigMap, requeue, err := r.ensureConfigMap(instance)\n\tif requeue {\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\n\t_, requeue, err = r.ensureDeployment(instance)\n\tif requeue {\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\n\trequeue, err = r.ensureService(instance)\n\tif requeue {\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\n\trequeue, err = r.ensureHPA(instance)\n\tif requeue {\n\t\treturn reconcile.Result{Requeue: true}, err\n\t}\n\n\terr = r.checkUpdate(instance, configMap)\n\tif err != nil {\n\t\treturn reconcile.Result{}, err\n\t}\n\n\terr = r.updateStatus(instance)\n\tif err != nil {\n\t\tr.logger.Warn(\"Failed Update Status -> Requeue \")\n\t\treturn reconcile.Result{Requeue: true}, nil\n\t}\n\n\treturn reconcile.Result{}, nil\n}",
"func (e *EvaluatedFunctionExpression) Execute(ctx *Context, args *Values) Value {\n\tctx.SetParent(e.this) // this is how closure works\n\treturn e.fn.Execute(ctx, args)\n}",
"func (dtw dispatchTaskWrapper) Func() func(id int64) {\n return dtw.t.Func()\n}",
"func forwardAsync(fhandler *flowHandler, currentNodeId string, result []byte) ([]byte, error) {\n\tvar hash []byte\n\tstore := make(map[string]string)\n\n\t// get pipeline\n\tpipeline := fhandler.getPipeline()\n\n\t// Get pipeline state\n\tpipelineState := pipeline.GetState()\n\n\tdefaultStore, ok := fhandler.dataStore.(*requestEmbedDataStore)\n\tif ok {\n\t\tstore = defaultStore.store\n\t}\n\n\t// Build request\n\tuprequest := buildRequest(fhandler.id, string(pipelineState), fhandler.query, result, store)\n\n\t// Make request data\n\tdata, _ := uprequest.encode()\n\n\t// Check if HMAC used\n\tif hmacEnabled() {\n\t\tkey := getHmacKey()\n\t\thash = hmac.Sign(data, []byte(key))\n\t}\n\n\t// build url for calling the flow in async\n\thttpreq, _ := http.NewRequest(http.MethodPost, fhandler.asyncUrl, bytes.NewReader(data))\n\thttpreq.Header.Add(\"Accept\", \"application/json\")\n\thttpreq.Header.Add(\"Content-Type\", \"application/json\")\n\n\t// If hmac is enabled set digest\n\tif hmacEnabled() {\n\t\thttpreq.Header.Add(\"X-Hub-Signature\", \"sha1=\"+hex.EncodeToString(hash))\n\t}\n\n\t// extend req span for async call (TODO : Get the value)\n\tfhandler.tracer.extendReqSpan(fhandler.id, currentNodeId,\n\t\tfhandler.asyncUrl, httpreq)\n\n\tclient := &http.Client{}\n\tres, resErr := client.Do(httpreq)\n\tif resErr != nil {\n\t\treturn nil, resErr\n\t}\n\n\tdefer res.Body.Close()\n\tresdata, _ := ioutil.ReadAll(res.Body)\n\n\tif res.StatusCode != http.StatusOK && res.StatusCode != http.StatusAccepted {\n\t\treturn resdata, fmt.Errorf(res.Status)\n\t}\n\treturn resdata, nil\n}",
"func serverFunc(requireLease chan leaseGrantItem, exit chan struct{}) {\n\tvar version int64\n\tleaseTS := uint64(time.Now().UnixNano())\n\tfor {\n\t\tselect {\n\t\tcase requireLease <- leaseGrantItem{\n\t\t\tleaseGrantTS: leaseTS,\n\t\t\toldVer: version - 1,\n\t\t\tschemaVer: version,\n\t\t}:\n\t\t\tversion++\n\t\t\tleaseTS = uint64(time.Now().UnixNano())\n\t\tcase <-exit:\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (ini *Server_1To1_Init) Run(f func(*Server_1To1_1) *Server_1To1_End) {\n\n\tst1, err := ini.Init()\n\n\tif err != nil {\n\t\tlog.Fatalf(\"failed to initialise the session: %s\", err)\n\t}\n\n\tf(st1)\n}"
] | [
"0.5580895",
"0.54706144",
"0.5422586",
"0.5316484",
"0.5282471",
"0.51429075",
"0.5124593",
"0.5043788",
"0.50177395",
"0.49903253",
"0.49519637",
"0.4918952",
"0.49022597",
"0.48930693",
"0.48373848",
"0.48258403",
"0.4825075",
"0.48124862",
"0.48095956",
"0.48040572",
"0.47999182",
"0.4799393",
"0.47911218",
"0.47453237",
"0.4745125",
"0.47352841",
"0.4673451",
"0.46715152",
"0.46485597",
"0.46351779",
"0.46264967",
"0.46217737",
"0.4612354",
"0.46072468",
"0.4601189",
"0.4590388",
"0.45686933",
"0.45681897",
"0.45596865",
"0.455785",
"0.45503992",
"0.4547664",
"0.4543855",
"0.45431057",
"0.45329478",
"0.45282567",
"0.45279983",
"0.45145598",
"0.45138398",
"0.4492582",
"0.4490201",
"0.4459545",
"0.44589522",
"0.44488865",
"0.44416696",
"0.44290692",
"0.44269893",
"0.44119957",
"0.44009936",
"0.43997753",
"0.43849105",
"0.43800658",
"0.4364805",
"0.4363961",
"0.4354415",
"0.4337614",
"0.43306708",
"0.43299374",
"0.4328959",
"0.4321662",
"0.43168795",
"0.43163502",
"0.43154624",
"0.43150777",
"0.43129256",
"0.43007603",
"0.4293972",
"0.42921928",
"0.42866454",
"0.42802966",
"0.4280255",
"0.42731813",
"0.42464903",
"0.4241003",
"0.42346063",
"0.42343697",
"0.42303073",
"0.42287675",
"0.42274684",
"0.42218986",
"0.4221307",
"0.42129257",
"0.42050952",
"0.42049736",
"0.42034537",
"0.41934702",
"0.4185878",
"0.41783097",
"0.4176773",
"0.41583282"
] | 0.61029977 | 0 |
Init registers testing flags. These flags are automatically registered by the "go test" command before running test functions, so Init is only needed when calling functions such as Benchmark without using "go test". Init has no effect if it was already called. | func Init() {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func init() {\n\ttestEnv.Init()\n}",
"func InitFlags() {\n\tinitEnvFlag()\n}",
"func Init(cmd *cobra.Command) {\n\tapi.RegisterBenchmark(&benchEthBlockNumber{})\n\tapi.RegisterBenchmark(&benchNetVersion{})\n\tapi.RegisterBenchmark(&benchEthGetBlockByNumber{})\n}",
"func InitFlags() *FactoryOptions {\n\ttesting.Init()\n\t_, err := types.NewAttachedGinkgoFlagSet(flag.CommandLine, types.GinkgoFlags{}, nil, types.GinkgoFlagSections{}, types.GinkgoFlagSection{})\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\ttestOptions := &FactoryOptions{}\n\ttestOptions.BindFlags(flag.CommandLine)\n\tflag.Parse()\n\n\treturn testOptions\n}",
"func TestInit(t *testing.T) {\n\tt.Log(\"Initialization test\")\n\t//userlib.DebugPrint = true\n\t//\tsomeUsefulThings()\n\n\t//userlib.DebugPrint = false\n\tu, err := InitUser(\"alice\", \"fubar\")\n\tif err != nil {\n\t\t// t.Error says the test fails\n\t\tt.Error(\"Failed to initialize user\", err)\n\t}\n\t// t.Log() only produces output if you run with \"go test -v\"\n\tt.Log(\"Got username\", *u)\n\t// You probably want many more tests here.\n}",
"func init() {\n\tflag.StringVar(&globalSettings.WorkDir, \"istio.test.work_dir\", os.TempDir(),\n\t\t\"Local working directory for creating logs/temp files. If left empty, os.TempDir() is used.\")\n\tflag.StringVar((*string)(&globalSettings.Environment), \"istio.test.env\", string(globalSettings.Environment),\n\t\tfmt.Sprintf(\"Specify the environment to run the tests against. Allowed values are: [%s, %s]\",\n\t\t\tLocal, Kubernetes))\n\tflag.BoolVar(&globalSettings.NoCleanup, \"istio.test.noCleanup\", globalSettings.NoCleanup,\n\t\t\"Do not cleanup resources after test completion\")\n\n\tglobalSettings.LogOptions.AttachFlags(\n\t\tfunc(p *[]string, name string, value []string, usage string) {\n\t\t\t// TODO(ozben): Implement string array method for capturing the complete set of log settings.\n\t\t},\n\t\tflag.StringVar,\n\t\tflag.IntVar,\n\t\tflag.BoolVar)\n}",
"func (suite *BinPackingTestSuite) TestInit() {\n\tsuite.Equal(3, len(rankers))\n\tsuite.NotNil(rankers[DeFrag])\n\tsuite.Equal(rankers[DeFrag].Name(), DeFrag)\n\tsuite.NotNil(rankers[FirstFit])\n\tsuite.Equal(rankers[FirstFit].Name(), FirstFit)\n\tsuite.NotNil(rankers[LoadAware])\n\tsuite.Equal(rankers[LoadAware].Name(), LoadAware)\n}",
"func (b *TestDriver) Init() (err error) {\n\tlog.Println(\"Init Drone\")\n\treturn\n}",
"func (b *KRMBlueprintTest) Init(assert *assert.Assertions) {\n\tb.init(assert)\n}",
"func Init() {\n\tflag.Set(\"logtostderr\", \"true\")\n\tlogf.SetLogger(logf.ZapLogger(true))\n\n\tinitialized = true\n}",
"func Init(pluginRegistry *pluginregistry.PluginRegistry, log xcontext.Logger) {\n\n\t// Register TargetManager plugins\n\tfor _, tmloader := range targetManagers {\n\t\tif err := pluginRegistry.RegisterTargetManager(tmloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// Register TestFetcher plugins\n\tfor _, tfloader := range testFetchers {\n\t\tif err := pluginRegistry.RegisterTestFetcher(tfloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// Register TestStep plugins\n\tfor _, tsloader := range testSteps {\n\t\tif err := pluginRegistry.RegisterTestStep(tsloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\n\t\t}\n\t}\n\n\t// Register Reporter plugins\n\tfor _, rfloader := range reporters {\n\t\tif err := pluginRegistry.RegisterReporter(rfloader()); err != nil {\n\t\t\tlog.Fatalf(\"%v\", err)\n\t\t}\n\t}\n\n\t// user-defined function registration\n\ttestInitOnce.Do(func() {\n\t\tfor _, userFunction := range userFunctions {\n\t\t\tfor name, fn := range userFunction {\n\t\t\t\tif err := test.RegisterFunction(name, fn); err != nil {\n\t\t\t\t\tlog.Fatalf(\"%v\", err)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n}",
"func InitTesting(t *testing.T, classname string, reportname string) {\n\tRegisterFailHandler(Fail)\n\tRunSpecsWithDefaultAndCustomReporters(t, classname, reporter.GetReporters(reportname))\n\tloki.SendLokiMarker(\"Start of test \" + classname)\n}",
"func init() {\n\tSetup()\n}",
"func Init(debugMode bool) {\n\tdbgMode = debugMode\n}",
"func init() {\n\texutil.InitTest()\n}",
"func (l *TKGFlags) Init(c *cobraLintConfig) {\n\tl.cmd = c.cmd.Parent()\n\tl.cmdFlags = c.cliTerms.CmdFlags\n\tl.globalFlags = c.cliTerms.GlobalFlags\n\tr := make(Results)\n\tl.results = &r\n}",
"func (l *Logger) init() {\r\n\t// Set Testing flag to TRUE if testing detected\r\n\tl.Options.Testing = (flag.Lookup(\"test.v\") != nil)\r\n\r\n\tl.timeReset()\r\n\tl.started = l.timer\r\n\tinitColors()\r\n\tinitFormatPlaceholders()\r\n}",
"func Init() {\n\tflag.Parse()\n\tdefer sklog.Flush()\n\tflag.VisitAll(func(f *flag.Flag) {\n\t\tsklog.Infof(\"Flags: --%s=%v\", f.Name, f.Value)\n\t})\n\n\t// See skbug.com/4386 for details on why the below section exists.\n\tsklog.Info(\"Initializing logging for log level INFO.\")\n\tsklog.Warning(\"Initializing logging for log level WARNING.\")\n\tsklog.Error(\"Initializing logging for log level ERROR.\")\n\n\t// Use all cores.\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}",
"func init() {\n\t// We register a flag to get it shown in the default usage.\n\t//\n\t// We don't actually use the parsed flag value, though, since that would require us to call\n\t// flag.Parse() here. If we call flag.Parse(), then higher-level libraries can't easily add\n\t// their own flags, since testing's t.Run() will not re-run flag.Parse() if the flags have\n\t// already been parsed.\n\t//\n\t// Instead, we simply look for our flag text in os.Args.\n\n\tflag.Bool(\"help-docket\", false, \"get help on docket\")\n\n\tfor _, arg := range os.Args {\n\t\tif arg == \"-help-docket\" || arg == \"--help-docket\" {\n\t\t\twriteHelp(os.Stderr)\n\n\t\t\tconst helpExitCode = 2 // This matches what 'go test -h' returns.\n\t\t\tos.Exit(helpExitCode)\n\t\t}\n\t}\n}",
"func Init() error {\n\tif CPUProfileFlag != \"\" {\n\t\tf, err := os.Create(CPUProfileFlag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = pprof.StartCPUProfile(f)\n\t\tif err != nil {\n\t\t\t_ = f.Close()\n\t\t\treturn err\n\t\t}\n\t\tcpuProfileFile = f\n\t}\n\tif HeapProfileFlag != \"\" {\n\t\tf, err := os.Create(HeapProfileFlag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\theapProfileFile = f\n\t}\n\tif ThreadProfileFlag != \"\" {\n\t\tf, err := os.Create(ThreadProfileFlag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tthreadProfileFile = f\n\t}\n\tif BlockProfileFlag != \"\" {\n\t\tf, err := os.Create(BlockProfileFlag)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tblockProfileFile = f\n\t}\n\treturn nil\n}",
"func Init() {\n\n\t// Verbose logging\n\tlog.SetFlags(log.Lshortfile)\n\n\t// Use all available cores\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}",
"func (c *commonFlags) Init() {\n\tc.defaultFlags.Init(&c.Flags)\n\tc.Flags.StringVar(&c.serverURL, \"server\", os.Getenv(\"SWARMING_SERVER\"), \"Server URL; required. Set $SWARMING_SERVER to set a default.\")\n}",
"func Init() {\n\t// noop for now\n}",
"func Init() {\n\t// noop for now\n}",
"func (scanner *Scanner) Init(flags zgrab2.ScanFlags) error {\n\tf, _ := flags.(*Flags)\n\tscanner.config = f\n\treturn nil\n}",
"func (env *TestEnv) Init(t *testing.T, testLedgerID string) {\n\tenv.t = t\n\tenv.DBEnv.Init(t)\n\tenv.DB = env.DBEnv.GetDBHandle(testLedgerID)\n\tenv.TStoreEnv = transientstore.NewTestStoreEnv(t)\n\tenv.Txmgr = NewLockbasedTxMgr(env.DB)\n\tenv.TStore = env.TStoreEnv.TestStore\n}",
"func (f *Factory) Init() {\n\tif f.Name == \"\" {\n\t\tf.Name = \"usage\"\n\t}\n\tif f.HelpFlag == \"\" {\n\t\tf.HelpFlag = \"h\"\n\t}\n\tflag.BoolVar(&f.Help, f.HelpFlag, false, f.GetDescription())\n}",
"func Init() {\n\tonce.Do(initialize)\n}",
"func (b *KRMBlueprintTest) DefineInit(init func(*assert.Assertions)) {\n\tb.init = init\n}",
"func InitHelpers(t *testing.T) {\n\tinitHelpers(t)\n}",
"func Init() {\n\tdocker.Init()\n\thost.Init()\n\tlabel.Init()\n\tospackages.Init()\n\tdiff.Init()\n\tcontainer.Init()\n}",
"func (b *Base) Init(c Config) {\n\tb.Config = c\n\tb.Flags.SetOutput(ioutil.Discard)\n\tb.flHelp = b.Flags.Bool([]string{\"h\", \"-help\"}, false, \"Print usage\")\n}",
"func (c *Subcommand) InitFlags(flags *flag.FlagSet) {\n\tif c.flagFn != nil {\n\t\tc.flagFn(flags)\n\t}\n}",
"func (g *Gorc) Init() {\n\tg.Lock()\n\tg.count = 0\n\tg.waitMillis = 100 * time.Millisecond\n\tg.Unlock()\n}",
"func Init() {\n\tGlobalStats = make(map[Type]time.Duration)\n\tCountStats = make(map[Type]int)\n\tmutex = &sync.RWMutex{}\n}",
"func TestInit(t *testing.T) {\n\tctx := getTestContextCopy(t, filepath.Join(\"testdata\", \"init\"))\n\tdefer os.RemoveAll(ctx.GOPATH)\n\tpkgDir := filepath.Join(ctx.GOPATH, \"src\", \"example.com\", \"x\")\n\terr := initc(ctx, pkgDir, \"lib\", false, false)\n\tif err != nil {\n\t\tt.Errorf(\"error during init : %s\", err.Error())\n\t\tt.FailNow()\n\t}\n\t// Test that the import paths updated.\n\ttestImports(t, pkgDir,\n\t\t[]string{\"example.com/x/lib/a\", \"example.com/x/lib/b\"}, false)\n\t// Test that child import path not updated.\n\tchildPkgDir := filepath.Join(pkgDir, \"z\")\n\ttestImports(t, childPkgDir,\n\t\t[]string{\"other.com/y/a1\", \"other.com/y/c\"}, false)\n\t// Test that copied packages build.\n\taDir := filepath.Join(pkgDir, \"lib\", \"a\")\n\ttestBuild(t, aDir)\n\tbDir := filepath.Join(pkgDir, \"lib\", \"b\")\n\ttestBuild(t, bDir)\n}",
"func init() {\n\t// NOTE: reminder that flag.Parse will be called by `go test`, so we don't need to call it here.\n\tflagConfig = flag.String(\"cbtest.config\", \"cbtest.json\", \"Path to the config file to use (credentials mostly)\")\n\tflagConfigOut = flag.String(\"cbtest.config-out\", \"\", \"Path to write the config to\")\n\tflagPlatformURL = flag.String(\"cbtest.platform-url\", \"\", \"Platform URL to use\")\n\tflagMessagingURL = flag.String(\"cbtest.messaging-url\", \"\", \"Messaging URL to use\")\n\tflagRegistrationKey = flag.String(\"cbtest.registration-key\", \"\", \"Registration key to use when creating developers\")\n\tflagSystemKey = flag.String(\"cbtest.system-key\", \"\", \"System key to use\")\n\tflagSystemSecret = flag.String(\"cbtest.system-secret\", \"\", \"System secret to use\")\n\tflagDevEmail = flag.String(\"cbtest.dev-email\", \"\", \"Developer email to use\")\n\tflagDevPassword = flag.String(\"cbtest.dev-password\", \"\", \"Developer password to use\")\n\tflagUserEmail = flag.String(\"cbtest.user-email\", \"\", \"User email to use\")\n\tflagUserPassword = flag.String(\"cbtest.user-password\", \"\", \"User password to use\")\n\tflagDeviceName = flag.String(\"cbtest.device-name\", \"\", \"Device name to use\")\n\tflagDeviceActiveKey = flag.String(\"cbtest.device-active-key\", \"\", \"Device active key to use\")\n\tflagImportUsers = flag.Bool(\"cbtest.import-users\", true, \"Whenever users should be imported\")\n\tflagImportRows = flag.Bool(\"cbtest.import-rows\", true, \"Whenever rows should be imported\")\n}",
"func init() {\n\tsetUpConfig()\n\tsetUpUsingEnv()\n}",
"func (i *InvariantsChecker) Init() {\n\ti.initStatus = colexecop.OperatorInitialized\n\ti.Input.Init()\n}",
"func Init(defaultMatrix func() Matrix, f FixtureFactory, beforeAll func() error) *Supervisor {\n\trunRealTests := !(Flags.PrintMatrix || Flags.PrintDimensions)\n\tif Flags.PrintDimensions {\n\t\tdefaultMatrix().PrintDimensions()\n\t}\n\tif runRealTests {\n\t\tif err := beforeAll(); err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\treturn NewSupervisor(f)\n\t}\n\treturn nil\n}",
"func Init(flags ...InitFlag) InitFlag {\n\tvar f InitFlag\n\tfor i := range flags {\n\t\tf |= flags[i]\n\t}\n\treturn InitFlag(C.IMG_Init(C.int(f)))\n}",
"func Init(seed int) {\n\tif seed <= 0 {\n\t\tseed = int(time.Now().Unix())\n\t}\n\trand.Seed(int64(seed))\n}",
"func Init(opts ...Option) error {\n\treturn std.Init(opts...)\n}",
"func Init(testing bool, token string) {\n\tif testing {\n\t\tapiURL = \"https://api.staging.tauros.io\"\n\t}\n\tapiToken = token\n}",
"func (c *MigrationsCmd) Init() {\n\tc.common.SetArgs(c.CheckArgs)\n\tc.common.SetRun(c.Run)\n}",
"func Init() {\n\trand.Seed(time.Now().UnixNano())\n}",
"func (c *Context) Init() {\n\tc.Logger = zerolog.New(zerolog.ConsoleWriter{Out: os.Stdout}).With().Timestamp().Logger()\n\tc.Logger = c.Logger.Hook(zerolog.HookFunc(c.getMemoryUsage))\n\n\tc.inShutdownMutex.Lock()\n\tc.inShutdown = false\n\tc.inShutdownMutex.Unlock()\n\n\tc.RandomSource = rand.New(rand.NewSource(time.Now().Unix()))\n\n\tc.Logger.Info().Msgf(\"LBTDS v. %s is starting...\", VERSION)\n}",
"func Init() {\n\tbtc()\n\tbch()\n\teth()\n\tltc()\n\tusdt()\n\n\tada()\n}",
"func Init() {\n\trand.Seed(time.Now().Unix())\n}",
"func (t *BenchmarkerChaincode) Init(stub shim.ChaincodeStubInterface) pb.Response {\n\targs := stub.GetStringArgs()\n\n\tif len(args) != 1 {\n\t\treturn shim.Error(fmt.Sprintf(\"Incorrect number of arguments. Expecting 1. You gave %+v\", args))\n\t}\n\n\treturn shim.Success(nil)\n}",
"func Init() {\n\n\tprometheus.MustRegister(FunctionDurations)\n\tprometheus.MustRegister(FunctionCountTotal)\n\n}",
"func Init() {\n\tinitOnce.Do(func() {\n\t\tflag.Var(&resourceFiles, \"res_files\", \"Resource files and asset directories to parse.\")\n\t\tflag.StringVar(&rPbOutput, \"out\", \"\", \"Path to the output proto file.\")\n\t\tflag.StringVar(&pkg, \"pkg\", \"\", \"Java package name.\")\n\t})\n}",
"func (t *TestHandler) Init() error {\n\tfmt.Println(\"TestHandler.Init\")\n\treturn nil\n}",
"func (t *TestHandler) Init() error {\n\tfmt.Println(\"TestHandler.Init\")\n\treturn nil\n}",
"func Init() {\n\tcreateDB(\"backendtest\")\n\tuseDB(\"backendtest\")\n\tCreateUserTable()\n\tCreateEventTable()\n\tCreateAddFriendTable()\n}",
"func GitInit(tb testing.TB) {\n\ttb.Helper()\n\tout, err := fakeGit(\"init\")\n\trequire.NoError(tb, err)\n\trequire.Contains(tb, out, \"Initialized empty Git repository\")\n\trequire.NoError(tb, err)\n\tGitCheckoutBranch(tb, \"main\")\n\t_, _ = fakeGit(\"branch\", \"-D\", \"master\")\n}",
"func (b *TestdriveCmd) Init() *cobra.Command {\n\treturn &cobra.Command{\n\t\tUse: \"testdrive\",\n\t\tShort: \"Start a guided tour of Atlantis\",\n\t\tRunE: func(cmd *cobra.Command, args []string) error {\n\t\t\terr := testdrive.Start()\n\t\t\tif err != nil {\n\t\t\t\tfmt.Fprintf(os.Stderr, \"\\033[31mError: %s\\033[39m\\n\\n\", err.Error())\n\t\t\t}\n\t\t\treturn err\n\t\t},\n\t\tSilenceErrors: true,\n\t}\n}",
"func InitTest() {\n\tssid = os.Getenv(\"SSID\")\n\tif len(ssid) == 0 {\n\t\tssid = \"NO_SSID\"\n\t}\n\tpwd = os.Getenv(\"PASSWORD\")\n\tif len(pwd) == 0 {\n\t\tpwd = \"NO_PWD\"\n\t}\n\tm_id = os.Getenv(\"MODULE_ID\")\n\tif len(m_id) == 0 {\n\t\tm_id = \"NO_MODULE_ID\"\n\t}\n\tfmt.Printf(\"SSID=\\\"%s\\\", PWD=\\\"%s\\\" MID=\\\"%s\\\"\\n\", ssid, pwd, m_id)\n}",
"func Init() {\n\tglobalStats = make(map[Type]time.Duration)\n\tminMaxTimeStats = make(map[Type]minMaxTime)\n\tcountStats = make(map[Type]int)\n\tmutex = &sync.RWMutex{}\n}",
"func Init() {\n\tif *version {\n\t\tprintVersion()\n\t\tos.Exit(0)\n\t}\n}",
"func (i *Instance) Init() {\n\ti.DirExecStatusMap = makeStatusMap()\n\ti.initState()\n}",
"func InitFlags(c *Config) {\n\tflag.BoolVar(&c.Stdout, \"stdout\", c.Stdout, \"send to STDOUT\")\n\tflag.StringVar(&c.Chdir, \"chdir\", c.Chdir, \"change the working directory\")\n\tflag.StringVar(&c.SkipStr, \"skip\", c.SkipStr, \"disable preset collectors. i.e: \\\"-skip=cpu,disk\\\"\")\n\tflag.IntVar(&c.Freq, \"frequency\", c.Freq, \"collection frequency in seconds. set to >0 to repeat\")\n\tflag.IntVar(&c.CollectorTimeout, \"collection-timeout\", c.CollectorTimeout, \"specify collection timeout in seconds\")\n\tflag.StringVar(&c.Destination, \"destination\", c.Destination, \"send data to server. i.e: \\\"-destination=tcp:localhost:12345\\\"\")\n\tflag.BoolVar(&c.DryRun, \"dry-run\", c.DryRun, \"validate environment setting to run collections\")\n\tflag.IntVar(&c.WaitTime, \"retrywait\", c.WaitTime, \"wait time in seconds before reconnect to destination\")\n\tflag.IntVar(&c.Duration, \"duration\", c.Duration, \"number of seconds to run the agent for. 0 for non-stop\")\n}",
"func (p *Prometheus) Init() {\n\tp.fileApplyCount = prometheus.NewCounterVec(prometheus.CounterOpts{\n\t\tName: \"file_apply_count\",\n\t\tHelp: \"Success metric for every file applied\",\n\t},\n\t\t[]string{\n\t\t\t// Path of the file that was applied\n\t\t\t\"file\",\n\t\t\t// Result: true if the apply was successful, false otherwise\n\t\t\t\"success\",\n\t\t},\n\t)\n\tp.runLatency = prometheus.NewSummaryVec(prometheus.SummaryOpts{\n\t\tName: \"run_latency_seconds\",\n\t\tHelp: \"Latency for completed apply runs\",\n\t},\n\t\t[]string{\n\t\t\t// Result: true if the run was successful, false otherwise\n\t\t\t\"success\",\n\t\t},\n\t)\n\n\tprometheus.MustRegister(p.fileApplyCount)\n\tprometheus.MustRegister(p.runLatency)\n}",
"func init() {\n\tInitTests(false)\n\tif *logLevel == \"debug\" {\n\t\t//*logLevel = \"debug\"\n\t\tcore.DebugRequests = true\n\t}\n}",
"func (si *ScanIterator) Init() {\n\t// Initialization\n}",
"func Init() {\n\tC.al_init()\n}",
"func (a *Analytics) Init() {\n\tflag.IntVar(&a.lowerBound, \"l\", 100, \"lower size of input\")\n\tflag.IntVar(&a.upperBound, \"u\", 10000, \"upper size of input\")\n\tflag.IntVar(&a.step, \"s\", 100, \"step of increasing input size\")\n\tflag.IntVar(&a.repetitions, \"r\", 1000, \"number of repetitions for given input size\")\n\n\tflag.Parse()\n}",
"func Init() {\n\tif initialized {\n\t\treturn\n\t}\n\tinitialized = true\n\tpopulatecnamechain()\n\tensureresourcefinder()\n\tloadphantomjs()\n}",
"func Init(state *core.BuildState) {\n\ttheFilegroupBuilder = &filegroupBuilder{\n\t\tbuilt: map[string]bool{},\n\t}\n\tstate.TargetHasher = newTargetHasher(state)\n}",
"func Init() {\n\tfmt.Println(fmt.Sprintf(\"Frain version %s\", Version))\n\tfmt.Println(\"\\nA status checker for various developer tools.\")\n}",
"func TestInit(t *testing.T) {\n\tfmt.Println(\"Entering the test method for Init\")\n\tprovcc := new(SimpleAsset)\n\tstub := shim.NewMockStub(\"ANY_PARAM\", provcc)\n\tcheckInit(t, stub, [][]byte{[]byte(\"init\")})\n}",
"func Init() {\n\tdebug := DebugMode()\n\tlogutil.InitLog(\"\", debug)\n\tsignal.RegisterKnownSignals()\n\texplorepolicy.RegisterKnownExplorePolicies()\n}",
"func initMode() {\n\tmode = NormalModeFlag\n\tif os.Getenv(\"GS_RECORD_MODE\") != \"\" {\n\t\tmode |= RecordModeFlag\n\t}\n\tif os.Getenv(\"GS_REPLAY_MODE\") != \"\" {\n\t\tmode |= ReplayModeFlag\n\t}\n\tfor _, arg := range os.Args {\n\t\tif strings.HasPrefix(arg, \"-test.\") {\n\t\t\tmode |= TestModeFlag\n\t\t\tbreak\n\t\t}\n\t}\n}",
"func (t *CCHandler) Init() error {\n\tlog.Info(\"TestHandler.Init\")\n\treturn nil\n}",
"func (n Noop) Init(_ int) error {\n\treturn nil\n}",
"func (t *ObjectTree) Init(flags byte) *ObjectTree {\n\tt.Tree.Init(objectCompare, flags)\n\treturn t\n}",
"func InitTestData(ctx context.Context) error {\n\terr := AddUserTest(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = AddTaskTest(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = AddCommentTest(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn err\n}",
"func InitFlags() {\n\n pflag.CommandLine.SetNormalizerFunc(WordSepNormalizeFunc)\n pflag.CommandLine.AddGoFlagSet(goflag.ComamndLine)\n pflag.Parse()\n pflag.VisitAll(func(flag *pflag.Flag)) {\n \tglog.V(2).Infof(\"FLAG: --%s=%q\", flag.Name, flag.Value)\n }\n}",
"func (_m *MockSeriesIteratorPool) Init() {\n\t_m.ctrl.Call(_m, \"Init\")\n}",
"func (_m *MockMutableSeriesIteratorsPool) Init() {\n\t_m.ctrl.Call(_m, \"Init\")\n}",
"func Initialize() {\n\tfor cmd, fs := range commandFlagSets {\n\t\tcommandPathFlagSets[cmd.CommandPath()] = fs\n\t}\n}",
"func Init() {\n\tlog.SetLevel(log.TraceLevel) // This is not the log level for logs, this just asserts that hooks with all levels can\n\t// be triggered\n\n\tlog.SetFormatter(\n\t\t&log.TextFormatter{\n\t\t\tDisableColors: true,\n\t\t\tForceQuote: true,\n\t\t\tFullTimestamp: true,\n\t\t},\n\t)\n\tSetOutput()\n}",
"func init() {\n\tflag.StringVar(&KubectlPath, \"kubectl-path\", \"\", \"Path to the kubectl binary\")\n\tflag.StringVar(&ClusterctlPath, \"clusterctl-path\", \"\", \"Path to the clusterctl binary\")\n\tflag.StringVar(&DumpPath, \"dump-path\", \"\", \"Path to the kubevirt artifacts dump cmd binary\")\n\tflag.StringVar(&WorkingDir, \"working-dir\", \"\", \"Path used for e2e test files\")\n}",
"func (l *LogDebugger) Init() error {\r\n\tl.counter = 0\r\n\tl.start = time.Now()\r\n\tif l.Output == nil {\r\n\t\tl.Output = os.Stderr\r\n\t}\r\n\tl.logger = log.New(l.Output, l.Prefix, l.Flag)\r\n\treturn nil\r\n}",
"func init() {\n\n\t// if we have a debug environment var we'll proceed\n\tif isDebug := os.Getenv(\"DEBUG\"); isDebug != \"\" {\n\n\t\t// try to convert the debug setting to an int, if it's an integer then any\n\t\t// positive number will enable debugging. Values =< 0 will not.\n\t\tdebugInt, err := strconv.ParseInt(isDebug, 10, 0)\n\t\tif err != nil {\n\n\t\t\t// looks like DEBUG is a string. We'll upper case it and check to see\n\t\t\t// if it matches one of our truthy contants\n\t\t\tdebugStr := strings.ToUpper(isDebug)\n\t\t\tif debugStr == y || debugStr == t {\n\t\t\t\tdebugInt = 1\n\t\t\t}\n\t\t}\n\n\t\t// if we found a truthy value, enable debug logging and source line display\n\t\tif debugInt == 1 {\n\t\t\tenableDebug = true\n\t\t\tenableSourceInfo = true\n\t\t}\n\t}\n}",
"func init() {\n\taddBuiltinFns(map[string]interface{}{\n\t\t\"nop\": nop,\n\t\t\"kind-of\": kindOf,\n\t\t\"constantly\": constantly,\n\n\t\t// Introspection\n\t\t\"call\": call,\n\t\t\"resolve\": resolve,\n\t\t\"eval\": eval,\n\t\t\"use-mod\": useMod,\n\n\t\t\"deprecate\": deprecate,\n\n\t\t// Time\n\t\t\"sleep\": sleep,\n\t\t\"time\": timeCmd,\n\n\t\t\"-ifaddrs\": _ifaddrs,\n\t})\n\n\t// For rand and randint.\n\trand.Seed(time.Now().UTC().UnixNano())\n}",
"func (c *CentralCacheTestImpl) Init(conf Config) {\n\tc.baseUrl = conf.Host\n\tc.keyPrefix = conf.KeyPrefix\n\tc.dumpFilePath = conf.DumpFilePath\n\tc.expirySec = conf.ExpirySec\n\tc.file = nil\n}",
"func Init() {\n\tconf := config.GetConfig()\n\tvar err error\n\n\tLog, err = plivolog.New()\n\tif conf.GetString(\"general.config\") == \"test\" {\n\t\tLog.Info(\"For testing environment err will be ignored\")\n\t\treturn\n\t}\n\tif err != nil {\n\t\tpanic(\"logger could not be initialized. Error \" + err.Error())\n\t}\n}",
"func (_m *MockIteratorArrayPool) Init() {\n\t_m.ctrl.Call(_m, \"Init\")\n}",
"func Init() {\n\targs, errs := options.Parse(optMap)\n\n\tif len(errs) != 0 {\n\t\tfmtc.Println(\"Arguments parsing errors:\")\n\n\t\tfor _, err := range errs {\n\t\t\tfmtc.Printf(\" %s\\n\", err.Error())\n\t\t}\n\n\t\tos.Exit(1)\n\t}\n\n\tif options.Has(OPT_COMPLETION) {\n\t\tgenCompletion()\n\t}\n\n\tif options.GetB(OPT_NO_COLOR) {\n\t\tfmtc.DisableColors = true\n\t}\n\n\tif options.GetB(OPT_VER) {\n\t\tshowAbout()\n\t\treturn\n\t}\n\n\tif options.GetB(OPT_HELP) || len(args) == 0 {\n\t\tshowUsage()\n\t\treturn\n\t}\n\n\tswitch len(args) {\n\tcase 1:\n\t\tprocess(args[0], \"\")\n\tcase 2:\n\t\tprocess(args[0], args[1])\n\tdefault:\n\t\tshowUsage()\n\t}\n}",
"func Init() error {\n\tif err := view.Register(views...); err != nil {\n\t\treturn fmt.Errorf(\"register metric views: %v\", err)\n\t}\n\treturn nil\n}",
"func (gb *GameBoy) Init(noBoot bool) {\n\tgb.CPU.Init(noBoot)\n\tgb.Timer.Init(noBoot)\n\tgb.APU.Init(noBoot)\n\tgb.PPU.Init(noBoot)\n\n\t// MMU should be initialized last, because it disables the bootrom flag and sets the gbc to dmg mode if needed.\n\tgb.MMU.Init(noBoot)\n}",
"func (_m *MockMultiReaderIteratorPool) Init(alloc ReaderIteratorAllocate) {\n\t_m.ctrl.Call(_m, \"Init\", alloc)\n}",
"func (cmd *GetUserListCmd) Init() {\n\tcmd.cmd = \"getuserlist\"\n\tcmd.flagSet = flag.NewFlagSet(cmd.cmd, flag.ExitOnError)\n}",
"func (t *SBITransaction) Init(stub shim.ChaincodeStubInterface, function string, args []string) ([]byte, error) {\n\tfmt.Println(\"Inside INIT for test chaincode\")\n\treturn nil, nil\n}",
"func DoInit() {\n\tSetLogger(utils.InitializeLogging(\"gpbackup\", \"\"))\n\tinitializeFlags()\n}",
"func InitFlags(flagset *flag.FlagSet) {\n\tif flagset == nil {\n\t\tflagset = flag.CommandLine\n\t}\n\n\tcommandLine.VisitAll(func(f *flag.Flag) {\n\t\tflagset.Var(f.Value, f.Name, f.Usage)\n\t})\n}",
"func init() {\n\n\tcobra.OnInitialize(func() {\n\t\tconfig.Initialize(rootCmd)\n\t})\n\n\tconfig.InitializeFlags(rootCmd)\n}",
"func Setup() {\n\t// Setup all variables.\n\t// Setting up all the variables first will allow px\n\t// to initialize the init functions in any order\n\tfor _, v := range varInitFncs {\n\t\tv()\n\t}\n\n\t// Call all plugin inits\n\tfor _, f := range cmdInitFncs {\n\t\tf()\n\t}\n}",
"func Init(debug bool) {\n\t// Connect to the database\n\tInitDB(debug)\n}",
"func TestInit(t *testing.T) {\n\ts := &MSTeams{}\n\texpectedError := fmt.Errorf(msteamsErrMsg, \"Missing MS teams webhook URL\")\n\n\tvar Tests = []struct {\n\t\tms config.MSTeams\n\t\terr error\n\t}{\n\t\t{config.MSTeams{WebhookURL: \"somepath\"}, nil},\n\t\t{config.MSTeams{}, expectedError},\n\t}\n\n\tfor _, tt := range Tests {\n\t\tc := &config.Config{}\n\t\tc.Handler.MSTeams = tt.ms\n\t\tif err := s.Init(c); !reflect.DeepEqual(err, tt.err) {\n\t\t\tt.Fatalf(\"Init(): %v\", err)\n\t\t}\n\t}\n}"
] | [
"0.70790935",
"0.6898078",
"0.68355215",
"0.67986417",
"0.6779053",
"0.6766589",
"0.66843873",
"0.66655123",
"0.66454095",
"0.66173506",
"0.66151476",
"0.6604759",
"0.6604005",
"0.659418",
"0.65715605",
"0.6545239",
"0.6523597",
"0.6521723",
"0.65161896",
"0.6418926",
"0.6413366",
"0.6385511",
"0.63851655",
"0.63851655",
"0.6362853",
"0.63570917",
"0.63565135",
"0.6353314",
"0.6345383",
"0.6340381",
"0.6323532",
"0.6316879",
"0.62966007",
"0.6253997",
"0.6246088",
"0.6236081",
"0.6227082",
"0.6215958",
"0.61981714",
"0.61917067",
"0.6181497",
"0.6152995",
"0.6152325",
"0.6137197",
"0.6132923",
"0.613029",
"0.61178625",
"0.61141676",
"0.6110611",
"0.61086905",
"0.61068034",
"0.6102304",
"0.6101432",
"0.6101432",
"0.6096698",
"0.60841995",
"0.6050632",
"0.60399824",
"0.60314906",
"0.60197526",
"0.6004671",
"0.60035026",
"0.59919745",
"0.5991007",
"0.59892637",
"0.597448",
"0.59738785",
"0.5955019",
"0.59507155",
"0.5942281",
"0.5935893",
"0.59261847",
"0.5925932",
"0.5922448",
"0.5913354",
"0.590939",
"0.5906992",
"0.59014666",
"0.5898875",
"0.5895867",
"0.5895371",
"0.5894949",
"0.588989",
"0.5887871",
"0.5884973",
"0.5881444",
"0.5876653",
"0.5867909",
"0.5864168",
"0.5863663",
"0.5855626",
"0.58549345",
"0.5848363",
"0.58461285",
"0.58419317",
"0.5841162",
"0.58337545",
"0.58322924",
"0.5821002",
"0.58198756",
"0.5815449"
] | 0.0 | -1 |
Short reports whether the test.short flag is set. | func Short() bool {
// possible: panic("testing: Short called before Init")
// possible: panic("testing: Short called before Parse")
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func Short() bool",
"func TestHelloShort(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"skipped\")\n\t}\n\n\t// 後続の時間のかかるテスト\n}",
"func (field *Field) SetShort() {\n\tfield.Short = field.Get(\"short\")\n}",
"func isShortFlag(value string) bool {\n\treturn isFlag(value) && len(value) == 2 && !strings.HasPrefix(value, \"--\")\n}",
"func (m *MockCommandScaffold) Short() string {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Short\")\n\tret0, _ := ret[0].(string)\n\treturn ret0\n}",
"func (s Status) ShortPrint() string {\n\tsflag := fmt.Sprintf(\"Status: 0x%x\\n\", s.Header)\n\tsflag += fmt.Sprintf(\" session:%d\\n\", s.Session)\n\tif sarflags.GetStr(s.Header, \"reqtstamp\") == \"yes\" {\n\t\tsflag += fmt.Sprintf(\" timestamp:%s\\n\", s.Tstamp.Print())\n\t}\n\tsflag += fmt.Sprintf(\" errcode:%s\\n\", sarflags.GetStr(s.Header, \"errcode\"))\n\tsflag += fmt.Sprintf(\" progress:%d\\n\", s.Progress)\n\tsflag += fmt.Sprintf(\" inrespto:%d\\n\", s.Inrespto)\n\tsflag += fmt.Sprintf(\" numb holes:%d\", len(s.Holes))\n\treturn sflag\n}",
"func Short(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldShort), v))\n\t})\n}",
"func (a TaskType) Short() string {\n\tn, ok := shortNames[a]\n\tif !ok {\n\"KNU\" nruter\t\t\n\t}\n\n\treturn n\n}",
"func Short() string {\n\treturn version\n}",
"func (mr *MockCommandScaffoldMockRecorder) Short() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Short\", reflect.TypeOf((*MockCommandScaffold)(nil).Short))\n}",
"func Short() string {\n\treturn fmt.Sprintf(\"%s-%s\", _buildVersion, _buildGitRevision)\n}",
"func (v *RawWriteCFValue) HasShortValue() bool {\n\treturn len(v.shortValue) > 0\n}",
"func (suo *StateUpdateOne) SetShort(s string) *StateUpdateOne {\n\tsuo.mutation.SetShort(s)\n\treturn suo\n}",
"func (t *Type) ShortString(pstate *PackageState) string {\n\treturn pstate.Tconv(t, pstate.FmtLeft, pstate.FErr, 0)\n}",
"func VerboseTest() bool {\n\tflag := flag.Lookup(\"test.v\")\n\treturn flag != nil && flag.Value.String() == \"true\"\n}",
"func (d Data) ShortPrint() string {\n\tsflag := fmt.Sprintf(\"Data: 0x%x\\n\", d.Header)\n\tif sarflags.GetStr(d.Header, \"reqtstamp\") == \"yes\" {\n\t\tsflag += fmt.Sprintf(\" tstamp:%s\\n\", d.Tstamp.Print())\n\t}\n\tsflag += fmt.Sprintf(\" session:%d,\", d.Session)\n\tsflag += fmt.Sprintf(\" offset:%d,\", d.Offset)\n\tsflag += fmt.Sprintf(\" paylen:%d\", len(d.Payload))\n\treturn sflag\n}",
"func (k *KeyRecord) Short() string {\n\treturn k.Content\n}",
"func Short() string {\n\treturn Generate()[0:8]\n}",
"func (su *StateUpdate) SetShort(s string) *StateUpdate {\n\tsu.mutation.SetShort(s)\n\treturn su\n}",
"func ExampleShortDuration() {\n\th, m, s := 5*time.Hour, 4*time.Minute, 3*time.Second\n\tds := []time.Duration{\n\t\th + m + s, h + m, h + s, m + s, h, m, s,\n\t}\n\n\tfmt.Println(\"Default | Short |\")\n\tfmt.Println(\"-------------------\")\n\tfor _, d := range ds {\n\t\tfmt.Printf(\"%-8v| %-8v|\\n\", d, ShortDuration(d))\n\t}\n\n\t// Output:\n\t// Default | Short |\n\t// -------------------\n\t// 5h4m3s | 5h4m3s |\n\t// 5h4m0s | 5h4m |\n\t// 5h0m3s | 5h0m3s |\n\t// 4m3s | 4m3s |\n\t// 5h0m0s | 5h |\n\t// 4m0s | 4m |\n\t// 3s | 3s |\n}",
"func (m *MockReader) Short() (int16, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"Short\")\n\tret0, _ := ret[0].(int16)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (mr *MockReaderMockRecorder) Short() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"Short\", reflect.TypeOf((*MockReader)(nil).Short))\n}",
"func (c *C) Short() Type {\n\treturn IntT(2)\n}",
"func (s VoteStatusType) ShortDesc() string {\n\treturn ShorterDesc[piapi.PropVoteStatusT(s)]\n}",
"func (p *Position) IsShort() bool {\n\treturn p.EntranceOrder() != nil && p.EntranceOrder().Side == SELL\n}",
"func isOptionShort(inString string) (bool, []string) {\r\n\tvar rx *regexp.Regexp = regexp.MustCompile(`(?i)^(-)?([!?a-z0-9]+)$`)\r\n\tm := rx.FindAllStringSubmatch(inString, -1)\r\n\tif nil == m { return false, nil }\r\n\tif 1 == len(m[0][2]) {\r\n\t\treturn true, []string{m[0][2]}\r\n\t}\r\n\r\n\tres := make([]string, 0)\r\n\t// If necessary, split a compound into its components (ex: \"-vh\" => \"-v -h\").\r\n\tfor _, c := range m[0][2] {\r\n\t\tres = append(res, fmt.Sprintf(\"%c\", c))\r\n\t}\r\n\treturn true, res\r\n}",
"func (f *Formatter) Short() string {\n\tdays, hours, mins, secs := resolve(f.duration)\n\treturn fmt.Sprintf(\"%dd%dh%dm%ds\\n\", days, hours, mins, secs)\n}",
"func (c Currency) Short() Short {\n\treturn Short{\n\t\tNumeric: c.Numeric,\n\t\tISO: c.ISO,\n\t\tDecimals: c.Decimals,\n\t}\n}",
"func (o *SubDescriptionDto) HasShortText() bool {\n\tif o != nil && !IsNil(o.ShortText) {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (t TypeURI) Short() string {\n\treturn XDSShortURINames[t]\n}",
"func (t TxnMeta) Short() string {\n\treturn t.ID.Short()\n}",
"func (u *testUtil) Verbose() bool {\n\treturn u.verbose\n}",
"func ShortEQ(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.EQ(s.C(FieldShort), v))\n\t})\n}",
"func isShortWord(s string)(bool) {\n\tR1 := GetR1(s)\n\tif isEndShortSyllable(s) && R1 == \"\" {\n\t\treturn true\n\t}\n\treturn false\n}",
"func Short() string {\n\tresult := Version\n\n\tif GitBranch != \"\" {\n\t\tif result != \"\" {\n\t\t\tresult += \" \"\n\t\t}\n\n\t\tresult += GitBranch\n\t\tif GitCommit != \"\" {\n\t\t\tresult += fmt.Sprintf(\"#%s\", GitCommit)\n\t\t}\n\t}\n\n\tif result == \"\" {\n\t\treturn \"unknown\"\n\t}\n\n\treturn result\n}",
"func (cl continuousLoadTest) shortTestTimeout() string {\n\tfl := flag.Lookup(\"test.timeout\")\n\tif fl == nil {\n\t\treturn \"\"\n\t}\n\ttimeout, err := time.ParseDuration(fl.Value.String())\n\tif err != nil {\n\t\tlog.Errorf(context.Background(), \"couldn't parse test timeout %s\", fl.Value.String())\n\t\treturn \"\"\n\t}\n\treturn regexp.MustCompile(`([a-z])0[0a-z]+`).ReplaceAllString(timeout.String(), `$1`)\n}",
"func (v *Version) ShortVersion() string {\n\treturn fmt.Sprintf(\"%d.%d.%d\", v.Release, v.Fixpack, v.Hotfix)\n}",
"func Short() (shaPre string, ver string) {\n\treturn short(Sha, Version)\n}",
"func ShortFlag(name string) FlagOption {\n\treturn func(f *Flag) {\n\t\tf.alias = name\n\t}\n}",
"func (md *pcpMetricDesc) ShortDescription() string { return md.shortDescription }",
"func AsStringShortest(value bool) AsStringAttr {\n\treturn func(m optionalAttr) {\n\t\tm[\"shortest\"] = value\n\t}\n}",
"func (s Segment) ShortString() string {\n\tmib, _, objs := s.SpeedPerSec()\n\tspeed := \"\"\n\tif mib > 0 {\n\t\tspeed = fmt.Sprintf(\"%.02f MiB/s, \", mib)\n\t}\n\treturn fmt.Sprintf(\"%s%.02f obj/s (%v)\",\n\t\tspeed, objs, s.EndsBefore.Sub(s.Start).Round(time.Millisecond))\n}",
"func ShortHasSuffix(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.HasSuffix(s.C(FieldShort), v))\n\t})\n}",
"func flagFromShortName(short rune) option {\n\tfor _, option := range flags {\n\t\tif option.Short() == short {\n\t\t\treturn option\n\t\t}\n\t}\n\treturn nil\n}",
"func ShortContains(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.Contains(s.C(FieldShort), v))\n\t})\n}",
"func (s *Service) FullShort(c context.Context, pn, ps int64, source string) (res []*webmdl.Mi, err error) {\n\tvar (\n\t\taids []int64\n\t\tip = metadata.String(c, metadata.RemoteIP)\n\t\tm = make(map[int64]string)\n\t)\n\tif aids, err = s.aids(c, pn, ps); err != nil {\n\t\treturn\n\t}\n\tif res, err = s.archiveWithTag(c, aids, ip, m, source); err != nil {\n\t\tlog.Error(\"s.archiveWithTag error(%v)\", err)\n\t}\n\treturn\n}",
"func (opr *StakingPriceRecord) ShortString() string {\n\tstr := fmt.Sprintf(\"SPRHash %30x\", opr.SPRHash)\n\treturn str\n}",
"func (i Interval) Short() string {\n\ts := i.String()\n\tif strings.HasSuffix(s, \"m0s\") {\n\t\ts = s[:len(s)-2]\n\t}\n\tif strings.HasSuffix(s, \"h0m\") {\n\t\ts = s[:len(s)-2]\n\t}\n\treturn s\n}",
"func (o *SubDescriptionDto) GetShortTextOk() (*string, bool) {\n\tif o == nil || IsNil(o.ShortText) {\n\t\treturn nil, false\n\t}\n\treturn o.ShortText, true\n}",
"func (c *Command) ShortDescription() string {\n\treturn strings.Split(c.Description, \"\\n\")[0]\n}",
"func (o LookupNoteResultOutput) ShortDescription() pulumi.StringOutput {\n\treturn o.ApplyT(func(v LookupNoteResult) string { return v.ShortDescription }).(pulumi.StringOutput)\n}",
"func (p Partita) ShortString() string {\n\treturn fmt.Sprintf(\"%s - %s\", p.squadraA, p.squadraB)\n}",
"func TestRejectShortName(t *testing.T) {\n if err := Create(\"short\", \"\", \"\", nil); err == nil {\n t.Error(\"Short handle worked\")\n }\n}",
"func (node *Configuration) SetShort(parameter uint8, value uint16) error {\n\treturn node.zwSendDataRequest(CommandClassConfiguration,\n\t\t[]uint8{configurationSet, parameter, 2, uint8((value >> 8) & (0xff)),\n\t\t\tuint8(value & 0xff)})\n}",
"func VerboseTest() bool {\n\tvar buf [2048]byte\n\tn := runtime.Stack(buf[:], false)\n\tif bytes.Index(buf[:n], []byte(\"TestNonVerbose\")) != -1 {\n\t\treturn false\n\t}\n\n\tflag := flag.Lookup(\"test.v\")\n\treturn flag != nil && flag.Value.String() == \"true\"\n}",
"func ShortPrint(f Frame) string {\n\treturn f.ShortPrint()\n}",
"func (r *Resolver) AudioShort() generated.AudioShortResolver { return &audioShortResolver{r} }",
"func (v Version) ShortString() string {\n\treturn fmt.Sprintf(\"%d.%d\", v.Major, v.Minor)\n}",
"func (r *Readme) SetShortDesc(sd string) *Readme {\n\tr.shortDesc = sd\n\treturn r\n}",
"func ShowShortVersionBanner() {\n\toutput := colors.NewColorWriter(os.Stdout)\n\tInitBanner(output, bytes.NewBufferString(colors.MagentaBold(shortVersionBanner)))\n}",
"func ShowShortVersionBanner() {\n\toutput := colors.NewColorWriter(os.Stdout)\n\tInitBanner(output, bytes.NewBufferString(colors.MagentaBold(shortVersionBanner)))\n}",
"func Verbose() bool {\n\treturn Verbosity > 0\n}",
"func Verbose() bool {\n\treturn Verbosity > 0\n}",
"func (v *RawWriteCFValue) UpdateShortValue(value []byte) {\n\tv.shortValue = value\n}",
"func (me TseverityType) IsMedium() bool { return me.String() == \"medium\" }",
"func (node *Configuration) GetShort(parameter uint8) (uint16, error) {\n\tvar value []uint8\n\tvar err error\n\n\tif value, err = node.getValue(parameter, 2); err != nil {\n\t\treturn 0, err\n\t}\n\treturn binary.BigEndian.Uint16(value), nil\n}",
"func (v *RawWriteCFValue) GetShortValue() []byte {\n\treturn v.shortValue\n}",
"func (r *Readme) ShortDesc() string {\n\treturn r.shortDesc\n}",
"func BenchmarkSearchShort(b *testing.B) { benchmarkSearch(b, testData[0]) }",
"func Verbose() bool",
"func TestCommandsHaveSynopsis(t *testing.T) {\n\tfor i, c := range coreCommands() {\n\t\tt.Run(fmt.Sprintf(\"test short description of command %d\", i), func(t *testing.T) {\n\t\t\tassert.NotEmpty(t, c.Short)\n\t\t})\n\t}\n}",
"func (b *Block) StringShort() string {\n\tif b == nil {\n\t\treturn \"nil-Block\"\n\t}\n\treturn fmt.Sprintf(\"Block#%X\", b.Hash())\n}",
"func (b *Block) StringShort() string {\n\tif b == nil {\n\t\treturn \"nil-Block\"\n\t}\n\treturn fmt.Sprintf(\"Block#%X\", b.Hash())\n}",
"func show_stats()bool{\nreturn flags['s']/* should statistics be printed at end of run? */\n}",
"func TestShortQuery(t *testing.T) {\n\tvar qerr *queryError\n\tdoh, _ := NewTransport(testURL, ips, nil, nil, nil)\n\t_, err := doh.Query([]byte{})\n\tif err == nil {\n\t\tt.Error(\"Empty query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n\n\t_, err = doh.Query([]byte{1})\n\tif err == nil {\n\t\tt.Error(\"One byte query should fail\")\n\t} else if !errors.As(err, &qerr) {\n\t\tt.Errorf(\"Wrong error type: %v\", err)\n\t} else if qerr.status != BadQuery {\n\t\tt.Errorf(\"Wrong error status: %d\", qerr.status)\n\t}\n}",
"func (m *Message) ShortDescription() string {\n\treturn fmt.Sprintf(\"%s\", fmt.Sprintf(m.definition, m.args...))\n}",
"func NewSyntheticsBrowserTestResultShortResult() *SyntheticsBrowserTestResultShortResult {\n\tthis := SyntheticsBrowserTestResultShortResult{}\n\treturn &this\n}",
"func Verbose() bool {\n\t// possible: panic(\"testing: Verbose called before Init\")\n\t// possible: panic(\"testing: Verbose called before Parse\")\n}",
"func (p *Packet) AddShort(s uint16) *Packet {\n\tp.Payload = append(p.Payload, byte(s>>8), byte(s))\n\treturn p\n}",
"func (s *Instruction) shortDebugString(prefix ...string) string {\n\treturn fmt.Sprintf(\"%s%s type: %s\", strings.Join(prefix, \"\"), s.Name, s.Type.String())\n}",
"func ShortStats(tokenFile string) {\n\tDashboard(tokenFile)\n\n\tdashboard := jbody.(map[string]interface{})[\"dashboard\"].(map[string]interface{})\n\tdays := dashboard[\"days\"].([]interface{})\n\ttodayName := \"today\"\n\tfor i := 0; i < len(days); i++ {\n\t\tday := days[i].(map[string]interface{})\n\t\tif day[\"is_today\"].(bool) {\n\t\t\ttodayName = day[\"abbr\"].(string)\n\t\t\tbreak\n\t\t}\n\t}\n\ttodaySteps := prettyNumber(dashboard[\"today_steps\"].(float64))\n\ttodayPct := prettyPct(100 * dashboard[\"today_steps\"].(float64) / (dashboard[\"weekly_step_goal\"].(float64) / 7.0))\n\tweekNumber := dashboard[\"week_number\"].(float64)\n\tweekSteps := prettyNumber(dashboard[\"current_steps\"].(float64))\n\tweekPct := prettyPct(dashboard[\"week_full_pct\"].(float64))\n\n\tfmt.Printf(\"H2W - %v: %v (%v%%); week %v: %v (%v%%)\", todayName, todaySteps, todayPct, weekNumber, weekSteps, weekPct)\n}",
"func CurrencyShort() string {\n\treturn getRandValue([]string{\"currency\", \"short\"})\n}",
"func IfWithShortStatement(x, n, lim float64) float64 {\n\tfmt.Printf(\"\\n****Running flowcontrol.IfWithShortStatement(), if with short statement \")\n\tif v := math.Pow(x, n); v < lim {\n\t\treturn v\n\t}\n\t// can't use v here\n\treturn lim\n}",
"func SetShortField(env *C.JNIEnv, obj C.jobject, fieldID C.jfieldID, val C.jshort) {\n\tC._GoJniSetShortField(env, obj, fieldID, val)\n}",
"func (h *InputHost) SetTestShortExtentsByPath(override string) {\n\th.logger.WithField(`val`, override).Info(`SetTestShortExtentsByPath`)\n\th.testShortExtentsByPath = override\n}",
"func (m *MMU) WriteShort(addr uint16, val uint16) {\n\tbuf := make([]uint8, 2)\n\tEndian.PutUint16(buf, val)\n\n\tm.Write(addr, buf[0])\n\tm.Write(addr+1, buf[1])\n}",
"func (d Day) ShortString() string {\n\treturn d.ToTime().Format(\"2006-01-02\")\n}",
"func (p FnacParser) GetShortURL() string {\n\treturn p.shortURL\n}",
"func (c *StickersCreateStickerSetRequest) GetShortName() (value string) {\n\tif c == nil {\n\t\treturn\n\t}\n\treturn c.ShortName\n}",
"func ShortUsageFunc(fn func()) Option {\n\treturn Option{short_usage: fn}\n}",
"func ShortGT(v string) predicate.Ethnicity {\n\treturn predicate.Ethnicity(func(s *sql.Selector) {\n\t\ts.Where(sql.GT(s.C(FieldShort), v))\n\t})\n}",
"func (o FirewallPolicyAssociationResponseOutput) ShortName() pulumi.StringOutput {\n\treturn o.ApplyT(func(v FirewallPolicyAssociationResponse) string { return v.ShortName }).(pulumi.StringOutput)\n}",
"func ShortIf() {\n\n\tvar (\n\t\tn int\n\t\terr error\n\t)\n\n\tif a := os.Args; len(a) != 2 {\n\t\tfmt.Println(\"Give me a number\")\n\t} else if n, err = strconv.Atoi(a[1]); err != nil {\n\t\tfmt.Printf(\"Can't convert %q. \\n\", a[1])\n\t} else if n == 32 {\n\t\tfmt.Printf(\"Perfect number, %v , %v \\n\", a, n)\n\t}\n\n\tfmt.Println(n)\n\n}",
"func (s *StickerSet) GetShortName() (value string) {\n\tif s == nil {\n\t\treturn\n\t}\n\treturn s.ShortName\n}",
"func (me TxsdType) IsSimple() bool { return me.String() == \"simple\" }",
"func (l Level) NameShort() string {\n\tswitch l {\n\tcase TraceLevel:\n\t\treturn \"TRC\"\n\tcase DebugLevel:\n\t\treturn \"DBG\"\n\tcase InfoLevel:\n\t\treturn \"INF\"\n\tcase WarnLevel:\n\t\treturn \"WRN\"\n\tcase ErrorLevel:\n\t\treturn \"ERR\"\n\tcase FatalLevel:\n\t\treturn \"FTL\"\n\tdefault:\n\t\treturn \"\"\n\t}\n}",
"func showVersion() {\n\tfmt.Print(versionString())\n\tfmt.Print(releaseString())\n\tif devBuild && gitShortStat != \"\" {\n\t\tfmt.Printf(\"%s\\n%s\\n\", gitShortStat, gitFilesModified)\n\t}\n}",
"func (o *SubDescriptionDto) GetShortText() string {\n\tif o == nil || IsNil(o.ShortText) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ShortText\n}",
"func ShowHelp() bool {\n\treturn showHelp\n}",
"func (b Beacon) ShortString() string {\n\tstr := b.Hex()\n\tl := len(str)\n\treturn Shorten(str[util.Min(2, l):], 10)\n}"
] | [
"0.7731075",
"0.68418634",
"0.65344787",
"0.6520868",
"0.64654624",
"0.6450551",
"0.64211386",
"0.6411943",
"0.6330205",
"0.6225934",
"0.6187232",
"0.6177478",
"0.6140601",
"0.61338013",
"0.61015755",
"0.6095324",
"0.6078198",
"0.6049536",
"0.60252917",
"0.60129523",
"0.59848386",
"0.59829736",
"0.59777784",
"0.59708",
"0.5960457",
"0.5900027",
"0.5885522",
"0.58757377",
"0.5784867",
"0.57482797",
"0.5748154",
"0.57474476",
"0.5689082",
"0.56598514",
"0.5647523",
"0.5631845",
"0.56313914",
"0.5620117",
"0.5611991",
"0.5574858",
"0.5571049",
"0.55673635",
"0.5560143",
"0.55563354",
"0.5554094",
"0.5528294",
"0.5526984",
"0.5526441",
"0.5520002",
"0.5515817",
"0.5512744",
"0.55002475",
"0.54940206",
"0.5483464",
"0.54715824",
"0.5456724",
"0.5441137",
"0.54305923",
"0.5430106",
"0.54272234",
"0.54272234",
"0.5396824",
"0.5396824",
"0.5393832",
"0.5392756",
"0.53835374",
"0.53823847",
"0.53776175",
"0.53691304",
"0.5357951",
"0.5330025",
"0.5310659",
"0.5310659",
"0.530525",
"0.5290627",
"0.52629036",
"0.526037",
"0.5259663",
"0.525203",
"0.5219711",
"0.5219499",
"0.5217767",
"0.520833",
"0.5195888",
"0.5195435",
"0.51951796",
"0.5183364",
"0.51656",
"0.516124",
"0.51594645",
"0.51563275",
"0.51561916",
"0.5155848",
"0.51538014",
"0.5150257",
"0.5126891",
"0.5114054",
"0.510344",
"0.5094594",
"0.5092651"
] | 0.8105215 | 0 |
CoverMode reports what the test coverage mode is set to. The values are "set", "count", or "atomic". The return value will be empty if test coverage is not enabled. | func CoverMode() string { return cover.Mode } | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func CoverMode(mode string) Option {\n\treturn func(o *options) error {\n\t\to.covermode = mode\n\t\treturn nil\n\t}\n}",
"func Coverage() error {\n\tmg.Deps(Test)\n\treturn sh.Run(\"go\", \"tool\", \"cover\", \"-html=coverage.out\")\n}",
"func Coverage() error {\n\tif _, err := os.Stat(\"./coverage.out\"); err != nil {\n\t\treturn fmt.Errorf(\"run mage test befor checking the code coverage\")\n\t}\n\treturn sh.RunV(\"go\", \"tool\", \"cover\", \"-html=coverage.out\")\n}",
"func TestMode() bool {\n\treturn mode&TestModeFlag == TestModeFlag\n}",
"func TestCoverHTML() error {\n\tmg.Deps(getDep)\n\tconst (\n\t\tcoverAll = \"coverage-all.out\"\n\t\tcover = \"coverage.out\"\n\t)\n\tf, err := os.Create(coverAll)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tif _, err := f.Write([]byte(\"mode: count\")); err != nil {\n\t\treturn err\n\t}\n\tpkgs, err := packages()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, pkg := range pkgs {\n\t\tif err := sh.Run(goexe, \"test\", \"-coverprofile=\"+cover, \"-covermode=count\", pkg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb, err := ioutil.ReadFile(cover)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tidx := bytes.Index(b, []byte{'\\n'})\n\t\tb = b[idx+1:]\n\t\tif _, err := f.Write(b); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn sh.Run(goexe, \"tool\", \"cover\", \"-html=\"+coverAll)\n}",
"func Cover() error {\n\tmg.Deps(getEnvironment, Clean, GetBuildName)\n\tfmt.Println(fmt.Sprintf(\"Testing with coverage gpsa... \"))\n\tfmt.Println(\"# ########################################################################################\")\n\n\terr := gobuildhelpers.CoverTestFolders(gpsaBuildContext.PackagesToTest, gpsaBuildContext.LogDir, \"TestCoverage.log\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfmt.Println(\"# ########################################################################################\")\n\treturn nil\n}",
"func TestCoverHTML() error {\n\tconst (\n\t\tcoverAll = \"coverage-all.out\"\n\t\tcover = \"coverage.out\"\n\t)\n\tf, err := os.Create(coverAll)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tif _, err := f.Write([]byte(\"mode: count\")); err != nil {\n\t\treturn err\n\t}\n\tpkgs, err := hugoPackages()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, pkg := range pkgs {\n\t\tif err := sh.Run(goexe, \"test\", \"-coverprofile=\"+cover, \"-covermode=count\", pkg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb, err := os.ReadFile(cover)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tidx := bytes.Index(b, []byte{'\\n'})\n\t\tb = b[idx+1:]\n\t\tif _, err := f.Write(b); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn sh.Run(goexe, \"tool\", \"cover\", \"-html=\"+coverAll)\n}",
"func (g Go) Coverage(ctx context.Context) error {\n\tmg.CtxDeps(ctx, g.CheckVersion, g.Cover)\n\tgf, err := goFiles()\n\tif err != nil {\n\t\treturn err\n\t}\n\tneed, _ := target.Path(\"coverage.out\", gf...)\n\tif need {\n\t\tmg.Deps(g.Test)\n\t}\n\treturn goCover(\"-html=coverage.out\")\n}",
"func TestCoverHTML() error {\n\tconst (\n\t\tcoverAll = \"coverage-all.out\"\n\t\tcover = \"coverage.out\"\n\t)\n\tf, err := os.Create(coverAll)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\tif _, err := f.Write([]byte(\"mode: count\")); err != nil {\n\t\treturn err\n\t}\n\tpkgs, err := packages()\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, pkg := range pkgs {\n\t\tif err := sh.Run(goexe, \"test\", \"-coverprofile=\"+cover, pkg); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tb, err := ioutil.ReadFile(cover)\n\t\tif err != nil {\n\t\t\tif os.IsNotExist(err) {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t\tidx := bytes.Index(b, []byte{'\\n'})\n\t\tb = b[idx+1:]\n\t\tif _, err := f.Write(b); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tif err := f.Close(); err != nil {\n\t\treturn err\n\t}\n\treturn sh.Run(goexe, \"tool\", \"cover\", \"-html=\"+coverAll)\n}",
"func (g Go) Cover(ctx context.Context) error {\n\tmg.CtxDeps(ctx, g.CheckVersion)\n\tgf, err := goFiles()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif need, _ := target.Path(\"coverage.out\", gf...); need {\n\t\tmg.Deps(g.Test)\n\t}\n\treturn goCover(strings.Split(CoverArgs, \" \")...)\n}",
"func Test() error {\n\treturn test(\"-coverprofile=coverage.out\", \"./...\")\n}",
"func Coverage() float64",
"func (o *NetworkElementSummaryAllOf) GetFcMode() string {\n\tif o == nil || o.FcMode == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.FcMode\n}",
"func (factory *Factory) ChaosTestsEnabled() bool {\n\treturn factory.options.enableChaosTests\n}",
"func (s *CaptureOption) SetCaptureMode(v string) *CaptureOption {\n\ts.CaptureMode = &v\n\treturn s\n}",
"func Coverage() float64 {}",
"func (o *StorageHitachiPortAllOf) GetFabricMode() bool {\n\tif o == nil || o.FabricMode == nil {\n\t\tvar ret bool\n\t\treturn ret\n\t}\n\treturn *o.FabricMode\n}",
"func (t *testFuncs) Covered() string {\n\treturn \"\"\n}",
"func (t InspectMode) String() string {\n\treturn string(t)\n}",
"func (_m *GasEstimator) Mode() string {\n\tret := _m.Called()\n\n\tvar r0 string\n\tif rf, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\n\treturn r0\n}",
"func (t *Tortoise) Mode() Mode {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\tif t.trtl.isFull {\n\t\treturn Full\n\t}\n\treturn Verifying\n}",
"func performCoverage(path string, info os.FileInfo, err error) error {\n\tif err == nil && info.IsDir() && hasGoFile(path) && !fileIgnore.MatchString(path) {\n\t\tpath = \"./\" + path\n\t\tlog.Println(path)\n\t\texec.Command(\"go\", \"test\", \"-covermode=count\", \"-coverprofile=\"+path+\"/\"+covTmpFile, path).Output()\n\t}\n\treturn nil\n}",
"func cover(repo, goversion string, priority bool) error {\n\tsetInProgress(repo, goversion, priority)\n\n\tstdOut, stdErr, err := run(goversion, repo)\n\tif err != nil {\n\t\terrLogger.Println(err.Error())\n\t\tif len(stdErr) == 0 {\n\t\t\tstdErr = err.Error()\n\t\t}\n\t}\n\n\tunsetInProgress(repo, goversion, priority)\n\n\tobj := &Object{\n\t\tRepo: repo,\n\t\tTag: goversion,\n\t\tCover: stdErr,\n\t\tOutput: false,\n\t\tAddedAt: time.Now(),\n\t}\n\n\tif stdOut != \"\" {\n\t\tobj.Cover = computeCoverage(stdOut)\n\t\tobj.Output = true\n\t}\n\n\trerr := redisCodec.Set(&cache.Item{\n\t\tKey: repoFullName(repo, goversion, priority),\n\t\tObject: obj,\n\t\tExpiration: cacheExpiry,\n\t})\n\tif rerr != nil {\n\t\terrLogger.Println(rerr)\n\t}\n\t// if priority is true, then the request was not pushed to the Q channel,\n\t// So cleanup of channel is not required\n\tif !priority {\n\t\t<-qChan\n\t}\n\n\tif err == nil && obj.Cover == \"\" {\n\t\treturn ErrNoTest\n\t}\n\n\treturn err\n}",
"func (o DatabaseOutput) ConcurrencyMode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v *Database) pulumi.StringOutput { return v.ConcurrencyMode }).(pulumi.StringOutput)\n}",
"func (c CacheConfiguration) Mode() CacheMode {\n\treturn CacheMode(c & _CacheConfigurationModeMask >> 8)\n}",
"func (c *CheckSuite) GetConclusion() string {\n\tif c == nil || c.Conclusion == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.Conclusion\n}",
"func (mc *MockContiv) InSTNMode() bool {\n\treturn mc.stnMode\n}",
"func (c *Coverage) RunGlobal(change scm.Change, options *Options, tmpDir string) (CoverageProfile, error) {\n\tcoverPkg := \"\"\n\tfor i, p := range change.All().Packages() {\n\t\tif s := c.SettingsForPkg(p); s.MinCoverage != 0 {\n\t\t\tif i != 0 {\n\t\t\t\tcoverPkg += \",\"\n\t\t\t}\n\t\t\tcoverPkg += p\n\t\t}\n\t}\n\n\t// This part is similar to Test.Run() except that it passes a unique\n\t// -coverprofile file name, so that all the files can later be merged into a\n\t// single file.\n\ttestPkgs := change.All().TestPackages()\n\ttype result struct {\n\t\tfile string\n\t\terr error\n\t}\n\tresults := make(chan *result)\n\tfor index, tp := range testPkgs {\n\t\tf := filepath.Join(tmpDir, fmt.Sprintf(\"test%d.cov\", index))\n\t\tgo func(f string, testPkg string) {\n\t\t\t// Maybe fallback to 'pkg + \"/...\"' and post process to remove\n\t\t\t// uninteresting directories. The rationale is that it will eventually\n\t\t\t// blow up the OS specific command argument length.\n\t\t\targs := []string{\n\t\t\t\t\"go\", \"test\", \"-v\", \"-covermode=count\", \"-coverpkg\", coverPkg,\n\t\t\t\t\"-coverprofile\", f,\n\t\t\t\t\"-timeout\", fmt.Sprintf(\"%ds\", options.MaxDuration),\n\t\t\t\ttestPkg,\n\t\t\t}\n\t\t\tout, exitCode, duration, err := options.Capture(change.Repo(), args...)\n\t\t\tif duration > time.Second {\n\t\t\t\tlog.Printf(\"%s was slow: %s\", args, round(duration, time.Millisecond))\n\t\t\t}\n\t\t\tif exitCode != 0 {\n\t\t\t\terr = fmt.Errorf(\"%s %s failed:\\n%s\", strings.Join(args, \" \"), testPkg, processStackTrace(out))\n\t\t\t}\n\t\t\tresults <- &result{f, err}\n\t\t}(f, tp)\n\t}\n\n\t// Sends to coveralls.io if applicable. Do not write to disk unless needed.\n\tvar f readWriteSeekCloser\n\tvar err error\n\tif c.isGoverallsEnabled() {\n\t\tif f, err = os.Create(filepath.Join(tmpDir, \"profile.cov\")); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tf = &buffer{}\n\t}\n\n\t// Aggregate all results.\n\tcounts := map[string]int{}\n\tfor i := 0; i < len(testPkgs); i++ {\n\t\tresult := <-results\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif result.err != nil {\n\t\t\terr = result.err\n\t\t\tcontinue\n\t\t}\n\t\tif err2 := loadRawCoverage(result.file, counts); err == nil {\n\t\t\t// Wait for all tests to complete before returning.\n\t\t\terr = err2\n\t\t}\n\t}\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\treturn loadMergeAndClose(f, counts, change)\n}",
"func (o *ProjectDeploymentRuleResponse) GetMode() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.Mode\n}",
"func (o BucketObjectLockConfigurationRuleDefaultRetentionOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketObjectLockConfigurationRuleDefaultRetention) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (o *NetworkElementSummaryAllOf) GetManagementMode() string {\n\tif o == nil || o.ManagementMode == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ManagementMode\n}",
"func TestMode() {\n\ttestMutex.Lock()\n\tdefer testMutex.Unlock()\n\tschedulerMaker = newTestScheduler\n\tNow = testNow\n\t// Set to non-zero time when entering test mode so that any IsZero\n\t// checks don't unexpectedly pass.\n\tnowInTest.Store(time.Date(2016, time.November, 25, 20, 47, 0, 0, time.UTC))\n\t// Clear the list of test schedulers so that TestMode() starts with\n\t// a clean slate.\n\ttestSchedulers = make(schedulerList, 0)\n}",
"func (ds DiscoverStandalone) Mode() string {\n\treturn \"hardware\"\n}",
"func (o BackendServiceConnectionTrackingPolicyResponseOutput) TrackingMode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendServiceConnectionTrackingPolicyResponse) string { return v.TrackingMode }).(pulumi.StringOutput)\n}",
"func (c *Client) SetTestMode(testMode bool) {\n\tc.testMode = testMode\n}",
"func (bp *BusPirate) GetMode() (int, int) {\n\treturn bp.mode, bp.modeversion\n}",
"func (i *Invoice) GetTest() (value bool) {\n\tif i == nil {\n\t\treturn\n\t}\n\treturn i.Flags.Has(0)\n}",
"func (o BucketV2ObjectLockConfigurationRuleDefaultRetentionOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BucketV2ObjectLockConfigurationRuleDefaultRetention) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (o *EquipmentFanControl) GetMode() string {\n\tif o == nil || o.Mode == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Mode\n}",
"func (c CoverageProfile) CoveragePercent() float64 {\n\tif total := c.TotalLines(); total != 0 {\n\t\treturn 100. * float64(c.TotalCoveredLines()) / float64(total)\n\t}\n\treturn 0\n}",
"func (m *ActiveNodeMock) GetOpModeMinimockCounter() uint64 {\n\treturn atomic.LoadUint64(&m.GetOpModeCounter)\n}",
"func BuildMode() string {\n\treturn C.GoString(C.yices_build_mode)\n}",
"func repoCoverStatus(repo, tag string, priority bool) (bool, error) {\n\t// Check if cover run is already in progress for the given repo and tag\n\terr := redisRing.HGet(inProgrsKey, repoFullName(repo, tag, priority)).Err()\n\tif err == nil {\n\t\treturn true, nil\n\t}\n\terrMsg := err.Error()\n\tif errMsg != \"redis: nil\" {\n\t\terrLogger.Println(errMsg)\n\t}\n\n\treturn false, err\n}",
"func (o *NetworkElementSummaryAllOf) GetFcModeOk() (*string, bool) {\n\tif o == nil || o.FcMode == nil {\n\t\treturn nil, false\n\t}\n\treturn o.FcMode, true\n}",
"func (c *AppConfig) GetMode() string {\n\tmode := c.DefaultString(\"GO_ENV\", \"development\")\n\treturn mode\n}",
"func (c CoverageProfile) Passes(s *CoverageSettings) (string, bool) {\n\tif c.TotalLines() == 0 {\n\t\treturn \"no Go code\", true\n\t}\n\tpercent := c.CoveragePercent()\n\tprefix := fmt.Sprintf(\"%3.1f%% (%d/%d)\", percent, c.TotalCoveredLines(), c.TotalLines())\n\tsuffix := fmt.Sprintf(\"; Functions: %d untested / %d partially / %d completely\", c.NonCoveredFuncs(), c.PartiallyCoveredFuncs(), c.CoveredFuncs())\n\tif percent < s.MinCoverage {\n\t\treturn fmt.Sprintf(\"%s < %.1f%% (min)%s\", prefix, s.MinCoverage, suffix), false\n\t}\n\tif s.MaxCoverage > 0 && percent > s.MaxCoverage {\n\t\treturn fmt.Sprintf(\"%s > %.1f%% (max)%s\", prefix, s.MaxCoverage, suffix), false\n\t}\n\treturn fmt.Sprintf(\"%s >= %.1f%%%s\", prefix, s.MinCoverage, suffix), true\n}",
"func (o BackendResponseOutput) BalancingMode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v BackendResponse) string { return v.BalancingMode }).(pulumi.StringOutput)\n}",
"func (o *HyperflexVmSnapshotInfoAllOf) GetMode() string {\n\tif o == nil || o.Mode == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.Mode\n}",
"func (s Coverage) String() string {\n\treturn awsutil.Prettify(s)\n}",
"func upmergeCoverData(t *testing.T, gocoverdir string, mode string) {\n\tif testing.CoverMode() != mode {\n\t\treturn\n\t}\n\ttestGoCoverDir := os.Getenv(\"GOCOVERDIR\")\n\tif testGoCoverDir == \"\" {\n\t\treturn\n\t}\n\targs := []string{\"tool\", \"covdata\", \"merge\", \"-pkg=runtime/coverage\",\n\t\t\"-o\", testGoCoverDir, \"-i\", gocoverdir}\n\tt.Logf(\"up-merge of covdata from %s to %s\", gocoverdir, testGoCoverDir)\n\tt.Logf(\"executing: go %+v\", args)\n\tcmd := exec.Command(testenv.GoToolPath(t), args...)\n\tif b, err := cmd.CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"covdata merge failed (%v): %s\", err, b)\n\t}\n}",
"func (c *Coverage) RunLocal(change scm.Change, options *Options, tmpDir string) (CoverageProfile, error) {\n\ttestPkgs := change.Indirect().TestPackages()\n\ttype result struct {\n\t\tfile string\n\t\terr error\n\t}\n\tresults := make(chan *result)\n\tfor i, tp := range testPkgs {\n\t\tgo func(index int, testPkg string) {\n\t\t\tsettings := c.SettingsForPkg(testPkg)\n\t\t\t// Skip coverage if disabled for this directory.\n\t\t\tif settings.MinCoverage == 0 {\n\t\t\t\tresults <- nil\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tp := filepath.Join(tmpDir, fmt.Sprintf(\"test%d.cov\", index))\n\t\t\targs := []string{\n\t\t\t\t\"go\", \"test\", \"-v\", \"-covermode=count\",\n\t\t\t\t\"-coverprofile\", p,\n\t\t\t\t\"-timeout\", fmt.Sprintf(\"%ds\", options.MaxDuration),\n\t\t\t\ttestPkg,\n\t\t\t}\n\t\t\tout, exitCode, duration, _ := options.Capture(change.Repo(), args...)\n\t\t\tif duration > time.Second {\n\t\t\t\tlog.Printf(\"%s was slow: %s\", args, round(duration, time.Millisecond))\n\t\t\t}\n\t\t\tif exitCode != 0 {\n\t\t\t\tresults <- &result{err: fmt.Errorf(\"%s %s failed:\\n%s\", strings.Join(args, \" \"), testPkg, processStackTrace(out))}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tresults <- &result{file: p}\n\t\t}(i, tp)\n\t}\n\n\t// Sends to coveralls.io if applicable. Do not write to disk unless needed.\n\tvar f readWriteSeekCloser\n\tvar err error\n\tif c.isGoverallsEnabled() {\n\t\tif f, err = os.Create(filepath.Join(tmpDir, \"profile.cov\")); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t} else {\n\t\tf = &buffer{}\n\t}\n\n\t// Aggregate all results.\n\tcounts := map[string]int{}\n\tfor i := 0; i < len(testPkgs); i++ {\n\t\tresult := <-results\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\t\tif result == nil {\n\t\t\tcontinue\n\t\t}\n\t\tif result.err != nil {\n\t\t\terr = result.err\n\t\t\tcontinue\n\t\t}\n\t\tif err2 := loadRawCoverage(result.file, counts); err == nil {\n\t\t\t// Wait for all tests to complete before returning.\n\t\t\terr = err2\n\t\t}\n\t}\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, err\n\t}\n\treturn loadMergeAndClose(f, counts, change)\n}",
"func CauchyMode(δ, γ float64) float64 {\n\treturn δ\n}",
"func (c *CheckRun) GetConclusion() string {\n\tif c == nil || c.Conclusion == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.Conclusion\n}",
"func init() {\n\t// Initialize cobra and viper\n\tcobra.OnInitialize(readConfig)\n\tviper.SetEnvPrefix(\"overcover\")\n\n\t// Set up the flags\n\trootCmd.Flags().StringVarP(&config, \"config\", \"c\", os.Getenv(\"OVERCOVER_CONFIG\"), \"Configuration file to read. All command line options may be set through the configuration file.\")\n\t_, readOnlyDefault := os.LookupEnv(\"OVERCOVER_READONLY\")\n\trootCmd.Flags().BoolVarP(&readOnly, \"readonly\", \"r\", readOnlyDefault, \"Used to indicate that the configuration file should only be read, not written.\")\n\trootCmd.Flags().Float64P(\"threshold\", \"t\", 0, \"Set the minimum threshold for coverage; coverage below this threshold will result in an error.\")\n\trootCmd.Flags().StringVarP(&coverprofile, \"coverprofile\", \"p\", os.Getenv(\"OVERCOVER_COVERPROFILE\"), \"Specify the coverage profile file to read.\")\n\trootCmd.Flags().Float64P(\"min-headroom\", \"m\", 0, \"Set the minimum headroom. If the threshold is raised, it will be raised to the current coverage minus this value.\")\n\trootCmd.Flags().Float64P(\"max-headroom\", \"M\", 0, \"Set the maximum headroom. If the coverage is more than the threshold plus this value, the threshold will be raised.\")\n\trootCmd.Flags().StringArrayVarP(&buildArgs, \"build-arg\", \"b\", getBuildArgDefault(), \"Add a build argument. Build arguments are used to select source files for later coverage checking.\")\n\t_, detailedDefault := os.LookupEnv(\"OVERCOVER_DETAILED\")\n\trootCmd.Flags().BoolVarP(&detailed, \"detailed\", \"d\", detailedDefault, \"Used to request per-file detailed coverage data be emitted. May be used in conjunction with --summary.\")\n\t_, summaryDefault := os.LookupEnv(\"OVERCOVER_SUMMARY\")\n\trootCmd.Flags().BoolVarP(&summary, \"summary\", \"s\", summaryDefault, \"Used to request per-package summary coverage data be emitted. May be used in conjunction with --detailed.\")\n\n\t// Bind them to viper\n\t_ = viper.BindPFlag(\"threshold\", rootCmd.Flags().Lookup(\"threshold\"))\n\t_ = viper.BindEnv(\"threshold\")\n\t_ = viper.BindPFlag(\"min_headroom\", rootCmd.Flags().Lookup(\"min-headroom\"))\n\t_ = viper.BindEnv(\"min_headroom\")\n\t_ = viper.BindPFlag(\"max_headroom\", rootCmd.Flags().Lookup(\"max-headroom\"))\n\t_ = viper.BindEnv(\"max_headroom\")\n}",
"func (o *StorageHitachiVolumeMigrationPair) GetCopyMode() string {\n\tif o == nil || o.CopyMode == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CopyMode\n}",
"func (c *CreateCheckRunOptions) GetConclusion() string {\n\tif c == nil || c.Conclusion == nil {\n\t\treturn \"\"\n\t}\n\treturn *c.Conclusion\n}",
"func CaptureMode_Values() []string {\n\treturn []string{\n\t\tCaptureModeInput,\n\t\tCaptureModeOutput,\n\t}\n}",
"func (o *NetworkElementSummaryAllOf) SetFcMode(v string) {\n\to.FcMode = &v\n}",
"func Test() error {\n\treturn sh.RunV(\"go\", \"test\", \"-v\", \"-cover\", \"./...\", \"-coverprofile=coverage.out\")\n}",
"func (c *CursesConfig) CommandMode() tileslib.CommandModeType {\n\treturn c.base.CommandMode\n}",
"func (o *StorageExternalParityGroupAllOf) GetCacheMode() string {\n\tif o == nil || o.CacheMode == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.CacheMode\n}",
"func GetClusterMode() string {\n\treturn masterRTCfg.clusterMode\n}",
"func (htpc *HttpProcessorConfig) Mode(deliveryMode HttpClientMode) *HttpProcessorConfig {\n\thtpc.mode = deliveryMode\n\treturn htpc\n}",
"func (o *StorageHitachiPortAllOf) GetFabricModeOk() (*bool, bool) {\n\tif o == nil || o.FabricMode == nil {\n\t\treturn nil, false\n\t}\n\treturn o.FabricMode, true\n}",
"func parseFullCoverProfile(pwd string, path string) (covered, total int, err error) {\n\tvf(\"parsing coverage profile: %q\", path)\n\tfiles, err := cover.ParseProfiles(path)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tvar fileCovered, numLines int\n\n\tfor _, file := range files {\n\t\tfileCovered = 0\n\n\t\tfor _, block := range file.Blocks {\n\t\t\tnumLines = block.EndLine - block.StartLine\n\n\t\t\ttotal += numLines\n\t\t\tif block.Count != 0 {\n\t\t\t\tfileCovered += numLines\n\t\t\t}\n\t\t}\n\n\t\tvf(\"processing coverage profile: %q result: %s (%d/%d lines)\", path, file.FileName, fileCovered, numLines)\n\t\tcovered += fileCovered\n\t}\n\n\treturn\n}",
"func (o *OfferServiceModel) GetPricingMode() string {\n\tif o == nil {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\n\treturn o.PricingMode\n}",
"func (u *UpdateCheckRunOptions) GetConclusion() string {\n\tif u == nil || u.Conclusion == nil {\n\t\treturn \"\"\n\t}\n\treturn *u.Conclusion\n}",
"func (o ApiDiagnosticBackendRequestDataMaskingHeaderOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticBackendRequestDataMaskingHeader) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (id NodeID) Coverage() (uint64, uint64) {\n\treturn id.Index << id.Level, (id.Index + 1) << id.Level\n}",
"func (rec *Recorder) Mode() Mode {\n\treturn rec.options.Mode\n}",
"func (o *UserDisco) GetMode() UserModeEnum {\n\tif o == nil || o.Mode == nil {\n\t\tvar ret UserModeEnum\n\t\treturn ret\n\t}\n\treturn *o.Mode\n}",
"func (a *ApplyImpl) IncludeTests() bool {\n\treturn a.ApplyOptions.IncludeTests\n}",
"func (i Incident) Inspect(mode string) interface{} {\n\tswitch mode {\n\tcase \"status-line\":\n\t\tstatus := fmt.Sprintf(\"[⬤]%s\", incidentStatusColorMapper[i.Status])\n\t\treturn fmt.Sprintf(\"%s %s @ %s\", status, i.ID, i.URL)\n\n\tcase \"details\":\n\t\treturn [][]string{\n\t\t\t[]string{\"Status\", i.Status},\n\t\t\t[]string{\"Severity\", i.Urgency},\n\t\t\t[]string{\"Summary\", i.Description},\n\t\t\t[]string{\"Created\", i.CreatedAt},\n\t\t\t[]string{\"Service\", i.Service.Summary},\n\t\t}\n\n\tdefault:\n\t\treturn i.URL\n\t}\n}",
"func (o *NetworkElementSummaryAllOf) GetManagementModeOk() (*string, bool) {\n\tif o == nil || o.ManagementMode == nil {\n\t\treturn nil, false\n\t}\n\treturn o.ManagementMode, true\n}",
"func (o ApiDiagnosticFrontendRequestDataMaskingHeaderOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticFrontendRequestDataMaskingHeader) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (o DiagnosticBackendRequestDataMaskingHeaderOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendRequestDataMaskingHeader) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (m BootstrapMode) String() string {\n\tswitch m {\n\tcase DefaultBootstrapMode:\n\t\treturn \"default\"\n\tcase PreferPeersBootstrapMode:\n\t\treturn \"prefer_peers\"\n\tcase ExcludeCommitLogBootstrapMode:\n\t\treturn \"exclude_commitlog\"\n\t}\n\treturn \"unknown\"\n}",
"func GetRunMode() string {\n\treturn runMode\n}",
"func (o *NetworkElementSummaryAllOf) HasFcMode() bool {\n\tif o != nil && o.FcMode != nil {\n\t\treturn true\n\t}\n\n\treturn false\n}",
"func (d *Client) GetBestEffortCoverage() *GetBestEffortCoverageRequest {\n\treturn &GetBestEffortCoverageRequest{opts: make(map[string]interface{}), client: d.Client}\n}",
"func TestCoverage(t *testing.T) {\n\tvar valid int\n\tvar invalid int\n\tfilepath.Walk(\"workdir/corpus\", func(path string, info os.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif info.IsDir() {\n\t\t\treturn nil\n\t\t}\n\n\t\tdata, err := ioutil.ReadFile(path)\n\t\tif err != nil {\n\t\t\tt.Error(err)\n\t\t}\n\n\t\tr := graphql.Do(graphql.Params{\n\t\t\tSchema: testutil.StarWarsSchema,\n\t\t\tRequestString: string(data),\n\t\t})\n\t\tif len(r.Errors) == 0 {\n\t\t\tvalid++\n\t\t} else {\n\t\t\tinvalid++\n\t\t}\n\t\treturn nil\n\t})\n\tfmt.Println(valid, \"valid\", invalid, \"invalid\")\n}",
"func (f *FileSource) SupportedModes() []string {\n\treturn []string{configuration.TAIL_MODE, configuration.CAT_MODE}\n}",
"func (ch *Channel) OperationMode() (fgen.OperationMode, error) {\n\tvar mode fgen.OperationMode\n\ts, err := ch.QueryString(\"MENA?\\n\")\n\tif err != nil {\n\t\treturn mode, fmt.Errorf(\"error getting operation mode: %s\", err)\n\t}\n\tswitch s {\n\tcase \"0\":\n\t\treturn fgen.ContinuousMode, nil\n\tcase \"1\":\n\t\tmod, err := ch.QueryString(\"MTYP?\\n\")\n\t\tif err != nil {\n\t\t\treturn mode, fmt.Errorf(\"error determining modulation type: %s\", err)\n\t\t}\n\t\tswitch mod {\n\t\tcase \"5\":\n\t\t\treturn fgen.BurstMode, nil\n\t\tdefault:\n\t\t\treturn mode, fmt.Errorf(\"error determining operation mode, mtyp = %s\", mod)\n\t\t}\n\tdefault:\n\t\treturn mode, fmt.Errorf(\"error determining operation mode from fgen: %s\", s)\n\t}\n}",
"func (o *ShowSystem) GetComplianceStatus() string {\n\tif o == nil || IsNil(o.ComplianceStatus) {\n\t\tvar ret string\n\t\treturn ret\n\t}\n\treturn *o.ComplianceStatus\n}",
"func (lp *LoadPoint) GetMode() api.ChargeMode {\n\tlp.Lock()\n\tdefer lp.Unlock()\n\treturn lp.Mode\n}",
"func (o ApiDiagnosticBackendResponseDataMaskingHeaderOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticBackendResponseDataMaskingHeader) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (o *Config) getMode() (os.FileMode, bool, error) {\n\tmodeOverride := o.OutMode != \"\"\n\tm, err := strconv.ParseUint(\"0\"+o.OutMode, 8, 32)\n\tif err != nil {\n\t\treturn 0, false, err\n\t}\n\tmode := os.FileMode(m)\n\treturn mode, modeOverride, nil\n}",
"func (d *DockerSource) SupportedModes() []string {\n\treturn []string{configuration.TAIL_MODE, configuration.CAT_MODE}\n}",
"func collateCoverage(path string, info os.FileInfo, err error) error {\n\tif err == nil && !info.IsDir() && strings.Contains(path, covTmpFile) {\n\t\tcontentsB, err := ioutil.ReadFile(path)\n\t\tcheck(err)\n\t\tcontents := strings.Replace(string(contentsB), \"mode: count\\n\", \"\", 1)\n\t\tf, err := os.OpenFile(outputFilename, os.O_APPEND|os.O_WRONLY, 0600)\n\t\tcheck(err)\n\t\t_, err = f.WriteString(contents)\n\t\tcheck(err)\n\t\tf.Close()\n\t}\n\treturn nil\n}",
"func GetTraceMode() bool { return env.GetTraceMode() }",
"func (f *EnvFlags) Parallel() bool {\n\treturn f.parallelTests\n}",
"func (r *Repository) Mode() borges.Mode {\n\treturn r.mode\n}",
"func (f *FuncExtent) coverage(profile *Profile) (num, den int64) {\n\t// We could avoid making this n^2 overall by doing a single scan\n\t// and annotating the functions, but the sizes of the data\n\t// structures is never very large and the scan is almost\n\t// instantaneous.\n\tvar covered, total int64\n\t// The blocks are sorted, so we can stop counting as soon as we\n\t// reach the end of the relevant block.\n\tfor _, b := range profile.Blocks {\n\t\tif b.StartLine > f.endLine || (b.StartLine == f.endLine && b.StartCol >= f.endCol) {\n\t\t\t// Past the end of the function.\n\t\t\tbreak\n\t\t}\n\t\tif b.EndLine < f.startLine || (b.EndLine == f.startLine && b.EndCol <= f.startCol) {\n\t\t\t// Before the beginning of the function\n\t\t\tcontinue\n\t\t}\n\t\ttotal += int64(b.NumStmt)\n\t\tif b.Count > 0 {\n\t\t\tcovered += int64(b.NumStmt)\n\t\t}\n\t}\n\treturn covered, total\n}",
"func (o ApiDiagnosticFrontendResponseDataMaskingHeaderOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v ApiDiagnosticFrontendResponseDataMaskingHeader) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (o *StorageHitachiPortAllOf) SetFabricMode(v bool) {\n\to.FabricMode = &v\n}",
"func (f *EnvFlags) Assessment() string {\n\treturn f.assess\n}",
"func (o DiagnosticBackendResponseDataMaskingHeaderOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticBackendResponseDataMaskingHeader) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (o DiagnosticFrontendRequestDataMaskingHeaderOutput) Mode() pulumi.StringOutput {\n\treturn o.ApplyT(func(v DiagnosticFrontendRequestDataMaskingHeader) string { return v.Mode }).(pulumi.StringOutput)\n}",
"func (a *numericalAggregator) Mode() float64 {\n\treturn a.mode\n}",
"func (m NoSides) GetCoveredOrUncovered() (v enum.CoveredOrUncovered, err quickfix.MessageRejectError) {\n\tvar f field.CoveredOrUncoveredField\n\tif err = m.Get(&f); err == nil {\n\t\tv = f.Value()\n\t}\n\treturn\n}"
] | [
"0.6478001",
"0.5366792",
"0.5278076",
"0.52265596",
"0.52214974",
"0.5090226",
"0.49276546",
"0.49117792",
"0.48825347",
"0.48458752",
"0.4835405",
"0.46647367",
"0.46344316",
"0.46044812",
"0.4565363",
"0.45509803",
"0.45235664",
"0.44996905",
"0.44412193",
"0.44362083",
"0.443363",
"0.44286513",
"0.44158208",
"0.44078156",
"0.43775672",
"0.43731984",
"0.43617922",
"0.43264967",
"0.43262392",
"0.43221983",
"0.43204358",
"0.42968974",
"0.42736685",
"0.42724818",
"0.42694655",
"0.42636174",
"0.42236584",
"0.4215665",
"0.42116594",
"0.42041808",
"0.41888267",
"0.4143937",
"0.4140687",
"0.41396615",
"0.41274276",
"0.41021717",
"0.40875387",
"0.40853179",
"0.40839374",
"0.40788943",
"0.40654895",
"0.40525454",
"0.40432364",
"0.40379098",
"0.4032575",
"0.40322024",
"0.4030937",
"0.4029427",
"0.40219668",
"0.40203583",
"0.40147388",
"0.4014701",
"0.40076196",
"0.4000606",
"0.39993113",
"0.39863637",
"0.39757675",
"0.39742777",
"0.39716053",
"0.39699614",
"0.3964335",
"0.3962887",
"0.39540875",
"0.39480877",
"0.39458904",
"0.39453202",
"0.39386812",
"0.39379328",
"0.39372396",
"0.39343587",
"0.39332354",
"0.3930987",
"0.39284062",
"0.39281532",
"0.3927938",
"0.3925109",
"0.39200824",
"0.39200258",
"0.3916186",
"0.39126712",
"0.39121097",
"0.39086196",
"0.390479",
"0.39034876",
"0.39033803",
"0.38981602",
"0.38933158",
"0.3888785",
"0.38825732",
"0.38806435"
] | 0.65399575 | 0 |
Verbose reports whether the test.v flag is set. | func Verbose() bool {
	if chatty == nil {
		panic("testing: Verbose called before Init")
	}
	if !flag.Parsed() {
		panic("testing: Verbose called before Parse")
	}
	return *chatty
} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func VerboseTest() bool {\n\tflag := flag.Lookup(\"test.v\")\n\treturn flag != nil && flag.Value.String() == \"true\"\n}",
"func VerboseTest() bool {\n\tvar buf [2048]byte\n\tn := runtime.Stack(buf[:], false)\n\tif bytes.Index(buf[:n], []byte(\"TestNonVerbose\")) != -1 {\n\t\treturn false\n\t}\n\n\tflag := flag.Lookup(\"test.v\")\n\treturn flag != nil && flag.Value.String() == \"true\"\n}",
"func (u *testUtil) Verbose() bool {\n\treturn u.verbose\n}",
"func Vverbose() bool {\n\tif val := Get(\"Flag-Vverbose\"); val != nil {\n\t\tif v, ok := val.(bool); ok {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn false\n}",
"func (u *testUtil) SetVerbose(value bool) {\n\tu.verbose = value\n}",
"func Verbose(verbose bool) VitessOption {\n\treturn VitessOption{\n\t\tbeforeRun: func(hdl *Handle) error {\n\t\t\tif verbose {\n\t\t\t\tflag.Set(\"alsologtostderr\", \"true\")\n\t\t\t\tflag.Set(\"v\", \"5\")\n\t\t\t}\n\t\t\treturn nil\n\t\t},\n\t}\n}",
"func turnOnKlogIfVerboseTest(t *testing.T) {\n\thasVerboseFlag := flag.Lookup(\"test.v\").Value.String() == \"true\"\n\tif !hasVerboseFlag {\n\t\treturn\n\t}\n\n\tklogFlags := flag.NewFlagSet(\"klog\", flag.ExitOnError)\n\tklog.InitFlags(klogFlags)\n\t_ = klogFlags.Set(\"v\", \"4\")\n}",
"func Verbose() bool",
"func Verbose() bool {\n\treturn Verbosity > 0\n}",
"func Verbose() bool {\n\treturn Verbosity > 0\n}",
"func SetVerbose() {\n\tverbose = true\n}",
"func pverbose(format string, a ...interface{}) (n int, err error) {\n\tif *flVerbose {\n\t\treturn fmt.Printf(format+\"\\n\", a...)\n\t}\n\treturn 0, nil\n}",
"func SetVerbose(v bool) {\n\tisVerbose = v\n}",
"func Verbose() bool {\n\tif val := Get(\"Flag-Verbose\"); val != nil {\n\t\tif v, ok := val.(bool); ok {\n\t\t\treturn v\n\t\t}\n\t}\n\treturn false\n}",
"func SubtestTestVerbosity(t *testing.T, tw *testWriter, verbosity int) {\n\tLogDebug(\"TEST_1\")\n\tLogVerbose(\"TEST_2\")\n\tLogInfo(\"TEST_3\")\n\tLogWarning(\"TEST_4\")\n\tLogError(\"TEST_5\")\n\n\tbMatch := make([]bool, 5)\n\tbMatch[0], _ = regexp.MatchString(\"TEST_1\", tw.GetString())\n\tbMatch[1], _ = regexp.MatchString(\"TEST_2\", tw.GetString())\n\tbMatch[2], _ = regexp.MatchString(\"TEST_3\", tw.GetString())\n\tbMatch[3], _ = regexp.MatchString(\"TEST_4\", tw.GetString())\n\tbMatch[4], _ = regexp.MatchString(\"TEST_5\", tw.GetString())\n\n\tfor i := 4; i >= 4-verbosity; i-- {\n\t\tif !bMatch[i] {\n\t\t\tt.Fail()\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor i := 0; i < 4-verbosity; i++ {\n\t\tif bMatch[i] {\n\t\t\tt.Fail()\n\t\t\treturn\n\t\t}\n\t}\n\n}",
"func testGoFlags() string {\n\treturn \"-v\"\n}",
"func Verbose(v bool) func(*options) error {\n\treturn func(o *options) error { return o.setVerbose(v) }\n}",
"func V(format string, data ...interface{}) {\n\tif opts.Verbose {\n\t\tfmt.Printf(format, data...)\n\t}\n}",
"func (c *Client) SetVVerbose(v bool) {\n\tc.logger.SetInfo(v)\n}",
"func (_m *Transpiler) SetVerbose(_a0 bool) {\n\t_m.Called(_a0)\n}",
"func TestVerboseFail(t *testing.T) {\n\tt.Log(\"This is a log statement for a failing test...\")\n\tt.Fatal(\"boom\")\n}",
"func viv(t *testing.T, ival, ivalok int, fname, test string) {\n\tif ival != ivalok {\n\t\tlog.Output(2, fmt.Sprintf(\n\t\t\t\"%s failed: %s: want %d got %d\",\n\t\t\tfname, test, ival, ivalok))\n\t\tt.Fail()\n\t} else if *verbose {\n\t\tlog.Output(2, fmt.Sprintf(\n\t\t\t\"%s passed: %s: want %d got %d\",\n\t\t\tfname, test, ival, ivalok))\n\t}\n}",
"func VPrint(verbose bool, str string) {\n\tif verbose {\n\t\tfmt.Print(str)\n\t}\n}",
"func (c CmdOptions) Verbose() bool {\n\treturn c.Logging.LogLevel == \"debug\"\n}",
"func verboseLog(v ...interface{}) {\n\tif *isVerbose {\n\t\tlog.Println(v...)\n\t}\n}",
"func (s *server) Verbose(v bool) Server {\n\ts.verbose = v\n\treturn s\n}",
"func init() {\n\tflag.Var(&verbosity, \"v\", \"log level for V logs\")\n}",
"func EnableVerbose() {\n\tverbose = true\n}",
"func (d *Deej) Verbose() bool {\n\treturn d.verbose\n}",
"func (c PolicyCheckCommand) IsVerbose() bool {\n\treturn false\n}",
"func (fs *MediaScan) Verbose(v bool)\t{\n\tfs.verbose = v\n}",
"func (lx *LXRHash) Verbose(val bool) {\n\tlx.verbose = val\n}",
"func printVerbose(msg string) {\n\tif Verbose {\n\t\tlog.Print(msg)\n\t}\n}",
"func SetVerbose(b bool) {\n\tverbose = b\n\tif b == false {\n\t\tVLogger = log.New(ioutil.Discard, \"synchro: \", log.Lmicroseconds)\n\t} else {\n\t\tVLogger = log.New(os.Stdout, \"synchro: \", log.Lmicroseconds)\n\t}\n}",
"func (a *AGI) Verbose(msg string, level int) error {\n\treturn a.Command(\"VERBOSE\", msg, strconv.Itoa(level)).Err()\n}",
"func CheckVerboseFlag(flag bool) {\n\t// If --verbose flag is passed by user, this will be set to true.\n\tverboseFlag = flag\n}",
"func (c AutoplanCommand) IsVerbose() bool {\n\treturn false\n}",
"func Verbose(v bool) NewOption {\n\treturn func(e *Eskeeper) {\n\t\te.verbose = v\n\t}\n}",
"func (v *VCS) runVerboseOnly(dir string, cmdline string, kv ...string) error {\n\t_, err := v.run1(dir, cmdline, kv, false)\n\treturn err\n}",
"func Verbose() VerbLevel {\n\treturn gOpts.Verbose()\n}",
"func (c *Conf) Verbose() bool {\n\treturn c.verbose\n}",
"func TestLogger_V_Integration(t *testing.T) {\n\tfor i := 1; i < 5; i++ {\n\t\tverbosity := i\n\t\ttestName := fmt.Sprintf(\"verbosity-%d\", verbosity)\n\t\tlog.MustInitWithOptions(testName, []log.Option{\n\t\t\tlog.WithOutput(ioutil.Discard),\n\t\t\tlog.WithLogLevel(verbosity),\n\t\t})\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tfor logLevel := 1; logLevel < 5; logLevel++ {\n\t\t\t\tlog.V(logLevel).Info(\"hello, world\")\n\t\t\t}\n\t\t})\n\t}\n}",
"func Verbose(cmdTag, format string, a ...interface{}) {\n\tif level < LevelVerbose || !logging {\n\t\treturn\n\t}\n\tif _, ok := cmdMap[cmdTag]; !ok {\n\t\tcmdTag = Mixer\n\t}\n\tlogTag(\"VRB\", cmdTag, format, a...)\n}",
"func Verbose(g *types.Cmd) {\n\tg.AddOptions(\"--verbose\")\n}",
"func Verbose(g *types.Cmd) {\n\tg.AddOptions(\"--verbose\")\n}",
"func printVerbose(msg string) {\n\tif Verbose {\n\t\tfmt.Println(fmt.Sprintf(\"VERBOSE %s: %s\", getFormattedTime(), msg))\n\t}\n}",
"func (_m *Transpiler) Verbose() bool {\n\tret := _m.Called()\n\n\tvar r0 bool\n\tif rf, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = rf()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\n\treturn r0\n}",
"func (dbi *DatabaseInfo) SetVeryVerbose(vv bool) {\n\tdbi.vv = vv\n\tif dbi.vv == true {\n\t\tdbi.verbose = true\n\t}\n}",
"func Verbosef(s string) {\n\tif debug != false || verbose != false {\n\t\tlog.Print(fmt.Sprint(s))\n\t}\n}",
"func GetVerbose() bool {\n\tverbose := os.Getenv(verboseEnv)\n\treturn verbose != \"\"\n}",
"func (c config) Verbosity() int { return c.verbose }",
"func Debugv(v ...interface{}) {\n\t// idiomatic debug\n\tif !isVeryVerbose {\n\t\treturn\n\t}\n\tdebugPrinter.Print(v...)\n}",
"func (v *vcsCmd) runVerboseOnly(dir string, cmd string, keyval ...string) error {\n\t_, err := v.run1(dir, cmd, keyval, false)\n\treturn err\n}",
"func Test(t *testing.T) {\n\t//logging.ToggleDebugLogs(true)\n\tTestingT(t)\n}",
"func Test(t *testing.T) {\n\t// logging.ToggleDebugLogs(true)\n\t// log.SetLevel(log.DebugLevel)\n\n\tTestingT(t)\n}",
"func vvd(t *testing.T, val, valok, dval float64, fname, test string) {\n\tvar a, f float64 // Absolute and fractional error.\n\ta = val - valok\n\tif a != 0.0 && math.Abs(a) > math.Abs(dval) {\n\t\tf = math.Abs(valok / a)\n\t\tlog.Output(2, fmt.Sprintf(\n\t\t\t\"%s failed: %s want %.20f got %.20f (1/%.3f)\",\n\t\t\tfname, test, valok, val, f))\n\t\tt.Fail()\n\t} else if *verbose {\n\t\tf := math.Abs(valok / a)\n\t\tlog.Output(2, fmt.Sprintf(\n\t\t\t\"%s passed: %s want %.20f got %.20f (1/%.3f)\",\n\t\t\tfname, test, valok, val, f))\n\t}\n}",
"func parseVerboseFlag() []string {\n\targs := make([]string, 0, flag.NArg())\n\tfor _, arg := range flag.Args() {\n\t\tif arg != \"-v\" {\n\t\t\targs = append(args, arg)\n\t\t} else {\n\t\t\t*vFlag = true\n\t\t}\n\t}\n\treturn args\n}",
"func SetVeryVerbose(vv bool) {\n\tisVeryVerbose = vv\n}",
"func OptVerbose(verbose bool) Option {\n\treturn func(p *Profanity) {\n\t\tp.Config.Verbose = &verbose\n\t}\n}",
"func (log Logger) Verbose() bool {\n\treturn log.verbose\n}",
"func Verbosef(format string, args ...interface{}) {\n\tif globalOptions.verbosity >= 1 {\n\t\tPrintf(format, args...)\n\t}\n}",
"func verboseLog(m string) {\n\tif !Verbose {\n\t\treturn\n\t}\n\tfmt.Printf(\"%s [ILLUMIOAPI VERBOSE] - %s\\r\\n\", time.Now().Format(\"2006-01-02 15:04:05 \"), m)\n}",
"func (l *Log) Verbose() bool {\n\treturn l.verbose\n}",
"func SetVerbose(verbose bool) {\n\tif verbose {\n\t\t_ = os.Setenv(verboseEnv, \"true\")\n\t} else {\n\t\t_ = os.Unsetenv(verboseEnv)\n\t}\n}",
"func (c *Context) VPTEST(mxy, xy operand.Op) {\n\tc.addinstruction(x86.VPTEST(mxy, xy))\n}",
"func LogVerbose(text string) {\n\tfmt.Println(\"VERBOSE:\", text)\n}",
"func (data *Invasion) verbose(str string) {\n data.VerboseLog = append(data.VerboseLog, str)\n}",
"func TestVersion(t *testing.T) {\n\t//fmt.Println(\"EliteProvision [\" + Version() + \"]\")\n}",
"func VDepth(depth int, level Level) Verbose {\n\t// This function tries hard to be cheap unless there's work to do.\n\t// The fast path is two atomic loads and compares.\n\n\t// Here is a cheap but safe test to see if V logging is enabled globally.\n\tif logging.verbosity.get() >= level {\n\t\treturn newVerbose(level, true)\n\t}\n\n\t// It's off globally but vmodule may still be set.\n\t// Here is another cheap but safe test to see if vmodule is enabled.\n\tif atomic.LoadInt32(&logging.filterLength) > 0 {\n\t\t// Now we need a proper lock to use the logging structure. The pcs field\n\t\t// is shared so we must lock before accessing it. This is fairly expensive,\n\t\t// but if V logging is enabled we're slow anyway.\n\t\tlogging.mu.Lock()\n\t\tdefer logging.mu.Unlock()\n\t\tif runtime.Callers(2+depth, logging.pcs[:]) == 0 {\n\t\t\treturn newVerbose(level, false)\n\t\t}\n\t\t// runtime.Callers returns \"return PCs\", but we want\n\t\t// to look up the symbolic information for the call,\n\t\t// so subtract 1 from the PC. runtime.CallersFrames\n\t\t// would be cleaner, but allocates.\n\t\tpc := logging.pcs[0] - 1\n\t\tv, ok := logging.vmap[pc]\n\t\tif !ok {\n\t\t\tv = logging.setV(pc)\n\t\t}\n\t\treturn newVerbose(level, v >= level)\n\t}\n\treturn newVerbose(level, false)\n}",
"func (v *VCS) runOutputVerboseOnly(dir string, cmdline string, kv ...string) ([]byte, error) {\n\treturn v.run1(dir, cmdline, kv, false)\n}",
"func VTESTPD(mxy, xy operand.Op) { ctx.VTESTPD(mxy, xy) }",
"func (c *Client) SetVerbose(v bool) {\n\tc.logger.SetDebug(v)\n}",
"func Verbose(verbose bool) Option {\n\treturn func(s *Stream) error {\n\t\ts.verbose = verbose\n\t\treturn nil\n\t}\n}",
"func IsVerboseLogging(jctx *JCtx) bool {\n\treturn jctx.config.Log.Verbose\n}",
"func Debugpv(prefix string, v ...interface{}) {\n\tif !isVeryVerbose {\n\t\treturn\n\t}\n\tdebugPrinter.WithPrefix(prefix).Print(v...)\n}",
"func (c Config) VerboseOrDefault() bool {\n\tif c.Verbose != nil {\n\t\treturn *c.Verbose\n\t}\n\treturn false\n}",
"func (c Config) VerboseOrDefault() bool {\n\tif c.Verbose != nil {\n\t\treturn *c.Verbose\n\t}\n\treturn false\n}",
"func Verbose(format string, v ...interface{}) {\n\tif std.level >= VerboseLevel {\n\t\tstd.Output(std.callDepth, fmt.Sprintf(format, v...), VerboseLevel)\n\t}\n}",
"func VPTEST(mxy, xy operand.Op) { ctx.VPTEST(mxy, xy) }",
"func BZVerbose(v bool) DecompressorOption {\n\treturn func(o *decompressorOpts) {\n\t\to.verbose = v\n\t}\n}",
"func (*GrpcLog) V(l int) bool {\n\treturn true\n}",
"func (c *Context) VTESTPD(mxy, xy operand.Op) {\n\tc.addinstruction(x86.VTESTPD(mxy, xy))\n}",
"func TraceVerbose(message string) {\n\ttraceMessageImpl(message, Verbose)\n}",
"func (l *loggerWrapper) V(v int) bool {\n\treturn l.logger.Core().Enabled(grpcToZapLevel[v])\n}",
"func (s *Stats) PrintVerbose() {\n\tfmt.Println(\"Stats:\")\n\tfmt.Println(\"Time elapsed:\", s.Time)\n\tfmt.Println(\"Nodes coverage:\", s.NodeCoverage)\n\tfmt.Println(\"Links coverage:\", s.LinkCoverage)\n\tfmt.Println(\"Nodes histogram:\", s.NodeHistogram)\n\tfmt.Println(\"Links histogram:\", s.LinkHistogram)\n\tfmt.Println(\"TimeToNode histogram:\", s.TimeToNodeHistogram)\n}",
"func (m Message) Verbose() bool {\n\treturn true\n}",
"func EnableVerboseLogging(enable bool) {\n\tinternal.EnableVerboseLogging(enable)\n}",
"func (parser *MRCPParser) MRCPParserVerboseSet(verbose bool) {\n\n}",
"func (o *opts) WithVerbose(value bool) *opts {\n\tb := value\n\n\to.verbose = &b\n\n\treturn o\n}",
"func debug(format string, v ...interface{}) {\n\tif *verbose {\n\t\tfmt.Printf(format+\"\\n\", v...)\n\t}\n}",
"func (l *PlexLogger) V(v int) logr.Logger {\n\treturn l.WithValues(\"verbosity\", v)\n}",
"func (o *Convergence) SetVerbose(verbose bool) {\n\to.Verbose = verbose\n}",
"func TestStatus() {\n\tfmt.Println(\"\")\n\tfmt.Println(\"=============================================================================================\")\n\tfmt.Printf(\"TOTAL TEST SUITES : %d\\n\", counters.suitesCount)\n\tfmt.Printf(\"TOTAL TEST CASES : %d\\n\", counters.testCaseCount)\n\tfmt.Printf(\"TOTAL TEST METHODS : %d\\n\", counters.methodsCount)\n\tfmt.Printf(\"TOTAL TEST METHODS PASS : %d\\n\", counters.methodsPassedCount)\n\tfmt.Printf(\"TOTAL TEST METHODS FAIL : %d\\n\", counters.methodsFailedCount)\n\tfmt.Println(\"\")\n\tif TestPassed {\n\t\tfmt.Println(\"TEST STATUS : PASS\")\n\t} else {\n\t\tfmt.Println(\"TEST STATUS : FAIL\")\n\t}\n\tfmt.Println(\"=============================================================================================\")\n\tfmt.Println(\"\")\n}",
"func (g *MRCPGenerator) MRCPGeneratorVerboseSet(verbose bool) {\n\n}",
"func (c *Client) SetVerbose(verbose bool) {\n\tc.verbose = verbose\n\tif c.rpc != nil {\n\t\tc.rpc.Verbose = verbose\n\t}\n}",
"func V(vs ...interface{}) {\n\tstd.Dump(vs...)\n}",
"func testBlockVerbose(blockHash, prevHash *chainhash.Hash, confirmations, height int64) *btcjson.GetBlockVerboseResult {\n\treturn &btcjson.GetBlockVerboseResult{\n\t\tHash: blockHash.String(),\n\t\tConfirmations: confirmations,\n\t\tHeight: height,\n\t\tPreviousHash: prevHash.String(),\n\t}\n}",
"func (p *Plog) Verbose(msg string) {\n\t// Verbose lines are skipped when\n\t// verbose flag is not enabled\n\tif p.verbose {\n\t\tp.Print(msg)\n\t}\n}",
"func TestDebugBehaviour(t *testing.T) {\n\n\t// Define test cases\n\ttype inTest struct {\n\t\ttest int8\n\t\tdebug bool\n\t}\n\ttype outTest struct {\n\t\tdebugInfoIncluded bool\n\t}\n\n\ttype debugData struct {\n\t\tbidderLevelDebugAllowed bool\n\t\taccountLevelDebugAllowed bool\n\t\theaderOverrideDebugAllowed bool\n\t}\n\n\ttype aTest struct {\n\t\tdesc string\n\t\tin inTest\n\t\tout outTest\n\t\tdebugData debugData\n\t\tgenerateWarnings bool\n\t}\n\ttestCases := []aTest{\n\t\t{\n\t\t\tdesc: \"test flag equals zero, ext debug flag false, no debug info expected\",\n\t\t\tin: inTest{test: 0, debug: false},\n\t\t\tout: outTest{debugInfoIncluded: false},\n\t\t\tdebugData: debugData{true, true, false},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test flag equals zero, ext debug flag true, debug info expected\",\n\t\t\tin: inTest{test: 0, debug: true},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{true, true, false},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test flag equals 1, ext debug flag false, debug info expected\",\n\t\t\tin: inTest{test: 1, debug: false},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{true, true, false},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test flag equals 1, ext debug flag true, debug info expected\",\n\t\t\tin: inTest{test: 1, debug: true},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{true, true, false},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test flag not equal to 0 nor 1, ext debug flag false, no debug info expected\",\n\t\t\tin: inTest{test: 2, debug: false},\n\t\t\tout: outTest{debugInfoIncluded: false},\n\t\t\tdebugData: debugData{true, true, false},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test flag not equal to 0 nor 1, ext debug flag true, debug info expected\",\n\t\t\tin: inTest{test: -1, debug: true},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{true, true, false},\n\t\t\tgenerateWarnings: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test account level debug disabled\",\n\t\t\tin: inTest{test: -1, debug: true},\n\t\t\tout: outTest{debugInfoIncluded: false},\n\t\t\tdebugData: debugData{true, false, false},\n\t\t\tgenerateWarnings: true,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test header override enabled when all other debug options are disabled\",\n\t\t\tin: inTest{test: -1, debug: false},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{false, false, true},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test header override and url debug options are enabled when all other debug options are disabled\",\n\t\t\tin: inTest{test: -1, debug: true},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{false, false, true},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test header override and url and bidder debug options are enabled when account debug option is disabled\",\n\t\t\tin: inTest{test: -1, debug: true},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{true, false, true},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t\t{\n\t\t\tdesc: \"test all debug options are enabled\",\n\t\t\tin: inTest{test: -1, debug: true},\n\t\t\tout: outTest{debugInfoIncluded: true},\n\t\t\tdebugData: debugData{true, true, true},\n\t\t\tgenerateWarnings: false,\n\t\t},\n\t}\n\n\t// Set up test\n\tnoBidServer := func(w http.ResponseWriter, r *http.Request) 
{\n\t\tw.WriteHeader(204)\n\t}\n\tserver := httptest.NewServer(http.HandlerFunc(noBidServer))\n\tdefer server.Close()\n\n\tcategoriesFetcher, err := newCategoryFetcher(\"./test/category-mapping\")\n\tif err != nil {\n\t\tt.Errorf(\"Failed to create a category Fetcher: %v\", err)\n\t}\n\n\tbidRequest := &openrtb2.BidRequest{\n\t\tID: \"some-request-id\",\n\t\tImp: []openrtb2.Imp{{\n\t\t\tID: \"some-impression-id\",\n\t\t\tBanner: &openrtb2.Banner{Format: []openrtb2.Format{{W: 300, H: 250}, {W: 300, H: 600}}},\n\t\t\tExt: json.RawMessage(`{\"prebid\":{\"bidder\":{\"appnexus\": {\"placementId\": 1}}}}`),\n\t\t}},\n\t\tSite: &openrtb2.Site{Page: \"prebid.org\", Ext: json.RawMessage(`{\"amp\":0}`)},\n\t\tDevice: &openrtb2.Device{UA: \"curl/7.54.0\", IP: \"::1\"},\n\t\tAT: 1,\n\t\tTMax: 500,\n\t}\n\n\tbidderImpl := &goodSingleBidder{\n\t\thttpRequest: &adapters.RequestData{\n\t\t\tMethod: \"POST\",\n\t\t\tUri: server.URL,\n\t\t\tBody: []byte(\"{\\\"key\\\":\\\"val\\\"}\"),\n\t\t\tHeaders: http.Header{},\n\t\t},\n\t\tbidResponse: &adapters.BidderResponse{},\n\t}\n\n\te := new(exchange)\n\n\te.cache = &wellBehavedCache{}\n\te.me = &metricsConf.NilMetricsEngine{}\n\te.gdprPermsBuilder = fakePermissionsBuilder{\n\t\tpermissions: &permissionsMock{\n\t\t\tallowAllBidders: true,\n\t\t},\n\t}.Builder\n\te.currencyConverter = currency.NewRateConverter(&http.Client{}, \"\", time.Duration(0))\n\te.categoriesFetcher = categoriesFetcher\n\te.requestSplitter = requestSplitter{\n\t\tme: &metricsConf.NilMetricsEngine{},\n\t\tgdprPermsBuilder: e.gdprPermsBuilder,\n\t}\n\tctx := context.Background()\n\n\t// Run tests\n\tfor _, test := range testCases {\n\n\t\te.adapterMap = map[openrtb_ext.BidderName]AdaptedBidder{\n\t\t\topenrtb_ext.BidderAppnexus: AdaptBidder(bidderImpl, server.Client(), &config.Configuration{}, &metricsConfig.NilMetricsEngine{}, openrtb_ext.BidderAppnexus, &config.DebugInfo{Allow: test.debugData.bidderLevelDebugAllowed}, \"\"),\n\t\t}\n\n\t\tbidRequest.Test = test.in.test\n\n\t\tif test.in.debug {\n\t\t\tbidRequest.Ext = json.RawMessage(`{\"prebid\":{\"debug\":true}}`)\n\t\t} else {\n\t\t\tbidRequest.Ext = nil\n\t\t}\n\n\t\tauctionRequest := &AuctionRequest{\n\t\t\tBidRequestWrapper: &openrtb_ext.RequestWrapper{BidRequest: bidRequest},\n\t\t\tAccount: config.Account{DebugAllow: test.debugData.accountLevelDebugAllowed},\n\t\t\tUserSyncs: &emptyUsersync{},\n\t\t\tStartTime: time.Now(),\n\t\t\tHookExecutor: &hookexecution.EmptyHookExecutor{},\n\t\t\tTCF2Config: gdpr.NewTCF2Config(config.TCF2{}, config.AccountGDPR{}),\n\t\t}\n\t\tif test.generateWarnings {\n\t\t\tvar errL []error\n\t\t\terrL = append(errL, &errortypes.Warning{\n\t\t\t\tMessage: fmt.Sprintf(\"CCPA consent test warning.\"),\n\t\t\t\tWarningCode: errortypes.InvalidPrivacyConsentWarningCode})\n\t\t\tauctionRequest.Warnings = errL\n\t\t}\n\t\tdebugLog := &DebugLog{}\n\t\tif test.debugData.headerOverrideDebugAllowed {\n\t\t\tdebugLog = &DebugLog{DebugOverride: true, DebugEnabledOrOverridden: true}\n\t\t}\n\t\t// Run test\n\t\toutBidResponse, err := e.HoldAuction(ctx, auctionRequest, debugLog)\n\n\t\t// Assert no HoldAuction error\n\t\tassert.NoErrorf(t, err, \"%s. ex.HoldAuction returned an error: %v \\n\", test.desc, err)\n\t\tassert.NotNilf(t, outBidResponse.Ext, \"%s. outBidResponse.Ext should not be nil \\n\", test.desc)\n\t\tassert.False(t, auctionRequest.BidderResponseStartTime.IsZero())\n\t\tactualExt := &openrtb_ext.ExtBidResponse{}\n\t\terr = json.Unmarshal(outBidResponse.Ext, actualExt)\n\t\tassert.NoErrorf(t, err, \"%s. 
\\\"ext\\\" JSON field could not be unmarshaled. err: \\\"%v\\\" \\n outBidResponse.Ext: \\\"%s\\\" \\n\", test.desc, err, outBidResponse.Ext)\n\n\t\tassert.NotEmpty(t, actualExt.Prebid, \"%s. ext.prebid should not be empty\")\n\t\tassert.NotEmpty(t, actualExt.Prebid.AuctionTimestamp, \"%s. ext.prebid.auctiontimestamp should not be empty when AuctionRequest.StartTime is set\")\n\t\tassert.Equal(t, auctionRequest.StartTime.UnixNano()/1e+6, actualExt.Prebid.AuctionTimestamp, \"%s. ext.prebid.auctiontimestamp has incorrect value\")\n\n\t\tif test.debugData.headerOverrideDebugAllowed {\n\t\t\tassert.Empty(t, actualExt.Warnings, \"warnings should be empty\")\n\t\t\tassert.Empty(t, actualExt.Errors, \"errors should be empty\")\n\t\t}\n\n\t\tif test.out.debugInfoIncluded {\n\t\t\tassert.NotNilf(t, actualExt, \"%s. ext.debug field is expected to be included in this outBidResponse.Ext and not be nil. outBidResponse.Ext.Debug = %v \\n\", test.desc, actualExt.Debug)\n\n\t\t\t// Assert \"Debug fields\n\t\t\tassert.Greater(t, len(actualExt.Debug.HttpCalls), 0, \"%s. ext.debug.httpcalls array should not be empty\\n\", test.desc)\n\t\t\tassert.Equal(t, server.URL, actualExt.Debug.HttpCalls[\"appnexus\"][0].Uri, \"%s. ext.debug.httpcalls array should not be empty\\n\", test.desc)\n\t\t\tassert.NotNilf(t, actualExt.Debug.ResolvedRequest, \"%s. ext.debug.resolvedrequest field is expected to be included in this outBidResponse.Ext and not be nil. outBidResponse.Ext.Debug = %v \\n\", test.desc, actualExt.Debug)\n\n\t\t\t// If not nil, assert bid extension\n\t\t\tif test.in.debug {\n\t\t\t\tactualResolvedReqExt, _, _, err := jsonparser.Get(actualExt.Debug.ResolvedRequest, \"ext\")\n\t\t\t\tassert.NoError(t, err, \"Resolved request should have the correct format\")\n\t\t\t\tassert.JSONEq(t, string(bidRequest.Ext), string(actualResolvedReqExt), test.desc)\n\t\t\t}\n\t\t} else if !test.debugData.bidderLevelDebugAllowed && test.debugData.accountLevelDebugAllowed {\n\t\t\tassert.Equal(t, len(actualExt.Debug.HttpCalls), 0, \"%s. ext.debug.httpcalls array should not be empty\", \"With bidder level debug disable option http calls should be empty\")\n\n\t\t} else {\n\t\t\tassert.Nil(t, actualExt.Debug, \"%s. 
ext.debug.httpcalls array should not be empty\", \"With bidder level debug disable option http calls should be empty\")\n\t\t}\n\n\t\tif test.out.debugInfoIncluded && !test.debugData.accountLevelDebugAllowed && !test.debugData.headerOverrideDebugAllowed {\n\t\t\tassert.Len(t, actualExt.Warnings, 1, \"warnings should have one warning\")\n\t\t\tassert.NotNil(t, actualExt.Warnings[\"general\"], \"general warning should be present\")\n\t\t\tassert.Equal(t, \"debug turned off for account\", actualExt.Warnings[\"general\"][0].Message, \"account debug disabled message should be present\")\n\t\t}\n\n\t\tif !test.out.debugInfoIncluded && test.in.debug && test.debugData.accountLevelDebugAllowed && !test.debugData.headerOverrideDebugAllowed {\n\t\t\tif test.generateWarnings {\n\t\t\t\tassert.Len(t, actualExt.Warnings, 2, \"warnings should have one warning\")\n\t\t\t} else {\n\t\t\t\tassert.Len(t, actualExt.Warnings, 1, \"warnings should have one warning\")\n\t\t\t}\n\t\t\tassert.NotNil(t, actualExt.Warnings[\"appnexus\"], \"bidder warning should be present\")\n\t\t\tassert.Equal(t, \"debug turned off for bidder\", actualExt.Warnings[\"appnexus\"][0].Message, \"account debug disabled message should be present\")\n\t\t}\n\n\t\tif test.generateWarnings {\n\t\t\tassert.NotNil(t, actualExt.Warnings[\"general\"], \"general warning should be present\")\n\t\t\tCCPAWarningPresent := false\n\t\t\tfor _, warn := range actualExt.Warnings[\"general\"] {\n\t\t\t\tif warn.Code == errortypes.InvalidPrivacyConsentWarningCode {\n\t\t\t\t\tCCPAWarningPresent = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.True(t, CCPAWarningPresent, \"CCPA Warning should be present\")\n\t\t}\n\n\t}\n}",
"func (l *Logger) SetVerbose(level event.Level) {\n\tl.Lock()\n\tdefer l.Unlock()\n\n\tl.level = level\n}"
] | [
"0.8219944",
"0.77900326",
"0.73779184",
"0.72216606",
"0.7048828",
"0.6968071",
"0.6917209",
"0.65574235",
"0.6455272",
"0.6455272",
"0.6342419",
"0.633263",
"0.63211787",
"0.62211806",
"0.6217348",
"0.62132",
"0.62004864",
"0.6159226",
"0.6104627",
"0.6104502",
"0.607373",
"0.60611534",
"0.6059551",
"0.6057908",
"0.60323054",
"0.6027807",
"0.60269105",
"0.6017334",
"0.6012902",
"0.6004221",
"0.5965012",
"0.5869678",
"0.58609426",
"0.58582705",
"0.5787328",
"0.57868606",
"0.5773542",
"0.57627267",
"0.5762071",
"0.57440567",
"0.57067895",
"0.5688676",
"0.56638056",
"0.565193",
"0.565193",
"0.56504154",
"0.56475097",
"0.5622293",
"0.5608094",
"0.5603775",
"0.56025624",
"0.5581724",
"0.5554475",
"0.5554268",
"0.5550559",
"0.55446464",
"0.5538753",
"0.55241024",
"0.5516183",
"0.5491719",
"0.5475381",
"0.5474351",
"0.5456983",
"0.5431853",
"0.5431128",
"0.5413772",
"0.5412391",
"0.5402526",
"0.5393225",
"0.5385198",
"0.5354382",
"0.5335351",
"0.5328457",
"0.5316788",
"0.530318",
"0.5300002",
"0.5300002",
"0.52871764",
"0.52863795",
"0.52822435",
"0.5261793",
"0.5251838",
"0.5243674",
"0.5240245",
"0.5218886",
"0.521407",
"0.5209914",
"0.5201182",
"0.51988155",
"0.517869",
"0.517832",
"0.51766896",
"0.5170262",
"0.51678675",
"0.5167704",
"0.51635325",
"0.5158806",
"0.5154066",
"0.5149281",
"0.51320666"
] | 0.65761465 | 7 |
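As a minimal usage sketch for the testing.Verbose API documented in the record above (the package name example_test and the TestSomething function are illustrative assumptions, not taken from the dataset), a test can gate extra diagnostics on the -test.v flag like this:

package example_test

import "testing"

// TestSomething emits extra diagnostics only when the test binary runs
// with -v, i.e. when testing.Verbose() reports true.
func TestSomething(t *testing.T) {
	if testing.Verbose() {
		t.Log("verbose mode: printing extra diagnostics")
	}
	if got, want := 2+2, 4; got != want {
		t.Fatalf("got %d, want %d", got, want)
	}
}

Gating log output on testing.Verbose keeps default test runs quiet while still surfacing the extra detail under go test -v.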
Main is an internal function, part of the implementation of the "go test" command. It was exported because it is crosspackage and predates "internal" packages. It is no longer used by "go test" but preserved, as much as possible, for other systems that simulate "go test" using Main, but Main sometimes cannot be updated as new functionality is added to the testing package. Systems simulating "go test" should be updated to use MainStart. | func Main(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func TestMain(m *testing.M) {\n\tos.Exit(testscript.RunMain(m, map[string]func() int{\n\t\t\"main\": main1,\n\t}))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run()\n\tshutdown()\n\t// THE FOLLOWING LINE IS VERY IMPORTANT and cannot be removed for TestMain func's\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tdefer tracing.Cleanup()\n\n\tglobal = environment.NewStandardGlobalEnvironment()\n\n\t// Run the tests.\n\tos.Exit(m.Run())\n}",
"func TestMain(t *testing.T) {\n\n\tmainInvoked := false\n\n\t// Mock function for main\n\tmainMock := func() {\n\t\tmainInvoked = true\n\t}\n\tmainFunc = mainMock\n\n\t// run main function and check if it invokes the mainMock\n\tmain()\n\n\t// check if mock function was called\n\tif !mainInvoked {\n\t\tt.Error(\"no call to mainFunc was detected\")\n\t}\n\n}",
"func TestMain(m *testing.M) {\n\tsetup.WrapTestMain(m, config)\n}",
"func TestRunMain(t *testing.T) {\n\tmain()\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar code int\n\tfunc() {\n\t\tdefer InitMain()()\n\t\tcode = m.Run()\n\t}()\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func TestMain(m *testing.M) {\n\t// If the test binary is named \"app\", then we're running as a subprocess.\n\t// Otherwise, run the tests.\n\tswitch filepath.Base(os.Args[0]) {\n\tcase \"app\":\n\t\tmain()\n\t\tos.Exit(0)\n\tdefault:\n\t\tos.Exit(m.Run())\n\t}\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(runTest(m))\n}",
"func TestMain(m *testing.M) {\n\ttestsuite.RevelTestHelper(m, \"dev\", run.Run)\n}",
"func TestMain(m *testing.M) {\n\tDropTestData(0)\n\tanswer := m.Run()\n\tDropTestData(0)\n\tos.Exit(answer)\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\t_Init();\n\tresult := m.Run();\n\t_TearDown();\n\tos.Exit(result);\n}",
"func main() {\n\tos.Exit(realMain())\n}",
"func main() {\n\tos.Exit(realMain())\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\tresult := m.Run()\n\n\tos.Exit(result)\n}",
"func TestMain(m *testing.M) {\n\trootDir := \".\"\n\ttestsDir := \"tests\"\n\tdistDir := \"../../../cmd/agent/dist\"\n\n\t// best effort for abs path\n\tif _, fileName, _, ok := runtime.Caller(0); ok {\n\t\trootDir = filepath.Dir(fileName)\n\t\ttestsDir = filepath.Join(rootDir, testsDir)\n\t\tdistDir = filepath.Join(rootDir, distDir)\n\t}\n\tstate := Initialize(rootDir, testsDir, distDir)\n\n\t// testing this package needs an inited aggregator\n\t// to work properly.\n\taggregator.InitAggregatorWithFlushInterval(nil, \"\", \"\", time.Hour)\n\n\tret := m.Run()\n\n\tpython.PyEval_RestoreThread(state)\n\t// benchmarks don't like python.Finalize() for some reason, let's just not call it\n\n\tos.Exit(ret)\n}",
"func TestMain(m *testing.M) {\n\tfmt.Println(\"Building spawner binary...\")\n\tif err := exec.Command(\"go\", \"build\", \"-tags\", \"forceposix\", \"-o\", \"spawner.exe\", \"./spawner\").Run(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tkillAllSpawner()\n\tcode := m.Run()\n\n\t// Need some time to release binary\n\tkillAllSpawner()\n\tif err := os.Remove(\"spawner.exe\"); err != nil {\n\t\tfmt.Println(\"Failed to remove spawner:\", err)\n\t}\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run() \n os.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\t// Note: The setup will provision a single K8s env and\n\t// all the tests need to create and use a separate namespace\n\n\t// setup env test\n\tif err := setupSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// run tests\n\tcode := m.Run()\n\n\t// tear down test env\n\tif err := tearDownSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\t// Code here runs before testing starts\n\tmux = GetMux()\n\t// Run tests\n\texitCode := m.Run()\n\t// Code here runs after testing finishes\n\tos.Exit(exitCode)\n}",
"func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}",
"func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}",
"func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}",
"func main() {\n\tif err := realMain(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func TestMain(m *testing.M) {\n\n\t// Run Setup\n\tSetup()\n\n\t// Run all the tests\n\treturnCode := m.Run()\n\n\t// Run teardown\n\tTearDown()\n\n\t// Pass on the exit codes\n\tos.Exit(returnCode)\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar err error\n\t// When flags are specified construct the run config object. When no\n\t// flags have been specified, the run config will be nil and the tests\n\t// will early return.\n\tif len(os.Args) > 1 {\n\t\trc, err = initRunConfig()\n\t}\n\tif err != nil {\n\t\t// Silly Go has no method in M to log errors or abort,\n\t\t// so we'll have to do it outside of the testing module.\n\t\tlog.Fatal(\"Can't initialize test server\", err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(m.Run())\n}",
"func Test_main(t *testing.T) {\n\tif *systemTest {\n\t\tsignalHandler()\n\t\tendRunning = make(chan bool, 1)\n\t\tgo main()\n\t\t<-endRunning\n\t}\n}",
"func TestMain(m *testing.M) {\n\tflag.BoolVar(&realTest, \"real\", false, \"Test with real uHunt API server\")\n\tflag.Parse()\n\tos.Exit(m.Run())\n}",
"func RealMain(opts types.Options, d types.Deployer, tester types.Tester) error {\n\t// Now for the core kubetest2 logic:\n\t// - build\n\t// - cluster up\n\t// - test\n\t// - cluster down\n\t// TODO(bentheelder): write out structured metadata\n\t// TODO(bentheelder): signal handling & timeoutf\n\n\t// build if specified\n\tif opts.ShouldBuild() {\n\t\tbuild := d.GetBuilder()\n\t\tif build == nil {\n\t\t\tbuild = defaultBuild\n\t\t}\n\t\t// TODO(bentheelder): this should write out to JUnit\n\t\tif err := build(); err != nil {\n\t\t\t// we do not continue to up / test etc. if build fails\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// up a cluster\n\tif opts.ShouldUp() {\n\t\t// TODO(bentheelder): this should write out to JUnit\n\t\tif err := d.Up(); err != nil {\n\t\t\t// we do not continue to test if build fails\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// ensure tearing down the cluster happens last\n\tdefer func() {\n\t\tif opts.ShouldDown() {\n\t\t\t// TODO(bentheelder): this should write out to JUnit\n\t\t\td.Down()\n\t\t}\n\t}()\n\n\t// and finally test, if a test was specified\n\tif opts.ShouldTest() {\n\t\t// TODO(bentheelder): this should write out to JUnit\n\t\ttester.Test()\n\t}\n\n\treturn nil\n}",
"func TestMain(t *testing.T) {\n\tif *systemTestFlag {\n\t\tos.Stdin.Close() // interpreter exits on EOF\n\t\tmain()\n\t}\n}",
"func TestMain(m *testing.M) {\n\tif err := TestDropDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tif err := TestCreateDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tif err := TestCreateTables(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tcode := m.Run()\n\n\tos.Exit(code)\n\n\tif err := TestDropDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M {}",
"func TestMain(m *testing.M) {\n\tsetUp()\n\tretCode := m.Run()\n\ttearDown()\n\tos.Exit(retCode)\n}",
"func TestMain(m *testing.M) {\n\tlogger.SetOutput(io.Discard)\n\n\tb, err := exec.Command(\"git\", \"rev-parse\", \"--show-cdup\").Output()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"failed to execute 'git rev-parse --show-cdup': %s\", err))\n\t}\n\tprojRoot := strings.TrimSpace(string(b))\n\tos.Remove(filepath.Join(projRoot, \".evans.toml\"))\n\n\tsetEnv := func(k, v string) func() {\n\t\told := os.Getenv(k)\n\t\tos.Setenv(k, v)\n\t\treturn func() {\n\t\t\tos.Setenv(k, old)\n\t\t}\n\t}\n\n\tconfigDir := os.TempDir()\n\tcleanup1 := setEnv(\"XDG_CONFIG_HOME\", configDir)\n\tdefer cleanup1()\n\n\tcacheDir := os.TempDir()\n\tcleanup2 := setEnv(\"XDG_CACHE_HOME\", cacheDir)\n\tdefer cleanup2()\n\n\tgoleak.VerifyTestMain(m, goleak.IgnoreTopFunction(\"github.com/desertbit/timer.timerRoutine\"))\n}",
"func Main(rootCommand *Command, version string) {\n\tos.Exit(Run(rootCommand, version, internal.NewOSRunEnv()))\n}",
"func TestMain(m *testing.M) {\n\trest.StartMockupServer()\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\t// We get a chance to parse flags to include the framework flags for the\n\t// framework as well as any additional flags included in the integration.\n\tflag.Parse()\n\n\t// EnableInjectionOrDie will enable client injection, this is used by the\n\t// testing framework for namespace management, and could be leveraged by\n\t// features to pull Kubernetes clients or the test environment out of the\n\t// context passed in the features.\n\tctx, startInformers := injection.EnableInjectionOrDie(nil, nil) //nolint\n\tstartInformers()\n\n\t// global is used to make instances of Environments, NewGlobalEnvironment\n\t// is passing and saving the client injection enabled context for use later.\n\tglobal = environment.NewGlobalEnvironment(ctx)\n\n\t// Run the tests.\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\terr := os.Chdir(\"..\")\n\tif err != nil {\n\t\tfmt.Printf(\"could not change dir: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tabs, err := filepath.Abs(binaryName)\n\n\tif err != nil {\n\t\tfmt.Printf(\"could not get abs path for %s: %v\", binaryName, err)\n\t\tos.Exit(1)\n\t}\n\n\tbinaryPath = abs\n\n\tif err := exec.Command(\"go\", \"build\").Run(); err != nil {\n\t\tfmt.Printf(\"could not make binary for %s: %v\", binaryName, err)\n\t\tos.Exit(1)\n\t}\n\n\t// Start mock server\n\tgo func() {\n\t\tmux := http.NewServeMux()\n\t\tserver := http.Server{Addr: \":8080\", Handler: mux}\n\t\tmux.HandleFunc(\"/token\", MockTokenApi)\n\t\tmux.HandleFunc(\"/expiredtoken\", MockExpiredTokenApi)\n\t\tmux.HandleFunc(\"/curl\", MockCurlApi)\n\t\tif err := server.ListenAndServe(); err != nil {\n\t\t\tfmt.Printf(\"could not listen on port 8080 %v\", err)\n\t\t}\n\t}()\n\n\tstatus := m.Run()\n\n\tos.Remove(binaryPath)\n\tos.Exit(status)\n}",
"func TestMain(m *testing.M) {\n\tif android.BuildOs != android.Linux {\n\t\t// b/145598135 - Generating host snapshots for anything other than linux is not supported.\n\t\tlog.Printf(\"Skipping as sdk snapshot generation is only supported on %s not %s\", android.Linux, android.BuildOs)\n\t\tos.Exit(0)\n\t}\n\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\terr := ssntpTestsSetup()\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\tret := m.Run()\n\n\tssntpTestsTeardown()\n\tos.Exit(ret)\n}",
"func TestMain(m *testing.M) {\n\n\tcolor.NoColor = true\n\n\tflag.Parse()\n\topt.Paths = flag.Args()\n\n\tstatus := godog.RunWithOptions(\"godogs\", func(s *godog.Suite) {\n\t\tsteps.CliContext(s)\n\t\tsteps.EnvContext(s)\n\t\tsteps.VersionFeatureContext(s)\n\t}, opt)\n\n\tif st := m.Run(); st > status {\n\t\tstatus = st\n\t}\n\tos.Exit(status)\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\t// We are running in bazel so set up the directory for the test binaries\n\tif os.Getenv(\"TEST_WORKSPACE\") != \"\" {\n\t\t// TODO create a toolchain for this\n\t\tpaths.MaybeSetEnv(\"PATH\", \"kubetest2-kind\", \"hack\", \"bin\", \"kubetest2-kind\")\n\t}\n\n\tnoKind := os.Getenv(\"TEST_DO_NOT_USE_KIND\")\n\tif noKind == \"\" {\n\t\tos.Setenv(\"USE_EXISTING_CLUSTER\", \"true\")\n\n\t\t// TODO random name for server and also random open port\n\t\terr := exec.StartKubeTest2(\"test\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t// TODO verify success of cluster start? Does kind do it?\n\n\te := testenv.NewEnv(runtime.NewSchemeBuilder(api.AddToScheme),\n\t\tfilepath.Join(\"..\", \"config\", \"crd\", \"bases\"),\n\t\tfilepath.Join(\"..\", \"config\", \"rbac\", \"bases\"))\n\n\tenv = e.Start()\n\tcode := m.Run()\n\te.Stop()\n\n\tif noKind == \"\" {\n\t\terr := exec.StopKubeTest2(\"test\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tcode := setupForTesting()\n\tif code != 0 {\n\t\tos.Exit(code)\n\t}\n\tcode = m.Run()\n\n\tif !TestDisableDatabase {\n\t\terr := TestDB.Close()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"close error:\", err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n\tos.Exit(code)\n}",
"func TestMain() { //nolint:deadcode\n\tmain()\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar err error\n\ts, err := NewServer(TESTDB, 10, 2, ioutil.Discard , \":9123\")\n\tif err!=nil {\n\t\tpanic(err)\n\t}\n\tts=s\n\ts.Start()\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\t// Initialize so we can actually run tests\n\tbot := BofhwitsBot{}\n\tbotSetup(&bot)\n\tretVal := m.Run()\n\tbotTeardown(&bot)\n\tos.Exit(retVal)\n\n}",
"func TestMain(m *testing.M) {\n\t// Override the expected bundle location.\n\tExpectedBundleLocation = BundleLocationBuildDirectory\n\n\t// Run tests.\n\tm.Run()\n}",
"func TestMain(m *testing.M) {\n\tvar err error\n\tvar writeGolden bool\n\tflag.BoolVar(&writeGolden, \"write-golden\", false, \"write golden files\")\n\tflag.Parse()\n\tif writeGolden {\n\t\terr = updateGolden()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tassert.Main(m)\n}",
"func Main(use string, options ...RootCommandOption) {\n\tappcmd.Main(context.Background(), newRootCommand(use, options...), version)\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\t// if *databaseTest {\n\tif isIntegrationTest() {\n\t\tsetupTestDB()\n\t}\n\texitCode := m.Run()\n\tos.Exit(exitCode)\n}",
"func TestMain(m *testing.M) {\n\trt, _ := framework.Run(\"authn_permissive_test\", m)\n\tos.Exit(rt)\n}",
"func TestMain(m *testing.M) {\n\tlog.SetOutput(os.Stdout)\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tlog.SetOutput(os.Stdout)\n\tos.Exit(m.Run())\n}",
"func TestOldMain(t *testing.T) {\n\tf := New(nil, nil)\n\tcalled := false\n\tf.AddPrim(\"BYE\", func() {\n\t\tfmt.Println(\"hello from the BYE primitive\")\n\t\tcalled = true\n\t\tf._BYE()\n\t}, 0)\n\tf.AddWord(\": nop ;\")\n\tf.AddWord(\": C BYE ;\")\n\tf.AddWord(\": B C ;\")\n\tf.AddWord(\": A nop B ;\")\n\ta, err := f.Addr(\"A\")\n\tfmt.Printf(\"Addr of doit is %x\\n\", a)\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tf.SetWordPtr(COLDD, a)\n\tf.IP = COLDD\n\tf.Next()\n\tf.Main()\n\tif !called {\n\t\tt.Fatal(\"Didn't call BYE function\")\n\t}\n}",
"func main() {\n\t// Setup signal handlers.\n\tctx, cancel := context.WithCancel(context.Background())\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() { <-c; cancel() }()\n\n\t// Instantiate a new type to represent our application.\n\t// This type lets us shared setup code with our end-to-end tests.\n\tm := NewMain()\n\n\t// Parse command line flags & load configuration.\n\tif err := m.ParseFlags(ctx, os.Args[1:]); err == flag.ErrHelp {\n\t\tos.Exit(1)\n\t} else if err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\t// Execute program.\n\tif err := m.Run(ctx); err != nil {\n\t\tm.Close()\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tapi.ReportError(ctx, err)\n\t\tos.Exit(1)\n\t}\n\n\t// Wait for CTRL-C.\n\t<-ctx.Done()\n\n\t// Clean up program.\n\tif err := m.Close(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}",
"func TestMain(m *testing.M) {\n\tframework.Run(\"pilot_test\", m)\n}",
"func TestMain(t *testing.T) { TestingT(t) }",
"func main() {\n\tTest()\n}",
"func main() {\n\tpp.Println(\"=========================================\")\n\tpp.Println(\"No local tests available.\")\n\tpp.Println(\"=========================================\")\n}",
"func TestMain(m *testing.M) {\n\t// build the test binary\n\targs := []string{\"build\", \"-o\", \"testreg\" + exeSuffix}\n\tout, err := exec.Command(\"go\", args...).CombinedOutput()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"building testreg failed: %v\\n%s\", err, out)\n\t\tos.Exit(2)\n\t}\n\n\t// create the docker client\n\tdcli, err := client.NewEnvClient()\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"could not connect to docker: %v\", err))\n\t}\n\n\t// start registry\n\tregID, addr, err := testutils.StartRegistry(dcli)\n\tif err != nil {\n\t\ttestutils.RemoveContainer(dcli, regID)\n\t\tpanic(fmt.Errorf(\"starting registry container failed: %v\", err))\n\t}\n\tregistryAddr = addr\n\n\tflag.Parse()\n\tmerr := m.Run()\n\n\t// remove registry\n\tif err := testutils.RemoveContainer(dcli, regID); err != nil {\n\t\tlog.Printf(\"couldn't remove registry container: %v\", err)\n\t}\n\n\t// remove test binary\n\tos.Remove(\"testreg\" + exeSuffix)\n\n\tos.Exit(merr)\n}",
"func TestMain(m *testing.M) {\n\tefiVarDir, cleanup, err := vartest.SetupVarZip(\"../testdata/sys_fw_efi_vars.zip\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdefer cleanup()\n\tuefivars.EfiVarDir = efiVarDir\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tkubeconfig := flag.String(\n\t\t\"kubeconfig\",\n\t\t\"\",\n\t\t\"path to kubeconfig\",\n\t)\n\tflag.Parse()\n\n\tvar err error\n\tsuite, err = kubetest.NewSuiteFromKubeconfig(*kubeconfig)\n\tif err != nil {\n\t\tlog.Fatal(err)\n\t}\n\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tklog.InitFlags(flag.CommandLine)\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tvar (\n\t\tretCode int\n\t\terr error\n\t)\n\n\terr = setup()\n\tif err != nil {\n\t\tlog.Panic(\"Error setting up test db\", err)\n\t}\n\n\tretCode = m.Run()\n\n\terr = teardown()\n\tif err != nil {\n\t\tlog.Panic(\"Error tearing down test db\", err)\n\t}\n\n\tos.Exit(retCode)\n}",
"func TestMain(m *testing.M) {\n\trt, _ := framework.Run(\"echo_test\", m)\n\tos.Exit(rt)\n}",
"func TestMain(t *testing.T) {\n}",
"func TestMain(m *testing.M) {\n\tInitialize()\n\tsetDB(dao.ConfigurationManager.GetDB())\n\tcode := m.Run()\n\tos.Exit(code)\n}",
"func runTestMain(m *testing.M) int {\n\tvar t mockAsserter\n\n\thome, cleanup := tmtest.SetupConfig(t, \"testdata\")\n\tdefer cleanup()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\tappCleanup := tmtest.RunApp(ctx, t, appName, home)\n\ttmCleanup := tmtest.RunTendermint(ctx, t, home)\n\n\tdefer appCleanup()\n\tdefer tmCleanup()\n\n\treturn m.Run()\n}",
"func TestMain(m *testing.M) {\n\t// set configuration from environment\n\tc = &config.Config{\n\t\tAppID: os.Getenv(\"APP_ID\"),\n\t\tAppToken: os.Getenv(\"APP_TOKEN\"),\n\t\tDeviceID: os.Getenv(\"DEVICE_ID\"),\n\t}\n\tif len(os.Getenv(\"API_HOST\")) > 0 {\n\t\tc.Backend.SetBaseURL(os.Getenv(\"API_HOST\"))\n\t}\n\n\t// run tests\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tserver = httptest.NewServer(mux)\n\t// skip the default value of airbraker.Airbrake here\n\tnotifier, _ = au.NewBufferNotifier()\n\n\tretCode := m.Run()\n\n\tserver.Close()\n\tos.Exit(retCode)\n}",
"func TestMain(m *testing.M) {\n\tgin.SetMode(gin.TestMode)\n\tos.Exit(m.Run()) // run the tests, then exit\n}",
"func TestMain(m *testing.M) {\n\tMOUNTED = false\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tvar err error\n\terr = godotenv.Load(os.ExpandEnv(\"../../.env\"))\n\tif err != nil {\n\t\tlog.Fatalf(\"Error getting env %v\\n\", err)\n\t}\n\n\ttestutils.Database()\n\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Setenv(\"KLONE_WORKSPACE\", local.Home()) // Always test in the home directory..\n\tif os.Getenv(\"TEST_KLONE_GITHUBTOKEN\") != \"\" {\n\t\tos.Setenv(\"KLONE_GITHUBTOKEN\", os.Getenv(\"TEST_KLONE_GITHUBTOKEN\"))\n\t}\n\tif os.Getenv(\"TEST_KLONE_GITHUBUSER\") != \"\" {\n\t\tos.Setenv(\"KLONE_GITHUBUSER\", os.Getenv(\"TEST_KLONE_GITHUBUSER\"))\n\t}\n\tif os.Getenv(\"TEST_KLONE_GITHUBPASS\") != \"\" {\n\t\tos.Setenv(\"KLONE_GITHUBPASS\", os.Getenv(\"TEST_KLONE_GITHUBPASS\"))\n\t}\n\tgithub.Testing = true\n\tprovider := klone.NewGithubProvider()\n\tgitServer, err := provider.NewGitServer()\n\tif err != nil {\n\t\tfmt.Printf(\"Unable to get GitHub server: %v\\n\", err)\n\t\tos.Exit(-1)\n\t}\n\tGitServer = gitServer\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tretCode := m.Run()\n\t// call the tearedown function to close the server\n\tclient.teardown()\n\t// exit\n\tos.Exit(retCode)\n}",
"func Main() {\n\tos.Exit(Run(os.Args, os.Stdin, os.Stdout, os.Stderr))\n}",
"func runTestMain(m *testing.M) int {\n\tisLess, err := test_helpers.IsTarantoolVersionLess(2, 2, 0)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to extract Tarantool version: %s\", err)\n\t}\n\n\tif isLess {\n\t\tlog.Println(\"Skipping decimal tests...\")\n\t\tisDecimalSupported = false\n\t\treturn m.Run()\n\t} else {\n\t\tisDecimalSupported = true\n\t}\n\n\tinstance, err := test_helpers.StartTarantool(test_helpers.StartOpts{\n\t\tInitScript: \"config.lua\",\n\t\tListen: server,\n\t\tUser: opts.User,\n\t\tPass: opts.Pass,\n\t\tWaitStart: 100 * time.Millisecond,\n\t\tConnectRetry: 10,\n\t\tRetryTimeout: 500 * time.Millisecond,\n\t})\n\tdefer test_helpers.StopTarantoolWithCleanup(instance)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to prepare test Tarantool: %s\", err)\n\t\treturn 1\n\t}\n\n\treturn m.Run()\n}",
"func Main(args ...interface{}) {\n\n\t// ...\n}",
"func TestMain(m *testing.M) {\n\tctx := context.Background()\n\tghostC, err := getContainer(ctx)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\tdefer ghostC.Terminate(ctx)\n\t//ghostC.FollowOutput(&logPrinter{})\n\t//ghostC.StartLogProducer(ctx)\n\n\ttestBaseURL, err = getBaseURL(ctx, ghostC)\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\terr = setupTestUser()\n\tif err != nil {\n\t\tlog.Panic(err)\n\t}\n\n\tres := m.Run()\n\tif res != 0 {\n\t\terr = printLogs(ctx, ghostC)\n\t\tif err != nil {\n\t\t\tlog.Panic(err)\n\t\t}\n\t}\n\tos.Exit(res)\n}",
"func TestMain(m *testing.M) {\n\tconfigImp = mocks.NewMockConfig(caServerURL)\n\tcryptoSuiteProvider, _ = cryptosuiteimpl.GetSuiteByConfig(configImp)\n\tif cryptoSuiteProvider == nil {\n\t\tpanic(\"Failed initialize cryptoSuiteProvider\")\n\t}\n\t// Start Http Server\n\tgo mocks.StartFabricCAMockServer(strings.TrimPrefix(caServerURL, \"http://\"))\n\t// Allow HTTP server to start\n\ttime.Sleep(1 * time.Second)\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tframework.\n\t\tNewSuite(m).\n\t\tRequireSingleCluster().\n\t\tSetup(istio.Setup(&i, func(cfg *istio.Config) {\n\t\t\tcfg.Values[\"telemetry.enabled\"] = \"true\"\n\t\t\tcfg.Values[\"telemetry.v2.enabled\"] = \"true\"\n\t\t\tcfg.Values[\"telemetry.v2.stackdriver.enabled\"] = \"true\"\n\t\t\tcfg.Values[\"telemetry.v2.stackdriver.logging\"] = \"true\"\n\t\t})).\n\t\tSetup(testSetup).\n\t\tRun()\n}",
"func TestMain(m *testing.M) {\n\tif os.Getenv(\"BENT_TEST_IS_CMD_BENT\") != \"\" {\n\t\tmain()\n\t\tos.Exit(0)\n\t}\n\tvar err error\n\tdir, err = os.MkdirTemp(\"\", \"bent_test\")\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\tdefer os.RemoveAll(dir)\n\tm.Run()\n}",
"func InitMain() func() {\n\tif calledInitMain {\n\t\tpanic(\"v23test: already called v23test.TestMain or v23test.InitMain\")\n\t}\n\tcalledInitMain = true\n\tgosh.InitMain()\n\tvar err error\n\tbinDir, err = os.MkdirTemp(\"\", \"bin-\")\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\treturn func() {\n\t\tos.RemoveAll(binDir)\n\t}\n}",
"func TestMain(m *testing.M) {\n\tworkingDir, err := os.Getwd()\n\tif err != nil {\n\t\tlog.Fatal(\"Could not get current directory\")\n\t}\n\n\trepos.SetBaseRepoInfo(repos.RepoPath(filepath.Dir(workingDir)), repos.RepoName(\"reqtraq\"))\n\tparsers.Register()\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tfakeS3Server := setup()\n\tcode := m.Run()\n\tshutdown(fakeS3Server)\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\n\tfmt.Println(\"Running tests with file from disk...\")\n\tusingFile = true\n\ttestOpenFile()\n\n\tresult := m.Run()\n\ttestDbf.Close()\n\n\tif result != 0 {\n\t\tos.Exit(result)\n\t}\n\n\tfmt.Println(\"Running tests with byte stream...\")\n\tusingFile = false\n\ttestOpenStream()\n\n\tresult = m.Run()\n\n\tos.Exit(result)\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(mongotesting.RunWithMongoInDocker(m, &mongoURI))\n}",
"func TestMain(m *testing.M) {\n\tflag.Set(\"alsologtostderr\", \"true\")\n\tflag.Set(\"v\", \"100\")\n\tflag.Parse()\n\tc, err := common.ReadConfig(\"../../test_conf.json\")\n\tif err != nil {\n\t\tfmt.Printf(\"Couldn't read config: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tResetTestApi(c)\n\tdefer TestApi.DB.Close()\n\tret := m.Run()\n\tos.Exit(ret)\n}"
] | [
"0.7855635",
"0.77311206",
"0.77311206",
"0.77311206",
"0.7662056",
"0.7522855",
"0.7515142",
"0.7501964",
"0.74927384",
"0.74730176",
"0.7459258",
"0.7459258",
"0.7459258",
"0.7456344",
"0.7455583",
"0.744522",
"0.74388605",
"0.7376313",
"0.7376313",
"0.7376313",
"0.73496336",
"0.7338583",
"0.7321099",
"0.7321099",
"0.72805053",
"0.72713125",
"0.7263473",
"0.7253343",
"0.72317374",
"0.72208744",
"0.71957433",
"0.7182844",
"0.7182844",
"0.7180181",
"0.7180181",
"0.7176325",
"0.7162672",
"0.71396273",
"0.7138158",
"0.7126851",
"0.71266377",
"0.7098157",
"0.709421",
"0.7092024",
"0.7086122",
"0.7062819",
"0.70569324",
"0.7030086",
"0.7000252",
"0.6976778",
"0.69503915",
"0.69456196",
"0.6944931",
"0.6940349",
"0.6933281",
"0.69322944",
"0.6875094",
"0.68737835",
"0.6863564",
"0.68411416",
"0.68244696",
"0.68241",
"0.6816297",
"0.6800173",
"0.6799662",
"0.6799662",
"0.6797113",
"0.6794462",
"0.678877",
"0.6787253",
"0.67867815",
"0.67746854",
"0.6773563",
"0.6769006",
"0.67681944",
"0.67671573",
"0.6744851",
"0.6732078",
"0.6731856",
"0.67311215",
"0.67173743",
"0.67098874",
"0.66999596",
"0.66960436",
"0.6687075",
"0.6686749",
"0.66836244",
"0.66784316",
"0.66547596",
"0.66530234",
"0.6652894",
"0.6646161",
"0.6637016",
"0.66336125",
"0.66333723",
"0.66324985",
"0.663045",
"0.66181695",
"0.6612114",
"0.65960413",
"0.6579137"
] | 0.0 | -1 |
MainStart is meant for use by tests generated by 'go test'. It is not meant to be called directly and is not subject to the Go 1 compatibility document. It may change signature from release to release. | func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func TestStart(t *testing.T) {\n\tTestingT(t)\n}",
"func Start(entryPoint interface{}) {\n\tapp := &App{\n\t\tEntryPoint: entryPoint,\n\t\tCtors: GetCtors(),\n\t\tDtors: GetDtors(),\n\t}\n\tif err := app.Run(); err != nil {\n\t\tlog.Fatal(err.Error())\n\t}\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testscript.RunMain(m, map[string]func() int{\n\t\t\"main\": main1,\n\t}))\n}",
"func start() {\n\tfmt.Printf(\"Starting %s...\", AppName)\n\tif err := Start(1); err != nil {\n\t\tfailed(err)\n\t} else {\n\t\tfmt.Println(\"OK\")\n\t}\n}",
"func TestMain(m *testing.M) {\n\trest.StartMockupServer()\n\tos.Exit(m.Run())\n}",
"func main() {\n\tapp.StartApp()\n}",
"func Start() {\n\tconfig, configErr := config.ParseArgsReturnConfig(os.Args[1:])\n\tif configErr != nil {\n\t\tfor _, err := range configErr {\n\t\t\tlog.Error(err)\n\t\t}\n\t\tos.Exit(1)\n\t}\n\tswitch {\n\tcase config.FlagVersion:\n\t\tfmt.Printf(\"DCOS Signal Service\\n Version: %s\\n Revision: %s\\n DC/OS Variant: %s\\n\", VERSION, REVISION, config.DCOSVariant)\n\t\tos.Exit(0)\n\tdefault:\n\t\tif config.Enabled == \"false\" {\n\t\t\tos.Exit(0)\n\t\t}\n\t\tif config.FlagVerbose {\n\t\t\tlog.SetLevel(log.DebugLevel)\n\t\t}\n\t\tif config.FlagTest {\n\t\t\tlog.SetLevel(log.ErrorLevel)\n\t\t}\n\t}\n\tif err := executeRunner(config); err != nil {\n\t\tlog.Error(err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(0)\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar code int\n\tfunc() {\n\t\tdefer InitMain()()\n\t\tcode = m.Run()\n\t}()\n\tos.Exit(code)\n}",
"func StartTest(testName string) {\n\tlog.Println(\"\")\n\tlog.Println(\"\")\n\tpc, file, line, _ := runtime.Caller(1)\n\n\tfullPCName := runtime.FuncForPC(pc).Name()\n\tlastIndexOfPc := strings.LastIndex(fullPCName, \"/\") + 1\n\tjustPcName := fullPCName[lastIndexOfPc:len(fullPCName)]\n\n\tlastIndexOfFile := strings.LastIndex(file, \"/\") + 1\n\tjustFileName := file[lastIndexOfFile:len(file)]\n\n\t//log.Printf(\"INFO [%s:%d] [%s] %v\", justFileName, line, justPcName, msg)\n\tlog.Printf(\"***START [%s:%d] [%s] %v\", justFileName, line, justPcName, testName)\n\n\t//log.Printf(\"***START \" + testName + \" [%s:%d] [%s] %v\", justFileName, line, justPcName, msg))\n\tlog.Println(\"\")\n}",
"func TestMain(m *testing.M) {\n\t// If the test binary is named \"app\", then we're running as a subprocess.\n\t// Otherwise, run the tests.\n\tswitch filepath.Base(os.Args[0]) {\n\tcase \"app\":\n\t\tmain()\n\t\tos.Exit(0)\n\tdefault:\n\t\tos.Exit(m.Run())\n\t}\n}",
"func main() {\n\tcore.Start()\n}",
"func TestStart(t *testing.T) {\n\ts := SetUpSuite(t)\n\n\t// Fetch the services.App that the service heartbeat.\n\tservers, err := s.authServer.AuthServer.GetApplicationServers(s.closeContext, defaults.Namespace)\n\trequire.NoError(t, err)\n\n\t// Check that the services.Server sent via heartbeat is correct. For example,\n\t// check that the dynamic labels have been evaluated.\n\tappFoo, err := types.NewAppV3(types.Metadata{\n\t\tName: \"foo\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: s.testhttp.URL,\n\t\tPublicAddr: \"foo.example.com\",\n\t\tDynamicLabels: map[string]types.CommandLabelV2{\n\t\t\tdynamicLabelName: {\n\t\t\t\tPeriod: dynamicLabelPeriod,\n\t\t\t\tCommand: dynamicLabelCommand,\n\t\t\t\tResult: \"4\",\n\t\t\t},\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\tserverFoo, err := types.NewAppServerV3FromApp(appFoo, \"test\", s.hostUUID)\n\trequire.NoError(t, err)\n\tappAWS, err := types.NewAppV3(types.Metadata{\n\t\tName: \"awsconsole\",\n\t\tLabels: staticLabels,\n\t}, types.AppSpecV3{\n\t\tURI: constants.AWSConsoleURL,\n\t\tPublicAddr: \"aws.example.com\",\n\t})\n\trequire.NoError(t, err)\n\tserverAWS, err := types.NewAppServerV3FromApp(appAWS, \"test\", s.hostUUID)\n\trequire.NoError(t, err)\n\n\tsort.Sort(types.AppServers(servers))\n\trequire.Empty(t, cmp.Diff([]types.AppServer{serverAWS, serverFoo}, servers,\n\t\tcmpopts.IgnoreFields(types.Metadata{}, \"ID\", \"Expires\")))\n\n\t// Check the expiry time is correct.\n\tfor _, server := range servers {\n\t\trequire.True(t, s.clock.Now().Before(server.Expiry()))\n\t\trequire.True(t, s.clock.Now().Add(2*defaults.ServerAnnounceTTL).After(server.Expiry()))\n\t}\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run()\n\tshutdown()\n\t// THE FOLLOWING LINE IS VERY IMPORTANT and cannot be removed for TestMain func's\n\tos.Exit(code)\n}",
"func main() {\n\tif err := realMain(); err != nil {\n\t\tlog.Fatal(err)\n\t}\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar err error\n\t// When flags are specified construct the run config object. When no\n\t// flags have been specified, the run config will be nil and the tests\n\t// will early return.\n\tif len(os.Args) > 1 {\n\t\trc, err = initRunConfig()\n\t}\n\tif err != nil {\n\t\t// Silly Go has no method in M to log errors or abort,\n\t\t// so we'll have to do it outside of the testing module.\n\t\tlog.Fatal(\"Can't initialize test server\", err)\n\t\tos.Exit(1)\n\t}\n\tos.Exit(m.Run())\n}",
"func main() {\n\tos.Exit(realMain())\n}",
"func main() {\n\tos.Exit(realMain())\n}",
"func TestMain(m *testing.M) {\n\tklog.InitFlags(flag.CommandLine)\n\tos.Exit(m.Run())\n}",
"func main() {\n\tservice := service.Service{}\n\tservice.Start(\"\")\n}",
"func Start(args []string) {\n\tfmt.Println(args)\n}",
"func TestMain(m *testing.M) {\n\tfmt.Println(\"Building spawner binary...\")\n\tif err := exec.Command(\"go\", \"build\", \"-tags\", \"forceposix\", \"-o\", \"spawner.exe\", \"./spawner\").Run(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tkillAllSpawner()\n\tcode := m.Run()\n\n\t// Need some time to release binary\n\tkillAllSpawner()\n\tif err := os.Remove(\"spawner.exe\"); err != nil {\n\t\tfmt.Println(\"Failed to remove spawner:\", err)\n\t}\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func (m *Main) PrintStart() {\n\tfmt.Println(\"---\")\n\tlog.Infof(\"testing %s\", m.Name)\n\tfmt.Println(\"\")\n}",
"func Main() {\n\n\tcheckSupportArch()\n\n\tif len(os.Args) > 1 {\n\t\tcmd := os.Args[1]\n\t\tfmt.Println(cmd)\n\t}\n\n\tstartEtcdOrProxyV2()\n}",
"func TestMain(m *testing.M) {\n\t_Init();\n\tresult := m.Run();\n\t_TearDown();\n\tos.Exit(result);\n}",
"func MainStart(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar err error\n\ts, err := NewServer(TESTDB, 10, 2, ioutil.Discard , \":9123\")\n\tif err!=nil {\n\t\tpanic(err)\n\t}\n\tts=s\n\ts.Start()\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tsetup.WrapTestMain(m, config)\n}",
"func (t *SelfTester) Start() {}",
"func TestMain(m *testing.M) {\n\t// Note: The setup will provision a single K8s env and\n\t// all the tests need to create and use a separate namespace\n\n\t// setup env test\n\tif err := setupSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// run tests\n\tcode := m.Run()\n\n\t// tear down test env\n\tif err := tearDownSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tos.Exit(code)\n}",
"func Main(use string, options ...RootCommandOption) {\n\tappcmd.Main(context.Background(), newRootCommand(use, options...), version)\n}",
"func main() {\n\tserver.StartUp(false)\n}",
"func Main(rootCommand *Command, version string) {\n\tos.Exit(Run(rootCommand, version, internal.NewOSRunEnv()))\n}",
"func Start(_ string) error {\n\treturn nil\n}",
"func TestRunMain(t *testing.T) {\n\tmain()\n}",
"func main() {\n\t// Setup signal handlers.\n\tctx, cancel := context.WithCancel(context.Background())\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() { <-c; cancel() }()\n\n\t// Instantiate a new type to represent our application.\n\t// This type lets us shared setup code with our end-to-end tests.\n\tm := NewMain()\n\n\t// Parse command line flags & load configuration.\n\tif err := m.ParseFlags(ctx, os.Args[1:]); err == flag.ErrHelp {\n\t\tos.Exit(1)\n\t} else if err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\t// Execute program.\n\tif err := m.Run(ctx); err != nil {\n\t\tm.Close()\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tapi.ReportError(ctx, err)\n\t\tos.Exit(1)\n\t}\n\n\t// Wait for CTRL-C.\n\t<-ctx.Done()\n\n\t// Clean up program.\n\tif err := m.Close(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}",
"func TestMain(m *testing.M) {\n\tif android.BuildOs != android.Linux {\n\t\t// b/145598135 - Generating host snapshots for anything other than linux is not supported.\n\t\tlog.Printf(\"Skipping as sdk snapshot generation is only supported on %s not %s\", android.Linux, android.BuildOs)\n\t\tos.Exit(0)\n\t}\n\n\tos.Exit(m.Run())\n}",
"func Main() error {\n\tapp, err := New(context.Background())\n\tif err != nil {\n\t\treturn skerr.Wrap(err)\n\t}\n\n\treturn app.Run()\n}",
"func Test_main(t *testing.T) {\n\tif *systemTest {\n\t\tsignalHandler()\n\t\tendRunning = make(chan bool, 1)\n\t\tgo main()\n\t\t<-endRunning\n\t}\n}",
"func TestMain(t *testing.T) {\n\n\tmainInvoked := false\n\n\t// Mock function for main\n\tmainMock := func() {\n\t\tmainInvoked = true\n\t}\n\tmainFunc = mainMock\n\n\t// run main function and check if it invokes the mainMock\n\tmain()\n\n\t// check if mock function was called\n\tif !mainInvoked {\n\t\tt.Error(\"no call to mainFunc was detected\")\n\t}\n\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\terr := ssntpTestsSetup()\n\tif err != nil {\n\t\tos.Exit(1)\n\t}\n\n\tret := m.Run()\n\n\tssntpTestsTeardown()\n\tos.Exit(ret)\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(runTest(m))\n}",
"func TestMain(m *testing.M) {\n\ttestsuite.RevelTestHelper(m, \"dev\", run.Run)\n}",
"func TestMain(m *testing.M) {\n\tdefer tracing.Cleanup()\n\n\tglobal = environment.NewStandardGlobalEnvironment()\n\n\t// Run the tests.\n\tos.Exit(m.Run())\n}",
"func (suite *PouchStartSuite) TestStartCommand(c *check.C) {\n\tname := \"start-normal\"\n\tcommand.PouchRun(\"create\", \"--name\", name, busyboxImage).Assert(c, icmd.Success)\n\n\tcommand.PouchRun(\"start\", name).Assert(c, icmd.Success)\n\n\tcommand.PouchRun(\"stop\", name).Assert(c, icmd.Success)\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\tresult := m.Run()\n\n\tos.Exit(result)\n}",
"func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run() \n os.Exit(code)\n}",
"func RealMain(opts types.Options, d types.Deployer, tester types.Tester) error {\n\t// Now for the core kubetest2 logic:\n\t// - build\n\t// - cluster up\n\t// - test\n\t// - cluster down\n\t// TODO(bentheelder): write out structured metadata\n\t// TODO(bentheelder): signal handling & timeoutf\n\n\t// build if specified\n\tif opts.ShouldBuild() {\n\t\tbuild := d.GetBuilder()\n\t\tif build == nil {\n\t\t\tbuild = defaultBuild\n\t\t}\n\t\t// TODO(bentheelder): this should write out to JUnit\n\t\tif err := build(); err != nil {\n\t\t\t// we do not continue to up / test etc. if build fails\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// up a cluster\n\tif opts.ShouldUp() {\n\t\t// TODO(bentheelder): this should write out to JUnit\n\t\tif err := d.Up(); err != nil {\n\t\t\t// we do not continue to test if build fails\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// ensure tearing down the cluster happens last\n\tdefer func() {\n\t\tif opts.ShouldDown() {\n\t\t\t// TODO(bentheelder): this should write out to JUnit\n\t\t\td.Down()\n\t\t}\n\t}()\n\n\t// and finally test, if a test was specified\n\tif opts.ShouldTest() {\n\t\t// TODO(bentheelder): this should write out to JUnit\n\t\ttester.Test()\n\t}\n\n\treturn nil\n}",
"func main() {\n\tfmt.Println(\"################################\")\n\tfmt.Println(\"#### Hello from MyAppStatus ####\")\n\tfmt.Println(\"################################\")\n\n\tapp.StartServer()\n}",
"func TestStart(t *testing.T) {\n\t// Preparation\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdir = dir + \"/../assets\" // path to your tracks for testing\n\n\tlib, err := library.NewLibrary(\"MyLibrary\", dir)\n\tif err != nil {\n\t\tt.Errorf(\"initialize library with valid params: %s\", err.Error())\n\t}\n\n\terr = lib.ScanWithRoutines()\n\tif err != nil {\n\t\tt.Errorf(\"scan library: %s\", err.Error())\n\t}\n\n\t// Test\n\tp := player.NewPlayer(lib, make(chan request.Request, 1000))\n\n\tvar requests = []request.RequestType{\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestPrevTrack,\n\t\trequest.RequestPrevTrack,\n\t\trequest.RequestRepeatMode,\n\t}\n\tch := p.Start(make(chan string, 1000))\n\tfor _, req := range requests {\n\t\tch <- request.NewRequestToPlayer(req)\n\t\ttime.Sleep(3 * time.Second)\n\t}\n\t// time.Sleep(1000*time.Second)\n\t// Another process is started on the backend, this process can exit naturally.\n}",
"func TestMain(m *testing.M) {\n\tflag.BoolVar(&realTest, \"real\", false, \"Test with real uHunt API server\")\n\tflag.Parse()\n\tos.Exit(m.Run())\n}",
"func main() {\n\tstartTime = time.Now()\n\t// Handle the env vars before calling into Run().\n\t// That way, if debug is on, we will get the start message for Run().\n\terr := HandleEnvVars()\n\tif err == nil {\n\t\terr = Run()\n\t}\n\tif err != nil {\n\t\t// Not using Stderr(...) here because I don't want the time and function prefix on this.\n\t\tfmt.Fprintf(os.Stderr, \"error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func TestMain(m *testing.M) {\n\n\tos.Exit(m.Run())\n}",
"func Start() {\n\tdriver.Main(func(app oswin.App) {\n\t\tatomic.AddInt32(&started, 1)\n\t\t<-quit\n\t})\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func Start() error {\n\tc := NewDefaults()\n\tc.Flags.ParseArgs(os.Args[1:])\n\n\tif c.Flags.verReq {\n\t\tfmt.Println(version.Print(c.Flags.Name()))\n\t\treturn nil // print version and exit.\n\t} else if c.Flags.pslist {\n\t\treturn printProcessList()\n\t}\n\n\tif err := c.config(); err != nil {\n\t\t_, _ = ui.Error(Title, err.Error())\n\t\treturn err\n\t}\n\n\tif err := c.start(); err != nil {\n\t\t_, _ = ui.Error(Title, err.Error())\n\t\treturn err\n\t}\n\n\treturn nil\n}",
"func TestMain(m *testing.M) {\n\t// Code here runs before testing starts\n\tmux = GetMux()\n\t// Run tests\n\texitCode := m.Run()\n\t// Code here runs after testing finishes\n\tos.Exit(exitCode)\n}",
"func Main(args ...interface{}) {\n\n\t// ...\n}",
"func TestEmptyGoRunMain(t *testing.T) {\n\tintegration.ProgramTest(t, &integration.ProgramTestOptions{\n\t\tDir: filepath.Join(\"empty\", \"gorun_main\"),\n\t\tDependencies: []string{\n\t\t\t\"github.com/pulumi/pulumi/sdk/v3\",\n\t\t},\n\t\tQuick: true,\n\t})\n}",
"func Main() {\n\tif err := Run(cmd.NewLogger(), cmd.StandardIOStreams(), os.Args[1:]); err != nil {\n\t\tos.Exit(1)\n\t}\n}",
"func main() {\n\tfmt.Println(\"APPLICATION BEGIN\")\n\twebserver := new(service.Webserver)\n\tregisterConfig()\n\tregisterErrors()\n\tregisterAllApis()\n\tregisterInitFunc()\n\toverrideConfByEnvVariables()\n\twebserver.Start()\n}",
"func TestMain(m *testing.M) {\n\n\t// Run Setup\n\tSetup()\n\n\t// Run all the tests\n\treturnCode := m.Run()\n\n\t// Run teardown\n\tTearDown()\n\n\t// Pass on the exit codes\n\tos.Exit(returnCode)\n}",
"func main() {\n\tfmt.Println(\"Starting management container...\")\n\tservice.Start()\n\tlog.Info(service.Container().Stats())\n\trouter := service.Container().Router\n\tsetupWebapp := service.SetupWebApplication(router)\n\trouter.PathPrefix(\"/setup\").HandlerFunc(setupWebapp.HandleRequest)\n\tmonitorWebapp := service.MonitorWebApplication(router)\n\trouter.PathPrefix(\"/\").HandlerFunc(monitorWebapp.HandleRequest)\n\tstartup()\n}",
"func Main(name string) {\n\tappcmd.Main(context.Background(), NewRootCommand(name))\n}",
"func main() {\n\tlog.SetFlags(log.LstdFlags | log.Lshortfile)\n\tos.Exit(commands.Run(versionString()))\n}",
"func Main() {\n\tflag.Parse()\n\n\tif err := run(); err != nil {\n\t\tlog.Warningf(\"%v\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func requiresTestStart() {\n\tif !testsStarted {\n\t\tpanic(\"May only be called from within a test case\")\n\t}\n}",
"func TestControlMain(t *testing.T) {\n\t// create cli app for testing\n\tapp := cli.NewApp()\n\tapp.Commands = []cli.Command{controlCmd}\n\n\t// create args to call\n\targs := []string{\"./minio\", \"control\"}\n\n\t// run app\n\terr := app.Run(args)\n\tif err != nil {\n\t\tt.Errorf(\"Control-Main test failed with - %s\",\n\t\t\terr.Error())\n\t}\n}",
"func TestMain(m *testing.M) {\n\t// Override the expected bundle location.\n\tExpectedBundleLocation = BundleLocationBuildDirectory\n\n\t// Run tests.\n\tm.Run()\n}",
"func Main() {\n\tenv.Lock()\n\terr := cli.Main()\n\tif err != nil {\n\t\tfmt.Fprintln(os.Stderr, \"fatal:\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func main() {\n\t// Propagate build information to root package to share globally.\n\tbooking.Version = strings.TrimPrefix(version, \"\")\n\tbooking.Commit = commit\n\n\t// Setup signal handlers.\n\tctx, cancel := context.WithCancel(context.Background())\n\tc := make(chan os.Signal, 1)\n\tsignal.Notify(c, os.Interrupt)\n\tgo func() { <-c; cancel() }()\n\n\t// Instantiate a new type to represent our application.\n\t// This type lets us shared setup code with our end-to-end tests.\n\tm := NewMain()\n\n\t// Parse command line flags & load configuration.\n\tif err := m.ParseFlags(ctx, os.Args[1:]); err == flag.ErrHelp {\n\t\tos.Exit(1)\n\t} else if err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n\n\t// Execute program.\n\tif err := m.Run(ctx); err != nil {\n\t\tm.Close()\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tbooking.ReportError(ctx, err)\n\t\tos.Exit(1)\n\t}\n\n\t// Wait for CTRL-C.\n\t<-ctx.Done()\n\n\t// Clean up program.\n\tif err := m.Close(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}",
"func createMain() error {\n\tpath := \"./\" + Name + \"/main.go\"\n\n\t// Create the main.go file content\n\tmain := fmt.Sprintf(`package main\n\nimport (\n\t\"%s/cmd/server\"\n\t\"%s/pkg/logs\"\n)\n\t\nfunc main() {\n\tlogs.StartLogger()\n\t\t\n\tserver := server.NewServer()\n\n\tserver.StartServer()\n}\n\t`, Repo, Repo)\n\n\t// Create the main.go file\n\treturn util.CreateFile(path, main)\n}",
"func main() {\n\tTest()\n}",
"func TestMain(m *testing.M) {\n\tsetUp()\n\tretCode := m.Run()\n\ttearDown()\n\tos.Exit(retCode)\n}",
"func LogStart(version string, env string) {\n\tlog.Println(\"Start\")\n\tlog.Printf(\"Version: %s\", version)\n\tlog.Printf(\"Environment: %s\", env)\n\tlog.Printf(\"Go version: %s\", runtime.Version())\n\tlog.Printf(\"Go max procs: %d\", runtime.GOMAXPROCS(0))\n}",
"func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}",
"func cmdStart() {\n\tdefer os.Exit(<-start)\n\tif !DebugMode {\n\t\terr := cmd.Start()\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}",
"func TestMain(m *testing.M) {\n\n\tcolor.NoColor = true\n\n\tflag.Parse()\n\topt.Paths = flag.Args()\n\n\tstatus := godog.RunWithOptions(\"godogs\", func(s *godog.Suite) {\n\t\tsteps.CliContext(s)\n\t\tsteps.EnvContext(s)\n\t\tsteps.VersionFeatureContext(s)\n\t}, opt)\n\n\tif st := m.Run(); st > status {\n\t\tstatus = st\n\t}\n\tos.Exit(status)\n}",
"func TestMain(m *testing.M) {\n\t// We get a chance to parse flags to include the framework flags for the\n\t// framework as well as any additional flags included in the integration.\n\tflag.Parse()\n\n\t// EnableInjectionOrDie will enable client injection, this is used by the\n\t// testing framework for namespace management, and could be leveraged by\n\t// features to pull Kubernetes clients or the test environment out of the\n\t// context passed in the features.\n\tctx, startInformers := injection.EnableInjectionOrDie(nil, nil) //nolint\n\tstartInformers()\n\n\t// global is used to make instances of Environments, NewGlobalEnvironment\n\t// is passing and saving the client injection enabled context for use later.\n\tglobal = environment.NewGlobalEnvironment(ctx)\n\n\t// Run the tests.\n\tos.Exit(m.Run())\n}",
"func main() {\n\t//Clears screen for better readability\n\tCallClear()\n\t//Calls function to start program\n\tmenu()\n}",
"func main() {\n\tapplication.Application()\n}",
"func main() {\n\tconfig.SetVersion(\"0.1.0\")\n\tconfig.Load()\n\tsync.StartProcessing()\n\tserver.StartServer()\n}",
"func TestMain(m *testing.M) {\n\tif err := TestDropDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tif err := TestCreateDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tif err := TestCreateTables(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tcode := m.Run()\n\n\tos.Exit(code)\n\n\tif err := TestDropDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func TestServiceStart(t *testing.T) {\n\trunTest(t, func(s *res.Service) {\n\t\ts.Handle(\"model\", res.GetResource(func(r res.GetRequest) { r.NotFound() }))\n\t}, nil)\n}",
"func Main() {\n\tvalidateEnv()\n\n\tif err := rootCmd.Execute(); err != nil {\n\t\tfmt.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func TestMain(m *testing.M) {\n\tfakeS3Server := setup()\n\tcode := m.Run()\n\tshutdown(fakeS3Server)\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tvar client *RedisFakeClient\n server = s.Server{}\n\tserver.Init(client)\n\tserver.HandleRequest()\n code := m.Run()\n os.Exit(code)\n}",
"func Main() {\n\tif err := smake(); err != nil {\n\t\tfmt.Fprintln(os.Stderr, err)\n\t\tos.Exit(1)\n\t}\n}",
"func main() {\n\tdebugln(\"main!\")\n}",
"func Main() {\n\tos.Exit(Run(os.Args, os.Stdin, os.Stdout, os.Stderr))\n}",
"func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}"
] | [
"0.6651523",
"0.6557407",
"0.6542749",
"0.6516005",
"0.64983016",
"0.64894754",
"0.6442193",
"0.64012825",
"0.6369519",
"0.63639724",
"0.6340145",
"0.6325003",
"0.6300346",
"0.6300346",
"0.6300346",
"0.62951183",
"0.6283727",
"0.62814456",
"0.6273109",
"0.6273109",
"0.6269826",
"0.6255805",
"0.625503",
"0.6229025",
"0.6222162",
"0.6222162",
"0.6222162",
"0.62110883",
"0.61880696",
"0.61863405",
"0.61801326",
"0.61722195",
"0.6142028",
"0.6139601",
"0.6127818",
"0.61250275",
"0.6109037",
"0.61002153",
"0.60919964",
"0.6079665",
"0.60563254",
"0.6050988",
"0.60450613",
"0.6036391",
"0.6029023",
"0.6023946",
"0.6016859",
"0.6016389",
"0.60155636",
"0.60151464",
"0.59992343",
"0.5999186",
"0.5970243",
"0.59614694",
"0.59594816",
"0.59545195",
"0.59477156",
"0.5938076",
"0.59379697",
"0.59307843",
"0.5924357",
"0.5924357",
"0.5924357",
"0.5922278",
"0.59144866",
"0.5911412",
"0.59019744",
"0.58970445",
"0.58806604",
"0.5871216",
"0.5868359",
"0.5865551",
"0.5862204",
"0.5842648",
"0.5841937",
"0.5840336",
"0.58380705",
"0.58339924",
"0.58317477",
"0.58284545",
"0.5828227",
"0.5818997",
"0.581837",
"0.5814432",
"0.5814432",
"0.58137965",
"0.5811643",
"0.580715",
"0.5799359",
"0.57986856",
"0.5797739",
"0.579652",
"0.5789116",
"0.5781835",
"0.5778797",
"0.57669914",
"0.5764948",
"0.5763561",
"0.5749829",
"0.5747709"
] | 0.73757213 | 0 |
Run runs the tests. It returns an exit code to pass to os.Exit. | func (m *M) Run() int {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (t testCommand) Run() error {\n\tif t.shouldFail {\n\t\treturn errors.New(\"I AM ERROR\")\n\t}\n\treturn nil\n}",
"func Run(m *testing.M, opts ...RunOption) {\n\t// Run tests in a separate function such that we can use deferred statements and still\n\t// (indirectly) call `os.Exit()` in case the test setup failed.\n\tif err := func() error {\n\t\tvar cfg runConfig\n\t\tfor _, opt := range opts {\n\t\t\topt(&cfg)\n\t\t}\n\n\t\tdefer mustHaveNoChildProcess()\n\t\tif !cfg.disableGoroutineChecks {\n\t\t\tdefer mustHaveNoGoroutines()\n\t\t}\n\n\t\tcleanup, err := configure()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"test configuration: %w\", err)\n\t\t}\n\t\tdefer cleanup()\n\n\t\tif cfg.setup != nil {\n\t\t\tif err := cfg.setup(); err != nil {\n\t\t\t\treturn fmt.Errorf(\"error calling setup function: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tm.Run()\n\n\t\treturn nil\n\t}(); err != nil {\n\t\tfmt.Printf(\"%s\", err)\n\t\tos.Exit(1)\n\t}\n}",
"func (t *Test) Run() error {\n\treturn t.Wrap(t.run)\n}",
"func RunTests(m *testing.M, version *int) {\n\tflag.IntVar(version, \"v\", 0, \"The anwork version that should be used with these tests\")\n\tflag.Parse()\n\n\tif *version == 0 {\n\t\tpanic(\"Version (-v) must be passed with a legitimate anwork version number\")\n\t}\n\n\tos.Exit(m.Run())\n}",
"func (t *Test) Run() error {\n\tfor _, cmd := range t.cmds {\n\t\t// TODO(fabxc): aggregate command errors, yield diffs for result\n\t\t// comparison errors.\n\t\tif err := t.exec(cmd); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}",
"func Run(args []string, stdin io.Reader, stdout io.Writer, stderr io.Writer) int {\n\tif err := run(args, stdin, stdout, stderr); err != nil {\n\t\tmessage := err.Error()\n\t\tif message == \"\" {\n\t\t\tmessage = \"unexpected error\"\n\t\t}\n\t\t_, _ = fmt.Fprintln(stderr, message)\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func (c *Code) Run() error {\n\tvar result error\n\n\t// Header\n\theader := Code{}\n\theader.Add(`package main`)\n\theader.Add(`import \"github.com/enova/tokyo/src/cfg\"`)\n\theader.Add(``)\n\theader.Add(`func main() {`)\n\n\t// Footer\n\tfooter := Code{}\n\tfooter.Add(`}`)\n\n\t// Construct Code\n\ttext := header.Text() + c.Text() + footer.Text()\n\tfmt.Println(text)\n\n\t// Create Test-Directory\n\tos.MkdirAll(\"test_code\", 0755)\n\n\t// Create Test-App From Code\n\tioutil.WriteFile(\"test_code/test_code.go\", []byte(text), 0644)\n\n\t// Run Test-App\n\tcmd := \"go run test_code/test_code.go\"\n\toutput, result := exec.Command(\"bash\", \"-c\", cmd).CombinedOutput()\n\n\t// Logging For Failures\n\tfmt.Println(\"Output\\n\", string(output))\n\tfmt.Println(\"Result\\n\", result)\n\n\t// Delete Test-Directory\n\tcmd = \"rm -rf test_code\"\n\texec.Command(\"bash\", \"-c\", cmd).Output()\n\n\treturn result\n}",
"func (Tests) Run(ctx context.Context) error {\n\targ := BuildDockerComposeArgs(ProjectName, ProjectType, \"test\", DockerComposeTestFile)\n\targ = append(arg, \"run\")\n\targ = append(arg,\n\t\t\"--rm\",\n\t\t\"--use-aliases\",\n\t)\n\targ = append(arg, \"app\", \"go\", \"test\", \"-mod=vendor\", \"-v\", \"-cover\")\n\tif err := Exec(ComposeBin, append(arg, \"./service\")...); err != nil {\n\t\treturn err\n\t}\n\tif err := Exec(ComposeBin, append(arg, \"./...\")...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (o Scorecard) RunTests(ctx context.Context) (testOutput v1alpha3.Test, err error) {\n\n\terr = o.TestRunner.Initialize(ctx)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttests := o.selectTests()\n\tif len(tests) == 0 {\n\t\treturn testOutput, nil\n\t}\n\n\tfor _, test := range tests {\n\t\tresult, err := o.TestRunner.RunTest(ctx, test)\n\t\tif err != nil {\n\t\t\tresult = convertErrorToStatus(test.Name, err)\n\t\t}\n\t\ttestOutput.Status.Results = append(testOutput.Status.Results, result.Results...)\n\t}\n\n\tif !o.SkipCleanup {\n\t\terr = o.TestRunner.Cleanup(ctx)\n\t\tif err != nil {\n\t\t\treturn testOutput, err\n\t\t}\n\t}\n\treturn testOutput, nil\n}",
"func Run(t *testing.T, s suite.TestingSuite) {\n\tsuite.Run(t, s)\n}",
"func (cli *CLI) Run(args []string) int {\n\tif len(args) <= 1 {\n\t\tfmt.Fprint(cli.errStream, helpText)\n\t\treturn exitCodeErr\n\t}\n\n\tvar err error\n\n\tswitch args[1] {\n\tcase \"status\":\n\t\terr = cli.doStatus(args[2:])\n\tcase \"attach\":\n\t\terr = cli.doAttach(args[2:])\n\tcase \"detach\":\n\t\terr = cli.doDetach(args[2:])\n\tcase \"--version\":\n\t\tfmt.Fprintf(cli.errStream, \"%s version %s, build %s, date %s \\n\", name, version, commit, date)\n\t\treturn exitCodeOK\n\tcase \"-h\", \"--help\":\n\t\tfmt.Fprint(cli.errStream, helpText)\n\t\treturn exitCodeOK\n\tcase \"--credits\":\n\t\tfmt.Fprint(cli.errStream, creditsText)\n\t\treturn exitCodeOK\n\tdefault:\n\t\tfmt.Fprint(cli.errStream, helpText)\n\t\treturn exitCodeErr\n\t}\n\n\tif err != nil {\n\t\tif os.Getenv(\"ALBIO_DEBUG\") != \"\" {\n\t\t\tfmt.Fprintf(cli.errStream, \"%+v\\n\", err)\n\t\t} else {\n\t\t\tfmt.Fprintln(cli.errStream, err)\n\t\t}\n\t\treturn exitCodeErr\n\t}\n\n\treturn 0\n}",
"func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tcode := m.Run()\n\tos.Exit(code)\n}",
"func (test Test) Run(t *testing.T) {\n\tt.Logf(\"Starting test %v\", t.Name())\n\tt.Helper()\n\t// Double negative cannot be helped, this is intended to mitigate test failures where a global\n\t// resource is manipulated, e.g.: the default AWS security group.\n\tif !test.RunOptions.NoParallel {\n\t\tt.Parallel()\n\t}\n\tt.Run(\"Python\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.Python != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.Python)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"python\",\n\t\t\truntime: \"python\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n\tt.Run(\"TypeScript\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.TypeScript != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.TypeScript)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"typescript\",\n\t\t\truntime: \"nodejs\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n}",
"func Run(dir string) ([]byte, error) {\n\tcmd := exec.Command(\"go\", \"test\", \"-v\", \"-json\", \"-cover\", \"./...\")\n\tabsDir, err := filepath.Abs(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcmd.Dir = absDir\n\toutput, err := cmd.CombinedOutput()\n\treturn output, err\n}",
"func Run(ctx context.Context, ts mako.Storage, stdout io.Writer, stderr io.Writer, commander *subcommands.Commander) subcommands.ExitStatus {\n\t// we redirect output to our custom writer, which will be checked at the end of the test\n\toutWriter = stdout\n\terrWriter = stderr\n\n\tregisterMakoCommands(commander)\n\tcommander.Output = outWriter\n\tcommander.Error = errWriter\n\n\tstorageClient = ts\n\treturn commander.Execute(ctx)\n}",
"func TestRun(t *testing.T) {\n\tRun()\n}",
"func TestRunMain(t *testing.T) {\n\tmain()\n}",
"func Run(opts ...Option) error {\n\tvar o options\n\tfor _, opt := range opts {\n\t\tif err := opt(&o); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// Now build the command to run with the realized options\n\t// struct.\n\targs := []string{\"test\", \"-v\", \"-json\"}\n\tif o.race {\n\t\targs = append(args, \"-race\")\n\t}\n\tif o.coverprofile != \"\" {\n\t\targs = append(args, \"-coverprofile=\"+o.coverprofile)\n\t}\n\tif o.coverpkg != \"\" {\n\t\targs = append(args, \"-coverpkg=\"+o.coverpkg)\n\t}\n\tif o.covermode != \"\" {\n\t\targs = append(args, \"-covermode=\"+o.covermode)\n\t}\n\tif o.p != 0 {\n\t\targs = append(args, \"-p=\"+strconv.Itoa(o.p))\n\t}\n\targs = append(args, \"./...\")\n\tcmd := exec.Command(\"go\", args...)\n\tcmd.Stderr = os.Stderr\n\n\tcmdStdout, err := cmd.StdoutPipe()\n\tif err != nil {\n\t\treturn err\n\t}\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\tif err := parseGoTestJSONOutput(cmdStdout, newMultiResultAccepter(o.accepters...)); err != nil {\n\t\t_ = cmd.Process.Kill()\n\t\t_ = cmd.Wait()\n\t\treturn err\n\t}\n\tif err := cmd.Wait(); err != nil {\n\t\treturn fmt.Errorf(\"go test failed: %w\", err)\n\t}\n\n\treturn nil\n}",
"func (envManager *TestEnvManager) RunTest(m runnable) (ret int) {\n\tdefer envManager.TearDown()\n\tif err := envManager.StartUp(); err != nil {\n\t\tlog.Printf(\"Failed to setup framework: %s\", err)\n\t\tret = 1\n\t} else {\n\t\tlog.Printf(\"\\nStart testing ......\")\n\t\tret = m.Run()\n\t}\n\treturn ret\n}",
"func (c *Command) Run(args []string) {\n\tflag.StringVar(&c.Filter, \"f\", \"\", \"regexp to filter tests by name\")\n\tflag.BoolVar(&c.Verbose, \"v\", false, \"print all test names\")\n\tcheck(flag.CommandLine.Parse(args))\n\targs = flag.Args()\n\n\tif len(args) == 0 {\n\t\targs = []string{\".\"}\n\t}\n\n\tokPath, err := util.OKPath()\n\tcheck(err)\n\n\tfor _, arg := range args {\n\t\tpackageName := util.PackageNameFromPath(okPath, arg)\n\t\tif arg == \".\" {\n\t\t\tpackageName = \".\"\n\t\t}\n\t\tanonFunctionName := 0\n\t\tf, _, errs := compiler.Compile(okPath, packageName, true,\n\t\t\t&anonFunctionName, false)\n\t\tutil.CheckErrorsWithExit(errs)\n\n\t\tm := vm.NewVM(\"no-package\")\n\t\tstartTime := time.Now()\n\t\tcheck(m.LoadFile(f))\n\t\terr := m.RunTests(c.Verbose, regexp.MustCompile(c.Filter), packageName)\n\t\telapsed := time.Since(startTime).Milliseconds()\n\t\tcheck(err)\n\n\t\tassertWord := pluralise(\"assert\", m.TotalAssertions)\n\t\tif m.TestsFailed > 0 {\n\t\t\tfmt.Printf(\"%s: %d failed %d passed %d %s (%d ms)\\n\",\n\t\t\t\tpackageName, m.TestsFailed, m.TestsPass,\n\t\t\t\tm.TotalAssertions, assertWord, elapsed)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s: %d passed %d %s (%d ms)\\n\",\n\t\t\t\tpackageName, m.TestsPass,\n\t\t\t\tm.TotalAssertions, assertWord, elapsed)\n\t\t}\n\n\t\tif m.TestsFailed > 0 {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}",
"func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run() \n os.Exit(code)\n}",
"func (s Suite) Run(t *testing.T) bool {\n\tt.Helper()\n\treturn s(\"\", nil, func(c *config) { c.t = t })\n}",
"func Test() error {\n\treturn sh.RunWith(map[string]string{\"GORACE\": \"halt_on_error=1\"},\n\t\t\"go\", \"test\", \"-race\", \"-v\", \"./...\")\n}",
"func (z *Zest) Run() error {\n\treturn z.cli.Run(os.Args)\n}",
"func Test(t *testing.T, p prog.Program, cases ...Case) {\n\tt.Helper()\n\tfor _, c := range cases {\n\t\tt.Run(strings.Join(c.args, \" \"), func(t *testing.T) {\n\t\t\tt.Helper()\n\t\t\tr := run(p, c.args, c.stdin)\n\t\t\tif r.exitCode != c.want.exitCode {\n\t\t\t\tt.Errorf(\"got exit code %v, want %v\", r.exitCode, c.want.exitCode)\n\t\t\t}\n\t\t\tif !matchOutput(r.stdout, c.want.stdout) {\n\t\t\t\tt.Errorf(\"got stdout %v, want %v\", r.stdout, c.want.stdout)\n\t\t\t}\n\t\t\tif !matchOutput(r.stderr, c.want.stderr) {\n\t\t\t\tt.Errorf(\"got stderr %v, want %v\", r.stderr, c.want.stderr)\n\t\t\t}\n\t\t})\n\t}\n}",
"func Run(t *testing.T, maker Maker) {\n\tsuite.Run(t, &baseSuite{maker: maker})\n}",
"func (o *Options) Run() error {\n\terr := o.Validate()\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to validate\")\n\t}\n\n\ttest := &v1alpha1.TestRun{}\n\terr = o.PopulateTest(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to populate the TestRun resource\")\n\t}\n\n\to.TestRun, err = o.TestClient.JxtestV1alpha1().TestRuns(o.Namespace).Create(test)\n\tif err != nil {\n\t\treturn errors.Wrapf(err, \"failed to create the TestRun CRD\")\n\t}\n\treturn nil\n}",
"func (o Scorecard) RunTests() (testOutput v1alpha2.ScorecardOutput, err error) {\n\ttests := selectTests(o.Selector, o.Config.Tests)\n\tif len(tests) == 0 {\n\t\tfmt.Println(\"no tests selected\")\n\t\treturn testOutput, err\n\t}\n\n\tbundleData, err := getBundleData(o.BundlePath)\n\tif err != nil {\n\t\treturn testOutput, fmt.Errorf(\"error getting bundle data %w\", err)\n\t}\n\n\t// create a ConfigMap holding the bundle contents\n\to.bundleConfigMap, err = createConfigMap(o, bundleData)\n\tif err != nil {\n\t\treturn testOutput, fmt.Errorf(\"error creating ConfigMap %w\", err)\n\t}\n\n\tfor i, test := range tests {\n\t\tvar err error\n\t\ttests[i].TestPod, err = o.runTest(test)\n\t\tif err != nil {\n\t\t\treturn testOutput, fmt.Errorf(\"test %s failed %w\", test.Name, err)\n\t\t}\n\t}\n\n\tif !o.SkipCleanup {\n\t\tdefer deletePods(o.Client, tests)\n\t\tdefer deleteConfigMap(o.Client, o.bundleConfigMap)\n\t}\n\n\terr = o.waitForTestsToComplete(tests)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttestOutput = getTestResults(o.Client, tests)\n\n\treturn testOutput, err\n}",
"func TestMain(m *testing.M) {\n\n\t// Run Setup\n\tSetup()\n\n\t// Run all the tests\n\treturnCode := m.Run()\n\n\t// Run teardown\n\tTearDown()\n\n\t// Pass on the exit codes\n\tos.Exit(returnCode)\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(runTest(m))\n}",
"func (t *TestRuntime) RunTests(m *testing.M) int {\n\treturn t.runTests(m, !testing.Verbose())\n}",
"func (ldx *LxdAudit) Run(args []string) int {\n\t// load audit tests fro benchmark folder\n\tauditTests := ldx.FileLoader.LoadAuditTests(ldx.FilesInfo)\n\t// filter tests by cmd criteria\n\tft := filteredAuditBenchTests(auditTests, ldx.PredicateChain, ldx.PredicateParams)\n\t//execute audit tests and show it in progress bar\n\tcompletedTest := executeTests(ft, ldx.runAuditTest, ldx.log)\n\t// generate output data\n\tui.PrintOutput(completedTest, ldx.OutputGenerator, ldx.log)\n\t// send test results to plugin\n\tsendResultToPlugin(ldx.PlChan, ldx.CompletedChan, completedTest)\n\treturn 0\n}",
"func RunTests(opts Options) {\n\tif opts.Cleanup {\n\t\terr := CleanupTests(opts.Driver, opts.DSN, opts.Verbose)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cleanup failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\t_ = flag.Set(\"test.run\", opts.Match)\n\tif opts.Verbose {\n\t\t_ = flag.Set(\"test.v\", \"true\")\n\t}\n\ttests := []testing.InternalTest{\n\t\t{\n\t\t\tName: \"MainTest\",\n\t\t\tF: func(t *testing.T) {\n\t\t\t\tTest(t, opts.Driver, opts.DSN, opts.Suites, opts.RW)\n\t\t\t},\n\t\t},\n\t}\n\n\tmainStart(tests)\n}",
"func (e *CachedTestExecutor) Run(ctx context.Context, actions ...chromedp.Action) error {\n\targs := e.Called(ctx, actions)\n\treturn args.Error(0)\n}",
"func RunTests(ctx context.Context, w io.Writer, path string) error {\n\tif path == \"\" || path == \".\" {\n\t\tvar err error\n\t\tpath, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfiles, err := getTestFiles(ctx, path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := range files {\n\t\terr = runFile(ctx, &files[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = Report(w, files)\n\treturn err\n}",
"func (s *FakeJujuRunnerSuite) TestRun(c *gc.C) {\n\ts.runner.Run()\n\ts.runner.Stop()\n\tresult := s.runner.Wait()\n \n\tc.Assert(result.String(), gc.Equals, \"OK: 1 passed\")\n\tc.Assert(result.Succeeded, gc.Equals, 1)\n\tc.Assert(result.RunError, gc.IsNil)\n\tc.Assert(\n\t\tstrings.Contains(s.output.String(), \"Starting service\"), gc.Equals, true)\n}",
"func (runner *suiteRunner) run() *Result {\n if runner.tracker.result.RunError == nil && len(runner.tests) > 0 {\n runner.tracker.start()\n if runner.checkFixtureArgs() {\n if runner.runFixture(runner.setUpSuite) {\n for i := 0; i != len(runner.tests); i++ {\n c := runner.runTest(runner.tests[i])\n if c.status == fixturePanickedSt {\n runner.missTests(runner.tests[i+1:])\n break\n }\n }\n } else {\n runner.missTests(runner.tests)\n }\n runner.runFixture(runner.tearDownSuite)\n } else {\n runner.missTests(runner.tests)\n }\n runner.tracker.waitAndStop()\n runner.tempDir.removeAll()\n }\n return &runner.tracker.result\n}",
"func (o Scorecard) Run(ctx context.Context) (testOutput v1alpha3.TestList, err error) {\n\ttestOutput = v1alpha3.NewTestList()\n\n\tif err := o.TestRunner.Initialize(ctx); err != nil {\n\t\treturn testOutput, err\n\t}\n\n\tfor _, stage := range o.Config.Stages {\n\t\ttests := o.selectTests(stage)\n\t\tif len(tests) == 0 {\n\t\t\tcontinue\n\t\t}\n\t\ttests = o.setTestDefaults(tests)\n\n\t\toutput := make(chan v1alpha3.Test, len(tests))\n\t\tif stage.Parallel {\n\t\t\to.runStageParallel(ctx, tests, output)\n\t\t} else {\n\t\t\to.runStageSequential(ctx, tests, output)\n\t\t}\n\t\tclose(output)\n\t\tfor o := range output {\n\t\t\ttestOutput.Items = append(testOutput.Items, o)\n\t\t}\n\t}\n\n\t// Get timeout error, if any, before calling Cleanup() so deletes don't cause a timeout.\n\tselect {\n\tcase <-ctx.Done():\n\t\terr = ctx.Err()\n\tdefault:\n\t}\n\n\tif !o.SkipCleanup {\n\t\t// Use a separate context for cleanup, which needs to run regardless of a prior timeout.\n\t\tclctx, cancel := context.WithTimeout(context.Background(), cleanupTimeout)\n\t\tdefer cancel()\n\t\tif err := o.TestRunner.Cleanup(clctx); err != nil {\n\t\t\treturn testOutput, err\n\t\t}\n\t}\n\n\treturn testOutput, err\n}",
"func runTestMain(m *testing.M) int {\n\tisLess, err := test_helpers.IsTarantoolVersionLess(2, 2, 0)\n\tif err != nil {\n\t\tlog.Fatalf(\"Failed to extract Tarantool version: %s\", err)\n\t}\n\n\tif isLess {\n\t\tlog.Println(\"Skipping decimal tests...\")\n\t\tisDecimalSupported = false\n\t\treturn m.Run()\n\t} else {\n\t\tisDecimalSupported = true\n\t}\n\n\tinstance, err := test_helpers.StartTarantool(test_helpers.StartOpts{\n\t\tInitScript: \"config.lua\",\n\t\tListen: server,\n\t\tUser: opts.User,\n\t\tPass: opts.Pass,\n\t\tWaitStart: 100 * time.Millisecond,\n\t\tConnectRetry: 10,\n\t\tRetryTimeout: 500 * time.Millisecond,\n\t})\n\tdefer test_helpers.StopTarantoolWithCleanup(instance)\n\n\tif err != nil {\n\t\tlog.Printf(\"Failed to prepare test Tarantool: %s\", err)\n\t\treturn 1\n\t}\n\n\treturn m.Run()\n}",
"func (t *Test) Run(ctx context.Context, opts ...TestOption) (*TestResult, error) {\n\tparsedOpts := &testOptions{\n\t\tvars: &starlark.Dict{},\n\t}\n\tfor _, opt := range opts {\n\t\topt.applyTest(parsedOpts)\n\t}\n\n\tthread := &starlark.Thread{\n\t\tPrint: skyPrint,\n\t}\n\tthread.SetLocal(\"context\", ctx)\n\n\tassertModule := assertmodule.AssertModule()\n\ttestCtx := &starlarkstruct.Module{\n\t\tName: \"skycfg_test_ctx\",\n\t\tMembers: starlark.StringDict(map[string]starlark.Value{\n\t\t\t\"vars\": parsedOpts.vars,\n\t\t\t\"assert\": assertModule,\n\t\t}),\n\t}\n\targs := starlark.Tuple([]starlark.Value{testCtx})\n\n\tresult := TestResult{\n\t\tTestName: t.Name(),\n\t}\n\n\tstartTime := time.Now()\n\t_, err := starlark.Call(thread, t.callable, args, nil)\n\tresult.Duration = time.Since(startTime)\n\tif err != nil {\n\t\t// if there is no assertion error, there was something wrong with the execution itself\n\t\tif len(assertModule.Failures) == 0 {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// there should only be one failure, because each test run gets its own *TestContext\n\t\t// and each assertion failure halts execution.\n\t\tif len(assertModule.Failures) > 1 {\n\t\t\tpanic(\"A test run should only have one assertion failure. Something went wrong with the test infrastructure.\")\n\t\t}\n\t\tresult.Failure = assertModule.Failures[0]\n\t}\n\n\treturn &result, nil\n}",
"func (ts *TestSuite) RunTests() {\n\n\tif len(ts.Tests) == 0 {\n\t\tout.Printf(\"No tests to run\\n\")\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\n\t// setup search\n\ts := search.NewSearch()\n\tsl := search.NewSearchLimits()\n\tsl.MoveTime = ts.Time\n\tsl.Depth = ts.Depth\n\tif sl.MoveTime > 0 {\n\t\tsl.TimeControl = true\n\t}\n\n\tout.Printf(\"Running Test Suite\\n\")\n\tout.Printf(\"==================================================================\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"No of tests: %d\\n\", len(ts.Tests))\n\tout.Println()\n\n\t// execute all tests and store results in the\n\t// test instance\n\tfor i, t := range ts.Tests {\n\t\tout.Printf(\"Test %d of %d\\nTest: %s -- Target Result %s\\n\", i+1, len(ts.Tests), t.line, t.targetMoves.StringUci())\n\t\tstartTime2 := time.Now()\n\t\trunSingleTest(s, sl, t)\n\t\telapsedTime := time.Since(startTime2)\n\t\tt.nodes = s.NodesVisited()\n\t\tt.time = s.LastSearchResult().SearchTime\n\t\tt.nps = util.Nps(s.NodesVisited(), s.LastSearchResult().SearchTime)\n\t\tout.Printf(\"Test finished in %d ms with result %s (%s) - nps: %d\\n\\n\",\n\t\t\telapsedTime.Milliseconds(), t.rType.String(), t.actual.StringUci(), t.nps)\n\t}\n\n\t// sum up result for report\n\ttr := &SuiteResult{}\n\tfor _, t := range ts.Tests {\n\t\ttr.Counter++\n\t\tswitch t.rType {\n\t\tcase NotTested:\n\t\t\ttr.NotTestedCounter++\n\t\tcase Skipped:\n\t\t\ttr.SkippedCounter++\n\t\tcase Failed:\n\t\t\ttr.FailedCounter++\n\t\tcase Success:\n\t\t\ttr.SuccessCounter++\n\t\t}\n\t\ttr.Nodes += t.nodes\n\t\ttr.Time += t.time\n\t}\n\tts.LastResult = tr\n\n\telapsed := time.Since(startTime)\n\n\t// print report\n\tout.Printf(\"Results for Test Suite\\n\", ts.FilePath)\n\tout.Printf(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\" %-4s | %-10s | %-8s | %-8s | %-15s | %s | %s\\n\", \" Nr.\", \"Result\", \"Move\", \"Value\", \"Expected Result\", \"Fen\", \"Id\")\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tfor i, t := range ts.Tests {\n\t\tif t.tType == DM {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s%-15d | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), \"dm \", t.mateDepth, t.fen, t.id)\n\t\t} else {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s %-15s | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), t.tType.String(), t.targetMoves.StringUci(), t.fen, t.id)\n\t\t}\n\t}\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\"Summary:\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", 
ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"Successful: %-3d (%d %%)\\n\", tr.SuccessCounter, 100*tr.SuccessCounter/tr.Counter)\n\tout.Printf(\"Failed: %-3d (%d %%)\\n\", tr.FailedCounter, 100*tr.FailedCounter/tr.Counter)\n\tout.Printf(\"Skipped: %-3d (%d %%)\\n\", tr.SkippedCounter, 100*tr.SkippedCounter/tr.Counter)\n\tout.Printf(\"Not tested: %-3d (%d %%)\\n\", tr.NotTestedCounter, 100*tr.NotTestedCounter/tr.Counter)\n\tout.Printf(\"Test time: %s\\n\", elapsed)\n\tout.Printf(\"Configuration: %s\\n\", config.Settings.String())\n}",
"func Run() error {\n\terr := Make()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"make: %w\", err)\n\t}\n\n\tbaseDir, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getwd: %w\", err)\n\t}\n\tdefer func() {\n\t\tos.Chdir(baseDir)\n\t}()\n\terr = os.Chdir(\"../bin/\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"chdir ../bin: %w\", err)\n\t}\n\t_, err = os.Stat(\"eqx\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"stat eqx: %w\", err)\n\t}\n\terr = sh.Run(\"eqx\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"run eqx: %w\", err)\n\t}\n\treturn nil\n}",
"func (b *HRPBoomer) Run(testcases ...ITestCase) {\n\tevent := sdk.EventTracking{\n\t\tCategory: \"RunLoadTests\",\n\t\tAction: \"hrp boom\",\n\t}\n\t// report start event\n\tgo sdk.SendEvent(event)\n\t// report execution timing event\n\tdefer sdk.SendEvent(event.StartTiming(\"execution\"))\n\n\t// quit all plugins\n\tdefer func() {\n\t\tpluginMap.Range(func(key, value interface{}) bool {\n\t\t\tif plugin, ok := value.(funplugin.IPlugin); ok {\n\t\t\t\tplugin.Quit()\n\t\t\t}\n\t\t\treturn true\n\t\t})\n\t}()\n\n\ttaskSlice := b.ConvertTestCasesToBoomerTasks(testcases...)\n\n\tb.Boomer.Run(taskSlice...)\n}",
"func TestExitCode(t *testing.T) {\n\tif runtime.GOOS != \"linux\" {\n\t\tt.Skipf(\"skipping fork/exec test on this platform\")\n\t}\n\n\tif os.Getenv(\"MULTICHECKER_CHILD\") == \"1\" {\n\t\t// child process\n\n\t\t// replace [progname -test.run=TestExitCode -- ...]\n\t\t// by [progname ...]\n\t\tos.Args = os.Args[2:]\n\t\tos.Args[0] = \"vet\"\n\t\tmain()\n\t\tpanic(\"unreachable\")\n\t}\n\n\ttestenv.NeedsTool(t, \"go\")\n\n\tfor _, test := range []struct {\n\t\targs []string\n\t\twant int\n\t}{\n\t\t{[]string{\"nosuchdir/...\"}, 1}, // matched no packages\n\t\t{[]string{\"nosuchpkg\"}, 1}, // matched no packages\n\t\t{[]string{\"-unknownflag\"}, 2}, // flag error\n\t\t{[]string{\"-findcall.name=panic\", \"io\"}, 3}, // finds diagnostics\n\t\t{[]string{\"-findcall=0\", \"io\"}, 0}, // no checkers\n\t\t{[]string{\"-findcall.name=nosuchfunc\", \"io\"}, 0}, // no diagnostics\n\t\t{[]string{\"-findcall.name=panic\", \"sort\", \"io\"}, 1}, // 'fail' failed on 'sort'\n\n\t\t// -json: exits zero even in face of diagnostics or package errors.\n\t\t{[]string{\"-findcall.name=panic\", \"-json\", \"io\"}, 0},\n\t\t{[]string{\"-findcall.name=panic\", \"-json\", \"io\"}, 0},\n\t\t{[]string{\"-findcall.name=panic\", \"-json\", \"sort\", \"io\"}, 0},\n\t} {\n\t\targs := []string{\"-test.run=TestExitCode\", \"--\"}\n\t\targs = append(args, test.args...)\n\t\tcmd := exec.Command(os.Args[0], args...)\n\t\tcmd.Env = append(os.Environ(), \"MULTICHECKER_CHILD=1\")\n\t\tout, err := cmd.CombinedOutput()\n\t\tif len(out) > 0 {\n\t\t\tt.Logf(\"%s: out=<<%s>>\", test.args, out)\n\t\t}\n\t\tvar exitcode int\n\t\tif err, ok := err.(*exec.ExitError); ok {\n\t\t\texitcode = err.ExitCode() // requires go1.12\n\t\t}\n\t\tif exitcode != test.want {\n\t\t\tt.Errorf(\"%s: exited %d, want %d\", test.args, exitcode, test.want)\n\t\t}\n\t}\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\tvar code int\n\tfunc() {\n\t\tdefer InitMain()()\n\t\tcode = m.Run()\n\t}()\n\tos.Exit(code)\n}",
"func (c *actionTests) actionRun(t *testing.T) {\n\te2e.EnsureImage(t, c.env)\n\n\ttests := []struct {\n\t\tname string\n\t\targv []string\n\t\texit int\n\t}{\n\t\t{\n\t\t\tname: \"NoCommand\",\n\t\t\targv: []string{c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"True\",\n\t\t\targv: []string{c.env.ImagePath, \"true\"},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"False\",\n\t\t\targv: []string{c.env.ImagePath, \"false\"},\n\t\t\texit: 1,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppGood\",\n\t\t\targv: []string{\"--app\", \"testapp\", c.env.ImagePath},\n\t\t\texit: 0,\n\t\t},\n\t\t{\n\t\t\tname: \"ScifTestAppBad\",\n\t\t\targv: []string{\"--app\", \"fakeapp\", c.env.ImagePath},\n\t\t\texit: 1,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tc.env.RunSingularity(\n\t\t\tt,\n\t\t\te2e.AsSubtest(tt.name),\n\t\t\te2e.WithProfile(e2e.UserProfile),\n\t\t\te2e.WithCommand(\"run\"),\n\t\t\te2e.WithArgs(tt.argv...),\n\t\t\te2e.ExpectExit(tt.exit),\n\t\t)\n\t}\n}",
"func TestRunIntCode(t *testing.T) {\n\tfor _, testCase := range testCases {\n\t\toutput := RunIntCode(testCase.startingCode, testCase.input)\n\t\tif !Equal(output, testCase.expected) {\n\t\t\tt.Errorf(\"Error, expected %v got %v\", testCase.expected, output)\n\t\t}\n\t}\n}",
"func TestRun(t *testing.T) {\n\tsandbox, cleanup := cmdtest.TestSetupWithSandbox(t, false)\n\tdefer cleanup()\n\n\t// first add the test repo index\n\t_, err := cmdtest.AddLocalRepo(sandbox, \"LocalTestRepo\", filepath.Join(sandbox.TestDataPath, \"dev.local-index.yaml\"))\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\tstacksList := cmdtest.GetEnvStacksList()\n\n\tif stacksList == \"dev.local/starter\" {\n\t\tt.Skip()\n\t}\n\n\t// appsody init nodejs-express\n\t_, err = cmdtest.RunAppsody(sandbox, \"init\", \"nodejs-express\")\n\tif err != nil {\n\t\tt.Fatal(err)\n\t}\n\n\t// appsody run\n\trunChannel := make(chan error)\n\tgo func() {\n\t\t_, err = cmdtest.RunAppsody(sandbox, \"run\")\n\t\trunChannel <- err\n\t\tclose(runChannel)\n\t}()\n\n\t// defer the appsody stop to close the docker container\n\tdefer func() {\n\t\t_, err = cmdtest.RunAppsody(sandbox, \"stop\")\n\t\tif err != nil {\n\t\t\tt.Logf(\"Ignoring error running appsody stop: %s\", err)\n\t\t}\n\t\t// wait for the appsody command/goroutine to finish\n\t\trunErr := <-runChannel\n\t\tif runErr != nil {\n\t\t\tt.Logf(\"Ignoring error from the appsody command: %s\", runErr)\n\t\t}\n\t}()\n\n\thealthCheckFrequency := 2 // in seconds\n\thealthCheckTimeout := 60 // in seconds\n\thealthCheckWait := 0\n\thealthCheckOK := false\n\tfor !(healthCheckOK || healthCheckWait >= healthCheckTimeout) {\n\t\tselect {\n\t\tcase err = <-runChannel:\n\t\t\t// appsody run exited, probably with an error\n\t\t\tt.Fatalf(\"appsody run quit unexpectedly: %s\", err)\n\t\tcase <-time.After(time.Duration(healthCheckFrequency) * time.Second):\n\t\t\t// check the health endpoint\n\t\t\thealthCheckWait += healthCheckFrequency\n\t\t\tresp, err := http.Get(\"http://localhost:3000/health\")\n\t\t\tif err != nil {\n\t\t\t\tt.Logf(\"Health check error. Ignore and retry: %s\", err)\n\t\t\t} else {\n\t\t\t\tresp.Body.Close()\n\t\t\t\tif resp.StatusCode != 200 {\n\t\t\t\t\tt.Logf(\"Health check response code %d. Ignore and retry.\", resp.StatusCode)\n\t\t\t\t} else {\n\t\t\t\t\tt.Logf(\"Health check OK\")\n\t\t\t\t\t// may want to check body\n\t\t\t\t\thealthCheckOK = true\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tif !healthCheckOK {\n\t\tt.Errorf(\"Did not receive an OK health check within %d seconds.\", healthCheckTimeout)\n\t}\n}",
"func TestMain(m *testing.M) {\n\tif err := TestDropDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tif err := TestCreateDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\tif err := TestCreateTables(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n\n\tcode := m.Run()\n\n\tos.Exit(code)\n\n\tif err := TestDropDB(\"..\"); err != nil {\n\t\tlog.Println(err)\n\t\tos.Exit(1)\n\t}\n}",
"func TestMain(m *testing.M) {\n\tsetUp()\n\tretCode := m.Run()\n\ttearDown()\n\tos.Exit(retCode)\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\n\tos.Exit(m.Run())\n}",
"func (t *Test) Run(tc *TestSuite) error {\n\n\tmqutil.Logger.Print(\"\\n--- \" + t.Name)\n\tfmt.Printf(\"\\nRunning test case: %s\\n\", t.Name)\n\terr := t.ResolveParameters(tc)\n\tif err != nil {\n\t\tfmt.Printf(\"... Fail\\n... %s\\n\", err.Error())\n\t\treturn err\n\t}\n\n\treq := resty.R()\n\tif len(tc.ApiToken) > 0 {\n\t\treq.SetAuthToken(tc.ApiToken)\n\t} else if len(tc.Username) > 0 {\n\t\treq.SetBasicAuth(tc.Username, tc.Password)\n\t}\n\n\tpath := GetBaseURL(t.db.Swagger) + t.SetRequestParameters(req)\n\tvar resp *resty.Response\n\n\tt.startTime = time.Now()\n\tswitch t.Method {\n\tcase mqswag.MethodGet:\n\t\tresp, err = req.Get(path)\n\tcase mqswag.MethodPost:\n\t\tresp, err = req.Post(path)\n\tcase mqswag.MethodPut:\n\t\tresp, err = req.Put(path)\n\tcase mqswag.MethodDelete:\n\t\tresp, err = req.Delete(path)\n\tcase mqswag.MethodPatch:\n\t\tresp, err = req.Patch(path)\n\tcase mqswag.MethodHead:\n\t\tresp, err = req.Head(path)\n\tcase mqswag.MethodOptions:\n\t\tresp, err = req.Options(path)\n\tdefault:\n\t\treturn mqutil.NewError(mqutil.ErrInvalid, fmt.Sprintf(\"Unknown method in test %s: %v\", t.Name, t.Method))\n\t}\n\tt.stopTime = time.Now()\n\tfmt.Printf(\"... call completed: %f seconds\\n\", t.stopTime.Sub(t.startTime).Seconds())\n\n\tif err != nil {\n\t\tt.err = mqutil.NewError(mqutil.ErrHttp, err.Error())\n\t} else {\n\t\tmqutil.Logger.Print(resp.Status())\n\t\tmqutil.Logger.Println(string(resp.Body()))\n\t}\n\terr = t.ProcessResult(resp)\n\treturn err\n}",
"func Run(rootCommand *Command, version string, runEnv *cli.RunEnv) int {\n\tstart := time.Now()\n\tinternal.SetRunEnvDefaults(runEnv)\n\tvar exitCode int\n\tif err := runRootCommand(rootCommand, version, start, runEnv, &exitCode); err != nil {\n\t\tprintError(runEnv.Stderr, err)\n\t\treturn 1\n\t}\n\treturn exitCode\n}",
"func Run(ctx context.Context, s *testing.State) {\n\t// Reserve time for cleanup\n\tcloseCtx := ctx\n\tctx, cancel := ctxutil.Shorten(ctx, 2*time.Second)\n\tdefer cancel()\n\n\t// Perform initial test setup\n\tsetupVars, err := runSetup(ctx, s)\n\tif err != nil {\n\t\ts.Fatal(\"Failed to run setup: \", err)\n\t}\n\tdefer setupVars.closeBrowser(closeCtx)\n\tdefer setupVars.recorder.Close(closeCtx)\n\n\tif err := muteDevice(ctx, s); err != nil {\n\t\ts.Log(\"(non-error) Failed to mute device: \", err)\n\t}\n\n\t// Execute Test\n\tif err := setupVars.recorder.Run(ctx, func(ctx context.Context) error {\n\t\treturn testBody(ctx, setupVars)\n\t}); err != nil {\n\t\ts.Fatal(\"Failed to conduct the test scenario, or collect the histogram data: \", err)\n\t}\n\n\t// Write out values\n\tpv := perf.NewValues()\n\tif err := setupVars.recorder.Record(ctx, pv); err != nil {\n\t\ts.Fatal(\"Failed to report: \", err)\n\t}\n\tif err := pv.Save(s.OutDir()); err != nil {\n\t\ts.Error(\"Failed to store values: \", err)\n\t}\n}",
"func Run(t *testing.T, configOpt core.ConfigProvider, sdkOpts ...fabsdk.Option) {\n\tsetupAndRun(t, true, configOpt, e2eTest, sdkOpts...)\n}",
"func (e *Environment) Run(tests ...TestFunc) {\n\tfor _, test := range tests {\n\t\tf := e.New(test)\n\t\te.t.Run(f.name, func(t *testing.T) {\n\t\t\t// Recover from panics to ensure that defer Teardown will run.\n\t\t\tdefer func() {\n\t\t\t\tif err := recover(); err != nil {\n\t\t\t\t\tt.Errorf(\"panic: %s\", err)\n\t\t\t\t}\n\t\t\t}()\n\t\t\tf.t = t\n\t\t\tf.testFn(t, f)\n\t\t})\n\t}\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func (tc ScannerTestcase) Run(ctx context.Context) func(*testing.T) {\n\tsort.Slice(tc.Want, pkgSort(tc.Want))\n\treturn func(t *testing.T) {\n\t\tctx := zlog.Test(ctx, t)\n\t\td := tc.Digest()\n\t\tn, err := fetch.Layer(ctx, t, http.DefaultClient, tc.Domain, tc.Name, d)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tdefer n.Close()\n\t\tl := &claircore.Layer{\n\t\t\tHash: d,\n\t\t}\n\t\tl.SetLocal(n.Name())\n\n\t\tgot, err := tc.Scanner.Scan(ctx, l)\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tsort.Slice(got, pkgSort(got))\n\t\tt.Logf(\"found %d packages\", len(got))\n\t\tif !cmp.Equal(tc.Want, got) {\n\t\t\tt.Error(cmp.Diff(tc.Want, got))\n\t\t}\n\t}\n}",
"func (runner TestSuiteRunner) RunTests(testNamesToRun map[string]bool, testParallelism uint) (allTestsPassed bool, executionErr error) {\n\tallTests := runner.testSuite.GetTests()\n\n\t// If the user doesn't specify any test names to run, run all of them\n\tif len(testNamesToRun) == 0 {\n\t\ttestNamesToRun = map[string]bool{}\n\t\tfor testName, _ := range allTests {\n\t\t\ttestNamesToRun[testName] = true\n\t\t}\n\t}\n\n\t// Validate all the requested tests exist\n\ttestsToRun := make(map[string]testsuite.Test)\n\tfor testName, _ := range testNamesToRun {\n\t\ttest, found := allTests[testName]\n\t\tif !found {\n\t\t\treturn false, stacktrace.NewError(\"No test registered with name '%v'\", testName)\n\t\t}\n\t\ttestsToRun[testName] = test\n\t}\n\n\texecutionInstanceId := uuid.Generate()\n\ttestParams, err := buildTestParams(executionInstanceId, testsToRun, runner.networkWidthBits)\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err, \"An error occurred building the test params map\")\n\t}\n\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\")\n\t}\n\n\ttestExecutor := parallelism.NewTestExecutorParallelizer(\n\t\texecutionInstanceId,\n\t\tdockerClient,\n\t\trunner.testControllerImageName,\n\t\trunner.testControllerLogLevel,\n\t\trunner.customTestControllerEnvVars,\n\t\ttestParallelism)\n\n\tlogrus.Infof(\"Running %v tests with execution ID %v...\", len(testsToRun), executionInstanceId.String())\n\tallTestsPassed = testExecutor.RunInParallelAndPrintResults(testParams)\n\treturn allTestsPassed, nil\n}",
"func Test(t *testing.T, command Runner, testCases []Case) {\n\tt.Helper()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Helper() // TODO: make Helper working for subtests: issue #24128\n\n\t\t\tstdout := &bytes.Buffer{}\n\t\t\tstderr := &bytes.Buffer{}\n\n\t\t\tcommand.SetStdout(stdout)\n\t\t\tcommand.SetStderr(stderr)\n\n\t\t\tm := newMatch(t, tc.wantFail)\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif !m.removeFile(tc.WantFile) {\n\t\t\t\t\ttc.WantFile = \"\" // stop testing File match\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar gotErr string\n\t\t\tgotPanic := m.run(func() {\n\t\t\t\tif err := command.Run(tc.Args); err != nil {\n\t\t\t\t\tgotErr = err.Error()\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif gotFile, ext, ok := m.getFile(tc.WantFile); ok {\n\t\t\t\t\tm.match(\"File golden\"+ext, gotFile, \"golden\"+ext)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.match(\"WantStdout\", stdout.String(), tc.WantStdout)\n\t\t\tm.match(\"WantStderr\", stderr.String(), tc.WantStderr)\n\t\t\tm.match(\"WantPanic\", gotPanic, tc.WantPanic)\n\t\t\tm.match(\"WantErr\", gotErr, tc.WantErr)\n\t\t\tm.equal(\"WantExitCode\", command.ExitCode(), tc.WantExitCode)\n\n\t\t\tm.done()\n\t\t})\n\t}\n}",
"func TestMain(m *testing.M) {\n\tDropTestData(0)\n\tanswer := m.Run()\n\tDropTestData(0)\n\tos.Exit(answer)\n}",
"func TestMain(m *testing.M) {\n\tsetup()\n\tcode := m.Run()\n\tshutdown()\n\t// THE FOLLOWING LINE IS VERY IMPORTANT and cannot be removed for TestMain func's\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\tresult := m.Run()\n\n\tos.Exit(result)\n}",
"func TestRun2(t *testing.T) {\n\n\tbody := ioutil.NopCloser(strings.NewReader(\n\t\t`{\n\t\t\t\t\"language\": \"cpp\",\n\t\t\t\t\"code\": \"#include <iostream>\\nint main() {std::cout << \\\"Hello\\\" << std::endl;return 0;}\",\n\t\t\t\t\"stdIn\": \"\"\n\t\t\t}`))\n\n\tstdout, stderr, err := Run(body)\n\twant_stdout := []byte(\"Hello\\n\")\n\twant_stderr := []byte{}\n\n\tif err != nil {\n\t\tt.Errorf(\"An unexpected error was raised:\\n %v\\n %T\", err, err)\n\t} else {\n\t\tif diff := cmp.Diff(want_stdout, stdout); diff != \"\" {\n\t\t\tt.Errorf(\"Stdout mismatch (-want +got):\\n%s\", diff)\n\t\t}\n\t\tif diff := cmp.Diff(want_stderr, stderr); diff != \"\" {\n\t\t\tt.Errorf(\"Stderr mismatch (-want +got):\\n%s\", diff)\n\t\t}\n\t}\n\n}",
"func runTestMain(m *testing.M) int {\n\tvar t mockAsserter\n\n\thome, cleanup := tmtest.SetupConfig(t, \"testdata\")\n\tdefer cleanup()\n\n\tctx, cancel := context.WithTimeout(context.Background(), time.Minute)\n\tdefer cancel()\n\tappCleanup := tmtest.RunApp(ctx, t, appName, home)\n\ttmCleanup := tmtest.RunTendermint(ctx, t, home)\n\n\tdefer appCleanup()\n\tdefer tmCleanup()\n\n\treturn m.Run()\n}",
"func (r *Runner) Run() []error {\n\tif len(r.Sources) < 1 {\n\t\tr.log.Warn(\"no sources given\")\n\t\treturn nil\n\t}\n\n\tres := []*runResult{}\n\n\tsourcesStart := time.Now()\n\n\tfor i, sourceFile := range r.Sources {\n\t\tsourceBytes, err := os.ReadFile(sourceFile)\n\t\tif err != nil {\n\t\t\tres = append(res, &runResult{Retcode: -1, Error: err})\n\t\t\tcontinue\n\t\t}\n\n\t\tres = append(res, r.checkSource(i, sourceFile, string(sourceBytes))...)\n\t}\n\n\tif !r.noExec && r.Count > 0 && len(res) != r.Count {\n\t\tr.log.WithFields(logrus.Fields{\n\t\t\t\"expected\": r.Count,\n\t\t\t\"actual\": len(res),\n\t\t}).Error(\"mismatched example count\")\n\n\t\treturn []error{fmt.Errorf(\"example count %d != expected %d\", len(res), r.Count)}\n\t}\n\n\tif len(res) == 0 {\n\t\treturn []error{}\n\t}\n\n\terrs := []error{}\n\n\tfor _, result := range res {\n\t\tif result == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif result.Stdout != \"\" || result.Stderr != \"\" {\n\t\t\tr.log.WithFields(logrus.Fields{\n\t\t\t\t\"source\": result.Runnable.SourceFile,\n\t\t\t\t\"stdout\": result.Stdout,\n\t\t\t\t\"stderr\": result.Stderr,\n\t\t\t}).Debug(\"captured output\")\n\t\t}\n\n\t\tif result.Error != nil {\n\t\t\tif v, ok := result.Error.(*skipErr); ok {\n\t\t\t\tr.log.WithFields(logrus.Fields{\n\t\t\t\t\t\"source\": result.Runnable.SourceFile,\n\t\t\t\t\t\"line\": result.Runnable.LineOffset,\n\t\t\t\t\t\"reason\": v.Reason,\n\t\t\t\t}).Debug(\"skipped example\")\n\t\t\t} else {\n\t\t\t\terrs = append(errs, result.Error)\n\t\t\t}\n\t\t}\n\t}\n\n\tr.log.WithFields(logrus.Fields{\n\t\t\"source_count\": len(r.Sources),\n\t\t\"example_count\": len(res),\n\t\t\"error_count\": len(errs),\n\t\t\"time\": time.Since(sourcesStart),\n\t}).Info(\"done\")\n\n\treturn errs\n}",
"func (suite FeatureTestSuite) Run(t *testing.T, buildFunc feature.BuildFunc) {\n\tfor _, test := range suite {\n\t\trunTest(t, test, buildFunc)\n\t}\n}",
"func TestMain(m *testing.M) {\n\t// Note: The setup will provision a single K8s env and\n\t// all the tests need to create and use a separate namespace\n\n\t// setup env test\n\tif err := setupSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// run tests\n\tcode := m.Run()\n\n\t// tear down test env\n\tif err := tearDownSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tos.Exit(code)\n}",
"func Run(t *testing.T, file string) string {\n\tout, err := exec.Command(\"go\", \"run\", file).Output()\n\tif e, ok := err.(*exec.ExitError); ok {\n\t\tt.Fatalf(\"%s\", e.Stderr)\n\t} else if err != nil {\n\t\tt.Fatalf(\"%s\", err)\n\t}\n\treturn string(out)\n}",
"func Run(handler Handler, runEnv *cli.RunEnv) int {\n\tstart := time.Now()\n\tinternal.SetRunEnvDefaults(runEnv)\n\tif err := runHandler(handler, start, runEnv); err != nil {\n\t\tif errString := err.Error(); errString != \"\" {\n\t\t\t_, _ = fmt.Fprintln(runEnv.Stderr, errString)\n\t\t}\n\t\treturn 1\n\t}\n\treturn 0\n}",
"func runTests(inv *logstash.Invocation, tests []testcase.TestCaseSet, diffCommand []string, keptEnvVars []string) error {\n\tok := true\n\tfor _, t := range tests {\n\t\tfmt.Printf(\"Running tests in %s...\\n\", filepath.Base(t.File))\n\t\tp, err := logstash.NewProcess(inv, t.Codec, t.InputFields, keptEnvVars)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer p.Release()\n\t\tif err = p.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, line := range t.InputLines {\n\t\t\t_, err = p.Input.Write([]byte(line + \"\\n\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err = p.Input.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresult, err := p.Wait()\n\t\tif err != nil || *logstashOutput {\n\t\t\tmessage := getLogstashOutputMessage(result.Output, result.Log)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error running Logstash: %s.%s\", err, message)\n\t\t\t}\n\t\t\tuserError(\"%s\", message)\n\t\t}\n\t\tif err = t.Compare(result.Events, false, diffCommand); err != nil {\n\t\t\tuserError(\"Testcase failed, continuing with the rest: %s\", err)\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\treturn errors.New(\"one or more testcases failed\")\n\t}\n\treturn nil\n}",
"func runTest(ctx context.Context, c autotest.Config, a *autotest.AutoservArgs, w io.Writer) (*Result, error) {\n\tr, err := runTask(ctx, c, a, w)\n\tif !r.Started {\n\t\treturn r, err\n\t}\n\tp := filepath.Join(a.ResultsDir, autoservPidFile)\n\tif i, err2 := readTestsFailed(p); err2 != nil {\n\t\tif err == nil {\n\t\t\terr = err2\n\t\t}\n\t} else {\n\t\tr.TestsFailed = i\n\t}\n\tif err2 := appendJobFinished(a.ResultsDir); err == nil {\n\t\terr = err2\n\t}\n\treturn r, err\n}",
"func (mock *MockEnv) Run(command ggman.Command, workdir string, stdin string, argv ...string) (code uint8, stdout, stderr string) {\n\t// create buffers\n\tstdinReader := strings.NewReader(stdin)\n\tstdoutBuffer := &bytes.Buffer{}\n\tstderrBuffer := &bytes.Buffer{}\n\n\t// create a program and run Main()\n\tfakeggman := ggman.NewProgram()\n\tccommand, _ := reflectx.CopyInterface(command)\n\tfakeggman.Register(ccommand)\n\n\tstream := stream.NewIOStream(stdoutBuffer, stderrBuffer, stdinReader, 0)\n\n\t// run the code\n\terr := exit.AsError(fakeggman.Main(stream, env.Parameters{\n\t\tVariables: mock.vars,\n\t\tPlumbing: mock.plumbing,\n\t\tWorkdir: workdir,\n\t}, argv))\n\treturn uint8(err.ExitCode), stdoutBuffer.String(), stderrBuffer.String()\n}",
"func (t *Target) Run() (int, string, error) {\n\tif len(t.Before) > 0 {\n\t\tstatus, out, err := t.runTargetList(t.Before)\n\t\tif status != 0 || err != nil {\n\t\t\treturn status, out, err\n\t\t}\n\t}\n\n\tenvs, err := t.File.Env.Config()\n\tif err != nil {\n\t\treturn 1, \"\", err\n\t}\n\tcmd, err := execute.CommandNoWait(t.Cmd, t.W, t.WErr, envs)\n\tif err != nil {\n\t\treturn 1, \"\", err\n\t}\n\tinterrupt := make(chan os.Signal, 1)\n\tgo func(c *exec.Cmd) {\n\t\texecute.WaitNoop(interrupt, cmd)\n\t\tif c != nil && c.Process != nil {\n\t\t\tc.Process.Kill()\n\t\t}\n\t}(cmd)\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tstatus := execute.GetExitStatus(err)\n\t\treturn status, \"\", err\n\t}\n\n\tif len(t.After) > 0 {\n\t\tstatus, out, err := t.runTargetList(t.After)\n\t\tif status != 0 || err != nil {\n\t\t\treturn status, out, err\n\t\t}\n\t}\n\treturn 0, \"\", nil\n}",
"func TestMain(m *testing.M) {\n\t_Init();\n\tresult := m.Run();\n\t_TearDown();\n\tos.Exit(result);\n}",
"func TestMain(m *testing.M) {\n\tvar (\n\t\tretCode int\n\t\terr error\n\t)\n\n\terr = setup()\n\tif err != nil {\n\t\tlog.Panic(\"Error setting up test db\", err)\n\t}\n\n\tretCode = m.Run()\n\n\terr = teardown()\n\tif err != nil {\n\t\tlog.Panic(\"Error tearing down test db\", err)\n\t}\n\n\tos.Exit(retCode)\n}",
"func (g *Ginkgo) Run() error {\n\ttestDir := fmt.Sprintf(\"test/e2e/%s\", g.Orchestrator)\n\tcmd := exec.Command(\"ginkgo\", \"-nodes\", g.GinkgoNodes, \"-slowSpecThreshold\", \"180\", \"-r\", testDir)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Start()\n\tif err != nil {\n\t\tlog.Printf(\"Error while trying to start ginkgo:%s\\n\", err)\n\t\treturn err\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (a *Agent) Run() (loadtest.Status, error) {\n\tvar status loadtest.Status\n\tresp, err := a.apiPost(a.apiURL+a.id+\"/run\", nil)\n\tif err != nil {\n\t\treturn status, err\n\t}\n\tstatus = *resp.Status\n\treturn status, nil\n}",
"func Run(config *Settings) {\n\tfileNames, err := config.reader.ReadExecutables()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\tvar resMutex = &sync.Mutex{}\n\tres := make([]*google.TestResult, 0)\n\n\tvar tasksWg sync.WaitGroup\n\ttasksWg.Add(len(fileNames))\n\tworkerQueueLimit := make(chan bool, config.workersCount) \n\n\tfor _, line := range fileNames {\n\t\tworkerQueueLimit <- true // try get access to work\n\t\tgo func(file string) {\n\t\t\tdefer tasksWg.Done()\n\t\t\tdefer func() { <-workerQueueLimit }() // decrease working queue\n\n\t\t\tconfig.formatter.ShowSuiteStart(file)\n\n\t\t\tgr, output, err := runGoogleTest(file, config.workingDir)\n\t\t\tif err != nil {\n\t\t\t\tconfig.formatter.ShowSuiteFailure(file, output, err)\n\t\t\t} else {\n\t\t\t\tresMutex.Lock()\n\t\t\t\tdefer resMutex.Unlock()\n\t\t\t\tres = append(res, gr)\n\t\t\t\tconfig.formatter.ShowTests(gr, output)\n\t\t\t}\n\t\t}(line)\n\t}\n\n\ttasksWg.Wait()\n\n\tconfig.formatter.ShowStatistics(res)\n}",
"func Run(ctx context.Context, cfg *config.Config, state *config.DeprecatedState) ([]*resultsjson.Result, error) {\n\tif !config.ShouldConnect(cfg.Target()) {\n\t\tlogging.Info(ctx, \"Tast will not make any connection to the target '-'.\")\n\t}\n\n\treportClient, err := reporting.NewRPCClient(ctx, cfg.ReportsServer())\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to set up gRPC servers\")\n\t}\n\tdefer reportClient.Close()\n\n\t// Always start an ephemeral devserver for remote tests if TLWServer is not specified, and allowed.\n\tif cfg.TLWServer() == \"\" && cfg.UseEphemeralDevserver() && config.ShouldConnect(cfg.Target()) {\n\t\tes, err := startEphemeralDevserverForRemoteTests(ctx, cfg, state)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrap(err, \"failed to start ephemeral devserver for remote tests\")\n\t\t}\n\t\tdefer es.Close()\n\t} else {\n\t\tstate.RemoteDevservers = cfg.Devservers()\n\t}\n\n\tif err := prepare.CheckPrivateBundleFlag(ctx, cfg); err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed in checking downloadprivatebundles flag\")\n\t}\n\n\tdrv, err := driver.New(ctx, cfg, cfg.Target(), \"\")\n\tif err != nil {\n\t\treturn nil, errors.Wrap(err, \"failed to connect to target\")\n\t}\n\tdefer drv.Close(ctx)\n\tdutInfo, err := prepareDUT(ctx, cfg, drv)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch cfg.Mode() {\n\tcase config.ListTestsMode:\n\t\tresults, err := listTests(ctx, cfg, drv, dutInfo)\n\t\tif err != nil {\n\t\t\treturn nil, errors.Wrapf(err, \"failed to list tests\")\n\t\t}\n\t\treturn results, nil\n\tcase config.RunTestsMode:\n\t\tresults, err := runTests(ctx, cfg, state, drv, reportClient, dutInfo)\n\t\tif err != nil {\n\t\t\treturn results, errors.Wrapf(err, \"failed to run tests\")\n\t\t}\n\t\treturn results, nil\n\tdefault:\n\t\treturn nil, errors.Errorf(\"unhandled mode %d\", cfg.Mode())\n\t}\n}",
"func (c *Command) Run(t *testing.T) {\n\targs := strings.Split(c.Args, \" \")\n\tif output, err := exec.Command(c.Exec, args...).CombinedOutput(); err != nil {\n\t\tt.Fatalf(\"Error executing: '%s' '%s' -err: '%v'\", c.Exec, c.Args, strings.TrimSpace(string(output)))\n\t}\n}",
"func Test(cli *DockerClient, problemDir, fileName, solutionDir, ft string) (SubmissionStatus, error) {\n\tdefer cleanUpArtifacts(problemDir, fileName, ft)\n\n\tlogFile, err := os.Create(filepath.Join(problemDir, \"log.txt\"))\n\tif err != nil {\n\t\treturn RunnerError, err\n\t}\n\tdefer logFile.Close()\n\n\tlogFile.Write([]byte(\"Compile:\\n\"))\n\n\t// Compile the solution\n\terr = Compile(cli, problemDir, fileName, ft, logFile)\n\tif err == ErrExitStatusError {\n\t\tlogFile.Write([]byte(\"Status: Compile Error\\n\"))\n\t\treturn CompileError, nil\n\t} else if err != nil {\n\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to run the compiler container: \" + err.Error() + \"\\n\"))\n\t\treturn RunnerError, err\n\t}\n\n\t// Run the submissions\n\ttests, err := ioutil.ReadDir(solutionDir)\n\tif err != nil {\n\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to open solution directory: \" + err.Error() + \"\\n\"))\n\t\treturn RunnerError, err\n\t}\n\n\tproblemDef, err := loadProblem(problemDir)\n\tif err != nil {\n\t\tlogFile.Write([]byte(\"Failed to open solution definition (using defaults): \" + err.Error() + \"\\n\"))\n\t}\n\n\trunner, err := NewRunner(cli, problemDir, fileName, ft, time.Duration(problemDef.Time)*time.Second)\n\tif err != nil {\n\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to create runner container: \" + err.Error() + \"\\n\"))\n\t\treturn RunnerError, err\n\t}\n\tdefer runner.Close()\n\n\tfor _, file := range tests {\n\t\tname, fileType := detectType(file.Name())\n\n\t\tif fileType != \"in\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogFile.Write([]byte(\"Running \" + file.Name() + \":\\n\"))\n\n\t\t// Get the inputs\n\t\tfileIn, err := os.Open(filepath.Join(solutionDir, file.Name()))\n\t\tif err != nil {\n\t\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to load input file \" + file.Name() + \": \" + err.Error() + \"\\n\"))\n\t\t\treturn RunnerError, err\n\t\t}\n\t\tdefer fileIn.Close()\n\n\t\toutBuffer := bytes.NewBufferString(\"\")\n\n\t\t// Run the code\n\t\tif err := runner.Run(fileIn, outBuffer); err != nil {\n\t\t\tif err == ErrExitStatusError {\n\t\t\t\tlogFile.Write([]byte(\"Status: Exception\\n\"))\n\t\t\t\treturn Exception, nil\n\t\t\t} else if err == ErrTimeLimit {\n\t\t\t\tlogFile.Write([]byte(\"Status: Time Limit Exceeded\\n\"))\n\t\t\t\treturn TimeLimit, nil\n\t\t\t} else {\n\t\t\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to run submission:\" + err.Error() + \"\\n\"))\n\t\t\t\treturn RunnerError, err\n\t\t\t}\n\t\t}\n\n\t\t// Verify the output of the submission\n\t\toutFile, err := ioutil.ReadFile(filepath.Join(solutionDir, name+\".out\"))\n\t\tif err != nil {\n\t\t\tlogFile.Write([]byte(\"Status: Runner error\\nError: Failed to load answer \" + name + \".out: \" + err.Error() + \"\\n\"))\n\t\t\treturn RunnerError, err\n\t\t}\n\n\t\texpectedOut := strings.Trim(string(outFile), \"\\r\\n\\t \")\n\t\tsolutionOut := strings.Trim(string(outBuffer.String()), \"\\r\\n\\t \")\n\n\t\tlogFile.Write([]byte(solutionOut + \"\\n\"))\n\n\t\tif expectedOut != solutionOut {\n\t\t\tlogFile.Write([]byte(\"Status: Wrong Answer\\n\"))\n\t\t\treturn Wrong, nil\n\t\t}\n\t}\n\n\tlogFile.Write([]byte(\"Status: Accepted\"))\n\treturn Ok, nil\n}",
"func Test() error {\n\treturn sh.RunV(\"go\", \"test\", \"-v\", \"-cover\", \"./...\", \"-coverprofile=coverage.out\")\n}",
"func RunTest(t *testing.T, name string, f Func, testCases []TestCase) {\n\tt.Run(name, func(t *testing.T) {\n\t\tfor _, test := range testCases {\n\t\t\tif actual := f(test.Input); actual != test.Expected {\n\t\t\t\tt.Errorf(\"\\nfor n=%d, expected: %t, actual: %t\", test.Input, test.Expected, actual)\n\t\t\t}\n\t\t}\n\t})\n}",
"func (g *Ginkgo) Run() error {\n\tg.Point.SetTestStart()\n\t// use the test bin rather than compile the directory b/c the compile will happen in a sub dir which is another module\n\ttestFile := fmt.Sprintf(\"test/e2e/%s/%s.test\", g.Config.Orchestrator, g.Config.Orchestrator)\n\n\targs := []string{\"-slowSpecThreshold\", \"180\", \"-r\", \"-v\"}\n\tif g.Config.GinkgoParallel {\n\t\targs = append(args, \"-p\")\n\t}\n\tif g.Config.GinkgoFailFast {\n\t\targs = append(args, \"-failFast\")\n\t}\n\tif g.Config.GinkgoFocus != \"\" {\n\t\targs = append(args, \"-focus\")\n\t\targs = append(args, g.Config.GinkgoFocus)\n\t}\n\tif g.Config.GinkgoSkip != \"\" {\n\t\targs = append(args, \"-skip\")\n\t\targs = append(args, g.Config.GinkgoSkip)\n\t}\n\targs = append(args, testFile)\n\tvar cmd = exec.Command(\"ginkgo\", args...)\n\n\tutil.PrintCommand(cmd)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\terr := cmd.Start()\n\tif err != nil {\n\t\tg.Point.RecordTestError()\n\t\tlog.Printf(\"Error while trying to start ginkgo:%s\\n\", err)\n\t\treturn err\n\t}\n\n\terr = cmd.Wait()\n\tif err != nil {\n\t\tg.Point.RecordTestError()\n\t\tkubectl := exec.Command(\"k\", \"get\", \"all\", \"--all-namespaces\", \"-o\", \"wide\")\n\t\tutil.PrintCommand(kubectl)\n\t\tkubectl.CombinedOutput()\n\t\tkubectl = exec.Command(\"k\", \"get\", \"nodes\", \"-o\", \"wide\")\n\t\tutil.PrintCommand(kubectl)\n\t\tkubectl.CombinedOutput()\n\t\treturn err\n\t}\n\tg.Point.RecordTestSuccess()\n\treturn nil\n}",
"func TestMain(m *testing.M) {\n\tcode := setupForTesting()\n\tif code != 0 {\n\t\tos.Exit(code)\n\t}\n\tcode = m.Run()\n\n\tif !TestDisableDatabase {\n\t\terr := TestDB.Close()\n\t\tif err != nil {\n\t\t\tfmt.Println(\"close error:\", err)\n\t\t\tos.Exit(2)\n\t\t}\n\t}\n\n\tos.Exit(code)\n}",
"func TestRun(t *testing.T) {\n\tsuite.Run(t, new(CategoryTestSuite))\n\tsuite.Run(t, new(ProductTestSuite))\n}",
"func (r *Runner) Run(ctx context.Context) error {\n\treturn errors.New(\"not implemented\")\n}",
"func (f *VRFTest) Run() error {\n\tif err := f.createChainlinkJobs(); err != nil {\n\t\treturn err\n\t}\n\tvar ctx context.Context\n\tvar testCtxCancel context.CancelFunc\n\tif f.TestOptions.TestDuration.Seconds() > 0 {\n\t\tctx, testCtxCancel = context.WithTimeout(context.Background(), f.TestOptions.TestDuration)\n\t} else {\n\t\tctx, testCtxCancel = context.WithCancel(context.Background())\n\t}\n\tdefer testCtxCancel()\n\tcancelPerfEvents := f.watchPerfEvents()\n\tcurrentRound := 0\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlog.Info().Msg(\"Test finished\")\n\t\t\ttime.Sleep(f.TestOptions.GracefulStopDuration)\n\t\t\tcancelPerfEvents()\n\t\t\treturn nil\n\t\tdefault:\n\t\t\tlog.Info().Int(\"RoundID\", currentRound).Msg(\"New round\")\n\t\t\tif err := f.requestRandomness(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := f.waitRoundFulfilled(currentRound + 1); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif f.TestOptions.NumberOfRounds != 0 && currentRound >= f.TestOptions.NumberOfRounds {\n\t\t\t\tlog.Info().Msg(\"Final round is reached\")\n\t\t\t\ttestCtxCancel()\n\t\t\t}\n\t\t\tcurrentRound++\n\t\t}\n\t}\n}",
"func Run(t *testing.T, suites ...TCatcher) {\n\trun(t, new(TDDFormatter), suites...)\n}",
"func (st *buildStatus) runTests(helpers <-chan buildlet.Client) (remoteErr, err error) {\n\ttestNames, remoteErr, err := st.distTestList()\n\tif remoteErr != nil {\n\t\treturn fmt.Errorf(\"distTestList remote: %v\", remoteErr), nil\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"distTestList exec: %v\", err)\n\t}\n\ttestStats := getTestStats(st)\n\n\tset, err := st.newTestSet(testStats, testNames)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tst.LogEventTime(\"starting_tests\", fmt.Sprintf(\"%d tests\", len(set.items)))\n\tstartTime := time.Now()\n\n\tworkDir, err := st.bc.WorkDir(st.ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error discovering workdir for main buildlet, %s: %v\", st.bc.Name(), err)\n\t}\n\n\tmainBuildletGoroot := st.conf.FilePathJoin(workDir, \"go\")\n\tmainBuildletGopath := st.conf.FilePathJoin(workDir, \"gopath\")\n\n\t// We use our original buildlet to run the tests in order, to\n\t// make the streaming somewhat smooth and not incredibly\n\t// lumpy. The rest of the buildlets run the largest tests\n\t// first (critical path scheduling).\n\t// The buildletActivity WaitGroup is used to track when all\n\t// the buildlets are dead or done.\n\tvar buildletActivity sync.WaitGroup\n\tbuildletActivity.Add(2) // one per goroutine below (main + helper launcher goroutine)\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor !st.bc.IsBroken() {\n\t\t\ttis, ok := set.testsToRunInOrder()\n\t\t\tif !ok {\n\t\t\t\tselect {\n\t\t\t\tcase <-st.ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tst.runTestsOnBuildlet(st.bc, tis, mainBuildletGoroot, mainBuildletGopath)\n\t\t}\n\t\tst.LogEventTime(\"main_buildlet_broken\", st.bc.Name())\n\t}()\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor helper := range helpers {\n\t\t\tbuildletActivity.Add(1)\n\t\t\tgo func(bc buildlet.Client) {\n\t\t\t\tdefer buildletActivity.Done() // for the per-helper Add(1) above\n\t\t\t\tdefer st.LogEventTime(\"closed_helper\", bc.Name())\n\t\t\t\tdefer bc.Close()\n\t\t\t\tif devPause {\n\t\t\t\t\tdefer time.Sleep(5 * time.Minute)\n\t\t\t\t\tdefer st.LogEventTime(\"DEV_HELPER_SLEEP\", bc.Name())\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"got_empty_test_helper\", bc.String())\n\t\t\t\tif err := bc.PutTarFromURL(st.ctx, st.SnapshotURL(pool.NewGCEConfiguration().BuildEnv()), \"go\"); err != nil {\n\t\t\t\t\tlog.Printf(\"failed to extract snapshot for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tworkDir, err := bc.WorkDir(st.ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error discovering workdir for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"test_helper_set_up\", bc.Name())\n\t\t\t\tgoroot := st.conf.FilePathJoin(workDir, \"go\")\n\t\t\t\tgopath := st.conf.FilePathJoin(workDir, \"gopath\")\n\t\t\t\tfor !bc.IsBroken() {\n\t\t\t\t\ttis, ok := set.testsToRunBiggestFirst()\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tst.LogEventTime(\"no_new_tests_remain\", bc.Name())\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tst.runTestsOnBuildlet(bc, tis, goroot, gopath)\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"test_helper_is_broken\", bc.Name())\n\t\t\t}(helper)\n\t\t}\n\t}()\n\n\t// Convert a sync.WaitGroup into a channel.\n\t// Aside: https://groups.google.com/forum/#!topic/golang-dev/7fjGWuImu5k\n\tbuildletsGone := make(chan struct{})\n\tgo func() 
{\n\t\tbuildletActivity.Wait()\n\t\tclose(buildletsGone)\n\t}()\n\n\tvar lastMetadata string\n\tvar lastHeader string\n\tvar serialDuration time.Duration\n\tfor _, ti := range set.items {\n\tAwaitDone:\n\t\tfor {\n\t\t\ttimer := time.NewTimer(30 * time.Second)\n\t\t\tselect {\n\t\t\tcase <-ti.done: // wait for success\n\t\t\t\ttimer.Stop()\n\t\t\t\tbreak AwaitDone\n\t\t\tcase <-timer.C:\n\t\t\t\tst.LogEventTime(\"still_waiting_on_test\", ti.name.Old)\n\t\t\tcase <-buildletsGone:\n\t\t\t\tset.cancelAll()\n\t\t\t\treturn nil, errBuildletsGone\n\t\t\t}\n\t\t}\n\n\t\tserialDuration += ti.execDuration\n\t\tif len(ti.output) > 0 {\n\t\t\tmetadata, header, out := parseOutputAndHeader(ti.output)\n\t\t\tprintHeader := false\n\t\t\tif metadata != lastMetadata {\n\t\t\t\tlastMetadata = metadata\n\t\t\t\tfmt.Fprintf(st, \"\\n%s\\n\", metadata)\n\t\t\t\t// Always include the test header after\n\t\t\t\t// metadata changes. This is a readability\n\t\t\t\t// optimization that ensures that tests are\n\t\t\t\t// always immediately preceded by their test\n\t\t\t\t// banner, even if it is duplicate banner\n\t\t\t\t// because the test metadata changed.\n\t\t\t\tprintHeader = true\n\t\t\t}\n\t\t\tif header != lastHeader {\n\t\t\t\tlastHeader = header\n\t\t\t\tprintHeader = true\n\t\t\t}\n\t\t\tif printHeader {\n\t\t\t\tfmt.Fprintf(st, \"\\n%s\\n\", header)\n\t\t\t}\n\t\t\tif pool.NewGCEConfiguration().InStaging() {\n\t\t\t\tout = bytes.TrimSuffix(out, nl)\n\t\t\t\tst.Write(out)\n\t\t\t\tfmt.Fprintf(st, \" (shard %s; par=%d)\\n\", ti.shardIPPort, ti.groupSize)\n\t\t\t} else {\n\t\t\t\tst.Write(out)\n\t\t\t}\n\t\t}\n\n\t\tif ti.remoteErr != nil {\n\t\t\tset.cancelAll()\n\t\t\treturn fmt.Errorf(\"dist test failed: %s: %v\", ti.name, ti.remoteErr), nil\n\t\t}\n\t}\n\telapsed := time.Since(startTime)\n\tvar msg string\n\tif st.conf.NumTestHelpers(st.isTry()) > 0 {\n\t\tmsg = fmt.Sprintf(\"took %v; aggregate %v; saved %v\", elapsed, serialDuration, serialDuration-elapsed)\n\t} else {\n\t\tmsg = fmt.Sprintf(\"took %v\", elapsed)\n\t}\n\tst.LogEventTime(\"tests_complete\", msg)\n\tfmt.Fprintf(st, \"\\nAll tests passed.\\n\")\n\treturn nil, nil\n}",
"func RunTest(flags *Flags) error {\n\tswitch flags.Mode {\n\tcase constants.ManagerMode:\n\t\treturn workermanager.New().RunTest()\n\tcase constants.WorkerMode:\n\t\tslackURL := flags.SlackURL\n\t\tvar slacks []string\n\t\tif len(slackURL) > 0 {\n\t\t\tslacks = append(slacks, slackURL)\n\t\t}\n\t\treturn worker.NewWorker().RunTest(flags.Type, slacks)\n\t}\n\n\treturn nil\n}",
"func (c *EBSCommand) Run(args []string) int {\n\n\t// Decorate this CLI's UI\n\tc.Ui = &cli.PrefixedUi{\n\t\tOutputPrefix: \" \",\n\t\tInfoPrefix: \"INFO: \",\n\t\tErrorPrefix: \"ERROR: \",\n\t\tUi: c.Ui,\n\t}\n\n\t// Set the args which may have the mtest config\n\tc.args = args\n\n\t// Dependency Injection\n\tif !c.IsInitialized() {\n\t\terr := c.SetAll()\n\t\tif err != nil {\n\t\t\tc.Ui.Error(err.Error())\n\t\t\treturn 1\n\t\t}\n\t}\n\n\tif c.wtrVarsMake == nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Writer-variants-maker instance is nil.\"))\n\t\treturn 1\n\t}\n\n\t// Defer flush the gatedWriter which is linked to this\n\t// CLI's io.writer during Dependency Injection\n\tgatedLogger := c.wtrVarsMake.GatedWriter()\n\tif gatedLogger == nil {\n\t\treturn 1\n\t}\n\tdefer gatedLogger.Flush()\n\n\tif c.mtestMake == nil {\n\t\tc.Ui.Error(fmt.Sprintf(\"Mtest-maker instance is nil.\"))\n\t\treturn 1\n\t}\n\n\t// ebs cli is meant to run Maya Server\n\t// Get a Mtest instance that is associated with Maya Server\n\tmt, err := c.mtestMake.Make()\n\tif err != nil {\n\t\tc.Ui.Error(err.Error())\n\t\treturn 1\n\t}\n\n\t// Output the header that the server has started\n\tc.Ui.Output(\"Mtest ebs run started! Log data will start streaming:\\n\")\n\n\t// Start EBS use cases\n\trpts, err := mt.Start()\n\tdefer mt.Stop()\n\n\tif err != nil {\n\t\tc.Ui.Error(err.Error())\n\t\t// Exit code is set to 0 as this has nothing to do\n\t\t// with running of CLI. CLI execution was fine.\n\t\treturn 0\n\t}\n\n\tc.Ui.Info(fmt.Sprintf(\"%+s\", rpts))\n\n\treturn 0\n}",
"func runTest(test TestCase) TestResult {\n\t// cut = command under test\n\tcut := cmd.NewCommand(test.Command.Cmd)\n\tcut.SetTimeout(test.Command.Timeout)\n\tcut.Dir = test.Command.Dir\n\tfor k, v := range test.Command.Env {\n\t\tcut.AddEnv(k, v)\n\t}\n\n\tif err := cut.Execute(); err != nil {\n\t\tlog.Println(test.Title, \" failed \", err.Error())\n\t\ttest.Result = CommandResult{\n\t\t\tError: err,\n\t\t}\n\n\t\treturn TestResult{\n\t\t\tTestCase: test,\n\t\t}\n\t}\n\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Command: \", cut.Cmd)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Directory: \", cut.Dir)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Env: \", cut.Env)\n\n\t// Write test result\n\ttest.Result = CommandResult{\n\t\tExitCode: cut.ExitCode(),\n\t\tStdout: strings.Replace(cut.Stdout(), \"\\r\\n\", \"\\n\", -1),\n\t\tStderr: strings.Replace(cut.Stderr(), \"\\r\\n\", \"\\n\", -1),\n\t}\n\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" ExitCode: \", test.Result.ExitCode)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stdout: \", test.Result.Stdout)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stderr: \", test.Result.Stderr)\n\n\treturn Validate(test)\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testscript.RunMain(m, map[string]func() int{\n\t\t\"main\": main1,\n\t}))\n}"
] | [
"0.7075935",
"0.6968736",
"0.6907351",
"0.6861133",
"0.68104595",
"0.6731979",
"0.6706079",
"0.6689381",
"0.6647677",
"0.66201144",
"0.66130835",
"0.6571867",
"0.6571867",
"0.65591496",
"0.6545805",
"0.65347755",
"0.65086806",
"0.6502607",
"0.64886713",
"0.64882946",
"0.64779514",
"0.64617854",
"0.6450061",
"0.6447922",
"0.64404005",
"0.64197695",
"0.6418712",
"0.6416053",
"0.6406169",
"0.6405749",
"0.6404814",
"0.64021",
"0.6400328",
"0.63716024",
"0.6364809",
"0.6363124",
"0.6362761",
"0.63583744",
"0.6320356",
"0.6308738",
"0.63024354",
"0.62953717",
"0.6282222",
"0.62741154",
"0.62614423",
"0.62591743",
"0.62558836",
"0.62313384",
"0.62295455",
"0.6210504",
"0.6208913",
"0.6201418",
"0.6201418",
"0.6201418",
"0.6196835",
"0.6191751",
"0.618019",
"0.6175101",
"0.6170327",
"0.61687773",
"0.61617965",
"0.61617965",
"0.61617965",
"0.6139486",
"0.6130855",
"0.61289805",
"0.61231726",
"0.61142206",
"0.6113424",
"0.6109135",
"0.60902417",
"0.6079759",
"0.6072225",
"0.60668355",
"0.60626435",
"0.60561013",
"0.60450196",
"0.60437936",
"0.6040995",
"0.6030597",
"0.60240173",
"0.6015785",
"0.59806496",
"0.5977705",
"0.59699535",
"0.5964248",
"0.59622616",
"0.59488416",
"0.59465647",
"0.5945216",
"0.5943083",
"0.59362113",
"0.5931824",
"0.5926929",
"0.5915861",
"0.5890676",
"0.5889609",
"0.5885311",
"0.58836687",
"0.5882645",
"0.5878929"
] | 0.0 | -1 |
RunTests is an internal function but exported because it is cross-package; it is part of the implementation of the "go test" command. | func RunTests(matchString func(pat, str string) (bool, error), tests []InternalTest) (ok bool) {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func RunTests(opts Options) {\n\tif opts.Cleanup {\n\t\terr := CleanupTests(opts.Driver, opts.DSN, opts.Verbose)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"Cleanup failed: %s\\n\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t\tos.Exit(0)\n\t}\n\t_ = flag.Set(\"test.run\", opts.Match)\n\tif opts.Verbose {\n\t\t_ = flag.Set(\"test.v\", \"true\")\n\t}\n\ttests := []testing.InternalTest{\n\t\t{\n\t\t\tName: \"MainTest\",\n\t\t\tF: func(t *testing.T) {\n\t\t\t\tTest(t, opts.Driver, opts.DSN, opts.Suites, opts.RW)\n\t\t\t},\n\t\t},\n\t}\n\n\tmainStart(tests)\n}",
"func (o Scorecard) RunTests(ctx context.Context) (testOutput v1alpha3.Test, err error) {\n\n\terr = o.TestRunner.Initialize(ctx)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttests := o.selectTests()\n\tif len(tests) == 0 {\n\t\treturn testOutput, nil\n\t}\n\n\tfor _, test := range tests {\n\t\tresult, err := o.TestRunner.RunTest(ctx, test)\n\t\tif err != nil {\n\t\t\tresult = convertErrorToStatus(test.Name, err)\n\t\t}\n\t\ttestOutput.Status.Results = append(testOutput.Status.Results, result.Results...)\n\t}\n\n\tif !o.SkipCleanup {\n\t\terr = o.TestRunner.Cleanup(ctx)\n\t\tif err != nil {\n\t\t\treturn testOutput, err\n\t\t}\n\t}\n\treturn testOutput, nil\n}",
"func (runner TestSuiteRunner) RunTests(testNamesToRun map[string]bool, testParallelism uint) (allTestsPassed bool, executionErr error) {\n\tallTests := runner.testSuite.GetTests()\n\n\t// If the user doesn't specify any test names to run, run all of them\n\tif len(testNamesToRun) == 0 {\n\t\ttestNamesToRun = map[string]bool{}\n\t\tfor testName, _ := range allTests {\n\t\t\ttestNamesToRun[testName] = true\n\t\t}\n\t}\n\n\t// Validate all the requested tests exist\n\ttestsToRun := make(map[string]testsuite.Test)\n\tfor testName, _ := range testNamesToRun {\n\t\ttest, found := allTests[testName]\n\t\tif !found {\n\t\t\treturn false, stacktrace.NewError(\"No test registered with name '%v'\", testName)\n\t\t}\n\t\ttestsToRun[testName] = test\n\t}\n\n\texecutionInstanceId := uuid.Generate()\n\ttestParams, err := buildTestParams(executionInstanceId, testsToRun, runner.networkWidthBits)\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err, \"An error occurred building the test params map\")\n\t}\n\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn false, stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\")\n\t}\n\n\ttestExecutor := parallelism.NewTestExecutorParallelizer(\n\t\texecutionInstanceId,\n\t\tdockerClient,\n\t\trunner.testControllerImageName,\n\t\trunner.testControllerLogLevel,\n\t\trunner.customTestControllerEnvVars,\n\t\ttestParallelism)\n\n\tlogrus.Infof(\"Running %v tests with execution ID %v...\", len(testsToRun), executionInstanceId.String())\n\tallTestsPassed = testExecutor.RunInParallelAndPrintResults(testParams)\n\treturn allTestsPassed, nil\n}",
"func (t *TestRuntime) RunTests(m *testing.M) int {\n\treturn t.runTests(m, !testing.Verbose())\n}",
"func (sfs *SuiteFS) RunTests(t *testing.T, userName string, stFuncs ...SuiteTestFunc) {\n\tvfs := sfs.vfsSetup\n\n\t_, _ = sfs.User(t, userName)\n\tdefer sfs.User(t, sfs.initUser.Name())\n\n\tfor _, stFunc := range stFuncs {\n\t\tfuncName := runtime.FuncForPC(reflect.ValueOf(stFunc).Pointer()).Name()\n\t\tfuncName = funcName[strings.LastIndex(funcName, \".\")+1 : strings.LastIndex(funcName, \"-\")]\n\t\ttestDir := vfs.Join(sfs.rootDir, funcName)\n\n\t\tsfs.CreateTestDir(t, testDir)\n\n\t\tt.Run(funcName, func(t *testing.T) {\n\t\t\tstFunc(t, testDir)\n\t\t})\n\n\t\tsfs.RemoveTestDir(t, testDir)\n\t}\n}",
"func TestRun(t *testing.T) {\n\tRun()\n}",
"func RunTests(t *testing.T, svctest ServiceTest) {\n\tt.Run(\"NewSite\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestNewSite) })\n\tt.Run(\"DeleteSite\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestDeleteSite) })\n\tt.Run(\"WritePost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestWritePost) })\n\tt.Run(\"RemovePost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestRemovePost) })\n\tt.Run(\"ReadPost\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadPost) })\n\tt.Run(\"WriteConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestWriteConfig) })\n\tt.Run(\"ReadConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadConfig) })\n\tt.Run(\"UpdateAbout\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestUpdateAbout) })\n\tt.Run(\"ReadAbout\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestReadAbout) })\n\tt.Run(\"ChangeDefaultConfig\", func(t *testing.T) { clearEnvWrapper(t, svctest.TestChangeDefaultConfig) })\n}",
"func RunTests(m *testing.M, version *int) {\n\tflag.IntVar(version, \"v\", 0, \"The anwork version that should be used with these tests\")\n\tflag.Parse()\n\n\tif *version == 0 {\n\t\tpanic(\"Version (-v) must be passed with a legitimate anwork version number\")\n\t}\n\n\tos.Exit(m.Run())\n}",
"func TestRunMain(t *testing.T) {\n\tmain()\n}",
"func RunSubtests(ctx *Context) {\n\tfor name, fn := range tests {\n\t\tctx.Run(name, fn)\n\t}\n}",
"func RunTests(ctx context.Context, w io.Writer, path string) error {\n\tif path == \"\" || path == \".\" {\n\t\tvar err error\n\t\tpath, err = os.Getwd()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tfiles, err := getTestFiles(ctx, path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := range files {\n\t\terr = runFile(ctx, &files[i])\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr = Report(w, files)\n\treturn err\n}",
"func (ts *TestSuite) RunTests() {\n\n\tif len(ts.Tests) == 0 {\n\t\tout.Printf(\"No tests to run\\n\")\n\t\treturn\n\t}\n\n\tstartTime := time.Now()\n\n\t// setup search\n\ts := search.NewSearch()\n\tsl := search.NewSearchLimits()\n\tsl.MoveTime = ts.Time\n\tsl.Depth = ts.Depth\n\tif sl.MoveTime > 0 {\n\t\tsl.TimeControl = true\n\t}\n\n\tout.Printf(\"Running Test Suite\\n\")\n\tout.Printf(\"==================================================================\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"No of tests: %d\\n\", len(ts.Tests))\n\tout.Println()\n\n\t// execute all tests and store results in the\n\t// test instance\n\tfor i, t := range ts.Tests {\n\t\tout.Printf(\"Test %d of %d\\nTest: %s -- Target Result %s\\n\", i+1, len(ts.Tests), t.line, t.targetMoves.StringUci())\n\t\tstartTime2 := time.Now()\n\t\trunSingleTest(s, sl, t)\n\t\telapsedTime := time.Since(startTime2)\n\t\tt.nodes = s.NodesVisited()\n\t\tt.time = s.LastSearchResult().SearchTime\n\t\tt.nps = util.Nps(s.NodesVisited(), s.LastSearchResult().SearchTime)\n\t\tout.Printf(\"Test finished in %d ms with result %s (%s) - nps: %d\\n\\n\",\n\t\t\telapsedTime.Milliseconds(), t.rType.String(), t.actual.StringUci(), t.nps)\n\t}\n\n\t// sum up result for report\n\ttr := &SuiteResult{}\n\tfor _, t := range ts.Tests {\n\t\ttr.Counter++\n\t\tswitch t.rType {\n\t\tcase NotTested:\n\t\t\ttr.NotTestedCounter++\n\t\tcase Skipped:\n\t\t\ttr.SkippedCounter++\n\t\tcase Failed:\n\t\t\ttr.FailedCounter++\n\t\tcase Success:\n\t\t\ttr.SuccessCounter++\n\t\t}\n\t\ttr.Nodes += t.nodes\n\t\ttr.Time += t.time\n\t}\n\tts.LastResult = tr\n\n\telapsed := time.Since(startTime)\n\n\t// print report\n\tout.Printf(\"Results for Test Suite\\n\", ts.FilePath)\n\tout.Printf(\"------------------------------------------------------------------------------------------------------------------------------------\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\" %-4s | %-10s | %-8s | %-8s | %-15s | %s | %s\\n\", \" Nr.\", \"Result\", \"Move\", \"Value\", \"Expected Result\", \"Fen\", \"Id\")\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tfor i, t := range ts.Tests {\n\t\tif t.tType == DM {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s%-15d | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), \"dm \", t.mateDepth, t.fen, t.id)\n\t\t} else {\n\t\t\tout.Printf(\" %-4d | %-10s | %-8s | %-8s | %s %-15s | %s | %s\\n\",\n\t\t\t\ti+1, t.rType.String(), t.actual.StringUci(), t.value.String(), t.tType.String(), t.targetMoves.StringUci(), t.fen, t.id)\n\t\t}\n\t}\n\tout.Printf(\"====================================================================================================================================\\n\")\n\tout.Printf(\"Summary:\\n\")\n\tout.Printf(\"EPD File: %s\\n\", ts.FilePath)\n\tout.Printf(\"SearchTime: %d ms\\n\", ts.Time.Milliseconds())\n\tout.Printf(\"MaxDepth: %d\\n\", 
ts.Depth)\n\tout.Printf(\"Date: %s\\n\", time.Now().Local())\n\tout.Printf(\"Successful: %-3d (%d %%)\\n\", tr.SuccessCounter, 100*tr.SuccessCounter/tr.Counter)\n\tout.Printf(\"Failed: %-3d (%d %%)\\n\", tr.FailedCounter, 100*tr.FailedCounter/tr.Counter)\n\tout.Printf(\"Skipped: %-3d (%d %%)\\n\", tr.SkippedCounter, 100*tr.SkippedCounter/tr.Counter)\n\tout.Printf(\"Not tested: %-3d (%d %%)\\n\", tr.NotTestedCounter, 100*tr.NotTestedCounter/tr.Counter)\n\tout.Printf(\"Test time: %s\\n\", elapsed)\n\tout.Printf(\"Configuration: %s\\n\", config.Settings.String())\n}",
"func RunTestsInternal(ctx *kt.Context, suite string) {\n\tconf, ok := suites[suite]\n\tif !ok {\n\t\tctx.Skipf(\"No configuration found for suite '%s'\", suite)\n\t}\n\tctx.Config = conf\n\t// This is run as a sub-test so configuration will work nicely.\n\tctx.Run(\"PreCleanup\", func(ctx *kt.Context) {\n\t\tctx.RunAdmin(func(ctx *kt.Context) {\n\t\t\tcount, err := doCleanup(ctx.Admin, true)\n\t\t\tif count > 0 {\n\t\t\t\tctx.Logf(\"Pre-cleanup removed %d databases from previous test runs\", count)\n\t\t\t}\n\t\t\tif err != nil {\n\t\t\t\tctx.Fatalf(\"Pre-cleanup failed: %s\", err)\n\t\t\t}\n\t\t})\n\t})\n\tkt.RunSubtests(ctx)\n}",
"func RunBuiltinTests(t *testing.T, resourceType string) {\n\t// Get a list of all test cases\n\tbox := packr.NewBox(\"./assets/\" + resourceType)\n\tfilesInBox := box.List()\n\tfor _, file := range filesInBox {\n\t\tif isTestCase(file) {\n\t\t\tabsolutePath, _ := filepath.Abs(\"./assets/\" + resourceType + \"/\" + file)\n\t\t\tts, err := loadTestSuite(absolutePath)\n\t\t\tif err != nil {\n\t\t\t\tassert.Nil(t, err, \"Cannot load test case\")\n\t\t\t}\n\t\t\trunTestSuite(t, ts)\n\t\t}\n\t}\n}",
"func (o Scorecard) RunTests() (testOutput v1alpha2.ScorecardOutput, err error) {\n\ttests := selectTests(o.Selector, o.Config.Tests)\n\tif len(tests) == 0 {\n\t\tfmt.Println(\"no tests selected\")\n\t\treturn testOutput, err\n\t}\n\n\tbundleData, err := getBundleData(o.BundlePath)\n\tif err != nil {\n\t\treturn testOutput, fmt.Errorf(\"error getting bundle data %w\", err)\n\t}\n\n\t// create a ConfigMap holding the bundle contents\n\to.bundleConfigMap, err = createConfigMap(o, bundleData)\n\tif err != nil {\n\t\treturn testOutput, fmt.Errorf(\"error creating ConfigMap %w\", err)\n\t}\n\n\tfor i, test := range tests {\n\t\tvar err error\n\t\ttests[i].TestPod, err = o.runTest(test)\n\t\tif err != nil {\n\t\t\treturn testOutput, fmt.Errorf(\"test %s failed %w\", test.Name, err)\n\t\t}\n\t}\n\n\tif !o.SkipCleanup {\n\t\tdefer deletePods(o.Client, tests)\n\t\tdefer deleteConfigMap(o.Client, o.bundleConfigMap)\n\t}\n\n\terr = o.waitForTestsToComplete(tests)\n\tif err != nil {\n\t\treturn testOutput, err\n\t}\n\n\ttestOutput = getTestResults(o.Client, tests)\n\n\treturn testOutput, err\n}",
"func RunUnitTest(cobraCmd *cobra.Command, args []string) {\n\terr := CommandWithStdout(\"go\", \"test\", \"./...\").Run()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}",
"func (d *Driver) runTests(ctx context.Context, bundle string, tests []*protocol.ResolvedEntity, dutInfos map[string]*protocol.DUTInfo, client *reporting.RPCClient, remoteDevservers []string) ([]*resultsjson.Result, error) {\n\n\targs := &runTestsArgs{\n\t\tDUTInfo: dutInfos,\n\t\tCounter: failfast.NewCounter(d.cfg.MaxTestFailures()),\n\t\tClient: client,\n\t\tRemoteDevservers: remoteDevservers,\n\t\tSwarmingTaskID: d.cfg.SwarmingTaskID(),\n\t\tBuildBucketID: d.cfg.BuildBucketID(),\n\t}\n\n\tif !ShouldRunTestsRecursively() {\n\t\tlocalTests, remoteTests, err := splitTests(tests)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// Note: These methods can return non-nil results even on errors.\n\t\tlocalResults, err := d.runLocalTests(ctx, bundle, localTests, args)\n\t\tif err != nil {\n\t\t\treturn localResults, err\n\t\t}\n\t\tvar remoteTestNames []string\n\t\tfor _, t := range remoteTests {\n\t\t\tremoteTestNames = append(remoteTestNames, t.GetEntity().GetName())\n\t\t}\n\t\tremoteResults, err := d.runRemoteTests(ctx, bundle, remoteTestNames, args)\n\n\t\treturn append(localResults, remoteResults...), err\n\t}\n\tvar testNames []string\n\tfor _, t := range tests {\n\t\ttestNames = append(testNames, t.GetEntity().GetName())\n\t}\n\treturn d.runRemoteTests(ctx, bundle, testNames, args)\n}",
"func (s MockInputsBoolsHelper) RunTests(t testRunner, testSet []bool, testFunc func(t *testing.T, index int, f bool)) {\n\tif test, ok := t.(helper); ok {\n\t\ttest.Helper()\n\t}\n\n\ttest := internal.GetTest(t)\n\tif test == nil {\n\t\tt.Error(internal.ErrCanNotRunIfNotBuiltinTesting)\n\t\treturn\n\t}\n\n\tfor i, v := range testSet {\n\t\ttest.Run(fmt.Sprint(v), func(t *testing.T) {\n\t\t\tt.Helper()\n\n\t\t\ttestFunc(t, i, v)\n\t\t})\n\t}\n}",
"func executeTests(t *testing.T, tests ...testExecution) {\n\tctx := setupTestRequirements(t)\n\tdefer ctx.Cleanup()\n\n\tsetupComplianceOperatorCluster(t, ctx)\n\n\t// get global framework variables\n\tf := framework.Global\n\n\tns, err := ctx.GetNamespace()\n\tif err != nil {\n\t\tt.Fatalf(\"could not get namespace: %v\", err)\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tif err := test.TestFn(t, f, ctx, ns); err != nil {\n\t\t\t\tt.Error(err)\n\t\t\t}\n\t\t})\n\n\t}\n}",
"func (st *buildStatus) runTests(helpers <-chan buildlet.Client) (remoteErr, err error) {\n\ttestNames, remoteErr, err := st.distTestList()\n\tif remoteErr != nil {\n\t\treturn fmt.Errorf(\"distTestList remote: %v\", remoteErr), nil\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"distTestList exec: %v\", err)\n\t}\n\ttestStats := getTestStats(st)\n\n\tset, err := st.newTestSet(testStats, testNames)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tst.LogEventTime(\"starting_tests\", fmt.Sprintf(\"%d tests\", len(set.items)))\n\tstartTime := time.Now()\n\n\tworkDir, err := st.bc.WorkDir(st.ctx)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error discovering workdir for main buildlet, %s: %v\", st.bc.Name(), err)\n\t}\n\n\tmainBuildletGoroot := st.conf.FilePathJoin(workDir, \"go\")\n\tmainBuildletGopath := st.conf.FilePathJoin(workDir, \"gopath\")\n\n\t// We use our original buildlet to run the tests in order, to\n\t// make the streaming somewhat smooth and not incredibly\n\t// lumpy. The rest of the buildlets run the largest tests\n\t// first (critical path scheduling).\n\t// The buildletActivity WaitGroup is used to track when all\n\t// the buildlets are dead or done.\n\tvar buildletActivity sync.WaitGroup\n\tbuildletActivity.Add(2) // one per goroutine below (main + helper launcher goroutine)\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor !st.bc.IsBroken() {\n\t\t\ttis, ok := set.testsToRunInOrder()\n\t\t\tif !ok {\n\t\t\t\tselect {\n\t\t\t\tcase <-st.ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tst.runTestsOnBuildlet(st.bc, tis, mainBuildletGoroot, mainBuildletGopath)\n\t\t}\n\t\tst.LogEventTime(\"main_buildlet_broken\", st.bc.Name())\n\t}()\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor helper := range helpers {\n\t\t\tbuildletActivity.Add(1)\n\t\t\tgo func(bc buildlet.Client) {\n\t\t\t\tdefer buildletActivity.Done() // for the per-helper Add(1) above\n\t\t\t\tdefer st.LogEventTime(\"closed_helper\", bc.Name())\n\t\t\t\tdefer bc.Close()\n\t\t\t\tif devPause {\n\t\t\t\t\tdefer time.Sleep(5 * time.Minute)\n\t\t\t\t\tdefer st.LogEventTime(\"DEV_HELPER_SLEEP\", bc.Name())\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"got_empty_test_helper\", bc.String())\n\t\t\t\tif err := bc.PutTarFromURL(st.ctx, st.SnapshotURL(pool.NewGCEConfiguration().BuildEnv()), \"go\"); err != nil {\n\t\t\t\t\tlog.Printf(\"failed to extract snapshot for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tworkDir, err := bc.WorkDir(st.ctx)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error discovering workdir for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"test_helper_set_up\", bc.Name())\n\t\t\t\tgoroot := st.conf.FilePathJoin(workDir, \"go\")\n\t\t\t\tgopath := st.conf.FilePathJoin(workDir, \"gopath\")\n\t\t\t\tfor !bc.IsBroken() {\n\t\t\t\t\ttis, ok := set.testsToRunBiggestFirst()\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tst.LogEventTime(\"no_new_tests_remain\", bc.Name())\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tst.runTestsOnBuildlet(bc, tis, goroot, gopath)\n\t\t\t\t}\n\t\t\t\tst.LogEventTime(\"test_helper_is_broken\", bc.Name())\n\t\t\t}(helper)\n\t\t}\n\t}()\n\n\t// Convert a sync.WaitGroup into a channel.\n\t// Aside: https://groups.google.com/forum/#!topic/golang-dev/7fjGWuImu5k\n\tbuildletsGone := make(chan struct{})\n\tgo func() 
{\n\t\tbuildletActivity.Wait()\n\t\tclose(buildletsGone)\n\t}()\n\n\tvar lastMetadata string\n\tvar lastHeader string\n\tvar serialDuration time.Duration\n\tfor _, ti := range set.items {\n\tAwaitDone:\n\t\tfor {\n\t\t\ttimer := time.NewTimer(30 * time.Second)\n\t\t\tselect {\n\t\t\tcase <-ti.done: // wait for success\n\t\t\t\ttimer.Stop()\n\t\t\t\tbreak AwaitDone\n\t\t\tcase <-timer.C:\n\t\t\t\tst.LogEventTime(\"still_waiting_on_test\", ti.name.Old)\n\t\t\tcase <-buildletsGone:\n\t\t\t\tset.cancelAll()\n\t\t\t\treturn nil, errBuildletsGone\n\t\t\t}\n\t\t}\n\n\t\tserialDuration += ti.execDuration\n\t\tif len(ti.output) > 0 {\n\t\t\tmetadata, header, out := parseOutputAndHeader(ti.output)\n\t\t\tprintHeader := false\n\t\t\tif metadata != lastMetadata {\n\t\t\t\tlastMetadata = metadata\n\t\t\t\tfmt.Fprintf(st, \"\\n%s\\n\", metadata)\n\t\t\t\t// Always include the test header after\n\t\t\t\t// metadata changes. This is a readability\n\t\t\t\t// optimization that ensures that tests are\n\t\t\t\t// always immediately preceded by their test\n\t\t\t\t// banner, even if it is duplicate banner\n\t\t\t\t// because the test metadata changed.\n\t\t\t\tprintHeader = true\n\t\t\t}\n\t\t\tif header != lastHeader {\n\t\t\t\tlastHeader = header\n\t\t\t\tprintHeader = true\n\t\t\t}\n\t\t\tif printHeader {\n\t\t\t\tfmt.Fprintf(st, \"\\n%s\\n\", header)\n\t\t\t}\n\t\t\tif pool.NewGCEConfiguration().InStaging() {\n\t\t\t\tout = bytes.TrimSuffix(out, nl)\n\t\t\t\tst.Write(out)\n\t\t\t\tfmt.Fprintf(st, \" (shard %s; par=%d)\\n\", ti.shardIPPort, ti.groupSize)\n\t\t\t} else {\n\t\t\t\tst.Write(out)\n\t\t\t}\n\t\t}\n\n\t\tif ti.remoteErr != nil {\n\t\t\tset.cancelAll()\n\t\t\treturn fmt.Errorf(\"dist test failed: %s: %v\", ti.name, ti.remoteErr), nil\n\t\t}\n\t}\n\telapsed := time.Since(startTime)\n\tvar msg string\n\tif st.conf.NumTestHelpers(st.isTry()) > 0 {\n\t\tmsg = fmt.Sprintf(\"took %v; aggregate %v; saved %v\", elapsed, serialDuration, serialDuration-elapsed)\n\t} else {\n\t\tmsg = fmt.Sprintf(\"took %v\", elapsed)\n\t}\n\tst.LogEventTime(\"tests_complete\", msg)\n\tfmt.Fprintf(st, \"\\nAll tests passed.\\n\")\n\treturn nil, nil\n}",
"func (test Test) Run(t *testing.T) {\n\tt.Logf(\"Starting test %v\", t.Name())\n\tt.Helper()\n\t// Double negative cannot be helped, this is intended to mitigate test failures where a global\n\t// resource is manipulated, e.g.: the default AWS security group.\n\tif !test.RunOptions.NoParallel {\n\t\tt.Parallel()\n\t}\n\tt.Run(\"Python\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.Python != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.Python)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"python\",\n\t\t\truntime: \"python\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n\tt.Run(\"TypeScript\", func(t *testing.T) {\n\t\trunOpts := integration.ProgramTestOptions{}\n\t\tif test.RunOptions != nil {\n\t\t\trunOpts = *test.RunOptions\n\t\t}\n\t\tconvertOpts := test.Options\n\t\tif test.TypeScript != nil {\n\t\t\tconvertOpts = convertOpts.With(*test.TypeScript)\n\t\t}\n\n\t\ttargetTest := targetTest{\n\t\t\trunOpts: &runOpts,\n\t\t\tconvertOpts: &convertOpts,\n\t\t\tprojectName: test.ProjectName,\n\t\t\tlanguage: \"typescript\",\n\t\t\truntime: \"nodejs\",\n\t\t}\n\t\ttargetTest.Run(t)\n\t})\n}",
"func RunTest(flags *Flags) error {\n\tswitch flags.Mode {\n\tcase constants.ManagerMode:\n\t\treturn workermanager.New().RunTest()\n\tcase constants.WorkerMode:\n\t\tslackURL := flags.SlackURL\n\t\tvar slacks []string\n\t\tif len(slackURL) > 0 {\n\t\t\tslacks = append(slacks, slackURL)\n\t\t}\n\t\treturn worker.NewWorker().RunTest(flags.Type, slacks)\n\t}\n\n\treturn nil\n}",
"func (Tests) Run(ctx context.Context) error {\n\targ := BuildDockerComposeArgs(ProjectName, ProjectType, \"test\", DockerComposeTestFile)\n\targ = append(arg, \"run\")\n\targ = append(arg,\n\t\t\"--rm\",\n\t\t\"--use-aliases\",\n\t)\n\targ = append(arg, \"app\", \"go\", \"test\", \"-mod=vendor\", \"-v\", \"-cover\")\n\tif err := Exec(ComposeBin, append(arg, \"./service\")...); err != nil {\n\t\treturn err\n\t}\n\tif err := Exec(ComposeBin, append(arg, \"./...\")...); err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}",
"func (c *Command) Run(args []string) {\n\tflag.StringVar(&c.Filter, \"f\", \"\", \"regexp to filter tests by name\")\n\tflag.BoolVar(&c.Verbose, \"v\", false, \"print all test names\")\n\tcheck(flag.CommandLine.Parse(args))\n\targs = flag.Args()\n\n\tif len(args) == 0 {\n\t\targs = []string{\".\"}\n\t}\n\n\tokPath, err := util.OKPath()\n\tcheck(err)\n\n\tfor _, arg := range args {\n\t\tpackageName := util.PackageNameFromPath(okPath, arg)\n\t\tif arg == \".\" {\n\t\t\tpackageName = \".\"\n\t\t}\n\t\tanonFunctionName := 0\n\t\tf, _, errs := compiler.Compile(okPath, packageName, true,\n\t\t\t&anonFunctionName, false)\n\t\tutil.CheckErrorsWithExit(errs)\n\n\t\tm := vm.NewVM(\"no-package\")\n\t\tstartTime := time.Now()\n\t\tcheck(m.LoadFile(f))\n\t\terr := m.RunTests(c.Verbose, regexp.MustCompile(c.Filter), packageName)\n\t\telapsed := time.Since(startTime).Milliseconds()\n\t\tcheck(err)\n\n\t\tassertWord := pluralise(\"assert\", m.TotalAssertions)\n\t\tif m.TestsFailed > 0 {\n\t\t\tfmt.Printf(\"%s: %d failed %d passed %d %s (%d ms)\\n\",\n\t\t\t\tpackageName, m.TestsFailed, m.TestsPass,\n\t\t\t\tm.TotalAssertions, assertWord, elapsed)\n\t\t} else {\n\t\t\tfmt.Printf(\"%s: %d passed %d %s (%d ms)\\n\",\n\t\t\t\tpackageName, m.TestsPass,\n\t\t\t\tm.TotalAssertions, assertWord, elapsed)\n\t\t}\n\n\t\tif m.TestsFailed > 0 {\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n}",
"func TestAll() error {\n\tout, err := sh.Output(\"go\", \"test\", \"./...\")\n\tif err != nil {\n\t\treturn err\n\t}\n\tlog.Print(out)\n\treturn nil\n}",
"func RunTests(t *testing.T, tests map[string]SubTest) {\n\tfor name, test := range tests {\n\t\tdomainKeeper, ctx, mocks := NewTestKeeper(t, true)\n\t\t// set default mock.Supply not to fail\n\t\tmocks.Supply.SetSendCoinsFromAccountToModule(func(ctx types.Context, addr types.AccAddress, moduleName string, coins types.Coins) error {\n\t\t\treturn nil\n\t\t})\n\t\t// set default fees\n\t\tsetFees := domainKeeper.ConfigurationKeeper.(ConfigurationSetter).SetFees\n\t\tfees := configuration.NewFees()\n\t\tfees.SetDefaults(\"testcoin\")\n\t\tsetFees(ctx, fees)\n\t\t// run sub SubTest\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\t// run before SubTest\n\t\t\tif test.BeforeTest != nil {\n\t\t\t\tif test.BeforeTestBlockTime != 0 {\n\t\t\t\t\tt := time.Unix(test.BeforeTestBlockTime, 0)\n\t\t\t\t\tctx = ctx.WithBlockTime(t)\n\t\t\t\t}\n\t\t\t\ttest.BeforeTest(t, domainKeeper, ctx, mocks)\n\t\t\t}\n\n\t\t\tif test.TestBlockTime != 0 {\n\t\t\t\tt := time.Unix(test.TestBlockTime, 0)\n\t\t\t\tctx = ctx.WithBlockTime(t)\n\t\t\t}\n\t\t\t// run actual SubTest\n\t\t\ttest.Test(t, domainKeeper, ctx, mocks)\n\n\t\t\t// run after SubTest\n\t\t\tif test.AfterTest != nil {\n\t\t\t\tif test.AfterTestBlockTime != 0 {\n\t\t\t\t\tt := time.Unix(test.AfterTestBlockTime, 0)\n\t\t\t\t\tctx = ctx.WithBlockTime(t)\n\t\t\t\t}\n\t\t\t\ttest.AfterTest(t, domainKeeper, ctx, mocks)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (st *buildStatus) runTests(helpers <-chan *buildlet.Client) (remoteErr, err error) {\n\ttestNames, err := st.distTestList()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"distTestList: %v\", err)\n\t}\n\tset := st.newTestSet(testNames)\n\tst.logEventTime(\"starting_tests\", fmt.Sprintf(\"%d tests\", len(set.items)))\n\tstartTime := time.Now()\n\n\tworkDir, err := st.bc.WorkDir()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error discovering workdir for main buildlet, %s: %v\", st.bc.Name(), err)\n\t}\n\tmainBuildletGoroot := st.conf.FilePathJoin(workDir, \"go\")\n\n\t// We use our original buildlet to run the tests in order, to\n\t// make the streaming somewhat smooth and not incredibly\n\t// lumpy. The rest of the buildlets run the largest tests\n\t// first (critical path scheduling).\n\t// The buildletActivity WaitGroup is used to track when all\n\t// the buildlets are dead or done.\n\tvar buildletActivity sync.WaitGroup\n\tbuildletActivity.Add(2) // one per goroutine below (main + helper launcher goroutine)\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor !st.bc.IsBroken() {\n\t\t\ttis, ok := set.testsToRunInOrder()\n\t\t\tif !ok {\n\t\t\t\tselect {\n\t\t\t\tcase <-st.ctx.Done():\n\t\t\t\t\treturn\n\t\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\t}\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tst.runTestsOnBuildlet(st.bc, tis, mainBuildletGoroot)\n\t\t}\n\t\tst.logEventTime(\"main_buildlet_broken\", st.bc.Name())\n\t}()\n\tgo func() {\n\t\tdefer buildletActivity.Done() // for the per-goroutine Add(2) above\n\t\tfor helper := range helpers {\n\t\t\tbuildletActivity.Add(1)\n\t\t\tgo func(bc *buildlet.Client) {\n\t\t\t\tdefer buildletActivity.Done() // for the per-helper Add(1) above\n\t\t\t\tdefer st.logEventTime(\"closed_helper\", bc.Name())\n\t\t\t\tdefer bc.Close()\n\t\t\t\tif devPause {\n\t\t\t\t\tdefer time.Sleep(5 * time.Minute)\n\t\t\t\t\tdefer st.logEventTime(\"DEV_HELPER_SLEEP\", bc.Name())\n\t\t\t\t}\n\t\t\t\tst.logEventTime(\"got_empty_test_helper\", bc.String())\n\t\t\t\tif err := bc.PutTarFromURL(st.snapshotURL(), \"go\"); err != nil {\n\t\t\t\t\tlog.Printf(\"failed to extract snapshot for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tworkDir, err := bc.WorkDir()\n\t\t\t\tif err != nil {\n\t\t\t\t\tlog.Printf(\"error discovering workdir for helper %s: %v\", bc.Name(), err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tst.logEventTime(\"test_helper_set_up\", bc.Name())\n\t\t\t\tgoroot := st.conf.FilePathJoin(workDir, \"go\")\n\t\t\t\tfor !bc.IsBroken() {\n\t\t\t\t\ttis, ok := set.testsToRunBiggestFirst()\n\t\t\t\t\tif !ok {\n\t\t\t\t\t\tst.logEventTime(\"no_new_tests_remain\", bc.Name())\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t\tst.runTestsOnBuildlet(bc, tis, goroot)\n\t\t\t\t}\n\t\t\t\tst.logEventTime(\"test_helper_is_broken\", bc.Name())\n\t\t\t}(helper)\n\t\t}\n\t}()\n\n\t// Convert a sync.WaitGroup into a channel.\n\t// Aside: https://groups.google.com/forum/#!topic/golang-dev/7fjGWuImu5k\n\tbuildletsGone := make(chan struct{})\n\tgo func() {\n\t\tbuildletActivity.Wait()\n\t\tclose(buildletsGone)\n\t}()\n\n\tvar lastBanner string\n\tvar serialDuration time.Duration\n\tfor _, ti := range set.items {\n\tAwaitDone:\n\t\tfor {\n\t\t\ttimer := time.NewTimer(30 * time.Second)\n\t\t\tselect {\n\t\t\tcase <-ti.done: // wait for success\n\t\t\t\ttimer.Stop()\n\t\t\t\tbreak AwaitDone\n\t\t\tcase <-timer.C:\n\t\t\t\tst.logEventTime(\"still_waiting_on_test\", ti.name)\n\t\t\tcase 
<-buildletsGone:\n\t\t\t\tset.cancelAll()\n\t\t\t\treturn nil, fmt.Errorf(\"dist test failed: all buildlets had network errors or timeouts, yet tests remain\")\n\t\t\t}\n\t\t}\n\n\t\tserialDuration += ti.execDuration\n\t\tif len(ti.output) > 0 {\n\t\t\tbanner, out := parseOutputAndBanner(ti.output)\n\t\t\tif banner != lastBanner {\n\t\t\t\tlastBanner = banner\n\t\t\t\tfmt.Fprintf(st, \"\\n##### %s\\n\", banner)\n\t\t\t}\n\t\t\tif inStaging {\n\t\t\t\tout = bytes.TrimSuffix(out, nl)\n\t\t\t\tst.Write(out)\n\t\t\t\tfmt.Fprintf(st, \" (shard %s; par=%d)\\n\", ti.shardIPPort, ti.groupSize)\n\t\t\t} else {\n\t\t\t\tst.Write(out)\n\t\t\t}\n\t\t}\n\n\t\tif ti.remoteErr != nil {\n\t\t\tset.cancelAll()\n\t\t\treturn fmt.Errorf(\"dist test failed: %s: %v\", ti.name, ti.remoteErr), nil\n\t\t}\n\t}\n\telapsed := time.Since(startTime)\n\tvar msg string\n\tif st.conf.NumTestHelpers > 0 {\n\t\tmsg = fmt.Sprintf(\"took %v; aggregate %v; saved %v\", elapsed, serialDuration, serialDuration-elapsed)\n\t} else {\n\t\tmsg = fmt.Sprintf(\"took %v\", elapsed)\n\t}\n\tst.logEventTime(\"tests_complete\", msg)\n\tfmt.Fprintf(st, \"\\nAll tests passed.\\n\")\n\treturn nil, nil\n}",
"func doTests(t *testing.T, tests []string) {\n\tdoTestsParam(t, tests, TestParams{\n\t\textensions: parser.CommonExtensions,\n\t})\n}",
"func runTests(c *C, overrider configOverrider, tests ...func(dbt *DBTest)) {\n\tdb, err := sql.Open(\"mysql\", getDSN(overrider))\n\tc.Assert(err, IsNil, Commentf(\"Error connecting\"))\n\tdefer db.Close()\n\n\tdb.Exec(\"DROP TABLE IF EXISTS test\")\n\n\tdbt := &DBTest{c, db}\n\tfor _, test := range tests {\n\t\ttest(dbt)\n\t\tdbt.db.Exec(\"DROP TABLE IF EXISTS test\")\n\t}\n}",
"func runTests(inv *logstash.Invocation, tests []testcase.TestCaseSet, diffCommand []string, keptEnvVars []string) error {\n\tok := true\n\tfor _, t := range tests {\n\t\tfmt.Printf(\"Running tests in %s...\\n\", filepath.Base(t.File))\n\t\tp, err := logstash.NewProcess(inv, t.Codec, t.InputFields, keptEnvVars)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer p.Release()\n\t\tif err = p.Start(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfor _, line := range t.InputLines {\n\t\t\t_, err = p.Input.Write([]byte(line + \"\\n\"))\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t\tif err = p.Input.Close(); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tresult, err := p.Wait()\n\t\tif err != nil || *logstashOutput {\n\t\t\tmessage := getLogstashOutputMessage(result.Output, result.Log)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"Error running Logstash: %s.%s\", err, message)\n\t\t\t}\n\t\t\tuserError(\"%s\", message)\n\t\t}\n\t\tif err = t.Compare(result.Events, false, diffCommand); err != nil {\n\t\t\tuserError(\"Testcase failed, continuing with the rest: %s\", err)\n\t\t\tok = false\n\t\t}\n\t}\n\tif !ok {\n\t\treturn errors.New(\"one or more testcases failed\")\n\t}\n\treturn nil\n}",
"func runTests(t *testing.T, tests []test) {\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresp := executeRequest(tt.method, tt.url, serialize(tt.req), tt.asAdmin)\n\t\t\tif resp.StatusCode != tt.want {\n\t\t\t\tt.Errorf(\"Unexpected status code %d\", resp.StatusCode)\n\t\t\t}\n\n\t\t\tif tt.body != \"\" {\n\t\t\t\tbodyBytes, err := ioutil.ReadAll(resp.Body)\n\t\t\t\tdefer resp.Body.Close()\n\t\t\t\tif err != nil {\n\t\t\t\t\tt.Errorf(\"Error loading body\")\n\t\t\t\t}\n\t\t\t\tif tt.body != string(bodyBytes) {\n\t\t\t\t\tt.Errorf(\"Unexpected body '%s', expected '%s'\", bodyBytes, tt.body)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}",
"func (st *buildStatus) runTestsOnBuildlet(bc *buildlet.Client, tis []*testItem, goroot string) {\n\tnames := make([]string, len(tis))\n\tfor i, ti := range tis {\n\t\tnames[i] = ti.name\n\t\tif i > 0 && !strings.HasPrefix(ti.name, \"go_test:\") {\n\t\t\tpanic(\"only go_test:* tests may be merged\")\n\t\t}\n\t}\n\twhich := fmt.Sprintf(\"%s: %v\", bc.Name(), names)\n\tsp := st.createSpan(\"start_tests\", which)\n\n\targs := []string{\"tool\", \"dist\", \"test\", \"--no-rebuild\", \"--banner=\" + banner}\n\tif st.conf.IsRace() {\n\t\targs = append(args, \"--race\")\n\t}\n\tif st.conf.CompileOnly {\n\t\targs = append(args, \"--compile-only\")\n\t}\n\targs = append(args, names...)\n\tvar buf bytes.Buffer\n\tt0 := time.Now()\n\ttimeout := execTimeout(names)\n\tremoteErr, err := bc.Exec(path.Join(\"go\", \"bin\", \"go\"), buildlet.ExecOpts{\n\t\t// We set Dir to \".\" instead of the default (\"go/bin\") so when the dist tests\n\t\t// try to run os/exec.Command(\"go\", \"test\", ...), the LookPath of \"go\" doesn't\n\t\t// return \"./go.exe\" (which exists in the current directory: \"go/bin\") and then\n\t\t// fail when dist tries to run the binary in dir \"$GOROOT/src\", since\n\t\t// \"$GOROOT/src\" + \"./go.exe\" doesn't exist. Perhaps LookPath should return\n\t\t// an absolute path.\n\t\tDir: \".\",\n\t\tOutput: &buf, // see \"maybe stream lines\" TODO below\n\t\tExtraEnv: append(st.conf.Env(), \"GOROOT=\"+goroot),\n\t\tTimeout: timeout,\n\t\tPath: []string{\"$WORKDIR/go/bin\", \"$PATH\"},\n\t\tArgs: args,\n\t})\n\texecDuration := time.Since(t0)\n\tsp.done(err)\n\tif err != nil {\n\t\tbc.MarkBroken() // prevents reuse\n\t\tfor _, ti := range tis {\n\t\t\tti.numFail++\n\t\t\tst.logf(\"Execution error running %s on %s: %v (numFails = %d)\", ti.name, bc, err, ti.numFail)\n\t\t\tif err == buildlet.ErrTimeout {\n\t\t\t\tti.failf(\"Test %q ran over %v limit (%v)\", ti.name, timeout, execDuration)\n\t\t\t} else if ti.numFail >= maxTestExecErrors {\n\t\t\t\tti.failf(\"Failed to schedule %q test after %d tries.\\n\", ti.name, maxTestExecErrors)\n\t\t\t} else {\n\t\t\t\tti.retry()\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tout := buf.Bytes()\n\tout = bytes.Replace(out, []byte(\"\\nALL TESTS PASSED (some were excluded)\\n\"), nil, 1)\n\tout = bytes.Replace(out, []byte(\"\\nALL TESTS PASSED\\n\"), nil, 1)\n\n\tfor _, ti := range tis {\n\t\tti.output = out\n\t\tti.remoteErr = remoteErr\n\t\tti.execDuration = execDuration\n\t\tti.groupSize = len(tis)\n\t\tti.shardIPPort = bc.IPPort()\n\t\tclose(ti.done)\n\n\t\t// After the first one, make the rest succeed with no output.\n\t\t// TODO: maybe stream lines (set Output to a line-reading\n\t\t// Writer instead of &buf). for now we just wait for them in\n\t\t// ~10 second batches. Doesn't look as smooth on the output,\n\t\t// though.\n\t\tout = nil\n\t\tremoteErr = nil\n\t\texecDuration = 0\n\t}\n}",
"func (f *Fusetest) RunOperationTests() error {\n\tif !f.Opts.FuseOpsTests {\n\t\treturn nil\n\t}\n\n\tif f.Opts.MountInfo.OneWaySyncMount {\n\t\treturn f.RunOneWayOperationTests()\n\t}\n\n\treturn f.RunFuseOperationTests()\n}",
"func TestRunTestAllReal(t *testing.T) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping test in short mode.\")\n\t}\n\n\ttaskData := agent.TaskData{\n\t\tStringValues: map[string]string{\n\t\t\tCFG_TEST_TYPE: CFG_TYPE_ALL,\n\t\t\tCFG_SERVER_HOST: \"speedtest.nyc.rr.com:8080\",\n\t\t\tCFG_SERVER_ID: \"16976\",\n\t\t},\n\t\tIntValues: map[string]int{\n\t\t\tCFG_SERVER_ID: 16976,\n\t\t\tCFG_TIME_OUT: 5,\n\t\t},\n\t\tFloatValues: map[string]float64{CFG_MAX_SECONDS: 6},\n\t\tIntSlices: map[string][]int{\n\t\t\tCFG_DOWNLOAD_SIZES: {245388, 505544},\n\t\t\tCFG_UPLOAD_SIZES: {32768, 65536},\n\t\t},\n\t}\n\n\tspdTestRunner := SpeedTestRunner{}\n\n\tspTestResults, err := spdTestRunner.Run(taskData)\n\tif err != nil {\n\t\tt.Fatalf(\"Unexpected Error: \\n%s\", err.Error())\n\t}\n\n\tresults := spTestResults.Latency.Seconds()\n\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Latency result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nLatency test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Download\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Download result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nDownload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n\n\tresults = spTestResults.Upload\n\tif results <= 0 {\n\t\tt.Errorf(\"Error: Expected a positive Upload result, but got: %f\", results)\n\t} else {\n\t\tfmt.Printf(\"\\nUpload test results for server %d ... %f\\n\", taskData.IntValues[CFG_SERVER_ID], results)\n\t}\n}",
"func (d *Driver) RunTests(ctx context.Context,\n\ttests []*BundleEntity,\n\tdutInfos map[string]*protocol.DUTInfo,\n\tclient *reporting.RPCClient,\n\tremoteDevservers []string) ([]*resultsjson.Result, error) {\n\ttestsPerBundle := make(map[string][]*protocol.ResolvedEntity)\n\tfor _, t := range tests {\n\t\ttestsPerBundle[t.Bundle] = append(testsPerBundle[t.Bundle], t.Resolved)\n\t}\n\tbundles := make([]string, 0, len(testsPerBundle))\n\tfor b := range testsPerBundle {\n\t\tbundles = append(bundles, b)\n\t}\n\tsort.Strings(bundles)\n\tvar results []*resultsjson.Result\n\tfor _, bundle := range bundles {\n\t\tres, err := d.runTests(ctx, bundle, testsPerBundle[bundle], dutInfos, client, remoteDevservers)\n\t\tresults = append(results, res...)\n\t\tif err != nil {\n\t\t\treturn results, err\n\t\t}\n\t}\n\treturn results, nil\n}",
"func (st *buildStatus) runTestsOnBuildlet(bc buildlet.Client, tis []*testItem, goroot, gopath string) {\n\tnames, rawNames := make([]string, len(tis)), make([]string, len(tis))\n\tfor i, ti := range tis {\n\t\tnames[i], rawNames[i] = ti.name.Old, ti.name.Raw\n\t\tif i > 0 && (!strings.HasPrefix(ti.name.Old, \"go_test:\") || !strings.HasPrefix(names[0], \"go_test:\")) {\n\t\t\tpanic(\"only go_test:* tests may be merged\")\n\t\t}\n\t}\n\tvar spanName string\n\tvar detail string\n\tif len(names) == 1 {\n\t\tspanName = \"run_test:\" + names[0]\n\t\tdetail = bc.Name()\n\t} else {\n\t\tspanName = \"run_tests_multi\"\n\t\tdetail = fmt.Sprintf(\"%s: %v\", bc.Name(), names)\n\t}\n\tsp := st.CreateSpan(spanName, detail)\n\n\targs := []string{\"tool\", \"dist\", \"test\", \"--no-rebuild\", \"--banner=\" + banner}\n\tif st.conf.IsRace() {\n\t\targs = append(args, \"--race\")\n\t}\n\tif st.conf.CompileOnly {\n\t\targs = append(args, \"--compile-only\")\n\t}\n\tif st.useKeepGoingFlag() {\n\t\targs = append(args, \"-k\")\n\t}\n\targs = append(args, rawNames...)\n\tvar buf bytes.Buffer\n\tt0 := time.Now()\n\ttimeout := st.conf.DistTestsExecTimeout(names)\n\n\tctx, cancel := context.WithTimeout(st.ctx, timeout)\n\tdefer cancel()\n\n\tenv := append(st.conf.Env(),\n\t\t\"GOROOT=\"+goroot,\n\t\t\"GOPATH=\"+gopath,\n\t)\n\tenv = append(env, st.modulesEnv()...)\n\n\tremoteErr, err := bc.Exec(ctx, \"./go/bin/go\", buildlet.ExecOpts{\n\t\t// We set Dir to \".\" instead of the default (\"go/bin\") so when the dist tests\n\t\t// try to run os/exec.Command(\"go\", \"test\", ...), the LookPath of \"go\" doesn't\n\t\t// return \"./go.exe\" (which exists in the current directory: \"go/bin\") and then\n\t\t// fail when dist tries to run the binary in dir \"$GOROOT/src\", since\n\t\t// \"$GOROOT/src\" + \"./go.exe\" doesn't exist. Perhaps LookPath should return\n\t\t// an absolute path.\n\t\tDir: \".\",\n\t\tOutput: &buf, // see \"maybe stream lines\" TODO below\n\t\tExtraEnv: env,\n\t\tPath: []string{st.conf.FilePathJoin(\"$WORKDIR\", \"go\", \"bin\"), \"$PATH\"},\n\t\tArgs: args,\n\t})\n\texecDuration := time.Since(t0)\n\tsp.Done(err)\n\tif err != nil {\n\t\tbc.MarkBroken() // prevents reuse\n\t\tfor _, ti := range tis {\n\t\t\tti.numFail++\n\t\t\tst.logf(\"Execution error running %s on %s: %v (numFails = %d)\", ti.name, bc, err, ti.numFail)\n\t\t\tif err == buildlet.ErrTimeout {\n\t\t\t\tti.failf(\"Test %q ran over %v limit (%v); saw output:\\n%s\", ti.name, timeout, execDuration, buf.Bytes())\n\t\t\t} else if ti.numFail >= maxTestExecErrors {\n\t\t\t\tti.failf(\"Failed to schedule %q test after %d tries.\\n\", ti.name, maxTestExecErrors)\n\t\t\t} else {\n\t\t\t\tti.retry()\n\t\t\t}\n\t\t}\n\t\treturn\n\t}\n\n\tout := buf.Bytes()\n\tout = bytes.Replace(out, []byte(\"\\nALL TESTS PASSED (some were excluded)\\n\"), nil, 1)\n\tout = bytes.Replace(out, []byte(\"\\nALL TESTS PASSED\\n\"), nil, 1)\n\n\tfor _, ti := range tis {\n\t\tti.output = out\n\t\tti.remoteErr = remoteErr\n\t\tti.execDuration = execDuration\n\t\tti.groupSize = len(tis)\n\t\tti.shardIPPort = bc.IPPort()\n\t\tclose(ti.done)\n\n\t\t// After the first one, make the rest succeed with no output.\n\t\t// TODO: maybe stream lines (set Output to a line-reading\n\t\t// Writer instead of &buf). for now we just wait for them in\n\t\t// ~10 second batches. Doesn't look as smooth on the output,\n\t\t// though.\n\t\tout = nil\n\t\tremoteErr = nil\n\t\texecDuration = 0\n\t}\n}",
"func TestAllTask() {\n\tif noTestsFlag {\n\t\tlog.Println(\"*** Skipping: notests\")\n\t\treturn\n\t}\n\trunCmd(\"./pkg/pac/make.sh\")\n\trunCmd(\"go\", \"test\", \"-tags=\\\"net databases\\\"\",\n\t\t\"./cmd/...\", \"./pkg/...\")\n}",
"func executeTests(k8s *Kubernetes, testList []*TestCase) []*TestCase {\n\terr := bootstrap(k8s)\n\tfailOnError(err)\n\n\t//make a copy and append the tests with CIDRs\n\tcidrTests := []*TestCase{\n\t\t{\"IngressOverlapCIDRBlocks\", testIngressOverlapCIDRBlocks()},\n\t}\n\tmodifiedTestList := append(testList, cidrTests...)\n\n\tfor _, testCase := range modifiedTestList {\n\t\tlog.Infof(\"running test case %s\", testCase.Name)\n\t\tlog.Debugf(\"cleaning-up previous policies and sleeping for %v\", networkPolicyDelay)\n\t\terr = k8s.CleanNetworkPolicies(namespaces)\n\t\ttime.Sleep(networkPolicyDelay)\n\t\tfailOnError(err)\n\t\tfor _, step := range testCase.Steps {\n\t\t\tlog.Infof(\"running step %s of test case %s\", step.Name, testCase.Name)\n\t\t\treachability := step.Reachability\n\t\t\tpolicy := step.NetworkPolicy\n\t\t\tif policy != nil {\n\t\t\t\tlog.Debugf(\"creating policy and sleeping for %v\", networkPolicyDelay)\n\t\t\t\t_, err := k8s.CreateOrUpdateNetworkPolicy(policy.Namespace, policy)\n\t\t\t\tfailOnError(err)\n\t\t\t\ttime.Sleep(networkPolicyDelay)\n\t\t\t}\n\t\t\tstart := time.Now()\n\t\t\tvalidate(k8s, reachability, step.Port)\n\t\t\tstep.Duration = time.Now().Sub(start)\n\t\t\treachability.PrintSummary(true, true, true)\n\t\t}\n\t}\n\treturn modifiedTestList\n}",
"func TestMain(t *testing.T) {\n}",
"func ExecTest() {\n\tHelloTest()\n\n\tVarTest()\n\tEnumTest()\n\n\tOpTestArithmetic()\n\tOpTestRelation()\n\tOpTestBoolean()\n\tOpTestBit()\n\tOpTestAssign()\n\tOpTestPriority()\n\tOpTestOther()\n\n\tLoopTest()\n\n\tArrayOneDimTest()\n\tArrayTwoDimTest()\n\n\tPointerTest()\n\tPtrArrayTest()\n\tPtr2PtrTest()\n\tPtrParamTest()\n\n\tFuncTest()\n\tFuncArrayParamTest()\n\tFuncMultiReturnTest()\n\tFuncRecurTest()\n\tFuncClosureTest()\n\n\tRangeTest()\n\tStructTest()\n\tSliceTest()\n\tTypeCastTest()\n\tMapTest()\n\n\tInterfaceTest()\n\n\tMethodTest()\n\tErrorTest()\n}",
"func TestMain(t *testing.T) { TestingT(t) }",
"func (m *Main) RunTest(name, command string, run func(t *Test) error) error {\n\tt := m.NewTest(name, command, run)\n\treturn t.Run()\n}",
"func RunTest(t *testing.T, name string, f Func, testCases []TestCase) {\n\tt.Run(name, func(t *testing.T) {\n\t\tfor _, test := range testCases {\n\t\t\tif actual := f(test.Input); actual != test.Expected {\n\t\t\t\tt.Errorf(\"\\nfor n=%d, expected: %t, actual: %t\", test.Input, test.Expected, actual)\n\t\t\t}\n\t\t}\n\t})\n}",
"func TestExecute(t *testing.T) {\n\tctx := context.Background()\n\n\t// Clear pre-existing golden files to avoid leaving stale ones around.\n\tif *updateGoldens {\n\t\tfiles, err := filepath.Glob(filepath.Join(*goldensDir, \"*.golden.json\"))\n\t\tif err != nil {\n\t\t\tt.Fatal(err)\n\t\t}\n\t\tfor _, f := range files {\n\t\t\tif err := os.Remove(f); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t}\n\t}\n\n\ttestCases := []struct {\n\t\tname string\n\t\tflags testsharderFlags\n\t\ttestSpecs []build.TestSpec\n\t\ttestDurations []build.TestDuration\n\t\ttestList []build.TestListEntry\n\t\tmodifiers []testsharder.TestModifier\n\t\tpackageRepos []build.PackageRepo\n\t\taffectedTests []string\n\t}{\n\t\t{\n\t\t\tname: \"no tests\",\n\t\t},\n\t\t{\n\t\t\tname: \"mixed device types\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\thostTestSpec(\"bar\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: 50,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected tests\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic\", true),\n\t\t\t\ttestListEntry(\"not-affected\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-hermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"affected nonhermetic tests\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic\"),\n\t\t\t\tfuchsiaTestSpec(\"not-affected\"),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected-nonhermetic\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"target test count\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetTestCount: 2,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo1\"),\n\t\t\t\tfuchsiaTestSpec(\"foo2\"),\n\t\t\t\tfuchsiaTestSpec(\"foo3\"),\n\t\t\t\tfuchsiaTestSpec(\"foo4\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"sharding by time\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: int((4 * time.Minute).Seconds()),\n\t\t\t\tperTestTimeoutSecs: int((10 * time.Minute).Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"slow\"),\n\t\t\t\tfuchsiaTestSpec(\"fast1\"),\n\t\t\t\tfuchsiaTestSpec(\"fast2\"),\n\t\t\t\tfuchsiaTestSpec(\"fast3\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 2 * time.Second,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: packageURL(\"slow\"),\n\t\t\t\t\tMedianDuration: 5 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"max shards per env\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\t// Given expected test durations of 4 minutes for each test it's\n\t\t\t\t// impossible to satisfy both the target shard duration and the\n\t\t\t\t// max shards per environment, so the target shard duration\n\t\t\t\t// should effectively be ignored.\n\t\t\t\ttargetDurationSecs: int((5 * 
time.Minute).Seconds()),\n\t\t\t\tmaxShardsPerEnvironment: 2,\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected1\"),\n\t\t\t\tfuchsiaTestSpec(\"affected2\"),\n\t\t\t\tfuchsiaTestSpec(\"affected3\"),\n\t\t\t\tfuchsiaTestSpec(\"affected4\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected1\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected2\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic1\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic2\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: 4 * time.Minute,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"affected1\"),\n\t\t\t\tpackageURL(\"affected2\"),\n\t\t\t\tpackageURL(\"affected3\"),\n\t\t\t\tpackageURL(\"affected4\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected1\", true),\n\t\t\t\ttestListEntry(\"affected2\", true),\n\t\t\t\ttestListEntry(\"affected3\", true),\n\t\t\t\ttestListEntry(\"affected4\", true),\n\t\t\t\ttestListEntry(\"unaffected1\", true),\n\t\t\t\ttestListEntry(\"unaffected2\", true),\n\t\t\t\ttestListEntry(\"nonhermetic1\", false),\n\t\t\t\ttestListEntry(\"nonhermetic2\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"hermetic deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\thermeticDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tpackageRepos: []build.PackageRepo{\n\t\t\t\t{\n\t\t\t\t\tPath: \"pkg_repo1\",\n\t\t\t\t\tBlobs: filepath.Join(\"pkg_repo1\", \"blobs\"),\n\t\t\t\t\tTargets: filepath.Join(\"pkg_repo1\", \"targets.json\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"ffx deps\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tffxDeps: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply affected test\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\taffectedTestsMultiplyThreshold: 3,\n\t\t\t\ttargetDurationSecs: int(2 * time.Minute.Seconds()),\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"multiplied-affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-test\"),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Second,\n\t\t\t\t},\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tpackageURL(\"multiplied-affected-test\"),\n\t\t\t\tpackageURL(\"affected-test\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"multiplied-affected-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"test list with tags\",\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"hermetic-test\", true),\n\t\t\t\ttestListEntry(\"nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"skip unaffected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: 
[]build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\").Name,\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"run all tests if no affected tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"affected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-nonhermetic-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"affected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-nonhermetic-test\", false),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiply unaffected hermetic tests\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\tskipUnaffected: true,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\"),\n\t\t\t\tfuchsiaTestSpec(\"unaffected-hermetic-multiplied-test\"),\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"unaffected-hermetic-test\", true),\n\t\t\t\ttestListEntry(\"affected-nonhermetic-test\", false),\n\t\t\t\ttestListEntry(\"unaffected-hermetic-multiplied-test\", true),\n\t\t\t},\n\t\t\taffectedTests: []string{\n\t\t\t\tfuchsiaTestSpec(\"affected-nonhermetic-test\").Name,\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t{\n\t\t\t\t\tName: \"unaffected-hermetic-multiplied-test\",\n\t\t\t\t\tTotalRuns: 100,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"various modifiers\",\n\t\t\tflags: testsharderFlags{\n\t\t\t\ttargetDurationSecs: 5,\n\t\t\t},\n\t\t\ttestSpecs: []build.TestSpec{\n\t\t\t\tfuchsiaTestSpec(\"foo\"),\n\t\t\t\tfuchsiaTestSpec(\"bar\"),\n\t\t\t\tfuchsiaTestSpec(\"baz\"),\n\t\t\t},\n\t\t\tmodifiers: []testsharder.TestModifier{\n\t\t\t\t// default modifier\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 2,\n\t\t\t\t},\n\t\t\t\t// multiplier\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts (but multiplier takes precedence)\n\t\t\t\t{\n\t\t\t\t\tName: \"foo\",\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t\t// change maxAttempts, set affected\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t\tAffected: true,\n\t\t\t\t\tTotalRuns: -1,\n\t\t\t\t\tMaxAttempts: 1,\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestList: []build.TestListEntry{\n\t\t\t\ttestListEntry(\"foo\", false),\n\t\t\t\ttestListEntry(\"bar\", true),\n\t\t\t\ttestListEntry(\"baz\", false),\n\t\t\t},\n\t\t\ttestDurations: []build.TestDuration{\n\t\t\t\t{\n\t\t\t\t\tName: \"*\",\n\t\t\t\t\tMedianDuration: time.Millisecond,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range 
testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgoldenBasename := strings.ReplaceAll(tc.name, \" \", \"_\") + \".golden.json\"\n\t\t\tgoldenFile := filepath.Join(*goldensDir, goldenBasename)\n\n\t\t\tif *updateGoldens {\n\t\t\t\ttc.flags.outputFile = goldenFile\n\t\t\t} else {\n\t\t\t\ttc.flags.outputFile = filepath.Join(t.TempDir(), goldenBasename)\n\t\t\t}\n\n\t\t\ttc.flags.buildDir = t.TempDir()\n\t\t\tif len(tc.modifiers) > 0 {\n\t\t\t\ttc.flags.modifiersPath = writeTempJSONFile(t, tc.modifiers)\n\t\t\t}\n\t\t\tif len(tc.affectedTests) > 0 {\n\t\t\t\t// Add a newline to the end of the file to test that it still calculates the\n\t\t\t\t// correct number of affected tests even with extra whitespace.\n\t\t\t\ttc.flags.affectedTestsPath = writeTempFile(t, strings.Join(tc.affectedTests, \"\\n\")+\"\\n\")\n\t\t\t}\n\t\t\tif tc.flags.ffxDeps {\n\t\t\t\tsdkManifest := map[string]interface{}{\n\t\t\t\t\t\"atoms\": []interface{}{},\n\t\t\t\t}\n\t\t\t\tsdkManifestPath := filepath.Join(tc.flags.buildDir, \"sdk\", \"manifest\", \"core\")\n\t\t\t\tif err := os.MkdirAll(filepath.Dir(sdkManifestPath), os.ModePerm); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t\tif err := jsonutil.WriteToFile(sdkManifestPath, sdkManifest); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\t\t\t// Write test-list.json.\n\t\t\tif err := jsonutil.WriteToFile(\n\t\t\t\tfilepath.Join(tc.flags.buildDir, testListPath),\n\t\t\t\tbuild.TestList{Data: tc.testList, SchemaID: \"experimental\"},\n\t\t\t); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\t\t\twriteDepFiles(t, tc.flags.buildDir, tc.testSpecs)\n\t\t\tfor _, repo := range tc.packageRepos {\n\t\t\t\tif err := os.MkdirAll(filepath.Join(tc.flags.buildDir, repo.Path), 0o700); err != nil {\n\t\t\t\t\tt.Fatal(err)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm := &fakeModules{\n\t\t\t\ttestSpecs: tc.testSpecs,\n\t\t\t\ttestDurations: tc.testDurations,\n\t\t\t\tpackageRepositories: tc.packageRepos,\n\t\t\t}\n\t\t\tif err := execute(ctx, tc.flags, m); err != nil {\n\t\t\t\tt.Fatal(err)\n\t\t\t}\n\n\t\t\tif !*updateGoldens {\n\t\t\t\twant := readShards(t, goldenFile)\n\t\t\t\tgot := readShards(t, tc.flags.outputFile)\n\t\t\t\tif diff := cmp.Diff(want, got); diff != \"\" {\n\t\t\t\t\tt.Errorf(strings.Join([]string{\n\t\t\t\t\t\t\"Golden file mismatch!\",\n\t\t\t\t\t\t\"To fix, run `tools/integration/testsharder/update_goldens.sh\",\n\t\t\t\t\t\tdiff,\n\t\t\t\t\t}, \"\\n\"))\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}",
"func TestMain(m *testing.M) {\n\tprintln(\"do stuff before all tests\")\n\tm.Run()\n\tprintln(\"do stuff after all tests\")\n}",
"func RunGinkgoTests(t *testing.T, name string) {\n\t// Setup logging\n\tlog.SetFlags(log.LstdFlags)\n\tlog.SetOutput(ginkgo.GinkgoWriter)\n\tgomega.RegisterFailHandler(ginkgo.Fail)\n\tginkgo.RunSpecs(t, name)\n}",
"func RunTest(ctx context.Context, fn testFuncType, isGuest bool) error {\n\t// We lose connectivity along the way here, and if that races with the\n\t// recover_duts network-recovery hooks, it may interrupt us.\n\tunlock, err := network.LockCheckNetworkHook(ctx)\n\tif err != nil {\n\t\treturn errors.Wrap(err, \"failed locking the check network hook\")\n\t}\n\tdefer unlock()\n\n\tvar env TestEnv\n\n\tdefer tearDown(ctx, &env)\n\n\tif err := setUp(ctx, &env, isGuest); err != nil {\n\t\treturn errors.Wrap(err, \"failed starting the test\")\n\t}\n\n\treturn fn(ctx, &env)\n}",
"func TestRun(t *testing.T) {\n\tsuite.Run(t, new(CategoryTestSuite))\n\tsuite.Run(t, new(ProductTestSuite))\n}",
"func (i *InspecRunner) RunAllTests(path string) (verifiers.TestSuite, error) {\n\tv := inspec.InspecVerifier{}\n\terr := v.Setup(path)\n\n\tif err != nil {\n\t\tfmt.Println(\"error during inspec verifier setup\")\n\t\treturn verifiers.TestSuite{}, err\n\t}\n\n\tresult, err := v.Check(path)\n\n\tif err != nil {\n\t\tfmt.Println(\"error during inspec test execution\")\n\t\treturn result, err\n\t}\n\treturn result, nil\n\n}",
"func RegisterTests(f *framework.Framework) {\n\tContainerDeployerTests(f)\n\tManifestDeployerTests(f)\n\thelmcharts.RegisterTests(f)\n\tblueprints.RegisterTests(f)\n\tmanagement.RegisterTests(f)\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(runTest(m))\n}",
"func Test() error {\n\tfmt.Println(\"Testing...\")\n\tcmd := exec.Command(\"go\", \"test\", \"-cover\", \"-v\", \"-bench\", \"'.'\", \"-benchmem\", \"./...\")\n\tcmd.Stdout = Stdout\n\tcmd.Stderr = Stderr\n\treturn cmd.Run()\n}",
"func RunTestCases(t *testing.T, testcases *[]TestCase, ctx *TestContext) {\n\tcontext := ctx\n\tif context == nil {\n\t\tcontext = &TestContext{\n\t\t\tNameIDMap: make(map[string]string),\n\t\t\tNameObjectMap: make(map[string]interface{}),\n\t\t}\n\t}\n\n\tfor _, tc := range *testcases {\n\t\tTestLog = t\n\n\t\tif true { //tc.Enabled){\n\n\t\t\tt.Logf(\"======Begin to run test case: %s \\n\", tc.Name)\n\n\t\t\t//If test case name is like sleep_500, then it would sleep 500ms\n\t\t\tmethod := strings.ToLower(tc.Method)\n\t\t\tif strings.Contains(method, \"sleep\") {\n\t\t\t\tsleep, _ := strconv.Atoi(method[6:])\n\t\t\t\tt.Logf(\"Sleep %d mill-secondes\", sleep)\n\t\t\t\ttime.Sleep(time.Duration(sleep) * time.Millisecond)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tvar runner TestExecuter\n\t\t\tif tc.Executer != nil {\n\t\t\t\trunner = tc.Executer()\n\t\t\t} else {\n\t\t\t\tt.Fatalf(\"No Executer specified in test cases\")\n\t\t\t}\n\t\t\tt.Run(tc.Name, testcaseFunc(runner, context, &tc))\n\n\t\t}\n\t}\n}",
"func (t *Tester) Test() error {\n\tif err := t.pretestSetup(); err != nil {\n\t\treturn err\n\t}\n\n\te2eTestArgs := []string{\n\t\t\"--host=\" + t.host,\n\t\t\"--provider=\" + t.provider,\n\t\t\"--kubeconfig=\" + t.kubeconfigPath,\n\t\t\"--ginkgo.flakeAttempts=\" + t.flakeAttempts,\n\t\t\"--ginkgo.skip=\" + t.skipRegex,\n\t\t\"--ginkgo.focus=\" + t.focusRegex,\n\t}\n\tginkgoArgs := append([]string{\n\t\t\"--nodes=\" + t.parallel,\n\t\te2eTestPath,\n\t\t\"--\"}, e2eTestArgs...)\n\n\tlog.Printf(\"Running ginkgo test as %s %+v\", binary, ginkgoArgs)\n\tcmd := exec.Command(binary, ginkgoArgs...)\n\texec.InheritOutput(cmd)\n\treturn cmd.Run()\n}",
"func Test() error {\n\treturn sh.RunV(\"go\", \"test\", \"-v\", \"-cover\", \"./...\", \"-coverprofile=coverage.out\")\n}",
"func runAllTestCases(t *testing.T, checker resultsChecker) {\n\tt.Helper()\n\tchecker.resetTestCasesRun()\n\terr := filepath.Walk(checker.rootDir(),\n\t\tfunc(path string, info os.FileInfo, err error) error {\n\t\t\trequire.NoError(t, err)\n\t\t\tif info.IsDir() && checker.isTestDir(path) {\n\t\t\t\trunDirectoryTestCase(t, path, checker)\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\trequire.NoError(t, err)\n\trequire.NotZero(t, len(checker.TestCasesRun()), \"No complete test cases found in %s\", checker.rootDir())\n}",
"func RunTest(client pb.GNMIClient, testCase *common.TestCase, timeout time.Duration, stateUpdateDelay time.Duration) error {\n\tif client == nil {\n\t\treturn errors.New(\"gNMI client is not available\")\n\t}\n\tif testCase == nil {\n\t\treturn errors.New(\"empty test case\")\n\t}\n\tif len(testCase.OPs) == 0 {\n\t\t// Succeed if no operation specified in this test case.\n\t\treturn nil\n\t}\n\t// Determine the test case type.\n\tswitch testCase.OPs[0].Type {\n\tcase common.OPReplace, common.OPUpdate, common.OPDelete:\n\t\t// This is a config test.\n\t\treturn runConfigTest(client, testCase, timeout, stateUpdateDelay)\n\tcase common.OPGet:\n\t\t// This is a state fetching test.\n\t\treturn runStateTest(client, testCase, timeout)\n\tcase common.OPSubscribe:\n\t\treturn errors.New(\"not support telemetry streaming test cases\")\n\tdefault:\n\t\treturn fmt.Errorf(\"invalid operation type %s\", testCase.OPs[0].Type)\n\t}\n}",
"func (envManager *TestEnvManager) RunTest(m runnable) (ret int) {\n\tdefer envManager.TearDown()\n\tif err := envManager.StartUp(); err != nil {\n\t\tlog.Printf(\"Failed to setup framework: %s\", err)\n\t\tret = 1\n\t} else {\n\t\tlog.Printf(\"\\nStart testing ......\")\n\t\tret = m.Run()\n\t}\n\treturn ret\n}",
"func executeGoTestRunner(t *testing.T, expectedLogs []string, unexpectedLogs []string) {\n\tout, _ := exec.Command(\n\t\tpath.Join(runtime.GOROOT(), \"bin\", \"go\"),\n\t\t\"test\",\n\t\t\"../_supervised_in_test/\",\n\t\t\"-v\",\n\t\t\"-run\",\n\t\t\"^(\"+t.Name()+\")$\").CombinedOutput()\n\n\tgoTestOutput := string(out)\n\tribbon := \"------------------ EXTERNAL TEST OUTPUT (\" + t.Name() + \") ------------------\"\n\tdebugMsgOutput := fmt.Sprintln(ribbon, \"\\n\", goTestOutput, \"\\n\", ribbon)\n\n\tfor _, logLine := range expectedLogs {\n\t\trequire.Truef(t, strings.Contains(goTestOutput, logLine), \"log should contain: '%s'\\n\\n%s\", logLine, debugMsgOutput)\n\t}\n\tfor _, logLine := range unexpectedLogs {\n\t\trequire.Falsef(t, strings.Contains(goTestOutput, logLine), \"log should not contain: '%s'\\n\\n%s\", logLine, debugMsgOutput)\n\t}\n}",
"func Test(t *testing.T, command Runner, testCases []Case) {\n\tt.Helper()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.Name, func(t *testing.T) {\n\t\t\tt.Helper() // TODO: make Helper working for subtests: issue #24128\n\n\t\t\tstdout := &bytes.Buffer{}\n\t\t\tstderr := &bytes.Buffer{}\n\n\t\t\tcommand.SetStdout(stdout)\n\t\t\tcommand.SetStderr(stderr)\n\n\t\t\tm := newMatch(t, tc.wantFail)\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif !m.removeFile(tc.WantFile) {\n\t\t\t\t\ttc.WantFile = \"\" // stop testing File match\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar gotErr string\n\t\t\tgotPanic := m.run(func() {\n\t\t\t\tif err := command.Run(tc.Args); err != nil {\n\t\t\t\t\tgotErr = err.Error()\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tif tc.WantFile != \"\" {\n\t\t\t\tif gotFile, ext, ok := m.getFile(tc.WantFile); ok {\n\t\t\t\t\tm.match(\"File golden\"+ext, gotFile, \"golden\"+ext)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tm.match(\"WantStdout\", stdout.String(), tc.WantStdout)\n\t\t\tm.match(\"WantStderr\", stderr.String(), tc.WantStderr)\n\t\t\tm.match(\"WantPanic\", gotPanic, tc.WantPanic)\n\t\t\tm.match(\"WantErr\", gotErr, tc.WantErr)\n\t\t\tm.equal(\"WantExitCode\", command.ExitCode(), tc.WantExitCode)\n\n\t\t\tm.done()\n\t\t})\n\t}\n}",
"func Test1IsATest(t *testing.T) {\n}",
"func TestTask() {\n\tif noTestsFlag {\n\t\tlog.Println(\"*** Skipping: notests\")\n\t\treturn\n\t}\n\trunCmd(\"./pkg/pac/make.sh\")\n\trunCmd(\"go\", \"test\",\n\t\t\"./cmd/...\", \"./pkg/...\")\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func TestMain(m *testing.M) {\n\tfstest.TestMain(m)\n}",
"func RunE2ETests(t *testing.T) {\n\truntimeutils.ReallyCrash = true\n\tlogs.InitLogs()\n\tdefer logs.FlushLogs()\n\n\t// Disable skipped tests unless they are explicitly requested.\n\tif config.GinkgoConfig.FocusString == \"\" && config.GinkgoConfig.SkipString == \"\" {\n\t\tconfig.GinkgoConfig.SkipString = `\\[Flaky\\]|\\[Feature:.+\\]`\n\t}\n\n\t// Run tests through the Ginkgo runner with output to console + JUnit for Jenkins\n\tvar r []ginkgo.Reporter\n\n\tklog.Infof(\"Starting e2e run %q on Ginkgo node %d\", rand.String(5), config.GinkgoConfig.ParallelNode)\n\tginkgo.RunSpecsWithDefaultAndCustomReporters(t, \"Kubernetes e2e suite\", r)\n}",
"func main() {\n\n mainTests{}.mainTests117SortFileMgrsCaseSensitive()\n\n}",
"func Test() error {\n\treturn sh.RunWith(map[string]string{\"GORACE\": \"halt_on_error=1\"},\n\t\t\"go\", \"test\", \"-race\", \"-v\", \"./...\")\n}",
"func TestMain(m *testing.M) {\n\tflag.Parse()\n\n\t// We are running in bazel so set up the directory for the test binaries\n\tif os.Getenv(\"TEST_WORKSPACE\") != \"\" {\n\t\t// TODO create a toolchain for this\n\t\tpaths.MaybeSetEnv(\"PATH\", \"kubetest2-kind\", \"hack\", \"bin\", \"kubetest2-kind\")\n\t}\n\n\tnoKind := os.Getenv(\"TEST_DO_NOT_USE_KIND\")\n\tif noKind == \"\" {\n\t\tos.Setenv(\"USE_EXISTING_CLUSTER\", \"true\")\n\n\t\t// TODO random name for server and also random open port\n\t\terr := exec.StartKubeTest2(\"test\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\n\t// TODO verify success of cluster start? Does kind do it?\n\n\te := testenv.NewEnv(runtime.NewSchemeBuilder(api.AddToScheme),\n\t\tfilepath.Join(\"..\", \"config\", \"crd\", \"bases\"),\n\t\tfilepath.Join(\"..\", \"config\", \"rbac\", \"bases\"))\n\n\tenv = e.Start()\n\tcode := m.Run()\n\te.Stop()\n\n\tif noKind == \"\" {\n\t\terr := exec.StopKubeTest2(\"test\")\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n\tos.Exit(code)\n}",
"func RunTest(t *testing.T, dir string, opts ...TestOptionsFunc) {\n\ttest := Test{}\n\t// Apply common defaults.\n\ttest.ProjectName = filepath.Base(dir)\n\ttest.Options.Compile = nil\n\ttest.Options.FilterName = \"name\"\n\ttest.RunOptions = &integration.ProgramTestOptions{\n\t\tDir: dir,\n\t\tExpectRefreshChanges: true,\n\t}\n\tfor _, opt := range opts {\n\t\topt(t, &test)\n\t}\n\n\ttest.Run(t)\n}",
"func runTest(test TestCase) TestResult {\n\t// cut = command under test\n\tcut := cmd.NewCommand(test.Command.Cmd)\n\tcut.SetTimeout(test.Command.Timeout)\n\tcut.Dir = test.Command.Dir\n\tfor k, v := range test.Command.Env {\n\t\tcut.AddEnv(k, v)\n\t}\n\n\tif err := cut.Execute(); err != nil {\n\t\tlog.Println(test.Title, \" failed \", err.Error())\n\t\ttest.Result = CommandResult{\n\t\t\tError: err,\n\t\t}\n\n\t\treturn TestResult{\n\t\t\tTestCase: test,\n\t\t}\n\t}\n\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Command: \", cut.Cmd)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Directory: \", cut.Dir)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Env: \", cut.Env)\n\n\t// Write test result\n\ttest.Result = CommandResult{\n\t\tExitCode: cut.ExitCode(),\n\t\tStdout: strings.Replace(cut.Stdout(), \"\\r\\n\", \"\\n\", -1),\n\t\tStderr: strings.Replace(cut.Stderr(), \"\\r\\n\", \"\\n\", -1),\n\t}\n\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" ExitCode: \", test.Result.ExitCode)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stdout: \", test.Result.Stdout)\n\tlog.Println(\"title: '\"+test.Title+\"'\", \" Stderr: \", test.Result.Stderr)\n\n\treturn Validate(test)\n}",
"func TestMain(m *testing.M) {\n\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(m.Run())\n}",
"func RunTest(ctx context.Context, target, location string, nodeIDs []int, limit int, debug, outputJSON bool, runTest runFunc, runOutput runOutputFunc) error {\n\trunReq := &perfops.RunRequest{\n\t\tTarget: target,\n\t\tLocation: location,\n\t\tNodes: nodeIDs,\n\t\tLimit: limit,\n\t}\n\n\tf := NewFormatter(debug && !outputJSON)\n\tf.StartSpinner()\n\ttestID, err := runTest(ctx, runReq)\n\tf.StopSpinner()\n\tif err != nil {\n\t\treturn err\n\t}\n\tres := &RunOutputResult{}\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-time.After(200 * time.Millisecond):\n\t\t\t}\n\t\t\toutput, err := runOutput(ctx, testID)\n\t\t\tres.SetOutput(output, err)\n\t\t\tif err != nil {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}()\n\n\tif outputJSON {\n\t\tf.StartSpinner()\n\t}\n\tvar o *perfops.RunOutput\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(50 * time.Millisecond):\n\t\t}\n\t\tif o, err = res.Output(); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !outputJSON && o != nil {\n\t\t\tPrintOutput(f, o)\n\t\t}\n\t\tif o != nil && o.IsFinished() {\n\t\t\tbreak\n\t\t}\n\t}\n\tif outputJSON {\n\t\tf.StopSpinner()\n\t\tPrintOutputJSON(o)\n\t}\n\treturn nil\n}",
"func KubeVirtTestSuiteSetup(t *testing.T) {\n\t_, description, _, _ := runtime.Caller(1)\n\tprojectRoot := findRoot()\n\tdescription = strings.TrimPrefix(description, projectRoot)\n\t// Redirect writes to ginkgo writer to keep tests quiet when\n\t// they succeed\n\tlog.Log.SetIOWriter(ginkgo.GinkgoWriter)\n\t// setup the connection between ginkgo and gomega\n\tgomega.RegisterFailHandler(ginkgo.Fail)\n\n\t// See https://github.com/bazelbuild/rules_go/blob/197699822e081dad064835a09825448a3e4cc2a2/go/core.rst#go_test\n\t// for context.\n\ttestsWrapped := os.Getenv(\"GO_TEST_WRAP\")\n\toutputFile := os.Getenv(\"XML_OUTPUT_FILE\")\n\n\t// if run on bazel (XML_OUTPUT_FILE is not empty)\n\t// and rules_go is configured to not produce the junit xml\n\t// produce it here. Otherwise just run the default RunSpec\n\tif testsWrapped == \"0\" && outputFile != \"\" {\n\t\ttestTarget := os.Getenv(\"TEST_TARGET\")\n\t\tif config.GinkgoConfig.ParallelTotal > 1 {\n\t\t\toutputFile = fmt.Sprintf(\"%s-%d\", outputFile, config.GinkgoConfig.ParallelNode)\n\t\t}\n\n\t\tginkgo.RunSpecsWithDefaultAndCustomReporters(\n\t\t\tt,\n\t\t\ttestTarget,\n\t\t\t[]ginkgo.Reporter{\n\t\t\t\treporters.NewJUnitReporter(outputFile),\n\t\t\t},\n\t\t)\n\t} else {\n\t\t// Use the current filename as description for ginkgo\n\t\tginkgo.RunSpecs(t, description)\n\t}\n}",
"func runTest(ctx context.Context, c autotest.Config, a *autotest.AutoservArgs, w io.Writer) (*Result, error) {\n\tr, err := runTask(ctx, c, a, w)\n\tif !r.Started {\n\t\treturn r, err\n\t}\n\tp := filepath.Join(a.ResultsDir, autoservPidFile)\n\tif i, err2 := readTestsFailed(p); err2 != nil {\n\t\tif err == nil {\n\t\t\terr = err2\n\t\t}\n\t} else {\n\t\tr.TestsFailed = i\n\t}\n\tif err2 := appendJobFinished(a.ResultsDir); err == nil {\n\t\terr = err2\n\t}\n\treturn r, err\n}",
"func TestMain(m *testing.M) {\n\t// Note: The setup will provision a single K8s env and\n\t// all the tests need to create and use a separate namespace\n\n\t// setup env test\n\tif err := setupSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\t// run tests\n\tcode := m.Run()\n\n\t// tear down test env\n\tif err := tearDownSuite(); err != nil {\n\t\tpanic(err)\n\t}\n\n\tos.Exit(code)\n}",
"func TestMain(m *testing.M) {\n\n\t// Run Setup\n\tSetup()\n\n\t// Run all the tests\n\treturnCode := m.Run()\n\n\t// Run teardown\n\tTearDown()\n\n\t// Pass on the exit codes\n\tos.Exit(returnCode)\n}",
"func TestMain(m *testing.M) {\n\n\tfmt.Println(\"Running tests with file from disk...\")\n\tusingFile = true\n\ttestOpenFile()\n\n\tresult := m.Run()\n\ttestDbf.Close()\n\n\tif result != 0 {\n\t\tos.Exit(result)\n\t}\n\n\tfmt.Println(\"Running tests with byte stream...\")\n\tusingFile = false\n\ttestOpenStream()\n\n\tresult = m.Run()\n\n\tos.Exit(result)\n}",
"func (suite FeatureTestSuite) Run(t *testing.T, buildFunc feature.BuildFunc) {\n\tfor _, test := range suite {\n\t\trunTest(t, test, buildFunc)\n\t}\n}",
"func Test(t *testing.T, driver, dsn string, testSuites []string, rw bool) {\n\tclients, err := ConnectClients(t, driver, dsn, nil)\n\tif err != nil {\n\t\tt.Fatalf(\"Failed to connect to %s (%s driver): %s\\n\", dsn, driver, err)\n\t}\n\tclients.RW = rw\n\ttests := make(map[string]struct{})\n\tfor _, test := range testSuites {\n\t\ttests[test] = struct{}{}\n\t}\n\tif _, ok := tests[SuiteAuto]; ok {\n\t\tt.Log(\"Detecting target service compatibility...\")\n\t\tsuites, err := detectCompatibility(clients.Admin)\n\t\tif err != nil {\n\t\t\tt.Fatalf(\"Unable to determine server suite compatibility: %s\\n\", err)\n\t\t}\n\t\ttests = make(map[string]struct{})\n\t\tfor _, suite := range suites {\n\t\t\ttests[suite] = struct{}{}\n\t\t}\n\t}\n\ttestSuites = make([]string, 0, len(tests))\n\tfor test := range tests {\n\t\ttestSuites = append(testSuites, test)\n\t}\n\tt.Logf(\"Running the following test suites: %s\\n\", strings.Join(testSuites, \", \"))\n\tfor _, suite := range testSuites {\n\t\tRunTestsInternal(clients, suite)\n\t}\n}",
"func (f *Fusetest) RunReconnectTests(reconnectOpts internet.ReconnectOpts) error {\n\tif runtime.GOOS != \"darwin\" {\n\t\tfmt.Println(\"Testing internet is disabled for non-darwin currently.\")\n\t\treturn nil\n\t}\n\n\tfmt.Printf(\"Testing reconnect, pausing for %s.\\n\", reconnectOpts.TotalDur())\n\tif err := internet.ToggleInternet(reconnectOpts); err != nil {\n\t\treturn err\n\t}\n\n\treturn f.RunOperationTests()\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tos.Exit(testMain(m))\n}",
"func TestMain(m *testing.M) {\n\tdefer tracing.Cleanup()\n\n\tglobal = environment.NewStandardGlobalEnvironment()\n\n\t// Run the tests.\n\tos.Exit(m.Run())\n}",
"func Test(t *testing.T, p prog.Program, cases ...Case) {\n\tt.Helper()\n\tfor _, c := range cases {\n\t\tt.Run(strings.Join(c.args, \" \"), func(t *testing.T) {\n\t\t\tt.Helper()\n\t\t\tr := run(p, c.args, c.stdin)\n\t\t\tif r.exitCode != c.want.exitCode {\n\t\t\t\tt.Errorf(\"got exit code %v, want %v\", r.exitCode, c.want.exitCode)\n\t\t\t}\n\t\t\tif !matchOutput(r.stdout, c.want.stdout) {\n\t\t\t\tt.Errorf(\"got stdout %v, want %v\", r.stdout, c.want.stdout)\n\t\t\t}\n\t\t\tif !matchOutput(r.stderr, c.want.stderr) {\n\t\t\t\tt.Errorf(\"got stderr %v, want %v\", r.stderr, c.want.stderr)\n\t\t\t}\n\t\t})\n\t}\n}",
"func (Golang) Test(gopath string, gomodule string, timeout string, local bool) (err error) {\n\tif !local {\n\t\twd := filepath.Join(gopath, \"src\", gomodule)\n\n\t\tlogrus.Debugf(\"Changing working directory to %s.\", wd)\n\n\t\terr = os.Chdir(wd)\n\n\t\tif err != nil {\n\t\t\terr = errors.Wrapf(err, \"changing working dir to %q\", wd)\n\t\t\treturn err\n\t\t}\n\t}\n\n\tlogrus.Debugf(\"Running 'go test -v ./...'.\")\n\n\t// TODO Should this use a shell exec like build?\n\tvar cmd *exec.Cmd\n\t// Things break if you pass in an arg that has an empty string. Splitting it up like this fixes https://github.com/nikogura/gomason/issues/24\n\tif timeout != \"\" {\n\t\tcmd = exec.Command(\"go\", \"test\", \"-v\", \"-timeout\", timeout, \"./...\")\n\t} else {\n\t\tcmd = exec.Command(\"go\", \"test\", \"-v\", \"./...\")\n\t}\n\n\trunenv := append(os.Environ(), fmt.Sprintf(\"GOPATH=%s\", gopath))\n\trunenv = append(runenv, \"GO111MODULE=on\")\n\n\tcmd.Env = runenv\n\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\tcmd.Stdin = os.Stdin\n\n\terr = cmd.Run()\n\tif err != nil {\n\t\terr = errors.Wrapf(err, \"failed running %s\", cmd)\n\t}\n\n\tlogrus.Debugf(\"Done with go test.\")\n\n\treturn err\n}",
"func TestMain(m *testing.M) {\n\t// If the test binary is named \"app\", then we're running as a subprocess.\n\t// Otherwise, run the tests.\n\tswitch filepath.Base(os.Args[0]) {\n\tcase \"app\":\n\t\tmain()\n\t\tos.Exit(0)\n\tdefault:\n\t\tos.Exit(m.Run())\n\t}\n}",
"func (s *FakeJujuRunnerSuite) TestRun(c *gc.C) {\n\ts.runner.Run()\n\ts.runner.Stop()\n\tresult := s.runner.Wait()\n \n\tc.Assert(result.String(), gc.Equals, \"OK: 1 passed\")\n\tc.Assert(result.Succeeded, gc.Equals, 1)\n\tc.Assert(result.RunError, gc.IsNil)\n\tc.Assert(\n\t\tstrings.Contains(s.output.String(), \"Starting service\"), gc.Equals, true)\n}",
"func TestMain(m *testing.M) {\n\t// We get a chance to parse flags to include the framework flags for the\n\t// framework as well as any additional flags included in the integration.\n\tflag.Parse()\n\n\t// EnableInjectionOrDie will enable client injection, this is used by the\n\t// testing framework for namespace management, and could be leveraged by\n\t// features to pull Kubernetes clients or the test environment out of the\n\t// context passed in the features.\n\tctx, startInformers := injection.EnableInjectionOrDie(nil, nil) //nolint\n\tstartInformers()\n\n\t// global is used to make instances of Environments, NewGlobalEnvironment\n\t// is passing and saving the client injection enabled context for use later.\n\tglobal = environment.NewGlobalEnvironment(ctx)\n\n\t// Run the tests.\n\tos.Exit(m.Run())\n}",
"func TestMain(m *testing.M) {\n\ttestsuite.RevelTestHelper(m, \"dev\", run.Run)\n}",
"func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}",
"func TestMain(t *testing.T) {\n\tvar ran bool\n\trun = func() {\n\t\tran = true\n\t}\n\tmain()\n\tif !ran {\n\t\tt.Error(\"Expected Run() to be called, but it wasn't\")\n\t}\n}",
"func (controller TestController) RunTest() (setupErr error, testErr error) {\n\ttests := controller.testSuite.GetTests()\n\tlogrus.Debugf(\"Test configs: %v\", tests)\n\ttest, found := tests[controller.testName]\n\tif !found {\n\t\treturn stacktrace.NewError(\"Nonexistent test: %v\", controller.testName), nil\n\t}\n\n\tnetworkLoader, err := test.GetNetworkLoader()\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not get network loader\"), nil\n\t}\n\n\tlogrus.Info(\"Connecting to Docker environment...\")\n\t// Initialize a Docker client\n\tdockerClient, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err,\"Failed to initialize Docker client from environment.\"), nil\n\t}\n\tdockerManager, err := docker.NewDockerManager(logrus.StandardLogger(), dockerClient)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred when constructing the Docker manager\"), nil\n\t}\n\tlogrus.Info(\"Connected to Docker environment\")\n\n\tlogrus.Infof(\"Configuring test network in Docker network %v...\", controller.networkId)\n\talreadyTakenIps := map[string]bool{\n\t\tcontroller.gatewayIp: true,\n\t\tcontroller.testControllerIp: true,\n\t}\n\tfreeIpTracker, err := networks.NewFreeIpAddrTracker(logrus.StandardLogger(), controller.subnetMask, alreadyTakenIps)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred creating the free IP address tracker\"), nil\n\t}\n\n\tbuilder := networks.NewServiceNetworkBuilder(\n\t\t\tdockerManager,\n\t\t\tcontroller.networkId,\n\t\t\tfreeIpTracker,\n\t\t\tcontroller.testVolumeName,\n\t\t\tcontroller.testVolumeFilepath)\n\tif err := networkLoader.ConfigureNetwork(builder); err != nil {\n\t\treturn stacktrace.Propagate(err, \"Could not configure test network in Docker network %v\", controller.networkId), nil\n\t}\n\tnetwork := builder.Build()\n\tdefer func() {\n\t\tlogrus.Info(\"Stopping test network...\")\n\t\terr := network.RemoveAll(CONTAINER_STOP_TIMEOUT)\n\t\tif err != nil {\n\t\t\tlogrus.Error(\"An error occurred stopping the network\")\n\t\t\tfmt.Fprintln(logrus.StandardLogger().Out, err)\n\t\t} else {\n\t\t\tlogrus.Info(\"Successfully stopped the test network\")\n\t\t}\n\t}()\n\tlogrus.Info(\"Test network configured\")\n\n\tlogrus.Info(\"Initializing test network...\")\n\tavailabilityCheckers, err := networkLoader.InitializeNetwork(network);\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"An error occurred initialized the network to its starting state\"), nil\n\t}\n\tlogrus.Info(\"Test network initialized\")\n\n\t// Second pass: wait for all services to come up\n\tlogrus.Info(\"Waiting for test network to become available...\")\n\tfor serviceId, availabilityChecker := range availabilityCheckers {\n\t\tlogrus.Debugf(\"Waiting for service %v to become available...\", serviceId)\n\t\tif err := availabilityChecker.WaitForStartup(); err != nil {\n\t\t\treturn stacktrace.Propagate(err, \"An error occurred waiting for service with ID %v to start up\", serviceId), nil\n\t\t}\n\t\tlogrus.Debugf(\"Service %v is available\", serviceId)\n\t}\n\tlogrus.Info(\"Test network is available\")\n\n\tlogrus.Info(\"Executing test...\")\n\tuntypedNetwork, err := networkLoader.WrapNetwork(network)\n\tif err != nil {\n\t\treturn stacktrace.Propagate(err, \"Error occurred wrapping network in user-defined network type\"), nil\n\t}\n\n\ttestResultChan := make(chan error)\n\n\tgo func() {\n\t\ttestResultChan <- runTest(test, 
untypedNetwork)\n\t}()\n\n\t// Time out the test so a poorly-written test doesn't run forever\n\ttestTimeout := test.GetExecutionTimeout()\n\tvar timedOut bool\n\tvar testResultErr error\n\tselect {\n\tcase testResultErr = <- testResultChan:\n\t\tlogrus.Tracef(\"Test returned result before timeout: %v\", testResultErr)\n\t\ttimedOut = false\n\tcase <- time.After(testTimeout):\n\t\tlogrus.Tracef(\"Hit timeout %v before getting a result from the test\", testTimeout)\n\t\ttimedOut = true\n\t}\n\n\tlogrus.Tracef(\"After running test w/timeout: resultErr: %v, timedOut: %v\", testResultErr, timedOut)\n\n\tif timedOut {\n\t\treturn nil, stacktrace.NewError(\"Timed out after %v waiting for test to complete\", testTimeout)\n\t}\n\n\tlogrus.Info(\"Test execution completed\")\n\n\tif testResultErr != nil {\n\t\treturn nil, stacktrace.Propagate(testResultErr, \"An error occurred when running the test\")\n\t}\n\n\treturn nil, nil\n}",
"func (f Factory) RunKeyValueStoreTests(t *testing.T) {\n\tt.Run(\"TestKeyValueStore\", f.TestKeyValueStore)\n}",
"func Run(top *testing.T, name string, f TestFunc, accessories ...*Accessory) {\n\tif _, previouslyCalled := seen.LoadOrStore(fmt.Sprintf(\"%p\", top), nil); !previouslyCalled {\n\t\ttop.Parallel()\n\t}\n\ttop.Run(name, func(mid *testing.T) {\n\t\tmid.Parallel()\n\t\tctx, cancel := context.WithCancel(context.Background())\n\t\tbottom := testhelper.NewT(ctx, mid)\n\t\tcmd := newCiOperatorCommand(bottom)\n\t\ttestDone, cleanupDone := make(chan struct{}), make(chan struct{})\n\t\tdefer func() {\n\t\t\t// signal to the command that we no longer need to be waiting to\n\t\t\t// interrupt it; then wait for the cleanup routine to finish before\n\t\t\t// we consider the test done\n\t\t\tclose(testDone)\n\t\t\t<-cleanupDone\n\t\t}()\n\t\tcmd.testDone = testDone\n\t\tcmd.cleanupDone = cleanupDone\n\n\t\twg := sync.WaitGroup{}\n\t\twg.Add(len(accessories))\n\t\tfor _, accessory := range accessories {\n\t\t\t// binding the accessory to ctx ensures its lifetime is only\n\t\t\t// as long as the test we are running in this specific case\n\t\t\taccessory.RunFromFrameworkRunner(bottom, ctx, false)\n\t\t\tcmd.AddArgs(accessory.ClientFlags()...)\n\t\t\tgo func(a *Accessory) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\ta.Ready(bottom)\n\t\t\t}(accessory)\n\t\t}\n\t\twg.Wait()\n\n\t\tgo func() {\n\t\t\tdefer func() { cancel() }() // stop waiting for errors\n\t\t\tf(bottom, &cmd)\n\t\t}()\n\n\t\tbottom.Wait()\n\t})\n}",
"func E2ETests(env e2e.TestEnv) func(*testing.T) {\n\tc := &actionTests{\n\t\tenv: env,\n\t}\n\n\treturn func(t *testing.T) {\n\t\t// singularity run\n\t\tt.Run(\"run\", c.actionRun)\n\t\t// singularity exec\n\t\tt.Run(\"exec\", c.actionExec)\n\t\t// stdin/stdout pipe\n\t\tt.Run(\"STDPIPE\", c.STDPipe)\n\t\t// action_URI\n\t\tt.Run(\"action_URI\", c.RunFromURI)\n\t\t// Persistent Overlay\n\t\tt.Run(\"Persistent_Overlay\", c.PersistentOverlay)\n\t\t// shell interaction\n\t\tt.Run(\"Shell\", c.actionShell)\n\t}\n}"
] | [
"0.75176436",
"0.71770805",
"0.71047145",
"0.70926744",
"0.70433205",
"0.69975567",
"0.69620216",
"0.6948858",
"0.6947141",
"0.68886244",
"0.6835153",
"0.6806744",
"0.6799127",
"0.67480135",
"0.67230546",
"0.6647444",
"0.65697587",
"0.6532074",
"0.65249354",
"0.6519513",
"0.65060484",
"0.6489568",
"0.64813536",
"0.64804",
"0.6459192",
"0.64511526",
"0.6391403",
"0.6389746",
"0.6366025",
"0.6358235",
"0.6334614",
"0.63284105",
"0.62885326",
"0.624311",
"0.6237976",
"0.62183493",
"0.62119",
"0.6206467",
"0.61661375",
"0.61564136",
"0.61542267",
"0.6143577",
"0.6139137",
"0.6119119",
"0.61170506",
"0.61119807",
"0.6109577",
"0.61071724",
"0.61030173",
"0.60864246",
"0.6047378",
"0.6034702",
"0.6033175",
"0.60315573",
"0.60193723",
"0.601796",
"0.5998478",
"0.5996871",
"0.5992677",
"0.5988023",
"0.5968511",
"0.59534603",
"0.5940941",
"0.5940941",
"0.5940941",
"0.5938171",
"0.5923718",
"0.5922744",
"0.5900394",
"0.5891314",
"0.5877876",
"0.5863532",
"0.58526784",
"0.58526784",
"0.58526784",
"0.5830914",
"0.58278364",
"0.58274984",
"0.5824469",
"0.5804947",
"0.57943434",
"0.5790453",
"0.5789864",
"0.5768513",
"0.5767648",
"0.5767648",
"0.5767648",
"0.5760391",
"0.574946",
"0.574885",
"0.57408166",
"0.5740172",
"0.5734862",
"0.573223",
"0.5719488",
"0.5719488",
"0.5716367",
"0.5709697",
"0.57085574",
"0.5705594"
] | 0.6647721 | 15 |
AllocsPerRun returns the average number of allocations during calls to f. Although the return value has type float64, it will always be an integral value. To compute the number of allocations, the function will first be run once as a warmup. The average number of allocations over the specified number of runs will then be measured and returned. AllocsPerRun sets GOMAXPROCS to 1 during its measurement and will restore it before returning. | func AllocsPerRun(runs int, f func()) (avg float64) {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func AllocsPerRun(runs int, f func()) (avg float64)",
"func (r BenchmarkResult) AllocsPerOp() int64 {}",
"func processRun(nRequests int, concurrency int, ch chan time.Duration, fun func()) []float64 {\n\tresults := make([]float64, 0, nRequests)\n\n\tn := nRequests\n\tfor n > 0 {\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tif n > 0 {\n\t\t\t\tgo fun()\n\t\t\t\tn--\n\t\t\t}\n\t\t}\n\n\t\tfor i := 0; i < concurrency; i++ {\n\t\t\tif len(results) < nRequests {\n\t\t\t\tresults = append(results, float64(<-ch)/float64(time.Millisecond))\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}",
"func main() {\n\tfmt.Println(\"CPU core:\", runtime.NumCPU())\n\tfmt.Println(\"Goroutines:\", runtime.NumGoroutine())\n\n\tvar wg sync.WaitGroup //TODO: 0\n\tcounter := 0\n\tconst gs = 100\n\twg.Add(gs) //TODO: 1\n\n\tfor i := 0; i < gs; i++ {\n\t\tgo func() {\n\t\t\tv := counter\n\t\t\truntime.Gosched()\n\t\t\tv++\n\t\t\tcounter = v\n\t\t\twg.Done() //TODO: 2\n\t\t}()\n\t\tfmt.Println(\"Goroutines:\", runtime.NumGoroutine())\n\t}\n\twg.Wait() //TODO: 3\n\tfmt.Println(\"Counted:\", counter)\n\n}",
"func probaForSize(size int) int {\n\tvar count int\n\tfor i := 1; i < MAX_PROBA_ITER; i++ {\n\t\tif randomGroupHasDuplicates(size) {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count * 100 / MAX_PROBA_ITER\n}",
"func GetCpuCount() float64 {\n return float64(procStatCpuCount)\n}",
"func MBUsed() float64 {\n var m runtime.MemStats\n runtime.ReadMemStats(&m)\n return float64(m.TotalAlloc) / BytesPerMBF \n}",
"func (s *plannerStats) BytesPerFact() int {\n\tr := s.impl.BytesPerFact()\n\treturn s.track(r, \"BytesPerFact\")\n}",
"func GetRuntimeStats() (result map[string]float64) {\n\truntime.ReadMemStats(memStats)\n\n\tnow = time.Now()\n\tdiffTime = now.Sub(lastSampleTime).Seconds()\n\n\tresult = map[string]float64{\n\t\t\"alloc\": float64(memStats.Alloc),\n\t\t\"frees\": float64(memStats.Frees),\n\t\t\"gc.pause_total\": float64(memStats.PauseTotalNs) / nsInMs,\n\t\t\"heap.alloc\": float64(memStats.HeapAlloc),\n\t\t\"heap.objects\": float64(memStats.HeapObjects),\n\t\t\"mallocs\": float64(memStats.Mallocs),\n\t\t\"stack\": float64(memStats.StackInuse),\n\t}\n\n\tif lastPauseNs > 0 {\n\t\tpauseSinceLastSample = memStats.PauseTotalNs - lastPauseNs\n\t\tresult[\"gc.pause_per_second\"] = float64(pauseSinceLastSample) / nsInMs / diffTime\n\t}\n\n\tlastPauseNs = memStats.PauseTotalNs\n\n\tnbGc = memStats.NumGC - lastNumGc\n\tif lastNumGc > 0 {\n\t\tresult[\"gc.gc_per_second\"] = float64(nbGc) / diffTime\n\t}\n\n\t// Collect GC pauses\n\tif nbGc > 0 {\n\t\tif nbGc > 256 {\n\t\t\tnbGc = 256\n\t\t}\n\n\t\tvar i uint32\n\n\t\tfor i = 0; i < nbGc; i++ {\n\t\t\tidx := int((memStats.NumGC-uint32(i))+255) % 256\n\t\t\tpause := float64(memStats.PauseNs[idx])\n\t\t\tresult[\"gc.pause\"] = pause / nsInMs\n\t\t}\n\t}\n\n\t// Store last values\n\tlastNumGc = memStats.NumGC\n\tlastSampleTime = now\n\n\treturn result\n}",
"func BenchmarkFib(b *testing.B) {\n\tb.ReportAllocs()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t_ = Fib(15)\n\t}\n}",
"func (p *president) EvaluateAllocationRequests(resourceRequest map[shared.ClientID]shared.Resources, availCommonPool shared.Resources) shared.PresidentReturnContent {\n\tp.c.clientPrint(\"Evaluating allocations...\")\n\tvar avgResource, avgRequest shared.Resources\n\tvar allocSum, commonPoolThreshold, sumRequest float64\n\n\t// Make sure resource skew is greater than 1\n\tresourceSkew := math.Max(float64(p.c.params.resourcesSkew), 1)\n\n\tresources := make(map[shared.ClientID]shared.Resources)\n\tallocations := make(map[shared.ClientID]float64)\n\tallocWeights := make(map[shared.ClientID]float64)\n\tfinalAllocations := make(map[shared.ClientID]shared.Resources)\n\n\tfor island, req := range resourceRequest {\n\t\tsumRequest += float64(req)\n\t\tresources[island] = shared.Resources(float64(p.c.declaredResources[island]) * math.Pow(resourceSkew, ((100-p.c.trustScore[island])/100)))\n\t}\n\n\tp.c.clientPrint(\"Resource requests: %+v\\n\", resourceRequest)\n\tp.c.clientPrint(\"Their resource estimation: %+v\\n\", p.c.declaredResources)\n\tp.c.clientPrint(\"Our estimation: %+v\\n\", resources)\n\tp.c.clientPrint(\"Trust Scores: %+v\\n\", p.c.trustScore)\n\n\tavgRequest = findAvgNoTails(resourceRequest)\n\tavgResource = findAvgNoTails(resources)\n\n\tfor island, resource := range resources {\n\t\tallocations[island] = float64(avgRequest) + p.c.params.equity*(float64(avgResource-resource)+float64(resourceRequest[island]-avgRequest))\n\t\t// p.c.clientPrint(\"Allocation for island %v: %f\", island, allocations[island])\n\t\tif island == p.c.GetID() {\n\t\t\tallocations[island] += math.Max(float64(resourceRequest[island])-allocations[island]*p.c.params.selfishness, 0)\n\t\t} else {\n\t\t\tallocations[island] = math.Min(float64(resourceRequest[island]), allocations[island]) // to prevent overallocating\n\t\t\tallocations[island] = math.Max(allocations[island], 0)\n\t\t}\n\t}\n\n\t// Collect weights\n\tfor _, alloc := range allocations {\n\t\tallocSum += alloc\n\t}\n\t// Normalise\n\tfor island, alloc := range allocations {\n\t\tallocWeights[island] = alloc / allocSum\n\t}\n\t// p.c.clientPrint(\"Allocation wieghts: %+v\\n\", allocWeights)\n\n\tcommonPoolThreshold = math.Min(float64(availCommonPool)*(1.0-p.c.params.riskFactor), sumRequest)\n\tif p.c.params.saveCriticalIsland {\n\t\tfor island := range resourceRequest {\n\t\t\tif resources[island] < p.c.criticalThreshold {\n\t\t\t\tfinalAllocations[island] = shared.Resources(math.Max((allocWeights[island] * commonPoolThreshold), float64(p.c.criticalThreshold-resources[island])))\n\t\t\t} else {\n\t\t\t\tfinalAllocations[island] = 0\n\t\t\t}\n\t\t}\n\t}\n\n\tfor island := range resourceRequest {\n\t\tif finalAllocations[island] == 0 {\n\t\t\tif sumRequest < commonPoolThreshold {\n\t\t\t\tfinalAllocations[island] = shared.Resources(math.Max(allocWeights[island]*float64(sumRequest), 0))\n\t\t\t} else {\n\t\t\t\tfinalAllocations[island] = shared.Resources(math.Max(allocWeights[island]*commonPoolThreshold, 0))\n\t\t\t}\n\t\t}\n\t}\n\n\tp.c.clientPrint(\"Final allocations: %+v\\n\", finalAllocations)\n\n\t// Curently always evaluate, would there be a time when we don't want to?\n\treturn shared.PresidentReturnContent{\n\t\tContentType: shared.PresidentAllocation,\n\t\tResourceMap: finalAllocations,\n\t\tActionTaken: true,\n\t}\n}",
"func BenchmarkRun(b *testing.B) {\n\trng := rand.New(rand.NewSource(1))\n\tctx := context.Background()\n\trunGC := func(eng storage.Engine, old bool, spec randomRunGCTestSpec) (Info, error) {\n\t\trunGCFunc := Run\n\t\tif old {\n\t\t\trunGCFunc = runGCOld\n\t\t}\n\t\tsnap := eng.NewSnapshot()\n\t\tpolicy := zonepb.GCPolicy{TTLSeconds: spec.ttl}\n\t\treturn runGCFunc(ctx, spec.ds.desc(), snap, spec.now,\n\t\t\tCalculateThreshold(spec.now, policy), intentAgeThreshold,\n\t\t\tpolicy,\n\t\t\tNoopGCer{},\n\t\t\tfunc(ctx context.Context, intents []roachpb.Intent) error {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\tfunc(ctx context.Context, txn *roachpb.Transaction) error {\n\t\t\t\treturn nil\n\t\t\t})\n\t}\n\tmakeTest := func(old bool, spec randomRunGCTestSpec) func(b *testing.B) {\n\t\treturn func(b *testing.B) {\n\t\t\teng := storage.NewDefaultInMemForTesting()\n\t\t\tdefer eng.Close()\n\t\t\tms := spec.ds.dist(b.N, rng).setupTest(b, eng, *spec.ds.desc())\n\t\t\tb.SetBytes(int64(float64(ms.Total()) / float64(b.N)))\n\t\t\tb.ResetTimer()\n\t\t\t_, err := runGC(eng, old, spec)\n\t\t\tb.StopTimer()\n\t\t\trequire.NoError(b, err)\n\t\t}\n\t}\n\tspecsWithTTLs := func(\n\t\tds distSpec, now hlc.Timestamp, ttls []int32,\n\t) (specs []randomRunGCTestSpec) {\n\t\tfor _, ttl := range ttls {\n\t\t\tspecs = append(specs, randomRunGCTestSpec{\n\t\t\t\tds: ds,\n\t\t\t\tnow: now,\n\t\t\t\tttl: ttl,\n\t\t\t})\n\t\t}\n\t\treturn specs\n\t}\n\tts100 := hlc.Timestamp{WallTime: (100 * time.Second).Nanoseconds()}\n\tttls := []int32{0, 25, 50, 75, 100}\n\tspecs := specsWithTTLs(fewVersionsTinyRows, ts100, ttls)\n\tspecs = append(specs, specsWithTTLs(someVersionsMidSizeRows, ts100, ttls)...)\n\tspecs = append(specs, specsWithTTLs(lotsOfVersionsMidSizeRows, ts100, ttls)...)\n\tfor _, old := range []bool{true, false} {\n\t\tb.Run(fmt.Sprintf(\"old=%v\", old), func(b *testing.B) {\n\t\t\tfor _, spec := range specs {\n\t\t\t\tb.Run(fmt.Sprint(spec.ds), makeTest(old, spec))\n\t\t\t}\n\t\t})\n\t}\n}",
"func Benchmark_Sum(b *testing.B) {\n\tbenchTable := []struct {\n\t\tname string\n\t\tsize int\n\t}{\n\t\t{\"size100\", 100},\n\t\t{\"size100k\", 100 * 1024},\n\t\t{\"size100M\", 500 * 1024 * 1024},\n\t}\n\tfor _, v := range benchTable {\n\t\tb.Run(v.name, func(b *testing.B) {\n\t\t\tb.ReportAllocs()\n\t\t\tsum(v.size)\n\t\t})\n\t}\n}",
"func assignedBatchSize( p int ) int {\n\treturn int( math.Floor( 512 / ( float64(p) * float64(p) ) ) )\n}",
"func BenchmarkEfficientSum(b *testing.B) {\n\tb.ReportAllocs()\n\tfor i := 0; i < b.N; i++ {\n\t\tEfficientSum(i, i+1)\n\t}\n}",
"func BenchmarkFibOptimize(b *testing.B) {\n\tn := 15\n\tmemo := make([]int, n)\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t_ = FibOptimzie(n, memo)\n\t}\n}",
"func (p *Pool) AllocCount() int {\n\tp.RLock()\n\tdefer p.RUnlock()\n\treturn len(p.allocated)\n}",
"func RunBenchmark(name string, b *testing.B, f Func, n int) {\n\tb.Run(name, func(b *testing.B) {\n\t\tb.Logf(\"f(%d), loop (b.N) = %d\\n\", n, b.N)\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tf(n)\n\t\t}\n\t})\n}",
"func GetProcessCount(env string) int {\n\n\tenvName := \"PU_GO_MAX_PROCS\"\n\n\tif env != \"\" {\n\t\tenvName = env\n\t}\n\n\tpc, err := strconv.Atoi(os.Getenv(envName))\n\n\tif err != nil {\n\t\tpc = runtime.NumCPU()\n\t}\n\n\treturn int(pc)\n}",
"func Benchmark(t *testing.T, fn Func, sfn func() *optim.Solver, successfrac, avgeval float64) {\n\toptim.Rand = rand.New(rand.NewSource(BenchSeed))\n\tnrun := 44\n\tndrop := 2\n\tnkeep := nrun - 2*ndrop\n\tneval := 0\n\tniter := 0\n\tnsuccess := 0\n\tsum := 0.0\n\n\tsolvs := []*optim.Solver{}\n\tfor i := 0; i < nrun; i++ {\n\t\ts := sfn()\n\n\t\tfor s.Next() {\n\t\t\tif s.Best().Val < fn.Tol() {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif err := s.Err(); err != nil {\n\t\t\tt.Errorf(\"[%v:ERROR] %v\", fn.Name(), err)\n\t\t}\n\n\t\tsolvs = append(solvs, s)\n\t}\n\n\tsort.Sort(byevals(solvs))\n\n\tfor _, s := range solvs[ndrop : len(solvs)-ndrop] {\n\t\tneval += s.Neval()\n\t\tniter += s.Niter()\n\t\tsum += s.Best().Val\n\t\tif s.Best().Val < fn.Tol() {\n\t\t\tnsuccess++\n\t\t}\n\t}\n\n\tfrac := float64(nsuccess) / float64(nkeep)\n\tgotavg := float64(neval) / float64(nkeep)\n\n\tt.Logf(\"[%v] %v/%v runs, %v iters, %v evals, want < %.3f, averaged %.3f\", fn.Name(), nsuccess, nkeep, niter/nkeep, neval/nkeep, fn.Tol(), sum/float64(nkeep))\n\n\tif frac < successfrac {\n\t\tt.Errorf(\" FAIL: only %v/%v runs succeeded, want %v/%v\", nsuccess, nkeep, math.Ceil(successfrac*float64(nkeep)), nkeep)\n\t}\n\n\tif gotavg > avgeval {\n\t\tt.Errorf(\" FAIL: too many evaluations: want %v, averaged %.2f\", avgeval, gotavg)\n\t}\n}",
"func OptimizeNgenerations(ga *GA, generationsCnt uint) {\n\tfor i:=uint(0); i<generationsCnt; i++ {\n\t\tga.RunGeneration()\n\t}\n}",
"func main() {\n\tvar wg sync.WaitGroup\n\n\tincrementer := 0\n\tgs := 100\n\twg.Add(gs)\n\tvar m sync.Mutex\n\n\tfor i := 0; i < gs; i++ {\n\t\tgo func() {\n\t\t\tm.Lock()\n\t\t\tv := incrementer\n\t\t\truntime.Gosched()\n\t\t\tv++\n\t\t\tincrementer = v\n\t\t\tfmt.Println(incrementer)\n\t\t\tm.Unlock()\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tfmt.Println(\"end value:\", incrementer)\n}",
"func alloc() uint64 {\n\tvar stats runtime.MemStats\n\truntime.GC()\n\truntime.ReadMemStats(&stats)\n\t// return stats.Alloc - uint64(unsafe.Sizeof(hs[0]))*uint64(cap(hs))\n\treturn stats.Alloc\n}",
"func RuntimeAllocSize(size int64) int64 {\n\treturn size\n}",
"func TestAllocGarbageCollector_MakeRoomFor_MaxAllocs(t *testing.T) {\n\tci.Parallel(t)\n\n\tconst maxAllocs = 6\n\trequire := require.New(t)\n\n\tserver, serverAddr, cleanupS := testServer(t, nil)\n\tdefer cleanupS()\n\ttestutil.WaitForLeader(t, server.RPC)\n\n\tclient, cleanup := TestClient(t, func(c *config.Config) {\n\t\tc.GCMaxAllocs = maxAllocs\n\t\tc.GCDiskUsageThreshold = 100\n\t\tc.GCInodeUsageThreshold = 100\n\t\tc.GCParallelDestroys = 1\n\t\tc.GCInterval = time.Hour\n\t\tc.RPCHandler = server\n\t\tc.Servers = []string{serverAddr}\n\t\tc.ConsulConfig.ClientAutoJoin = new(bool)\n\t})\n\tdefer cleanup()\n\twaitTilNodeReady(client, t)\n\n\tjob := mock.Job()\n\tjob.TaskGroups[0].Count = 1\n\tjob.TaskGroups[0].Tasks[0].Driver = \"mock_driver\"\n\tjob.TaskGroups[0].Tasks[0].Config = map[string]interface{}{\n\t\t\"run_for\": \"30s\",\n\t}\n\n\tindex := uint64(98)\n\tnextIndex := func() uint64 {\n\t\tindex++\n\t\treturn index\n\t}\n\n\tupsertJobFn := func(server *nomad.Server, j *structs.Job) {\n\t\tstate := server.State()\n\t\trequire.NoError(state.UpsertJob(structs.MsgTypeTestSetup, nextIndex(), j))\n\t\trequire.NoError(state.UpsertJobSummary(nextIndex(), mock.JobSummary(j.ID)))\n\t}\n\n\t// Insert the Job\n\tupsertJobFn(server, job)\n\n\tupsertAllocFn := func(server *nomad.Server, a *structs.Allocation) {\n\t\tstate := server.State()\n\t\trequire.NoError(state.UpsertAllocs(structs.MsgTypeTestSetup, nextIndex(), []*structs.Allocation{a}))\n\t}\n\n\tupsertNewAllocFn := func(server *nomad.Server, j *structs.Job) *structs.Allocation {\n\t\talloc := mock.Alloc()\n\t\talloc.Job = j\n\t\talloc.JobID = j.ID\n\t\talloc.NodeID = client.NodeID()\n\n\t\tupsertAllocFn(server, alloc)\n\n\t\treturn alloc.Copy()\n\t}\n\n\tvar allocations []*structs.Allocation\n\n\t// Fill the node with allocations\n\tfor i := 0; i < maxAllocs; i++ {\n\t\tallocations = append(allocations, upsertNewAllocFn(server, job))\n\t}\n\n\t// Wait until the allocations are ready\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := len(client.getAllocRunners())\n\n\t\treturn ar == maxAllocs, fmt.Errorf(\"Expected %d allocs, got %d\", maxAllocs, ar)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not start: %v\", err)\n\t})\n\n\t// Mark the first three as terminal\n\tfor i := 0; i < 3; i++ {\n\t\tallocations[i].DesiredStatus = structs.AllocDesiredStatusStop\n\t\tupsertAllocFn(server, allocations[i].Copy())\n\t}\n\n\t// Wait until the allocations are stopped\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tstopped := 0\n\t\tfor _, r := range ar {\n\t\t\tif r.Alloc().TerminalStatus() {\n\t\t\t\tstopped++\n\t\t\t}\n\t\t}\n\n\t\treturn stopped == 3, fmt.Errorf(\"Expected %d terminal allocs, got %d\", 3, stopped)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not terminate: %v\", err)\n\t})\n\n\t// Upsert a new allocation\n\t// This does not get appended to `allocations` as we do not use them again.\n\tupsertNewAllocFn(server, job)\n\n\t// A single allocation should be GC'd\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tdestroyed := 0\n\t\tfor _, r := range ar {\n\t\t\tif r.IsDestroyed() {\n\t\t\t\tdestroyed++\n\t\t\t}\n\t\t}\n\n\t\treturn destroyed == 1, fmt.Errorf(\"Expected %d gc'd ars, got %d\", 1, destroyed)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not get GC'd: %v\", err)\n\t})\n\n\t// Upsert a new allocation\n\t// This does not get appended to `allocations` as we do not use them again.\n\tupsertNewAllocFn(server, 
job)\n\n\t// 2 allocations should be GC'd\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tdestroyed := 0\n\t\tfor _, r := range ar {\n\t\t\tif r.IsDestroyed() {\n\t\t\t\tdestroyed++\n\t\t\t}\n\t\t}\n\n\t\treturn destroyed == 2, fmt.Errorf(\"Expected %d gc'd ars, got %d\", 2, destroyed)\n\t}, func(err error) {\n\t\tt.Fatalf(\"Allocs did not get GC'd: %v\", err)\n\t})\n\n\t// check that all 8 get run eventually\n\ttestutil.WaitForResult(func() (bool, error) {\n\t\tar := client.getAllocRunners()\n\t\tif len(ar) != 8 {\n\t\t\treturn false, fmt.Errorf(\"expected 8 ARs, found %d: %v\", len(ar), ar)\n\t\t}\n\t\treturn true, nil\n\t}, func(err error) {\n\t\trequire.NoError(err)\n\t})\n}",
"func (s *ScaleDecider) CalculateNumInstancesToLaunch() int {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\treturn mathx.Max(0, mathx.Clamp(\n\t\ts.minInstanceNum-len(s.instances),\n\t\ts.desiredNewInstances-len(s.recentlyLaunched),\n\t\ts.maxInstanceNum-len(s.instances),\n\t))\n}",
"func computeConcurrencyAllocation(requiredSum int, classes []allocProblemItem) ([]float64, float64, error) {\n\tif requiredSum < 0 {\n\t\treturn nil, 0, errors.New(\"negative sums are not supported\")\n\t}\n\trequiredSumF := float64(requiredSum)\n\tvar lowSum, highSum, targetSum float64\n\tubRange := minMax{min: float64(math.MaxFloat32)}\n\tlbRange := minMax{min: float64(math.MaxFloat32)}\n\trelativeItems := make([]relativeAllocItem, len(classes))\n\tfor idx, item := range classes {\n\t\ttarget := item.target\n\t\tif item.lowerBound < 0 {\n\t\t\treturn nil, 0, fmt.Errorf(\"lower bound %d is %v but negative lower bounds are not allowed\", idx, item.lowerBound)\n\t\t}\n\t\tif target < item.lowerBound {\n\t\t\treturn nil, 0, fmt.Errorf(\"target %d is %v, which is below its lower bound of %v\", idx, target, item.lowerBound)\n\t\t}\n\t\tif item.upperBound < item.lowerBound {\n\t\t\treturn nil, 0, fmt.Errorf(\"upper bound %d is %v but should not be less than the lower bound %v\", idx, item.upperBound, item.lowerBound)\n\t\t}\n\t\tif target < MinTarget {\n\t\t\t// tweak this to a non-zero value so avoid dividing by zero\n\t\t\ttarget = MinTarget\n\t\t}\n\t\tlowSum += item.lowerBound\n\t\thighSum += item.upperBound\n\t\ttargetSum += target\n\t\trelativeItem := relativeAllocItem{\n\t\t\ttarget: target,\n\t\t\trelativeLowerBound: item.lowerBound / target,\n\t\t\trelativeUpperBound: item.upperBound / target,\n\t\t}\n\t\tubRange.note(relativeItem.relativeUpperBound)\n\t\tlbRange.note(relativeItem.relativeLowerBound)\n\t\trelativeItems[idx] = relativeItem\n\t}\n\tif lbRange.max > 1 {\n\t\treturn nil, 0, fmt.Errorf(\"lbRange.max-1=%v, which is impossible because lbRange.max can not be greater than 1\", lbRange.max-1)\n\t}\n\tif lowSum-requiredSumF > epsilon {\n\t\treturn nil, 0, fmt.Errorf(\"lower bounds sum to %v, which is higher than the required sum of %v\", lowSum, requiredSum)\n\t}\n\tif requiredSumF-highSum > epsilon {\n\t\treturn nil, 0, fmt.Errorf(\"upper bounds sum to %v, which is lower than the required sum of %v\", highSum, requiredSum)\n\t}\n\tans := make([]float64, len(classes))\n\tif requiredSum == 0 {\n\t\treturn ans, 0, nil\n\t}\n\tif lowSum-requiredSumF > -epsilon { // no wiggle room, constrained from below\n\t\tfor idx, item := range classes {\n\t\t\tans[idx] = item.lowerBound\n\t\t}\n\t\treturn ans, lbRange.min, nil\n\t}\n\tif requiredSumF-highSum > -epsilon { // no wiggle room, constrained from above\n\t\tfor idx, item := range classes {\n\t\t\tans[idx] = item.upperBound\n\t\t}\n\t\treturn ans, ubRange.max, nil\n\t}\n\t// Now we know the solution is a unique fairProp in [lbRange.min, ubRange.max].\n\t// See if the solution does not run into any bounds.\n\tfairProp := requiredSumF / targetSum\n\tif lbRange.max <= fairProp && fairProp <= ubRange.min { // no bounds matter\n\t\tfor idx := range classes {\n\t\t\tans[idx] = relativeItems[idx].target * fairProp\n\t\t}\n\t\treturn ans, fairProp, nil\n\t}\n\t// Sadly, some bounds matter.\n\t// We find the solution by sorting the bounds and considering progressively\n\t// higher values of fairProp, starting from lbRange.min.\n\trap := (&relativeAllocProblem{items: relativeItems}).initIndices()\n\tsumSoFar := lowSum\n\tfairProp = lbRange.min\n\tvar sensitiveTargetSum, deltaSensitiveTargetSum float64\n\tvar numSensitiveClasses, deltaSensitiveClasses int\n\tvar nextIdx int\n\t// `nextIdx` is the next `rap` index to consider.\n\t// `sumSoFar` is what the allocs would sum to if the current\n\t// value of `fairProp` solves the problem.\n\t// If the 
current value of fairProp were the answer then\n\t// `sumSoFar == requiredSum`.\n\t// Otherwise the next increment in fairProp involves changing the allocations\n\t// of `numSensitiveClasses` classes whose targets sum to `sensitiveTargetSum`;\n\t// for the other classes, an upper or lower bound has applied and will continue to apply.\n\t// The last increment of nextIdx calls for adding `deltaSensitiveClasses`\n\t// to `numSensitiveClasses` and adding `deltaSensitiveTargetSum` to `sensitiveTargetSum`.\n\tfor sumSoFar < requiredSumF {\n\t\t// There might be more than one bound that is equal to the current value\n\t\t// of fairProp; find all of them because they will all be relevant to\n\t\t// the next change in fairProp.\n\t\t// Set nextBound to the next bound that is NOT equal to fairProp,\n\t\t// and advance nextIdx to the index of that bound.\n\t\tvar nextBound float64\n\t\tfor {\n\t\t\tsensitiveTargetSum += deltaSensitiveTargetSum\n\t\t\tnumSensitiveClasses += deltaSensitiveClasses\n\t\t\tif nextIdx >= rap.Len() {\n\t\t\t\treturn nil, 0, fmt.Errorf(\"impossible: ran out of bounds to consider in bound-constrained problem\")\n\t\t\t}\n\t\t\tvar itemIdx int\n\t\t\tvar lower bool\n\t\t\tnextBound, itemIdx, lower = rap.decode(nextIdx)\n\t\t\tif lower {\n\t\t\t\tdeltaSensitiveClasses = 1\n\t\t\t\tdeltaSensitiveTargetSum = rap.items[itemIdx].target\n\t\t\t} else {\n\t\t\t\tdeltaSensitiveClasses = -1\n\t\t\t\tdeltaSensitiveTargetSum = -rap.items[itemIdx].target\n\t\t\t}\n\t\t\tnextIdx++\n\t\t\tif nextBound > fairProp {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\t// fairProp can increase to nextBound without passing any intermediate bounds.\n\t\tif numSensitiveClasses == 0 {\n\t\t\t// No classes are affected by the next range of fairProp; skip right past it\n\t\t\tfairProp = nextBound\n\t\t\tcontinue\n\t\t}\n\t\t// See whether fairProp can increase to the solution before passing the next bound.\n\t\tdeltaFairProp := (requiredSumF - sumSoFar) / sensitiveTargetSum\n\t\tnextProp := fairProp + deltaFairProp\n\t\tif nextProp <= nextBound {\n\t\t\tfairProp = nextProp\n\t\t\tbreak\n\t\t}\n\t\t// No, fairProp has to increase above nextBound\n\t\tsumSoFar += (nextBound - fairProp) * sensitiveTargetSum\n\t\tfairProp = nextBound\n\t}\n\tfor idx, item := range classes {\n\t\tans[idx] = math.Max(item.lowerBound, math.Min(item.upperBound, fairProp*relativeItems[idx].target))\n\t}\n\treturn ans, fairProp, nil\n}",
"func TestProfileIncrementConfiguration(t *testing.T) {\n\n\tt.Skip(\"Skipping TestProfileIncrementConfiguration\")\n\n\tvar (\n\t\tidx, rps int\n\t\tpinnedFuncNum int\n\t\tstartVMID int\n\t\tservedTh uint64\n\t\tisSyncOffload bool = true\n\t\tmetrFile = \"bench.csv\"\n\t\timages = getImages(t)\n\t\tmetrics = make([]map[string]float64, *maxVMNum / *vmIncrStep)\n\t)\n\tlog.SetLevel(log.InfoLevel)\n\n\tcheckInputValidation(t)\n\n\tcreateResultsDir()\n\n\tfuncPool = NewFuncPool(!isSaveMemoryConst, servedTh, pinnedFuncNum, isTestModeConst)\n\n\tcores, err := cpuNum()\n\trequire.NoError(t, err, \"Cannot get the number of CPU\")\n\tfor vmNum := *vmIncrStep; vmNum <= *maxVMNum; vmNum += *vmIncrStep {\n\t\tif vmNum < cores {\n\t\t\trps = calculateRPS(vmNum)\n\t\t} else {\n\t\t\trps = calculateRPS(cores)\n\t\t}\n\n\t\tlog.Infof(\"vmNum: %d, Target RPS: %d\", vmNum, rps)\n\n\t\tbootVMs(t, images, startVMID, vmNum)\n\t\tmetrics[idx] = loadAndProfile(t, images, vmNum, rps, isSyncOffload)\n\t\tstartVMID = vmNum\n\t\tidx++\n\t}\n\n\tdumpMetrics(t, metrics, metrFile)\n\tprofile.PlotLineCharts(*vmIncrStep, *benchDir, metrFile, \"the number of tenants\")\n\tprofile.PlotStackCharts(*vmIncrStep, \"profile/toplev_metrics.json\", *benchDir, metrFile, \"the number of tenants\")\n\n\ttearDownVMs(t, images, startVMID, isSyncOffload)\n}",
"func (_Erc777 *Erc777Caller) Granularity(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _Erc777.contract.Call(opts, &out, \"granularity\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func (r BenchmarkResult) AllocedBytesPerOp() int64 {}",
"func (p *Pipeline) NofBatches(n int) (nofBatches int) {\n\tif n < 1 {\n\t\tnofBatches = p.nofBatches\n\t\tif nofBatches < 1 {\n\t\t\tnofBatches = 2 * runtime.GOMAXPROCS(0)\n\t\t\tp.nofBatches = nofBatches\n\t\t}\n\t} else {\n\t\tnofBatches = n\n\t\tp.nofBatches = n\n\t}\n\treturn\n}",
"func (p *Pipeline) NofBatches(n int) (nofBatches int) {\n\tif n < 1 {\n\t\tnofBatches = p.nofBatches\n\t\tif nofBatches < 1 {\n\t\t\tnofBatches = 2 * runtime.GOMAXPROCS(0)\n\t\t\tp.nofBatches = nofBatches\n\t\t}\n\t} else {\n\t\tnofBatches = n\n\t\tp.nofBatches = n\n\t}\n\treturn\n}",
"func allocs(c echo.Context) error {\n\tpprof.Handler(\"allocs\").ServeHTTP(c.Response(), c.Request())\n\treturn nil\n}",
"func init() {\n\tgrs = 2\n\truntime.GOMAXPROCS(runtime.NumCPU())\n}",
"func (w Processor) NumWorkers() int64 {\n\treturn int64(len(w.limiter))\n}",
"func main() {\n\tvar counter uint64\n\tvar wg sync.WaitGroup\n\tconst gs = 50\n\n\twg.Add(50)\n\n\t//number of goroutines\n\tfor i := 0; i < gs; i++ {\n\t\tgo func() {\n\t\t\tatomic.AddUint64(&counter, 1)\n\t\t\tr := atomic.LoadUint64(&counter)\n\t\t\tfmt.Println(r)\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\tfmt.Println(counter) // should be 50\n}",
"func OptimalWorkerCountInRange(min int, max int) int {\n\tcores := runtime.NumCPU()\n\tif cores < min {\n\t\treturn min\n\t}\n\tif cores > max {\n\t\treturn max\n\t}\n\treturn cores\n}",
"func GetConcurrencyCount(env string) int {\n\n\tenvName := \"PU_GO_MAX_CONCURRENCY\"\n\n\tif env != \"\" {\n\t\tenvName = env\n\t}\n\n\tpc, err := strconv.Atoi(os.Getenv(envName))\n\n\tif err != nil {\n\t\tpc = runtime.NumCPU()\n\t}\n\n\treturn int(pc)\n}",
"func getTotalAllocatableMemory(f *framework.Framework) *resource.Quantity {\n\tselector := labels.Set{\"beta.kubernetes.io/os\": \"windows\"}.AsSelector()\n\tnodeList, err := f.ClientSet.CoreV1().Nodes().List(metav1.ListOptions{\n\t\tLabelSelector: selector.String(),\n\t})\n\tframework.ExpectNoError(err)\n\n\tginkgo.By(\"Summing allocatable memory across all agent nodes\")\n\n\ttotalAllocatable := resource.NewQuantity(0, resource.BinarySI)\n\n\tfor _, node := range nodeList.Items {\n\t\tstatus := node.Status\n\n\t\ttotalAllocatable.Add(status.Allocatable[v1.ResourceMemory])\n\t}\n\n\treturn totalAllocatable\n}",
"func Stats() (AllocatedSlabs int64, GrabbedChunks int64, ReleasedChunks int64, MemoryAllocated int64, MemoryInUse int64) {\n\tmemoryMutex.Lock()\n\tdefer memoryMutex.Unlock()\n\n\tAllocatedSlabs = int64(allocatedSlabs)\n\tGrabbedChunks = grabbedChunks\n\tReleasedChunks = releasedChunks\n\tMemoryInUse = (GrabbedChunks - ReleasedChunks) * int64(ChunkSize)\n\tMemoryAllocated = AllocatedSlabs * int64(SlabSize)\n\treturn\n}",
"func (_Erc777 *Erc777CallerSession) Granularity() (*big.Int, error) {\n\treturn _Erc777.Contract.Granularity(&_Erc777.CallOpts)\n}",
"func run(name string, b *testing.B, count int, fn func(buf *Buffer, r *Reader)) {\r\n\tb.Run(name, func(b *testing.B) {\r\n\t\tbuf := NewBuffer(count * 20)\r\n\t\tr := NewReader()\r\n\t\tb.ReportAllocs()\r\n\t\tb.ResetTimer()\r\n\t\tfor n := 0; n < b.N; n++ {\r\n\t\t\tbuf.Reset(\"test\")\r\n\t\t\tfn(buf, r)\r\n\t\t}\r\n\t})\r\n}",
"func calcRuntime(t time.Time, f string) {\n\t//now := time.Now()\n\t//log.Printf(\"%s cost %f millisecond\\n\", f, now.Sub(t).Seconds() * 1000)\n\t//log.Printf()\n}",
"func currMemoryAlloc() int {\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\treturn int(m.Alloc)\n}",
"func GetRunsCount() int64 {\n\tcount := runnerStats.Get(runsExpvarKey)\n\tif count == nil {\n\t\treturn 0\n\t}\n\treturn count.(*expvar.Int).Value()\n}",
"func (c *HostMetricCollector) Run() (HostMetrics, error) {\n\tcpuTimes, err := cpu.Times(false)\n\tif err != nil {\n\t\t// note: can't happen on Linux. gopsutil doesn't\n\t\t// return an error\n\t\treturn HostMetrics{}, fmt.Errorf(\"cpu.Times() failed: %s\", err)\n\t}\n\tif len(cpuTimes) == 0 {\n\t\t// possible with hardware failure\n\t\treturn HostMetrics{}, fmt.Errorf(\"cpu.Times() returns no cpus\")\n\t}\n\tt := cpuTimes[0]\n\tjiffy := t.Total()\n\ttoPercent := 100 / (jiffy - c.lastJiffy)\n\n\tlastTimes := c.lastTimes\n\tc.lastJiffy = jiffy\n\tc.lastTimes = t\n\n\tconst mbSize float64 = 1024 * 1024\n\tvmem, err := mem.VirtualMemory()\n\tif err != nil {\n\t\t// only possible if can't parse numbers in /proc/meminfo\n\t\t// that would be massive failure\n\t\treturn HostMetrics{}, fmt.Errorf(\"mem.VirtualMemory() failed: %s:\", err)\n\t}\n\n\treturn HostMetrics{\n\t\tCPUUser: ((t.User + t.Nice) - (lastTimes.User + lastTimes.Nice)) * toPercent,\n\t\tCPUSystem: ((t.System + t.Irq + t.Softirq) - (lastTimes.System + lastTimes.Irq + lastTimes.Softirq)) * toPercent,\n\t\tCPUIowait: (t.Iowait - lastTimes.Iowait) * toPercent,\n\t\tCPUIdle: (t.Idle - lastTimes.Idle) * toPercent,\n\t\tCPUStolen: (t.Steal - lastTimes.Steal) * toPercent,\n\t\tCPUGuest: (t.Guest - lastTimes.Guest) * toPercent,\n\t\tMemTotal: float64(vmem.Total) / mbSize,\n\t\tMemFree: float64(vmem.Free) / mbSize,\n\t\tMemUsed: float64(vmem.Total-vmem.Free) / mbSize,\n\t\tMemUsable: float64(vmem.Available) / mbSize,\n\t\tMemPctUsable: float64(100-vmem.UsedPercent) / 100,\n\t}, nil\n}",
"func (r BenchmarkResult) NsPerOp() int64 {}",
"func TestPageAllocScavenge(t *testing.T) {\n\tif GOOS == \"openbsd\" && testing.Short() {\n\t\tt.Skip(\"skipping because virtual memory is limited; see #36210\")\n\t}\n\ttype test struct {\n\t\trequest, expect uintptr\n\t}\n\tminPages := PhysPageSize / PageSize\n\tif minPages < 1 {\n\t\tminPages = 1\n\t}\n\ttype setup struct {\n\t\tbeforeAlloc map[ChunkIdx][]BitRange\n\t\tbeforeScav map[ChunkIdx][]BitRange\n\t\texpect []test\n\t\tafterScav map[ChunkIdx][]BitRange\n\t}\n\ttests := map[string]setup{\n\t\t\"AllFreeUnscavExhaust\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{^uintptr(0), 3 * PallocChunkPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 2: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t\t\"NoneFreeUnscavExhaust\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t\tBaseChunkIdx + 2: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{^uintptr(0), 0},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 2: {},\n\t\t\t},\n\t\t},\n\t\t\"ScavHighestPageFirst\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{1, minPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(minPages)}},\n\t\t\t},\n\t\t},\n\t\t\"ScavMultiple\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t\t\"ScavMultiple2\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 1: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{2 * minPages * PageSize, 2 * minPages * PageSize},\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t\t{minPages * PageSize, minPages * PageSize},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 1: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t\t\"ScavDiscontiguous\": {\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 0xe: 
{},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{uint(minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t\tBaseChunkIdx + 0xe: {{uint(2 * minPages), PallocChunkPages - uint(2*minPages)}},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{2 * minPages * PageSize, 2 * minPages * PageSize},\n\t\t\t\t{^uintptr(0), 2 * minPages * PageSize},\n\t\t\t\t{^uintptr(0), 0},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 0xe: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t},\n\t}\n\t// Disable these tests on iOS since we have a small address space.\n\t// See #46860.\n\tif PageAlloc64Bit != 0 && goos.IsIos == 0 {\n\t\ttests[\"ScavAllVeryDiscontiguous\"] = setup{\n\t\t\tbeforeAlloc: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 0x1000: {},\n\t\t\t},\n\t\t\tbeforeScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {},\n\t\t\t\tBaseChunkIdx + 0x1000: {},\n\t\t\t},\n\t\t\texpect: []test{\n\t\t\t\t{^uintptr(0), 2 * PallocChunkPages * PageSize},\n\t\t\t\t{^uintptr(0), 0},\n\t\t\t},\n\t\t\tafterScav: map[ChunkIdx][]BitRange{\n\t\t\t\tBaseChunkIdx: {{0, PallocChunkPages}},\n\t\t\t\tBaseChunkIdx + 0x1000: {{0, PallocChunkPages}},\n\t\t\t},\n\t\t}\n\t}\n\tfor name, v := range tests {\n\t\tv := v\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tb := NewPageAlloc(v.beforeAlloc, v.beforeScav)\n\t\t\tdefer FreePageAlloc(b)\n\n\t\t\tfor iter, h := range v.expect {\n\t\t\t\tif got := b.Scavenge(h.request); got != h.expect {\n\t\t\t\t\tt.Fatalf(\"bad scavenge #%d: want %d, got %d\", iter+1, h.expect, got)\n\t\t\t\t}\n\t\t\t}\n\t\t\twant := NewPageAlloc(v.beforeAlloc, v.afterScav)\n\t\t\tdefer FreePageAlloc(want)\n\n\t\t\tcheckPageAlloc(t, want, b)\n\t\t})\n\t}\n}",
"func LogStats() {\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\tlog.Infof(\"Alloc=%v TotalAlloc=%v Sys=%v NumGC=%v Goroutines=%d\",\n\t\tm.Alloc/1024, m.TotalAlloc/1024, m.Sys/1024, m.NumGC, runtime.NumGoroutine())\n\n}",
"func BenchmarkNextAnnualProvisions(b *testing.B) {\n\tb.ReportAllocs()\n\tminter := InitialMinter(sdk.NewDecWithPrec(1, 1))\n\tparams := DefaultParams()\n\ttotalSupply := sdk.NewInt(100000000000000)\n\n\t// run the NextAnnualProvisions function b.N times\n\tfor n := 0; n < b.N; n++ {\n\t\tminter.NextAnnualProvisions(params, totalSupply)\n\t}\n\n}",
"func computeDefaultEventsRingBufferSize() uint32 {\n\tnumCPU, err := utils.NumCPU()\n\tif err != nil {\n\t\tnumCPU = 1\n\t}\n\n\tif numCPU <= 16 {\n\t\treturn uint32(8 * 256 * os.Getpagesize())\n\t} else if numCPU <= 64 {\n\t\treturn uint32(16 * 256 * os.Getpagesize())\n\t}\n\n\treturn uint32(32 * 256 * os.Getpagesize())\n}",
"func NewRuntimeStats(scope Scope) StatGenerator {\n\treturn runtimeStats{\n\t\talloc: scope.NewGauge(\"alloc\"),\n\t\ttotalAlloc: scope.NewCounter(\"totalAlloc\"),\n\t\tsys: scope.NewGauge(\"sys\"),\n\t\tlookups: scope.NewCounter(\"lookups\"),\n\t\tmallocs: scope.NewCounter(\"mallocs\"),\n\t\tfrees: scope.NewCounter(\"frees\"),\n\n\t\theapAlloc: scope.NewGauge(\"heapAlloc\"),\n\t\theapSys: scope.NewGauge(\"heapSys\"),\n\t\theapIdle: scope.NewGauge(\"heapIdle\"),\n\t\theapInuse: scope.NewGauge(\"heapInuse\"),\n\t\theapReleased: scope.NewGauge(\"heapReleased\"),\n\t\theapObjects: scope.NewGauge(\"heapObjects\"),\n\n\t\tnextGC: scope.NewGauge(\"nextGC\"),\n\t\tlastGC: scope.NewGauge(\"lastGC\"),\n\t\tpauseTotalNs: scope.NewCounter(\"pauseTotalNs\"),\n\t\tnumGC: scope.NewCounter(\"numGC\"),\n\t\tgcCPUPercent: scope.NewGauge(\"gcCPUPercent\"),\n\n\t\tnumGoroutine: scope.NewGauge(\"numGoroutine\"),\n\t}\n}",
"func CalculateSandboxSizing(spec *specs.Spec) (numCPU, memSizeMB uint32) {\n\tvar memory, quota int64\n\tvar period uint64\n\tvar err error\n\n\tif spec == nil || spec.Annotations == nil {\n\t\treturn 0, 0\n\t}\n\n\t// For each annotation, if it isn't defined, or if there's an error in parsing, we'll log\n\t// a warning and continue the calculation with 0 value. We expect values like,\n\t// Annotations[SandboxMem] = \"1048576\"\n\t// Annotations[SandboxCPUPeriod] = \"100000\"\n\t// Annotations[SandboxCPUQuota] = \"220000\"\n\t// ... to result in VM resources of 1 (MB) for memory, and 3 for CPU (2200 mCPU rounded up to 3).\n\tannotation, ok := spec.Annotations[ctrAnnotations.SandboxCPUPeriod]\n\tif ok {\n\t\tperiod, err = strconv.ParseUint(annotation, 10, 64)\n\t\tif err != nil {\n\t\t\tociLog.Warningf(\"sandbox-sizing: failure to parse SandboxCPUPeriod: %s\", annotation)\n\t\t\tperiod = 0\n\t\t}\n\t}\n\n\tannotation, ok = spec.Annotations[ctrAnnotations.SandboxCPUQuota]\n\tif ok {\n\t\tquota, err = strconv.ParseInt(annotation, 10, 64)\n\t\tif err != nil {\n\t\t\tociLog.Warningf(\"sandbox-sizing: failure to parse SandboxCPUQuota: %s\", annotation)\n\t\t\tquota = 0\n\t\t}\n\t}\n\n\tannotation, ok = spec.Annotations[ctrAnnotations.SandboxMem]\n\tif ok {\n\t\tmemory, err = strconv.ParseInt(annotation, 10, 64)\n\t\tif err != nil {\n\t\t\tociLog.Warningf(\"sandbox-sizing: failure to parse SandboxMem: %s\", annotation)\n\t\t\tmemory = 0\n\t\t}\n\t}\n\n\treturn calculateVMResources(period, quota, memory)\n}",
"func (_Erc777 *Erc777Session) Granularity() (*big.Int, error) {\n\treturn _Erc777.Contract.Granularity(&_Erc777.CallOpts)\n}",
"func BenchmarkCreateGoroutinesSingle(b *testing.B) {\n\t// Since we are interested in stealing behavior, warm the scheduler to\n\t// get all the Ps running first.\n\twarmupScheduler(runtime.GOMAXPROCS(0))\n\tb.ResetTimer()\n\n\tvar wg sync.WaitGroup\n\twg.Add(b.N)\n\tfor i := 0; i < b.N; i++ {\n\t\tgo func() {\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n}",
"func RunFunc(benchmarkFunc func() int, duration time.Duration, c int) *Result {\n\tworkers := make([]Worker, c)\n\tfor i := 0; i < c; i++ {\n\t\tworkers[i] = &funcWorker{ID: i, benchmarkFunc: benchmarkFunc}\n\t}\n\treturn Run(workers, duration)\n}",
"func calculateScopes(chunksize int64) (map[int]scope, error) {\n\tif chunksize < int64(maxBufferSize) {\n\t\tbufferSize = chunksize\n\t}\n\n\tscopes := make(map[int]scope)\n\t// calculate start and end of each chunk\n\tfor i := 0; i < runtime.NumCPU(); i++ {\n\t\tstart := int64(i) * chunksize\n\t\tend := start + chunksize\n\t\tscopes[i] = scope{start, end}\n\t}\n\treturn scopes, nil\n}",
"func calculateRequestedCPU(pods []*apiv1.Pod) int64 {\n\tvar CPURequests int64\n\tfor _, pod := range pods {\n\t\tCPURequests += getPodCPURequests(pod)\n\t}\n\treturn CPURequests\n}",
"func (p *Pool) Stats() map[string]int {\n\tfreeJobQueueSpaces := p.maxJobQueue - len(p.jobQueue)\n\n\treturn map[string]int{\n\t\t\"free_workers\": len(p.workerPool),\n\t\t\"free_job_queue_spaces\": freeJobQueueSpaces,\n\t}\n}",
"func RecordRuntimesCount(count int) {\n\tstats.Record(context.Background(), runtimesTotal.M(int64(count)))\n}",
"func WinCount(minerPower abi.StoragePower, totalPower abi.StoragePower, random float64) uint64 {\n\tE := big2.NewRat(5, 1)\n\tlambdaR := new(big2.Rat)\n\tlambdaR.SetFrac(minerPower.Int, totalPower.Int)\n\tlambdaR.Mul(lambdaR, E)\n\tlambda, _ := lambdaR.Float64()\n\n\trhs := 1 - poissonPMF(lambda, 0)\n\n\twinCount := uint64(0)\n\tfor rhs > random {\n\t\twinCount++\n\t\trhs -= poissonPMF(lambda, winCount)\n\t}\n\treturn winCount\n}",
"func CollectRuntimeMemStats(statsd scopedstatsd.Client, memstatsCurrent *runtime.MemStats, memstatsPrev *runtime.MemStats, tags []string) {\n\t// Collect number of bytes obtained from system.\n\tstatsd.Gauge(\"mem.sys_bytes\", float64(memstatsCurrent.Sys), tags, 1)\n\n\t// Collect number of pointer lookups.\n\tstatsd.Gauge(\"mem.pointer_lookups\", float64(memstatsCurrent.Lookups), tags, 1)\n\n\t// Collect increased heap objects allocated compared to last flush.\n\tstatsd.Count(\"mem.mallocs_total\", int64(memstatsCurrent.Mallocs-memstatsPrev.Mallocs), tags, 1)\n\n\t// Collect increased heap objects freed compared to last flush.\n\tstatsd.Count(\"mem.frees_total\", int64(memstatsCurrent.Frees-memstatsPrev.Frees), tags, 1)\n\n\t// Collect number of mallocs.\n\tstatsd.Gauge(\"mem.mallocs_count\", float64(memstatsCurrent.Mallocs-memstatsCurrent.Frees), tags, 1)\n\n\t// Collect number of bytes newly allocated for heap objects compared to last flush.\n\tstatsd.Count(\"mem.heap_alloc_bytes_total\", int64(memstatsCurrent.TotalAlloc-memstatsPrev.TotalAlloc), tags, 1)\n\n\t// Collect number of heap bytes allocated and still in use.\n\tstatsd.Gauge(\"mem.heap_alloc_bytes\", float64(memstatsCurrent.HeapAlloc), tags, 1)\n\n\t// Collect number of heap bytes obtained from system.\n\tstatsd.Gauge(\"mem.heap_sys_bytes\", float64(memstatsCurrent.HeapSys), tags, 1)\n\n\t// Collect number of heap bytes waiting to be used.\n\tstatsd.Gauge(\"mem.heap_idle_bytes\", float64(memstatsCurrent.HeapIdle), tags, 1)\n\n\t// Collect number of heap bytes that are in use.\n\tstatsd.Gauge(\"mem.heap_inuse_bytes\", float64(memstatsCurrent.HeapInuse), tags, 1)\n\n\t// Collect number of heap bytes released to OS.\n\tstatsd.Gauge(\"mem.heap_released_bytes\", float64(memstatsCurrent.HeapReleased), tags, 1)\n\n\t// Collect number of allocated objects.\n\tstatsd.Gauge(\"mem.heap_objects_count\", float64(memstatsCurrent.HeapObjects), tags, 1)\n\n\t// Collect number of bytes in use by the stack allocator.\n\tstatsd.Gauge(\"mem.stack_inuse_bytes\", float64(memstatsCurrent.StackInuse), tags, 1)\n\n\t// Collect number of bytes obtained from system for stack allocator.\n\tstatsd.Gauge(\"mem.stack_sys_bytes\", float64(memstatsCurrent.StackSys), tags, 1)\n\n\t// Collect number of bytes in use by mspan structures.\n\tstatsd.Gauge(\"mem.mspan_inuse_bytes\", float64(memstatsCurrent.MSpanInuse), tags, 1)\n\n\t// Collect number of bytes used for mspan structures obtained from system.\n\tstatsd.Gauge(\"mem.mspan_sys_bytes\", float64(memstatsCurrent.MSpanSys), tags, 1)\n\n\t// Collect number of bytes in use by mcache structures.\n\tstatsd.Gauge(\"mem.mcache_inuse_bytes\", float64(memstatsCurrent.MCacheInuse), tags, 1)\n\n\t// Collect number of bytes used for mcache structures obtained from system.\n\tstatsd.Gauge(\"mem.mcache_sys_bytes\", float64(memstatsCurrent.MCacheSys), tags, 1)\n\n\t// Collect number of bytes used by the profiling bucket hash table.\n\tstatsd.Gauge(\"mem.buck_hash_sys_bytes\", float64(memstatsCurrent.BuckHashSys), tags, 1)\n\n\t// Collect number of bytes used for garbage collection system metadata.\n\tstatsd.Gauge(\"mem.gc_sys_bytes\", float64(memstatsCurrent.GCSys), tags, 1)\n\n\t// Collect number of bytes used for other system allocations.\n\tstatsd.Gauge(\"mem.other_sys_bytes\", float64(memstatsCurrent.OtherSys), tags, 1)\n\n\t// Collect number of heap bytes when next garbage collection will take pace.\n\tstatsd.Gauge(\"mem.next_gc_bytes\", float64(memstatsCurrent.NextGC), tags, 1)\n}",
"func BaseWorkload(publishingRate float64, currentPerf float64) (numRep int32) {\n\treturn int32(math.Ceil(publishingRate / currentPerf))\n}",
"func Run(target string, args []string,\n\tregionNames []string,\n\tevents Events,\n\tattropts perf.Options,\n\timmediate func() MetricsWriter,\n\tignoreMissingRegions bool) (TotalMetrics, error) {\n\n\truntime.LockOSThread()\n\tdefer runtime.UnlockOSThread()\n\n\tpath, err := exec.LookPath(target)\n\tif err != nil {\n\t\treturn TotalMetrics{}, fmt.Errorf(\"lookpath: %w\", err)\n\t}\n\n\tf, err := os.Open(path)\n\tif err != nil {\n\t\treturn TotalMetrics{}, fmt.Errorf(\"open: %w\", err)\n\t}\n\n\tbin, err := bininfo.Read(f, f.Name())\n\tif err != nil {\n\t\treturn TotalMetrics{}, fmt.Errorf(\"elf-read: %w\", err)\n\t}\n\n\tvar regions []utrace.Region\n\tvar regionIds []int\n\n\taddregion := func(reg utrace.Region, id int) {\n\t\tregions = append(regions, reg)\n\t\tregionIds = append(regionIds, id)\n\t}\n\n\tfor i, name := range regionNames {\n\t\tif strings.Contains(name, \"-\") {\n\t\t\treg, err := ParseRegion(name, bin)\n\t\t\tif err != nil {\n\t\t\t\treturn TotalMetrics{}, fmt.Errorf(\"region-parse: %w\", err)\n\t\t\t}\n\n\t\t\tlogger.Printf(\"%s: 0x%x-0x%x\\n\", name, reg.StartAddr, reg.EndAddr)\n\n\t\t\taddregion(reg, i)\n\t\t} else {\n\t\t\tfnpc, fnerr := bin.FuncToPC(name)\n\n\t\t\tif fnerr == nil {\n\t\t\t\tlogger.Printf(\"%s: 0x%x\\n\", name, fnpc)\n\t\t\t\taddregion(&utrace.FuncRegion{\n\t\t\t\t\tAddr: fnpc,\n\t\t\t\t}, i)\n\t\t\t}\n\n\t\t\tinlinings, err := bin.InlinedFuncToPCs(name)\n\n\t\t\tif len(inlinings) == 0 {\n\t\t\t\tlogger.Printf(\"%s not inlined (error: %s)\\n\", name, err)\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\tif fnerr != nil {\n\t\t\t\t\tif err != nil && !ignoreMissingRegions {\n\t\t\t\t\t\treturn TotalMetrics{}, fmt.Errorf(\"func-lookup: %w, inlined-func-lookup: %s\", fnerr, err)\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tfor _, in := range inlinings {\n\t\t\t\tlogger.Printf(\"%s (inlined): 0x%x-0x%x\\n\", name, in.Low, in.High)\n\n\t\t\t\taddregion(&utrace.AddressRegion{\n\t\t\t\t\tStartAddr: in.Low,\n\t\t\t\t\tEndAddr: in.High,\n\t\t\t\t}, i)\n\t\t\t}\n\t\t}\n\t}\n\n\tprog, pid, err := utrace.NewProgram(bin, target, args, regions)\n\tif err != nil {\n\t\treturn TotalMetrics{}, err\n\t}\n\n\tfa := &perf.Attr{\n\t\tCountFormat: perf.CountFormat{\n\t\t\tEnabled: true,\n\t\t\tRunning: true,\n\t\t},\n\t\tOptions: attropts,\n\t}\n\tfa.Options.Disabled = true\n\n\tbase := make([]*perf.Attr, len(events.Base))\n\tfor i, c := range events.Base {\n\t\tattr := *fa\n\t\tc.Configure(&attr)\n\t\tbase[i] = &attr\n\t}\n\tgroups := make([][]*perf.Attr, len(events.Groups))\n\tfor i, group := range events.Groups {\n\t\tfor _, c := range group {\n\t\t\tattr := *fa\n\t\t\tc.Configure(&attr)\n\t\t\tgroups[i] = append(groups[i], &attr)\n\t\t}\n\t}\n\n\ttotal := make(TotalMetrics, 0)\n\tptable := make(map[int][]Profiler)\n\tptable[pid], err = makeProfilers(pid, len(regions), base, groups, fa)\n\tif err != nil {\n\t\treturn total, err\n\t}\n\n\tfor {\n\t\tvar ws utrace.Status\n\n\t\tp, evs, err := prog.Wait(&ws)\n\t\tif err == utrace.ErrFinishedTrace {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn total, fmt.Errorf(\"wait: %w\", err)\n\t\t}\n\n\t\tprofilers, ok := ptable[p.Pid()]\n\t\tif !ok {\n\t\t\tptable[p.Pid()], err = makeProfilers(p.Pid(), len(regions), base, groups, fa)\n\t\t\tif err != nil {\n\t\t\t\treturn total, err\n\t\t\t}\n\t\t}\n\n\t\tfor _, ev := range evs {\n\t\t\tswitch ev.State {\n\t\t\tcase utrace.RegionStart:\n\t\t\t\tlogger.Printf(\"%d: Profiler %d enabled\\n\", p.Pid(), 
ev.Id)\n\t\t\t\tprofilers[ev.Id].Disable()\n\t\t\t\tprofilers[ev.Id].Reset()\n\t\t\t\tprofilers[ev.Id].Enable()\n\t\t\tcase utrace.RegionEnd:\n\t\t\t\tprofilers[ev.Id].Disable()\n\t\t\t\tlogger.Printf(\"%d: Profiler %d disabled\\n\", p.Pid(), ev.Id)\n\t\t\t\tnm := NamedMetrics{\n\t\t\t\t\tMetrics: profilers[ev.Id].Metrics(),\n\t\t\t\t\tName: regionNames[regionIds[ev.Id]],\n\t\t\t\t}\n\t\t\t\ttotal = append(total, nm)\n\t\t\t\twriter := immediate()\n\t\t\t\tif writer != nil {\n\t\t\t\t\tnm.WriteTo(writer)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\terr = prog.Continue(p, ws)\n\t\tif err != nil {\n\t\t\treturn total, fmt.Errorf(\"trace-continue: %w\", err)\n\t\t}\n\t}\n\n\treturn total, nil\n}",
"func AddRunsCount(amount int) {\n\trunnerStats.Add(runsExpvarKey, int64(amount))\n}",
"func main() {\n //fmt.Println(sum_of_even_fibs_up_to(25))\n //=> 10\n\n fmt.Println(sum_of_even_fibs_up_to(4000000))\n //=> ?? \n}",
"func GetCPUPercentage() float64 {\n\tvar ru syscall.Rusage\n\tsyscall.Getrusage(syscall.RUSAGE_SELF, &ru)\n\tusageTime := ru.Utime.Nano() + ru.Stime.Nano()\n\tnowTime := time.Now().UnixNano()\n\tperc := float64(usageTime-lastCPUUsageTime) / float64(nowTime-lastInspectUnixNano) * 100.0\n\tlastInspectUnixNano = nowTime\n\tlastCPUUsageTime = usageTime\n\treturn perc\n}",
"func getAvgCpuUsage(period int64) (cpuUsage float64, err error) {\n\tprevTicks, err := getCPUTicks()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// sleep for a while\n\ttime.Sleep(time.Duration(period) * time.Millisecond)\n\n\tticks, err := getCPUTicks()\n\tif err != nil {\n\t\treturn\n\t}\n\n\tfor i := 0; i < 8; i++ {\n\t\tticks[i] = ticks[i] - prevTicks[i]\n\t}\n\n\tvar total uint64\n\tfor i := 0; i < 8; i++ {\n\t\ttotal += ticks[i]\n\t}\n\n\tidle := ticks[3] + ticks[4]\n\tcpuUsage = 1.0 - float64(idle)/float64(total)\n\treturn\n}",
"func (wp *Pool) NumResults() int {\n\twp.workersLock.RLock()\n\tdefer wp.workersLock.RUnlock()\n\ttotal := 0\n\tfor _, w := range wp.workers {\n\t\ttotal += w.worker.NumResults()\n\t}\n\treturn total\n}",
"func (tj *TensorFlowJob) AllocatedGPU() int64 {\n\tif tj.allocatedGPU > 0 {\n\t\treturn tj.allocatedGPU\n\t}\n\tfor _, pod := range tj.pods {\n\t\ttj.allocatedGPU += gpuInActivePod(pod)\n\t}\n\treturn tj.allocatedGPU\n}",
"func Run(ctx context.Context, f Runner, cycle time.Duration, maxErrs int, c clock.Clock) (ret *Results) {\n\t// TODO: ts_mon stuff.\n\tret = &Results{Success: true}\n\n\ttmr := c.NewTimer(ctx)\n\tdefer tmr.Stop()\n\n\tnextCycle := cycle\n\tconsecErrs := 0\n\tlog := logging.Get(ctx)\n\n\trun := func() {\n\t\texpvars.Add(\"Running\", 1)\n\t\tdefer expvars.Add(\"Running\", -1)\n\t\tdefer expvars.Add(\"Runs\", 1)\n\n\t\tt0 := c.Now()\n\t\t// TODO(seanmccullough) Optionally cancel overruns via context.WithTimeout.\n\t\terr := f(ctx)\n\t\tdur := c.Now().Sub(t0)\n\t\tif dur > cycle {\n\t\t\tlog.Errorf(\"Task overran by %v (%v - %v)\", (dur - cycle), dur, cycle)\n\t\t\tret.Overruns++\n\t\t\texpvars.Add(\"Overruns\", 1)\n\t\t}\n\n\t\tif err != nil {\n\t\t\tlog.Errorf(\"Got an error: %v\", err)\n\t\t\tret.Errs++\n\t\t\texpvars.Add(\"Errors\", 1)\n\t\t\tif consecErrs++; consecErrs >= maxErrs {\n\t\t\t\tret.Success = false\n\t\t\t\treturn\n\t\t\t}\n\t\t} else {\n\t\t\tconsecErrs = 0\n\t\t}\n\n\t\tnextCycle = cycle - dur\n\t\ttmr.Reset(nextCycle)\n\t}\n\n\t// Run f at least once.\n\trun()\n\n\t// Keep running f until ctx is done.\n\tfor {\n\t\tif ar := <-tmr.GetC(); ar.Incomplete() {\n\t\t\treturn ret\n\t\t}\n\t\trun()\n\t\tif !ret.Success {\n\t\t\treturn ret\n\t\t}\n\t}\n}",
"func procMem(_ int) (ProcMemStats, error) {\n\treturn ProcMemStats{}, nil\n}",
"func GetCPUMetricsLen() int {\n\treturn len(cpuMetrics)\n}",
"func stepPerms(n int32) int32 {\n\tcollection := &StepsPremsCollection{memory: make(map[int32]int32)}\n\treturn collection.getPerms(n)\n}",
"func (c *Cache) Requests() uint64 {\n\tn := uint64(0)\n\tfor _, shard := range c.shards {\n\t\tn += shard.Requests()\n\t}\n\treturn n\n}",
"func Workers(n uint64) func(*Attacker) {\n\treturn func(a *Attacker) { a.workers = n }\n}",
"func (cp *ConstantPacer) hitsPerNs() float64 {\n\treturn float64(cp.Freq) / nano\n}",
"func (m *Manager) AllocationCount() int {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\treturn len(m.allocations)\n}",
"func (d *DeviceProp) RegsPerBlock() int32 {\n\treturn (int32)(d.regsPerBlock)\n}",
"func AllocatorSize() (size uint32) {\n\t// size of freeBuddiesList\n\tsize += MaxOrder * uint32(unsafe.Sizeof(freeBuddiesList{}))\n\n\t// size of bigPagesBitmap\n\tsize += nMaps(_nBigPages) * 4\n\n\t// size of individual freeBuddiesList\n\tfor i := 0; i < MaxOrder; i++ {\n\t\t// maxOrder - 1 pages of order i, further divide by 2 since we use 1 bit\n\t\t// for buddy pair.\n\t\tvar nBuddies uint32 = _nBigPages * (1 << uint32(MaxOrder-i-1))\n\t\tsize += nMaps(nBuddies) * 4\n\t}\n\n\t// size of node pool\n\tsize += nodePoolSize()\n\n\treturn\n}",
"func (fcg *FuncCallGraph) TotalTypeAllocations(irType ir.Type) int {\n\treturn fcg.totalTypeAllocations[irType]\n}",
"func (v *ProcTableImpl) Stats() ProcTableStats {\n\tv.mutex.RLock()\n\tdefer v.mutex.RUnlock()\n\trunning := 0\n\tfor _, entry := range v.procTable {\n\t\tif entry.Status == CSRunning {\n\t\t\trunning++\n\t\t}\n\t}\n\treturn ProcTableStats{\n\t\tRunning: running,\n\t\tSucceeded: v.succeeded,\n\t\tFailed: v.failed,\n\t}\n}",
"func TestPallocBitsSummarize(t *testing.T) {\n\tvar emptySum = PackPallocSum(PallocChunkPages, PallocChunkPages, PallocChunkPages)\n\ttype test struct {\n\t\tfree []BitRange // Ranges of free (zero) bits.\n\t\thits []PallocSum\n\t}\n\ttests := make(map[string]test)\n\ttests[\"NoneFree\"] = test{\n\t\tfree: []BitRange{},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(0, 0, 0),\n\t\t},\n\t}\n\ttests[\"OnlyStart\"] = test{\n\t\tfree: []BitRange{{0, 10}},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(10, 10, 0),\n\t\t},\n\t}\n\ttests[\"OnlyEnd\"] = test{\n\t\tfree: []BitRange{{PallocChunkPages - 40, 40}},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(0, 40, 40),\n\t\t},\n\t}\n\ttests[\"StartAndEnd\"] = test{\n\t\tfree: []BitRange{{0, 11}, {PallocChunkPages - 23, 23}},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(11, 23, 23),\n\t\t},\n\t}\n\ttests[\"StartMaxEnd\"] = test{\n\t\tfree: []BitRange{{0, 4}, {50, 100}, {PallocChunkPages - 4, 4}},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(4, 100, 4),\n\t\t},\n\t}\n\ttests[\"OnlyMax\"] = test{\n\t\tfree: []BitRange{{1, 20}, {35, 241}, {PallocChunkPages - 50, 30}},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(0, 241, 0),\n\t\t},\n\t}\n\ttests[\"MultiMax\"] = test{\n\t\tfree: []BitRange{{35, 2}, {40, 5}, {100, 5}},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(0, 5, 0),\n\t\t},\n\t}\n\ttests[\"One\"] = test{\n\t\tfree: []BitRange{{2, 1}},\n\t\thits: []PallocSum{\n\t\t\tPackPallocSum(0, 1, 0),\n\t\t},\n\t}\n\ttests[\"AllFree\"] = test{\n\t\tfree: []BitRange{{0, PallocChunkPages}},\n\t\thits: []PallocSum{\n\t\t\temptySum,\n\t\t},\n\t}\n\tfor name, v := range tests {\n\t\tv := v\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tb := makePallocBits(v.free)\n\t\t\t// In the PallocBits we create 1's represent free spots, but in our actual\n\t\t\t// PallocBits 1 means not free, so invert.\n\t\t\tinvertPallocBits(b)\n\t\t\tfor _, h := range v.hits {\n\t\t\t\tcheckPallocSum(t, b.Summarize(), h)\n\t\t\t}\n\t\t})\n\t}\n}",
"func benchGetStackFramePointsCommonDepth(b *testing.B, depth int) {\n\n\tb.ReportAllocs()\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\t_ = getStackFramePoints(0, depth)\n\t}\n}",
"func (in *CF1454B) Run() {\n\tt := in.NextInt()\n\t//fmt.Println(\"t : \" , t)\n\tfor ; t > 0; t-- {\n\t\tn := in.NextInt()\n\t\t//fmt.Println(\"n : \" , n)\n\t\tcount := make(map[int]int)\n\t\ttime := make(map[int]int)\n\t\tfor i := 0; n > 0; n-- {\n\t\t\tvalue := in.NextInt()\n\t\t\t//fmt.Println(\"value : \" , value)\n\t\t\tc, ok := count[value]\n\t\t\tif !ok {\n\t\t\t\tc = 0\n\t\t\t\tcount[value] = 0\n\t\t\t\ttime[value] = i\n\t\t\t}\n\t\t\tcount[value] = c + 1\n\t\t\ti++\n\t\t}\n\t\tminTime, ans := 10000000, -1\n\t\tfor key := range count {\n\t\t\tc := count[key]\n\t\t\tif c == 1 {\n\t\t\t\ttc := time[key]\n\t\t\t\tif ans == -1 || ans > key {\n\t\t\t\t\tans = key\n\t\t\t\t\tminTime = tc\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t\tif ans == -1 {\n\t\t\tfmt.Println(-1)\n\t\t} else {\n\t\t\tfmt.Println(minTime + 1)\n\t\t}\n\t}\n\n}",
"func (s *stats) collect() {\n\truntime.GC()\n\truntime.Gosched()\n\n\tm := new(runtime.MemStats)\n\truntime.ReadMemStats(m)\n\n\tg := runtime.NumGoroutine()\n\tp := player.PlayerList.Length()\n\n\t// Calculate difference in resources since last run\n\tΔa := int64(m.Alloc - s.Alloc)\n\tΔh := int(m.HeapObjects - s.HeapObjects)\n\tΔg := g - s.Goroutines\n\n\t// Calculate max players\n\tmaxPlayers := s.MaxPlayers\n\tif s.MaxPlayers < p {\n\t\tmaxPlayers = p\n\t}\n\n\t// Calculate scaled numeric and prefix parts of Alloc and Alloc difference\n\tan, ap := uscale(m.Alloc)\n\tΔan, Δap := scale(Δa)\n\n\tlog.Printf(\"A[%4d%-2s %+5d%-2s] HO[%14d %+9d] GO[%6d %+6d] PL %d/%d\",\n\t\tan, ap, Δan, Δap, m.HeapObjects, Δh, g, Δg, p, maxPlayers,\n\t)\n\n\t// Save current stats\n\ts.Alloc = m.Alloc\n\ts.HeapObjects = m.HeapObjects\n\ts.Goroutines = g\n\ts.MaxPlayers = maxPlayers\n}",
"func perfTest(arg perfArg, f func()) (res perfResult) {\n\t// Pipeline: request generator -> workers -> sampler\n\tendUtil := startUtil()\n\n\t// Generate requests until arg.dur elapses\n\tstop := time.NewTimer(arg.dur)\n\tdefer stop.Stop()\n\tvar send *time.Ticker\n\tif arg.interval > 0 {\n\t\tsend = time.NewTicker(arg.interval)\n\t\tdefer send.Stop()\n\t}\n\trequests := make(chan time.Time, arg.maxq)\n\tgo func() {\n\t\tdefer close(requests)\n\t\tfor {\n\t\t\tif send == nil {\n\t\t\t\t// No request interval: send whenever the queue has space.\n\t\t\t\tselect {\n\t\t\t\tcase <-stop.C:\n\t\t\t\t\treturn\n\t\t\t\tcase requests <- time.Now():\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\t// Attempt to send a request periodically, drop if queue is full.\n\t\t\t\tselect {\n\t\t\t\tcase <-stop.C:\n\t\t\t\t\treturn\n\t\t\t\tcase <-send.C:\n\t\t\t\t}\n\t\t\t\tselect {\n\t\t\t\tcase requests <- time.Now():\n\t\t\t\tdefault:\n\t\t\t\t\tres.drops++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}()\n\n\t// Workers run f until requests closed.\n\tdurations := make(chan time.Duration)\n\tvar wg sync.WaitGroup\n\twg.Add(arg.par)\n\tfor i := 0; i < arg.par; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\tfor start := range requests {\n\t\t\t\tqueueTime := time.Since(start)\n\t\t\t\t_ = queueTime // not currently used\n\t\t\t\tstart = time.Now()\n\t\t\t\tf()\n\t\t\t\tdurations <- time.Since(start)\n\t\t\t}\n\t\t}()\n\t}\n\tgo func() {\n\t\twg.Wait()\n\t\tclose(durations)\n\t}()\n\n\t// Sampler populates result with samples.\n\tres.par = arg.par\n\tres.sampler = newSampler()\n\tdefer res.sampler.close()\n\tfor elapsed := range durations {\n\t\tres.sampler.add(elapsed)\n\t}\n\tres.walltime, res.exectime = endUtil()\n\treturn\n}",
"func (p Parallel) NumWorkers() int {\n\ti := int(p)\n\tif i > 0 {\n\t\treturn i\n\t}\n\treturn runtime.GOMAXPROCS(0)\n}",
"func GetProcessStats(pid int) (ProcessStats, error) {\n\t// Open the process.\n\tprocess, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid))\n\tif err != nil {\n\t\treturn ProcessStats{}, nil\n\t}\n\tdefer syscall.CloseHandle(process)\n\n\t// Get memory info.\n\tpsapi := syscall.NewLazyDLL(\"psapi.dll\")\n\tgetProcessMemoryInfo := psapi.NewProc(\"GetProcessMemoryInfo\")\n\tmemoryInfo := processMemoryCounters{\n\t\tcb: 72,\n\t}\n\tres, _, _ := getProcessMemoryInfo.Call(uintptr(process), uintptr(unsafe.Pointer(&memoryInfo)), uintptr(memoryInfo.cb))\n\tif res == 0 {\n\t\treturn ProcessStats{}, nil\n\t}\n\n\t// Get CPU info.\n\tcreationTime1 := &syscall.Filetime{}\n\texitTime1 := &syscall.Filetime{}\n\tkernelTime1 := &syscall.Filetime{}\n\tuserTime1 := &syscall.Filetime{}\n\terr = syscall.GetProcessTimes(process, creationTime1, exitTime1, kernelTime1, userTime1)\n\tif err != nil {\n\t\treturn ProcessStats{RSSMemory: float64(memoryInfo.WorkingSetSize)}, nil\n\t}\n\t<-time.After(time.Millisecond * 50) // Not the most accurate, but it'll do.\n\tcreationTime2 := &syscall.Filetime{}\n\texitTime2 := &syscall.Filetime{}\n\tkernelTime2 := &syscall.Filetime{}\n\tuserTime2 := &syscall.Filetime{}\n\terr = syscall.GetProcessTimes(process, creationTime2, exitTime2, kernelTime2, userTime2)\n\tif err != nil {\n\t\treturn ProcessStats{RSSMemory: float64(memoryInfo.WorkingSetSize)}, nil\n\t}\n\tcpuTime := float64((userTime2.Nanoseconds() - userTime1.Nanoseconds()) / int64(runtime.NumCPU()))\n\n\treturn ProcessStats{\n\t\tRSSMemory: float64(memoryInfo.WorkingSetSize),\n\t\tCPUUsage: cpuTime / 500000, // Conversion: (cpuTime / (50*1000*1000)) * 100\n\t}, nil\n}",
"func ProcessorCount() int32 {\n\tif amount := getActiveProcessorCount(ALL_PROCESSOR_GROUPS); amount != 0 {\n\t\treturn int32(amount)\n\t}\n\treturn int32(runtime.NumCPU())\n}",
"func (c *Cruncher) CalculateSpeed() (int64, time.Duration) {\n\tvar n int64\n\tn = 1\n\tstart := time.Now()\n\n\tfor loopStart := time.Now(); ; {\n\t\t// check every 100 loops if time is reached\n\t\tif n%100 == 0 {\n\t\t\tif time.Since(loopStart) > 2*time.Second {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tc.thread <- 1 // will block if there is MAX ints in threads\n\t\tgo func() {\n\t\t\t// dry run\n\t\t\tk, err := newPrivateKey()\n\t\t\tif err != nil {\n\t\t\t\tpanic(err)\n\t\t\t}\n\t\t\t_ = k.String()\n\t\t\tt := strings.ToLower(k.Public().String())\n\n\t\t\t// Allow only one routine at a time to avoid\n\t\t\t// \"concurrent map iteration and map write\"\n\t\t\tc.mapMutex.Lock()\n\t\t\tdefer c.mapMutex.Unlock()\n\t\t\tfor w := range c.WordMap {\n\t\t\t\t_ = strings.HasPrefix(t, w)\n\t\t\t}\n\t\t\t<-c.thread // removes an int from threads, allowing another to proceed\n\t\t\tn++\n\t\t}()\n\t}\n\n\testimate64 := int64(time.Since(start)) / n\n\n\treturn n / 2, time.Duration(estimate64)\n}",
"func GetNumWorkersTotal() (channels.BufferCap, error) {\n\tif !isBigPoolInitialized {\n\t\treturn 0, newError(\"Global worker pool was not initialized\")\n\t}\n\n\treturn GoshPoolGlobal.Cap(), nil\n}",
"func howManyThreads(threads int) int {\n\tmaxThreads := runtime.GOMAXPROCS(0)\n\tif threads == 0 {\n\t\treturn 1\n\t} else if threads > maxThreads {\n\t\tlog.Warnf(\"%d threads set too high, setting to system max, %d\", threads, maxThreads)\n\t\treturn maxThreads\n\t}\n\treturn threads\n}",
"func minimumSize(nums []int, maxOperations int) int {\n\tmaxe := 0\n\tfor _, v := range nums {\n\t\tif v > maxe {\n\t\t\tmaxe = v\n\t\t}\n\t}\n\tl, r := 1, maxe\n\tfor l < r {\n\t\tmid := (l + r) / 2\n\t\tall := 0\n\t\t// count how many ops we need to make the max number of balls as mid\n\t\t// to divide x balls into some parts where each part <= t,\n\t\t// we need ceiling(x/t)-1 = (x-1)/t times of divide.\n\t\tfor _, v := range nums {\n\t\t\tall += (v - 1) / mid\n\t\t}\n\t\tif all > maxOperations {\n\t\t\tl = mid + 1\n\t\t} else {\n\t\t\tr = mid\n\t\t}\n\t}\n\treturn l\n}",
"func (r RetCalc) RunIncomes() []float64 {\n\tincomes := make([]float64, len(r.Sims), len(r.Sims))\n\tfor i := range r.Sims {\n\t\tuntaxed_total_wealth := r.Non_Taxable_balance * r.Sims[i].GrowthFactor(0)\n\t\ttaxed_total_wealth := r.Taxable_balance * r.Sims[i].GrowthFactorWithTaxes(0, r.Effective_tax_rate)\n\t\tsum_t, sum_ut := 0.0, 0.0\n\t\tfor j := range r.Sims[i] {\n\t\t\tif j+r.Age < r.Retirement_age {\n\t\t\t\tuntaxed_total_wealth += r.Non_Taxable_contribution * r.Sims[i].GrowthFactor(j)\n\t\t\t\ttaxed_total_wealth += r.Taxable_contribution * r.Sims[i].GrowthFactorWithTaxes(j, r.Effective_tax_rate)\n\t\t\t\tsum_ut += r.Sims[i].GrowthFactor(j)\n\t\t\t\tsum_t += r.Sims[i].GrowthFactorWithTaxes(j, r.Effective_tax_rate)\n\t\t\t}\n\t\t}\n\t\tf, _ := r.IncomeFactors(i)\n\t\tft, _ := r.IncomeFactorsWithTaxes(i)\n\t\tincomes[i] = (taxed_total_wealth/ft + untaxed_total_wealth*(1-r.Effective_tax_rate)/f)\n\t}\n\tsort.Float64s(incomes)\n\treturn incomes\n}",
"func Benchmark(f func(b *B)) BenchmarkResult",
"func NewStats() (*Stats, error) {\n\t// TODO: Make it singleton if possible.\n\tprocess, err := process.NewProcess(int32(os.Getpid()))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tvar cpuUsage float64\n\tif c, err := process.CPUPercent(); err == nil {\n\t\tcpuUsage = c\n\t}\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\treturn &Stats{\n\t\tGoroutines: runtime.NumGoroutine(),\n\t\tCPUUsage: cpuUsage,\n\t\tMemStats: MemStats{\n\t\t\tHeapAlloc: m.HeapAlloc,\n\t\t\tHeapIdle: m.HeapIdle,\n\t\t\tHeapInuse: m.HeapInuse,\n\t\t},\n\t}, nil\n}",
"func main() {\n\n\tvar counter int64 = 0\n\n\tvar wg sync.WaitGroup\n\n\tfor i := 0; i < 100; i++ {\n\t\twg.Add(1)\n\t\tgo func() {\n\n\t\t\t//this will make and atomic operation over the given memory space\n\t\t\tatomic.AddInt64(&counter, 1)\n\n\t\t\twg.Done()\n\t\t}()\n\t}\n\twg.Wait()\n\n\tfmt.Println(counter)\n}",
"func (s Size) Gigabytes() float64 { return float64(s) / float64(Gigabyte) }",
"func OptimalWorkerCount() int {\n\treturn OptimalWorkerCountInRange(defaultMinWorkers, defaultMaxWorkers)\n}"
] | [
"0.8254955",
"0.5656407",
"0.49444196",
"0.48704565",
"0.481819",
"0.48059502",
"0.4730157",
"0.47268108",
"0.46950564",
"0.46740502",
"0.4658304",
"0.4651064",
"0.46304864",
"0.45891875",
"0.45341447",
"0.45259058",
"0.4485292",
"0.44815636",
"0.4479504",
"0.4464512",
"0.44368133",
"0.44338423",
"0.4420591",
"0.4411236",
"0.43991148",
"0.43921897",
"0.43776482",
"0.43776265",
"0.43763325",
"0.43706256",
"0.43572733",
"0.43572733",
"0.43374407",
"0.43353203",
"0.4331976",
"0.43237415",
"0.43218216",
"0.43203214",
"0.43165496",
"0.43057516",
"0.43033683",
"0.42955866",
"0.4293591",
"0.42928725",
"0.42790365",
"0.4272819",
"0.42696375",
"0.42666525",
"0.42582995",
"0.4254923",
"0.4254722",
"0.42523918",
"0.42445958",
"0.4239912",
"0.42395318",
"0.42363086",
"0.42308986",
"0.42205274",
"0.42118257",
"0.4208274",
"0.42069742",
"0.4196686",
"0.41897768",
"0.4185056",
"0.41825265",
"0.41823387",
"0.41792905",
"0.41701254",
"0.41683486",
"0.41659406",
"0.4161045",
"0.4148527",
"0.41466114",
"0.41457456",
"0.4145326",
"0.41443768",
"0.41441151",
"0.4133786",
"0.41118515",
"0.4102255",
"0.40914378",
"0.40889478",
"0.40848723",
"0.40766004",
"0.40753093",
"0.40750504",
"0.4074126",
"0.40695125",
"0.4055576",
"0.40553686",
"0.4050903",
"0.4050742",
"0.40466297",
"0.40408877",
"0.40346897",
"0.40225315",
"0.40191215",
"0.40145287",
"0.40094188",
"0.400885"
] | 0.8242568 | 1 |
StartTimer starts timing a test. This function is called automatically before a benchmark starts, but it can also be used to resume timing after a call to StopTimer. | func (b *B) StartTimer() {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Mock) StartTimer(d time.Duration) {\n\tc.FakeStartTimer(d)\n}",
"func (t *Timer) StartTimer() {\n\tt.start = time.Now()\n}",
"func (b *B) StartTimer()",
"func startTimer(name string) func() {\n\tt := time.Now()\n\tlog.Println(name, \"started\")\n\treturn func() {\n\t\td := time.Now().Sub(t)\n\t\tlog.Println(name, \"took\", d)\n\t}\n}",
"func StartTimer(path string) *Timer {\n\treturn &Timer{\n\t\tstart: time.Now(),\n\t\tpath: path,\n\t}\n}",
"func (s *TimerService) StartTimer(teamID string, project *models.Project, user *models.TeamUser, taskName string) (*models.Timer, error) {\n\treturn s.repository.create(teamID, project, user, taskName)\n}",
"func (c *Container) startTimer() error {\n\treturn define.ErrNotImplemented\n}",
"func (s *Service) StartTimer(id, duration int) (*Timer, error) {\n\ttimer := NewTimer(id, duration, s.store)\n\tif err := timer.Start(s.channel); err != nil {\n\t\treturn nil, err\n\t}\n\ts.timers[id] = timer\n\n\treturn timer, nil\n}",
"func StartLogTimer(name string) {\n\tlogEvent(name, START_SYMBOL)\n}",
"func (m *TimerMutation) SetTimerStart(t time.Time) {\n\tm.timerStart = &t\n}",
"func TimeStart(input ...string) {\n\tif getLogTypeStatus(\"timer\") == true {\n\t\tvar label string\n\t\tif len(input) == 0 {\n\t\t\tlabel = \"default\"\n\t\t} else {\n\t\t\tlabel = input[0]\n\t\t}\n\t\ttimers[label] = time.Now()\n\t}\n}",
"func startTimer(log *base.LogObject, ctx *DeferredContext) {\n\n\tlog.Functionf(\"startTimer()\")\n\tmin := 1 * time.Minute\n\tmax := 15 * time.Minute\n\tctx.Ticker.UpdateExpTicker(min, max, 0.3)\n}",
"func (r *Raft) StartTimer(timeoutObj int, waitTime int) (timerObj *time.Timer) {\n\texpInSec := time.Duration(waitTime) * msecs\n\ttimerObj = time.AfterFunc(expInSec, func() {\n\t\tr.TimeOut(timeoutObj)\n\t})\n\treturn\n}",
"func startTimer(log *base.LogObject, ctx *DeferredContext) {\n\n\tlog.Functionf(\"startTimer()\")\n\tmin := 1 * time.Minute\n\tmax := 15 * time.Minute\n\tctx.ticker.UpdateExpTicker(min, max, 0.3)\n}",
"func (t *Timer) Start() *Stopwatch {\n\treturn newStopwatch(t)\n}",
"func (s *Stopwatch) Start() {\n\ts.t = time.Now()\n}",
"func startTimer(timePtr *int) {\n\tfmt.Println(\"You have\", *timePtr, \"s for\", *timePtr, \" questions\")\n\ttime.Sleep(time.Duration(*timePtr) * time.Second)\n\tos.Exit(0)\n}",
"func (r *Raft) StartTimer(timeoutObj int, waitTime int) (timerObj *time.Timer) {\n\t//expInSec := secs * time.Duration(waitTime) //gives in seconds\n\texpInSec := time.Duration(waitTime) * time.Millisecond\n\ttimerObj = time.AfterFunc(expInSec, func() {\n\t\tr.TimeOut(timeoutObj)\n\t})\n\treturn\n}",
"func Start(initialDelay time.Duration, fixedDelay time.Duration, fixedRate time.Duration, work func(t *Timer)) *Timer {\n\t// logger.Debug(\"create timer\")\n\tt := &Timer{\n\t\tclose: false,\n\t}\n\tgo t.tick(initialDelay, fixedDelay, fixedRate, work)\n\treturn t\n}",
"func (t *timer) Start() {\n\tt.beforeTime = time.Now().UnixNano()\n\tt.afterTime = t.beforeTime\n}",
"func (s *Stopwatch) Start() {\n\ts.Lock()\n\tdefer s.Unlock()\n\tif !s.active() {\n\t\tdiff := time.Since(s.stop)\n\t\ts.start = s.start.Add(diff)\n\t\ts.stop = time.Time{}\n\t}\n}",
"func StartFileTimer(name string) {\n\twriteFileTimer(name, expandFilePathStart)\n}",
"func NewTimerSingleStart(elapsedCallback func(), interval time.Duration) *Timer {\n\ttimer := NewTimer(elapsedCallback, false, interval)\n\ttimer.Start()\n\treturn timer\n}",
"func Start(category string) *Timer {\n\tt := Timer{\n\t\tcategory: category,\n\t\tstartTime: currentTimeFunc(),\n\t}\n\treturn &t\n}",
"func Start(f func(time.Duration) string) *Stopwatch {\n\ts := New(f)\n\ts.Start()\n\n\treturn s\n}",
"func TestStart(t *testing.T) {\n\tTestingT(t)\n}",
"func (t *PCPTimer) Start() error {\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\tif t.started {\n\t\treturn errors.New(\"trying to start an already started timer\")\n\t}\n\n\tt.since = time.Now()\n\tt.started = true\n\treturn nil\n}",
"func (httpCtx HttpContext) StartTimer(w http.ResponseWriter, r *http.Request) {\n\n\t// step 1 - get variables from request\n\tparams := mux.Vars(r)\n\n\t// step 2 - try to extract variables\n\tprojName := params[\"projname\"]\n\ttaskName := params[\"taskname\"]\n\ttaskNote := params[\"tasknote\"]\n\n\tlog.WithFields(log.Fields{\"params\": params}).Trace(\"new request to start timer\")\n\n\t// get time duration parameter and convert to int\n\ttInt, err := strconv.Atoi(params[\"time\"])\n\ttTime := time.Duration(tInt)\n\t// if cant extract the time from request, use default time\n\tif err != nil {\n\t\tlog.WithFields(log.Fields{\"err\": err}).Warn(\"Error during converting time received over rest - using to default time\")\n\t\ttTime = httpCtx.DefaultTime\n\t}\n\n\t// step 3 - send the time received over the channel\n\thttpCtx.CSingleShot <- SingleShotReq{tTime, projName, taskName, taskNote}\n\n}",
"func (s *StopWatch) Start() {\n if !s.running {\n s.start = time.Now()\n s.running = true\n }\n}",
"func (tcr *TestCaseReporter) SetStartTime(t time.Time) {\n\ttcr.startTime = t\n}",
"func (timer *Timer) Start(seconds int64) {\n\tcurrent := int64(0)\n\ttotal := seconds\n\ttimer.ticker = time.NewTicker(time.Second)\n\n\tdefer timer.ticker.Stop()\n\n\tfor {\n\t\tselect {\n\t\tcase <-timer.ticker.C:\n\t\t\tif !timer.pause {\n\t\t\t\tcurrent++\n\t\t\t\ttimer.OnTick(current, total)\n\n\t\t\t\tif current == total {\n\t\t\t\t\ttimer.stop <- true\n\t\t\t\t}\n\t\t\t}\n\t\tcase <-timer.stop:\n\t\t\ttimer.OnFinish()\n\t\t\tif timer.Finished != nil {\n\t\t\t\ttimer.Finished <- true\n\t\t\t}\n\t\t\treturn\n\t\t}\n\t}\n}",
"func (t *Timer) Start() {\n\tif !t.running {\n\t\tt.running = true\n\t\tgo func() {\n\t\t\ttime.Sleep(t.interval)\n\t\t\tt.elapsedCallback()\n\t\t\tt.running = false\n\t\t}()\n\t}\n}",
"func (tsr *TestSuiteReporter) SetStartTime(t time.Time) {\n\ttsr.startTime = t\n}",
"func (s *SafeTestingTBOnStart) Start(t testing.TB) error {\n\ts.SetTestingTB(t)\n\treturn nil\n}",
"func StartTime() {\n\tstart = time.Now()\n}",
"func (c *Mock) SetTimer(t *time.Timer) {\n\tc.FakeSetTimer(t)\n}",
"func Start(logger *zap.Logger, message string, fields ...zap.Field) Timer {\n\tif checkedEntry := logger.Check(zap.DebugLevel, message); checkedEntry != nil {\n\t\treturn newTimer(checkedEntry, fields...)\n\t}\n\treturn nopTimer{}\n}",
"func Start(opts ...StartOption) {\n\tif internal.Testing {\n\t\treturn // mock tracer active\n\t}\n\tt := newTracer(opts...)\n\tif !t.config.enabled {\n\t\treturn\n\t}\n\tinternal.SetGlobalTracer(t)\n\tif t.config.logStartup {\n\t\tlogStartup(t)\n\t}\n}",
"func (t *TestRun) Start() {\n\tlog.Println(\"================\")\n\tlog.Println(\" Starting test \")\n\tlog.Println(\"================\")\n\tlog.Printf(\"concurrency level [%d]\", t.ConcurrencyLevel)\n\tlog.Printf(\"iterations [%d]\", t.Iterations)\n\tlog.Printf(\"writeRate [%f]\", t.WriteRate)\n\tt.StartedAt = time.Now()\n\tlog.Println(\"TESTRUN Starting...\")\n}",
"func (s *Stopwatch) Start() {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif !s.isRunning() {\n\t\ts.refTime = time.Now()\n\t}\n}",
"func (timeout *Timeout) Start() {\n\ttimeout.state = Active\n\ttimeout.start = time.Now()\n}",
"func StartTest(testName string) {\n\tlog.Println(\"\")\n\tlog.Println(\"\")\n\tpc, file, line, _ := runtime.Caller(1)\n\n\tfullPCName := runtime.FuncForPC(pc).Name()\n\tlastIndexOfPc := strings.LastIndex(fullPCName, \"/\") + 1\n\tjustPcName := fullPCName[lastIndexOfPc:len(fullPCName)]\n\n\tlastIndexOfFile := strings.LastIndex(file, \"/\") + 1\n\tjustFileName := file[lastIndexOfFile:len(file)]\n\n\t//log.Printf(\"INFO [%s:%d] [%s] %v\", justFileName, line, justPcName, msg)\n\tlog.Printf(\"***START [%s:%d] [%s] %v\", justFileName, line, justPcName, testName)\n\n\t//log.Printf(\"***START \" + testName + \" [%s:%d] [%s] %v\", justFileName, line, justPcName, msg))\n\tlog.Println(\"\")\n}",
"func (bench *Stopwatch) Start() int32 {\n\tlap := atomic.AddInt32(&bench.nextLap, 1) - 1\n\tif int(lap) > len(bench.spans) {\n\t\treturn -1\n\t}\n\tbench.spans[lap].Start = Now()\n\treturn lap\n}",
"func (runner *MockRunner) Start(runnable runner.Runnable) {\n\trunner.Called(runnable)\n}",
"func (s *Strategy) startTimer() {\n\ts.timer = time.NewTimer(s.timerFrequency)\n\n\teventLoop := func(ctx context.Context) {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.timer.C:\n\t\t\t\tif !s.onTimer() {\n\t\t\t\t\t// We did not publish. Reset the timer and try again later.\n\t\t\t\t\ts.timer.Reset(s.timerFrequency)\n\t\t\t\t}\n\t\t\tcase <-s.resetChan:\n\t\t\t\ts.timer.Reset(s.timerFrequency)\n\t\t\tcase <-ctx.Done():\n\t\t\t\t// User requested to stop the timer.\n\t\t\t\ts.timer.Stop()\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n\n\t// Start a go routine to listen to the timer.\n\t_ = s.worker.Start(nil, eventLoop)\n}",
"func (timer *WallclockTimer) Start() error {\n\ttimer.command <- \"start\"\n\treturn nil\n}",
"func (m *TimerMutation) TimerStart() (r time.Time, exists bool) {\n\tv := m.timerStart\n\tif v == nil {\n\t\treturn\n\t}\n\treturn *v, true\n}",
"func (_LvRecording *LvRecordingCaller) StartTime(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _LvRecording.contract.Call(opts, &out, \"startTime\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func (b *TestDriver) StartRecording() error {\n\treturn nil\n}",
"func (t *FileStorage) Start(ctx context.Context, timerid string, c *claims.MyCustomClaims) (*pb.Timer, error) {\n\tid, _ := strconv.Atoi(timerid)\n\n\texistingTimer, err := t.Db.Timer.\n\t\tQuery().\n\t\tWhere(timer.And(timer.ID(id), timer.Userid(c.Subject))).\n\t\tOnly(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tnewtimer, err := existingTimer.\n\t\tUpdate().\n\t\tSetTimerStart(time.Now()).\n\t\tSetIsRunning(true).\n\t\tSave(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tresponse := toPb(newtimer)\n\treturn response, nil\n}",
"func (s Simulator) Start() Result {\n\tswitch s.timeoutDuration {\n\tcase 0:\n\t\treturn s.run()\n\tdefault:\n\t\treturn s.runWithContext()\n\t}\n}",
"func resetStart() {\n\tstart = time.Now()\n}",
"func (w *Timer) Start() time.Time {\n\treturn time.Now()\n}",
"func TestStart(t *testing.T) {\n\t// Preparation\n\tdir, err := os.Getwd()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdir = dir + \"/../assets\" // path to your tracks for testing\n\n\tlib, err := library.NewLibrary(\"MyLibrary\", dir)\n\tif err != nil {\n\t\tt.Errorf(\"initialize library with valid params: %s\", err.Error())\n\t}\n\n\terr = lib.ScanWithRoutines()\n\tif err != nil {\n\t\tt.Errorf(\"scan library: %s\", err.Error())\n\t}\n\n\t// Test\n\tp := player.NewPlayer(lib, make(chan request.Request, 1000))\n\n\tvar requests = []request.RequestType{\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestNextTrack,\n\t\trequest.RequestPrevTrack,\n\t\trequest.RequestPrevTrack,\n\t\trequest.RequestRepeatMode,\n\t}\n\tch := p.Start(make(chan string, 1000))\n\tfor _, req := range requests {\n\t\tch <- request.NewRequestToPlayer(req)\n\t\ttime.Sleep(3 * time.Second)\n\t}\n\t// time.Sleep(1000*time.Second)\n\t// Another process is started on the backend, this process can exit naturally.\n}",
"func (s *RuntimeStatSampler) Start() error {\n\treturn routine.RunWorkDaemon(\"RuntimeStat-Sampler\", func() {\n\t\ttimer := time.NewTimer(s.Interval)\n\t\tdefer timer.Stop()\n\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-s.Ctx.Done():\n\t\t\t\treturn\n\n\t\t\tcase <-timer.C:\n\t\t\t\ts.MemorySample()\n\t\t\t\ts.SwapSample()\n\t\t\t\ts.CpuSample()\n\t\t\t\ts.DiskSample()\n\t\t\t\ts.NetSample()\n\t\t\t\ts.UpdateTime()\n\t\t\t}\n\n\t\t\ttimer.Reset(s.Interval)\n\t\t}\n\t}, s.Ctx.Done())\n}",
"func StartNewStopwatch() *Stopwatch {\n\treturn &Stopwatch{time.Now()}\n}",
"func (znp *Znp) SysOsalStartTimer(id uint8, timeout uint16) (rsp *StatusResponse, err error) {\n\treq := &SysOsalStartTimer{ID: id, Timeout: timeout}\n\terr = znp.ProcessRequest(unp.C_SREQ, unp.S_SYS, 0x0A, req, &rsp)\n\treturn\n}",
"func (r *Reporter) SetStartTime(t time.Time) {\n\tr.startTime = t\n}",
"func (p *Profile) Start() error {\n\tsuccess, err := C.MXSetProfilerState(C.int(1))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif success != 0 {\n\t\treturn GetLastError()\n\t}\n\tp.startTime = time.Now()\n\tp.started = true\n\n\treturn nil\n}",
"func (p *Profile) Start() error {\n\tsuccess, err := C.MXSetProfilerState(C.int(1))\n\tif err != nil {\n\t\treturn err\n\t}\n\tif success != 0 {\n\t\treturn GetLastError()\n\t}\n\tp.startTime = time.Now()\n\tp.started = true\n\n\treturn nil\n}",
"func (s *Server) StartTest(ctx context.Context, request *StartTest_Request) (response *StartTest_Response, err error) {\n\t// @TODO: maybe make this command sync, and stream interaction to let the\n\t// client know when all the interaction has been sent\n\n\tlogging.Log(fmt.Sprintf(\"StartTest - incoming request: %+v\", request))\n\tresponse = new(StartTest_Response)\n\n\tvar ct *config.Test\n\tvar ts *TestSession\n\n\ts.muTests.Lock()\n\n\t// checks if test exists\n\tif gtest, ok := s.Tests[request.GroupName]; ok {\n\t\tif ct, ok = gtest[request.TestN]; !ok {\n\t\t\ts.muTests.Unlock()\n\t\t\treturn response, logging.LogErr(errors.New(ErrTestNotExist))\n\t\t}\n\t}\n\n\t// checks if test isn't already running\n\tif gtest, ok := s.TestSessions[request.GroupName]; ok {\n\t\tif ts, ok = gtest[request.TestN]; ok {\n\t\t\ts.muTests.Unlock()\n\t\t\treturn response, logging.LogErr(errors.New(ErrTestNotExist))\n\t\t}\n\t}\n\n\tsctx := context.Background()\n\tts = NewTestSession(sctx, ct)\n\ts.TestSessions[request.GroupName][request.TestN] = ts\n\n\ts.muTests.Unlock()\n\n\ttime.Sleep(time.Second * 5)\n\n\tlogging.Log(fmt.Sprintf(\"starting test: %+v\", ct))\n\n\tgo func() {\n\t\tvar x int\n\t\tfor x = 0; ts.IsRunning() && x < ct.AmountInternal; x += 1 {\n\t\t\tswitch ct.TypeInternal {\n\t\t\tcase config.TestTypeText:\n\t\t\t\tmessage := ConstructTextMessage(ct.SizeInternal)\n\t\t\t\terr = s.SendTextMessage(sctx, request.GroupName, message)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogging.Log(err.Error())\n\t\t\t\t}\n\n\t\t\tcase config.TestTypeMedia:\n\t\t\t\timage, err := ConstructImageMessage(ct.SizeInternal)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogging.Log(err.Error())\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\terr = s.SendImageMessage(sctx, request.GroupName, image)\n\t\t\t\tif err != nil {\n\t\t\t\t\tlogging.Log(err.Error())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tlogging.Log(fmt.Sprintf(\"sent message to group: %s\", request.GroupName))\n\t\t\ttime.Sleep(time.Second * time.Duration(ct.IntervalInternal))\n\t\t}\n\n\t\tlogging.Log(fmt.Sprintf(\"sent %d messages to %s\\n\", x, request.GroupName))\n\t}()\n\n\treturn response, logging.LogErr(err)\n}",
"func (t *Timer) Start() {\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-t.Done:\n\t\t\t\tbreak\n\t\t\tdefault:\n\t\t\t\tinterval := microsecondsPerPulse(t.Tempo)\n\t\t\t\ttime.Sleep(interval)\n\t\t\t\tt.Pulses <- 1\n\t\t\t}\n\t\t}\n\t}()\n}",
"func (m *MockStaker) StartTime() time.Time {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StartTime\")\n\tret0, _ := ret[0].(time.Time)\n\treturn ret0\n}",
"func (t *Timer) Start() time.Time {\n\treturn t.start\n}",
"func (t *SelfTester) Start() {}",
"func (c *ProjectsTracesListCall) StartTime(startTime string) *ProjectsTracesListCall {\n\tc.urlParams_.Set(\"startTime\", startTime)\n\treturn c\n}",
"func (s *Session) SetStartTime(t time.Time) {\n\ts.started = t\n}",
"func (c *ClockVal) SetStartTime() {\n\tc.StartTime = NowTime()\n}",
"func Start(timeWheels *TimeWheels) error {\n\tfmt.Println(\"Start the timer scheduler\")\n\tgo func() {\n\t\tticker := time.NewTicker(1 * time.Second)\n\t\tvar wheelIndex = 0\n\t\t//each ticker to exec this statement\n\t\tfor _ = range ticker.C {\n\t\t\tif timeWheels.TimeWheel[wheelIndex].Size > 0 {\n\t\t\t\tif timeWheels.TimeWheel[wheelIndex].Size == len(timeWheels.TimeWheel[wheelIndex].TimerSchedulers) {\n\t\t\t\t\tfmt.Println(\"The size is not eq with length of TimerScheduler array\")\n\t\t\t\t}\n\t\t\t\tfor scheduleIndex, timerScheduler := range timeWheels.TimeWheel[wheelIndex].TimerSchedulers {\n\t\t\t\t\tif timerScheduler.CycNum == 0 {\n\t\t\t\t\t\ttimerScheduler.DefaultExecuteSchedule()\n\t\t\t\t\t} else {\n\t\t\t\t\t\ttimerScheduler.CycNum--\n\t\t\t\t\t\ttimeWheels.UpdateScheduler(&timerScheduler, scheduleIndex)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\tif wheelIndex == int(TIME_WHEEL_SIZE-1) {\n\t\t\t\twheelIndex = 0\n\t\t\t} else {\n\t\t\t\twheelIndex++\n\t\t\t}\n\n\t\t}\n\t}()\n\treturn nil\n}",
"func (m *MockFullNode) StartTime(arg0 context.Context) (time.Time, error) {\n\tm.ctrl.T.Helper()\n\tret := m.ctrl.Call(m, \"StartTime\", arg0)\n\tret0, _ := ret[0].(time.Time)\n\tret1, _ := ret[1].(error)\n\treturn ret0, ret1\n}",
"func (o TriggerBuildArtifactsObjectsTimingOutput) StartTime() pulumi.StringPtrOutput {\n\treturn o.ApplyT(func(v TriggerBuildArtifactsObjectsTiming) *string { return v.StartTime }).(pulumi.StringPtrOutput)\n}",
"func (_LvRecordableStream *LvRecordableStreamCaller) StartTime(opts *bind.CallOpts) (*big.Int, error) {\n\tvar out []interface{}\n\terr := _LvRecordableStream.contract.Call(opts, &out, \"startTime\")\n\n\tif err != nil {\n\t\treturn *new(*big.Int), err\n\t}\n\n\tout0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int)\n\n\treturn out0, err\n\n}",
"func MainStart(deps testDeps, tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M {}",
"func (s *SqlDb) StartProfiling() {\n\ts.profiling = true\n}",
"func (_LvRecording *LvRecordingTransactor) SetStartTime(opts *bind.TransactOpts, _startTime *big.Int) (*types.Transaction, error) {\n\treturn _LvRecording.contract.Transact(opts, \"setStartTime\", _startTime)\n}",
"func (c *Cache) SetTimer(timer func() uint) {\n\tif c == nil {\n\t\treturn\n\t}\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\n\tc.timeNow = timer\n}",
"func (m *MockInternalScheduler) Start() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Start\")\n}",
"func (t *Tailer) startStopTimer() {\n\tstopTimer := time.NewTimer(t.closeTimeout)\n\t<-stopTimer.C\n\tt.stopForward()\n\tt.stop <- struct{}{}\n}",
"func (s *TestSource) Start(ctx context.Context) error {\n\tgo s.closer(ctx)\n\n\treturn nil\n}",
"func StartBillingTest(t *perftest.BillingParams) (id string, err error) {\n\tinitController()\n\n\t// allocate uuid for the test run\n\tt.ID = bson.NewObjectId()\n\n\t// for rating test, controller creates and assigns the stats controller to t;\n\t// Perftest package should be flexible and only deal\n\t// with iController interface for future extensibility\n\tvar statsDBConf stats.DBConfig\n\tstatsDBConf.Server = t.DBConf.Server\n\tstatsDBConf.Port = t.DBConf.Port\n\tstatsDBConf.Database = t.DBConf.Database\n\tstatsDBConf.UID = t.DBConf.UID\n\tstatsDBConf.Pwd = t.DBConf.Pwd\n\n\tsc, err := stats.CreateController(&statsDBConf)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tt.TestParams.DbController = sc\n\n\tc.tm.Add(t.ID, t)\n\treturn t.ID.Hex(), nil\n}",
"func (c *PurchasesVoidedpurchasesListCall) StartTime(startTime int64) *PurchasesVoidedpurchasesListCall {\n\tc.urlParams_.Set(\"startTime\", fmt.Sprint(startTime))\n\treturn c\n}",
"func (o TimelineOutput) StartTime() pulumi.IntPtrOutput {\n\treturn o.ApplyT(func(v *Timeline) pulumi.IntPtrOutput { return v.StartTime }).(pulumi.IntPtrOutput)\n}",
"func (c *Container) SetStartTime(newStart time.Time) {\n\tc.start = newStart\n}",
"func (c *Mock) StopTimer() {\n\tc.FakeStopTimer()\n}",
"func (_m *MockSeriesIterator) Start() time.Time {\n\tret := _m.ctrl.Call(_m, \"Start\")\n\tret0, _ := ret[0].(time.Time)\n\treturn ret0\n}",
"func requiresTestStart() {\n\tif !testsStarted {\n\t\tpanic(\"May only be called from within a test case\")\n\t}\n}",
"func (suite *PouchStartSuite) TestStartCommand(c *check.C) {\n\tname := \"start-normal\"\n\tcommand.PouchRun(\"create\", \"--name\", name, busyboxImage).Assert(c, icmd.Success)\n\n\tcommand.PouchRun(\"start\", name).Assert(c, icmd.Success)\n\n\tcommand.PouchRun(\"stop\", name).Assert(c, icmd.Success)\n}",
"func MainStart(matchString func(pat, str string) (bool, error), tests []InternalTest, benchmarks []InternalBenchmark, examples []InternalExample) *M",
"func (r *FailBack) startResetTimer() chan struct{} {\n\tfailCh := make(chan struct{}, 1)\n\tgo func() {\n\t\ttimer := time.NewTimer(r.opt.ResetAfter)\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-failCh:\n\t\t\t\tif !timer.Stop() {\n\t\t\t\t\t<-timer.C\n\t\t\t\t}\n\t\t\tcase <-timer.C:\n\t\t\t\tr.mu.Lock()\n\t\t\t\tr.active = 0\n\t\t\t\tLog.WithField(\"resolver\", r.resolvers[r.active].String()).Debug(\"failing back to resolver\")\n\t\t\t\tr.mu.Unlock()\n\t\t\t\tr.metrics.available.Add(1)\n\t\t\t\t// we just reset to the first resolver, let's wait for another failure before running again\n\t\t\t\t<-failCh\n\t\t\t}\n\t\t\ttimer.Reset(r.opt.ResetAfter)\n\t\t}\n\t}()\n\treturn failCh\n}",
"func (mr *MockStakerMockRecorder) StartTime() *gomock.Call {\n\tmr.mock.ctrl.T.Helper()\n\treturn mr.mock.ctrl.RecordCallWithMethodType(mr.mock, \"StartTime\", reflect.TypeOf((*MockStaker)(nil).StartTime))\n}",
"func (this *profiler) Start(key string) *profiler {\n\tvar m runtime.MemStats\n\truntime.ReadMemStats(&m)\n\n\tnewProfiler := new(profiler)\n\tnewProfiler.startTime = time.Now()\n\tnewProfiler.name = this.name + \"_\" + key\n\tnewProfiler.startMemory = bToKb(m.Alloc)\n\tthis.profilers = append(this.profilers, newProfiler)\n\treturn newProfiler\n}",
"func (m *SimulationAutomationRun) SetStartDateTime(value *i336074805fc853987abe6f7fe3ad97a6a6f3077a16391fec744f671a015fbd7e.Time)() {\n err := m.GetBackingStore().Set(\"startDateTime\", value)\n if err != nil {\n panic(err)\n }\n}",
"func (o *ApplianceSetupInfoAllOf) SetStartTime(v time.Time) {\n\to.StartTime = &v\n}",
"func (t *Tracer) TLSHandshakeStart() {\n\tatomic.CompareAndSwapInt64(&t.tlsHandshakeStart, 0, now())\n}",
"func (o *OnpremUpgradePhase) SetStartTime(v time.Time) {\n\to.StartTime = &v\n}",
"func Start(cb func(int), dur time.Duration) (*TCPTest, error) {\n\tp, err := tcputil.EmptyPort()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := make(chan struct{}, 10)\n\tgo func(c chan struct{}, p int) {\n\t\tdefer func() { c <- struct{}{} }()\n\t\tcb(p)\n\t}(c, p)\n\n\terr = tcputil.WaitLocalPort(p, dur)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &TCPTest{p, c}, nil\n}",
"func (m *TimerMutation) ResetTimerStart() {\n\tm.timerStart = nil\n\tdelete(m.clearedFields, timer.FieldTimerStart)\n}",
"func (mpi *mempoolImpl) startBatchTimer(reason string) {\n\t// stop old timer\n\tmpi.stopBatchTimer(StopReason3)\n\tmpi.logger.Debugf(\"Start batch timer, reason: %s\", reason)\n\ttimestamp := time.Now().UnixNano()\n\tkey := strconv.FormatInt(timestamp, 10)\n\tmpi.batchTimerMgr.isActive.Set(key, true)\n\n\ttime.AfterFunc(mpi.batchTimerMgr.timeout, func() {\n\t\tif mpi.batchTimerMgr.isActive.Has(key) {\n\t\t\tmpi.batchTimerMgr.timeoutEventC <- true\n\t\t}\n\t})\n}",
"func (m *Mock) Timer(d time.Duration) *Timer {\n\tch := make(chan time.Time, 1)\n\tt := &Timer{\n\t\tC: ch,\n\t\tc: ch,\n\t\tmock: m,\n\t\tnext: m.Now().Add(d),\n\t}\n\tm.addTimer((*internalTimer)(t))\n\treturn t\n}",
"func (s *RuntimeTaskStatus) SetStartTime() {\n\tnow := metav1.Now()\n\ts.StartTime = &now\n}"
] | [
"0.7461336",
"0.7087315",
"0.6641407",
"0.65067357",
"0.6499249",
"0.6474908",
"0.64326847",
"0.6320044",
"0.6317354",
"0.6302466",
"0.6290639",
"0.6218009",
"0.61945933",
"0.61817944",
"0.6150907",
"0.6118142",
"0.6114189",
"0.611406",
"0.61011887",
"0.6058078",
"0.5994329",
"0.59569335",
"0.59234625",
"0.58877295",
"0.58844143",
"0.58790946",
"0.58372825",
"0.5837034",
"0.58068377",
"0.5703132",
"0.56762433",
"0.56625074",
"0.56486243",
"0.56313676",
"0.56109595",
"0.5596421",
"0.55717534",
"0.5557827",
"0.5557031",
"0.55184835",
"0.5498998",
"0.54960364",
"0.546957",
"0.5462905",
"0.54513395",
"0.5448254",
"0.54272014",
"0.5381561",
"0.5377343",
"0.5353019",
"0.5328371",
"0.52789724",
"0.527208",
"0.5256938",
"0.52517915",
"0.5249542",
"0.52403176",
"0.5228484",
"0.5222039",
"0.5222039",
"0.52006775",
"0.5158763",
"0.5155721",
"0.5153114",
"0.5140727",
"0.5134162",
"0.5091122",
"0.50882983",
"0.5083235",
"0.5071338",
"0.5057647",
"0.5039407",
"0.50088954",
"0.5004724",
"0.49876696",
"0.49823573",
"0.49652764",
"0.49580014",
"0.49567202",
"0.49560305",
"0.49556106",
"0.49482784",
"0.4914271",
"0.4904954",
"0.49030003",
"0.48976487",
"0.48968226",
"0.48850062",
"0.48666814",
"0.48656592",
"0.48640245",
"0.48397166",
"0.48352465",
"0.483425",
"0.48271826",
"0.4819831",
"0.47958636",
"0.4785627",
"0.47848007",
"0.4782038"
] | 0.6173007 | 14 |
StopTimer stops timing a test. This can be used to pause the timer while performing complex initialization that you don't want to measure. | func (b *B) StopTimer() {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (c *Mock) StopTimer() {\n\tc.FakeStopTimer()\n}",
"func (b *B) StopTimer()",
"func (t *Timer) StopTimer() {\n\tt.end = time.Now()\n}",
"func (NilTimer) Stop() {}",
"func (t *StandardTimer) Stop() {\n\tt.meter.Stop()\n}",
"func (t *TimerSnapshot) Stop() {}",
"func (t *Timer) Stop() {\n\tif t.w == nil {\n\t\tpanic(\"time: Stop called on uninitialized Timer\")\n\t}\n\n\t_ = t.w.deleteTimer(t)\n\tt.w = nil\n}",
"func (t *Timer) Stop() {\n\tt.tickObj.Stop()\n}",
"func (timer *Timer) Stop() {\n\ttimer.stop <- true\n}",
"func (t *Timer) Stop() {\n\tmetrics.MeasureSince(strings.Split(t.path, \".\"), t.start)\n}",
"func (t *Timer) Stop() {\n\tif t == nil {\n\t\treturn\n\t}\n\n\tif !t.timer.Stop() {\n\t\tselect {\n\t\tcase <-t.timer.C:\n\t\tdefault:\n\t\t}\n\t}\n}",
"func (t *Timer) Stop() bool {\n\tif t.timer != nil {\n\t\treturn t.timer.Stop()\n\t}\n\treturn t.mock.removeClockTimer((*internalTimer)(t))\n}",
"func (s *TimerService) StopTimer(timer *models.Timer) error {\n\tnow := time.Now()\n\ttimer.ActualMinutes = s.CalculateMinutesForActiveTimer(timer)\n\ttimer.Minutes = timer.ActualMinutes\n\ttimer.FinishedAt = &now\n\treturn s.repository.update(timer)\n}",
"func (t *Tailer) startStopTimer() {\n\tstopTimer := time.NewTimer(t.closeTimeout)\n\t<-stopTimer.C\n\tt.stopForward()\n\tt.stop <- struct{}{}\n}",
"func (c *Timer) Stop() {\n\tc.ticker.Stop()\n\tc.options.OnDone(true)\n}",
"func (timer *WallclockTimer) Stop() error {\n\ttimer.command <- \"stop\"\n\treturn nil\n}",
"func (t *deadlineTimer) Stop() {\n\tif t.t == nil {\n\t\treturn\n\t}\n\tt.t.Stop()\n\tt.t = nil\n}",
"func (timeout *Timeout) Stop() {\n\ttimeout.state = Inactive\n\ttimeout.start = time.Now()\n}",
"func (tr *TimedRun) Stop(t *Timer) {\n\tstop := currentTimeFunc()\n\ttr.cl.Lock()\n\tdefer tr.cl.Unlock()\n\tif _, ok := tr.categories[t.category]; !ok {\n\t\ttr.categories[t.category] = 0\n\t}\n\ttr.categories[t.category] += stop.Sub(t.startTime)\n}",
"func (znp *Znp) SysOsalStopTimer(id uint8) (rsp *StatusResponse, err error) {\n\treq := &SysOsalStopTimer{ID: id}\n\terr = znp.ProcessRequest(unp.C_SREQ, unp.S_SYS, 0x0B, req, &rsp)\n\treturn\n}",
"func (t *Timer) Stop() bool {\n\treturn t.reset(time.Time{})\n}",
"func (r *realTimer) Stop() bool {\n\treturn r.timer.Stop()\n}",
"func (pt *panicTimer) stop() {\n\tif pt.t != nil {\n\t\tpt.t.Stop()\n\t\tpt.t = nil\n\t}\n}",
"func (attr *ConnAttr) StopTimer(key string) {\n\tif attr.Timers == nil {\n\t\treturn\n\t}\n\n\tv, e := attr.Timers.Get(key)\n\tif e != nil {\n\t\treturn\n\t}\n\tv.(*time.Timer).Stop()\n\tattr.Timers.Del(key)\n\tlogs.Logger.Debugf(\"ClientID: %s, Key: %s, StopTimer OK\", attr.ClientID, key)\n}",
"func (rf *Raft) stop(timer *time.Timer) {\n\tif !timer.Stop() && len(timer.C) != 0 {\n\t\t<-timer.C\n\t}\n}",
"func (s *Server) StopTest(ctx context.Context, request *StopTest_Request) (res *StopTest_Response, err error) {\n\tres = new(StopTest_Response)\n\n\ts.muTests.Lock()\n\n\tfor _, gtest := range s.TestSessions {\n\t\tfor index, ts := range gtest {\n\t\t\telapsed := ts.Stop()\n\t\t\tlogging.Log(fmt.Sprintf(\"test [%d] ended in %dm%ds\", index, int(elapsed.Minutes()), int(elapsed.Seconds())))\n\n\t\t\t// @TODO: keep trace of the test\n\t\t\tdelete(gtest, index)\n\t\t}\n\t}\n\n\ts.muTests.Unlock()\n\n\treturn res, logging.LogErr(err)\n}",
"func (t *Ticker) Stop() {\n\tif t.ticker != nil {\n\t\tt.ticker.Stop()\n\t} else {\n\t\tt.mock.removeClockTimer((*internalTicker)(t))\n\t}\n}",
"func (it *IdleTimer) Stop() {\n\tif it == nil {\n\t\treturn\n\t}\n\n\tit.Lock()\n\tdefer it.Unlock()\n\tit.state = TimerDead\n\tit.resetTimeout()\n}",
"func (s *Stopwatch) Stop() {\n\ts.Lock()\n\tdefer s.Unlock()\n\tif s.active() {\n\t\ts.stop = time.Now()\n\t}\n}",
"func (b *TestDriver) StopRecording() error {\n\treturn nil\n}",
"func (o *FakeObjectTrackers) Stop() {\n\to.ControlMachine.Stop()\n\to.TargetCore.Stop()\n}",
"func (nt *NickTimer) Stop() {\n\tnt.Lock()\n\tdefer nt.Unlock()\n\tnt.stopInternal()\n}",
"func (t *Tracer) Stop() {}",
"func StopTimeout(t time.Duration) Option {\n\treturn func(o *options) { o.stopTimeout = t }\n}",
"func (sit *SyncIntervalTimer) Stop() {\n\tsit.stop <- struct{}{}\n\tif sit.intervalTimer != nil {\n\t\tsit.intervalTimer.Stop()\n\t}\n}",
"func (s *ContainerDefinition) SetStopTimeout(v int64) *ContainerDefinition {\n\ts.StopTimeout = &v\n\treturn s\n}",
"func (tw *TimingWheel) Stop() {\n\ttw.ticker.Stop()\n}",
"func (tw *TimeWheel) Stop() {\n\ttw.stopFlag <- struct{}{}\n}",
"func (m *Machine) Stop() {\n\tm.Lock()\n\tdefer m.Unlock()\n\n\tif m.backoffTimer != nil {\n\t\tm.backoffTimer.Stop()\n\t}\n\n\tif m.cancel != nil {\n\t\tm.Infof(\"runner\", \"Stopping\")\n\t\tm.cancel()\n\t}\n\n\tm.startTime = time.Time{}\n}",
"func (tr *TestRunner) Stop() {\n\tlog.Println(\"Initiating Stop in TestRunner\")\n\tclose(tr.stop)\n\t// Release the portgroup\n\ttr.pg = nil\n}",
"func (t *Tracker) Stop() {\n\tt.Finish = time.Now()\n\tt.Duration = time.Since(t.Run)\n}",
"func (cm *CertMan) Stop() {\n\tcm.watching <- false\n}",
"func (b *B) ResetTimer()",
"func (e *EvtFailureDetector) Stop() {\n\te.stop <- struct{}{}\n}",
"func (_m *TimeTicker) Stop() {\n\t_m.Called()\n}",
"func (hb *Heartbeat) Stop() {\n\thb.timer.Stop()\n}",
"func (t *timer) stop() int64 {\n\twhen := time.Now()\n\tif t.end.IsZero() {\n\t\tt.end = when\n\t}\n\treturn t.end.Sub(t.start).Nanoseconds() / 1e6\n}",
"func Stop() {\n\tstopMux.Lock()\n\tif stoppedAt != nil {\n\t\tpanic(\"Time was already stopped\")\n\t}\n\tnow := Now()\n\tstoppedAt = &now\n\tstopMux.Unlock()\n}",
"func (rp *Pool) StopTimers() {\n\trp.lock.Lock()\n\tdefer rp.lock.Unlock()\n\n\trp.stopped = true\n\n\tfor _, element := range rp.existMap {\n\t\titem := element.Value.(*requestItem)\n\t\titem.timeout.Stop()\n\t}\n\n\trp.logger.Debugf(\"Stopped all timers: size=%d\", len(rp.existMap))\n}",
"func (c *Mock) SetTimer(t *time.Timer) {\n\tc.FakeSetTimer(t)\n}",
"func (s *maxEPSSampler) Stop() {\n\ts.reportDone <- true\n\t<-s.reportDone\n\n\ts.rateCounter.Stop()\n}",
"func (b *TestDriver) Stop() error {\n\tb.flying = true\n\tb.Publish(Hovering, true)\n\treturn nil\n}",
"func (s *ContinuousExportDescription) SetStopTime(v time.Time) *ContinuousExportDescription {\n\ts.StopTime = &v\n\treturn s\n}",
"func (s *Stopwatch) Stop() {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.isRunning() {\n\t\ts.elapsedTime += time.Since(s.refTime)\n\t\ts.refTime = time.Time{}\n\t} else {\n\t\tfmt.Printf(\"WARNING: Stopwatch.Stop() isRunning is false\\n\")\n\t}\n}",
"func (bench *Stopwatch) Stop(lap int32) {\n\tif lap < 0 {\n\t\treturn\n\t}\n\tbench.spans[lap].Finish = Now()\n\n\tlapsMeasured := atomic.AddInt32(&bench.lapsMeasured, 1)\n\tif int(lapsMeasured) == len(bench.spans) {\n\t\tbench.finalize()\n\t} else if int(lapsMeasured) > len(bench.spans) {\n\t\tpanic(\"stop called too many times\")\n\t}\n}",
"func (tw *TimeWheel) RemoveTimer(key interface{}) {\n\tif key == nil {\n\t\treturn\n\t}\n\ttw.removeTaskChan <- key\n}",
"func (tkr *RandTicker) Stop() {\n\tclose(tkr.done)\n}",
"func (s *samplerBackendRateCounter) Stop() {\n\tclose(s.exit)\n\t<-s.stopped\n}",
"func (m *mockService) Stop() {\n\t// m.ctrl.Finish() calls runtime.Goexit() on errors\n\t// put it in defer so cleanup is always done\n\tdefer func() {\n\t\tm.server.Shutdown()\n\t\tm.started = false\n\t}()\n\tm.ctrl.Finish()\n}",
"func (eCtx *ExecutionContext) CancelTimer(repeating bool) {\n\tact := eCtx.currentStage().act\n\n\tstate := eCtx.pipeline.sm.GetState(eCtx.discriminator)\n\n\tif repeating {\n\t\tstate.RemoveTicker(act)\n\t} else {\n\t\tstate.RemoveTimer(act)\n\t}\n}",
"func (c *SwitchTicker) Stop() {\n\tc.slowTicker.Stop()\n\tc.fastTicker.Stop()\n}",
"func (b *NoopStop) Stop(t testing.TB) error {\n\treturn nil\n}",
"func (bt *BackTest) Stop() {\n\tclose(bt.shutdown)\n}",
"func StopMockups() {\n\tenabledMocks = false\n}",
"func (f *FakeOutput) Stop() error { return nil }",
"func NewTimer() *Timer {\n\ttimer := &Timer{\n\t\tstop: make(chan bool, 1),\n\t\tpause: false,\n\t\tFinished: nil,\n\t\tOnTick: func(current int64, total int64) {},\n\t\tOnFinish: func() {},\n\t}\n\treturn timer\n}",
"func (p *PredefinedFake) NewTimer(d time.Duration) Timer {\n\tklog.Exitf(\"PredefinedFake.NewTimer is not implemented\")\n\treturn nil\n}",
"func (dt *discoveryTool) stop() {\n\tclose(dt.done)\n\n\t//Shutdown timer\n\ttimer := time.NewTimer(time.Second * 3)\n\tdefer timer.Stop()\nL:\n\tfor { //Unblock go routine by reading from dt.dataChan\n\t\tselect {\n\t\tcase <-timer.C:\n\t\t\tbreak L\n\t\tcase <-dt.dataChan:\n\t\t}\n\t}\n\n\tdt.wg.Wait()\n}",
"func (s *GetRunOutput) SetStopTime(v time.Time) *GetRunOutput {\n\ts.StopTime = &v\n\treturn s\n}",
"func (r *Reporter) Stop() {\n\tr.elapsed = time.Since(r.startAt)\n\tr.Report()\n\tos.Exit(0)\n}",
"func (tm *ServiceTracerouteManager) Stop() {\n\ttm.StopChan <- true\n}",
"func (s *GetRunTaskOutput) SetStopTime(v time.Time) *GetRunTaskOutput {\n\ts.StopTime = &v\n\treturn s\n}",
"func (c *Container) removeTimer() error {\n\treturn define.ErrNotImplemented\n}",
"func (t *TCPTest) Stop() {\n\tt.exit<-struct{}{}\n}",
"func (s *RunListItem) SetStopTime(v time.Time) *RunListItem {\n\ts.StopTime = &v\n\treturn s\n}",
"func Stop() {\n\ts.Stop()\n}",
"func (s *Sampler) Stop() {\n\ts.Backend.Stop()\n\tclose(s.exit)\n}",
"func (fd *failureDetector) Stop() {\n\tfd.stop <- struct{}{}\n}",
"func (mpi *mempoolImpl) stopBatchTimer(reason string) {\n\tif mpi.batchTimerMgr.isActive.IsEmpty() {\n\t\treturn\n\t}\n\tmpi.logger.Debugf(\"Stop batch timer, reason: %s\", reason)\n\tmpi.batchTimerMgr.isActive = cmap.New()\n}",
"func (s *MockMetricsServer) Stop() {\n\t_ = s.e.Close()\n}",
"func (b *B) ResetTimer() {}",
"func excludeFromTimer(b *testing.B, f func()) {\n\tb.StopTimer()\n\tf()\n\tb.StartTimer()\n}",
"func (s *TaskListItem) SetStopTime(v time.Time) *TaskListItem {\n\ts.StopTime = &v\n\treturn s\n}",
"func (m *Mock) StopPlugin(name string, cancelFlag task.CancelFlag) (err error) {\n\treturn nil\n}",
"func (sys Systemd) Stop(unit string) error {\n\tns := fmt.Sprintf(\"project_%s_%s\", sys.p.ID, sys.kind)\n\ttarget := ns + \"_\" + unit\n\n\tif err := exec.Command(\"systemctl\", \"stop\", target).Run(); err != nil {\n\t\treturn fmt.Errorf(\"failed to stop systemd unit %s: %s\", target, err)\n\t}\n\treturn nil\n}",
"func (r *Randomizer) Stop() {\n\tif !r.running {\n\t\treturn\n\t}\n\n\tclose(r.quit)\n}",
"func (a *actorsRuntime) Stop() {\n\tif a.placement != nil {\n\t\ta.placement.Stop()\n\t}\n}",
"func (mdsMock *MockedMDS) Stop() {\n\tmdsMock.Called()\n}",
"func (s *Service) PauseTimer(id int) error {\n\tif timer, err := s.GetTimer(id); err == nil {\n\t\ttimer.Pause()\n\n\t\treturn nil\n\t} else {\n\t\treturn err\n\t}\n}",
"func (m *MockInternalScheduler) Stop() {\n\tm.ctrl.T.Helper()\n\tm.ctrl.Call(m, \"Stop\")\n}",
"func (o *ProjectDeploymentRuleResponse) SetStopTime(v time.Time) {\n\to.StopTime.Set(&v)\n}",
"func (f *FakeTunnel) Stop() error {\n\tf.active = false\n\treturn nil\n}",
"func (s *StopContinuousExportOutput) SetStopTime(v time.Time) *StopContinuousExportOutput {\n\ts.StopTime = &v\n\treturn s\n}",
"func (t *PCPTimer) Stop() (float64, error) {\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\tif !t.started {\n\t\treturn 0, errors.New(\"trying to stop a stopped timer\")\n\t}\n\n\td := time.Since(t.since)\n\n\tvar inc float64\n\tswitch t.pcpMetricDesc.Unit() {\n\tcase NanosecondUnit:\n\t\tinc = float64(d.Nanoseconds())\n\tcase MicrosecondUnit:\n\t\tinc = float64(d.Nanoseconds()) * 1e-3\n\tcase MillisecondUnit:\n\t\tinc = float64(d.Nanoseconds()) * 1e-6\n\tcase SecondUnit:\n\t\tinc = d.Seconds()\n\tcase MinuteUnit:\n\t\tinc = d.Minutes()\n\tcase HourUnit:\n\t\tinc = d.Hours()\n\t}\n\n\tv := t.val.(float64)\n\n\terr := t.set(v + inc)\n\tif err != nil {\n\t\treturn -1, err\n\t}\n\n\tt.started = false\n\treturn v + inc, nil\n}",
"func (s *ServerlessTraceAgent) Stop() {\n\tif s.cancel != nil {\n\t\ts.cancel()\n\t}\n}",
"func (c *channel) stopDeleteTimer() {\n\tif c.activity.timer != nil {\n\t\tc.activity.timer.Stop()\n\t\tc.activity.timerSet = false\n\t\tif c.stan.debug {\n\t\t\tc.stan.log.Debugf(\"Channel %q delete timer stopped\", c.name)\n\t\t}\n\t}\n}",
"func Stop() {\n\t// pause sched0 fisrt.\n\tPause()\n\n\t// wait until all started tasks (i.e. tasks is executing other than\n\t// timing) stops\n\tfor atomic.LoadUint64(&sched0.running) > 0 {\n\t\truntime.Gosched()\n\t}\n\n\t// reset pausing indicator\n\tatomic.AddUint64(&sched0.pausing, ^uint64(0))\n\tsched0.cache.Close()\n}",
"func (_e *MockDataCoord_Expecter) Stop() *MockDataCoord_Stop_Call {\n\treturn &MockDataCoord_Stop_Call{Call: _e.mock.On(\"Stop\")}\n}",
"func (t *Timer) Stop() bool {\n\tstopped := false\n\tfor b := t.getBucket(); b != nil; b = t.getBucket() {\n\t\t// If b.Remove is called just after the timing wheel's goroutine has:\n\t\t// 1. removed t from b (through b.Flush -> b.remove)\n\t\t// 2. moved t from b to another bucket ab (through b.Flush -> b.remove and ab.Add)\n\t\t// this may fail to remove t due to the change of t's bucket.\n\t\tstopped = b.Remove(t)\n\n\t\t// Thus, here we re-get t's possibly new bucket (nil for case 1, or ab (non-nil) for case 2),\n\t\t// and retry until the bucket becomes nil, which indicates that t has finally been removed.\n\t}\n\treturn stopped\n}",
"func (s *Solver) Stop() {\n\ts.phase1.Stop()\n\tclose(s.stopper)\n}"
] | [
"0.7580399",
"0.7422544",
"0.74062115",
"0.7118259",
"0.68404835",
"0.6725982",
"0.65600497",
"0.6555783",
"0.6548834",
"0.65385425",
"0.64515626",
"0.6443249",
"0.6393915",
"0.63685316",
"0.6154625",
"0.6152977",
"0.61397845",
"0.61309195",
"0.61257225",
"0.60119367",
"0.6009082",
"0.59716153",
"0.594126",
"0.593258",
"0.58775735",
"0.58347684",
"0.5831501",
"0.57604325",
"0.5735256",
"0.5732891",
"0.5728549",
"0.572592",
"0.5724253",
"0.57183427",
"0.56973624",
"0.56136096",
"0.56079817",
"0.559473",
"0.5585143",
"0.5579318",
"0.55747867",
"0.5555043",
"0.55356336",
"0.5514098",
"0.5511841",
"0.54634565",
"0.5458334",
"0.5454701",
"0.54441464",
"0.542471",
"0.5409852",
"0.54051954",
"0.54047513",
"0.53905296",
"0.5365744",
"0.5364483",
"0.5363107",
"0.5358902",
"0.5341245",
"0.5334487",
"0.5329734",
"0.53167653",
"0.5316583",
"0.5297479",
"0.52790356",
"0.52765936",
"0.52542275",
"0.5249054",
"0.52427185",
"0.52423066",
"0.52365977",
"0.5225092",
"0.52233565",
"0.52143663",
"0.5212354",
"0.5197049",
"0.51903176",
"0.51806486",
"0.517889",
"0.51433885",
"0.51366735",
"0.51073843",
"0.50973225",
"0.509492",
"0.5088447",
"0.5087496",
"0.50836986",
"0.5062687",
"0.50621206",
"0.5060627",
"0.5052978",
"0.5036205",
"0.5019333",
"0.50181544",
"0.50098884",
"0.5004778",
"0.500476",
"0.49975413",
"0.49965394",
"0.49953598"
] | 0.6915054 | 4 |
ResetTimer zeroes the elapsed benchmark time and memory allocation counters and deletes user-reported metrics. It does not affect whether the timer is running. | func (b *B) ResetTimer() {} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"func (timer *RealRTimer) Reset(d time.Duration) {\n\tif timer.innerTimer == nil {\n\t\ttimer.innerTimer = time.NewTimer(d)\n\t} else {\n\t\ttimer.innerTimer.Reset(d)\n\t}\n}",
"func (b *B) ResetTimer()",
"func (t *Timer) Reset() {\n\tt.goalTime = math.MaxFloat64\n\tt.startTime = time.Now()\n}",
"func (t *Timer) Reset() {\n\tt.Start()\n}",
"func (s *Stopwatch) Reset() {\n\ts.Lock()\n\tdefer s.Unlock()\n\n\tif s.isRunning() {\n\t\tfmt.Printf(\"WARNING: Stopwatch.Reset() isRunning is true\\n\")\n\t}\n\ts.refTime = time.Time{}\n\ts.elapsedTime = 0\n}",
"func (t *Timer) Reset() {\n\tt.currentTime = t.getCurrentTimeMs()\n\tt.lastTime = t.currentTime\n\tt.tick = 0\n}",
"func (t *PCPTimer) Reset() error {\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\tif t.started {\n\t\treturn errors.New(\"trying to reset an already started timer\")\n\t}\n\n\treturn t.set(float64(0))\n}",
"func (t *Timer) Reset(d time.Duration) bool {\n\tif t.timer != nil {\n\t\treturn t.timer.Reset(d)\n\t}\n\tret := t.mock.removeClockTimer((*internalTimer)(t))\n\tt.next = t.mock.Now().Add(d)\n\tt.mock.addTimer((*internalTimer)(t))\n\treturn ret\n}",
"func (t *Timer) Reset(d time.Duration) {\n\tif d <= 0 {\n\t\treturn\n\t}\n\tif t.w == nil {\n\t\tpanic(\"time: Stop called on uninitialized Timer\")\n\t}\n\n\t_ = t.w.resetTimer(t, d)\n}",
"func (sw *Stopwatch) Reset() {\n\tsw.t = time.Now()\n}",
"func (tw *TimingsWrapper) Reset() {\n\ttw.timings.Reset()\n}",
"func (state *ServerState) ResetStateTimer() {\n\tt := time.Duration(MINTIME+rand.Intn(TIMERANGE)) * TIMESCALE\n\tfmt.Println(t)\n\tstate.timer.Reset(t)\n}",
"func (tw *MultiTimingsWrapper) Reset() {\n\ttw.timings.Reset()\n}",
"func (r *Raft) ResetTimer(){\n\t//fmt.Println(\"Election TImer Reset\")\n\tif r.Id==0 {\n \tElectionTimer.Reset(time.Millisecond*10000) \t\n\t}else if r.Id==1 {\n \tElectionTimer.Reset(time.Millisecond*3000)\n }else if r.Id==2 {\n \tElectionTimer.Reset(time.Millisecond*12000)\n\t}else if r.Id==3 {\n \tElectionTimer.Reset(time.Millisecond*14000)\n }else if r.Id==4 {\n \tElectionTimer.Reset(time.Millisecond*16000)\n\t}else {\n\tElectionTimer.Reset(time.Millisecond*18000)\n\t}\n\n}",
"func ResetKillClock(t *time.Timer, d time.Duration) {\n\tif d == 0 {\n\t\treturn\n\t}\n\tif !t.Stop() {\n\t\t<-t.C\n\t}\n\tt.Reset(d)\n}",
"func (timer *WallclockTimer) Reset() error {\n\ttimer.command <- \"reset\"\n\treturn nil\n}",
"func (s *Stopwatch) Reset() {\n\t*s = Stopwatch{}\n}",
"func Reset() {\n\tlog.Warnf(\"Resetting all check stats\")\n\n\tcheckStats.statsLock.Lock()\n\tdefer checkStats.statsLock.Unlock()\n\n\t// Clear checks stats\n\tfor key := range checkStats.stats {\n\t\tdelete(checkStats.stats, key)\n\t}\n\n\t// Clear running checks map\n\trunningChecksStats.Init()\n\n\t// Clear top-level expvars on the runner\n\tfor _, key := range []string{\n\t\terrorsExpvarKey,\n\t\trunsExpvarKey,\n\t\trunningChecksExpvarKey,\n\t\twarningsExpvarKey,\n\t} {\n\t\trunnerStats.Delete(key)\n\t}\n\n\tresetWorkersExpvar(runnerStats)\n}",
"func (m *TimerMutation) ResetElapsedSeconds() {\n\tm.elapsedSeconds = nil\n\tm.addelapsedSeconds = nil\n\tdelete(m.clearedFields, timer.FieldElapsedSeconds)\n}",
"func (a *MetricAggregator) Reset() {\n\ta.metricMapsReceived = 0\n\tnowNano := gostatsd.Nanotime(a.now().UnixNano())\n\n\ta.metricMap.Counters.Each(func(key, tagsKey string, counter gostatsd.Counter) {\n\t\tif isExpired(a.expiryIntervalCounter, nowNano, counter.Timestamp) {\n\t\t\tdeleteMetric(key, tagsKey, a.metricMap.Counters)\n\t\t} else {\n\t\t\ta.metricMap.Counters[key][tagsKey] = gostatsd.Counter{\n\t\t\t\tTimestamp: counter.Timestamp,\n\t\t\t\tSource: counter.Source,\n\t\t\t\tTags: counter.Tags,\n\t\t\t}\n\t\t}\n\t})\n\n\ta.metricMap.Timers.Each(func(key, tagsKey string, timer gostatsd.Timer) {\n\t\tif isExpired(a.expiryIntervalTimer, nowNano, timer.Timestamp) {\n\t\t\tdeleteMetric(key, tagsKey, a.metricMap.Timers)\n\t\t} else {\n\t\t\tif hasHistogramTag(timer) {\n\t\t\t\ta.metricMap.Timers[key][tagsKey] = gostatsd.Timer{\n\t\t\t\t\tTimestamp: timer.Timestamp,\n\t\t\t\t\tSource: timer.Source,\n\t\t\t\t\tTags: timer.Tags,\n\t\t\t\t\tValues: timer.Values[:0],\n\t\t\t\t\tHistogram: emptyHistogram(timer, a.histogramLimit),\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\ta.metricMap.Timers[key][tagsKey] = gostatsd.Timer{\n\t\t\t\t\tTimestamp: timer.Timestamp,\n\t\t\t\t\tSource: timer.Source,\n\t\t\t\t\tTags: timer.Tags,\n\t\t\t\t\tValues: timer.Values[:0],\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t})\n\n\ta.metricMap.Gauges.Each(func(key, tagsKey string, gauge gostatsd.Gauge) {\n\t\tif isExpired(a.expiryIntervalGauge, nowNano, gauge.Timestamp) {\n\t\t\tdeleteMetric(key, tagsKey, a.metricMap.Gauges)\n\t\t}\n\t\t// No reset for gauges, they keep the last value until expiration\n\t})\n\n\ta.metricMap.Sets.Each(func(key, tagsKey string, set gostatsd.Set) {\n\t\tif isExpired(a.expiryIntervalSet, nowNano, set.Timestamp) {\n\t\t\tdeleteMetric(key, tagsKey, a.metricMap.Sets)\n\t\t} else {\n\t\t\ta.metricMap.Sets[key][tagsKey] = gostatsd.Set{\n\t\t\t\tValues: make(map[string]struct{}),\n\t\t\t\tTimestamp: set.Timestamp,\n\t\t\t\tSource: set.Source,\n\t\t\t\tTags: set.Tags,\n\t\t\t}\n\t\t}\n\t})\n}",
"func (m *Metrics) Reset() {\n\tmetrics.Reset()\n\tm.Lock()\n\tm.gauges = make(map[string]metrics.Gauge)\n\tm.counters = make(map[string]metrics.Counter)\n\tm.histograms = make(map[string]*metrics.Histogram)\n\tm.Unlock()\n}",
"func (s *Stopwatch) Reset(offset time.Duration, active bool) {\n\tnow := time.Now()\n\ts.Lock()\n\tdefer s.Unlock()\n\ts.start = now.Add(-offset)\n\tif active {\n\t\ts.stop = time.Time{}\n\t} else {\n\t\ts.stop = now\n\t}\n\ts.mark = 0\n\ts.laps = nil\n}",
"func (t *Timer) StopTimer() {\n\tt.end = time.Now()\n}",
"func (e *Timing) Reset() {\n\te.Min = 0\n\te.Max = 0\n\te.Value = 0\n\te.Values = make(float64Slice, 0)\n\te.Count = 0\n}",
"func (u *Util) ResetControlDuration() {\n\tcontrolDuration = 0\n}",
"func (r *realTimer) Reset(d time.Duration) bool {\n\treturn r.timer.Reset(d)\n}",
"func (i *idlenessManagerImpl) resetIdleTimer(d time.Duration) {\n\ti.idleMu.Lock()\n\tdefer i.idleMu.Unlock()\n\n\tif i.timer == nil {\n\t\t// Only close sets timer to nil. We are done.\n\t\treturn\n\t}\n\n\t// It is safe to ignore the return value from Reset() because this method is\n\t// only ever called from the timer callback, which means the timer has\n\t// already fired.\n\ti.timer.Reset(d)\n}",
"func (a *AzureMonitor) Reset() {\n\tfor tbucket := range a.cache {\n\t\t// Remove aggregates older than 30 minutes\n\t\tif tbucket.Before(a.timeFunc().Add(-time.Minute * 30)) {\n\t\t\tdelete(a.cache, tbucket)\n\t\t\tcontinue\n\t\t}\n\t\t// Metrics updated within the latest 1m have not been pushed and should\n\t\t// not be cleared.\n\t\tif tbucket.After(a.timeFunc().Add(-time.Minute)) {\n\t\t\tcontinue\n\t\t}\n\t\tfor id := range a.cache[tbucket] {\n\t\t\ta.cache[tbucket][id].updated = false\n\t\t}\n\t}\n}",
"func (m *metricMap) Reset() {\n\tm.mtx.Lock()\n\tdefer m.mtx.Unlock()\n\n\tfor h := range m.metrics {\n\t\tdelete(m.metrics, h)\n\t}\n}",
"func Reset() {\n\tstats.Reset()\n}",
"func ResetMetrics() {\n\tdroppedBatchVec.Reset()\n\tdroppedBytesVec.Reset()\n\trotateVec.Reset()\n\tputVec.Reset()\n\tgetVec.Reset()\n\tputBytesVec.Reset()\n\twakeupVec.Reset()\n\tgetBytesVec.Reset()\n\tcapVec.Reset()\n\tbatchSizeVec.Reset()\n\tmaxDataVec.Reset()\n\tsizeVec.Reset()\n\tdatafilesVec.Reset()\n\tgetLatencyVec.Reset()\n\tputLatencyVec.Reset()\n}",
"func (t *BackoffTreeTimer) Clear() {\n\tt.slk.Lock()\n\tdefer t.slk.Unlock()\n\tt.state.Clear(time.Now())\n}",
"func (c *Counter) Reset() {\n\tc.Lock()\n\tc.Unlock()\n\tc.lastFail = time.Time{}\n\tc.lastSuccess = time.Time{}\n\tc.failure = 0\n\tc.success = 0\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (mb *MetricsBuilder) Reset(options ...metricBuilderOption) {\n\tmb.startTime = pcommon.NewTimestampFromTime(time.Now())\n\tfor _, op := range options {\n\t\top(mb)\n\t}\n}",
"func (s *DevStat) ResetCounters() {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\ts.reset()\n}",
"func (c *Mock) StopTimer() {\n\tc.FakeStopTimer()\n}",
"func (context *context) ResetTimings() {\n\tcontext.model.ctx.Whisper_reset_timings()\n}",
"func (f *Sink) resetTimeoutTimer() {\n\tif f.timeoutList.Len() == 0 {\n\t\tf.timeoutTimer.Stop()\n\t\treturn\n\t}\n\n\ttimeout := f.timeoutList.Front().Value.(*Timeout)\n\tlog.Debug(\"Timeout timer reset - due at %v\", timeout.timeoutDue)\n\tf.timeoutTimer.Reset(timeout.timeoutDue.Sub(time.Now()))\n}",
"func CleanupTimer(t *time.Timer) {\n\t// prevent the timer from firing\n\tt.Stop()\n\n\tselect {\n\tcase <-t.C:\n\t\t// drain the channel in case the timer fired\n\tdefault:\n\t\t// do not block if channel is already empty\n\t}\n}",
"func (c *Mock) SetTimer(t *time.Timer) {\n\tc.FakeSetTimer(t)\n}",
"func resetStopwatch(w http.ResponseWriter, r *http.Request) {\n\tlog.Printf(\"Stopwatch start request from %s\", r.RemoteAddr)\n\tstopwatch = time.Now()\n\tresetCounter()\n\tio.WriteString(w, \"\")\n\tif resultsFile != \"\" {\n\t\twriteResults()\n\t}\n}",
"func (t *Timer) Reset(d time.Duration) bool {\n\tt.mu.Lock()\n\tdefer t.mu.Unlock()\n\treturn t.Timer.Reset(d)\n}",
"func (t *Timer) Stop() {\n\tif t.w == nil {\n\t\tpanic(\"time: Stop called on uninitialized Timer\")\n\t}\n\n\t_ = t.w.deleteTimer(t)\n\tt.w = nil\n}",
"func (m *TimerMutation) ResetTimerStart() {\n\tm.timerStart = nil\n\tdelete(m.clearedFields, timer.FieldTimerStart)\n}",
"func (r *RunningStats) Clear() {\n\tr.n = 0\n\tr.m1 = 0.0\n\tr.m2 = 0.0\n\tr.m3 = 0.0\n\tr.m4 = 0.0\n}",
"func (m *MaxTime) Reset() {\n\tatomic.StoreInt64((*int64)(m), 0)\n}",
"func (w *HotCache) ResetMetrics() {\n\thotCacheStatusGauge.Reset()\n}",
"func (p *TimePanel) Reset() {\n}",
"func (s *Statistics) reset() {\n\ts.cycles++\n\ts.totalMessagesCleared += s.messagesCleared\n\n\ts.memoryCleared = 0\n\ts.messagesCleared = 0\n}",
"func (m *BillMutation) ResetTime() {\n\tm.time = nil\n\tm.addtime = nil\n}",
"func (wd *Watchdog) reset(timeoutNanoSecs int64) {\n\twd.resets <- timeoutNanoSecs + time.Now().UnixNano()\n}",
"func NewResettableMetrics() *ResettableMetrics {\n\n\t//next tick happens on the set hour, minute and second of the next day.\n\ttimeToStartTicker := time.Date(time.Now().Year(), time.Now().Month(), time.Now().Day()+1, hourToTick, minuteToTick, secondToTick, 0, time.UTC)\n\n\tresettableMetrics := ResettableMetrics{\n\t\tstartTicker: time.NewTimer(timeToStartTicker.Sub(time.Now())), //tick on the set time\n\t\tresetEvery: time.Hour * 24,\n\t\tresettableMetricsMap: make(map[string]*metrics.Gauge),\n\t\tdone: make(chan struct{}),\n\t}\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-resettableMetrics.startTicker.C:\n\t\t\t\tdefer resettableMetrics.startTicker.Stop()\n\t\t\t\tlogr.Info(\"Starting the resettable metrics ticker!\")\n\t\t\t\tresettableMetrics.ticker = time.NewTicker(resettableMetrics.resetEvery)\n\t\t\t\tgo func() {\n\t\t\t\t\tfor {\n\t\t\t\t\t\tselect {\n\t\t\t\t\t\tcase <-resettableMetrics.ticker.C:\n\t\t\t\t\t\t\tlogr.Info(\"Resetting metrics to 0!\")\n\t\t\t\t\t\t\tresettableMetrics.reset()\n\t\t\t\t\t\tcase <-resettableMetrics.done:\n\t\t\t\t\t\t\tresettableMetrics.stop()\n\t\t\t\t\t\t\tlogr.Info(\"Shutting down resettable metrics ticker\")\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}()\n\t\t\t\treturn\n\n\t\t\t// In case stop is called before ticker ticks\n\t\t\tcase <-resettableMetrics.done:\n\t\t\t\tresettableMetrics.stop()\n\t\t\t\tlogr.Info(\"Resettable metrics ticker was stopped before it could start\")\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn &resettableMetrics\n}",
"func (t *Timer) KillTimer(tid int) {\n\tif tid == INVALID_TICK {\n\t\treturn\n\t}\n\tt.lock.Lock()\n\tdefer t.lock.Unlock()\n\tt.evtTree.EraseById(tid)\n}",
"func (c *PrometheusCollector) Reset() {\n\tc.RWMutex.Lock()\n\tdefer c.RWMutex.Unlock()\n}",
"func (m *TimerMutation) ClearTimerStart() {\n\tm.timerStart = nil\n\tm.clearedFields[timer.FieldTimerStart] = struct{}{}\n}",
"func (m *TimerMutation) ClearElapsedSeconds() {\n\tm.elapsedSeconds = nil\n\tm.addelapsedSeconds = nil\n\tm.clearedFields[timer.FieldElapsedSeconds] = struct{}{}\n}",
"func Reset() {\n\tstopMux.Lock()\n\tstoppedAt = nil\n\tstoppedFor = 0\n\tstopMux.Unlock()\n}",
"func ResetFailedAssertionCounter() {\n\tfailedAssertionCounter = 0\n}",
"func (sl *StagesLatency) ResetStatistics() {\n\tsl.first = duplicateSlice(sl.last)\n\tsl.FirstCollected = sl.LastCollected\n\n\tsl.calculate()\n}",
"func (c *Counter) Reset() {\n\tc.global.Store(0)\n\tc.window.Store(0)\n}",
"func (m *TimerMutation) ResetTimerEnd() {\n\tm.timerEnd = nil\n\tdelete(m.clearedFields, timer.FieldTimerEnd)\n}",
"func (collector *Collector) resetTimeout() {\n\t// We only need to do something if there actually is a ticker (ie: if an interval was specified)\n\tif collector.ticker != nil {\n\t\t// Stop the ticker so it can be garbage collected\n\t\tcollector.ticker.Stop()\n\n\t\t// From everything I've read the only real way to reset a ticker is to recreate it\n\t\tcollector.ticker = time.NewTicker(collector.config.Timeout.Interval)\n\t\tcollector.timeoutChannel = collector.ticker.C\n\t}\n}",
"func (_m *TimeTicker) Reset(d time.Duration) {\n\t_m.Called(d)\n}",
"func (c *Counter) ResetAllMetrics() {\n\tfor _, m := range metricsList {\n\t\tm.Reset()\n\t}\n}",
"func (r *Resampler) Reset() (err error) {\n\tif r.resampler == nil {\n\t\treturn errors.New(\"soxr resampler is nil\")\n\t}\n\tC.soxr_clear(r.resampler)\n\treturn\n}",
"func (t *StandardTimer) Stop() {\n\tt.meter.Stop()\n}",
"func (scsuo *SurveyCellScanUpdateOne) ClearTimingAdvance() *SurveyCellScanUpdateOne {\n\tscsuo.timing_advance = nil\n\tscsuo.cleartiming_advance = true\n\treturn scsuo\n}",
"func StartOfflineCleanupTimer() {\n\t_offlineCleanupTimer = time.NewTimer(5 * time.Minute)\n\tgo func() {\n\t\tfor range _offlineCleanupTimer.C {\n\t\t\t// Reset the session count since the session is over\n\t\t\t_stats.SessionMaxViewerCount = 0\n\t\t\tresetDirectories()\n\t\t\ttransitionToOfflineVideoStreamContent()\n\t\t}\n\t}()\n}",
"func (r *PendingPodsRecorder) Clear() {\n\tr.recorder.Set(float64(0))\n}",
"func (t *Timer) Reset(d time.Duration) bool {\n\tif d <= 0 {\n\t\t// The standard time.Timer requires a positive delay.\n\t\tpanic(\"non-positive delay for Timer.Reset\")\n\t}\n\n\treturn t.reset(t.em.Now().Add(d))\n}",
"func (scsu *SurveyCellScanUpdate) ClearTimingAdvance() *SurveyCellScanUpdate {\n\tscsu.timing_advance = nil\n\tscsu.cleartiming_advance = true\n\treturn scsu\n}",
"func (s *Greeter) ResetCounters() {\n\ts.mutex.Lock()\n\ts.callCounts[Unary] = 0\n\ts.callCounts[ServerStream] = 0\n\ts.callCounts[ClientStream] = 0\n\ts.callCounts[Bidi] = 0\n\ts.mutex.Unlock()\n\n\tif s.Stats != nil {\n\t\ts.Stats.mutex.Lock()\n\t\ts.Stats.connCount = 0\n\t\ts.Stats.mutex.Unlock()\n\t}\n}",
"func (m *metricVec) Reset() { m.metricMap.Reset() }",
"func (b *B) StopTimer()",
"func (t *Tracker) Reset() {\n\t// acquire mutex\n\tt.mutex.Lock()\n\tdefer t.mutex.Unlock()\n\n\t// reset timestamp\n\tt.last = time.Now()\n}",
"func (tc *TestClient) ResetStats() {\n\ttc.Sent = 0\n\ttc.Received = 0\n\ttc.LastPongAt = time.Time{}\n}",
"func (c *channel) resetDeleteTimer(newDuration time.Duration) {\n\ta := c.activity\n\tif a.timer == nil {\n\t\ta.timer = time.AfterFunc(newDuration, func() {\n\t\t\tc.stan.sendDeleteChannelRequest(c)\n\t\t})\n\t} else {\n\t\ta.timer.Reset(newDuration)\n\t}\n\tif c.stan.debug {\n\t\tc.stan.log.Debugf(\"Channel %q delete timer set to fire in %v\", c.name, newDuration)\n\t}\n\ta.timerSet = true\n}",
"func (g *Game) resetFallingTimer() {\n\tg.fallingTimer.Reset(g.speed())\n}",
"func (tm *TimerManager) ClearTimeout(id int) bool {\n\n\tfor pos, t := range tm.timers {\n\t\tif t.id == id {\n\t\t\tcopy(tm.timers[pos:], tm.timers[pos+1:])\n\t\t\ttm.timers[len(tm.timers)-1] = timeout{}\n\t\t\ttm.timers = tm.timers[:len(tm.timers)-1]\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}",
"func (rd *ratedisp) reset() {\n\trd.start = time.Now()\n\trd.cnt = 0\n\trd.size = 0\n}",
"func (j *Job) Reset() {\n\tj.nextTicks.Set(j.timer.ticks.Val() + j.ticks)\n}",
"func (c *SwitchTicker) Reset() {\n\tatomic.StoreInt64(&c.failCount, 0)\n}",
"func (NilTimer) Stop() {}",
"func (plan *DeploymentPlan) ResetCounter() {\n\tplan.commandCounter = 0\n}",
"func (gta *GlobalTSOAllocator) Reset() {\n\ttsoAllocatorRole.WithLabelValues(gta.timestampOracle.dcLocation).Set(0)\n\tgta.timestampOracle.ResetTimestamp()\n}",
"func (c *standardResettingCounter) Clear() {\n\tatomic.StoreInt64(&c.count, 0)\n}",
"func (agent *Agent) initTimer() {\n\tif agent.HTTPTimer == nil {\n\t\tagent.HTTPTimer = metrics.NewTimer()\n\t}\n}",
"func (tf *TestFixture) Reset(ctx context.Context) error {\n\treturn nil\n}",
"func Unset() {\n\tmock = time.Time{}\n}",
"func (hmd *Hmd) ResetFrameTiming(frameIndex int) {\n\tC.ovrHmd_ResetFrameTiming(hmd.cptr(), C.uint(frameIndex))\n}",
"func (m *MockImpl) Reset() {\n\tm.recording = make([]Transaction, 0)\n\tm.simulateGetError = nil\n\tm.simulateAddError = nil\n}",
"func (l *Latency) UpdateReset(m Metadata) { l.update(m, false) }",
"func (m *MockImpl) Reset() {\n\tm.recording = make([]Transaction, 0)\n\tm.simulateGetError = nil\n\tm.simulateAddError = nil\n\tm.simulateUpdateError = nil\n}"
] | [
"0.6965523",
"0.66154885",
"0.65605336",
"0.64812165",
"0.64749223",
"0.6459077",
"0.6390338",
"0.6357963",
"0.6337308",
"0.63212603",
"0.62888485",
"0.6248405",
"0.6209185",
"0.6205625",
"0.61693007",
"0.60795414",
"0.6076847",
"0.6069499",
"0.58611387",
"0.58267415",
"0.58062667",
"0.5737648",
"0.5735189",
"0.5712818",
"0.5703188",
"0.56661433",
"0.56577003",
"0.565581",
"0.56324375",
"0.56303555",
"0.561308",
"0.5570119",
"0.55673194",
"0.5538449",
"0.5538449",
"0.5538449",
"0.5538449",
"0.5538449",
"0.5538449",
"0.5538449",
"0.5538449",
"0.553633",
"0.55220634",
"0.5517797",
"0.54991096",
"0.54935235",
"0.5493017",
"0.5490395",
"0.54610294",
"0.5450715",
"0.5447365",
"0.5445499",
"0.5440913",
"0.542542",
"0.5424488",
"0.5422482",
"0.54141635",
"0.5413841",
"0.5383733",
"0.53796065",
"0.5370521",
"0.53685725",
"0.5361608",
"0.5347371",
"0.53196615",
"0.53186476",
"0.5311819",
"0.5298101",
"0.52973115",
"0.5268806",
"0.52496856",
"0.5224246",
"0.52231216",
"0.52225304",
"0.52177346",
"0.5217043",
"0.5210295",
"0.5201777",
"0.51932126",
"0.5183672",
"0.51764464",
"0.51735073",
"0.516509",
"0.51511556",
"0.51412845",
"0.51374435",
"0.51286435",
"0.51282346",
"0.51152915",
"0.51109934",
"0.5104811",
"0.5098118",
"0.5080197",
"0.50626934",
"0.50490373",
"0.50451267",
"0.503858",
"0.5037475",
"0.50368726",
"0.50309837"
] | 0.6101085 | 15 |
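For reference, a minimal benchmark sketch showing how the b.ResetTimer call documented in the row above is typically used (buildIndex is a hypothetical, expensive setup helper, not part of the dataset): the timer is reset after setup so that only the measured loop contributes to the reported time and allocations.

package bench

import "testing"

// buildIndex stands in for an expensive setup step whose cost should
// not be charged to the benchmark loop. (Hypothetical helper.)
func buildIndex() map[int]int {
	m := make(map[int]int, 1<<16)
	for i := 0; i < 1<<16; i++ {
		m[i] = i * i
	}
	return m
}

func BenchmarkLookup(b *testing.B) {
	idx := buildIndex() // expensive setup, should not be measured

	b.ResetTimer() // zero elapsed time, allocation counters, and user-reported metrics
	for i := 0; i < b.N; i++ {
		_ = idx[i%(1<<16)]
	}
}

Run with `go test -bench=Lookup`; without the ResetTimer call, the setup cost and its allocations would be folded into the per-iteration numbers.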